File 5b3cab8f-5-VMX-load-only-guest-MSR-entries.patch of Package xen.8389
# Commit 1ac46b55632626aeb935726e1b0a71605ef6763a
# Date 2018-07-04 12:12:15 +0100
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/vmx: Support load-only guest MSR list entries
Currently, the VMX_MSR_GUEST type maintains completely symmetric guest load
and save lists, by pointing VM_EXIT_MSR_STORE_ADDR and VM_ENTRY_MSR_LOAD_ADDR
at the same page, and setting VM_EXIT_MSR_STORE_COUNT and
VM_ENTRY_MSR_LOAD_COUNT to the same value.
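In code terms, the symmetric arrangement amounts to the following (a condensed
sketch; 'addr' stands for the machine address of the shared msr_area page):

    __vmwrite(VM_EXIT_MSR_STORE_ADDR,  addr);
    __vmwrite(VM_ENTRY_MSR_LOAD_ADDR,  addr);
    __vmwrite(VM_EXIT_MSR_STORE_COUNT, vmx->msr_count);
    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_count);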
However, for MSRs which we won't let the guest have direct access to, having
hardware save the current value on VMExit is unnecessary overhead.
To avoid this overhead, we must make the load and save lists asymmetric. By
making the entry load count greater than the exit store count, we can maintain
two adjacent lists of MSRs, the first of which is saved and restored, and the
second of which is only restored on VMEntry.
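Concretely, the shared msr_area page ends up holding two adjacent sublists
(a sketch of the layout; ranges are entry indices within the page):

    [0, msr_save_count)               saved on VMExit, loaded on VMEntry
    [msr_save_count, msr_load_count)  loaded on VMEntry only

    VM_EXIT_MSR_STORE_COUNT = msr_save_count
    VM_ENTRY_MSR_LOAD_COUNT = msr_load_count   (load count >= save count)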
For simplicity:
* Both adjacent lists are still sorted by MSR index.
* It is undefined behaviour to insert the same MSR into both lists.
* The total size of both lists is still limited to 256 entries (one 4k page).
Split the current msr_count field into msr_{load,save}_count, introduce a new
VMX_MSR_GUEST_LOADONLY type, and update vmx_{add,find}_msr() to calculate which
sublist to search based on the type.  VMX_MSR_HOST has no logical sublist,
whereas VMX_MSR_GUEST has a sublist between 0 and the save count, while
VMX_MSR_GUEST_LOADONLY has a sublist between the save count and the load
count.
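Condensed from the hunks below, the sublist selection boils down to:

    switch ( type )
    {
    case VMX_MSR_HOST:
        substart = 0;
        subend   = vmx->host_msr_count;
        total    = subend;
        break;

    case VMX_MSR_GUEST:
        substart = 0;
        subend   = vmx->msr_save_count;
        total    = vmx->msr_load_count;
        break;

    case VMX_MSR_GUEST_LOADONLY:
        substart = vmx->msr_save_count;
        subend   = vmx->msr_load_count;
        total    = subend;
        break;
    }

    ent = locate_msr_entry(start + substart, start + subend, msr);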
One subtle point is that inserting an MSR into the load-save list involves
moving the entire load-only list, and updating both counts.
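As a sketch (assuming the pre-existing memmove-based insertion in
vmx_add_msr(), which the hunks below don't show): 'total' for the GUEST type
is the full load count, so shifting entries up to 'end' carries the load-only
tail along, after which both VMCS counts get bumped:

    /* end == start + total, so this also shifts the load-only tail. */
    memmove(ent + 1, ent, sizeof(*ent) * (end - ent));
    ent->index = msr;

    __vmwrite(VM_EXIT_MSR_STORE_COUNT, ++vmx->msr_save_count);
    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, ++vmx->msr_load_count);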
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1313,7 +1313,7 @@ struct vmx_msr_entry *vmx_find_msr(const
{
const struct arch_vmx_struct *vmx = &v->arch.hvm_vmx;
struct vmx_msr_entry *start = NULL, *ent, *end;
- unsigned int total;
+ unsigned int substart, subend, total;
ASSERT(v == current || !vcpu_runnable(v));
@@ -1321,12 +1321,23 @@ struct vmx_msr_entry *vmx_find_msr(const
{
case VMX_MSR_HOST:
start = vmx->host_msr_area;
- total = vmx->host_msr_count;
+ substart = 0;
+ subend = vmx->host_msr_count;
+ total = subend;
break;
case VMX_MSR_GUEST:
start = vmx->msr_area;
- total = vmx->msr_count;
+ substart = 0;
+ subend = vmx->msr_save_count;
+ total = vmx->msr_load_count;
+ break;
+
+ case VMX_MSR_GUEST_LOADONLY:
+ start = vmx->msr_area;
+ substart = vmx->msr_save_count;
+ subend = vmx->msr_load_count;
+ total = subend;
break;
default:
@@ -1337,7 +1348,7 @@ struct vmx_msr_entry *vmx_find_msr(const
return NULL;
end = start + total;
- ent = locate_msr_entry(start, end, msr);
+ ent = locate_msr_entry(start + substart, start + subend, msr);
return ((ent < end) && (ent->index == msr)) ? ent : NULL;
}
@@ -1347,7 +1358,7 @@ int vmx_add_msr(struct vcpu *v, uint32_t
{
struct arch_vmx_struct *vmx = &v->arch.hvm_vmx;
struct vmx_msr_entry **ptr, *start = NULL, *ent, *end;
- unsigned int total;
+ unsigned int substart, subend, total;
int rc;
ASSERT(v == current || !vcpu_runnable(v));
@@ -1356,12 +1367,23 @@ int vmx_add_msr(struct vcpu *v, uint32_t
{
case VMX_MSR_HOST:
ptr = &vmx->host_msr_area;
- total = vmx->host_msr_count;
+ substart = 0;
+ subend = vmx->host_msr_count;
+ total = subend;
break;
case VMX_MSR_GUEST:
ptr = &vmx->msr_area;
- total = vmx->msr_count;
+ substart = 0;
+ subend = vmx->msr_save_count;
+ total = vmx->msr_load_count;
+ break;
+
+ case VMX_MSR_GUEST_LOADONLY:
+ ptr = &vmx->msr_area;
+ substart = vmx->msr_save_count;
+ subend = vmx->msr_load_count;
+ total = subend;
break;
default:
@@ -1391,6 +1413,7 @@ int vmx_add_msr(struct vcpu *v, uint32_t
break;
case VMX_MSR_GUEST:
+ case VMX_MSR_GUEST_LOADONLY:
__vmwrite(VM_EXIT_MSR_STORE_ADDR, addr);
__vmwrite(VM_ENTRY_MSR_LOAD_ADDR, addr);
break;
@@ -1399,7 +1422,7 @@ int vmx_add_msr(struct vcpu *v, uint32_t
start = *ptr;
end = start + total;
- ent = locate_msr_entry(start, end, msr);
+ ent = locate_msr_entry(start + substart, start + subend, msr);
if ( (ent < end) && (ent->index == msr) )
goto found;
@@ -1423,8 +1446,11 @@ int vmx_add_msr(struct vcpu *v, uint32_t
break;
case VMX_MSR_GUEST:
- __vmwrite(VM_EXIT_MSR_STORE_COUNT, ++vmx->msr_count);
- __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_count);
+ __vmwrite(VM_EXIT_MSR_STORE_COUNT, ++vmx->msr_save_count);
+
+ /* Fallthrough */
+ case VMX_MSR_GUEST_LOADONLY:
+ __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, ++vmx->msr_load_count);
break;
}
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -4227,7 +4227,7 @@ out:
static void lbr_tsx_fixup(void)
{
struct vcpu *curr = current;
- unsigned int msr_count = curr->arch.hvm_vmx.msr_count;
+ unsigned int msr_count = curr->arch.hvm_vmx.msr_save_count;
struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
struct vmx_msr_entry *msr;
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -139,7 +139,8 @@ struct arch_vmx_struct {
*/
struct vmx_msr_entry *msr_area;
struct vmx_msr_entry *host_msr_area;
- unsigned int msr_count;
+ unsigned int msr_load_count;
+ unsigned int msr_save_count;
unsigned int host_msr_count;
unsigned long eoi_exitmap_changed;
@@ -539,12 +540,16 @@ enum vmx_insn_errno
enum vmx_msr_list_type {
VMX_MSR_HOST, /* MSRs loaded on VMExit. */
VMX_MSR_GUEST, /* MSRs saved on VMExit, loaded on VMEntry. */
+ VMX_MSR_GUEST_LOADONLY, /* MSRs loaded on VMEntry only. */
};
/**
* Add an MSR to an MSR list (inserting space for the entry if necessary), and
* set the MSRs value.
*
+ * It is undefined behaviour to try to insert the same MSR into both the
+ * GUEST and GUEST_LOADONLY lists.
+ *
* May fail if unable to allocate memory for the list, or the total number of
* entries exceeds the memory allocated.
*/