File 5a6b36cd-3-x86-migrate-MSR_SPEC_CTRL.patch of Package xen.11298
# Commit 0cf2a4eb769302b7d7d7835540e7b2f15006df30
# Date 2018-01-26 14:10:21 +0000
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/migrate: Move MSR_SPEC_CTRL on migrate

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
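
The set and load paths below accept a SPEC_CTRL value only if no bits
other than IBRS and STIBP are set. A minimal standalone sketch of that
check, assuming the usual msr-index.h bit positions (IBRS is bit 0,
STIBP bit 1, per the Intel SDM); spec_ctrl_value_ok() is a hypothetical
name, not part of the patch:

    #include <stdbool.h>
    #include <stdint.h>

    #define SPEC_CTRL_IBRS  (1ULL << 0)
    #define SPEC_CTRL_STIBP (1ULL << 1)

    /*
     * STIBP is deliberately permitted even when not enumerated in
     * hardware, as it is architecturally specified to be a safe no-op
     * in that case.
     */
    static bool spec_ctrl_value_ok(uint64_t val)
    {
        return !(val & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP));
    }
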
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1187,7 +1187,8 @@ long arch_do_domctl(
         struct xen_domctl_vcpu_msrs *vmsrs = &domctl->u.vcpu_msrs;
         struct xen_domctl_vcpu_msr msr;
         struct vcpu *v;
-        uint32_t nr_msrs = 0;
+        uint32_t nr_msrs = 0, edx, dummy;
+        bool_t has_ibrsb;
 
         ret = -ESRCH;
         if ( (vmsrs->vcpu >= d->max_vcpus) ||
@@ -1203,6 +1204,10 @@ long arch_do_domctl(
         if ( boot_cpu_has(X86_FEATURE_DBEXT) )
             nr_msrs += 4;
 
+        domain_cpuid(d, 7, 0, &dummy, &dummy, &dummy, &edx);
+        has_ibrsb = !!(edx & cpufeat_mask(X86_FEATURE_IBRSB));
+        nr_msrs += has_ibrsb;
+
         if ( domctl->cmd == XEN_DOMCTL_get_vcpu_msrs )
         {
             ret = 0; copyback = 1;
@@ -1249,6 +1254,19 @@ long arch_do_domctl(
                     }
                 }
 
+                if ( has_ibrsb && v->arch.spec_ctrl )
+                {
+                    if ( i < vmsrs->msr_count && !ret )
+                    {
+                        msr.index = MSR_SPEC_CTRL;
+                        msr.reserved = 0;
+                        msr.value = v->arch.spec_ctrl;
+                        if ( copy_to_guest_offset(vmsrs->msrs, i, &msr, 1) )
+                            ret = -EFAULT;
+                    }
+                    ++i;
+                }
+
                 vcpu_unpause(v);
 
                 if ( i > vmsrs->msr_count && !ret )
@@ -1276,6 +1294,20 @@ long arch_do_domctl(
 
                 switch ( msr.index )
                 {
+                case MSR_SPEC_CTRL:
+                    if ( !boot_cpu_has(X86_FEATURE_IBRSB) )
+                        break; /* MSR available? */
+
+                    /*
+                     * Note: SPEC_CTRL_STIBP is specified as safe to use (i.e.
+                     * ignored) when STIBP isn't enumerated in hardware.
+                     */
+
+                    if ( msr.value & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP) )
+                        break;
+                    v->arch.spec_ctrl = msr.value;
+                    continue;
+
                 case MSR_AMD64_DR0_ADDRESS_MASK:
                     if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
                          (msr.value >> 32) )
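
In the get path above, one counter serves both emission and sizing: each
record is copied out only while it still fits, but i is always advanced,
so the final i > vmsrs->msr_count comparison reports a too-small caller
buffer (the surrounding Xen code reports that overflow as -ENOBUFS). A
self-contained sketch of the pattern under simplified assumptions;
emit_one() and get_msrs() are hypothetical stand-ins for the
copy_to_guest_offset() machinery:

    #include <errno.h>
    #include <stdint.h>

    struct vcpu_msr { uint32_t index, reserved; uint64_t value; };

    /* Append a record if it still fits; advance the counter regardless. */
    static void emit_one(struct vcpu_msr *buf, uint32_t buf_count,
                         uint32_t *i, uint32_t index, uint64_t value)
    {
        if ( *i < buf_count )
        {
            buf[*i].index = index;
            buf[*i].reserved = 0;
            buf[*i].value = value;
        }
        ++*i;
    }

    /* Returns the number of records, or -ENOBUFS if buf was too small. */
    static int get_msrs(struct vcpu_msr *buf, uint32_t buf_count,
                        int has_ibrsb, uint64_t spec_ctrl)
    {
        uint32_t i = 0;

        /* MSR_SPEC_CTRL (0x48) is emitted only when visible and non-zero. */
        if ( has_ibrsb && spec_ctrl )
            emit_one(buf, buf_count, &i, 0x48, spec_ctrl);

        return (i > buf_count) ? -ENOBUFS : (int)i;
    }
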
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -628,13 +628,23 @@ static bool_t vmx_set_guest_bndcfgs(stru
 
 static unsigned int __init vmx_init_msr(void)
 {
-    return cpu_has_mpx && cpu_has_vmx_mpx;
+    return !!boot_cpu_has(X86_FEATURE_IBRSB) +
+           (cpu_has_mpx && cpu_has_vmx_mpx);
 }
 
 static void vmx_save_msr(struct vcpu *v, struct hvm_msr *ctxt)
 {
+    uint32_t edx, dummy;
+
     vmx_vmcs_enter(v);
 
+    domain_cpuid(v->domain, 7, 0, &dummy, &dummy, &dummy, &edx);
+    if ( (edx & cpufeat_mask(X86_FEATURE_IBRSB)) && v->arch.spec_ctrl )
+    {
+        ctxt->msr[ctxt->count].index = MSR_SPEC_CTRL;
+        ctxt->msr[ctxt->count++].val = v->arch.spec_ctrl;
+    }
+
     if ( cpu_has_mpx && cpu_has_vmx_mpx )
     {
         __vmread(GUEST_BNDCFGS, &ctxt->msr[ctxt->count].val);
@@ -656,6 +666,19 @@ static int vmx_load_msr(struct vcpu *v,
     {
         switch ( ctxt->msr[i].index )
         {
+        case MSR_SPEC_CTRL:
+            if ( !boot_cpu_has(X86_FEATURE_IBRSB) )
+                err = -ENXIO; /* MSR available? */
+            /*
+             * Note: SPEC_CTRL_STIBP is specified as safe to use (i.e.
+             * ignored) when STIBP isn't enumerated in hardware.
+             */
+            else if ( ctxt->msr[i].val &
+                      ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP) )
+                err = -ENXIO;
+            else
+                v->arch.spec_ctrl = ctxt->msr[i].val;
+            break;
         case MSR_IA32_BNDCFGS:
             if ( !vmx_set_guest_bndcfgs(v, ctxt->msr[i].val) &&
                  ctxt->msr[i].val )
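
On the VMX side, vmx_init_msr() reserves one context slot per
conditionally-present MSR, vmx_save_msr() appends {index, val} records
(skipping a zero SPEC_CTRL so it costs nothing in the migration
stream), and vmx_load_msr() rejects an unknown index or reserved bits
with -ENXIO. A minimal sketch of that round trip; msr_ctxt,
save_spec_ctrl() and load_spec_ctrl() are simplified, hypothetical
stand-ins for the hvm_msr machinery:

    #include <errno.h>
    #include <stdint.h>

    #define MSR_SPEC_CTRL   0x00000048
    #define SPEC_CTRL_IBRS  (1ULL << 0)
    #define SPEC_CTRL_STIBP (1ULL << 1)

    struct msr_ctxt {
        uint32_t count;
        struct { uint32_t index; uint64_t val; } msr[4];
    };

    /* Save: append a record only when the MSR is present and non-zero. */
    static void save_spec_ctrl(struct msr_ctxt *ctxt, int has_ibrsb,
                               uint64_t spec_ctrl)
    {
        if ( has_ibrsb && spec_ctrl )
        {
            ctxt->msr[ctxt->count].index = MSR_SPEC_CTRL;
            ctxt->msr[ctxt->count++].val = spec_ctrl;
        }
    }

    /*
     * Load: fail if the destination host lacks IBRSB, or if any bit
     * other than IBRS/STIBP is set in the incoming value.
     */
    static int load_spec_ctrl(uint64_t *spec_ctrl, int has_ibrsb,
                              uint64_t val)
    {
        if ( !has_ibrsb || (val & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP)) )
            return -ENXIO;

        *spec_ctrl = val;
        return 0;
    }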