# Commit 969a57f73f6b011b2ebf4c0ab1715efc65837335
# Date 2022-01-27 17:59:18 +0000
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/msr: Fix migration compatibility issue with MSR_SPEC_CTRL

This bug existed briefly in early 2018, between MSR_SPEC_CTRL arriving in
microcode and SSBD arriving a few months later.  It went unnoticed,
presumably because everyone was busy rebooting everything.

The same bug will reappear when adding PSFD support.

Clamp the guest MSR_SPEC_CTRL value to that permitted by CPUID on migrate.
The guest is already playing with reserved bits at this point, and clamping
the value will prevent a migration to a less capable host from failing.
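
To illustrate the effect outside of Xen, a minimal standalone sketch: the
SPEC_CTRL_* bit values mirror asm-x86/msr-index.h, and valid_bits() is a
hypothetical stand-in for the msr_spec_ctrl_valid_bits() helper introduced
below.

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SPEC_CTRL_IBRS  (1u << 0)
    #define SPEC_CTRL_STIBP (1u << 1)
    #define SPEC_CTRL_SSBD  (1u << 2)

    /* STIBP is architecturally safe to write even when not enumerated, so
     * it is always tolerated; SSBD only when the CPUID policy has it. */
    static uint64_t valid_bits(bool ssbd)
    {
        return SPEC_CTRL_IBRS | SPEC_CTRL_STIBP |
               (ssbd ? SPEC_CTRL_SSBD : 0);
    }

    int main(void)
    {
        /* A guest which learnt to use SSBD on a capable host ... */
        uint64_t val = SPEC_CTRL_IBRS | SPEC_CTRL_SSBD;

        /* ... has the bit discarded before the record is written into
         * the migration stream, so the destination sees 0x1, not 0x5. */
        printf("%#" PRIx64 "\n", val & valid_bits(false));
        return 0;
    }
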
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1370,6 +1370,7 @@ static const uint32_t msrs_to_send[] = {
 
 static int hvm_save_cpu_msrs(struct vcpu *v, hvm_domain_context_t *h)
 {
+    const struct domain *d = v->domain;
     struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
     struct hvm_msr *ctxt;
     unsigned int i;
@@ -1385,7 +1386,8 @@ static int hvm_save_cpu_msrs(struct vcpu
     for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
     {
         uint64_t val;
-        int rc = guest_rdmsr(v, msrs_to_send[i], &val);
+        unsigned int msr = msrs_to_send[i];
+        int rc = guest_rdmsr(v, msr, &val);
 
         /*
          * It is the programmers responsibility to ensure that
@@ -1405,7 +1407,26 @@ static int hvm_save_cpu_msrs(struct vcpu
         if ( !val )
             continue; /* Skip empty MSRs. */
 
-        ctxt->msr[ctxt->count].index = msrs_to_send[i];
+        /*
+         * Guests are given full access to certain MSRs for performance
+         * reasons.  A consequence is that Xen is unable to enforce that all
+         * bits disallowed by the CPUID policy yield #GP, and an enterprising
+         * guest may be able to set and use a bit it ought to leave alone.
+         *
+         * When migrating from a more capable host to a less capable one, such
+         * bits may be rejected by the destination, and the migration fails.
+         *
+         * Discard such bits here on the source side.  Such bits have reserved
+         * behaviour, and the guest has only itself to blame.
+         */
+        switch ( msr )
+        {
+        case MSR_SPEC_CTRL:
+            val &= msr_spec_ctrl_valid_bits(d->arch.cpuid);
+            break;
+        }
+
+        ctxt->msr[ctxt->count].index = msr;
         ctxt->msr[ctxt->count++].val = val;
     }
 
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -267,6 +267,8 @@ static inline void wrmsr_tsc_aux(uint32_
     }
 }
 
+uint64_t msr_spec_ctrl_valid_bits(const struct cpuid_policy *cp);
+
 extern struct msr_policy raw_msr_policy,
                          host_msr_policy,
                          pv_max_msr_policy,
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -360,6 +360,24 @@ int guest_rdmsr(struct vcpu *v, uint32_t
     return X86EMUL_EXCEPTION;
 }
 
+/*
+ * Caller to confirm that MSR_SPEC_CTRL is available.  Intel and AMD have
+ * separate CPUID features for this functionality, but only one set will
+ * be active.
+ */
+uint64_t msr_spec_ctrl_valid_bits(const struct cpuid_policy *cp)
+{
+    bool ssbd = cp->feat.ssbd;
+
+    /*
+     * Note: SPEC_CTRL_STIBP is specified as safe to use (i.e. ignored)
+     * when STIBP isn't enumerated in hardware.
+     */
+    return (SPEC_CTRL_IBRS | SPEC_CTRL_STIBP |
+            (ssbd ? SPEC_CTRL_SSBD : 0) |
+            0);
+}
+
 int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
 {
     const struct vcpu *curr = current;
@@ -442,18 +460,9 @@ int guest_wrmsr(struct vcpu *v, uint32_t
         break;
 
     case MSR_SPEC_CTRL:
-        if ( !cp->feat.ibrsb )
-            goto gp_fault; /* MSR available? */
-
-        /*
-         * Note: SPEC_CTRL_STIBP is specified as safe to use (i.e. ignored)
-         * when STIBP isn't enumerated in hardware.
-         */
-        rsvd = ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP |
-                 (cp->feat.ssbd ? SPEC_CTRL_SSBD : 0));
-
-        if ( val & rsvd )
-            goto gp_fault; /* Rsvd bit set? */
+        if ( !cp->feat.ibrsb ||
+             (val & ~msr_spec_ctrl_valid_bits(cp)) )
+            goto gp_fault;
         goto set_reg;
 
     case MSR_PRED_CMD: