# Commit ea58a679a6190e714a592f1369b660769a48a80c
# Date 2018-01-26 14:10:21 +0000
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/msr: Emulation of MSR_{SPEC_CTRL,PRED_CMD} for guests

As per the spec currently available here:
https://software.intel.com/sites/default/files/managed/c5/63/336996-Speculative-Execution-Side-Channel-Mitigations.pdf

MSR_ARCH_CAPABILITIES will only come into existence on new hardware, but is
implemented as a straight #GP for now to avoid being leaky when new hardware
arrives.
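
An illustrative sketch of the intended guest usage (wrmsrl() and cpuid_7_edx
here stand in for the guest kernel's own MSR-write and CPUID primitives;
constants as per msr-index.h):

    if ( cpuid_7_edx & cpufeat_mask(X86_FEATURE_IBRSB) )
    {
        /* Restrict indirect branch speculation, e.g. around kernel entry. */
        wrmsrl(MSR_SPEC_CTRL, SPEC_CTRL_IBRS);

        /* Discard earlier indirect branch predictions, e.g. on context
         * switch between mutually-distrusting processes. */
        wrmsrl(MSR_PRED_CMD, PRED_CMD_IBPB);
    }

Reads of MSR_PRED_CMD and any access to MSR_ARCH_CAPABILITIES yield #GP,
matching the cases below.
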
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3227,6 +3227,21 @@ int hvm_msr_read_intercept(unsigned int
*msr_content = var_range_base[index];
break;
+ case MSR_PRED_CMD:
+ /* Write-only */
+ goto gp_fault;
+
+ case MSR_SPEC_CTRL:
+ hvm_cpuid(7, NULL, NULL, NULL, &edx);
+ if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) )
+ goto gp_fault;
+ *msr_content = v->arch.spec_ctrl;
+ break;
+
+ case MSR_ARCH_CAPABILITIES:
+ /* Not implemented yet. */
+ goto gp_fault;
+
case MSR_K8_ENABLE_C1E:
case MSR_AMD64_NB_CFG:
/*
@@ -3263,7 +3278,7 @@ int hvm_msr_write_intercept(unsigned int
{
struct vcpu *v = current;
bool_t mtrr;
- unsigned int edx, index;
+ unsigned int edx, ebx, index;
int ret = X86EMUL_OKAY;
HVMTRACE_3D(MSR_WRITE, msr,
@@ -3356,6 +3371,41 @@ int hvm_msr_write_intercept(unsigned int
goto gp_fault;
break;
+ case MSR_SPEC_CTRL:
+ hvm_cpuid(7, NULL, NULL, NULL, &edx);
+ if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) )
+ goto gp_fault; /* MSR available? */
+
+ /*
+ * Note: SPEC_CTRL_STIBP is specified as safe to use (i.e. ignored)
+ * when STIBP isn't enumerated in hardware.
+ */
+
+ if ( msr_content & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP) )
+ goto gp_fault; /* Rsvd bit set? */
+
+ v->arch.spec_ctrl = msr_content;
+ break;
+
+ case MSR_PRED_CMD:
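+ /*
+ * IBPB is enumerated on Intel via CPUID.7:EDX.IBRSB and on AMD via
+ * CPUID.0x80000008:EBX.IBPB, so accept either enumeration.
+ */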
+ hvm_cpuid(7, NULL, NULL, NULL, &edx);
+ hvm_cpuid(0x80000008, NULL, &ebx, NULL, NULL);
+ if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) &&
+ !(ebx & cpufeat_mask(X86_FEATURE_IBPB)) )
+ goto gp_fault; /* MSR available? */
+
+ /*
+ * The only defined behaviour is when writing PRED_CMD_IBPB. In
+ * practice, real hardware accepts any value without faulting.
+ */
+ if ( msr_content & PRED_CMD_IBPB )
+ wrmsrl(MSR_PRED_CMD, PRED_CMD_IBPB);
+ break;
+
+ case MSR_ARCH_CAPABILITIES:
+ /* Read-only */
+ goto gp_fault;
+
case MSR_AMD64_NB_CFG:
/* ignore the write */
break;
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -729,6 +729,17 @@ int cpuid_hypervisor_leaves( uint32_t id
return 1;
}
+static void _domain_cpuid(struct domain *currd,
+ unsigned int leaf, unsigned int subleaf,
+ unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
+{
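+ /*
+ * The control and hardware domains are not subject to a CPUID policy and
+ * see the real hardware values; all other domains see their
+ * toolstack-adjusted values.
+ */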
+ if ( !is_control_domain(currd) && !is_hardware_domain(currd) )
+ domain_cpuid(currd, leaf, subleaf, eax, ebx, ecx, edx);
+ else
+ cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
+}
+
void pv_cpuid(struct cpu_user_regs *regs)
{
uint32_t a, b, c, d;
@@ -2449,6 +2460,8 @@ static int emulate_privileged_op(struct
msr_content = ((uint64_t)edx << 32) | eax;
switch ( (u32)regs->ecx )
{
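+ /* Scratch CPUID outputs for the SPEC_CTRL/PRED_CMD cases below. */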
+ uint32_t ebx, dummy;
+
case MSR_FS_BASE:
if ( is_pv_32on64_vcpu(v) || !is_canonical_address(msr_content) )
goto fail;
@@ -2567,6 +2580,41 @@ static int emulate_privileged_op(struct
if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
goto fail;
break;
+ case MSR_ARCH_CAPABILITIES:
+ /* The MSR is read-only. */
+ goto fail;
+
+ case MSR_SPEC_CTRL:
+ _domain_cpuid(v->domain, 7, 0, &dummy, &dummy, &dummy, &edx);
+ if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) )
+ goto fail; /* MSR available? */
+
+ /*
+ * Note: SPEC_CTRL_STIBP is specified as safe to use (i.e. ignored)
+ * when STIBP isn't enumerated in hardware.
+ */
+
+ if ( msr_content & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP) )
+ goto fail; /* Rsvd bit set? */
+
+ v->arch.spec_ctrl = eax; /* == msr_content; high half checked zero above. */
+ break;
+
+ case MSR_PRED_CMD:
+ _domain_cpuid(v->domain, 7, 0, &dummy, &dummy, &dummy, &edx);
+ _domain_cpuid(v->domain, 0x80000008, 0, &dummy, &ebx, &dummy,
+ &dummy);
+ if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) &&
+ !(ebx & cpufeat_mask(X86_FEATURE_IBPB)) )
+ goto fail; /* MSR available? */
+
+ /*
+ * The only defined behaviour is when writing PRED_CMD_IBPB. In
+ * practice, real hardware accepts any value without faulting.
+ */
+ if ( eax & PRED_CMD_IBPB ) /* Bit 0, so the low half is sufficient. */
+ wrmsrl(MSR_PRED_CMD, PRED_CMD_IBPB);
+ break;
default:
if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) == 1 )
break;
@@ -2600,6 +2648,8 @@ static int emulate_privileged_op(struct
case 0x32: /* RDMSR */
switch ( (u32)regs->ecx )
{
+ uint32_t edx, dummy;
+
case MSR_FS_BASE:
if ( is_pv_32on64_vcpu(v) )
goto fail;
@@ -2654,6 +2704,21 @@ static int emulate_privileged_op(struct
regs->eax = (uint32_t)msr_content;
regs->edx = (uint32_t)(msr_content >> 32);
break;
+ case MSR_PRED_CMD:
+ /* Write-only */
+ goto fail;
+
+ case MSR_SPEC_CTRL:
+ _domain_cpuid(v->domain, 7, 0, &dummy, &dummy, &dummy, &edx);
+ if ( !(edx & cpufeat_mask(X86_FEATURE_IBRSB)) )
+ goto fail;
+ regs->eax = v->arch.spec_ctrl;
+ regs->edx = 0;
+ break;
+
+ case MSR_ARCH_CAPABILITIES:
+ /* Not implemented yet. */
+ goto fail;
default:
if ( rdmsr_hypervisor_regs(regs->ecx, &val) )
{
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -475,6 +475,8 @@ struct arch_vcpu
struct paging_vcpu paging;
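+ /* Guest-visible value of MSR_SPEC_CTRL. */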
+ uint32_t spec_ctrl;
+
uint32_t gdbsx_vcpu_event;
/* A secondary copy of the vcpu time info. */
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -39,6 +39,8 @@
#define MSR_PRED_CMD 0x00000049
#define PRED_CMD_IBPB (_AC(1, ULL) << 0)
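+/* Enumeration of hardware mitigation capabilities; no flag bits used yet. */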
+#define MSR_ARCH_CAPABILITIES 0x0000010a
+
/* Intel MSRs. Some also available on other CPUs */
#define MSR_IA32_PERFCTR0 0x000000c1
#define MSR_IA32_A_PERFCTR0 0x000004c1