# Commit fa9eb09d446a1279f5e861e6b84fa8675dabf148
# Date 2018-05-16 12:19:10 +0100
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/spec_ctrl: Split X86_FEATURE_SC_MSR into PV and HVM variants

In order to separately control whether MSR_SPEC_CTRL is virtualised for PV and
HVM guests, split the feature used to control runtime alternatives into two.
Xen will use MSR_SPEC_CTRL itself if either of these features is active.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
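A minimal sketch of the resulting model (illustrative only, not part of the
patch; the helper name below is invented for clarity): Xen's own use of
MSR_SPEC_CTRL is keyed off either synthetic bit, exactly the test the
print_details() hunk below performs inline.

    /*
     * Hypothetical helper, assuming the X86_FEATURE_SC_MSR_{PV,HVM}
     * definitions this patch adds to asm-x86/cpufeature.h.
     */
    static inline int xen_uses_spec_ctrl_msr(void)
    {
        /* Either split feature means Xen writes MSR_SPEC_CTRL itself. */
        return boot_cpu_has(X86_FEATURE_SC_MSR_PV) ||
               boot_cpu_has(X86_FEATURE_SC_MSR_HVM);
    }
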
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -357,9 +357,12 @@ void __cpuinit identify_cpu(struct cpuin
if (test_bit(X86_FEATURE_IND_THUNK_JMP,
boot_cpu_data.x86_capability))
__set_bit(X86_FEATURE_IND_THUNK_JMP, c->x86_capability);
- if (test_bit(X86_FEATURE_SC_MSR,
+ if (test_bit(X86_FEATURE_SC_MSR_PV,
boot_cpu_data.x86_capability))
- __set_bit(X86_FEATURE_SC_MSR, c->x86_capability);
+ __set_bit(X86_FEATURE_SC_MSR_PV, c->x86_capability);
+ if (test_bit(X86_FEATURE_SC_MSR_HVM,
+ boot_cpu_data.x86_capability))
+ __set_bit(X86_FEATURE_SC_MSR_HVM, c->x86_capability);
if (test_bit(X86_FEATURE_SC_RSB_PV,
boot_cpu_data.x86_capability))
__set_bit(X86_FEATURE_SC_RSB_PV, c->x86_capability);
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -114,7 +114,8 @@ static void __init print_details(enum in
thunk == THUNK_RETPOLINE ? "RETPOLINE" :
thunk == THUNK_LFENCE ? "LFENCE" :
thunk == THUNK_JMP ? "JMP" : "?",
- boot_cpu_has(X86_FEATURE_SC_MSR) ?
+ (boot_cpu_has(X86_FEATURE_SC_MSR_PV) ||
+ boot_cpu_has(X86_FEATURE_SC_MSR_HVM)) ?
default_xen_spec_ctrl & SPEC_CTRL_IBRS ? " IBRS+" :
" IBRS-" : "",
opt_ibpb ? " IBPB" : "",
@@ -272,7 +273,8 @@ void __init init_speculation_mitigations
* need the IBRS entry/exit logic to virtualise IBRS support for
* guests.
*/
- __set_bit(X86_FEATURE_SC_MSR, boot_cpu_data.x86_capability);
+ __set_bit(X86_FEATURE_SC_MSR_PV, boot_cpu_data.x86_capability);
+ __set_bit(X86_FEATURE_SC_MSR_HVM, boot_cpu_data.x86_capability);
 
if ( ibrs )
default_xen_spec_ctrl |= SPEC_CTRL_IBRS;
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -67,8 +67,9 @@
#define X86_FEATURE_IND_THUNK_LFENCE (3*32+ 1) /* Use IND_THUNK_LFENCE */
#define X86_FEATURE_IND_THUNK_JMP (3*32+ 2) /* Use IND_THUNK_JMP */
#define X86_FEATURE_XEN_IBPB (3*32+ 3) /* IBRSB || IBPB */
-#define X86_FEATURE_SC_MSR (3*32+ 4) /* MSR_SPEC_CTRL used by Xen */
-#define X86_FEATURE_SC_MSR_IDLE (3*32+ 5) /* SC_MSR && default_xen_spec_ctrl */
+#define X86_FEATURE_SC_MSR_PV (3*32+ 4) /* MSR_SPEC_CTRL used by Xen for PV */
+#define X86_FEATURE_SC_MSR_HVM (3*32+ 5) /* MSR_SPEC_CTRL used by Xen for HVM */
+#define X86_FEATURE_SC_MSR_IDLE (3*32+ 6) /* (SC_MSR_PV || SC_MSR_HVM) && default_xen_spec_ctrl */
#define X86_FEATURE_MFENCE_RDTSC (3*32+ 7) /* MFENCE synchronizes RDTSC */
#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_NONSTOP_TSC (3*32+ 9) /* TSC does not stop in C states */
--- a/xen/include/asm-x86/spec_ctrl_asm.h
+++ b/xen/include/asm-x86/spec_ctrl_asm.h
@@ -223,36 +223,36 @@
ALTERNATIVE __stringify(ASM_NOP40), \
DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM; \
ALTERNATIVE __stringify(ASM_NOP34), \
- DO_SPEC_CTRL_ENTRY_FROM_HVM, X86_FEATURE_SC_MSR
+ DO_SPEC_CTRL_ENTRY_FROM_HVM, X86_FEATURE_SC_MSR_HVM
 
/* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */
#define SPEC_CTRL_ENTRY_FROM_PV \
ALTERNATIVE __stringify(ASM_NOP40), \
DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV; \
ALTERNATIVE __stringify(ASM_NOP25), \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=0), X86_FEATURE_SC_MSR
+ __stringify(DO_SPEC_CTRL_ENTRY maybexen=0), X86_FEATURE_SC_MSR_PV
 
/* Use in interrupt/exception context. May interrupt Xen or PV context. */
#define SPEC_CTRL_ENTRY_FROM_INTR \
ALTERNATIVE __stringify(ASM_NOP40), \
DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV; \
ALTERNATIVE __stringify(ASM_NOP39), \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=1), X86_FEATURE_SC_MSR
+ __stringify(DO_SPEC_CTRL_ENTRY maybexen=1), X86_FEATURE_SC_MSR_PV
 
/* Use when exiting to Xen context. */
#define SPEC_CTRL_EXIT_TO_XEN \
ALTERNATIVE __stringify(ASM_NOP23), \
- DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_SC_MSR
+ DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_SC_MSR_PV
 
/* Use when exiting to PV guest context. */
#define SPEC_CTRL_EXIT_TO_PV \
ALTERNATIVE __stringify(ASM_NOP24), \
- DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
+ DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_PV
 
/* Use when exiting to HVM guest context. */
#define SPEC_CTRL_EXIT_TO_HVM \
ALTERNATIVE __stringify(ASM_NOP24), \
- DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
+ DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR_HVM
 
/*
* Use in IST interrupt/exception context. May interrupt Xen or PV context.