File 5afc13ae-4-x86-fold-XEN_IBRS-ALTERNATIVES.patch of Package xen.11298
# Commit af949407eaba7af71067f23d5866cd0bf1f1144d
# Date 2018-05-16 12:19:10 +0100
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/spec_ctrl: Fold the XEN_IBRS_{SET,CLEAR} ALTERNATIVES together

Currently, the SPEC_CTRL_{ENTRY,EXIT}_* macros encode Xen's choice of
MSR_SPEC_CTRL as an immediate constant, and choose between IBRS or not by
doubling up the entire alternative block.

There is now a variable holding Xen's choice of value, so use that and
simplify the alternatives.
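
To illustrate the shape of the change, here is a minimal standalone C
sketch (not Xen code; wrmsr_spec_ctrl, sc_msr_in_use and the boot-time
setup below are invented for the example): instead of two alternative
blocks each carrying a hard-coded immediate, a single block reads Xen's
boot-time choice from a variable.

    #include <stdint.h>
    #include <stdio.h>

    #define SPEC_CTRL_IBRS (1u << 0)   /* IBRS bit within MSR_SPEC_CTRL */

    /* Boot-time choice of value, analogous to default_xen_spec_ctrl. */
    static uint8_t default_xen_spec_ctrl;
    /* Whether Xen uses MSR_SPEC_CTRL at all, analogous to X86_FEATURE_SC_MSR. */
    static int sc_msr_in_use;

    static void wrmsr_spec_ctrl(uint32_t val)
    {
        printf("wrmsr(MSR_SPEC_CTRL, %#x)\n", val); /* stand-in for WRMSR */
    }

    /*
     * Single "patched" site: nothing happens unless SC_MSR is in use, and
     * the value written comes from the variable rather than from an
     * immediate baked into one of two copies of the code.
     */
    static void spec_ctrl_exit_idle(void)
    {
        if ( sc_msr_in_use )
            wrmsr_spec_ctrl(default_xen_spec_ctrl);
    }

    int main(void)
    {
        sc_msr_in_use = 1;

        default_xen_spec_ctrl = SPEC_CTRL_IBRS;   /* boot chose "ibrs" */
        spec_ctrl_exit_idle();

        default_xen_spec_ctrl = 0;                /* boot chose "no-ibrs" */
        spec_ctrl_exit_idle();

        return 0;
    }

The real code gets the same effect with ALTERNATIVE patching keyed on
X86_FEATURE_SC_MSR, so the untaken path costs only NOPs.
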
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -357,13 +357,9 @@ void __cpuinit identify_cpu(struct cpuin
if (test_bit(X86_FEATURE_IND_THUNK_JMP,
boot_cpu_data.x86_capability))
__set_bit(X86_FEATURE_IND_THUNK_JMP, c->x86_capability);
- if (test_bit(X86_FEATURE_XEN_IBRS_SET,
+ if (test_bit(X86_FEATURE_SC_MSR,
boot_cpu_data.x86_capability))
- __set_bit(X86_FEATURE_XEN_IBRS_SET, c->x86_capability);
- if (test_bit(X86_FEATURE_XEN_IBRS_CLEAR,
- boot_cpu_data.x86_capability))
- __set_bit(X86_FEATURE_XEN_IBRS_CLEAR,
- c->x86_capability);
+ __set_bit(X86_FEATURE_SC_MSR, c->x86_capability);
if (test_bit(X86_FEATURE_RSB_NATIVE,
boot_cpu_data.x86_capability))
__set_bit(X86_FEATURE_RSB_NATIVE, c->x86_capability);
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -114,8 +114,9 @@ static void __init print_details(enum in
thunk == THUNK_RETPOLINE ? "RETPOLINE" :
thunk == THUNK_LFENCE ? "LFENCE" :
thunk == THUNK_JMP ? "JMP" : "?",
- boot_cpu_has(X86_FEATURE_XEN_IBRS_SET) ? " IBRS+" :
- boot_cpu_has(X86_FEATURE_XEN_IBRS_CLEAR) ? " IBRS-" : "",
+ boot_cpu_has(X86_FEATURE_SC_MSR) ?
+ default_xen_spec_ctrl & SPEC_CTRL_IBRS ? " IBRS+" :
+ " IBRS-" : "",
opt_ibpb ? " IBPB" : "",
boot_cpu_has(X86_FEATURE_RSB_NATIVE) ? " RSB_NATIVE" : "",
boot_cpu_has(X86_FEATURE_RSB_VMEXIT) ? " RSB_VMEXIT" : "");
@@ -271,13 +272,10 @@ void __init init_speculation_mitigations
* need the IBRS entry/exit logic to virtualise IBRS support for
* guests.
*/
+ __set_bit(X86_FEATURE_SC_MSR, boot_cpu_data.x86_capability);
+
if ( ibrs )
- {
default_xen_spec_ctrl |= SPEC_CTRL_IBRS;
- __set_bit(X86_FEATURE_XEN_IBRS_SET, boot_cpu_data.x86_capability);
- }
- else
- __set_bit(X86_FEATURE_XEN_IBRS_CLEAR, boot_cpu_data.x86_capability);
default_spec_ctrl_flags |= SCF_ist_wrmsr;
}
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -67,8 +67,7 @@
#define X86_FEATURE_IND_THUNK_LFENCE (3*32+ 1) /* Use IND_THUNK_LFENCE */
#define X86_FEATURE_IND_THUNK_JMP (3*32+ 2) /* Use IND_THUNK_JMP */
#define X86_FEATURE_XEN_IBPB (3*32+ 3) /* IBRSB || IBPB */
-#define X86_FEATURE_XEN_IBRS_SET (3*32+ 4) /* IBRSB && IRBS set in Xen */
-#define X86_FEATURE_XEN_IBRS_CLEAR (3*32+ 5) /* IBRSB && IBRS clear in Xen */
+#define X86_FEATURE_SC_MSR (3*32+ 4) /* MSR_SPEC_CTRL used by Xen */
#define X86_FEATURE_MFENCE_RDTSC (3*32+ 7) /* MFENCE synchronizes RDTSC */
#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_NONSTOP_TSC (3*32+ 9) /* TSC does not stop in C states */
--- a/xen/include/asm-x86/nops.h
+++ b/xen/include/asm-x86/nops.h
@@ -61,11 +61,12 @@
#define ASM_NOP7 _ASM_MK_NOP(K8_NOP7)
#define ASM_NOP8 _ASM_MK_NOP(K8_NOP8)
-#define ASM_NOP22 ASM_NOP8; ASM_NOP8; ASM_NOP6
#define ASM_NOP23 ASM_NOP8; ASM_NOP8; ASM_NOP7
#define ASM_NOP24 ASM_NOP8; ASM_NOP8; ASM_NOP8
-#define ASM_NOP32 ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP8
-#define ASM_NOP36 ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP4
+#define ASM_NOP25 ASM_NOP8; ASM_NOP8; ASM_NOP7; ASM_NOP2
+#define ASM_NOP33 ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP7; ASM_NOP2
+#define ASM_NOP34 ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP2
+#define ASM_NOP39 ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP7
#define ASM_NOP40 ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP8
#define ASM_NOP_MAX 8
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -54,14 +54,16 @@ static always_inline void spec_ctrl_ente
barrier();
info->spec_ctrl_flags |= SCF_use_shadow;
barrier();
- asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_XEN_IBRS_SET)
- :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" );
+ asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", %c3)
+ :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0),
+ "i" (X86_FEATURE_SC_MSR)
+ : "memory" );
}
/* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */
static always_inline void spec_ctrl_exit_idle(struct cpu_info *info)
{
- uint32_t val = SPEC_CTRL_IBRS;
+ uint32_t val = info->xen_spec_ctrl;
/*
* Disable shadowing before updating the MSR. There are no SMP issues
@@ -69,8 +71,10 @@ static always_inline void spec_ctrl_exit
*/
info->spec_ctrl_flags &= ~SCF_use_shadow;
barrier();
- asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_XEN_IBRS_SET)
- :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" );
+ asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", %c3)
+ :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0),
+ "i" (X86_FEATURE_SC_MSR)
+ : "memory" );
}
#endif /* !__X86_SPEC_CTRL_H__ */
--- a/xen/include/asm-x86/spec_ctrl_asm.h
+++ b/xen/include/asm-x86/spec_ctrl_asm.h
@@ -117,7 +117,7 @@
mov %\tmp, %rsp /* Restore old %rsp */
.endm
-.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT ibrs_val:req
+.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT
/*
* Requires %rbx=current, %rsp=regs/cpuinfo
* Clobbers %rax, %rcx, %rdx
@@ -137,11 +137,11 @@
andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)
/* Load Xen's intended value. */
- mov $\ibrs_val, %eax
+ movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
wrmsr
.endm
-.macro DO_SPEC_CTRL_ENTRY maybexen:req ibrs_val:req
+.macro DO_SPEC_CTRL_ENTRY maybexen:req
/*
* Requires %rsp=regs (also cpuinfo if !maybexen)
* Requires %r14=stack_end (if maybexen)
@@ -166,12 +166,12 @@
setnz %al
not %eax
and %al, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
+ movzbl STACK_CPUINFO_FIELD(xen_spec_ctrl)(%r14), %eax
.else
andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)
+ movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
.endif
- /* Load Xen's intended value. */
- mov $\ibrs_val, %eax
wrmsr
.endm
@@ -219,47 +219,32 @@
#define SPEC_CTRL_ENTRY_FROM_VMEXIT \
ALTERNATIVE __stringify(ASM_NOP40), \
DO_OVERWRITE_RSB, X86_FEATURE_RSB_VMEXIT; \
- ALTERNATIVE_2 __stringify(ASM_NOP33), \
- __stringify(DO_SPEC_CTRL_ENTRY_FROM_VMEXIT \
- ibrs_val=SPEC_CTRL_IBRS), \
- X86_FEATURE_XEN_IBRS_SET, \
- __stringify(DO_SPEC_CTRL_ENTRY_FROM_VMEXIT \
- ibrs_val=0), \
- X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE __stringify(ASM_NOP34), \
+ DO_SPEC_CTRL_ENTRY_FROM_VMEXIT, X86_FEATURE_SC_MSR
/* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */
#define SPEC_CTRL_ENTRY_FROM_PV \
ALTERNATIVE __stringify(ASM_NOP40), \
DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE; \
- ALTERNATIVE_2 __stringify(ASM_NOP22), \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=0 \
- ibrs_val=SPEC_CTRL_IBRS), \
- X86_FEATURE_XEN_IBRS_SET, \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=0 ibrs_val=0), \
- X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE __stringify(ASM_NOP25), \
+ __stringify(DO_SPEC_CTRL_ENTRY maybexen=0), X86_FEATURE_SC_MSR
/* Use in interrupt/exception context. May interrupt Xen or PV context. */
#define SPEC_CTRL_ENTRY_FROM_INTR \
ALTERNATIVE __stringify(ASM_NOP40), \
DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE; \
- ALTERNATIVE_2 __stringify(ASM_NOP36), \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=1 \
- ibrs_val=SPEC_CTRL_IBRS), \
- X86_FEATURE_XEN_IBRS_SET, \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=1 ibrs_val=0), \
- X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE __stringify(ASM_NOP39), \
+ __stringify(DO_SPEC_CTRL_ENTRY maybexen=1), X86_FEATURE_SC_MSR
/* Use when exiting to Xen context. */
#define SPEC_CTRL_EXIT_TO_XEN \
- ALTERNATIVE_2 __stringify(ASM_NOP23), \
- DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_XEN_IBRS_SET, \
- DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE __stringify(ASM_NOP23), \
+ DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_SC_MSR
/* Use when exiting to guest context. */
#define SPEC_CTRL_EXIT_TO_GUEST \
- ALTERNATIVE_2 __stringify(ASM_NOP24), \
- DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_XEN_IBRS_SET, \
- DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE __stringify(ASM_NOP24), \
+ DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
/* TODO: Drop these when the alternatives infrastructure is NMI/#MC safe. */
.macro SPEC_CTRL_ENTRY_FROM_INTR_IST