File 5afc13ae-4-x86-fold-XEN_IBRS-ALTERNATIVES.patch of Package xen.7652
# Commit af949407eaba7af71067f23d5866cd0bf1f1144d
# Date 2018-05-16 12:19:10 +0100
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/spec_ctrl: Fold the XEN_IBRS_{SET,CLEAR} ALTERNATIVES together
Currently, the SPEC_CTRL_{ENTRY,EXIT}_* macros encode Xen's choice of
MSR_SPEC_CTRL as an immediate constant, and choose between IBRS or not by
doubling up the entire alternative block.
There is now a variable holding Xen's choice of value, so use that and
simplify the alternatives.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
--- sle12sp2.orig/xen/arch/x86/cpu/common.c 2018-05-18 12:32:19.000000000 +0200
+++ sle12sp2/xen/arch/x86/cpu/common.c 2018-05-23 11:11:24.000000000 +0200
@@ -417,13 +417,9 @@ void identify_cpu(struct cpuinfo_x86 *c)
if (test_bit(X86_FEATURE_IND_THUNK_JMP,
boot_cpu_data.x86_capability))
__set_bit(X86_FEATURE_IND_THUNK_JMP, c->x86_capability);
- if (test_bit(X86_FEATURE_XEN_IBRS_SET,
+ if (test_bit(X86_FEATURE_SC_MSR,
boot_cpu_data.x86_capability))
- __set_bit(X86_FEATURE_XEN_IBRS_SET, c->x86_capability);
- if (test_bit(X86_FEATURE_XEN_IBRS_CLEAR,
- boot_cpu_data.x86_capability))
- __set_bit(X86_FEATURE_XEN_IBRS_CLEAR,
- c->x86_capability);
+ __set_bit(X86_FEATURE_SC_MSR, c->x86_capability);
if (test_bit(X86_FEATURE_RSB_NATIVE,
boot_cpu_data.x86_capability))
__set_bit(X86_FEATURE_RSB_NATIVE, c->x86_capability);
--- sle12sp2.orig/xen/arch/x86/spec_ctrl.c 2018-05-23 11:08:42.000000000 +0200
+++ sle12sp2/xen/arch/x86/spec_ctrl.c 2018-05-23 11:11:24.000000000 +0200
@@ -112,8 +112,9 @@ static void __init print_details(enum in
thunk == THUNK_RETPOLINE ? "RETPOLINE" :
thunk == THUNK_LFENCE ? "LFENCE" :
thunk == THUNK_JMP ? "JMP" : "?",
- boot_cpu_has(X86_FEATURE_XEN_IBRS_SET) ? " IBRS+" :
- boot_cpu_has(X86_FEATURE_XEN_IBRS_CLEAR) ? " IBRS-" : "",
+ boot_cpu_has(X86_FEATURE_SC_MSR) ?
+ default_xen_spec_ctrl & SPEC_CTRL_IBRS ? " IBRS+" :
+ " IBRS-" : "",
opt_ibpb ? " IBPB" : "",
boot_cpu_has(X86_FEATURE_RSB_NATIVE) ? " RSB_NATIVE" : "",
boot_cpu_has(X86_FEATURE_RSB_VMEXIT) ? " RSB_VMEXIT" : "");
@@ -285,13 +286,10 @@ void __init init_speculation_mitigations
* need the IBRS entry/exit logic to virtualise IBRS support for
* guests.
*/
+ __set_bit(X86_FEATURE_SC_MSR, boot_cpu_data.x86_capability);
+
if ( ibrs )
- {
default_xen_spec_ctrl |= SPEC_CTRL_IBRS;
- __set_bit(X86_FEATURE_XEN_IBRS_SET, boot_cpu_data.x86_capability);
- }
- else
- __set_bit(X86_FEATURE_XEN_IBRS_CLEAR, boot_cpu_data.x86_capability);
default_spec_ctrl_flags |= SCF_ist_wrmsr;
}
--- sle12sp2.orig/xen/include/asm-x86/cpufeature.h 2018-05-18 11:55:19.000000000 +0200
+++ sle12sp2/xen/include/asm-x86/cpufeature.h 2018-05-23 11:11:24.000000000 +0200
@@ -21,8 +21,7 @@ XEN_CPUFEATURE(LFENCE_DISPATCH, (FSCAPIN
XEN_CPUFEATURE(IND_THUNK_LFENCE, (FSCAPINTS+0)*32+ 11) /* Use IND_THUNK_LFENCE */
XEN_CPUFEATURE(IND_THUNK_JMP, (FSCAPINTS+0)*32+ 12) /* Use IND_THUNK_JMP */
XEN_CPUFEATURE(XEN_IBPB, (FSCAPINTS+0)*32+ 13) /* IBRSB || IBPB */
-XEN_CPUFEATURE(XEN_IBRS_SET, (FSCAPINTS+0)*32+ 14) /* IBRSB && IRBS set in Xen */
-XEN_CPUFEATURE(XEN_IBRS_CLEAR, (FSCAPINTS+0)*32+ 15) /* IBRSB && IBRS clear in Xen */
+XEN_CPUFEATURE(SC_MSR, (FSCAPINTS+0)*32+ 14) /* MSR_SPEC_CTRL used by Xen */
XEN_CPUFEATURE(RSB_NATIVE, (FSCAPINTS+0)*32+ 16) /* RSB overwrite needed for native */
XEN_CPUFEATURE(RSB_VMEXIT, (FSCAPINTS+0)*32+ 17) /* RSB overwrite needed for vmexit */
XEN_CPUFEATURE(MFENCE_RDTSC, (FSCAPINTS+0)*32+ 18) /* MFENCE synchronizes RDTSC */
--- sle12sp2.orig/xen/include/asm-x86/nops.h 2018-05-23 11:08:42.000000000 +0200
+++ sle12sp2/xen/include/asm-x86/nops.h 2018-05-23 11:11:24.000000000 +0200
@@ -64,9 +64,10 @@
#define ASM_NOP8 _ASM_MK_NOP(K8_NOP8)
#define ASM_NOP17 ASM_NOP8; ASM_NOP7; ASM_NOP2
-#define ASM_NOP22 ASM_NOP8; ASM_NOP8; ASM_NOP6
#define ASM_NOP24 ASM_NOP8; ASM_NOP8; ASM_NOP8
+#define ASM_NOP25 ASM_NOP8; ASM_NOP8; ASM_NOP7; ASM_NOP2
#define ASM_NOP33 ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP7; ASM_NOP2
+#define ASM_NOP36 ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP4
#define ASM_NOP40 ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP8; ASM_NOP8
#define ASM_NOP_MAX 8
--- sle12sp2.orig/xen/include/asm-x86/spec_ctrl.h 2018-05-23 11:08:42.000000000 +0200
+++ sle12sp2/xen/include/asm-x86/spec_ctrl.h 2018-05-23 11:11:24.000000000 +0200
@@ -54,14 +54,14 @@ static always_inline void spec_ctrl_ente
barrier();
asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", %c3)
:: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0),
- "i" (X86_FEATURE_XEN_IBRS_SET)
+ "i" (X86_FEATURE_SC_MSR)
: "memory" );
}
/* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */
static always_inline void spec_ctrl_exit_idle(struct cpu_info *info)
{
- uint32_t val = SPEC_CTRL_IBRS;
+ uint32_t val = info->xen_spec_ctrl;
/*
* Disable shadowing before updating the MSR. There are no SMP issues
@@ -71,7 +71,7 @@ static always_inline void spec_ctrl_exit
barrier();
asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", %c3)
:: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0),
- "i" (X86_FEATURE_XEN_IBRS_SET)
+ "i" (X86_FEATURE_SC_MSR)
: "memory" );
}
--- sle12sp2.orig/xen/include/asm-x86/spec_ctrl_asm.h 2018-05-23 11:08:42.000000000 +0200
+++ sle12sp2/xen/include/asm-x86/spec_ctrl_asm.h 2018-05-23 11:11:24.000000000 +0200
@@ -117,7 +117,7 @@
mov %\tmp, %rsp /* Restore old %rsp */
.endm
-.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT ibrs_val:req
+.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT
/*
* Requires %rbx=current, %rsp=regs/cpuinfo
* Clobbers %rax, %rcx, %rdx
@@ -137,11 +137,11 @@
andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)
/* Load Xen's intended value. */
- mov $\ibrs_val, %eax
+ movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
wrmsr
.endm
-.macro DO_SPEC_CTRL_ENTRY maybexen:req ibrs_val:req
+.macro DO_SPEC_CTRL_ENTRY maybexen:req
/*
* Requires %rsp=regs (also cpuinfo if !maybexen)
* Requires %r14=stack_end (if maybexen)
@@ -166,12 +166,12 @@
setnz %al
not %eax
and %al, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
+ movzbl STACK_CPUINFO_FIELD(xen_spec_ctrl)(%r14), %eax
.else
andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)
+ movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
.endif
- /* Load Xen's intended value. */
- mov $\ibrs_val, %eax
wrmsr
.endm
@@ -219,47 +219,32 @@
#define SPEC_CTRL_ENTRY_FROM_VMEXIT \
ALTERNATIVE __stringify(ASM_NOP40), \
DO_OVERWRITE_RSB, X86_FEATURE_RSB_VMEXIT; \
- ALTERNATIVE_2 __stringify(ASM_NOP33), \
- __stringify(DO_SPEC_CTRL_ENTRY_FROM_VMEXIT \
- ibrs_val=SPEC_CTRL_IBRS), \
- X86_FEATURE_XEN_IBRS_SET, \
- __stringify(DO_SPEC_CTRL_ENTRY_FROM_VMEXIT \
- ibrs_val=0), \
- X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE __stringify(ASM_NOP36), \
+ DO_SPEC_CTRL_ENTRY_FROM_VMEXIT, X86_FEATURE_SC_MSR
/* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */
#define SPEC_CTRL_ENTRY_FROM_PV \
ALTERNATIVE __stringify(ASM_NOP40), \
DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE; \
- ALTERNATIVE_2 __stringify(ASM_NOP22), \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=0 \
- ibrs_val=SPEC_CTRL_IBRS), \
- X86_FEATURE_XEN_IBRS_SET, \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=0 ibrs_val=0), \
- X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE __stringify(ASM_NOP25), \
+ __stringify(DO_SPEC_CTRL_ENTRY maybexen=0), X86_FEATURE_SC_MSR
/* Use in interrupt/exception context. May interrupt Xen or PV context. */
#define SPEC_CTRL_ENTRY_FROM_INTR \
ALTERNATIVE __stringify(ASM_NOP40), \
DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE; \
- ALTERNATIVE_2 __stringify(ASM_NOP33), \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=1 \
- ibrs_val=SPEC_CTRL_IBRS), \
- X86_FEATURE_XEN_IBRS_SET, \
- __stringify(DO_SPEC_CTRL_ENTRY maybexen=1 ibrs_val=0), \
- X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE __stringify(ASM_NOP33), \
+ __stringify(DO_SPEC_CTRL_ENTRY maybexen=1), X86_FEATURE_SC_MSR
/* Use when exiting to Xen context. */
#define SPEC_CTRL_EXIT_TO_XEN \
- ALTERNATIVE_2 __stringify(ASM_NOP17), \
- DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_XEN_IBRS_SET, \
- DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE __stringify(ASM_NOP17), \
+ DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_SC_MSR
/* Use when exiting to guest context. */
#define SPEC_CTRL_EXIT_TO_GUEST \
- ALTERNATIVE_2 __stringify(ASM_NOP24), \
- DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_XEN_IBRS_SET, \
- DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_XEN_IBRS_CLEAR
+ ALTERNATIVE __stringify(ASM_NOP24), \
+ DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
/* TODO: Drop these when the alternatives infrastructure is NMI/#MC safe. */
.macro SPEC_CTRL_ENTRY_FROM_INTR_IST