# Commit d9822b8a38114e96e4516dc998f4055249364d5d
# Date 2018-05-16 12:19:10 +0100
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/spec_ctrl: Rename bits of infrastructure to avoid NATIVE and VMEXIT

In hindsight, using NATIVE and VMEXIT as naming terminology was not clever.
A future change wants to split SPEC_CTRL_EXIT_TO_GUEST into PV and HVM
specific implementations, and using VMEXIT as a term is completely wrong.

Take the opportunity to fix some stale documentation in spec_ctrl_asm.h. The
IST helpers were missing from the large comment block, and since
SPEC_CTRL_ENTRY_FROM_INTR_IST was introduced, we've gained a new piece of
functionality which currently depends on the fine-grained control that exists
in lieu of livepatching. Note this in the comment.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
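
[Editor's illustration, not part of the patch.] The SPEC_CTRL_* blocks
renamed below are ALTERNATIVE sites: NOP sleds which boot-time patching
overwrites with the real sequence only when the corresponding synthetic
feature bit is set. The following is a minimal C model of that selection
using the renamed bits; the apply_alternative() helper and the flat
x86_capability[] array are hypothetical stand-ins for Xen's actual
alternative-patching machinery.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Synthetic feature bits in word 3, as renamed by this patch. */
    #define X86_FEATURE_SC_RSB_PV   (3*32 + 16) /* RSB overwrite needed for PV  */
    #define X86_FEATURE_SC_RSB_HVM  (3*32 + 17) /* RSB overwrite needed for HVM */

    static uint32_t x86_capability[8];

    static bool boot_cpu_has(unsigned int bit)
    {
        return x86_capability[bit / 32] & (1u << (bit % 32));
    }

    /*
     * Model of one ALTERNATIVE site: the site starts life as NOPs and is
     * overwritten with the replacement sequence (e.g. DO_OVERWRITE_RSB)
     * only when the gating feature bit is present.
     */
    static void apply_alternative(uint8_t *site, const uint8_t *repl,
                                  size_t len, unsigned int feature)
    {
        if ( boot_cpu_has(feature) )
            memcpy(site, repl, len);
        /* else: leave the ASM_NOP* sled in place. */
    }

Under this model, SPEC_CTRL_ENTRY_FROM_HVM keys its RSB overwrite off
X86_FEATURE_SC_RSB_HVM, while SPEC_CTRL_ENTRY_FROM_PV and
SPEC_CTRL_ENTRY_FROM_INTR key theirs off X86_FEATURE_SC_RSB_PV, matching
the guest-type-based naming this patch introduces.
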
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -360,12 +360,12 @@ void __cpuinit identify_cpu(struct cpuin
if (test_bit(X86_FEATURE_SC_MSR,
boot_cpu_data.x86_capability))
__set_bit(X86_FEATURE_SC_MSR, c->x86_capability);
- if (test_bit(X86_FEATURE_RSB_NATIVE,
+ if (test_bit(X86_FEATURE_SC_RSB_PV,
boot_cpu_data.x86_capability))
- __set_bit(X86_FEATURE_RSB_NATIVE, c->x86_capability);
- if (test_bit(X86_FEATURE_RSB_VMEXIT,
+ __set_bit(X86_FEATURE_SC_RSB_PV, c->x86_capability);
+ if (test_bit(X86_FEATURE_SC_RSB_HVM,
boot_cpu_data.x86_capability))
- __set_bit(X86_FEATURE_RSB_VMEXIT, c->x86_capability);
+ __set_bit(X86_FEATURE_SC_RSB_HVM, c->x86_capability);
/* AND the already accumulated flags with these */
for ( i = 0 ; i < NCAPINTS ; i++ )
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -82,7 +82,7 @@ UNLIKELY_END(svm_trace)
mov VCPU_arch_spec_ctrl(%rbx), %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
- SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+ SPEC_CTRL_EXIT_TO_HVM /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
pop %r15
pop %r14
@@ -107,7 +107,7 @@ UNLIKELY_END(svm_trace)
GET_CURRENT(%rbx)
- SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+ SPEC_CTRL_ENTRY_FROM_HVM /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
mov VCPU_svm_vmcb(%rbx),%rcx
--- a/xen/arch/x86/hvm/vmx/entry.S
+++ b/xen/arch/x86/hvm/vmx/entry.S
@@ -38,7 +38,7 @@ ENTRY(vmx_asm_vmexit_handler)
movb $1,VCPU_vmx_launched(%rbx)
mov %rax,VCPU_hvm_guest_cr2(%rbx)
- SPEC_CTRL_ENTRY_FROM_VMEXIT /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
+ SPEC_CTRL_ENTRY_FROM_HVM /* Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
mov %rsp,%rdi
@@ -73,7 +73,7 @@ UNLIKELY_END(realmode)
mov VCPU_arch_spec_ctrl(%rbx), %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
- SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+ SPEC_CTRL_EXIT_TO_HVM /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
mov VCPU_hvm_guest_cr2(%rbx),%rax
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -35,8 +35,8 @@ static enum ind_thunk {
THUNK_JMP,
} opt_thunk __initdata = THUNK_DEFAULT;
static int8_t __initdata opt_ibrs = -1;
-static bool_t __initdata opt_rsb_native = 1;
-static bool_t __initdata opt_rsb_vmexit = 1;
+static bool_t __initdata opt_rsb_pv = 1;
+static bool_t __initdata opt_rsb_hvm = 1;
bool_t __read_mostly opt_ibpb = 1;
uint8_t __read_mostly default_xen_spec_ctrl;
uint8_t __read_mostly default_spec_ctrl_flags;
@@ -69,9 +69,9 @@ static int __init parse_bti(const char *
else if ( (val = parse_boolean("ibpb", s, ss)) >= 0 )
opt_ibpb = val;
else if ( (val = parse_boolean("rsb_native", s, ss)) >= 0 )
- opt_rsb_native = val;
+ opt_rsb_pv = val;
else if ( (val = parse_boolean("rsb_vmexit", s, ss)) >= 0 )
- opt_rsb_vmexit = val;
+ opt_rsb_hvm = val;
else
rc = -EINVAL;
@@ -118,8 +118,8 @@ static void __init print_details(enum in
default_xen_spec_ctrl & SPEC_CTRL_IBRS ? " IBRS+" :
" IBRS-" : "",
opt_ibpb ? " IBPB" : "",
- boot_cpu_has(X86_FEATURE_RSB_NATIVE) ? " RSB_NATIVE" : "",
- boot_cpu_has(X86_FEATURE_RSB_VMEXIT) ? " RSB_VMEXIT" : "");
+ boot_cpu_has(X86_FEATURE_SC_RSB_PV) ? " RSB_NATIVE" : "",
+ boot_cpu_has(X86_FEATURE_SC_RSB_HVM) ? " RSB_VMEXIT" : "");
}
/* Calculate whether Retpoline is known-safe on this CPU. */
@@ -293,9 +293,9 @@ void __init init_speculation_mitigations
* If a processors speculates to 32bit PV guest kernel mappings, it is
* speculating in 64bit supervisor mode, and can leak data.
*/
- if ( opt_rsb_native )
+ if ( opt_rsb_pv )
{
- __set_bit(X86_FEATURE_RSB_NATIVE, boot_cpu_data.x86_capability);
+ __set_bit(X86_FEATURE_SC_RSB_PV, boot_cpu_data.x86_capability);
default_spec_ctrl_flags |= SCF_ist_rsb;
}
@@ -303,8 +303,8 @@ void __init init_speculation_mitigations
* HVM guests can always poison the RSB to point at Xen supervisor
* mappings.
*/
- if ( opt_rsb_vmexit )
- __set_bit(X86_FEATURE_RSB_VMEXIT, boot_cpu_data.x86_capability);
+ if ( opt_rsb_hvm )
+ __set_bit(X86_FEATURE_SC_RSB_HVM, boot_cpu_data.x86_capability);
/* Check we have hardware IBPB support before using it... */
if ( !boot_cpu_has(X86_FEATURE_IBRSB) && !boot_cpu_has(X86_FEATURE_IBPB) )
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -235,7 +235,7 @@ ENTRY(compat_restore_all_guest)
mov VCPU_arch_spec_ctrl(%rbx), %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
- SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+ SPEC_CTRL_EXIT_TO_PV /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
RESTORE_ALL adj=8 compat=1
.Lft0: iretq
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -72,7 +72,7 @@ restore_all_guest:
mov %r15d, %eax
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
- SPEC_CTRL_EXIT_TO_GUEST /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+ SPEC_CTRL_EXIT_TO_PV /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
RESTORE_ALL
testw $TRAP_syscall,4(%rsp)
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -77,8 +77,8 @@
#define X86_FEATURE_XTOPOLOGY (3*32+13) /* cpu topology enum extensions */
#define X86_FEATURE_CPUID_FAULTING (3*32+14) /* cpuid faulting */
#define X86_FEATURE_CLFLUSH_MONITOR (3*32+15) /* clflush reqd with monitor */
-#define X86_FEATURE_RSB_NATIVE (3*32+16) /* RSB overwrite needed for native */
-#define X86_FEATURE_RSB_VMEXIT (3*32+17) /* RSB overwrite needed for vmexit */
+#define X86_FEATURE_SC_RSB_PV (3*32+16) /* RSB overwrite needed for PV */
+#define X86_FEATURE_SC_RSB_HVM (3*32+17) /* RSB overwrite needed for HVM */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
--- a/xen/include/asm-x86/spec_ctrl_asm.h
+++ b/xen/include/asm-x86/spec_ctrl_asm.h
@@ -72,11 +72,14 @@
*
* The following ASM fragments implement this algorithm. See their local
* comments for further details.
- * - SPEC_CTRL_ENTRY_FROM_VMEXIT
+ * - SPEC_CTRL_ENTRY_FROM_HVM
* - SPEC_CTRL_ENTRY_FROM_PV
* - SPEC_CTRL_ENTRY_FROM_INTR
+ * - SPEC_CTRL_ENTRY_FROM_INTR_IST
+ * - SPEC_CTRL_EXIT_TO_XEN_IST
* - SPEC_CTRL_EXIT_TO_XEN
- * - SPEC_CTRL_EXIT_TO_GUEST
+ * - SPEC_CTRL_EXIT_TO_PV
+ * - SPEC_CTRL_EXIT_TO_HVM
*/
.macro DO_OVERWRITE_RSB tmp=rax
@@ -117,7 +120,7 @@
mov %\tmp, %rsp /* Restore old %rsp */
.endm
-.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT
+.macro DO_SPEC_CTRL_ENTRY_FROM_HVM
/*
* Requires %rbx=current, %rsp=regs/cpuinfo
* Clobbers %rax, %rcx, %rdx
@@ -216,23 +219,23 @@
.endm
/* Use after a VMEXIT from an HVM guest. */
-#define SPEC_CTRL_ENTRY_FROM_VMEXIT \
+#define SPEC_CTRL_ENTRY_FROM_HVM \
ALTERNATIVE __stringify(ASM_NOP40), \
- DO_OVERWRITE_RSB, X86_FEATURE_RSB_VMEXIT; \
+ DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM; \
ALTERNATIVE __stringify(ASM_NOP34), \
- DO_SPEC_CTRL_ENTRY_FROM_VMEXIT, X86_FEATURE_SC_MSR
+ DO_SPEC_CTRL_ENTRY_FROM_HVM, X86_FEATURE_SC_MSR
/* Use after an entry from PV context (syscall/sysenter/int80/int82/etc). */
#define SPEC_CTRL_ENTRY_FROM_PV \
ALTERNATIVE __stringify(ASM_NOP40), \
- DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE; \
+ DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV; \
ALTERNATIVE __stringify(ASM_NOP25), \
__stringify(DO_SPEC_CTRL_ENTRY maybexen=0), X86_FEATURE_SC_MSR
/* Use in interrupt/exception context. May interrupt Xen or PV context. */
#define SPEC_CTRL_ENTRY_FROM_INTR \
ALTERNATIVE __stringify(ASM_NOP40), \
- DO_OVERWRITE_RSB, X86_FEATURE_RSB_NATIVE; \
+ DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_PV; \
ALTERNATIVE __stringify(ASM_NOP39), \
__stringify(DO_SPEC_CTRL_ENTRY maybexen=1), X86_FEATURE_SC_MSR
@@ -241,12 +244,22 @@
ALTERNATIVE __stringify(ASM_NOP23), \
DO_SPEC_CTRL_EXIT_TO_XEN, X86_FEATURE_SC_MSR
-/* Use when exiting to guest context. */
-#define SPEC_CTRL_EXIT_TO_GUEST \
+/* Use when exiting to PV guest context. */
+#define SPEC_CTRL_EXIT_TO_PV \
ALTERNATIVE __stringify(ASM_NOP24), \
DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
-/* TODO: Drop these when the alternatives infrastructure is NMI/#MC safe. */
+/* Use when exiting to HVM guest context. */
+#define SPEC_CTRL_EXIT_TO_HVM \
+ ALTERNATIVE __stringify(ASM_NOP24), \
+ DO_SPEC_CTRL_EXIT_TO_GUEST, X86_FEATURE_SC_MSR
+
+/*
+ * Use in IST interrupt/exception context. May interrupt Xen or PV context.
+ * Fine grain control of SCF_ist_wrmsr is needed for safety in the S3 resume
+ * path to avoid using MSR_SPEC_CTRL before the microcode introducing it has
+ * been reloaded.
+ */
.macro SPEC_CTRL_ENTRY_FROM_INTR_IST
/*
* Requires %rsp=regs, %r14=stack_end
@@ -293,6 +306,7 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise):
UNLIKELY_END(\@_serialise)
.endm
+/* Use when exiting to Xen in IST context. */
.macro SPEC_CTRL_EXIT_TO_XEN_IST
/*
* Requires %rbx=stack_end