File xsa456-0i.patch of Package xen.35284
# Commit c62673c4334b3372ebd4292a7ac8185357e7ea27
# Date 2024-04-09 16:37:30 +0100
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/spec-ctrl: Rename spec_ctrl_flags to scf
XSA-455 was ultimately caused by having fields with too-similar names.
Both {xen,last}_spec_ctrl are fields containing an architectural MSR_SPEC_CTRL
value. The spec_ctrl_flags field contains Xen-internal flags.
To more obviously distinguish the two, rename spec_ctrl_flags to scf, which is
also the prefix of the constants used by the fields.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
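
For illustration only (not part of the diff below), the tail of struct
cpu_info from the asm-x86/current.h hunk, after the rename; the comments
here are a sketch of each field's role as described by this series:

    struct cpu_info {
        /* ... */
        unsigned int shadow_spec_ctrl; /* Guest value, live while SCF_use_shadow. */
        uint8_t      xen_spec_ctrl;    /* Xen's architectural MSR_SPEC_CTRL value. */
        uint8_t      last_spec_ctrl;   /* Value most recently written to the MSR. */
        uint8_t      scf;              /* Xen-internal flags (SCF_*), was spec_ctrl_flags. */
        /* ... */
    };
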
--- a/xen/arch/x86/acpi/power.c
+++ b/xen/arch/x86/acpi/power.c
@@ -244,7 +244,7 @@ static int enter_state(u32 state)
ci = get_cpu_info();
/* Avoid NMI/#MC using unsafe MSRs until we've reloaded microcode. */
- ci->spec_ctrl_flags &= ~SCF_IST_MASK;
+ ci->scf &= ~SCF_IST_MASK;
ACPI_FLUSH_CPU_CACHE();
@@ -288,7 +288,7 @@ static int enter_state(u32 state)
panic("Missing previously available feature(s)\n");
/* Re-enabled default NMI/#MC use of MSRs now microcode is loaded. */
- ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_IST_MASK);
+ ci->scf |= (default_scf & SCF_IST_MASK);
if ( boot_cpu_has(X86_FEATURE_IBRSB) || boot_cpu_has(X86_FEATURE_IBRS) )
{
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1832,10 +1832,10 @@ void context_switch(struct vcpu *prev, s
}
}
- /* Update the top-of-stack block with the new spec_ctrl settings. */
- info->spec_ctrl_flags =
- (info->spec_ctrl_flags & ~SCF_DOM_MASK) |
- (nextd->arch.spec_ctrl_flags & SCF_DOM_MASK);
+ /* Update the top-of-stack block with the new speculation settings. */
+ info->scf =
+ (info->scf & ~SCF_DOM_MASK) |
+ (nextd->arch.scf & SCF_DOM_MASK);
}
sched_context_switched(prev, next);
--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -105,7 +105,7 @@ __UNLIKELY_END(nsvm_hap)
/* SPEC_CTRL_ENTRY_FROM_SVM Req: %rsp=regs/cpuinfo, %rdx=0 Clob: acd */
.macro svm_vmexit_cond_ibpb
- testb $SCF_entry_ibpb, CPUINFO_spec_ctrl_flags(%rsp)
+ testb $SCF_entry_ibpb, CPUINFO_scf(%rsp)
jz .L_skip_ibpb
mov $MSR_PRED_CMD, %ecx
--- a/xen/arch/x86/hvm/vmx/entry.S
+++ b/xen/arch/x86/hvm/vmx/entry.S
@@ -111,7 +111,7 @@ UNLIKELY_END(realmode)
BUILD_BUG_ON(SCF_verw & ~0xff)
movzbl VCPU_vmx_launched(%rbx), %ecx
shl $31, %ecx
- movzbl CPUINFO_spec_ctrl_flags(%rsp), %eax
+ movzbl CPUINFO_scf(%rsp), %eax
and $SCF_verw, %eax
or %eax, %ecx
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1375,7 +1375,7 @@ static int construct_vmcs(struct vcpu *v
rc = vmx_add_msr(v, MSR_FLUSH_CMD, FLUSH_CMD_L1D,
VMX_MSR_GUEST_LOADONLY);
- if ( !rc && (d->arch.spec_ctrl_flags & SCF_entry_ibpb) )
+ if ( !rc && (d->arch.scf & SCF_entry_ibpb) )
rc = vmx_add_msr(v, MSR_PRED_CMD, PRED_CMD_IBPB,
VMX_MSR_HOST);
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1992,7 +1992,7 @@ void __init noreturn __start_xen(unsigne
if ( bsp_delay_spec_ctrl )
{
- info->spec_ctrl_flags &= ~SCF_use_shadow;
+ info->scf &= ~SCF_use_shadow;
barrier();
wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
info->last_spec_ctrl = default_xen_spec_ctrl;
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -67,7 +67,7 @@ static bool __initdata opt_lock_harden;
bool __initdata bsp_delay_spec_ctrl;
uint8_t __read_mostly default_xen_spec_ctrl;
-uint8_t __read_mostly default_spec_ctrl_flags;
+uint8_t __read_mostly default_scf;
paddr_t __read_mostly l1tf_addr_mask, __read_mostly l1tf_safe_maddr;
static bool __initdata cpu_has_bug_l1tf;
@@ -1052,7 +1052,7 @@ static void __init ibpb_calculations(voi
* NMI/#MC, so can't interrupt Xen ahead of having already flushed the
* BTB.
*/
- default_spec_ctrl_flags |= SCF_ist_ibpb;
+ default_scf |= SCF_ist_ibpb;
}
if ( opt_ibpb_entry_hvm )
setup_force_cpu_cap(X86_FEATURE_IBPB_ENTRY_HVM);
@@ -1553,7 +1553,7 @@ void spec_ctrl_init_domain(struct domain
bool ibpb = ((pv ? opt_ibpb_entry_pv : opt_ibpb_entry_hvm) &&
(d->domain_id != 0 || opt_ibpb_entry_dom0));
- d->arch.spec_ctrl_flags =
+ d->arch.scf =
(verw ? SCF_verw : 0) |
(ibpb ? SCF_entry_ibpb : 0) |
0;
@@ -1658,7 +1658,7 @@ void __init init_speculation_mitigations
{
if ( opt_msr_sc_pv )
{
- default_spec_ctrl_flags |= SCF_ist_sc_msr;
+ default_scf |= SCF_ist_sc_msr;
setup_force_cpu_cap(X86_FEATURE_SC_MSR_PV);
}
@@ -1669,7 +1669,7 @@ void __init init_speculation_mitigations
* Xen's value is not restored atomically. An early NMI hitting
* the VMExit path needs to restore Xen's value for safety.
*/
- default_spec_ctrl_flags |= SCF_ist_sc_msr;
+ default_scf |= SCF_ist_sc_msr;
setup_force_cpu_cap(X86_FEATURE_SC_MSR_HVM);
}
}
@@ -1756,7 +1756,7 @@ void __init init_speculation_mitigations
if ( opt_rsb_pv )
{
setup_force_cpu_cap(X86_FEATURE_SC_RSB_PV);
- default_spec_ctrl_flags |= SCF_ist_rsb;
+ default_scf |= SCF_ist_rsb;
}
/*
@@ -1779,7 +1779,7 @@ void __init init_speculation_mitigations
* possible rogue RSB speculation.
*/
if ( !cpu_has_svm )
- default_spec_ctrl_flags |= SCF_ist_rsb;
+ default_scf |= SCF_ist_rsb;
}
srso_calculations(hw_smt_enabled);
@@ -1792,7 +1792,7 @@ void __init init_speculation_mitigations
if ( opt_eager_fpu == -1 )
opt_eager_fpu = should_use_eager_fpu();
- /* (Re)init BSP state now that default_spec_ctrl_flags has been calculated. */
+ /* (Re)init BSP state now that default_scf has been calculated. */
init_shadow_spec_ctrl_state();
/*
@@ -2064,7 +2064,7 @@ void __init init_speculation_mitigations
{
info->shadow_spec_ctrl = 0;
barrier();
- info->spec_ctrl_flags |= SCF_use_shadow;
+ info->scf |= SCF_use_shadow;
barrier();
}
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -148,7 +148,7 @@ void __dummy__(void)
OFFSET(CPUINFO_shadow_spec_ctrl, struct cpu_info, shadow_spec_ctrl);
OFFSET(CPUINFO_xen_spec_ctrl, struct cpu_info, xen_spec_ctrl);
OFFSET(CPUINFO_last_spec_ctrl, struct cpu_info, last_spec_ctrl);
- OFFSET(CPUINFO_spec_ctrl_flags, struct cpu_info, spec_ctrl_flags);
+ OFFSET(CPUINFO_scf, struct cpu_info, scf);
OFFSET(CPUINFO_root_pgt_changed, struct cpu_info, root_pgt_changed);
OFFSET(CPUINFO_use_pv_cr3, struct cpu_info, use_pv_cr3);
DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -164,8 +164,8 @@ ENTRY(compat_restore_all_guest)
/* Account for ev/ec having already been popped off the stack. */
SPEC_CTRL_COND_VERW \
- scf=STK_REL(CPUINFO_spec_ctrl_flags, CPUINFO_rip), \
- sel=STK_REL(CPUINFO_verw_sel, CPUINFO_rip)
+ scf=STK_REL(CPUINFO_scf, CPUINFO_rip), \
+ sel=STK_REL(CPUINFO_verw_sel, CPUINFO_rip)
.Lft0: iretq
_ASM_PRE_EXTABLE(.Lft0, handle_exception)
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -609,7 +609,7 @@ UNLIKELY_END(exit_cr3)
/*
* When the CPU pushed this exception frame, it zero-extended eflags.
* For an IST exit, SPEC_CTRL_EXIT_TO_XEN stashed shadow copies of
- * spec_ctrl_flags and ver_sel above eflags, as we can't use any GPRs,
+ * scf and verw_sel above eflags, as we can't use any GPRs,
* and we're at a random place on the stack, not in a CPUINFO block.
*
* Account for ev/ec having already been popped off the stack.
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -57,7 +57,7 @@ struct cpu_info {
unsigned int shadow_spec_ctrl;
uint8_t xen_spec_ctrl;
uint8_t last_spec_ctrl;
- uint8_t spec_ctrl_flags;
+ uint8_t scf; /* SCF_* */
/*
* The following field controls copying of the L4 page table of 64-bit
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -308,7 +308,7 @@ struct arch_domain
uint32_t pci_cf8;
uint8_t cmos_idx;
- uint8_t spec_ctrl_flags; /* See SCF_DOM_MASK */
+ uint8_t scf; /* See SCF_DOM_MASK */
union {
struct pv_domain pv;
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -21,10 +21,10 @@
#define __X86_SPEC_CTRL_H__
/*
- * Encoding of:
- * cpuinfo.spec_ctrl_flags
- * default_spec_ctrl_flags
- * domain.spec_ctrl_flags
+ * Encoding of Xen's speculation control flags in:
+ * cpuinfo.scf
+ * default_scf
+ * domain.scf
*
* Live settings are in the top-of-stack block, because they need to be
* accessible when XPTI is active. Some settings are fixed from boot, some
@@ -95,7 +95,7 @@ extern bool opt_branch_harden;
extern bool bsp_delay_spec_ctrl;
extern uint8_t default_xen_spec_ctrl;
-extern uint8_t default_spec_ctrl_flags;
+extern uint8_t default_scf;
extern int8_t opt_xpti_hwdom, opt_xpti_domu;
@@ -114,7 +114,7 @@ static inline void init_shadow_spec_ctrl
info->shadow_spec_ctrl = 0;
info->xen_spec_ctrl = default_xen_spec_ctrl;
- info->spec_ctrl_flags = default_spec_ctrl_flags;
+ info->scf = default_scf;
/*
* For least latency, the VERW selector should be a writeable data
@@ -138,7 +138,7 @@ static always_inline void spec_ctrl_ente
*/
info->shadow_spec_ctrl = val;
barrier();
- info->spec_ctrl_flags |= SCF_use_shadow;
+ info->scf |= SCF_use_shadow;
barrier();
alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE,
"a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
@@ -172,7 +172,7 @@ static always_inline void spec_ctrl_exit
* Disable shadowing before updating the MSR. There are no SMP issues
* here; only local processor ordering concerns.
*/
- info->spec_ctrl_flags &= ~SCF_use_shadow;
+ info->scf &= ~SCF_use_shadow;
barrier();
alternative_input("", "wrmsr", X86_FEATURE_SC_MSR_IDLE,
"a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
--- a/xen/include/asm-x86/spec_ctrl_asm.h
+++ b/xen/include/asm-x86/spec_ctrl_asm.h
@@ -51,7 +51,7 @@
* shadowing logic.
*
* Factor 2 is harder. We maintain a shadow_spec_ctrl value, and a use_shadow
- * boolean in the per cpu spec_ctrl_flags. The synchronous use is:
+ * boolean in the per cpu scf. The synchronous use is:
*
* 1) Store guest value in shadow_spec_ctrl
* 2) Set the use_shadow boolean
@@ -98,11 +98,11 @@
* interrupting Xen.
*/
.if \maybexen
- testb $SCF_entry_ibpb, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
+ testb $SCF_entry_ibpb, STACK_CPUINFO_FIELD(scf)(%r14)
jz .L\@_skip
testb $3, UREGS_cs(%rsp)
.else
- testb $SCF_entry_ibpb, CPUINFO_spec_ctrl_flags(%rsp)
+ testb $SCF_entry_ibpb, CPUINFO_scf(%rsp)
.endif
jz .L\@_skip
@@ -172,8 +172,8 @@
#define STK_REL(field, top_of_stk) ((field) - (top_of_stk))
.macro SPEC_CTRL_COND_VERW \
- scf=STK_REL(CPUINFO_spec_ctrl_flags, CPUINFO_error_code), \
- sel=STK_REL(CPUINFO_verw_sel, CPUINFO_error_code)
+ scf=STK_REL(CPUINFO_scf, CPUINFO_error_code), \
+ sel=STK_REL(CPUINFO_verw_sel, CPUINFO_error_code)
/*
* Requires \scf and \sel as %rsp-relative expressions
* Clobbers eflags
@@ -228,10 +228,10 @@
testb $3, UREGS_cs(%rsp)
setnz %al
not %eax
- and %al, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
+ and %al, STACK_CPUINFO_FIELD(scf)(%r14)
movzbl STACK_CPUINFO_FIELD(xen_spec_ctrl)(%r14), %eax
.else
- andb $~SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)
+ andb $~SCF_use_shadow, CPUINFO_scf(%rsp)
movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
.endif
@@ -250,7 +250,7 @@
mov %eax, CPUINFO_shadow_spec_ctrl(%rsp)
/* Set SPEC_CTRL shadowing *before* loading the guest value. */
- orb $SCF_use_shadow, CPUINFO_spec_ctrl_flags(%rsp)
+ orb $SCF_use_shadow, CPUINFO_scf(%rsp)
mov $MSR_SPEC_CTRL, %ecx
xor %edx, %edx
@@ -328,7 +328,7 @@
* DO_SPEC_CTRL_ENTRY maybexen=1
* but with conditionals rather than alternatives.
*/
- movzbl STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14), %ebx
+ movzbl STACK_CPUINFO_FIELD(scf)(%r14), %ebx
test $SCF_ist_ibpb, %bl
jz .L\@_skip_ibpb
@@ -353,7 +353,7 @@
testb $3, UREGS_cs(%rsp)
setnz %al
not %eax
- and %al, STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14)
+ and %al, STACK_CPUINFO_FIELD(scf)(%r14)
/* Load Xen's intended value. */
mov $MSR_SPEC_CTRL, %ecx
@@ -387,7 +387,7 @@ UNLIKELY_DISPATCH_LABEL(\@_serialise):
* Requires %r12=ist_exit, %r14=stack_end, %rsp=regs
* Clobbers %rax, %rbx, %rcx, %rdx
*/
- movzbl STACK_CPUINFO_FIELD(spec_ctrl_flags)(%r14), %ebx
+ movzbl STACK_CPUINFO_FIELD(scf)(%r14), %ebx
testb $SCF_ist_sc_msr, %bl
jz .L\@_skip_sc_msr
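
As a further illustration (again not part of the diff), the synchronous
shadowing sequence described in spec_ctrl_asm.h above, written as a minimal
C sketch using the renamed field; wrmsrl() is the MSR write helper used
elsewhere in this patch:

    /* Switch to a guest MSR_SPEC_CTRL value 'val'. */
    info->shadow_spec_ctrl = val;   /* 1) Store the guest value. */
    barrier();
    info->scf |= SCF_use_shadow;    /* 2) Set the use_shadow boolean. */
    barrier();
    wrmsrl(MSR_SPEC_CTRL, val);     /* 3) Load the guest value into the MSR. */

The two barrier()s keep the stores ordered, so an NMI or #MC arriving at any
point sees shadow_spec_ctrl as valid whenever SCF_use_shadow is set, mirroring
the idle entry helper shown in the spec_ctrl.h hunk above.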