File xsa469-06.patch of Package xen.39862

From: Jan Beulich <jbeulich@suse.com>
Subject: x86/thunk: Build Xen with Return Thunks

The Indirect Target Selection speculative vulnerability means that indirect
branches (including RETs) are unsafe when in the first half of a cacheline.

In order to mitigate this, build with return thunks and arrange for
__x86_return_thunk to be (mis)aligned in the same manner as
__x86_indirect_thunk_* so the RET instruction is placed in a safe location.

place_ret() needs to conditionally emit JMP __x86_return_thunk instead of RET.

This is part of XSA-469 / CVE-2024-28956

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
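As a minimal standalone sketch of the placement rule this series relies on
(assuming 64-byte cachelines; the helper name is illustrative and not part
of the Xen tree):

    /* A RET or indirect branch is ITS-unsafe when it starts in the first
     * half of a 64-byte cacheline, i.e. when address bit 5 (0x20) is clear. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool its_safe_ret(uintptr_t addr)
    {
        return addr & 0x20;
    }

    int main(void)
    {
        printf("%d %d\n",                  /* prints "0 1" */
               (int)its_safe_ret(0x1000),  /* offset 0 in a line: unsafe */
               (int)its_safe_ret(0x1020)); /* offset 32: safe */
        return 0;
    }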
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_LIVEPATCH) += livepatch.o
obj-y += msi.o
obj-y += msr.o
obj-$(CONFIG_INDIRECT_THUNK) += indirect-thunk.o
+obj-$(CONFIG_RETURN_THUNK) += indirect-thunk.o
obj-y += ioport_emulate.o
obj-y += irq.o
obj-$(CONFIG_KEXEC) += machine_kexec.o
--- a/xen/arch/x86/Rules.mk
+++ b/xen/arch/x86/Rules.mk
@@ -54,6 +54,13 @@ CFLAGS += -fno-jump-tables
export CONFIG_INDIRECT_THUNK=y
endif

+# Compile with return thunk support if available.
+ifneq ($(call cc-option,$(CC),-mfunction-return=thunk-extern,n),n)
+CFLAGS += -mfunction-return=thunk-extern
+CFLAGS += -DCONFIG_RETURN_THUNK
+export CONFIG_RETURN_THUNK=y
+endif
+
# If supported by the compiler, reduce stack alignment to 8 bytes. But allow
# this to be overridden elsewhere.
$(call cc-option-add,CFLAGS-stack-boundary,CC,-mpreferred-stack-boundary=3)
--- a/xen/arch/x86/acpi/wakeup_prot.S
+++ b/xen/arch/x86/acpi/wakeup_prot.S
@@ -98,7 +98,7 @@ ENTRY(__ret_point)
LOAD_GREG(13)
LOAD_GREG(14)
LOAD_GREG(15)
- ret
+ RET

.data
.align 16
--- a/xen/arch/x86/alternative.c
+++ b/xen/arch/x86/alternative.c
@@ -147,16 +147,45 @@ void init_or_livepatch add_nops(void *in
}
}

+void __x86_return_thunk(void);
+
/*
* Place a return at @ptr. @ptr must be in the writable alias of a stub.
*
+ * When CONFIG_RETURN_THUNK is active, this may be a JMP __x86_return_thunk
+ * instead, depending on the safety of @ptr with respect to Indirect Target
+ * Selection.
+ *
* Returns the next position to write into the stub.
*/
void *place_ret(void *ptr)
{
+ unsigned long addr = (unsigned long)ptr;
uint8_t *p = ptr;

- *p++ = 0xc3;
+ /*
+ * When Return Thunks are used, if a RET would be unsafe at this location
+ * with respect to Indirect Target Selection (i.e. if addr is in the first
+ * half of a cacheline), insert a JMP __x86_return_thunk instead.
+ *
+ * The displacement needs to be relative to the executable alias of the
+ * stub, not to @ptr which is the writable alias.
+ */
+ if ( IS_ENABLED(CONFIG_RETURN_THUNK) && !(addr & 0x20) )
+ {
+ long stub_va = (this_cpu(stubs.addr) & PAGE_MASK) + (addr & ~PAGE_MASK);
+ long disp = (long)__x86_return_thunk - (stub_va + 5);
+
+ BUG_ON((int32_t)disp != disp);
+
+ *p++ = 0xe9;
+ *(int32_t *)p = disp;
+ p += 4;
+ }
+ else
+ {
+ *p++ = 0xc3;
+ }

return p;
}
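To see the displacement arithmetic in isolation: a 5-byte "jmp rel32"
(opcode 0xe9) encodes its target relative to the end of the instruction,
and here it must be computed from the executable alias the stub runs at,
not the writable alias being written through.  A hedged sketch follows
(hypothetical helper, not part of the patch; assumes a little-endian host,
as on x86):

    #include <stdint.h>
    #include <string.h>

    static uint8_t *emit_jmp_rel32(uint8_t *wr, uintptr_t exec_va,
                                   uintptr_t target)
    {
        int64_t disp = (int64_t)target - (int64_t)(exec_va + 5);
        int32_t d32 = (int32_t)disp;

        if ( disp != d32 )       /* mirrors the BUG_ON() range check above */
            return NULL;

        *wr++ = 0xe9;            /* JMP rel32 opcode */
        memcpy(wr, &d32, sizeof(d32));
        return wr + sizeof(d32);
    }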
--- a/xen/arch/x86/bhb-thunk.S
+++ b/xen/arch/x86/bhb-thunk.S
@@ -23,7 +23,7 @@ ENTRY(clear_bhb_tsx)
0: .byte 0xc6, 0xf8, 0 /* xabort $0 */
int3
1:
- ret
+ RET

.size clear_bhb_tsx, . - clear_bhb_tsx
.type clear_bhb_tsx, @function
--- a/xen/arch/x86/clear_page.S
+++ b/xen/arch/x86/clear_page.S
@@ -1,5 +1,6 @@
.file __FILE__

+#include <asm/asm_defns.h>
#include <asm/page.h>

ENTRY(clear_page_sse2)
@@ -15,4 +16,4 @@ ENTRY(clear_page_sse2)
jnz 0b

sfence
- ret
+ RET
--- a/xen/arch/x86/copy_page.S
+++ b/xen/arch/x86/copy_page.S
@@ -1,5 +1,6 @@
.file __FILE__

+#include <asm/asm_defns.h>
#include <asm/page.h>

#define src_reg %rsi
@@ -40,4 +41,4 @@ ENTRY(copy_page_sse2)
movnti tmp4_reg, 3*WORD_SIZE(dst_reg)

sfence
- ret
+ RET
--- a/xen/arch/x86/efi/check.c
+++ b/xen/arch/x86/efi/check.c
@@ -2,3 +2,6 @@ int __attribute__((__ms_abi__)) test(int
{
return i;
}
+
+/* In case -mfunction-return is in use. */
+void __x86_return_thunk(void) {}
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -341,6 +341,12 @@ static always_inline void stac(void)
subq $-(UREGS_error_code-UREGS_r15+\adj), %rsp
.endm

+#ifdef CONFIG_RETURN_THUNK
+# define RET jmp __x86_return_thunk
+#else
+# define RET ret
+#endif
+
#ifdef CONFIG_PV
#define CR4_PV32_RESTORE \
ALTERNATIVE_2 "", \
--- a/xen/arch/x86/indirect-thunk.S
+++ b/xen/arch/x86/indirect-thunk.S
@@ -11,6 +11,8 @@

#include <asm/asm_defns.h>

+#ifdef CONFIG_INDIRECT_THUNK
+
.macro IND_THUNK_RETPOLINE reg:req
call 2f
1:
@@ -62,3 +64,27 @@ ENTRY(__x86_indirect_thunk_\reg)
.irp reg, ax, cx, dx, bx, bp, si, di, 8, 9, 10, 11, 12, 13, 14, 15
GEN_INDIRECT_THUNK reg=r\reg
.endr
+
+#endif /* CONFIG_INDIRECT_THUNK */
+
+#ifdef CONFIG_RETURN_THUNK
+ .section .text.entry.__x86_return_thunk, "ax", @progbits
+
+ /*
+ * The Indirect Target Selection speculative vulnerability means that
+ * indirect branches (including RETs) are unsafe when in the first
+ * half of a cacheline. Arrange for them to be in the second half.
+ *
+ * Align to 64, then skip 32.
+ */
+ .balign 64
+ .fill 32, 1, 0xcc
+
+ENTRY(__x86_return_thunk)
+ ret
+ int3 /* Halt straight-line speculation */
+
+ .size __x86_return_thunk, . - __x86_return_thunk
+ .type __x86_return_thunk, @function
+
+#endif /* CONFIG_RETURN_THUNK */
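The net effect of the ".balign 64" / ".fill 32, 1, 0xcc" pair is to park
the thunk's RET at byte 32 of its cacheline, which is exactly the placement
that the place_ret() test above accepts.  An illustrative check of that
arithmetic (not part of the patch):

    #include <assert.h>

    int main(void)
    {
        unsigned long base = 0x1000;      /* any 64-byte-aligned address */
        unsigned long thunk = base + 32;  /* first byte after the 0xcc fill */

        assert((thunk & 0x3f) == 32);     /* offset 32: second half of line */
        assert(thunk & 0x20);             /* the same test place_ret() uses */
        return 0;
    }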
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -101,7 +101,8 @@ static io_emul_stub_t *io_emul_stub_setu
BUILD_BUG_ON(STUB_BUF_SIZE / 2 < MAX(8, /* Default emul stub */
5 + IOEMUL_QUIRK_STUB_BYTES) +
- 1 /* ret */);
+ (IS_ENABLED(CONFIG_RETURN_THUNK)
+ ? 5 : 1) /* ret */);

block_speculation(); /* SCSB */
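The BUILD_BUG_ON() adjustment accounts for the larger stub terminator: a
bare RET is 1 byte (0xc3), while "jmp __x86_return_thunk" is 5 bytes (0xe9
plus a 32-bit displacement).  A compile-time sketch of that sizing
(illustration only, hypothetical macro):

    #include <assert.h>

    #define RET_BYTES(return_thunk) ((return_thunk) ? 5 : 1)

    static_assert(RET_BYTES(0) == 1, "bare ret");
    static_assert(RET_BYTES(1) == 5, "jmp rel32 to the return thunk");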
--- a/xen/arch/x86/pv/gpr_switch.S
+++ b/xen/arch/x86/pv/gpr_switch.S
@@ -37,7 +37,7 @@ ENTRY(host_to_guest_gpr_switch)
movq %rcx,8(%rsp)
movq UREGS_rcx(%rdi), %rcx
movq UREGS_rdi(%rdi), %rdi
- ret
+ RET
ENTRY(guest_to_host_gpr_switch)
pushq %rdi
@@ -64,4 +64,4 @@ ENTRY(guest_to_host_gpr_switch)
popq %rbx
movq %rcx, UREGS_rcx(%rdi)
popq %rcx
- ret
+ RET
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -549,6 +549,9 @@ static void __init print_details(enum in
#ifdef CONFIG_INDIRECT_THUNK
" INDIRECT_THUNK"
#endif
+#ifdef CONFIG_RETURN_THUNK
+ " RETURN_THUNK"
+#endif
#ifdef CONFIG_SHADOW_PAGING
" SHADOW_PAGING"
#endif
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -182,7 +182,7 @@ ENTRY(cr4_pv32_restore)
mov %rax, %cr4
mov %rax, (%rdx)
pop %rdx
- ret
+ RET
0:
#ifndef NDEBUG
/* Check that _all_ of the bits intended to be set actually are. */
@@ -201,7 +201,7 @@ ENTRY(cr4_pv32_restore)
#endif
pop %rdx
xor %eax, %eax
- ret
+ RET

.section .text.entry, "ax", @progbits

@@ -365,7 +365,7 @@ __UNLIKELY_END(compat_bounce_null_select
xor %eax, %eax
mov %ax, TRAPBOUNCE_cs(%rdx)
mov %al, TRAPBOUNCE_flags(%rdx)
- ret
+ RET

.section .fixup,"ax"
.Lfx13:
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -505,7 +505,7 @@ __UNLIKELY_END(create_bounce_frame_bad_b
xor %eax, %eax
mov %rax, TRAPBOUNCE_eip(%rdx)
mov %al, TRAPBOUNCE_flags(%rdx)
- ret
+ RET

.pushsection .fixup, "ax", @progbits
# Numeric tags below represent the intended overall %rsi adjustment.
--- a/xen/arch/x86/xen.lds.S
+++ b/xen/arch/x86/xen.lds.S
@@ -74,6 +74,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_stextentry = .;
*(.text.entry)
+ *(.text.entry.*)
. = ALIGN(PAGE_SIZE);
_etextentry = .;