File 68221f25-x86-build-with-Return-Thunks.patch of Package xen
# Commit afcb4a06c740f7f71d2e9746c9d147c38a6e6c90
# Date 2025-05-12 17:17:38 +0100
# Author Jan Beulich <jbeulich@suse.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86/thunk: Build Xen with Return Thunks

The Indirect Target Selection speculative vulnerability means that indirect
branches (including RETs) are unsafe when in the first half of a cacheline.

In order to mitigate this, build with return thunks and arrange for
__x86_return_thunk to be (mis)aligned in the same manner as
__x86_indirect_thunk_* so the RET instruction is placed in a safe location.

place_ret() needs to conditionally emit JMP __x86_return_thunk instead of RET.

This is part of XSA-469 / CVE-2024-28956

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
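
As an illustrative sketch (the helper name is hypothetical, not part of
the commit), the ITS safety criterion used throughout reduces to a
single address-bit test:

    #include <stdbool.h>

    /*
     * A RET is only ITS-safe in the second half of a 64-byte cacheline,
     * i.e. when bit 5 of its address is set (byte offset 32..63).
     */
    static bool ret_is_its_safe(unsigned long addr)
    {
        return addr & 0x20;
    }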
--- a/xen/arch/x86/Kconfig
+++ b/xen/arch/x86/Kconfig
@@ -38,9 +38,14 @@ config ARCH_DEFCONFIG
default "arch/x86/configs/x86_64_defconfig"
config CC_HAS_INDIRECT_THUNK
+ # GCC >= 8 or Clang >= 6
def_bool $(cc-option,-mindirect-branch-register) || \
$(cc-option,-mretpoline-external-thunk)
+config CC_HAS_RETURN_THUNK
+ # GCC >= 8 or Clang >= 15
+ def_bool $(cc-option,-mfunction-return=thunk-extern)
+
config HAS_AS_CET_SS
# binutils >= 2.29 or LLVM >= 6
def_bool $(as-instr,wrssq %rax$(comma)0;setssbsy)
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_LIVEPATCH) += livepatch.o
obj-y += msi.o
obj-y += msr.o
obj-$(CONFIG_INDIRECT_THUNK) += indirect-thunk.o
+obj-$(CONFIG_RETURN_THUNK) += indirect-thunk.o
obj-$(CONFIG_PV) += ioport_emulate.o
obj-y += irq.o
obj-$(CONFIG_KEXEC) += machine_kexec.o
--- a/xen/arch/x86/acpi/wakeup_prot.S
+++ b/xen/arch/x86/acpi/wakeup_prot.S
@@ -131,7 +131,7 @@ LABEL(s3_resume)
pop %r12
pop %rbx
pop %rbp
- ret
+ RET
END(do_suspend_lowlevel)
.data
--- a/xen/arch/x86/alternative.c
+++ b/xen/arch/x86/alternative.c
@@ -137,16 +137,45 @@ void init_or_livepatch add_nops(void *in
}
}
+void nocall __x86_return_thunk(void);
+
/*
* Place a return at @ptr. @ptr must be in the writable alias of a stub.
*
+ * When CONFIG_RETURN_THUNK is active, this may be a JMP __x86_return_thunk
+ * instead, depending on the safety of @ptr with respect to Indirect Target
+ * Selection.
+ *
* Returns the next position to write into the stub.
*/
void *place_ret(void *ptr)
{
+ unsigned long addr = (unsigned long)ptr;
uint8_t *p = ptr;
- *p++ = 0xc3;
+ /*
+ * When Return Thunks are used, if a RET would be unsafe at this location
+ * with respect to Indirect Target Selection (i.e. if addr is in the first
+ * half of a cacheline), insert a JMP __x86_return_thunk instead.
+ *
+ * The displacement needs to be relative to the executable alias of the
+ * stub, not to @ptr which is the writable alias.
+ */
+ if ( IS_ENABLED(CONFIG_RETURN_THUNK) && !(addr & 0x20) )
+ {
+ long stub_va = (this_cpu(stubs.addr) & PAGE_MASK) + (addr & ~PAGE_MASK);
+ long disp = (long)__x86_return_thunk - (stub_va + 5);
+
+ BUG_ON((int32_t)disp != disp);
+
+ *p++ = 0xe9;
+ *(int32_t *)p = disp;
+ p += 4;
+ }
+ else
+ {
+ *p++ = 0xc3;
+ }
return p;
}
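
For illustration (the function and values are hypothetical, not part of
the commit), the JMP rel32 encoding emitted above takes its displacement
from the end of the 5-byte instruction, hence the "stub_va + 5" term:

    #include <stdint.h>
    #include <string.h>

    /*
     * Encode "jmp dst_va" at src_va into buf (5 bytes: 0xe9 + disp32).
     * The caller must ensure the displacement fits in 32 bits; the
     * patch above BUG_ON()s if it does not.
     */
    static void emit_jmp_rel32(uint8_t *buf, uint64_t src_va, uint64_t dst_va)
    {
        int32_t disp = (int32_t)(dst_va - (src_va + 5));

        buf[0] = 0xe9;
        memcpy(&buf[1], &disp, sizeof(disp));
    }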
--- a/xen/arch/x86/arch.mk
+++ b/xen/arch/x86/arch.mk
@@ -44,6 +44,9 @@ CFLAGS-$(CONFIG_CC_IS_GCC) += -fno-jump-
CFLAGS-$(CONFIG_CC_IS_CLANG) += -mretpoline-external-thunk
endif
+# Compile with return thunk support if selected.
+CFLAGS-$(CONFIG_RETURN_THUNK) += -mfunction-return=thunk-extern
+
# Disable the addition of a .note.gnu.property section to object files when
# livepatch support is enabled. The contents of that section can change
# depending on the instructions used, and livepatch-build-tools doesn't know
--- a/xen/arch/x86/bhb-thunk.S
+++ b/xen/arch/x86/bhb-thunk.S
@@ -23,7 +23,7 @@ FUNC(clear_bhb_tsx)
0: .byte 0xc6, 0xf8, 0 /* xabort $0 */
int3
1:
- ret
+ RET
END(clear_bhb_tsx)
/*
--- a/xen/arch/x86/clear_page.S
+++ b/xen/arch/x86/clear_page.S
@@ -1,6 +1,8 @@
.file __FILE__
#include <xen/linkage.h>
+
+#include <asm/asm_defns.h>
#include <asm/page.h>
FUNC(clear_page_sse2)
@@ -16,5 +18,5 @@ FUNC(clear_page_sse2)
jnz 0b
sfence
- ret
+ RET
END(clear_page_sse2)
--- a/xen/arch/x86/copy_page.S
+++ b/xen/arch/x86/copy_page.S
@@ -1,6 +1,8 @@
.file __FILE__
#include <xen/linkage.h>
+
+#include <asm/asm_defns.h>
#include <asm/page.h>
#define src_reg %rsi
@@ -41,5 +43,5 @@ FUNC(copy_page_sse2)
movnti tmp4_reg, 3*WORD_SIZE(dst_reg)
sfence
- ret
+ RET
END(copy_page_sse2)
--- a/xen/arch/x86/efi/check.c
+++ b/xen/arch/x86/efi/check.c
@@ -3,6 +3,9 @@ int __attribute__((__ms_abi__)) test(int
return i;
}
+/* Dummy definition so this probe still links when -mfunction-return is in use. */
+void __x86_return_thunk(void) {}
+
/*
* Populate an array with "addresses" of relocatable and absolute values.
* This is to probe ld for (a) emitting base relocations at all and (b) not
--- a/xen/arch/x86/include/asm/asm-defns.h
+++ b/xen/arch/x86/include/asm/asm-defns.h
@@ -58,6 +58,12 @@
.endif
.endm
+#ifdef CONFIG_RETURN_THUNK
+# define RET jmp __x86_return_thunk
+#else
+# define RET ret
+#endif
+
#ifdef CONFIG_XEN_IBT
# define ENDBR64 endbr64
#else
--- a/xen/arch/x86/indirect-thunk.S
+++ b/xen/arch/x86/indirect-thunk.S
@@ -15,6 +15,8 @@
#undef SYM_ALIGN
#define SYM_ALIGN(align...)
+#ifdef CONFIG_INDIRECT_THUNK
+
.macro IND_THUNK_RETPOLINE reg:req
call 1f
int3
@@ -62,3 +64,25 @@ END(__x86_indirect_thunk_\reg)
.irp reg, ax, cx, dx, bx, bp, si, di, 8, 9, 10, 11, 12, 13, 14, 15
GEN_INDIRECT_THUNK reg=r\reg
.endr
+
+#endif /* CONFIG_INDIRECT_THUNK */
+
+#ifdef CONFIG_RETURN_THUNK
+ .section .text.entry.__x86_return_thunk, "ax", @progbits
+
+ /*
+ * The Indirect Target Selection speculative vulnerability means that
+ * indirect branches (including RETs) are unsafe when in the first
+ * half of a cacheline. Arrange for them to be in the second half.
+ *
+ * Align to 64, then skip 32.
+ */
+ .balign 64
+ .fill 32, 1, 0xcc
+
+FUNC(__x86_return_thunk)
+ ret
+ int3 /* Halt straight-line speculation */
+END(__x86_return_thunk)
+
+#endif /* CONFIG_RETURN_THUNK */
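
As a sanity-check sketch (hypothetical helper, not part of the commit):
with ".balign 64" followed by ".fill 32, 1, 0xcc", the RET at
__x86_return_thunk lands at byte offset 32 of its cacheline, i.e. in the
ITS-safe second half:

    #include <assert.h>

    static void check_thunk_alignment(unsigned long thunk_va)
    {
        /* Aligned to 64 bytes, then offset by 32. */
        assert((thunk_va & 0x3f) == 0x20);
    }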
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -131,7 +131,7 @@ static io_emul_stub_t *io_emul_stub_setu
BUILD_BUG_ON(STUB_BUF_SIZE / 2 <
(sizeof(prologue) + sizeof(epilogue) + 10 /* 2x call */ +
MAX(3 /* default stub */, IOEMUL_QUIRK_STUB_BYTES) +
- 1 /* ret */));
+ (IS_ENABLED(CONFIG_RETURN_THUNK) ? 5 : 1) /* ret */));
/* Runtime confirmation that we haven't clobbered an adjacent stub. */
BUG_ON(STUB_BUF_SIZE / 2 < (p - ctxt->io_emul_stub));
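
The adjusted bound accounts for the larger epilogue; as a sketch
(hypothetical helper, not part of the commit):

    #include <stdbool.h>

    /*
     * A bare RET is 1 byte (0xc3); with return thunks the epilogue
     * becomes a 5-byte JMP rel32 (0xe9 + disp32), so the stub buffer
     * must be sized for the larger form.
     */
    static unsigned int ret_size(bool return_thunk)
    {
        return return_thunk ? 5 : 1;
    }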
--- a/xen/arch/x86/pv/gpr_switch.S
+++ b/xen/arch/x86/pv/gpr_switch.S
@@ -26,7 +26,7 @@ FUNC(load_guest_gprs)
movq UREGS_r15(%rdi), %r15
movq UREGS_rcx(%rdi), %rcx
movq UREGS_rdi(%rdi), %rdi
- ret
+ RET
END(load_guest_gprs)
/* Save guest GPRs. Parameter on the stack above the return address. */
@@ -48,5 +48,5 @@ FUNC(save_guest_gprs)
movq %rbx, UREGS_rbx(%rdi)
movq %rdx, UREGS_rdx(%rdi)
movq %rcx, UREGS_rcx(%rdi)
- ret
+ RET
END(save_guest_gprs)
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -571,6 +571,9 @@ static void __init print_details(enum in
#ifdef CONFIG_INDIRECT_THUNK
" INDIRECT_THUNK"
#endif
+#ifdef CONFIG_RETURN_THUNK
+ " RETURN_THUNK"
+#endif
#ifdef CONFIG_SHADOW_PAGING
" SHADOW_PAGING"
#endif
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -180,7 +180,7 @@ FUNC(cr4_pv32_restore)
or cr4_pv32_mask(%rip), %rax
mov %rax, %cr4
mov %rax, (%rcx)
- ret
+ RET
0:
#ifndef NDEBUG
/* Check that _all_ of the bits intended to be set actually are. */
@@ -198,7 +198,7 @@ FUNC(cr4_pv32_restore)
1:
#endif
xor %eax, %eax
- ret
+ RET
END(cr4_pv32_restore)
FUNC(compat_syscall)
@@ -329,7 +329,7 @@ __UNLIKELY_END(compat_bounce_null_select
xor %eax, %eax
mov %ax, TRAPBOUNCE_cs(%rdx)
mov %al, TRAPBOUNCE_flags(%rdx)
- ret
+ RET
.section .fixup,"ax"
.Lfx13:
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -604,7 +604,7 @@ __UNLIKELY_END(create_bounce_frame_bad_b
xor %eax, %eax
mov %rax, TRAPBOUNCE_eip(%rdx)
mov %al, TRAPBOUNCE_flags(%rdx)
- ret
+ RET
.pushsection .fixup, "ax", @progbits
# Numeric tags below represent the intended overall %rsi adjustment.
--- a/xen/arch/x86/xen.lds.S
+++ b/xen/arch/x86/xen.lds.S
@@ -83,6 +83,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_stextentry = .;
*(.text.entry)
+ *(.text.entry.*)
. = ALIGN(PAGE_SIZE);
_etextentry = .;
--- a/xen/common/Kconfig
+++ b/xen/common/Kconfig
@@ -136,6 +136,17 @@ config INDIRECT_THUNK
When enabled, indirect branches are implemented using a new construct
called "retpoline" that prevents speculation.
+config RETURN_THUNK
+ bool "Out-of-line Returns"
+ depends on CC_HAS_RETURN_THUNK
+ default INDIRECT_THUNK
+ help
+ Compile Xen with out-of-line returns.
+
+ This allows Xen to mitigate a variety of speculative vulnerabilities
+ by choosing a hardware-dependent instruction sequence to implement
+ function returns safely.
+
config SPECULATIVE_HARDEN_ARRAY
bool "Speculative Array Hardening"
default y
--- a/xen/lib/x86-generic-hweightl.c
+++ b/xen/lib/x86-generic-hweightl.c
@@ -51,7 +51,11 @@ asm (
"pop %rdx\n\t"
"pop %rdi\n\t"
+#ifdef CONFIG_RETURN_THUNK
+ "jmp __x86_return_thunk\n\t"
+#else
"ret\n\t"
+#endif
".size arch_generic_hweightl, . - arch_generic_hweightl\n\t"
);
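
The same conditional applies to any other open-coded asm() epilogue,
since the compiler only rewrites the returns it emits itself.  A sketch
of the pattern (the ASM_RET name is hypothetical, not part of the
commit):

    /* Expand to the safe return sequence for the current build. */
    #ifdef CONFIG_RETURN_THUNK
    # define ASM_RET "jmp __x86_return_thunk\n\t"
    #else
    # define ASM_RET "ret\n\t"
    #endif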