File xsa398-4.patch of Package xen.26343
# Commit 62c91eb66a2904eefb1d1d9642e3697a1e3c3a3c
# Date 2022-03-08 16:38:02 +0000
# Author Rahul Singh <rahul.singh@arm.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
xen/arm: Add Spectre BHB handling

This commit adds Spectre BHB handling to Xen on Arm.
It introduces new alternative code sequences to be executed during
exception entry (see the sketch after this list):
- SMCCC workaround 3 call
- loop workaround (with 8, 24 or 32 iterations, depending on the CPU)
- use of the new clearbhb instruction
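
Each sequence is bracketed by start/end labels so that the C code can
copy the chosen one into the hardened exception vectors. As a purely
illustrative, condensed sketch (the authoritative code is in the
cpuerrata.c hunk further down, which also uses the pre-existing
install_bp_hardening_vec() helper), the wiring looks roughly like:

    /* Labels emitted by bpi.S around one of the mitigation sequences. */
    extern char __mitigate_spectre_bhb_loop_start_8[],
        __mitigate_spectre_bhb_loop_end_8[];

    /* Copy that sequence into the hardened vectors for this CPU. */
    if ( cpus_have_cap(ARM_WORKAROUND_BHB_LOOP_8) )
        return !install_bp_hardening_vec(entry,
                                         __mitigate_spectre_bhb_loop_start_8,
                                         __mitigate_spectre_bhb_loop_end_8,
                                         "use 8 loops workaround");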

cpuerrata.c is modified by this patch to apply the required workaround
for CPUs affected by Spectre BHB when
CONFIG_ARM64_HARDEN_BRANCH_PREDICTOR is enabled.

To do this, the mechanism previously used to apply SMCCC workaround 1
is reused, and the new alternative code sequences to be copied into the
exception handler are introduced.

To define the type of workaround required by a processor, 4 new CPU
capabilities are introduced (one for each loop iteration count and one
for SMCCC workaround 3), as sketched below.
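
For orientation only, here is a condensed extract of what the new
capabilities and one matching errata table entry look like; both are
taken from the cpufeature.h and cpuerrata.c hunks below, not added on
top of them:

    /* New capability numbers (xen/include/asm-arm/cpufeature.h). */
    #define ARM_WORKAROUND_BHB_LOOP_8  10
    #define ARM_WORKAROUND_BHB_LOOP_24 11
    #define ARM_WORKAROUND_BHB_LOOP_32 12
    #define ARM_WORKAROUND_BHB_SMCC_3  13

    /* Errata entry tying a core to a capability and its enable hook. */
    {
        .capability = ARM_WORKAROUND_BHB_LOOP_24,
        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
        .enable = enable_spectre_bhb_workaround,
    },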

When a processor is affected, enable_spectre_bhb_workaround is called;
unless the processor has CSV2 set to 3 or the ECBHB feature (either of
which means the required clearing is already done in hardware), the
proper code is enabled at exception entry.

Where workaround 3 is not supported by the firmware, workaround 1 is
enabled when possible, as it also mitigates Spectre BHB on systems
without CSV2. A condensed sketch of this selection logic follows.
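
The sketch below is illustrative only and omits several checks (the
capability match, the SMCCC version check and the warning path); the
full version is in the cpuerrata.c hunk further down and relies on
Xen-internal helpers (cpu_data, smccc_ver, arm_smccc_1_1_smc,
install_bp_hardening_vec) visible there:

    static int enable_spectre_bhb_workaround(void *data)
    {
        const struct arm_cpu_capabilities *entry = data;
        struct arm_smccc_res res;

        /* CSV2 == 3 or ECBHB: the hardware already does what is needed. */
        if ( cpu_data[smp_processor_id()].pfr64.csv2 == 3 ||
             cpu_data[smp_processor_id()].mm64.ecbhb )
            return 0;

        /* Prefer the dedicated clearbhb instruction when available. */
        if ( cpu_data[smp_processor_id()].isa64.clearbhb )
            return !install_bp_hardening_vec(entry,
                       __mitigate_spectre_bhb_clear_insn_start,
                       __mitigate_spectre_bhb_clear_insn_end,
                       "use clearBHB instruction");

        /* ... the 8/24/32 loop variants are elided here ... */

        /* Otherwise probe the firmware for ARM_SMCCC_ARCH_WORKAROUND_3
         * and fall back to workaround 1 if it is not implemented. */
        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FID,
                          ARM_SMCCC_ARCH_WORKAROUND_3_FID, &res);
        if ( (int)res.a0 < 0 )
            return enable_smccc_arch_workaround_1(data);

        return !install_bp_hardening_vec(entry,
                                         __smccc_workaround_smc_start_3,
                                         __smccc_workaround_smc_end_3,
                                         "call ARM_SMCCC_ARCH_WORKAROUND_3");
    }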

This is part of XSA-398 / CVE-2022-23960.

Signed-off-by: Bertrand Marquis <bertrand.marquis@arm.com>
Signed-off-by: Rahul Singh <rahul.singh@arm.com>
Acked-by: Julien Grall <julien@xen.org>

--- a/xen/arch/arm/arm64/bpi.S
+++ b/xen/arch/arm/arm64/bpi.S
@@ -58,16 +58,42 @@ ENTRY(__bp_harden_hyp_vecs_start)
.endr
ENTRY(__bp_harden_hyp_vecs_end)
-ENTRY(__smccc_workaround_1_smc_start)
+.macro mitigate_spectre_bhb_loop count
+ENTRY(__mitigate_spectre_bhb_loop_start_\count)
+ stp x0, x1, [sp, #-16]!
+ mov x0, \count
+.Lspectre_bhb_loop\@:
+ b . + 4
+ subs x0, x0, #1
+ b.ne .Lspectre_bhb_loop\@
+ sb
+ ldp x0, x1, [sp], #16
+ENTRY(__mitigate_spectre_bhb_loop_end_\count)
+.endm
+
+.macro smccc_workaround num smcc_id
+ENTRY(__smccc_workaround_smc_start_\num)
sub sp, sp, #(8 * 4)
stp x0, x1, [sp, #(8 * 2)]
stp x2, x3, [sp, #(8 * 0)]
- mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1_FID
+ mov w0, \smcc_id
smc #0
ldp x2, x3, [sp, #(8 * 0)]
ldp x0, x1, [sp, #(8 * 2)]
add sp, sp, #(8 * 4)
-ENTRY(__smccc_workaround_1_smc_end)
+ENTRY(__smccc_workaround_smc_end_\num)
+.endm
+
+ENTRY(__mitigate_spectre_bhb_clear_insn_start)
+ clearbhb
+ isb
+ENTRY(__mitigate_spectre_bhb_clear_insn_end)
+
+mitigate_spectre_bhb_loop 8
+mitigate_spectre_bhb_loop 24
+mitigate_spectre_bhb_loop 32
+smccc_workaround 1, #ARM_SMCCC_ARCH_WORKAROUND_1_FID
+smccc_workaround 3, #ARM_SMCCC_ARCH_WORKAROUND_3_FID
/*
* Local variables:
--- a/xen/arch/arm/cpuerrata.c
+++ b/xen/arch/arm/cpuerrata.c
@@ -144,7 +144,16 @@ install_bp_hardening_vec(const struct ar
return ret;
}
-extern char __smccc_workaround_1_smc_start[], __smccc_workaround_1_smc_end[];
+extern char __smccc_workaround_smc_start_1[], __smccc_workaround_smc_end_1[];
+extern char __smccc_workaround_smc_start_3[], __smccc_workaround_smc_end_3[];
+extern char __mitigate_spectre_bhb_clear_insn_start[],
+ __mitigate_spectre_bhb_clear_insn_end[];
+extern char __mitigate_spectre_bhb_loop_start_8[],
+ __mitigate_spectre_bhb_loop_end_8[];
+extern char __mitigate_spectre_bhb_loop_start_24[],
+ __mitigate_spectre_bhb_loop_end_24[];
+extern char __mitigate_spectre_bhb_loop_start_32[],
+ __mitigate_spectre_bhb_loop_end_32[];
static int enable_smccc_arch_workaround_1(void *data)
{
@@ -176,8 +185,8 @@ static int enable_smccc_arch_workaround_
if ( (int)res.a0 < 0 )
goto warn;
- return !install_bp_hardening_vec(entry,__smccc_workaround_1_smc_start,
- __smccc_workaround_1_smc_end,
+ return !install_bp_hardening_vec(entry,__smccc_workaround_smc_start_1,
+ __smccc_workaround_smc_end_1,
"call ARM_SMCCC_ARCH_WORKAROUND_1");
warn:
@@ -192,6 +201,93 @@ warn:
return 0;
}
+/*
+ * Spectre BHB Mitigation
+ *
+ * CPU is either:
+ * - Having CSV2.3 so it is not affected.
+ * - Having ECBHB and is clearing the branch history buffer when an exception
+ * to a different exception level is happening so no mitigation is needed.
+ * - Mitigating using a loop on exception entry (number of loops depending on
+ * the CPU).
+ * - Mitigating using the firmware.
+ */
+static int enable_spectre_bhb_workaround(void *data)
+{
+ const struct arm_cpu_capabilities *entry = data;
+
+ /*
+ * Enable callbacks are called on every CPU based on the capabilities, so
+ * double-check whether the CPU matches the entry.
+ */
+ if ( !entry->matches(entry) )
+ return 0;
+
+ if ( cpu_data[smp_processor_id()].pfr64.csv2 == 3 )
+ return 0;
+
+ if ( cpu_data[smp_processor_id()].mm64.ecbhb )
+ return 0;
+
+ if ( cpu_data[smp_processor_id()].isa64.clearbhb )
+ return !install_bp_hardening_vec(entry,
+ __mitigate_spectre_bhb_clear_insn_start,
+ __mitigate_spectre_bhb_clear_insn_end,
+ "use clearBHB instruction");
+
+ /* Apply solution depending on hwcaps set on arm_errata */
+ if ( cpus_have_cap(ARM_WORKAROUND_BHB_LOOP_8) )
+ return !install_bp_hardening_vec(entry,
+ __mitigate_spectre_bhb_loop_start_8,
+ __mitigate_spectre_bhb_loop_end_8,
+ "use 8 loops workaround");
+
+ if ( cpus_have_cap(ARM_WORKAROUND_BHB_LOOP_24) )
+ return !install_bp_hardening_vec(entry,
+ __mitigate_spectre_bhb_loop_start_24,
+ __mitigate_spectre_bhb_loop_end_24,
+ "use 24 loops workaround");
+
+ if ( cpus_have_cap(ARM_WORKAROUND_BHB_LOOP_32) )
+ return !install_bp_hardening_vec(entry,
+ __mitigate_spectre_bhb_loop_start_32,
+ __mitigate_spectre_bhb_loop_end_32,
+ "use 32 loops workaround");
+
+ if ( cpus_have_cap(ARM_WORKAROUND_BHB_SMCC_3) )
+ {
+ struct arm_smccc_res res;
+
+ if ( smccc_ver < SMCCC_VERSION(1, 1) )
+ goto warn;
+
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FID,
+ ARM_SMCCC_ARCH_WORKAROUND_3_FID, &res);
+ /* The return value is in the lower 32-bits. */
+ if ( (int)res.a0 < 0 )
+ {
+ /*
+ * On processor affected with CSV2=0, workaround 1 will mitigate
+ * both Spectre v2 and BHB so use it when available
+ */
+ if ( enable_smccc_arch_workaround_1(data) )
+ return 1;
+
+ goto warn;
+ }
+
+ return !install_bp_hardening_vec(entry,__smccc_workaround_smc_start_3,
+ __smccc_workaround_smc_end_3,
+ "call ARM_SMCCC_ARCH_WORKAROUND_3");
+ }
+
+warn:
+ printk_once("**** No support for any spectre BHB workaround. ****\n"
+ "**** Please update your firmware. ****\n");
+
+ return 0;
+}
+
#endif /* CONFIG_ARM64_HARDEN_BRANCH_PREDICTOR */
/* Hardening Branch predictor code for Arm32 */
@@ -451,19 +547,77 @@ static const struct arm_cpu_capabilities
},
{
.capability = ARM_HARDEN_BRANCH_PREDICTOR,
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ MIDR_RANGE(MIDR_CORTEX_A72, 0, 1 << MIDR_VARIANT_SHIFT),
.enable = enable_smccc_arch_workaround_1,
},
{
- .capability = ARM_HARDEN_BRANCH_PREDICTOR,
+ .capability = ARM_WORKAROUND_BHB_SMCC_3,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
- .enable = enable_smccc_arch_workaround_1,
+ .enable = enable_spectre_bhb_workaround,
},
{
- .capability = ARM_HARDEN_BRANCH_PREDICTOR,
+ .capability = ARM_WORKAROUND_BHB_SMCC_3,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
- .enable = enable_smccc_arch_workaround_1,
+ .enable = enable_spectre_bhb_workaround,
+ },
+ /* spectre BHB */
+ {
+ .capability = ARM_WORKAROUND_BHB_LOOP_8,
+ MIDR_RANGE(MIDR_CORTEX_A72, 1 << MIDR_VARIANT_SHIFT,
+ (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)),
+ .enable = enable_spectre_bhb_workaround,
+ },
+ {
+ .capability = ARM_WORKAROUND_BHB_LOOP_24,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+ .enable = enable_spectre_bhb_workaround,
+ },
+ {
+ .capability = ARM_WORKAROUND_BHB_LOOP_24,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+ .enable = enable_spectre_bhb_workaround,
+ },
+ {
+ .capability = ARM_WORKAROUND_BHB_LOOP_32,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+ .enable = enable_spectre_bhb_workaround,
+ },
+ {
+ .capability = ARM_WORKAROUND_BHB_LOOP_32,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+ .enable = enable_spectre_bhb_workaround,
},
+ {
+ .capability = ARM_WORKAROUND_BHB_LOOP_32,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+ .enable = enable_spectre_bhb_workaround,
+ },
+ {
+ .capability = ARM_WORKAROUND_BHB_LOOP_32,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+ .enable = enable_spectre_bhb_workaround,
+ },
+ {
+ .capability = ARM_WORKAROUND_BHB_LOOP_32,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+ .enable = enable_spectre_bhb_workaround,
+ },
+ {
+ .capability = ARM_WORKAROUND_BHB_LOOP_24,
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+ .enable = enable_spectre_bhb_workaround,
+ },
+ {
+ .capability = ARM_WORKAROUND_BHB_LOOP_32,
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+ .enable = enable_spectre_bhb_workaround,
+ },
+ {
+ .capability = ARM_WORKAROUND_BHB_LOOP_32,
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+ .enable = enable_spectre_bhb_workaround,
+ },
+
#endif
#ifdef CONFIG_ARM32_HARDEN_BRANCH_PREDICTOR
{
--- a/xen/include/asm-arm/arm64/macros.h
+++ b/xen/include/asm-arm/arm64/macros.h
@@ -21,5 +21,10 @@
ldr \dst, [\dst, \tmp]
.endm
+ /* clearbhb instruction clearing the branch history */
+ .macro clearbhb
+ hint #22
+ .endm
+
#endif /* __ASM_ARM_ARM64_MACROS_H */
--- a/xen/include/asm-arm/cpufeature.h
+++ b/xen/include/asm-arm/cpufeature.h
@@ -45,8 +45,12 @@
#define ARM_SSBD 7
#define ARM_SMCCC_1_1 8
#define ARM64_WORKAROUND_AT_SPECULATE 9
+#define ARM_WORKAROUND_BHB_LOOP_8 10
+#define ARM_WORKAROUND_BHB_LOOP_24 11
+#define ARM_WORKAROUND_BHB_LOOP_32 12
+#define ARM_WORKAROUND_BHB_SMCC_3 13
-#define ARM_NCAPS 10
+#define ARM_NCAPS 14
#ifndef __ASSEMBLY__
--- a/xen/include/asm-arm/smccc.h
+++ b/xen/include/asm-arm/smccc.h
@@ -334,6 +334,12 @@ void __arm_smccc_1_0_smc(register_t a0,
ARM_SMCCC_OWNER_ARCH, \
0x7FFF)
+#define ARM_SMCCC_ARCH_WORKAROUND_3_FID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_CONV_32, \
+ ARM_SMCCC_OWNER_ARCH, \
+ 0x3FFF)
+
/* SMCCC error codes */
#define ARM_SMCCC_NOT_REQUIRED (-2)
#define ARM_SMCCC_ERR_UNKNOWN_FUNCTION (-1)