File xsa297-2.patch of Package xen.11319
x86/spec-ctrl: Infrastructure to use VERW to flush pipeline buffers
Three synthetic features are introduced, as we need individual control of
each, depending on circumstances. A later change will enable them at
appropriate points.
The verw_sel field doesn't strictly need to live in struct cpu_info. It lives
there because there is a convenient hole it can fill, and it reduces the
complexity of the SPEC_CTRL_EXIT_TO_{PV,HVM} assembly by avoiding the need for
any temporary stack maintenance.
This is part of XSA-297, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -134,6 +134,7 @@ void __dummy__(void)
OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
OFFSET(CPUINFO_processor_id, struct cpu_info, processor_id);
+ OFFSET(CPUINFO_verw_sel, struct cpu_info, verw_sel);
OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
OFFSET(CPUINFO_xen_cr3, struct cpu_info, xen_cr3);
OFFSET(CPUINFO_pv_cr3, struct cpu_info, pv_cr3);
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -22,6 +22,7 @@ struct vcpu;
struct cpu_info {
struct cpu_user_regs guest_cpu_user_regs;
unsigned int processor_id;
+ unsigned short verw_sel;
/*
* use_pv_cr3 is set in case the value of pv_cr3 is to be written into
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -53,6 +53,13 @@ static inline void init_shadow_spec_ctrl
info->shadow_spec_ctrl = info->use_shadow_spec_ctrl = 0;
info->xen_spec_ctrl = default_xen_spec_ctrl;
info->xen_rsb = default_xen_rsb;
+
+ /*
+ * For least latency, the VERW selector should be a writeable data
+ * descriptor resident in the cache. __HYPERVISOR_DS32 shares a cache
+ * line with __HYPERVISOR_CS, so is expected to be very cache-hot.
+ */
+ info->verw_sel = __HYPERVISOR_DS32;
}
/* WARNING! `ret`, `call *`, `jmp *` not safe after this call. */
@@ -73,6 +80,21 @@ static always_inline void spec_ctrl_ente
wrmsr(MSR_SPEC_CTRL, 0, 0);
barrier();
}
+
+ /*
+ * Microarchitectural Store Buffer Data Sampling:
+ *
+ * On vulnerable systems, store buffer entries are statically partitioned
+ * between active threads. When entering idle, our store buffer entries
+ * are re-partitioned to allow the other threads to use them.
+ *
+ * Flush the buffers to ensure that no sensitive data of ours can be
+ * leaked by a sibling after it gets our store buffer entries.
+ *
+ * Note: VERW must be encoded with a memory operand, as it is only that
+ * form which causes a flush.
+ */
+ asm volatile ( "verw %[sel]" :: [sel] "m" (info->verw_sel) );
}
/* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */
@@ -91,6 +113,17 @@ static always_inline void spec_ctrl_exit
wrmsr(MSR_SPEC_CTRL, SPEC_CTRL_IBRS, 0);
barrier();
}
+
+ /*
+ * Microarchitectural Store Buffer Data Sampling:
+ *
+ * On vulnerable systems, store buffer entries are statically partitioned
+ * between active threads. When exiting idle, the other threads' store
+ * buffer entries are re-partitioned to give us some.
+ *
+ * We now have store buffer entries with stale data from sibling threads.
+ * A flush, if necessary, will be performed on the return-to-guest path.
+ */
}
#endif /* !__X86_SPEC_CTRL_H__ */
--- a/xen/include/asm-x86/spec_ctrl_asm.h
+++ b/xen/include/asm-x86/spec_ctrl_asm.h
@@ -238,6 +238,7 @@
mov $MSR_SPEC_CTRL, %ecx
wrmsr
.Lspec_exit_done\@:
+ verw CPUINFO_verw_sel(%rsp)
.endm
/* Use after a VMEXIT from an HVM guest. */