File 5b21825d-1-x86-support-fully-eager-FPU-context-switching.patch of Package xen.8005
# Commit 146dfe9277c2b4a8c399b229e00d819065e3167b
# Date 2018-06-13 21:45:17 +0100
# Author Andrew Cooper <andrew.cooper3@citrix.com>
# Committer Andrew Cooper <andrew.cooper3@citrix.com>
x86: Support fully eager FPU context switching
This is controlled on a per-vcpu basis for flexibility.
This is part of XSA-267 / CVE-2018-3665
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -215,8 +215,25 @@ void vcpu_restore_fpu_eager(struct vcpu
{
ASSERT(!is_idle_vcpu(v));
+ if ( v->arch.fully_eager_fpu )
+ {
+ /* Avoid recursion */
+ clts();
+
+ if ( cpu_has_xsave )
+ fpu_xrstor(v, XSTATE_ALL);
+ else
+ fpu_fxrstor(v);
+
+ v->fpu_initialised = 1;
+ v->fpu_dirtied = 1;
+
+ /* Xen doesn't need TS set, but the guest might. */
+ if ( is_pv_vcpu(v) && (v->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS) )
+ stts();
+ }
/* save the nonlazy extended state which is not tracked by CR0.TS bit */
- if ( v->arch.nonlazy_xstate_used )
+ else if ( v->arch.nonlazy_xstate_used )
{
/* Avoid recursion */
clts();
@@ -238,6 +255,8 @@ void vcpu_restore_fpu_lazy(struct vcpu *
if ( v->fpu_dirtied )
return;
+ ASSERT(!v->arch.fully_eager_fpu);
+
if ( cpu_has_xsave )
fpu_xrstor(v, XSTATE_LAZY);
else
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -505,6 +505,9 @@ struct arch_vcpu
* and thus should be saved/restored. */
bool_t nonlazy_xstate_used;
+ /* Restore all FPU state (lazy and non-lazy state) on context switch? */
+ bool_t fully_eager_fpu;
+
struct vmce vmce;
struct paging_vcpu paging;