File 5aec744a-5-x86-xpti-no-global-pages.patch of Package xen.7652
From d543fa409358a9128d3629dcb28daae28c2d150f Mon Sep 17 00:00:00 2001
From: Juergen Gross <jgross@suse.com>
Date: Thu, 26 Apr 2018 13:33:14 +0200
Subject: [PATCH] xen/x86: disable global pages for domains with XPTI active
Instead of flushing the TLB from global pages when switching address
spaces with XPTI being active, just disable global pages via %cr4
completely when a domain subject to XPTI is active. This avoids the
need for extra TLB flushes, as loading %cr3 will remove all TLB
entries.
In order to avoid states with cr3/cr4 having inconsistent values
(e.g. global pages being activated while cr3 already specifies a XPTI
address space) move loading of the new cr4 value to write_ptbase()
(actually to switch_cr3_cr4() called by write_ptbase()).
This requires using switch_cr3_cr4() instead of write_ptbase() when
building dom0, in order to avoid setting cr4 with cr4.smap set.
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
--- sle12sp2.orig/xen/arch/x86/domain.c 2018-05-23 11:48:07.000000000 +0200
+++ sle12sp2/xen/arch/x86/domain.c 2018-05-23 11:52:35.000000000 +0200
@@ -1964,7 +1964,6 @@ static void paravirt_ctxt_switch_from(st
static void paravirt_ctxt_switch_to(struct vcpu *v)
{
root_pgentry_t *root_pgt = this_cpu(root_pgt);
- unsigned long cr4;
switch_kernel_stack(v);
@@ -1973,10 +1972,6 @@ static void paravirt_ctxt_switch_to(stru
l4e_from_page(v->domain->arch.perdomain_l3_pg,
__PAGE_HYPERVISOR_RW);
- cr4 = pv_guest_cr4_to_real_cr4(v);
- if ( unlikely(cr4 != read_cr4()) )
- write_cr4(cr4);
-
if ( unlikely(v->arch.debugreg[7] & DR7_ACTIVE_MASK) )
activate_debugregs(v);
--- sle12sp2.orig/xen/arch/x86/domain_build.c 2018-05-23 11:48:07.000000000 +0200
+++ sle12sp2/xen/arch/x86/domain_build.c 2018-05-23 11:52:35.000000000 +0200
@@ -1327,7 +1327,7 @@ int __init construct_dom0(
update_cr3(v);
/* We run on dom0's page tables for the final part of the build process. */
- write_ptbase(v);
+ switch_cr3_cr4(v->arch.cr3, read_cr4());
mapcache_override_current(v);
/* Copy the OS image and free temporary buffer. */
@@ -1347,7 +1347,7 @@ int __init construct_dom0(
(parms.virt_hypercall >= v_end) )
{
mapcache_override_current(NULL);
- write_ptbase(current);
+ switch_cr3_cr4(current->arch.cr3, read_cr4());
printk("Invalid HYPERCALL_PAGE field in ELF notes.\n");
rc = -1;
goto out;
@@ -1486,7 +1486,7 @@ int __init construct_dom0(
/* Return to idle domain's page tables. */
mapcache_override_current(NULL);
- write_ptbase(current);
+ switch_cr3_cr4(current->arch.cr3, read_cr4());
update_domain_wallclock_time(d);
--- sle12sp2.orig/xen/arch/x86/flushtlb.c 2018-05-23 11:49:31.000000000 +0200
+++ sle12sp2/xen/arch/x86/flushtlb.c 2018-05-23 11:52:35.000000000 +0200
@@ -91,20 +91,27 @@ static void do_tlb_flush(void)
post_flush(t);
}
-void switch_cr3(unsigned long cr3)
+void switch_cr3_cr4(unsigned long cr3, unsigned long cr4)
{
- unsigned long flags, cr4;
+ unsigned long flags, old_cr4;
u32 t;
/* This non-reentrant function is sometimes called in interrupt context. */
local_irq_save(flags);
t = pre_flush();
- cr4 = read_cr4();
- write_cr4(cr4 & ~X86_CR4_PGE);
+ old_cr4 = read_cr4();
+ if ( old_cr4 & X86_CR4_PGE )
+ {
+ old_cr4 = cr4 & ~X86_CR4_PGE;
+ write_cr4(old_cr4);
+ }
+
write_cr3(cr3);
- write_cr4(cr4);
+
+ if ( old_cr4 != cr4 )
+ write_cr4(cr4);
post_flush(t);
--- sle12sp2.orig/xen/arch/x86/mm.c 2018-05-23 11:48:07.000000000 +0200
+++ sle12sp2/xen/arch/x86/mm.c 2018-05-23 11:52:35.000000000 +0200
@@ -500,20 +500,28 @@ void make_cr3(struct vcpu *v, unsigned l
void write_ptbase(struct vcpu *v)
{
struct cpu_info *cpu_info = get_cpu_info();
+ unsigned long new_cr4;
+
+ new_cr4 = (is_pv_vcpu(v) && !is_idle_vcpu(v))
+ ? pv_guest_cr4_to_real_cr4(v)
+ : ((read_cr4() & ~X86_CR4_TSD) | X86_CR4_PGE);
if ( is_pv_vcpu(v) && v->domain->arch.pv_domain.xpti )
{
cpu_info->root_pgt_changed = 1;
cpu_info->pv_cr3 = __pa(this_cpu(root_pgt));
- switch_cr3(v->arch.cr3);
+ switch_cr3_cr4(v->arch.cr3, new_cr4);
}
else
{
- /* Make sure to clear xen_cr3 before pv_cr3; switch_cr3() serializes. */
+ /* Make sure to clear xen_cr3 before pv_cr3. */
cpu_info->xen_cr3 = 0;
- switch_cr3(v->arch.cr3);
+ /* switch_cr3_cr4() serializes. */
+ switch_cr3_cr4(v->arch.cr3, new_cr4);
cpu_info->pv_cr3 = 0;
}
+
+ ASSERT(is_pv_vcpu(v) || read_cr4() == mmu_cr4_features);
}
/*
--- sle12sp2.orig/xen/arch/x86/x86_64/entry.S 2018-05-23 11:45:26.000000000 +0200
+++ sle12sp2/xen/arch/x86/x86_64/entry.S 2018-05-23 11:52:35.000000000 +0200
@@ -70,13 +70,8 @@ restore_all_guest:
ROOT_PAGETABLE_LAST_XEN_SLOT - 1) * 8, %rdi
rep movsq
.Lrag_copy_done:
- mov STACK_CPUINFO_FIELD(cr4)(%rdx), %rdi
mov %r9, STACK_CPUINFO_FIELD(xen_cr3)(%rdx)
- mov %rdi, %rsi
- and $~X86_CR4_PGE, %rdi
- mov %rdi, %cr4
mov %rax, %cr3
- mov %rsi, %cr4
.Lrag_keep_cr3:
/* Restore stashed SPEC_CTRL value. */
@@ -132,12 +127,7 @@ restore_all_xen:
* so "g" will have to do.
*/
UNLIKELY_START(g, exit_cr3)
- mov %cr4, %rdi
- mov %rdi, %rsi
- and $~X86_CR4_PGE, %rdi
- mov %rdi, %cr4
mov %rax, %cr3
- mov %rsi, %cr4
UNLIKELY_END(exit_cr3)
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
--- sle12sp2.orig/xen/common/efi/runtime.c 2018-05-23 11:45:31.000000000 +0200
+++ sle12sp2/xen/common/efi/runtime.c 2018-05-23 11:52:35.000000000 +0200
@@ -106,7 +106,7 @@ struct efi_rs_state efi_rs_enter(void)
asm volatile ( "lgdt %0" : : "m" (gdt_desc) );
}
- switch_cr3(virt_to_maddr(efi_l4_pgtable));
+ switch_cr3_cr4(virt_to_maddr(efi_l4_pgtable), read_cr4());
return state;
}
@@ -115,7 +115,7 @@ void efi_rs_leave(struct efi_rs_state *s
{
if ( !state->cr3 )
return;
- switch_cr3(state->cr3);
+ switch_cr3_cr4(state->cr3, read_cr4());
if ( is_pv_vcpu(current) && !is_idle_vcpu(current) )
{
struct desc_ptr gdt_desc = {
--- sle12sp2.orig/xen/include/asm-x86/domain.h 2018-05-23 11:48:07.000000000 +0200
+++ sle12sp2/xen/include/asm-x86/domain.h 2018-05-23 11:52:35.000000000 +0200
@@ -605,9 +605,10 @@ unsigned long pv_guest_cr4_fixup(const s
#define pv_guest_cr4_to_real_cr4(v) \
(((v)->arch.pv_vcpu.ctrlreg[4] \
| (mmu_cr4_features \
- & (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_SMEP | \
+ & (X86_CR4_PSE | X86_CR4_SMEP | \
X86_CR4_SMAP | X86_CR4_OSXSAVE | \
X86_CR4_FSGSBASE)) \
+ | ((v)->domain->arch.pv_domain.xpti ? 0 : X86_CR4_PGE) \
| ((v)->domain->arch.vtsc ? X86_CR4_TSD : 0)) \
& ~X86_CR4_DE)
#define real_cr4_to_pv_guest_cr4(c) \
--- sle12sp2.orig/xen/include/asm-x86/flushtlb.h 2018-05-23 11:48:07.000000000 +0200
+++ sle12sp2/xen/include/asm-x86/flushtlb.h 2018-05-23 11:52:35.000000000 +0200
@@ -84,7 +84,7 @@ static inline unsigned long read_cr3(voi
}
/* Write pagetable base and implicitly tick the tlbflush clock. */
-void switch_cr3(unsigned long cr3);
+void switch_cr3_cr4(unsigned long cr3, unsigned long cr4);
/* flush_* flag fields: */
/*