From 0d3e7f0b6bf01fbd6250fd3408a22fead1601bf0 Mon Sep 17 00:00:00 2001
From: Juergen Gross <jgross@suse.com>
Date: Thu, 26 Apr 2018 13:33:12 +0200
Subject: [PATCH] xen/x86: support per-domain flag for xpti
Instead of switching XPTI globally on or off, add a per-domain flag for
that purpose. This allows the xpti boot parameter to be extended to
support running dom0 without Meltdown mitigations; using "xpti=no-dom0"
as a boot parameter achieves that.
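For example, a host on vulnerable hardware could be booted with the grub2
snippet below (a sketch only; GRUB_CMDLINE_XEN_DEFAULT is the conventional
hook for the hypervisor command line and is not touched by this patch):

    GRUB_CMDLINE_XEN_DEFAULT="xpti=no-dom0"

This leaves the DomU side of the mitigation at its (enabled) default while
switching it off for dom0; "xpti=dom0=false" is the equivalent explicit
spelling accepted by the parser added below.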
Move the xpti boot parameter handling from xen/arch/x86/smpboot.c to
xen/arch/x86/spec_ctrl.c, next to the other speculation mitigation options.
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -1230,14 +1230,24 @@ clustered mode. The default, given no h
mode.
### xpti
-> `= <boolean>`
+> `= List of [ default | <boolean> | dom0=<bool> | domu=<bool> ]`
-> Default: `false` on AMD hardware
+> Default: `false` on hardware not vulnerable to Meltdown (e.g. AMD)
> Default: `true` everywhere else
Override default selection of whether to isolate 64-bit PV guest page
tables.
+`true` activates page table isolation for all domains, even on hardware
+not vulnerable to Meltdown.
+
+`false` deactivates page table isolation on all systems for all domains.
+
+`default` restores the hardware-dependent default behaviour.
+
+With `dom0` and `domu` page table isolation can be controlled separately
+for dom0 and for guest domains.
+
** WARNING: Not yet a complete isolation implementation, but better than
nothing. **
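The list forms documented above can be combined. A few illustrative
settings (a sketch, assuming the "no-" and "=<bool>" spellings handled by
parse_boolean, as used by the parser added further down):

    xpti=false              # disable isolation for all domains
    xpti=no-domu            # disable isolation for guest domains only
    xpti=dom0=0,domu=1      # no isolation for dom0, isolation for guests
    xpti=default            # revert to the hardware-based default

The last form re-runs the RDCL_NO/ARCH_CAPABILITIES detection and thereby
overrides any explicit setting given earlier on the command line.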
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -339,6 +339,15 @@ static void release_compat_l4(struct vcp
v->arch.guest_table_user = pagetable_null();
}
+static void set_domain_xpti(struct domain *d)
+{
+ if ( is_pv_32bit_domain(d) )
+ d->arch.pv_domain.xpti = 0;
+ else
+ d->arch.pv_domain.xpti = opt_xpti & (is_hardware_domain(d)
+ ? OPT_XPTI_DOM0 : OPT_XPTI_DOMU);
+}
+
static inline int may_switch_mode(struct domain *d)
{
return (!is_hvm_domain(d) && (d->tot_pages == 0));
@@ -365,6 +374,9 @@ int switch_native(struct domain *d)
d->arch.x87_fip_width = cpu_has_fpu_sel ? 0 : 8;
+ if ( is_pv_domain(d) )
+ set_domain_xpti(d);
+
return 0;
}
@@ -400,6 +412,9 @@ int switch_compat(struct domain *d)
d->arch.x87_fip_width = 4;
+ if ( is_pv_domain(d) )
+ set_domain_xpti(d);
+
return 0;
undo_and_fail:
@@ -600,10 +615,14 @@ int arch_domain_create(struct domain *d,
if ( (rc = hvm_domain_initialise(d)) != 0 )
goto fail;
}
- else
+ else if ( !is_idle_domain(d) )
+ {
/* 64-bit PV guest by default. */
d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
+ set_domain_xpti(d);
+ }
+
/* initialize default tsc behavior in case tools don't */
tsc_set_info(d, TSC_MODE_DEFAULT, 0UL, 0, 0);
spin_lock_init(&d->arch.vtsc_lock);
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -432,6 +432,7 @@ int __init construct_dom0(
if ( compat32 )
{
d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
+ d->arch.pv_domain.xpti = 0;
v->vcpu_info = (void *)&d->shared_info->compat.vcpu_info[0];
if ( setup_compat_arg_xlat(v) != 0 )
BUG();
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -498,8 +498,21 @@ void make_cr3(struct vcpu *v, unsigned l
void write_ptbase(struct vcpu *v)
{
- get_cpu_info()->root_pgt_changed = 1;
- switch_cr3(v->arch.cr3);
+ struct cpu_info *cpu_info = get_cpu_info();
+
+ if ( is_pv_vcpu(v) && v->domain->arch.pv_domain.xpti )
+ {
+ cpu_info->root_pgt_changed = 1;
+ cpu_info->pv_cr3 = __pa(this_cpu(root_pgt));
+ switch_cr3(v->arch.cr3);
+ }
+ else
+ {
+ /* Make sure to clear xen_cr3 before pv_cr3; switch_cr3() serializes. */
+ cpu_info->xen_cr3 = 0;
+ switch_cr3(v->arch.cr3);
+ cpu_info->pv_cr3 = 0;
+ }
}
/*
@@ -3822,7 +3835,7 @@ long do_mmu_update(
case PGT_l4_page_table:
rc = mod_l4_entry(va, l4e_from_intpte(req.val), mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
- if ( !rc && this_cpu(root_pgt) )
+ if ( !rc && pt_owner->arch.pv_domain.xpti )
{
bool_t local_in_use = 0;
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -351,7 +351,7 @@ void start_secondary(void *unused)
spin_debug_disable();
get_cpu_info()->xen_cr3 = 0;
- get_cpu_info()->pv_cr3 = this_cpu(root_pgt) ? __pa(this_cpu(root_pgt)) : 0;
+ get_cpu_info()->pv_cr3 = 0;
percpu_traps_init();
@@ -747,8 +747,6 @@ static int clone_mapping(const void *ptr
return 0;
}
-int8_t __read_mostly opt_xpti = -1;
-boolean_param("xpti", opt_xpti);
DEFINE_PER_CPU(root_pgentry_t *, root_pgt);
static int setup_cpu_root_pgt(unsigned int cpu)
@@ -954,24 +952,13 @@ void __init smp_prepare_cpus(unsigned in
stack_base[0] = stack_start;
- if ( opt_xpti < 0 )
- {
- uint64_t caps = 0;
-
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
- caps = ARCH_CAPABILITIES_RDCL_NO;
- else if ( boot_cpu_has(X86_FEATURE_ARCH_CAPS) )
- rdmsrl(MSR_ARCH_CAPABILITIES, caps);
-
- opt_xpti = !(caps & ARCH_CAPABILITIES_RDCL_NO);
- }
rc = setup_cpu_root_pgt(0);
if ( rc )
panic("Error %d setting up PV root page table\n", rc);
if ( per_cpu(root_pgt, 0) )
{
- get_cpu_info()->pv_cr3 = __pa(per_cpu(root_pgt, 0));
+ get_cpu_info()->pv_cr3 = 0;
/*
* All entry points which may need to switch page tables have to start
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -192,9 +192,75 @@ static void __init print_details(enum in
opt_msr_sc_hvm ? " MSR_SPEC_CTRL" : "",
opt_rsb_hvm ? " RSB" : "");
- printk("XPTI: %s\n", opt_xpti ? "enabled" : "disabled");
+ printk(" XPTI (64-bit PV only): Dom0 %s, DomU %s\n",
+ opt_xpti & OPT_XPTI_DOM0 ? "enabled" : "disabled",
+ opt_xpti & OPT_XPTI_DOMU ? "enabled" : "disabled");
}
+#define OPT_XPTI_DEFAULT 0xff
+uint8_t __read_mostly opt_xpti = OPT_XPTI_DEFAULT;
+
+static __init void xpti_init_default(bool_t force)
+{
+ uint64_t caps = 0;
+
+ if ( !force && (opt_xpti != OPT_XPTI_DEFAULT) )
+ return;
+
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+ caps = ARCH_CAPABILITIES_RDCL_NO;
+ else if ( boot_cpu_has(X86_FEATURE_ARCH_CAPS) )
+ rdmsrl(MSR_ARCH_CAPABILITIES, caps);
+
+ if ( caps & ARCH_CAPABILITIES_RDCL_NO )
+ opt_xpti = 0;
+ else
+ opt_xpti = OPT_XPTI_DOM0 | OPT_XPTI_DOMU;
+}
+
+static __init int parse_xpti(char *s)
+{
+ char *ss;
+ int val, rc = 0;
+
+ xpti_init_default(0);
+
+ do {
+ ss = strchr(s, ',');
+ if ( ss )
+ *ss = '\0';
+
+ switch ( parse_bool(s) )
+ {
+ case 0:
+ opt_xpti = 0;
+ break;
+
+ case 1:
+ opt_xpti = OPT_XPTI_DOM0 | OPT_XPTI_DOMU;
+ break;
+
+ default:
+ if ( !strcmp(s, "default") )
+ xpti_init_default(1);
+ else if ( (val = parse_boolean("dom0", s, ss)) >= 0 )
+ opt_xpti = (opt_xpti & ~OPT_XPTI_DOM0) |
+ (val ? OPT_XPTI_DOM0 : 0);
+ else if ( (val = parse_boolean("domu", s, ss)) >= 0 )
+ opt_xpti = (opt_xpti & ~OPT_XPTI_DOMU) |
+ (val ? OPT_XPTI_DOMU : 0);
+ else
+ rc = -EINVAL;
+ break;
+ }
+
+ s = ss + 1;
+ } while ( ss );
+
+ return rc;
+}
+custom_param("xpti", parse_xpti);
+
void __init init_speculation_mitigations(void)
{
enum ind_thunk thunk = THUNK_DEFAULT;
@@ -261,6 +327,8 @@ void __init init_speculation_mitigations
/* (Re)init BSP state now that default_xen_* have been calculated. */
init_shadow_spec_ctrl_state();
+ xpti_init_default(0);
+
print_details(thunk, caps);
/*
--- a/xen/include/asm-x86/current.h
+++ b/xen/include/asm-x86/current.h
@@ -27,7 +27,8 @@ struct cpu_info {
/*
* Of the two following fields the latter is being set to the CR3 value
* to be used on the given pCPU for loading whenever 64-bit PV guest
- * context is being entered. The value never changes once set.
+ * context is being entered. A value of zero indicates no setting of CR3
+ * is to be performed.
* The former is the value to restore when re-entering Xen, if any. IOW
* its value being zero means there's nothing to restore. However, its
* value can also be negative, indicating to the exit-to-Xen code that
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -251,6 +251,9 @@ struct pv_domain
* unmask the event channel */
bool_t auto_unmask;
+ /* XPTI active? */
+ bool_t xpti;
+
/* map_domain_page() mapping cache. */
struct mapcache_domain mapcache;
};
--- a/xen/include/asm-x86/flushtlb.h
+++ b/xen/include/asm-x86/flushtlb.h
@@ -136,7 +136,7 @@ void flush_area_mask(const cpumask_t *,
#define flush_root_pgtbl_domain(d) \
{ \
- if ( this_cpu(root_pgt) && is_pv_domain(d) && !is_pv_32bit_domain(d) ) \
+ if ( is_pv_domain(d) && (d)->arch.pv_domain.xpti ) \
flush_mask((d)->domain_dirty_cpumask, FLUSH_ROOT_PGTBL); \
}
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -25,7 +25,6 @@
void init_speculation_mitigations(void);
-extern int8_t opt_xpti;
extern bool_t opt_ibpb;
extern bool_t opt_ssbd;
extern bool_t opt_msr_sc_pv, opt_msr_sc_hvm;
@@ -34,6 +33,10 @@ extern bool_t bsp_delay_spec_ctrl;
extern int8_t default_xen_spec_ctrl;
extern uint8_t default_xen_rsb;
+extern uint8_t opt_xpti;
+#define OPT_XPTI_DOM0 0x01
+#define OPT_XPTI_DOMU 0x02
+
static inline void init_shadow_spec_ctrl_state(void)
{
struct cpu_info *info = get_cpu_info();