File 5c75615a-viridian-fix-HvFlushVirtualAddress.patch of Package xen.11173
# Commit ce98ee3050a824994ce4957faa8f53ecb8c7da9d
# Date 2019-02-26 16:55:06 +0100
# Author Paul Durrant <paul.durrant@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
viridian: fix the HvFlushVirtualAddress/List hypercall implementation

The current code uses hvm_asid_flush_vcpu(), but this is insufficient for
a guest running in shadow paging mode, which results in guest crashes early
in boot if the 'hcall_remote_tlb_flush' enlightenment is enabled.

This patch, instead of open coding a new flush algorithm, adapts the one
already used by the HVMOP_flush_tlbs Xen hypercall. The implementation is
modified to allow flushing the TLBs of only a subset of a domain's vCPUs,
with a callback function determining whether or not a given vCPU requires
flushing. This mechanism was chosen because, while the currently implemented
viridian hypercalls specify a vCPU mask, newer variants specify a sparse
HV_VP_SET; using a callback avoids exposing those details outside of the
viridian subsystem if and when the newer variants are implemented.

NOTE: Use of the common flush function requires that the hypercalls are
      restartable and so, with this patch applied, viridian_hypercall()
      can now return HVM_HCALL_preempted. This is safe as no modification
      to struct cpu_user_regs is done before the return.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
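
To illustrate the callback mechanism described above, here is a minimal
standalone C sketch (not Xen code: struct vcpu, flush_selected(), mask_flush()
and all_flush() are hypothetical stand-ins for the hvm_flush_vcpu_tlb(),
need_flush() and always_flush() machinery in the diff below) showing how a
predicate plus an opaque context keeps the generic flush loop ignorant of
whether the caller uses a vCPU mask, a sparse HV_VP_SET or anything else:

/*
 * Minimal standalone sketch of the callback pattern; the types and the
 * printf() stand in for the real pause/flush/unpause machinery.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vcpu {
    unsigned int vcpu_id;
};

/* Generic walk: act only on the vCPUs the caller's predicate selects. */
static void flush_selected(struct vcpu *vcpus, unsigned int nr,
                           bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
                           void *ctxt)
{
    for ( unsigned int i = 0; i < nr; i++ )
        if ( flush_vcpu(ctxt, &vcpus[i]) )
            printf("flush vcpu%u\n", vcpus[i].vcpu_id);
}

/* Predicate selecting the vCPUs named in a 64-bit mask. */
static bool mask_flush(void *ctxt, struct vcpu *v)
{
    uint64_t vcpu_mask = *(uint64_t *)ctxt;

    return vcpu_mask & (1ull << v->vcpu_id);
}

/* Predicate selecting every vCPU. */
static bool all_flush(void *ctxt, struct vcpu *v)
{
    (void)ctxt;
    (void)v;
    return true;
}

int main(void)
{
    struct vcpu vcpus[] = { { 0 }, { 1 }, { 2 }, { 3 } };
    uint64_t mask = (1ull << 1) | (1ull << 3);

    flush_selected(vcpus, 4, mask_flush, &mask); /* flushes vcpu1 and vcpu3 */
    flush_selected(vcpus, 4, all_flush, NULL);   /* flushes all four vCPUs  */

    return 0;
}
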
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3993,45 +3993,67 @@ static void hvm_s3_resume(struct domain
}
}
-static int hvmop_flush_tlb_all(void)
+bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt)
{
+ static DEFINE_PER_CPU(cpumask_t, flush_cpumask);
+ cpumask_t *mask = &this_cpu(flush_cpumask);
struct domain *d = current->domain;
struct vcpu *v;
- if ( !is_hvm_domain(d) )
- return -EINVAL;
-
/* Avoid deadlock if more than one vcpu tries this at the same time. */
if ( !spin_trylock(&d->hypercall_deadlock_mutex) )
- return -ERESTART;
+ return false;
/* Pause all other vcpus. */
for_each_vcpu ( d, v )
- if ( v != current )
+ if ( v != current && flush_vcpu(ctxt, v) )
vcpu_pause_nosync(v);
/* Now that all VCPUs are signalled to deschedule, we wait... */
for_each_vcpu ( d, v )
- if ( v != current )
+ if ( v != current && flush_vcpu(ctxt, v) )
while ( !vcpu_runnable(v) && v->is_running )
cpu_relax();
/* All other vcpus are paused, safe to unlock now. */
spin_unlock(&d->hypercall_deadlock_mutex);
+ cpumask_clear(mask);
+
/* Flush paging-mode soft state (e.g., va->gfn cache; PAE PDPE cache). */
for_each_vcpu ( d, v )
+ {
+ if ( !flush_vcpu(ctxt, v) )
+ continue;
+
paging_update_cr3(v);
- /* Flush all dirty TLBs. */
- flush_tlb_mask(d->domain_dirty_cpumask);
+ cpumask_or(mask, mask, v->vcpu_dirty_cpumask);
+ }
+
+ /* Flush TLBs on all CPUs with dirty vcpu state. */
+ flush_tlb_mask(mask);
/* Done. */
for_each_vcpu ( d, v )
- if ( v != current )
+ if ( v != current && flush_vcpu(ctxt, v) )
vcpu_unpause(v);
- return 0;
+ return true;
+}
+
+static bool always_flush(void *ctxt, struct vcpu *v)
+{
+ return true;
+}
+
+static int hvmop_flush_tlb_all(void)
+{
+ if ( !is_hvm_domain(current->domain) )
+ return -EINVAL;
+
+ return hvm_flush_vcpu_tlb(always_flush, NULL) ? 0 : -ERESTART;
}
static int hvmop_set_evtchn_upcall_vector(
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -829,7 +829,16 @@ void viridian_domain_deinit(struct domai
teardown_vp_assist(v);
}
-static DEFINE_PER_CPU(cpumask_t, ipi_cpumask);
+/*
+ * Windows should not issue the hypercalls requiring this callback in the
+ * case where vcpu_id would exceed the size of the mask.
+ */
+static bool need_flush(void *ctxt, struct vcpu *v)
+{
+ uint64_t vcpu_mask = *(uint64_t *)ctxt;
+
+ return vcpu_mask & (1ul << v->vcpu_id);
+}
int viridian_hypercall(struct cpu_user_regs *regs)
{
@@ -894,8 +903,6 @@ int viridian_hypercall(struct cpu_user_r
case HvFlushVirtualAddressSpace:
case HvFlushVirtualAddressList:
{
- cpumask_t *pcpu_mask;
- struct vcpu *v;
struct {
uint64_t address_space;
uint64_t flags;
@@ -925,36 +932,12 @@ int viridian_hypercall(struct cpu_user_r
if ( input_params.flags & HV_FLUSH_ALL_PROCESSORS )
input_params.vcpu_mask = ~0ul;
- pcpu_mask = &this_cpu(ipi_cpumask);
- cpumask_clear(pcpu_mask);
-
- /*
- * For each specified virtual CPU flush all ASIDs to invalidate
- * TLB entries the next time it is scheduled and then, if it
- * is currently running, add its physical CPU to a mask of
- * those which need to be interrupted to force a flush.
- */
- for_each_vcpu ( currd, v )
- {
- if ( v->vcpu_id >= (sizeof(input_params.vcpu_mask) * 8) )
- break;
-
- if ( !(input_params.vcpu_mask & (1ul << v->vcpu_id)) )
- continue;
-
- hvm_asid_flush_vcpu(v);
- if ( v != curr && v->is_running )
- __cpumask_set_cpu(v->processor, pcpu_mask);
- }
-
/*
- * Since ASIDs have now been flushed it just remains to
- * force any CPUs currently running target vCPUs out of non-
- * root mode. It's possible that re-scheduling has taken place
- * so we may unnecessarily IPI some CPUs.
+ * A false return means that another vcpu is currently trying
+ * a similar operation, so back off.
*/
- if ( !cpumask_empty(pcpu_mask) )
- smp_send_event_check_mask(pcpu_mask);
+ if ( !hvm_flush_vcpu_tlb(need_flush, &input_params.vcpu_mask) )
+ return HVM_HCALL_preempted;
output.rep_complete = input.rep_count;
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -615,6 +615,9 @@ const char *hvm_efer_valid(const struct
signed int cr0_pg);
unsigned long hvm_cr4_guest_valid_bits(const struct vcpu *v, bool restore);
+bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
+ void *ctxt);
+
/*
* This must be defined as a macro instead of an inline function,
* because it uses 'struct vcpu' and 'struct domain' which have