File 19161-pv-ldt-handling.patch of Package xen

# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1233760126 0
# Node ID 13a0272c8c024fca83bc991c7e2da992d07bc8eb
# Parent  271697e6d9b2b85ce348548f99166be450d1cf5a
x86: Clean up PV guest LDT handling.
 1. Do not touch deferred_ops in invalidate_shadow_ldt(), as we may
 not always be in a context where deferred_ops is valid.
 2. Protect the shadow LDT with a lock, now that MMU updates are not
 protected by the per-domain lock.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
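[Editor's note: for reviewers, a minimal sketch of invalidate_shadow_ldt()
as it reads after this patch, assembled from the hunks below. The unmap
loop and the local declarations, which the patch leaves unchanged, are
elided; the explanatory comments are mine, not upstream's.]

    static void invalidate_shadow_ldt(struct vcpu *v, int flush)
    {
        /* The lock is taken without disabling IRQs, so it must never
         * be acquired from interrupt context. */
        BUG_ON(unlikely(in_irq()));

        spin_lock(&v->arch.shadow_ldt_lock);

        if ( v->arch.shadow_ldt_mapcnt == 0 )
            goto out;

        v->arch.shadow_ldt_mapcnt = 0;

        /* ... unmap the 16 shadow LDT slots and drop their page
         *     references (loop body unchanged by this patch) ... */

        /* Rid TLBs of stale mappings (guest mappings and shadow mappings). */
        if ( flush )
            flush_tlb_mask(v->vcpu_dirty_cpumask);

     out:
        spin_unlock(&v->arch.shadow_ldt_lock);
    }

Of the call sites touched below, only put_page_from_l1e() passes
flush=1; new_guest_cr3() and the MMUEXT_SET_LDT path pass 0, presumably
because those paths flush anyway via the subsequent write_ptbase() or
the caller's deferred ops.
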

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -319,6 +319,8 @@ int vcpu_initialise(struct vcpu *v)
     v->arch.perdomain_ptes =
         d->arch.mm_perdomain_pt + (v->vcpu_id << GDT_LDT_VCPU_SHIFT);
 
+    spin_lock_init(&v->arch.shadow_ldt_lock);
+
     return (is_pv_32on64_vcpu(v) ? setup_compat_l4(v) : 0);
 }
 
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -460,14 +460,18 @@ void update_cr3(struct vcpu *v)
 }
 
 
-static void invalidate_shadow_ldt(struct vcpu *v)
+static void invalidate_shadow_ldt(struct vcpu *v, int flush)
 {
     int i;
     unsigned long pfn;
     struct page_info *page;
     
+    BUG_ON(unlikely(in_irq()));
+
+    spin_lock(&v->arch.shadow_ldt_lock);
+
     if ( v->arch.shadow_ldt_mapcnt == 0 )
-        return;
+        goto out;
 
     v->arch.shadow_ldt_mapcnt = 0;
 
@@ -482,11 +486,12 @@ static void invalidate_shadow_ldt(struct
         put_page_and_type(page);
     }
 
-    /* Dispose of the (now possibly invalid) mappings from the TLB.  */
-    if ( v == current )
-        queue_deferred_ops(v->domain, DOP_FLUSH_TLB | DOP_RELOAD_LDT);
-    else
-        flush_tlb_mask(v->domain->domain_dirty_cpumask);
+    /* Rid TLBs of stale mappings (guest mappings and shadow mappings). */
+    if ( flush )
+        flush_tlb_mask(v->vcpu_dirty_cpumask);
+
+ out:
+    spin_unlock(&v->arch.shadow_ldt_lock);
 }
 
 
@@ -537,8 +542,10 @@ int map_ldt_shadow_page(unsigned int off
 
     nl1e = l1e_from_pfn(mfn, l1e_get_flags(l1e) | _PAGE_RW);
 
+    spin_lock(&v->arch.shadow_ldt_lock);
     l1e_write(&v->arch.perdomain_ptes[off + 16], nl1e);
     v->arch.shadow_ldt_mapcnt++;
+    spin_unlock(&v->arch.shadow_ldt_lock);
 
     return 1;
 }
@@ -936,7 +943,7 @@ void put_page_from_l1e(l1_pgentry_t l1e,
              (d == e) )
         {
             for_each_vcpu ( d, v )
-                invalidate_shadow_ldt(v);
+                invalidate_shadow_ldt(v, 1);
         }
         put_page(page);
     }
@@ -2347,7 +2354,7 @@ int new_guest_cr3(unsigned long mfn)
             return 0;
         }
 
-        invalidate_shadow_ldt(v);
+        invalidate_shadow_ldt(v, 0);
         write_ptbase(v);
 
         return 1;
@@ -2362,7 +2369,7 @@ int new_guest_cr3(unsigned long mfn)
         return 0;
     }
 
-    invalidate_shadow_ldt(v);
+    invalidate_shadow_ldt(v, 0);
 
     old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
 
@@ -2399,6 +2406,10 @@ static void process_deferred_ops(void)
             flush_tlb_local();
     }
 
+    /*
+     * Do this after flushing TLBs, to ensure we see fresh LDT mappings
+     * via the linear pagetable mapping.
+     */
     if ( deferred_ops & DOP_RELOAD_LDT )
         (void)map_ldt_shadow_page(0);
 
@@ -2771,7 +2782,7 @@ int do_mmuext_op(
             else if ( (v->arch.guest_context.ldt_ents != ents) || 
                       (v->arch.guest_context.ldt_base != ptr) )
             {
-                invalidate_shadow_ldt(v);
+                invalidate_shadow_ldt(v, 0);
                 v->arch.guest_context.ldt_base = ptr;
                 v->arch.guest_context.ldt_ents = ents;
                 load_LDT(v);
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -353,6 +353,7 @@ struct arch_vcpu
 
     /* Current LDT details. */
     unsigned long shadow_ldt_mapcnt;
+    spinlock_t shadow_ldt_lock;
 
     struct paging_vcpu paging;
 
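
[Editor's note: as background for the new lock, here is a minimal
user-space analogue of the race it closes — one thread installing
mappings and bumping a count (map_ldt_shadow_page()) while another
tears all mappings down and zeroes the count (invalidate_shadow_ldt()).
This is not Xen code: mapper, invalidator, slot[], ITERS, and the
pthread spinlock are illustrative stand-ins for perdomain_ptes[],
shadow_ldt_mapcnt, and Xen's spin_lock(). Build with: cc -O2 -pthread.]

    #include <pthread.h>
    #include <stdio.h>

    #define SLOTS 16
    #define ITERS 100000

    static pthread_spinlock_t shadow_ldt_lock;
    static int slot[SLOTS];   /* 1 = mapped, 0 = empty */
    static int mapcnt;        /* must always equal the number of set slots */

    /* Analogue of map_ldt_shadow_page(): install one mapping and bump
     * the count, atomically with respect to invalidation. */
    static void *mapper(void *arg)
    {
        for ( int n = 0; n < ITERS; n++ )
        {
            int off = n % SLOTS;
            pthread_spin_lock(&shadow_ldt_lock);
            if ( !slot[off] )
            {
                slot[off] = 1;
                mapcnt++;
            }
            pthread_spin_unlock(&shadow_ldt_lock);
        }
        return NULL;
    }

    /* Analogue of invalidate_shadow_ldt(): tear down every mapping and
     * zero the count as one atomic step. Without the lock, mapper()
     * could slip a new mapping in between the teardown loop and the
     * count reset, leaving mapcnt out of sync with slot[]. */
    static void *invalidator(void *arg)
    {
        for ( int n = 0; n < ITERS; n++ )
        {
            pthread_spin_lock(&shadow_ldt_lock);
            if ( mapcnt != 0 )
            {
                for ( int i = 0; i < SLOTS; i++ )
                    slot[i] = 0;
                mapcnt = 0;
            }
            pthread_spin_unlock(&shadow_ldt_lock);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;
        int live = 0;

        pthread_spin_init(&shadow_ldt_lock, PTHREAD_PROCESS_PRIVATE);
        pthread_create(&t1, NULL, mapper, NULL);
        pthread_create(&t2, NULL, invalidator, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);

        /* Check the invariant the lock preserves. */
        for ( int i = 0; i < SLOTS; i++ )
            live += slot[i];
        printf("mapcnt=%d live=%d %s\n", mapcnt, live,
               (mapcnt == live) ? "consistent" : "RACE");
        return 0;
    }

A per-vCPU lock keeps contention local to one guest vCPU, and the
BUG_ON(unlikely(in_irq())) in the patch documents that the lock is not
IRQ-safe: taking it from interrupt context on a CPU already holding it
would deadlock.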