File xenpaging.p2m_is_paged.patch of Package xen

# HG changeset patch
# Parent 4a0a6a1cd56a8f3d242f323fb5161c2d1f52dccb
xenpaging: add need_populate and paged_no_mfn checks

There is currently a mix of p2mt checks for the various paging types. Some of
them mean the gfn needs to be populated, others mean the gfn is not backed by
an mfn.

Add a new p2m_do_populate() helper which covers the p2m_ram_paged and
p2m_ram_paging_out types. If a gfn is no longer in one of these states, another
populate request for the pager is not needed. This avoids a call to
p2m_mem_paging_populate(), which in turn reduces the pressure on the ring
buffer because no temporary slot needs to be claimed. As such, this helper is
an optimization.
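
At the call sites changed below, the check takes the following shape (sketch
based on the emulate.c hunk; the gfn variable and the return value on the
error path differ per call site):

    if ( p2m_is_paging(p2mt) )
    {
        /* Only ask the pager if the gfn is still in a populate state */
        if ( p2m_do_populate(p2mt) )
            p2m_mem_paging_populate(p2m, gfn);
        return X86EMUL_RETRY;
    }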

Modify the existing p2m_is_paged() helper so that it covers
p2m_ram_paging_in_start in addition to the current p2m_ram_paged type. A gfn
in either of these two states is not backed by an mfn.
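
The resulting definitions in xen/include/asm-x86/p2m.h, as added by the hunk
at the end of this patch:

    #define P2M_POPULATE_TYPES     (p2m_to_mask(p2m_ram_paged) \
                                    | p2m_to_mask(p2m_ram_paging_out))

    #define P2M_PAGED_NO_MFN_TYPES (p2m_to_mask(p2m_ram_paged) \
                                    | p2m_to_mask(p2m_ram_paging_in_start))

    #define p2m_do_populate(_t) (p2m_to_mask(_t) & P2M_POPULATE_TYPES)
    #define p2m_is_paged(_t)    (p2m_to_mask(_t) & P2M_PAGED_NO_MFN_TYPES)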

Signed-off-by: Olaf Hering <olaf@aepfle.de>

---
 xen/arch/x86/hvm/emulate.c       |    3 +
 xen/arch/x86/hvm/hvm.c           |   17 ++++++----
 xen/arch/x86/mm.c                |   63 ++++++++++++---------------------------
 xen/arch/x86/mm/guest_walk.c     |    3 +
 xen/arch/x86/mm/hap/guest_walk.c |    6 ++-
 xen/arch/x86/mm/hap/p2m-ept.c    |    3 -
 xen/arch/x86/mm/p2m.c            |    4 +-
 xen/common/grant_table.c         |    3 +
 xen/include/asm-x86/p2m.h        |    9 ++++-
 9 files changed, 51 insertions(+), 60 deletions(-)

Index: xen-4.1.2-testing/xen/arch/x86/hvm/emulate.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/hvm/emulate.c
+++ xen-4.1.2-testing/xen/arch/x86/hvm/emulate.c
@@ -66,7 +66,8 @@ static int hvmemul_do_io(
     ram_mfn = gfn_to_mfn_unshare(p2m, ram_gfn, &p2mt, 0);
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(p2m, ram_gfn);
+        if ( p2m_do_populate(p2mt) )
+            p2m_mem_paging_populate(p2m, ram_gfn);
         return X86EMUL_RETRY;
     }
     if ( p2m_is_shared(p2mt) )
Index: xen-4.1.2-testing/xen/arch/x86/hvm/hvm.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/hvm/hvm.c
+++ xen-4.1.2-testing/xen/arch/x86/hvm/hvm.c
@@ -385,7 +385,8 @@ static int hvm_set_ioreq_page(
         return -EINVAL;
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(p2m, gmfn);
+        if ( p2m_do_populate(p2mt) )
+            p2m_mem_paging_populate(p2m, gmfn);
         return -ENOENT;
     }
     if ( p2m_is_shared(p2mt) )
@@ -1199,7 +1200,7 @@ bool_t hvm_hap_nested_page_fault(unsigne
 
 #ifdef __x86_64__
     /* Check if the page has been paged out */
-    if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
+    if ( p2m_do_populate(p2mt) )
         p2m_mem_paging_populate(p2m, gfn);
 
     /* Mem sharing: unshare the page and try again */
@@ -1687,7 +1688,8 @@ static void *__hvm_map_guest_frame(unsig
         return NULL;
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(p2m, gfn);
+        if ( p2m_do_populate(p2mt) )
+            p2m_mem_paging_populate(p2m, gfn);
         return NULL;
     }
 
@@ -2145,7 +2147,8 @@ static enum hvm_copy_result __hvm_copy(
 
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(p2m, gfn);
+            if ( p2m_do_populate(p2mt) )
+                p2m_mem_paging_populate(p2m, gfn);
             return HVMCOPY_gfn_paged_out;
         }
         if ( p2m_is_shared(p2mt) )
@@ -2233,7 +2236,8 @@ static enum hvm_copy_result __hvm_clear(
 
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(p2m, gfn);
+            if ( p2m_do_populate(p2mt) )
+                p2m_mem_paging_populate(p2m, gfn);
             return HVMCOPY_gfn_paged_out;
         }
         if ( p2m_is_shared(p2mt) )
@@ -3619,7 +3623,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
             mfn_t mfn = gfn_to_mfn(p2m, pfn, &t);
             if ( p2m_is_paging(t) )
             {
-                p2m_mem_paging_populate(p2m, pfn);
+                if ( p2m_do_populate(t) )
+                    p2m_mem_paging_populate(p2m, pfn);
 
                 rc = -EINVAL;
                 goto param_fail3;
@@ -3716,7 +3721,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
             mfn = gfn_to_mfn_unshare(p2m, pfn, &t, 0);
             if ( p2m_is_paging(t) )
             {
-                p2m_mem_paging_populate(p2m, pfn);
+                if ( p2m_do_populate(t) )
+                    p2m_mem_paging_populate(p2m, pfn);
 
                 rc = -EINVAL;
                 goto param_fail4;
Index: xen-4.1.2-testing/xen/arch/x86/mm.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/mm.c
+++ xen-4.1.2-testing/xen/arch/x86/mm.c
@@ -3465,9 +3465,10 @@ int do_mmu_update(
             if ( !p2m_is_valid(p2mt) )
               mfn = INVALID_MFN;
 
-            if ( p2m_is_paged(p2mt) )
+            if ( p2m_is_paging(p2mt) )
             {
-                p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner), gmfn);
+                if ( p2m_do_populate(p2mt) )
+                    p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner), gmfn);
 
                 rc = -ENOENT;
                 break;
@@ -3492,24 +3493,18 @@ int do_mmu_update(
                 {
                     l1_pgentry_t l1e = l1e_from_intpte(req.val);
                     p2m_type_t l1e_p2mt;
-		    unsigned long l1emfn = mfn_x(
                     gfn_to_mfn(p2m_get_hostp2m(pg_owner),
-                        l1e_get_pfn(l1e), &l1e_p2mt));
+                        l1e_get_pfn(l1e), &l1e_p2mt);
 
-                    if ( p2m_is_paged(l1e_p2mt) )
+#ifdef __x86_64__
+                    if ( p2m_is_paging(l1e_p2mt) )
                     {
-                        p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
+                        if ( p2m_do_populate(l1e_p2mt) )
+                            p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
                             l1e_get_pfn(l1e));
                         rc = -ENOENT;
                         break;
                     }
-                    else if ( p2m_ram_paging_in_start == l1e_p2mt && 
-                                !mfn_valid(l1emfn) )
-                    {
-                        rc = -ENOENT;
-                        break;
-                    }
-#ifdef __x86_64__
                     /* XXX: Ugly: pull all the checks into a separate function. 
                      * Don't want to do it now, not to interfere with mem_paging
                      * patches */
@@ -3536,22 +3531,16 @@ int do_mmu_update(
                 {
                     l2_pgentry_t l2e = l2e_from_intpte(req.val);
                     p2m_type_t l2e_p2mt;
-		    unsigned long l2emfn = mfn_x(
-                    gfn_to_mfn(p2m_get_hostp2m(pg_owner), l2e_get_pfn(l2e), &l2e_p2mt));
+                    gfn_to_mfn(p2m_get_hostp2m(pg_owner), l2e_get_pfn(l2e), &l2e_p2mt);
 
-                    if ( p2m_is_paged(l2e_p2mt) )
+                    if ( p2m_is_paging(l2e_p2mt) )
                     {
-                        p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
+                        if ( p2m_do_populate(l2e_p2mt) )
+                            p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
                             l2e_get_pfn(l2e));
                         rc = -ENOENT;
                         break;
                     }
-                    else if ( p2m_ram_paging_in_start == l2e_p2mt && 
-                                !mfn_valid(l2emfn) )
-                    {
-                        rc = -ENOENT;
-                        break;
-                    }
                     else if ( p2m_ram_shared == l2e_p2mt )
                     {
                         MEM_LOG("Unexpected attempt to map shared page.\n");
@@ -3567,22 +3556,16 @@ int do_mmu_update(
                 {
                     l3_pgentry_t l3e = l3e_from_intpte(req.val);
                     p2m_type_t l3e_p2mt;
-		    unsigned long l3emfn = mfn_x(
-                    gfn_to_mfn(p2m_get_hostp2m(pg_owner), l3e_get_pfn(l3e), &l3e_p2mt));
+                    gfn_to_mfn(p2m_get_hostp2m(pg_owner), l3e_get_pfn(l3e), &l3e_p2mt);
 
-                    if ( p2m_is_paged(l3e_p2mt) )
+                    if ( p2m_is_paging(l3e_p2mt) )
                     {
-                        p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
+                        if ( p2m_do_populate(l3e_p2mt) )
+                            p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
                             l3e_get_pfn(l3e));
                         rc = -ENOENT;
                         break;
                     }
-                    else if ( p2m_ram_paging_in_start == l3e_p2mt && 
-                                !mfn_valid(l3emfn) )
-                    {
-                        rc = -ENOENT;
-                        break;
-                    }
                     else if ( p2m_ram_shared == l3e_p2mt )
                     {
                         MEM_LOG("Unexpected attempt to map shared page.\n");
@@ -3598,23 +3581,17 @@ int do_mmu_update(
                 {
                     l4_pgentry_t l4e = l4e_from_intpte(req.val);
                     p2m_type_t l4e_p2mt;
-		    unsigned long l4emfn = mfn_x(
                     gfn_to_mfn(p2m_get_hostp2m(pg_owner),
-                        l4e_get_pfn(l4e), &l4e_p2mt));
+                        l4e_get_pfn(l4e), &l4e_p2mt);
 
-                    if ( p2m_is_paged(l4e_p2mt) )
+                    if ( p2m_is_paging(l4e_p2mt) )
                     {
-                        p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
+                        if ( p2m_do_populate(l4e_p2mt) )
+                            p2m_mem_paging_populate(p2m_get_hostp2m(pg_owner),
                             l4e_get_pfn(l4e));
                         rc = -ENOENT;
                         break;
                     }
-                    else if ( p2m_ram_paging_in_start == l4e_p2mt && 
-                                !mfn_valid(l4emfn) )
-                    {
-                        rc = -ENOENT;
-                        break;
-                    }
                     else if ( p2m_ram_shared == l4e_p2mt )
                     {
                         MEM_LOG("Unexpected attempt to map shared page.\n");
Index: xen-4.1.2-testing/xen/arch/x86/mm/guest_walk.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/mm/guest_walk.c
+++ xen-4.1.2-testing/xen/arch/x86/mm/guest_walk.c
@@ -96,7 +96,8 @@ static inline void *map_domain_gfn(struc
     *mfn = gfn_to_mfn_unshare(p2m, gfn_x(gfn), p2mt, 0);
     if ( p2m_is_paging(*p2mt) )
     {
-        p2m_mem_paging_populate(p2m, gfn_x(gfn));
+        if ( p2m_do_populate(*p2mt) )
+            p2m_mem_paging_populate(p2m, gfn_x(gfn));
 
         *rc = _PAGE_PAGED;
         return NULL;
Index: xen-4.1.2-testing/xen/arch/x86/mm/hap/guest_walk.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/mm/hap/guest_walk.c
+++ xen-4.1.2-testing/xen/arch/x86/mm/hap/guest_walk.c
@@ -50,7 +50,8 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
     top_mfn = gfn_to_mfn_unshare(p2m, cr3 >> PAGE_SHIFT, &p2mt, 0);
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(p2m, cr3 >> PAGE_SHIFT);
+        if ( p2m_do_populate(p2mt) )
+            p2m_mem_paging_populate(p2m, cr3 >> PAGE_SHIFT);
 
         pfec[0] = PFEC_page_paged;
         return INVALID_GFN;
@@ -82,7 +83,8 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
         gfn_to_mfn_unshare(p2m, gfn_x(gfn), &p2mt, 0);
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(p2m, gfn_x(gfn));
+            if ( p2m_do_populate(p2mt) )
+                p2m_mem_paging_populate(p2m, gfn_x(gfn));
 
             pfec[0] = PFEC_page_paged;
             return INVALID_GFN;
Index: xen-4.1.2-testing/xen/arch/x86/mm/hap/p2m-ept.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/mm/hap/p2m-ept.c
+++ xen-4.1.2-testing/xen/arch/x86/mm/hap/p2m-ept.c
@@ -377,8 +377,7 @@ ept_set_entry(struct p2m_domain *p2m, un
          * the intermediate tables will be freed below after the ept flush */
         old_entry = *ept_entry;
 
-        if ( mfn_valid(mfn_x(mfn)) || direct_mmio || p2m_is_paged(p2mt) ||
-             (p2mt == p2m_ram_paging_in_start) )
+        if ( mfn_valid(mfn_x(mfn)) || direct_mmio || p2m_is_paged(p2mt) )
         {
             /* Construct the new entry, and then write it once */
             new_entry.emt = epte_get_entry_emt(p2m->domain, gfn, mfn, &ipat,
Index: xen-4.1.2-testing/xen/arch/x86/mm/p2m.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/mm/p2m.c
+++ xen-4.1.2-testing/xen/arch/x86/mm/p2m.c
@@ -3049,7 +3049,7 @@ void p2m_mem_paging_populate(struct p2m_
     p2m_lock(p2m);
     mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query);
     /* Allow only nominated or evicted pages to enter page-in path */
-    if ( p2mt == p2m_ram_paging_out || p2mt == p2m_ram_paged )
+    if ( p2m_do_populate(p2mt) )
     {
         /* Evict will fail now, tag this request for pager */
         if ( p2mt == p2m_ram_paging_out )
@@ -3067,7 +3067,7 @@ void p2m_mem_paging_populate(struct p2m_
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
     }
     /* No need to inform pager if the gfn is not in the page-out path */
-    else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
+    else if ( !p2m_do_populate(p2mt) )
     {
         /* gfn is already on its way back and vcpu is not paused */
         mem_event_put_req_producers(&d->mem_event->paging);
Index: xen-4.1.2-testing/xen/common/grant_table.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/grant_table.c
+++ xen-4.1.2-testing/xen/common/grant_table.c
@@ -158,7 +158,8 @@ static int __get_paged_frame(unsigned lo
         *frame = mfn_x(mfn);
         if ( p2m_is_paging(p2mt) )
         {
-            p2m_mem_paging_populate(p2m, gfn);
+            if ( p2m_do_populate(p2mt) )
+                p2m_mem_paging_populate(p2m, gfn);
             rc = GNTST_eagain;
         }
     } else {
Index: xen-4.1.2-testing/xen/include/asm-x86/p2m.h
===================================================================
--- xen-4.1.2-testing.orig/xen/include/asm-x86/p2m.h
+++ xen-4.1.2-testing/xen/include/asm-x86/p2m.h
@@ -157,7 +157,11 @@ typedef enum {
                           | p2m_to_mask(p2m_ram_paging_in_start) \
                           | p2m_to_mask(p2m_ram_paging_in))
 
-#define P2M_PAGED_TYPES (p2m_to_mask(p2m_ram_paged))
+#define P2M_POPULATE_TYPES (p2m_to_mask(p2m_ram_paged) \
+                            | p2m_to_mask(p2m_ram_paging_out) )
+
+#define P2M_PAGED_NO_MFN_TYPES (p2m_to_mask(p2m_ram_paged) \
+                               | p2m_to_mask(p2m_ram_paging_in_start) )
 
 /* Shared types */
 /* XXX: Sharable types could include p2m_ram_ro too, but we would need to
@@ -179,7 +183,8 @@ typedef enum {
 #define p2m_has_emt(_t)  (p2m_to_mask(_t) & (P2M_RAM_TYPES | p2m_to_mask(p2m_mmio_direct)))
 #define p2m_is_pageable(_t) (p2m_to_mask(_t) & P2M_PAGEABLE_TYPES)
 #define p2m_is_paging(_t)   (p2m_to_mask(_t) & P2M_PAGING_TYPES)
-#define p2m_is_paged(_t)    (p2m_to_mask(_t) & P2M_PAGED_TYPES)
+#define p2m_is_paged(_t)    (p2m_to_mask(_t) & P2M_PAGED_NO_MFN_TYPES)
+#define p2m_do_populate(_t) (p2m_to_mask(_t) & P2M_POPULATE_TYPES)
 #define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES)
 #define p2m_is_shared(_t)   (p2m_to_mask(_t) & P2M_SHARED_TYPES)
 #define p2m_is_broken(_t)   (p2m_to_mask(_t) & P2M_BROKEN_TYPES)