File 20368-ept-npt-nested-fault.patch of Package xen

# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1256563173 0
# Node ID b27f85b54ecc7276f43cdcd669bda269c0715373
# Parent  9c49133434cb920a0593e07e5970d1381086a4db
hvm: Clean up EPT/NPT 'nested page fault' handling.

Share most of the code.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1259779574 0
# Node ID aa0b5558564f882903ca1c84948bc2a3619d9f73
# Parent  ff04ff0b097e637b7b54afc4ad33e05077751d89
x86 hvm: fix up the unified HAP nested-pagefault handler.
A guest PFN may have been marked dirty and switched to p2m_ram_rw by
another CPU between the VMEXIT and lookup in this handler, so
we can't just check for p2m_ram_logdirty.  Also, handle_mmio
doesn't handle passthrough MMIO.

Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -813,6 +813,36 @@ void hvm_triple_fault(void)
     domain_shutdown(v->domain, SHUTDOWN_reboot);
 }
 
+bool_t hvm_hap_nested_page_fault(unsigned long gfn)
+{
+    p2m_type_t p2mt;
+    mfn_t mfn;
+
+    mfn = gfn_to_mfn_current(gfn, &p2mt);
+
+    /*
+     * If this GFN is emulated MMIO or marked as read-only, pass the fault
+     * to the mmio handler.
+     */
+    if ( (p2mt == p2m_mmio_dm) || (p2mt == p2m_ram_ro) )
+    {
+        if ( !handle_mmio() )
+            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        return 1;
+    }
+
+    /* Log-dirty: mark the page dirty and let the guest write it again */
+    if ( paging_mode_log_dirty(current->domain)
+         && p2m_is_ram(p2mt) && (p2mt != p2m_ram_ro) )
+    {
+        paging_mark_dirty(current->domain, mfn_x(mfn));
+        p2m_change_type(current->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
+        return 1;
+    }
+
+    return 0;
+}
+
 int hvm_set_efer(uint64_t value)
 {
     struct vcpu *v = current;
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -880,27 +880,20 @@ int start_svm(struct cpuinfo_x86 *c)
     return 1;
 }
 
-static void svm_do_nested_pgfault(paddr_t gpa, struct cpu_user_regs *regs)
+static void svm_do_nested_pgfault(paddr_t gpa)
 {
-    p2m_type_t p2mt;
-    mfn_t mfn;
     unsigned long gfn = gpa >> PAGE_SHIFT;
+    mfn_t mfn;
+    p2m_type_t p2mt;
 
-    /*
-     * If this GFN is emulated MMIO or marked as read-only, pass the fault
-     * to the mmio handler.
-     */
-    mfn = gfn_to_mfn_current(gfn, &p2mt);
-    if ( (p2mt == p2m_mmio_dm) || (p2mt == p2m_ram_ro) )
-    {
-        if ( !handle_mmio() )
-            hvm_inject_exception(TRAP_gp_fault, 0, 0);
+    if ( hvm_hap_nested_page_fault(gfn) )
         return;
-    }
 
-    /* Log-dirty: mark the page dirty and let the guest write it again */
-    paging_mark_dirty(current->domain, mfn_x(mfn));
-    p2m_change_type(current->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
+    /* Everything else is an error. */
+    mfn = gfn_to_mfn_current(gfn, &p2mt);
+    gdprintk(XENLOG_ERR, "SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n",
+             gpa, mfn_x(mfn), p2mt);
+    domain_crash(current->domain);
 }
 
 static void svm_fpu_dirty_intercept(void)
@@ -1385,7 +1378,7 @@ asmlinkage void svm_vmexit_handler(struc
     case VMEXIT_NPF:
         perfc_incra(svmexits, VMEXIT_NPF_PERFC);
         regs->error_code = vmcb->exitinfo1;
-        svm_do_nested_pgfault(vmcb->exitinfo2, regs);
+        svm_do_nested_pgfault(vmcb->exitinfo2);
         break;
 
     case VMEXIT_IRET:
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1926,36 +1926,15 @@ static void vmx_wbinvd_intercept(void)
 
 static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
 {
-    unsigned long gla_validity = qualification & EPT_GLA_VALIDITY_MASK;
-    struct domain *d = current->domain;
     unsigned long gla, gfn = gpa >> PAGE_SHIFT;
     mfn_t mfn;
-    p2m_type_t t;
+    p2m_type_t p2mt;
 
-    mfn = gfn_to_mfn(d, gfn, &t);
-
-    /* There are two legitimate reasons for taking an EPT violation. 
-     * One is a guest access to MMIO space. */
-    if ( gla_validity == EPT_GLA_VALIDITY_MATCH && p2m_is_mmio(t) )
-    {
-        handle_mmio();
-        return;
-    }
-
-    /* The other is log-dirty mode, writing to a read-only page */
-    if ( paging_mode_log_dirty(d)
-         && (gla_validity == EPT_GLA_VALIDITY_MATCH
-             || gla_validity == EPT_GLA_VALIDITY_GPT_WALK)
-         && p2m_is_ram(t) && (t != p2m_ram_ro) )
-    {
-        paging_mark_dirty(d, mfn_x(mfn));
-        p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
-        flush_tlb_mask(d->domain_dirty_cpumask);
+    if ( (qualification & EPT_GLA_VALID) &&
+         hvm_hap_nested_page_fault(gfn) )
         return;
-    }
-
     /* Everything else is an error. */
-    gla = __vmread(GUEST_LINEAR_ADDRESS);
+    mfn = gfn_to_mfn_current(gfn, &p2mt);
     gdprintk(XENLOG_ERR, "EPT violation %#lx (%c%c%c/%c%c%c), "
              "gpa %#"PRIpaddr", mfn %#lx, type %i.\n", 
              qualification, 
@@ -1965,29 +1944,20 @@ static void ept_handle_violation(unsigne
              (qualification & EPT_EFFECTIVE_READ) ? 'r' : '-',
              (qualification & EPT_EFFECTIVE_WRITE) ? 'w' : '-',
              (qualification & EPT_EFFECTIVE_EXEC) ? 'x' : '-',
-             gpa, mfn_x(mfn), t);
+             gpa, mfn_x(mfn), p2mt);
 
-    if ( qualification & EPT_GAW_VIOLATION )
-        gdprintk(XENLOG_ERR, " --- GPA too wide (max %u bits)\n", 
-                 9 * (unsigned) d->arch.hvm_domain.vmx.ept_control.gaw + 21);
-
-    switch ( gla_validity )
+    if ( qualification & EPT_GLA_VALID )
     {
-    case EPT_GLA_VALIDITY_PDPTR_LOAD:
-        gdprintk(XENLOG_ERR, " --- PDPTR load failed\n"); 
-        break;
-    case EPT_GLA_VALIDITY_GPT_WALK:
-        gdprintk(XENLOG_ERR, " --- guest PT walk to %#lx failed\n", gla);
-        break;
-    case EPT_GLA_VALIDITY_RSVD:
-        gdprintk(XENLOG_ERR, " --- GLA_validity 2 (reserved)\n");
-        break;
-    case EPT_GLA_VALIDITY_MATCH:
-        gdprintk(XENLOG_ERR, " --- guest access to %#lx failed\n", gla);
-        break;
+        gla = __vmread(GUEST_LINEAR_ADDRESS);
+        gdprintk(XENLOG_ERR, " --- GLA %#lx\n", gla);
     }
 
-    domain_crash(d);
+    if ( qualification & EPT_GAW_VIOLATION )
+        gdprintk(XENLOG_ERR, " --- GPA too wide (max %u bits)\n",
+                 9 * (unsigned int)current->domain->arch.hvm_domain.
+                 vmx.ept_control.gaw + 21);
+
+    domain_crash(current->domain);
 }
 
 static void vmx_failed_vmentry(unsigned int exit_reason,
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -318,4 +318,6 @@ static inline void hvm_set_info_guest(st
         return hvm_funcs.set_info_guest(v);
 }
 
+bool_t hvm_hap_nested_page_fault(unsigned long gfn);
+
 #endif /* __ASM_X86_HVM_HVM_H__ */
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -359,45 +359,24 @@ void vmx_inject_nmi(struct vcpu *v);
 void ept_p2m_init(struct domain *d);
 
 /* EPT violation qualifications definitions */
-/* bit offset 0 in exit qualification */
 #define _EPT_READ_VIOLATION         0
 #define EPT_READ_VIOLATION          (1UL<<_EPT_READ_VIOLATION)
-/* bit offset 1 in exit qualification */
 #define _EPT_WRITE_VIOLATION        1
 #define EPT_WRITE_VIOLATION         (1UL<<_EPT_WRITE_VIOLATION)
-/* bit offset 2 in exit qualification */
 #define _EPT_EXEC_VIOLATION         2
 #define EPT_EXEC_VIOLATION          (1UL<<_EPT_EXEC_VIOLATION)
-
-/* bit offset 3 in exit qualification */
 #define _EPT_EFFECTIVE_READ         3
 #define EPT_EFFECTIVE_READ          (1UL<<_EPT_EFFECTIVE_READ)
-/* bit offset 4 in exit qualification */
 #define _EPT_EFFECTIVE_WRITE        4
 #define EPT_EFFECTIVE_WRITE         (1UL<<_EPT_EFFECTIVE_WRITE)
-/* bit offset 5 in exit qualification */
 #define _EPT_EFFECTIVE_EXEC         5
 #define EPT_EFFECTIVE_EXEC          (1UL<<_EPT_EFFECTIVE_EXEC)
-
-/* bit offset 6 in exit qualification */
 #define _EPT_GAW_VIOLATION          6
 #define EPT_GAW_VIOLATION           (1UL<<_EPT_GAW_VIOLATION)
-
-/* bits offset 7 & 8 in exit qualification */
-#define _EPT_GLA_VALIDITY           7
-#define EPT_GLA_VALIDITY_MASK       (3UL<<_EPT_GLA_VALIDITY)
-/* gla != gpa, when load PDPTR */
-#define EPT_GLA_VALIDITY_PDPTR_LOAD (0UL<<_EPT_GLA_VALIDITY)
-/* gla != gpa, during guest page table walking */
-#define EPT_GLA_VALIDITY_GPT_WALK   (1UL<<_EPT_GLA_VALIDITY)
-/* reserved */
-#define EPT_GLA_VALIDITY_RSVD       (2UL<<_EPT_GLA_VALIDITY)
-/* gla == gpa, normal case */
-#define EPT_GLA_VALIDITY_MATCH      (3UL<<_EPT_GLA_VALIDITY)
-
-#define EPT_EFFECTIVE_MASK          (EPT_EFFECTIVE_READ  |  \
-                                     EPT_EFFECTIVE_WRITE |  \
-                                     EPT_EFFECTIVE_EXEC)
+#define _EPT_GLA_VALID              7
+#define EPT_GLA_VALID               (1UL<<_EPT_GLA_VALID)
+#define _EPT_GLA_FAULT              8
+#define EPT_GLA_FAULT               (1UL<<_EPT_GLA_FAULT)
 
 #define EPT_PAGETABLE_ENTRIES       512
 