File xsa321-7.patch of Package xen.23721

From: <security@xenproject.org>
Subject: x86/ept: flush cache when modifying PTEs and sharing page tables

Modifications made to the page tables by EPT code need to be written
back to memory when the page tables are shared with the IOMMU: Intel
IOMMUs can be non-coherent, in which case changes only become visible
to the IOMMU once they have been written out to memory.
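
As an illustration of what "written back to memory" means here: the
cache lines covering the updated PTEs have to be explicitly flushed,
which is what iommu_sync_cache() (used throughout the hunks below)
takes care of. A minimal, hypothetical sketch of such a write-back,
assuming a hard-coded 64 byte cache line and using CLFLUSH directly,
might look like:

    /* Hypothetical sketch only: push the bytes covering a PTE out of
     * the CPU caches so a non-coherent IOMMU can observe the update. */
    static void writeback_for_iommu(const void *addr, unsigned int size)
    {
        unsigned long p = (unsigned long)addr & ~63UL;  /* line align */
        unsigned long end = (unsigned long)addr + size;

        for ( ; p < end; p += 64 )
            asm volatile ( "clflush %0" :: "m" (*(const char *)p)
                                        : "memory" );
    }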

To achieve this, make sure data is written back to memory after
writing an EPT entry in atomic_write_ept_entry when the recalc bit is
not set. If that bit is set, the entry will be adjusted and
atomic_write_ept_entry will be called a second time with the recalc
bit clear. Note that when splitting a super page, the new tables
resulting from the split must also be written back.
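
In terms of the p2m-ept.c hunks below, the resulting write path is, in
simplified form (excerpted, not self-contained):

    /* atomic_write_ept_entry(): publish the new entry, then write it
     * back for the IOMMU unless a recalculation pass will follow. */
    write_atomic(&entryptr->epte, new.epte);
    if ( !new.recalc && iommu_hap_pt_share )
        iommu_sync_cache(entryptr, sizeof(*entryptr));

    /* ept_split_super_page(): write back the whole freshly populated
     * table before it gets installed. */
    if ( iommu_hap_pt_share )
        iommu_sync_cache(table, EPT_PAGETABLE_ENTRIES * sizeof(ept_entry_t));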

Failure to do so can allow devices behind the IOMMU access to the
stale super page, or cause coherency issues as changes made by the
processor to the page tables are not visible to the IOMMU.

This allows removing the VT-d specific iommu_pte_flush helper: the
cache write-back is now performed by atomic_write_ept_entry, and hence
iommu_flush_iotlb can be used to flush the IOMMU TLB. The newly used
helper (iommu_flush_iotlb) can result in fewer flushes, since it may
legitimately be called with 0 flags, in which case it becomes a
no-op.
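
For reference, the new call site in the last p2m-ept.c hunk derives
the page count from the order of the mapping. A hypothetical caller
flushing a 2MB (order 9) region whose old PTEs were present would
issue:

    /* Hypothetical usage of the now-exported helper: invalidate the
     * IOTLB entries covering a 2MB (order 9) region; the old PTEs were
     * present, so the stale translations must be flushed. */
    iommu_flush_iotlb(d, gfn, /* dma_old_pte_present */ 1, 1u << 9);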

This is part of XSA-321.

Reviewed-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -91,6 +91,19 @@ static int atomic_write_ept_entry(ept_en
 
     write_atomic(&entryptr->epte, new.epte);
 
+    /*
+     * The recalc field on the EPT is used to signal either that a
+     * recalculation of the EMT field is required (which doesn't affect the
+     * IOMMU), or a type change. Type changes can only be between ram_rw,
+     * logdirty and ioreq_server: changes to/from logdirty won't work well with
+     * an IOMMU anyway, as IOMMU #PFs are not synchronous and will lead to
+     * aborts, and changes to/from ioreq_server are already fully flushed
+     * before returning to guest context (see
+     * XEN_DMOP_map_mem_type_to_ioreq_server).
+     */
+    if ( !new.recalc && iommu_hap_pt_share )
+        iommu_sync_cache(entryptr, sizeof(*entryptr));
+
     if ( unlikely(oldmfn != INVALID_MFN) )
         put_page(mfn_to_page(oldmfn));
 
@@ -314,6 +327,9 @@ static bool_t ept_split_super_page(struc
             break;
     }
 
+    if ( iommu_hap_pt_share )
+        iommu_sync_cache(table, EPT_PAGETABLE_ENTRIES * sizeof(ept_entry_t));
+
     unmap_domain_page(table);
 
     /* Even failed we should install the newly allocated ept page. */
@@ -373,6 +389,9 @@ static int ept_next_level(struct p2m_dom
         if ( !next )
             return GUEST_TABLE_MAP_FAILED;
 
+        if ( iommu_hap_pt_share )
+            iommu_sync_cache(next, EPT_PAGETABLE_ENTRIES * sizeof(ept_entry_t));
+
         rc = atomic_write_ept_entry(ept_entry, e, next_level);
         ASSERT(rc == 0);
     }
@@ -834,7 +853,7 @@ out:
          need_modify_vtd_table )
     {
         if ( iommu_hap_pt_share )
-            iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
+            iommu_flush_iotlb(d, gfn, vtd_pte_present, 1u << order);
         else
         {
             if ( iommu_flags )
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -585,8 +585,8 @@ static void iommu_flush_all(void)
     }
 }
 
-static void __intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
-        int dma_old_pte_present, unsigned int page_count)
+void iommu_flush_iotlb(struct domain *d, unsigned long gfn,
+                       bool_t dma_old_pte_present, unsigned int page_count)
 {
     struct domain_iommu *hd = dom_iommu(d);
     struct acpi_drhd_unit *drhd;
@@ -630,12 +630,12 @@ static void __intel_iommu_iotlb_flush(st
 
 static void intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count)
 {
-    __intel_iommu_iotlb_flush(d, gfn, 1, page_count);
+    iommu_flush_iotlb(d, gfn, 1, page_count);
 }
 
 static void intel_iommu_iotlb_flush_all(struct domain *d)
 {
-    __intel_iommu_iotlb_flush(d, INVALID_GFN, 0, 0);
+    iommu_flush_iotlb(d, INVALID_GFN, 0, 0);
 }
 
 /* clear one page's page table */
@@ -669,7 +669,7 @@ static void dma_pte_clear_one(struct dom
     iommu_sync_cache(pte, sizeof(struct dma_pte));
 
     if ( !this_cpu(iommu_dont_flush_iotlb) )
-        __intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K, 1, 1);
+        intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K, 1);
 
     unmap_vtd_domain_page(page);
 }
@@ -1788,7 +1788,7 @@ static int intel_iommu_map_page(
     unmap_vtd_domain_page(page);
 
     if ( !this_cpu(iommu_dont_flush_iotlb) )
-        __intel_iommu_iotlb_flush(d, gfn, dma_pte_present(old), 1);
+        iommu_flush_iotlb(d, gfn, dma_pte_present(old), 1);
 
     return 0;
 }
@@ -1804,34 +1804,6 @@ static int intel_iommu_unmap_page(struct
     return 0;
 }
 
-void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
-                     int order, int present)
-{
-    struct acpi_drhd_unit *drhd;
-    struct iommu *iommu = NULL;
-    struct domain_iommu *hd = dom_iommu(d);
-    int flush_dev_iotlb;
-    int iommu_domid;
-
-    iommu_sync_cache(pte, sizeof(struct dma_pte));
-
-    for_each_drhd_unit ( drhd )
-    {
-        iommu = drhd->iommu;
-        if ( !test_bit(iommu->index, &hd->arch.iommu_bitmap) )
-            continue;
-
-        flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
-        iommu_domid= domain_iommu_domid(d, iommu);
-        if ( iommu_domid == -1 )
-            continue;
-        if ( iommu_flush_iotlb_psi(iommu, iommu_domid,
-                                   (paddr_t)gfn << PAGE_SHIFT_4K,
-                                   order, !present, flush_dev_iotlb) )
-            iommu_flush_write_buffer(iommu);
-    }
-}
-
 static int __init vtd_ept_page_compatible(struct iommu *iommu)
 {
     u64 ept_cap, vtd_cap = iommu->cap;
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -27,7 +27,8 @@ int iommu_setup_hpet_msi(struct msi_desc
 
 /* While VT-d specific, this must get declared in a generic header. */
 int adjust_vtd_irq_affinities(void);
-void iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte, int order, int present);
+void iommu_flush_iotlb(struct domain *d, unsigned long gfn,
+                       bool_t dma_old_pte_present, unsigned int page_count);
 bool_t iommu_supports_eim(void);
 int iommu_enable_x2apic_IR(void);
 void iommu_disable_x2apic_IR(void);