File xsa378-3.patch of Package xen.26348

IOMMU: also pass p2m_access_t to p2m_get_iommu_flags()

A subsequent change will want to customize the IOMMU permissions based
on this.
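
As a purely hypothetical illustration (the follow-up change itself is not
part of this patch), a consumer of the extended interface could, for
example, mask write permission from the returned flags whenever the p2m
access type is read-only; IOMMUF_readable/IOMMUF_writable are Xen's
existing IOMMU flag constants:

    /* Illustrative only: narrow IOMMU permissions based on p2ma. */
    unsigned int iommu_flags = p2m_get_iommu_flags(p2mt, p2ma, mfn);

    if ( p2ma == p2m_access_r )
        iommu_flags &= ~IOMMUF_writable;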

This is part of XSA-378.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul@xen.org>

# Commit e70a9a043a5ce6d4025420f729bc473f711bf5d1
# Date 2021-09-07 14:24:49 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/p2m-pt: fix p2m_flags_to_access()

The initial if() was inverted, invalidating all output from this
function. This in turn means the mirroring of P2M mappings into the
IOMMU didn't always work as intended: mappings may have been updated
when there was no need to. There would not have been too few
(un)mappings; what saves us is that alongside the flags comparison the
MFNs also get compared, with non-present entries always having an MFN
of 0 or INVALID_MFN, while present entries always have MFNs different
from these two (0 in the table below also stands for INVALID_MFN):

OLD					NEW
P W	access	MFN			P W	access	MFN
0 0	r	0			0 0	n	0
0 1	rw	0			0 1	n	0
1 0	n	non-0			1 0	r	non-0
1 1	n	non-0			1 1	rw	non-0

present <-> non-present transitions are fine because the MFNs differ.
present -> present transitions as well as non-present -> non-present
ones can cause too many map/unmap operations, but never too few,
because in those cases the old (bogus) and new access values differ.
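
For reference, the inverted predicate (reconstructed from the description
above) would have looked roughly as follows; the corrected version is in
the p2m-pt.c hunk below:

    /* Buggy variant (reconstruction): the present check is inverted. */
    static p2m_access_t p2m_flags_to_access(unsigned int flags)
    {
        if ( flags & _PAGE_PRESENT )  /* should be !(flags & _PAGE_PRESENT) */
            return p2m_access_n;

        return flags & _PAGE_RW ? p2m_access_rw : p2m_access_r;
    }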

Fixes: d1bb6c97c31e ("IOMMU: also pass p2m_access_t to p2m_get_iommu_flags()")
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -693,7 +693,7 @@ ept_set_entry(struct p2m_domain *p2m, un
     uint8_t ipat = 0;
     bool_t need_modify_vtd_table = 1;
     bool_t vtd_pte_present = 0;
-    unsigned int iommu_flags = p2m_get_iommu_flags(p2mt, mfn);
+    unsigned int iommu_flags = p2m_get_iommu_flags(p2mt, p2ma, mfn);
     bool_t needs_sync = 1;
     ept_entry_t old_entry = { .epte = 0 };
     ept_entry_t new_entry = { .epte = 0 };
@@ -819,8 +819,8 @@ ept_set_entry(struct p2m_domain *p2m, un
 
         /* Safe to read-then-write because we hold the p2m lock */
         if ( ept_entry->mfn == new_entry.mfn &&
-             p2m_get_iommu_flags(ept_entry->sa_p2mt, _mfn(ept_entry->mfn)) ==
-             iommu_flags )
+             p2m_get_iommu_flags(ept_entry->sa_p2mt, ept_entry->access,
+                                 _mfn(ept_entry->mfn)) == iommu_flags )
             need_modify_vtd_table = 0;
 
         ept_p2m_type_to_flags(p2m, &new_entry, p2mt, p2ma);
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -491,6 +491,16 @@ int p2m_pt_handle_deferred_changes(uint6
     return rc;
 }
 
+/* Reconstruct a fake p2m_access_t from stored PTE flags. */
+static p2m_access_t p2m_flags_to_access(unsigned int flags)
+{
+    if ( !(flags & _PAGE_PRESENT) )
+        return p2m_access_n;
+
+    /* No need to look at _PAGE_NX for now. */
+    return flags & _PAGE_RW ? p2m_access_rw : p2m_access_r;
+}
+
 /* Returns: 0 for success, -errno for failure */
 static int
 p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
@@ -506,7 +516,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
     l2_pgentry_t l2e_content;
     l3_pgentry_t l3e_content;
     int rc;
-    unsigned int iommu_pte_flags = p2m_get_iommu_flags(p2mt, mfn);
+    unsigned int iommu_pte_flags = p2m_get_iommu_flags(p2mt, p2ma, mfn);
     /*
      * old_mfn and iommu_old_flags control possible flush/update needs on the
      * IOMMU: We need to flush when MFN or flags (i.e. permissions) change.
@@ -575,6 +585,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
                 old_mfn = l1e_get_pfn(*p2m_entry);
                 iommu_old_flags =
                     p2m_get_iommu_flags(p2m_flags_to_type(flags),
+                                        p2m_flags_to_access(flags),
                                         _mfn(old_mfn));
             }
             else
@@ -617,9 +628,10 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
                                    0, L1_PAGETABLE_ENTRIES);
         ASSERT(p2m_entry);
         old_mfn = l1e_get_pfn(*p2m_entry);
+        flags = l1e_get_flags(*p2m_entry);
         iommu_old_flags =
-            p2m_get_iommu_flags(p2m_flags_to_type(l1e_get_flags(*p2m_entry)),
-                                _mfn(old_mfn));
+            p2m_get_iommu_flags(p2m_flags_to_type(flags),
+                                p2m_flags_to_access(flags), _mfn(old_mfn));
 
         if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
             entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
@@ -648,6 +660,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m,
                 old_mfn = l1e_get_pfn(*p2m_entry);
                 iommu_old_flags =
                     p2m_get_iommu_flags(p2m_flags_to_type(flags),
+                                        p2m_flags_to_access(flags),
                                         _mfn(old_mfn));
             }
             else
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -814,7 +814,8 @@ void p2m_altp2m_propagate_change(struct
 /*
  * p2m type to IOMMU flags
  */
-static inline unsigned int p2m_get_iommu_flags(p2m_type_t p2mt, mfn_t mfn)
+static inline unsigned int p2m_get_iommu_flags(p2m_type_t p2mt,
+                                               p2m_access_t p2ma, mfn_t mfn)
 {
     unsigned int flags;
 