File 612634f4-x86-mm-widen-locked-region-in-xatp1.patch of Package xen.31431
# Commit f147422bf9476fb8161b43e35f5901571ed17c35
# Date 2021-08-25 14:17:56 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/mm: widen locked region in xenmem_add_to_physmap_one()
For pages which can be made part of the P2M by the guest, but which can
also later be de-allocated (grant table v2 status pages being the
present example), it is imperative that they be mapped at no more than a
single GFN. We therefore need to make sure that of two parallel
XENMAPSPACE_grant_table requests for the same status page one completes
before the second checks at which other GFN the underlying MFN is
presently mapped.
Pull ahead the respective get_gfn() and push down the respective
put_gfn(). This leverages that gfn_lock() really aliases p2m_lock(), but
the function makes this assumption already anyway: In the
XENMAPSPACE_gmfn case lock nesting constraints for both involved GFNs
would otherwise need to be enforced to avoid ABBA deadlocks.
This is CVE-2021-28697 / XSA-379.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Julien Grall <jgrall@amazon.com>
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4807,8 +4807,20 @@ int xenmem_add_to_physmap_one(
goto put_both;
}
- /* Remove previously mapped page if it was present. */
+ /*
+ * Note that we're (ab)using GFN locking (to really be locking of the
+ * entire P2M) here in (at least) two ways: Finer grained locking would
+ * expose lock order violations in the XENMAPSPACE_gmfn case (due to the
+ * earlier get_gfn_unshare() above). Plus at the very least for the grant
+ * table v2 status page case we need to guarantee that the same page can
+ * only appear at a single GFN. While this is a property we want in
+ * general, for pages which can subsequently be freed this is imperative:
+ * Upon freeing we wouldn't be able to find other mappings in the P2M
+ * (unless we did a brute force search).
+ */
prev_mfn = mfn_x(get_gfn(d, gfn_x(gpfn), &p2mt));
+
+ /* Remove previously mapped page if it was present. */
if ( p2mt == p2m_mmio_direct )
rc = -EPERM;
else if ( mfn_valid(_mfn(prev_mfn)) )
@@ -4820,27 +4832,21 @@ int xenmem_add_to_physmap_one(
/* Normal domain memory is freed, to avoid leaking memory. */
rc = guest_remove_page(d, gfn_x(gpfn));
}
- /* In the XENMAPSPACE_gmfn case we still hold a ref on the old page. */
- put_gfn(d, gfn_x(gpfn));
-
- if ( rc )
- goto put_both;
/* Unmap from old location, if any. */
old_gpfn = get_gpfn_from_mfn(mfn_x(mfn));
ASSERT(!SHARED_M2P(old_gpfn));
if ( space == XENMAPSPACE_gmfn && old_gpfn != gfn )
- {
rc = -EXDEV;
- goto put_both;
- }
- if ( old_gpfn != INVALID_M2P_ENTRY )
+ else if ( !rc && old_gpfn != INVALID_M2P_ENTRY )
rc = guest_physmap_remove_page(d, _gfn(old_gpfn), mfn, PAGE_ORDER_4K);
/* Map at new location. */
if ( !rc )
rc = guest_physmap_add_page(d, gpfn, mfn, PAGE_ORDER_4K);
+ put_gfn(d, gfn_x(gpfn));
+
put_both:
/*
* In the XENMAPSPACE_gmfn case, we took a ref of the gfn at the top.