# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1233313986 0
# Node ID 2d70ad9c3bc7546e8bd53f55c5f0d05c5852a8a1
# Parent 162cdb596b9a7e49994b9305f34fadf92cfb3933
amd-iommu: obtain page_alloc_lock before traversing a domain's page list

From all I can tell, this doesn't violate lock ordering, as other
places call heap allocation functions from inside hd->mapping_lock.

Signed-off-by: Jan Beulich <jbeulich@novell.com>

--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -552,6 +552,8 @@ int amd_iommu_sync_p2m(struct domain *d)
     if ( hd->p2m_synchronized )
         goto out;
 
+    spin_lock(&d->page_alloc_lock);
+
     page_list_for_each ( page, &d->page_list )
     {
         mfn = page_to_mfn(page);
@@ -564,6 +566,7 @@ int amd_iommu_sync_p2m(struct domain *d)
         pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn);
         if ( pte == NULL )
         {
+            spin_unlock(&d->page_alloc_lock);
             amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
             spin_unlock_irqrestore(&hd->mapping_lock, flags);
             return -EFAULT;
@@ -571,6 +574,8 @@ int amd_iommu_sync_p2m(struct domain *d)
         set_page_table_entry_present((u32 *)pte, maddr, iw, ir);
     }
 
+    spin_unlock(&d->page_alloc_lock);
+
     hd->p2m_synchronized = 1;
 
 out:
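
For reference, the locking pattern this patch establishes: d->page_alloc_lock
is acquired inside hd->mapping_lock, held across the whole page-list walk, and
released before the mapping lock on every exit path, including the -EFAULT one.
The C program below is a minimal sketch of that pattern under simplifying
assumptions, not the real Xen code: pthread mutexes stand in for Xen's
spinlocks, and struct page_info, struct domain, struct hvm_iommu and
map_one_page() are illustrative stand-ins for the real definitions in
iommu_map.c.

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the Xen structures touched by the patch. */
struct page_info {
    struct page_info *next;
    unsigned long mfn;
};

struct domain {
    pthread_mutex_t page_alloc_lock;   /* stand-in for Xen's spin lock */
    struct page_info *page_list;
};

struct hvm_iommu {
    pthread_mutex_t mapping_lock;      /* taken IRQ-safe in the real code */
    int p2m_synchronized;
};

/* Hypothetical stand-in for the per-page work (the PTE lookup and
 * set_page_table_entry_present() call); returns nonzero on a bad PTE. */
static int map_one_page(struct page_info *page)
{
    (void)page;
    return 0;
}

static int sync_p2m_sketch(struct domain *d, struct hvm_iommu *hd)
{
    struct page_info *page;

    pthread_mutex_lock(&hd->mapping_lock);

    if ( hd->p2m_synchronized )
        goto out;

    /* The point of the patch: hold page_alloc_lock for the whole walk of
     * d->page_list so the list cannot be mutated underneath us. */
    pthread_mutex_lock(&d->page_alloc_lock);

    for ( page = d->page_list; page != NULL; page = page->next )
    {
        if ( map_one_page(page) != 0 )
        {
            /* Error path: drop the inner lock first, then the outer one,
             * mirroring the new spin_unlock() before the -EFAULT return. */
            pthread_mutex_unlock(&d->page_alloc_lock);
            pthread_mutex_unlock(&hd->mapping_lock);
            return -1;
        }
    }

    pthread_mutex_unlock(&d->page_alloc_lock);
    hd->p2m_synchronized = 1;

out:
    pthread_mutex_unlock(&hd->mapping_lock);
    return 0;
}

int main(void)
{
    struct page_info pg = { NULL, 42 };
    struct domain d = { PTHREAD_MUTEX_INITIALIZER, &pg };
    struct hvm_iommu hd = { PTHREAD_MUTEX_INITIALIZER, 0 };
    int rc = sync_p2m_sketch(&d, &hd);

    printf("rc = %d, p2m_synchronized = %d\n", rc, hd.p2m_synchronized);
    return 0;
}

The nesting direction is what the commit message argues about: since other
places already end up taking page_alloc_lock from inside hd->mapping_lock
(heap allocation functions do so when assigning pages to a domain), acquiring
page_alloc_lock inside mapping_lock here matches the existing order rather
than introducing a new one.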