From: Jan Beulich <jbeulich@suse.com>
Subject: IOMMU/x86: tighten iommu_alloc_pgtable()'s parameter
This is to make it more obvious that nothing outside of domain_iommu(d)
actually changes or is otherwise needed by the function.
No functional change intended.
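
To illustrate the interface change: the parameter is narrowed from the
containing object (struct domain) to the sub-object the function actually
operates on (struct domain_iommu), so the signature itself documents the
dependency and callers resolve dom_iommu(d) once. Below is a minimal,
self-contained sketch of the pattern; the types, the dom_iommu() analogue,
and the page-sized allocation are simplified stand-ins for illustration,
not Xen's real definitions:

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for Xen's types; not the real definitions. */
    struct domain_iommu {
        unsigned long pgtable_count;
    };

    struct domain {
        int domain_id;
        struct domain_iommu iommu;
    };

    /* Analogue of Xen's dom_iommu(d) accessor. */
    static struct domain_iommu *dom_iommu(struct domain *d)
    {
        return &d->iommu;
    }

    /*
     * Narrowed interface: taking struct domain_iommu * rather than
     * struct domain * makes clear that nothing outside the IOMMU
     * state is read or modified by the function.
     */
    static void *iommu_alloc_pgtable(struct domain_iommu *hd)
    {
        void *pg = calloc(1, 4096); /* a zeroed page-table page */

        if ( pg )
            hd->pgtable_count++;

        return pg;
    }

    int main(void)
    {
        struct domain d = { .domain_id = 1 };
        struct domain_iommu *hd = dom_iommu(&d);

        /* Callers resolve dom_iommu(d) once and pass the result on. */
        void *pg = iommu_alloc_pgtable(hd);

        printf("allocated=%p count=%lu\n", pg, hd->pgtable_count);
        free(pg);
        return 0;
    }

With the narrowed signature, call sites that already hold hd (as in
iommu_pde_from_dfn() below) no longer need the struct domain at all.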
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -142,7 +142,8 @@ int pi_update_irte(const struct pi_desc
})
int __must_check iommu_free_pgtables(struct domain *d);
-struct page_info *__must_check iommu_alloc_pgtable(struct domain *d);
+struct domain_iommu;
+struct page_info *__must_check iommu_alloc_pgtable(struct domain_iommu *hd);
#endif /* !__ARCH_X86_IOMMU_H__ */
/*
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -184,7 +184,7 @@ static int iommu_pde_from_dfn(struct dom
unsigned long next_table_mfn;
unsigned int level;
struct page_info *table;
- const struct domain_iommu *hd = dom_iommu(d);
+ struct domain_iommu *hd = dom_iommu(d);
table = hd->arch.amd.root_table;
level = hd->arch.amd.paging_mode;
@@ -219,7 +219,7 @@ static int iommu_pde_from_dfn(struct dom
mfn = next_table_mfn;
/* allocate lower level page table */
- table = iommu_alloc_pgtable(d);
+ table = iommu_alloc_pgtable(hd);
if ( table == NULL )
{
AMD_IOMMU_ERROR("cannot allocate I/O page table\n");
@@ -249,7 +249,7 @@ static int iommu_pde_from_dfn(struct dom
if ( next_table_mfn == 0 )
{
- table = iommu_alloc_pgtable(d);
+ table = iommu_alloc_pgtable(hd);
if ( table == NULL )
{
AMD_IOMMU_ERROR("cannot allocate I/O page table\n");
@@ -553,7 +553,7 @@ int __init amd_iommu_quarantine_init(str
spin_lock(&hd->arch.mapping_lock);
- hd->arch.amd.root_table = iommu_alloc_pgtable(d);
+ hd->arch.amd.root_table = iommu_alloc_pgtable(hd);
if ( !hd->arch.amd.root_table )
goto out;
@@ -568,7 +568,7 @@ int __init amd_iommu_quarantine_init(str
* page table pages, and the resulting allocations are always
* zeroed.
*/
- pg = iommu_alloc_pgtable(d);
+ pg = iommu_alloc_pgtable(hd);
if ( !pg )
break;
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -242,7 +242,7 @@ int amd_iommu_alloc_root(struct domain *
if ( unlikely(!hd->arch.amd.root_table) )
{
- hd->arch.amd.root_table = iommu_alloc_pgtable(d);
+ hd->arch.amd.root_table = iommu_alloc_pgtable(hd);
if ( !hd->arch.amd.root_table )
return -ENOMEM;
}
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -330,7 +330,7 @@ static u64 addr_to_dma_page_maddr(struct
{
struct page_info *pg;
- if ( !alloc || !(pg = iommu_alloc_pgtable(domain)) )
+ if ( !alloc || !(pg = iommu_alloc_pgtable(hd)) )
goto out;
hd->arch.vtd.pgd_maddr = page_to_maddr(pg);
@@ -350,7 +350,7 @@ static u64 addr_to_dma_page_maddr(struct
if ( !alloc )
break;
- pg = iommu_alloc_pgtable(domain);
+ pg = iommu_alloc_pgtable(hd);
if ( !pg )
break;
@@ -2766,7 +2766,7 @@ static int __init intel_iommu_quarantine
goto out;
}
- pg = iommu_alloc_pgtable(d);
+ pg = iommu_alloc_pgtable(hd);
rc = -ENOMEM;
if ( !pg )
@@ -2785,7 +2785,7 @@ static int __init intel_iommu_quarantine
* page table pages, and the resulting allocations are always
* zeroed.
*/
- pg = iommu_alloc_pgtable(d);
+ pg = iommu_alloc_pgtable(hd);
if ( !pg )
goto out;
--- a/xen/drivers/passthrough/x86/iommu.c
+++ b/xen/drivers/passthrough/x86/iommu.c
@@ -416,9 +416,8 @@ int iommu_free_pgtables(struct domain *d
return 0;
}
-struct page_info *iommu_alloc_pgtable(struct domain *d)
+struct page_info *iommu_alloc_pgtable(struct domain_iommu *hd)
{
- struct domain_iommu *hd = dom_iommu(d);
unsigned int memflags = 0;
struct page_info *pg;
void *p;