# Commit 62298825b9a44f45761acbd758138b5ba059ebd1
# Date 2020-07-07 14:38:13 +0200
# Author Roger Pau Monné <roger.pau@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
vtd: prune (and rename) cache flush functions

Rename __iommu_flush_cache to iommu_sync_cache and remove
iommu_flush_cache_page. Also remove the iommu_flush_cache_entry
wrapper and just use iommu_sync_cache instead. Note the _entry suffix
was meaningless, as the wrapper was already taking a size parameter in
bytes. While there, also constify the addr parameter.

No functional change intended.

This is part of XSA-321.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
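
The conversion is mechanical at every call site: the surviving
primitive takes a const pointer and always a byte count. As a minimal
sketch of the translation (pte and vaddr stand for the pointers seen
in the hunks below):

    /* Before: two wrappers, one taking bytes, one taking pages. */
    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
    iommu_flush_cache_page(vaddr, 1);

    /* After: one primitive, with page counts converted to bytes. */
    iommu_sync_cache(pte, sizeof(struct dma_pte));
    iommu_sync_cache(vaddr, PAGE_SIZE);
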
--- a/xen/drivers/passthrough/vtd/extern.h
+++ b/xen/drivers/passthrough/vtd/extern.h
@@ -38,8 +38,7 @@ void disable_qinval(struct iommu *iommu)
 int enable_intremap(struct iommu *iommu, int eim);
 void disable_intremap(struct iommu *iommu);
 
-void iommu_flush_cache_entry(void *addr, unsigned int size);
-void iommu_flush_cache_page(void *addr, unsigned long npages);
+void iommu_sync_cache(const void *addr, unsigned int size);
 int iommu_alloc(struct acpi_drhd_unit *drhd);
 void iommu_free(struct acpi_drhd_unit *drhd);
 
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -231,7 +231,7 @@ static void free_remap_entry(struct iomm
                      iremap_entries, iremap_entry);
 
     update_irte(iommu, iremap_entry, &new_ire, false);
-    iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
+    iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
     iommu_flush_iec_index(iommu, 0, index);
 
     unmap_vtd_domain_page(iremap_entries);
@@ -403,7 +403,7 @@ static int ioapic_rte_to_remap_entry(str
     }
 
     update_irte(iommu, iremap_entry, &new_ire, !init);
-    iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
+    iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
     iommu_flush_iec_index(iommu, 0, index);
 
     unmap_vtd_domain_page(iremap_entries);
@@ -694,7 +694,7 @@ static int msi_msg_to_remap_entry(
     update_irte(iommu, iremap_entry, &new_ire, msi_desc->irte_initialized);
     msi_desc->irte_initialized = true;
 
-    iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
+    iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
     iommu_flush_iec_index(iommu, 0, index);
 
     unmap_vtd_domain_page(iremap_entries);
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -158,7 +158,8 @@ static void __init free_intel_iommu(stru
 }
 
 static int iommus_incoherent;
-static void __iommu_flush_cache(void *addr, unsigned int size)
+
+void iommu_sync_cache(const void *addr, unsigned int size)
 {
     int i;
     static unsigned int clflush_size = 0;
@@ -173,16 +174,6 @@ static void __iommu_flush_cache(void *ad
         cacheline_flush((char *)addr + i);
 }
 
-void iommu_flush_cache_entry(void *addr, unsigned int size)
-{
-    __iommu_flush_cache(addr, size);
-}
-
-void iommu_flush_cache_page(void *addr, unsigned long npages)
-{
-    __iommu_flush_cache(addr, PAGE_SIZE * npages);
-}
-
 /* Allocate page table, return its machine address */
 u64 alloc_pgtable_maddr(struct acpi_drhd_unit *drhd, unsigned long npages)
 {
@@ -207,7 +198,7 @@ u64 alloc_pgtable_maddr(struct acpi_drhd
         vaddr = __map_domain_page(cur_pg);
         memset(vaddr, 0, PAGE_SIZE);
-        iommu_flush_cache_page(vaddr, 1);
+        iommu_sync_cache(vaddr, PAGE_SIZE);
 
         unmap_domain_page(vaddr);
         cur_pg++;
     }
@@ -242,7 +233,7 @@ static u64 bus_to_context_maddr(struct i
         }
         set_root_value(*root, maddr);
         set_root_present(*root);
-        iommu_flush_cache_entry(root, sizeof(struct root_entry));
+        iommu_sync_cache(root, sizeof(struct root_entry));
     }
     maddr = (u64) get_context_addr(*root);
     unmap_vtd_domain_page(root_entries);
@@ -300,7 +291,7 @@ static u64 addr_to_dma_page_maddr(struct
              */
             dma_set_pte_readable(*pte);
             dma_set_pte_writable(*pte);
-            iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
+            iommu_sync_cache(pte, sizeof(struct dma_pte));
         }
 
         if ( level == 2 )
@@ -681,7 +672,7 @@ static int __must_check dma_pte_clear_on
     *flush_flags |= IOMMU_FLUSHF_modified;
 
     spin_unlock(&hd->arch.mapping_lock);
-    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
+    iommu_sync_cache(pte, sizeof(struct dma_pte));
 
     unmap_vtd_domain_page(page);
 
@@ -720,7 +711,7 @@ static void iommu_free_page_table(struct
             iommu_free_pagetable(dma_pte_addr(*pte), next_level);
 
         dma_clear_pte(*pte);
-        iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
+        iommu_sync_cache(pte, sizeof(struct dma_pte));
     }
 
     unmap_vtd_domain_page(pt_vaddr);
@@ -1449,7 +1440,7 @@ int domain_context_mapping_one(
     context_set_address_width(*context, agaw);
     context_set_fault_enable(*context);
     context_set_present(*context);
-    iommu_flush_cache_entry(context, sizeof(struct context_entry));
+    iommu_sync_cache(context, sizeof(struct context_entry));
     spin_unlock(&iommu->lock);
 
     /* Context entry was previously non-present (with domid 0). */
@@ -1602,7 +1593,7 @@ int domain_context_unmap_one(
 
     context_clear_present(*context);
     context_clear_entry(*context);
-    iommu_flush_cache_entry(context, sizeof(struct context_entry));
+    iommu_sync_cache(context, sizeof(struct context_entry));
 
     iommu_domid= domain_iommu_domid(domain, iommu);
     if ( iommu_domid == -1 )
@@ -1837,7 +1828,7 @@ static int __must_check intel_iommu_map_
 
     *pte = new;
 
-    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
+    iommu_sync_cache(pte, sizeof(struct dma_pte));
     spin_unlock(&hd->arch.mapping_lock);
     unmap_vtd_domain_page(page);
 
@@ -1912,7 +1903,7 @@ int iommu_pte_flush(struct domain *d, ui
     int iommu_domid;
     int rc = 0;
 
-    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
+    iommu_sync_cache(pte, sizeof(struct dma_pte));
 
     for_each_drhd_unit ( drhd )
     {
@@ -2777,7 +2768,7 @@ static int __init intel_iommu_quarantine
             dma_set_pte_addr(*pte, maddr);
             dma_set_pte_readable(*pte);
         }
-        iommu_flush_cache_page(parent, 1);
+        iommu_sync_cache(parent, PAGE_SIZE);
 
         unmap_vtd_domain_page(parent);
         parent = map_vtd_domain_page(maddr);
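
For reference, the primitive this patch keeps is just a loop flushing
every cache line in [addr, addr + size), guarded by iommus_incoherent.
The stand-alone, user-space sketch below is an approximation for
illustration only: the fixed 64-byte line size, _mm_clflush() and
main() are assumptions standing in for Xen's CPUID-derived
clflush_size, cacheline_flush() and the hypervisor context, and the
iommus_incoherent early exit is elided.

    /* Hedged sketch, not Xen code: approximates iommu_sync_cache(). */
    #include <stdio.h>
    #include <string.h>
    #include <immintrin.h>

    #define CLFLUSH_SIZE 64u /* assumption: typical x86 cache line */

    static void sync_cache(const void *addr, unsigned int size)
    {
        unsigned int i;

        /* Step through [addr, addr + size) one assumed cache line at a
         * time, flushing each, as the hypervisor loop does. */
        for ( i = 0; i < size; i += CLFLUSH_SIZE )
            _mm_clflush((const char *)addr + i);
    }

    int main(void)
    {
        char buf[4096];

        memset(buf, 0xa5, sizeof(buf));
        sync_cache(buf, sizeof(buf)); /* cf. iommu_sync_cache(vaddr, PAGE_SIZE) */
        printf("flushed %zu bytes\n", sizeof(buf));
        return 0;
    }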