File 60631c38-VT-d-QI-restore-flush-hooks.patch of Package xen.21118

# Commit 9c39dba2b179c0f4c42c98e97ea0878119718530
# Date 2021-03-30 14:40:24 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
VT-d: restore flush hooks when disabling qinval

Leaving the hooks untouched is at best a latent risk: there may well be
cases where some flush is needed, which then has to be carried out the
"register" way.

Switch from u<N> to uint<N>_t while needing to touch the function
headers anyway.
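
For illustration only (not part of the patch): the fix boils down to
re-pointing the per-IOMMU flush function pointers back at the
register-based implementations once queued invalidation is turned off.
A minimal, standalone C sketch of that hook-restore pattern, using
simplified stand-in names rather than Xen's real types, might look like:

    /* Illustrative sketch only; names are simplified stand-ins. */
    #include <stdio.h>

    typedef int (*flush_fn)(void);

    struct flush_hooks {
        flush_fn context;   /* context-cache flush hook */
        flush_fn iotlb;     /* IOTLB flush hook */
    };

    static int flush_reg(void)    { puts("register-based flush"); return 0; }
    static int flush_qinval(void) { puts("queued-invalidation flush"); return 0; }

    static void enable_qi(struct flush_hooks *f)
    {
        f->context = flush_qinval;
        f->iotlb = flush_qinval;
    }

    static void disable_qi(struct flush_hooks *f)
    {
        /* The point of the patch: fall back to the "register" way
         * instead of leaving stale queued-invalidation hooks behind. */
        f->context = flush_reg;
        f->iotlb = flush_reg;
    }

    int main(void)
    {
        struct flush_hooks f = { flush_reg, flush_reg };

        enable_qi(&f);
        disable_qi(&f);
        return f.context() | f.iotlb();
    }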

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>

--- a/xen/drivers/passthrough/vtd/extern.h
+++ b/xen/drivers/passthrough/vtd/extern.h
@@ -45,6 +45,16 @@ int iommu_flush_iec_global(struct iommu
 int iommu_flush_iec_index(struct iommu *iommu, u8 im, u16 iidx);
 void clear_fault_bits(struct iommu *iommu);
 
+int __must_check vtd_flush_context_reg(void *iommu, uint16_t did,
+                                       uint16_t source_id,
+                                       uint8_t function_mask, uint64_t type,
+                                       bool flush_non_present_entry);
+int __must_check vtd_flush_iotlb_reg(void *iommu, uint16_t did,
+                                     uint64_t addr, unsigned int size_order,
+                                     uint64_t type,
+                                     bool flush_non_present_entry,
+                                     bool flush_dev_iotlb);
+
 struct iommu * ioapic_to_iommu(unsigned int apic_id);
 struct iommu * hpet_to_iommu(unsigned int hpet_id);
 struct acpi_drhd_unit * ioapic_to_drhd(unsigned int apic_id);
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -364,9 +364,9 @@ static void iommu_flush_write_buffer(str
 }
 
 /* return value determine if we need a write buffer flush */
-static int __must_check flush_context_reg(void *_iommu, u16 did, u16 source_id,
-                                          u8 function_mask, u64 type,
-                                          bool_t flush_non_present_entry)
+int vtd_flush_context_reg(void *_iommu, uint16_t did,
+                          uint16_t source_id, uint8_t function_mask,
+                          uint64_t type, bool_t flush_non_present_entry)
 {
     struct iommu *iommu = (struct iommu *) _iommu;
     u64 val = 0;
@@ -436,10 +436,9 @@ static int __must_check iommu_flush_cont
 }
 
 /* return value determine if we need a write buffer flush */
-static int __must_check flush_iotlb_reg(void *_iommu, u16 did, u64 addr,
-                                        unsigned int size_order, u64 type,
-                                        bool_t flush_non_present_entry,
-                                        bool_t flush_dev_iotlb)
+int vtd_flush_iotlb_reg(void *_iommu, uint16_t did, uint64_t addr,
+                        unsigned int size_order, uint64_t type,
+                        bool_t flush_non_present_entry, bool_t flush_dev_iotlb)
 {
     struct iommu *iommu = (struct iommu *) _iommu;
     int tlb_offset = ecap_iotlb_offset(iommu->ecap);
@@ -2199,8 +2198,8 @@ static int __must_check init_vtd_hw(void
         if ( enable_qinval(iommu) != 0 )
         {
             flush = iommu_get_flush(iommu);
-            flush->context = flush_context_reg;
-            flush->iotlb = flush_iotlb_reg;
+            flush->context = vtd_flush_context_reg;
+            flush->iotlb = vtd_flush_iotlb_reg;
         }
     }
 
--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -458,6 +458,7 @@ int enable_qinval(struct iommu *iommu)
 void disable_qinval(struct iommu *iommu)
 {
     u32 sts;
+    struct iommu_flush *flush;
     unsigned long flags;
 
     if ( !ecap_queued_inval(iommu->ecap) )
@@ -475,4 +476,8 @@ void disable_qinval(struct iommu *iommu)
                   !(sts & DMA_GSTS_QIES), sts);
 out:
     spin_unlock_irqrestore(&iommu->register_lock, flags);
+
+    flush = iommu_get_flush(iommu);
+    flush->context = vtd_flush_context_reg;
+    flush->iotlb = vtd_flush_iotlb_reg;
 }