References: bsc#1135799
# Commit 05f41944a05cc89652b6ceb7a08ecb22468d9188
# Date 2019-07-25 12:14:52 +0200
# Author Jan Beulich <jbeulich@suse.com>
# Committer Jan Beulich <jbeulich@suse.com>
x86/IOMMU: don't restrict IRQ affinities to online CPUs
In line with "x86/IRQ: desc->affinity should strictly represent the
requested value" the internally used IRQ(s) also shouldn't be restricted
to online ones. Make set_desc_affinity() (set_msi_affinity() then does
by implication) cope with a NULL mask being passed (just like
assign_irq_vector() does), and have IOMMU code pass NULL instead of
&cpu_online_map (when, for VT-d, there's no NUMA node information
available).
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Acked-by: Brian Woods <brian.woods@amd.com>
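[Editor's note, not part of the patch: the change below teaches set_desc_affinity() to accept a NULL mask, meaning "no affinity restriction": vector assignment then falls back to TARGET_CPUS, desc->affinity is set to all CPUs, and the actual delivery mask is still limited to online CPUs. The following minimal standalone C sketch models only that NULL-vs-non-NULL behaviour; the unsigned-long bitmap stands in for cpumask_t and every name in it (fake_set_desc_affinity, online_map, vector_cpus) is hypothetical, not Xen code.]

#include <stdio.h>

#define NR_FAKE_CPUS 8
#define ALL_CPUS     ((1UL << NR_FAKE_CPUS) - 1)

static unsigned long online_map  = 0x0f; /* CPUs 0-3 online */
static unsigned long vector_cpus = 0x06; /* CPUs chosen by vector assignment */

/* Returns the resulting desc->affinity; *dest gets the delivery mask. */
static unsigned long fake_set_desc_affinity(const unsigned long *mask,
                                             unsigned long *dest)
{
    unsigned long affinity;

    if ( mask )                       /* caller requested specific CPUs */
    {
        affinity = *mask;             /* cpumask_copy(desc->affinity, mask) */
        *dest = *mask & vector_cpus;  /* cpumask_and(&dest_mask, mask, ...) */
    }
    else                              /* NULL: "don't care", i.e. all CPUs */
    {
        affinity = ALL_CPUS;          /* cpumask_setall(desc->affinity) */
        *dest = vector_cpus;          /* cpumask_copy(&dest_mask, ...) */
    }

    *dest &= online_map;              /* delivery is always limited to online CPUs */
    return affinity;
}

int main(void)
{
    unsigned long dest, affinity, req = 0x03;

    affinity = fake_set_desc_affinity(&req, &dest);
    printf("explicit mask: affinity=%#lx dest=%#lx\n", affinity, dest);

    affinity = fake_set_desc_affinity(NULL, &dest);
    printf("NULL mask:     affinity=%#lx dest=%#lx\n", affinity, dest);
    return 0;
}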
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -751,20 +751,28 @@ unsigned int set_desc_affinity(struct ir
unsigned long flags;
cpumask_t dest_mask;
- if (!cpumask_intersects(mask, &cpu_online_map))
+ if ( mask && !cpumask_intersects(mask, &cpu_online_map) )
return BAD_APICID;
irq = desc->irq;
spin_lock_irqsave(&vector_lock, flags);
- ret = __assign_irq_vector(irq, desc, mask);
+ ret = __assign_irq_vector(irq, desc, mask ?: TARGET_CPUS);
spin_unlock_irqrestore(&vector_lock, flags);
- if (ret < 0)
+ if ( ret < 0 )
return BAD_APICID;
- cpumask_copy(desc->affinity, mask);
- cpumask_and(&dest_mask, mask, desc->arch.cpu_mask);
+ if ( mask )
+ {
+ cpumask_copy(desc->affinity, mask);
+ cpumask_and(&dest_mask, mask, desc->arch.cpu_mask);
+ }
+ else
+ {
+ cpumask_setall(desc->affinity);
+ cpumask_copy(&dest_mask, desc->arch.cpu_mask);
+ }
cpumask_and(&dest_mask, &dest_mask, &cpu_online_map);
return cpu_mask_to_apicid(&dest_mask);
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -888,7 +888,7 @@ static void enable_iommu(struct amd_iomm
desc = irq_to_desc(iommu->msi.irq);
spin_lock(&desc->lock);
- set_msi_affinity(desc, &cpu_online_map);
+ set_msi_affinity(desc, NULL);
spin_unlock(&desc->lock);
amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -2135,10 +2135,10 @@ static void adjust_irq_affinity(struct a
const struct acpi_rhsa_unit *rhsa = drhd_to_rhsa(drhd);
unsigned int node = rhsa ? pxm_to_node(rhsa->proximity_domain)
: NUMA_NO_NODE;
- const cpumask_t *cpumask = &cpu_online_map;
+ const cpumask_t *cpumask = NULL;
if ( node < MAX_NUMNODES && node_online(node) &&
- cpumask_intersects(&node_to_cpumask(node), cpumask) )
+ cpumask_intersects(&node_to_cpumask(node), &cpu_online_map) )
cpumask = &node_to_cpumask(node);
dma_msi_set_affinity(irq_to_desc(drhd->iommu->msi.irq), cpumask);
}
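[Editor's note, not part of the patch: after this hunk, VT-d prefers the IOMMU's NUMA-node CPU mask when that node has online CPUs and otherwise passes NULL, i.e. no affinity restriction, instead of &cpu_online_map. The standalone sketch below models just that selection; the bitmap stands in for cpumask_t and the names (pick_iommu_irq_mask, online_map) are hypothetical.]

#include <stdio.h>

static unsigned long online_map = 0x0f; /* CPUs 0-3 online */

/* Returns the mask to hand to the set-affinity hook, or NULL for "unrestricted". */
static const unsigned long *pick_iommu_irq_mask(int node_valid,
                                                unsigned long node_mask,
                                                unsigned long *scratch)
{
    *scratch = node_mask;
    /* Use the node's CPUs only if at least one of them is online. */
    if ( node_valid && (node_mask & online_map) )
        return scratch;
    return NULL; /* was &cpu_online_map before this patch */
}

int main(void)
{
    unsigned long scratch;
    const unsigned long *m;

    m = pick_iommu_irq_mask(1, 0x0c, &scratch); /* node has online CPUs */
    printf("%s\n", m ? "node mask" : "NULL (unrestricted)");

    m = pick_iommu_irq_mask(1, 0xf0, &scratch); /* node CPUs all offline */
    printf("%s\n", m ? "node mask" : "NULL (unrestricted)");
    return 0;
}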