# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1234868776 0
# Node ID 2e6de0f50f3fe199fea890d0c340a9f9260b143d
# Parent cb8ece5d764755e7f6fd4698a84d8e24f1d946b3
passthrough: fix MSI-X table fixmap allocation
Currently, MSI-X table pages are allocated one fixmap page per vector, so the
available fixmap pages are depleted when assigning devices with a large number
of vectors. This patch fixes that, as well as a bug that prevents cross-page
MSI-X tables from working properly.
Fixmap pages for the MSI-X table are now allocated per device: if the table
entries of two MSI-X vectors share the same page, that page is mapped into the
fixmap only once. A reference count is maintained so the page can be unmapped
once all vectors using it are freed.
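
As an illustration only (not part of the patch), the sharing scheme introduced
by msix_get_fixmap()/msix_put_fixmap() reduces to the sketch below; refcnt[]
and idx[] stand in for the new per-device msix_table_refcnt[]/msix_table_idx[]
arrays, and the hypothetical alloc_map()/free_map() helpers stand in for the
real fixmap allocation, set_fixmap_nocache() and destroy_xen_mappings() calls:

    static int refcnt[MAX_MSIX_TABLE_PAGES]; /* vectors using each table page */
    static int idx[MAX_MSIX_TABLE_PAGES];    /* fixmap slot of each table page */

    int get_page_mapping(unsigned int page)  /* called once per vector setup */
    {
        if ( refcnt[page]++ == 0 )           /* first vector on this page */
            idx[page] = alloc_map(page);     /* map the page exactly once */
        return idx[page];
    }

    void put_page_mapping(unsigned int page) /* called once per vector teardown */
    {
        if ( --refcnt[page] == 0 )           /* last vector on this page gone */
            free_map(idx[page]);             /* now safe to unmap */
    }

The real functions additionally hold dev->msix_table_lock around these updates
and translate entry physical addresses into table page numbers.
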
This also changes the meaning of msi_desc->mask_base from the virtual address
of the start of the MSI-X table to the virtual address of the target entry.
The former is currently buggy (it always maps the first page, but MSI-X can
support up to 2048 entries) and cannot handle separately allocated pages.
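
To illustrate the resulting simplification at the use sites (again, not patch
content): before this change every access computed

    base = entry->mask_base + entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

while afterwards mask_base already points at the entry, so

    base = entry->mask_base;

suffices; the per-entry offset is applied once, when msix_capability_init()
sets up the fixmap mapping.
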
Signed-off-by: Qing He <qing.he@intel.com>
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -29,18 +29,17 @@
/* bitmap indicate which fixed map is free */
DEFINE_SPINLOCK(msix_fixmap_lock);
-DECLARE_BITMAP(msix_fixmap_pages, MAX_MSIX_PAGES);
+DECLARE_BITMAP(msix_fixmap_pages, FIX_MSIX_MAX_PAGES);
static int msix_fixmap_alloc(void)
{
- int i;
- int rc = -1;
+ int i, rc = -ENOMEM;
spin_lock(&msix_fixmap_lock);
- for ( i = 0; i < MAX_MSIX_PAGES; i++ )
+ for ( i = 0; i < FIX_MSIX_MAX_PAGES; i++ )
if ( !test_bit(i, &msix_fixmap_pages) )
break;
- if ( i == MAX_MSIX_PAGES )
+ if ( i == FIX_MSIX_MAX_PAGES )
goto out;
rc = FIX_MSIX_IO_RESERV_BASE + i;
set_bit(i, &msix_fixmap_pages);
@@ -60,6 +59,62 @@ static void msix_fixmap_free(int idx)
spin_unlock(&msix_fixmap_lock);
}
+static int msix_get_fixmap(struct pci_dev *dev, unsigned long table_paddr,
+ unsigned long entry_paddr)
+{
+ int nr_page, idx;
+
+ nr_page = (entry_paddr >> PAGE_SHIFT) - (table_paddr >> PAGE_SHIFT);
+
+ if ( nr_page < 0 || nr_page >= MAX_MSIX_TABLE_PAGES )
+ return -EINVAL;
+
+ spin_lock(&dev->msix_table_lock);
+ if ( dev->msix_table_refcnt[nr_page]++ == 0 )
+ {
+ idx = msix_fixmap_alloc();
+ if ( idx < 0 )
+ {
+ dev->msix_table_refcnt[nr_page]--;
+ goto out;
+ }
+ set_fixmap_nocache(idx, entry_paddr);
+ dev->msix_table_idx[nr_page] = idx;
+ }
+ else
+ idx = dev->msix_table_idx[nr_page];
+
+ out:
+ spin_unlock(&dev->msix_table_lock);
+ return idx;
+}
+
+static void msix_put_fixmap(struct pci_dev *dev, int idx)
+{
+ int i;
+ unsigned long start;
+
+ spin_lock(&dev->msix_table_lock);
+ for ( i = 0; i < MAX_MSIX_TABLE_PAGES; i++ )
+ {
+ if ( dev->msix_table_idx[i] == idx )
+ break;
+ }
+ if ( i == MAX_MSIX_TABLE_PAGES )
+ goto out;
+
+ if ( --dev->msix_table_refcnt[i] == 0 )
+ {
+ start = fix_to_virt(idx);
+ destroy_xen_mappings(start, start + PAGE_SIZE);
+ msix_fixmap_free(idx);
+ dev->msix_table_idx[i] = 0;
+ }
+
+ out:
+ spin_unlock(&dev->msix_table_lock);
+}
+
/*
* MSI message composition
*/
@@ -127,8 +182,7 @@ static void read_msi_msg(struct msi_desc
case PCI_CAP_ID_MSIX:
{
void __iomem *base;
- base = entry->mask_base +
- entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+ base = entry->mask_base;
msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
@@ -201,8 +255,7 @@ static void write_msi_msg(struct msi_des
case PCI_CAP_ID_MSIX:
{
void __iomem *base;
- base = entry->mask_base +
- entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+ base = entry->mask_base;
writel(msg->address_lo,
base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
@@ -292,8 +345,7 @@ static void msix_flush_writes(unsigned i
break;
case PCI_CAP_ID_MSIX:
{
- int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
+ int offset = PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
readl(entry->mask_base + offset);
break;
}
@@ -334,8 +386,7 @@ static void msi_set_mask_bit(unsigned in
break;
case PCI_CAP_ID_MSIX:
{
- int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
+ int offset = PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
writel(flag, entry->mask_base + offset);
readl(entry->mask_base + offset);
break;
@@ -402,13 +453,10 @@ static void msi_free_vector(int vector)
{
unsigned long start;
- writel(1, entry->mask_base + entry->msi_attrib.entry_nr
- * PCI_MSIX_ENTRY_SIZE
- + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+ writel(1, entry->mask_base + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
start = (unsigned long)entry->mask_base & ~(PAGE_SIZE - 1);
- msix_fixmap_free(virt_to_fix(start));
- destroy_xen_mappings(start, start + PAGE_SIZE);
+ msix_put_fixmap(entry->dev, virt_to_fix(start));
}
list_del(&entry->list);
xfree(entry);
@@ -511,8 +559,8 @@ static int msix_capability_init(struct p
struct msi_desc *entry;
int pos;
u16 control;
- unsigned long phys_addr;
- u32 table_offset;
+ unsigned long table_paddr, entry_paddr;
+ u32 table_offset, entry_offset;
u8 bir;
void __iomem *base;
int idx;
@@ -533,15 +581,17 @@ static int msix_capability_init(struct p
table_offset = pci_conf_read32(bus, slot, func, msix_table_offset_reg(pos));
bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
- phys_addr = msi->table_base + table_offset;
- idx = msix_fixmap_alloc();
+ entry_offset = msi->entry_nr * PCI_MSIX_ENTRY_SIZE;
+
+ table_paddr = msi->table_base + table_offset;
+ entry_paddr = table_paddr + entry_offset;
+ idx = msix_get_fixmap(dev, table_paddr, entry_paddr);
if ( idx < 0 )
{
xfree(entry);
- return -ENOMEM;
+ return idx;
}
- set_fixmap_nocache(idx, phys_addr);
- base = (void *)(fix_to_virt(idx) + (phys_addr & ((1UL << PAGE_SHIFT) - 1)));
+ base = (void *)(fix_to_virt(idx) + (entry_paddr & ((1UL << PAGE_SHIFT) - 1)));
entry->msi_attrib.type = PCI_CAP_ID_MSIX;
entry->msi_attrib.is_64 = 1;
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -39,6 +39,7 @@ struct pci_dev *alloc_pdev(u8 bus, u8 de
pdev = xmalloc(struct pci_dev);
if ( !pdev )
return NULL;
+ memset(pdev, 0, sizeof(*pdev));
*((u8*) &pdev->bus) = bus;
*((u8*) &pdev->devfn) = devfn;
@@ -46,6 +47,7 @@ struct pci_dev *alloc_pdev(u8 bus, u8 de
spin_lock_init(&pdev->lock);
INIT_LIST_HEAD(&pdev->msi_list);
list_add(&pdev->alldevs_list, &alldevs_list);
+ spin_lock_init(&pdev->msix_table_lock);
return pdev;
}
--- a/xen/include/asm-x86/fixmap.h
+++ b/xen/include/asm-x86/fixmap.h
@@ -50,7 +50,7 @@ enum fixed_addresses {
FIX_IOMMU_MMIO_END = FIX_IOMMU_MMIO_BASE_0 + IOMMU_PAGES -1,
FIX_TBOOT_SHARED_BASE,
FIX_MSIX_IO_RESERV_BASE,
- FIX_MSIX_IO_RESERV_END = FIX_MSIX_IO_RESERV_BASE + MAX_MSIX_PAGES -1,
+ FIX_MSIX_IO_RESERV_END = FIX_MSIX_IO_RESERV_BASE + FIX_MSIX_MAX_PAGES -1,
__end_of_fixed_addresses
};
--- a/xen/include/asm-x86/msi.h
+++ b/xen/include/asm-x86/msi.h
@@ -49,9 +49,9 @@
/* MAX fixed pages reserved for mapping MSIX tables. */
#if defined(__x86_64__)
-#define MAX_MSIX_PAGES 512
+#define FIX_MSIX_MAX_PAGES 512
#else
-#define MAX_MSIX_PAGES 32
+#define FIX_MSIX_MAX_PAGES 32
#endif
struct msi_info {
@@ -89,7 +89,7 @@ struct msi_desc {
struct list_head list;
- void __iomem *mask_base;
+ void __iomem *mask_base; /* va for the entry in mask table */
struct pci_dev *dev;
int vector;
--- a/xen/include/xen/pci.h
+++ b/xen/include/xen/pci.h
@@ -29,10 +29,16 @@
#define PCI_BDF(b,d,f) ((((b) & 0xff) << 8) | PCI_DEVFN(d,f))
#define PCI_BDF2(b,df) ((((b) & 0xff) << 8) | ((df) & 0xff))
+#define MAX_MSIX_TABLE_PAGES 8 /* 2048 entries */
struct pci_dev {
struct list_head alldevs_list;
struct list_head domain_list;
+
struct list_head msi_list;
+ int msix_table_refcnt[MAX_MSIX_TABLE_PAGES];
+ int msix_table_idx[MAX_MSIX_TABLE_PAGES];
+ spinlock_t msix_table_lock;
+
struct domain *domain;
const u8 bus;
const u8 devfn;