File 19753-vtd-reg-write-lock.patch of Package xen
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1245148280 -3600
# Node ID cc07094a02e491240d15944d6e70bedcaca8d541
# Parent fa51db0871e1c4d8eca9913c6fc36c299be4e8d4
vtd: Clean up lock for VT-d register writes
VT-d register writes should be performed while holding the register
lock, but some register writes are currently done without it. This
patch adds the missing register_lock protection around those writes.
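The pattern applied throughout is the same: take register_lock with
interrupts disabled around every dmar_readl()/dmar_writel()/dmar_writeq()
sequence, and release it before calling helpers such as
iommu_flush_iec_global() or disable_pmr() that may take the lock
themselves. A minimal sketch of the pattern follows; dmar_toggle_feature()
is a hypothetical illustration, not code from this patch:

    /* Sketch only: dmar_toggle_feature() is a hypothetical example of
     * the locking discipline this patch enforces. */
    static void dmar_toggle_feature(struct iommu *iommu)
    {
        u32 sts;
        unsigned long flags;

        /* All DMAR register accesses hold register_lock with IRQs off. */
        spin_lock_irqsave(&iommu->register_lock, flags);
        sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
        dmar_writel(iommu->reg, DMAR_GCMD_REG, sts | DMA_GCMD_TE);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /* Drop the lock before calling into helpers that may acquire
         * it themselves, e.g. iommu_flush_iec_global(). */
    }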
Signed-off-by: Weidong Han <weidong.han@intel.com>
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -539,6 +539,7 @@ int enable_intremap(struct iommu *iommu)
{
struct ir_ctrl *ir_ctrl;
u32 gcmd;
+ unsigned long flags;
s_time_t start_time;
ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
@@ -561,6 +562,8 @@ int enable_intremap(struct iommu *iommu)
ir_ctrl->iremap_maddr |=
ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIMI_SHIFT) : 0;
#endif
+ spin_lock_irqsave(&iommu->register_lock, flags);
+
/* size field = 256 entries per 4K page = 8 - 1 */
ir_ctrl->iremap_maddr |= 7;
dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr);
@@ -578,10 +581,12 @@ int enable_intremap(struct iommu *iommu)
panic("Cannot set SIRTP field for interrupt remapping\n");
cpu_relax();
}
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
/* After set SIRTP, must globally invalidate the interrupt entry cache */
iommu_flush_iec_global(iommu);
+ spin_lock_irqsave(&iommu->register_lock, flags);
/* enable compatibility format interrupt pass through */
gcmd |= DMA_GCMD_CFI;
dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);
@@ -605,6 +610,7 @@ int enable_intremap(struct iommu *iommu)
panic("Cannot set IRE field for interrupt remapping\n");
cpu_relax();
}
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
init_ioapic_pin_intremap_index();
@@ -614,10 +620,12 @@ int enable_intremap(struct iommu *iommu)
void disable_intremap(struct iommu *iommu)
{
u32 sts;
+ unsigned long flags;
s_time_t start_time;
ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
+ spin_lock_irqsave(&iommu->register_lock, flags);
sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_IRE));
@@ -628,4 +636,5 @@ void disable_intremap(struct iommu *iomm
panic("Cannot clear IRE field for interrupt remapping\n");
cpu_relax();
}
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
}
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -261,13 +261,13 @@ static u64 addr_to_dma_page_maddr(struct
static void iommu_flush_write_buffer(struct iommu *iommu)
{
u32 val;
- unsigned long flag;
+ unsigned long flags;
s_time_t start_time;
if ( !rwbf_quirk && !cap_rwbf(iommu->cap) )
return;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ spin_lock_irqsave(&iommu->register_lock, flags);
val = dmar_readl(iommu->reg, DMAR_GSTS_REG);
dmar_writel(iommu->reg, DMAR_GCMD_REG, val | DMA_GCMD_WBF);
@@ -283,7 +283,7 @@ static void iommu_flush_write_buffer(str
" please disable IOMMU\n", __func__);
cpu_relax();
}
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
}
/* return value determine if we need a write buffer flush */
@@ -294,7 +294,7 @@ static int flush_context_reg(
{
struct iommu *iommu = (struct iommu *) _iommu;
u64 val = 0;
- unsigned long flag;
+ unsigned long flags;
s_time_t start_time;
/*
@@ -329,7 +329,7 @@ static int flush_context_reg(
}
val |= DMA_CCMD_ICC;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writeq(iommu->reg, DMAR_CCMD_REG, val);
/* Make sure hardware complete it */
@@ -344,7 +344,7 @@ static int flush_context_reg(
" please disable IOMMU\n", __func__);
cpu_relax();
}
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
/* flush context entry will implicitly flush write buffer */
return 0;
}
@@ -383,7 +383,7 @@ static int flush_iotlb_reg(void *_iommu,
struct iommu *iommu = (struct iommu *) _iommu;
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0, val_iva = 0;
- unsigned long flag;
+ unsigned long flags;
s_time_t start_time;
/*
@@ -424,7 +424,7 @@ static int flush_iotlb_reg(void *_iommu,
if ( cap_write_drain(iommu->cap) )
val |= DMA_TLB_WRITE_DRAIN;
- spin_lock_irqsave(&iommu->register_lock, flag);
+ spin_lock_irqsave(&iommu->register_lock, flags);
/* Note: Only uses first TLB reg currently */
if ( val_iva )
dmar_writeq(iommu->reg, tlb_offset, val_iva);
@@ -442,7 +442,7 @@ static int flush_iotlb_reg(void *_iommu,
" please disable IOMMU\n", __func__);
cpu_relax();
}
- spin_unlock_irqrestore(&iommu->register_lock, flag);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
/* check IOTLB invalidation granularity */
if ( DMA_TLB_IAIG(val) == 0 )
@@ -677,10 +677,10 @@ static int iommu_enable_translation(stru
" please disable IOMMU\n", __func__);
cpu_relax();
}
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
/* Disable PMRs when VT-d engine takes effect per spec definition */
disable_pmr(iommu);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
return 0;
}
@@ -1705,7 +1705,9 @@ static void setup_dom0_devices(struct do
void clear_fault_bits(struct iommu *iommu)
{
u64 val;
+ unsigned long flags;
+ spin_lock_irqsave(&iommu->register_lock, flags);
val = dmar_readq(
iommu->reg,
cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+0x8);
@@ -1714,6 +1716,7 @@ void clear_fault_bits(struct iommu *iomm
cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+8,
val);
dmar_writel(iommu->reg, DMAR_FSTS_REG, DMA_FSTS_FAULTS);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int init_vtd_hw(void)
@@ -1723,6 +1726,7 @@ static int init_vtd_hw(void)
struct iommu_flush *flush = NULL;
int vector;
int ret;
+ unsigned long flags;
for_each_drhd_unit ( drhd )
{
@@ -1732,7 +1736,10 @@ static int init_vtd_hw(void)
dma_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map)));
iommu->vector = vector;
clear_fault_bits(iommu);
+
+ spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
/* initialize flush functions */
flush = iommu_get_flush(iommu);
@@ -1994,6 +2001,7 @@ int iommu_resume(void)
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
u32 i;
+ unsigned long flags;
if ( !vtd_enabled )
return 0;
@@ -2006,6 +2014,7 @@ int iommu_resume(void)
iommu = drhd->iommu;
i = iommu->index;
+ spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writel(iommu->reg, DMAR_FECTL_REG,
(u32) iommu_state[i][DMAR_FECTL_REG]);
dmar_writel(iommu->reg, DMAR_FEDATA_REG,
@@ -2014,6 +2023,7 @@ int iommu_resume(void)
(u32) iommu_state[i][DMAR_FEADDR_REG]);
dmar_writel(iommu->reg, DMAR_FEUADDR_REG,
(u32) iommu_state[i][DMAR_FEUADDR_REG]);
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
if ( iommu_enable_translation(iommu) )
return -EIO;
--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -416,6 +416,7 @@ static int flush_iotlb_qi(
int enable_qinval(struct iommu *iommu)
{
u32 sts;
+ unsigned long flags;
s_time_t start_time;
struct qi_ctrl *qi_ctrl;
struct iommu_flush *flush;
@@ -438,6 +439,7 @@ int enable_qinval(struct iommu *iommu)
flush->iotlb = flush_iotlb_qi;
}
+ spin_lock_irqsave(&iommu->register_lock, flags);
/* Setup Invalidation Queue Address(IQA) register with the
* address of the page we just allocated. QS field at
* bits[2:0] to indicate size of queue is one 4KB page.
@@ -461,6 +463,7 @@ int enable_qinval(struct iommu *iommu)
panic("Cannot set QIE field for queue invalidation\n");
cpu_relax();
}
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
return 0;
}
@@ -468,10 +471,12 @@ int enable_qinval(struct iommu *iommu)
void disable_qinval(struct iommu *iommu)
{
u32 sts;
+ unsigned long flags;
s_time_t start_time;
ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);
+ spin_lock_irqsave(&iommu->register_lock, flags);
sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_QIE));
@@ -483,4 +488,5 @@ void disable_qinval(struct iommu *iommu)
panic("Cannot clear QIE field for queue invalidation\n");
cpu_relax();
}
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
}
--- a/xen/drivers/passthrough/vtd/utils.c
+++ b/xen/drivers/passthrough/vtd/utils.c
@@ -41,11 +41,13 @@ void disable_pmr(struct iommu *iommu)
{
s_time_t start_time;
unsigned int val;
+ unsigned long flags;
val = dmar_readl(iommu->reg, DMAR_PMEN_REG);
if ( !(val & DMA_PMEN_PRS) )
return;
+ spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writel(iommu->reg, DMAR_PMEN_REG, val & ~DMA_PMEN_EPM);
start_time = NOW();
@@ -60,6 +62,7 @@ void disable_pmr(struct iommu *iommu)
cpu_relax();
}
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
dprintk(XENLOG_INFO VTDPREFIX,
"Disabled protected memory registers\n");