提交 3f398bc7 编写于 作者: S Suravee Suthikulpanit 提交者: Joerg Roedel

iommu/AMD: Per-thread IOMMU Interrupt Handling

In the current interrupt handling scheme, there are as many threads as
the number of IOMMUs. Each thread is created and assigned to an IOMMU at
the time of registering interrupt handlers (request_threaded_irq).
When an IOMMU HW generates an interrupt, the irq handler (top half) wakes up
the corresponding thread to process event and PPR logs of all IOMMUs
starting from the 1st IOMMU.

In a system with multiple IOMMUs, this handling scheme complicates the
synchronization of the IOMMU data structures and status registers as
there could be multiple threads competing for the same IOMMU while
the other IOMMU could be left unhandled.

To simplify, this patch proposes a different interrupt handling scheme
in which each thread manages only the interrupts of its corresponding IOMMU.
This can be achieved by passing the struct amd_iommu when registering the
interrupt handlers. This structure is unique for each IOMMU and can be used
by the bottom half thread to identify the IOMMU to be handled instead
of iterating over all IOMMUs with for_each_iommu.  Besides, this also
eliminates the need to lock the IOMMU while processing event and PPR logs.
Signed-off-by: NSuravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: NJoerg Roedel <joro@8bytes.org>
上级 d3263bc2
...@@ -700,22 +700,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) ...@@ -700,22 +700,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
static void iommu_poll_events(struct amd_iommu *iommu) static void iommu_poll_events(struct amd_iommu *iommu)
{ {
u32 head, tail, status; u32 head, tail;
unsigned long flags;
spin_lock_irqsave(&iommu->lock, flags);
/* enable event interrupts again */
do {
/*
* Workaround for Erratum ERBT1312
* Clearing the EVT_INT bit may race in the hardware, so read
* it again and make sure it was really cleared
*/
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
writel(MMIO_STATUS_EVT_INT_MASK,
iommu->mmio_base + MMIO_STATUS_OFFSET);
} while (status & MMIO_STATUS_EVT_INT_MASK);
head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
...@@ -726,8 +711,6 @@ static void iommu_poll_events(struct amd_iommu *iommu) ...@@ -726,8 +711,6 @@ static void iommu_poll_events(struct amd_iommu *iommu)
} }
writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
spin_unlock_irqrestore(&iommu->lock, flags);
} }
static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
...@@ -752,26 +735,11 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw) ...@@ -752,26 +735,11 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
static void iommu_poll_ppr_log(struct amd_iommu *iommu) static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{ {
unsigned long flags; u32 head, tail;
u32 head, tail, status;
if (iommu->ppr_log == NULL) if (iommu->ppr_log == NULL)
return; return;
spin_lock_irqsave(&iommu->lock, flags);
/* enable ppr interrupts again */
do {
/*
* Workaround for Erratum ERBT1312
* Clearing the PPR_INT bit may race in the hardware, so read
* it again and make sure it was really cleared
*/
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
writel(MMIO_STATUS_PPR_INT_MASK,
iommu->mmio_base + MMIO_STATUS_OFFSET);
} while (status & MMIO_STATUS_PPR_INT_MASK);
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
...@@ -807,34 +775,50 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu) ...@@ -807,34 +775,50 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
/*
* Release iommu->lock because ppr-handling might need to
* re-acquire it
*/
spin_unlock_irqrestore(&iommu->lock, flags);
/* Handle PPR entry */ /* Handle PPR entry */
iommu_handle_ppr_entry(iommu, entry); iommu_handle_ppr_entry(iommu, entry);
spin_lock_irqsave(&iommu->lock, flags);
/* Refresh ring-buffer information */ /* Refresh ring-buffer information */
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
} }
spin_unlock_irqrestore(&iommu->lock, flags);
} }
/*
 * Threaded (bottom-half) IRQ handler for a single IOMMU.
 *
 * @irq:  the interrupt number (unused here; required by the handler
 *        signature expected by request_threaded_irq()).
 * @data: the struct amd_iommu passed at registration time in
 *        iommu_setup_msi(), identifying exactly which IOMMU raised
 *        the interrupt.  This is what makes the handling per-thread:
 *        no for_each_iommu() walk and no cross-IOMMU locking needed.
 *
 * Returns IRQ_HANDLED unconditionally once both logs are drained.
 */
irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
	struct amd_iommu *iommu = (struct amd_iommu *) data;
	/* Snapshot of the MMIO status register; EVT/PPR bits tell us
	 * which log(s) have pending entries. */
	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	/* Loop until neither interrupt-pending bit remains set (see the
	 * ERBT1312 note below for why one pass may not be enough). */
	while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
		/* Enable EVT and PPR interrupts again */
		/* Write-1-to-clear: acknowledge both pending bits up front
		 * so new interrupts can be signalled while we drain. */
		writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
			iommu->mmio_base + MMIO_STATUS_OFFSET);

		if (status & MMIO_STATUS_EVT_INT_MASK) {
			pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
			iommu_poll_events(iommu);
		}

		if (status & MMIO_STATUS_PPR_INT_MASK) {
			pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
			iommu_poll_ppr_log(iommu);
		}

		/*
		 * Hardware bug: ERBT1312
		 * When re-enabling interrupt (by writing 1
		 * to clear the bit), the hardware might also try to set
		 * the interrupt bit in the event status register.
		 * In this scenario, the bit will be set, and disable
		 * subsequent interrupts.
		 *
		 * Workaround: The IOMMU driver should read back the
		 * status register and check if the interrupt bits are cleared.
		 * If not, driver will need to go through the interrupt handler
		 * again and re-clear the bits
		 */
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	}
	return IRQ_HANDLED;
}
......
...@@ -1324,7 +1324,7 @@ static int iommu_setup_msi(struct amd_iommu *iommu) ...@@ -1324,7 +1324,7 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
amd_iommu_int_handler, amd_iommu_int_handler,
amd_iommu_int_thread, amd_iommu_int_thread,
0, "AMD-Vi", 0, "AMD-Vi",
iommu->dev); iommu);
if (r) { if (r) {
pci_disable_msi(iommu->dev); pci_disable_msi(iommu->dev);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册