Commit 09b42804 authored by Joerg Roedel

x86/amd-iommu: Reimplement flush_all_domains_on_iommu()

This patch reimplements the function
flush_all_domains_on_iommu to use the global protection
domain list.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Parent e3306664
@@ -499,43 +499,48 @@ static void iommu_flush_tlb_pde(struct protection_domain *domain)
 }
 
 /*
- * This function flushes one domain on one IOMMU
+ * This function flushes all domains that have devices on the given IOMMU
  */
-static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid)
+static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
 {
-	struct iommu_cmd cmd;
+	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+	struct protection_domain *domain;
 	unsigned long flags;
 
-	__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
-				      domid, 1, 1);
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	__iommu_queue_command(iommu, &cmd);
-	__iommu_completion_wait(iommu);
-	__iommu_wait_for_completion(iommu);
-	spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
-{
-	int i;
+	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 
-	for (i = 1; i < MAX_DOMAIN_ID; ++i) {
-		if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
+	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
+		if (domain->dev_iommu[iommu->index] == 0)
 			continue;
-		flush_domain_on_iommu(iommu, i);
+
+		spin_lock(&domain->lock);
+		iommu_queue_inv_iommu_pages(iommu, address, domain->id, 1, 1);
+		iommu_flush_complete(domain);
+		spin_unlock(&domain->lock);
 	}
 
+	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 }
 
+/*
+ * This function uses heavy locking and may disable irqs for some time. But
+ * this is no issue because it is only called during resume.
+ */
 void amd_iommu_flush_all_domains(void)
 {
 	struct protection_domain *domain;
+	unsigned long flags;
+
+	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 
 	list_for_each_entry(domain, &amd_iommu_pd_list, list) {
+		spin_lock(&domain->lock);
 		iommu_flush_tlb_pde(domain);
 		iommu_flush_complete(domain);
+		spin_unlock(&domain->lock);
 	}
+
+	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 }
 
 static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
...
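
As a rough illustration of the idea behind the new flush_all_domains_on_iommu(): instead of scanning the whole domain-ID bitmap, the code walks the global protection-domain list and skips any domain that has no devices behind the IOMMU in question. Below is a minimal user-space sketch of that pattern; the struct layout, the hand-rolled singly linked list, and the flush_one_domain() helper are simplified stand-ins for illustration (the kernel's amd_iommu_pd_lock and domain->lock locking is omitted), so only the dev_iommu[iommu->index] check mirrors the patched code.

/*
 * Simplified, illustrative analogue of the new flushing strategy.
 * Not the kernel's real definitions.
 */
#include <stdio.h>

#define MAX_IOMMUS 4

struct protection_domain {
	int id;
	/* per-IOMMU device count, mirroring dev_iommu[] in the patch */
	int dev_iommu[MAX_IOMMUS];
	struct protection_domain *next;	/* stand-in for the kernel list node */
};

struct amd_iommu {
	int index;
};

/* global domain list, analogous to amd_iommu_pd_list */
static struct protection_domain *pd_list;

static void flush_one_domain(struct amd_iommu *iommu,
			     struct protection_domain *domain)
{
	/* stands in for iommu_queue_inv_iommu_pages() + iommu_flush_complete() */
	printf("IOMMU %d: flushing domain %d\n", iommu->index, domain->id);
}

static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
{
	struct protection_domain *domain;

	for (domain = pd_list; domain; domain = domain->next) {
		/* skip domains with no devices behind this IOMMU */
		if (domain->dev_iommu[iommu->index] == 0)
			continue;
		flush_one_domain(iommu, domain);
	}
}

int main(void)
{
	struct protection_domain d1 = { .id = 1, .dev_iommu = { 1, 0, 0, 0 } };
	struct protection_domain d2 = { .id = 2, .dev_iommu = { 0, 2, 0, 0 } };
	struct amd_iommu iommu0 = { .index = 0 };

	d1.next = &d2;
	pd_list = &d1;

	flush_all_domains_on_iommu(&iommu0);	/* flushes only domain 1 */
	return 0;
}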