提交 c42d9f32 编写于 作者: S Suresh Siddha 提交者: Ingo Molnar

x64, x2apic/intr-remap: fix the need for sequential array allocation of iommus

Clean up the intel-iommu code related to deferred iommu flush logic. There is
no need to allocate all the iommus as a sequential array.

This will be used later in the interrupt-remapping patch series to
allocate each iommu much earlier and individually for each device remapping
hardware unit.
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: akpm@linux-foundation.org
Cc: arjan@linux.intel.com
Cc: andi@firstfloor.org
Cc: ebiederm@xmission.com
Cc: jbarnes@virtuousgeek.org
Cc: steiner@sgi.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
上级 e61d98d8
...@@ -377,11 +377,18 @@ int __init early_dmar_detect(void) ...@@ -377,11 +377,18 @@ int __init early_dmar_detect(void)
return (ACPI_SUCCESS(status) ? 1 : 0); return (ACPI_SUCCESS(status) ? 1 : 0);
} }
struct intel_iommu *alloc_iommu(struct intel_iommu *iommu, struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd)
struct dmar_drhd_unit *drhd)
{ {
struct intel_iommu *iommu;
int map_size; int map_size;
u32 ver; u32 ver;
static int iommu_allocated = 0;
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
return NULL;
iommu->seq_id = iommu_allocated++;
iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K); iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
if (!iommu->reg) { if (!iommu->reg) {
......
...@@ -58,8 +58,6 @@ static void flush_unmaps_timeout(unsigned long data); ...@@ -58,8 +58,6 @@ static void flush_unmaps_timeout(unsigned long data);
DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
static struct intel_iommu *g_iommus;
#define HIGH_WATER_MARK 250 #define HIGH_WATER_MARK 250
struct deferred_flush_tables { struct deferred_flush_tables {
int next; int next;
...@@ -1649,8 +1647,6 @@ int __init init_dmars(void) ...@@ -1649,8 +1647,6 @@ int __init init_dmars(void)
* endfor * endfor
*/ */
for_each_drhd_unit(drhd) { for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;
g_num_of_iommus++; g_num_of_iommus++;
/* /*
* lock not needed as this is only incremented in the single * lock not needed as this is only incremented in the single
...@@ -1659,26 +1655,17 @@ int __init init_dmars(void) ...@@ -1659,26 +1655,17 @@ int __init init_dmars(void)
*/ */
} }
g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL);
if (!g_iommus) {
ret = -ENOMEM;
goto error;
}
deferred_flush = kzalloc(g_num_of_iommus * deferred_flush = kzalloc(g_num_of_iommus *
sizeof(struct deferred_flush_tables), GFP_KERNEL); sizeof(struct deferred_flush_tables), GFP_KERNEL);
if (!deferred_flush) { if (!deferred_flush) {
kfree(g_iommus);
ret = -ENOMEM; ret = -ENOMEM;
goto error; goto error;
} }
i = 0;
for_each_drhd_unit(drhd) { for_each_drhd_unit(drhd) {
if (drhd->ignored) if (drhd->ignored)
continue; continue;
iommu = alloc_iommu(&g_iommus[i], drhd); iommu = alloc_iommu(drhd);
i++;
if (!iommu) { if (!iommu) {
ret = -ENOMEM; ret = -ENOMEM;
goto error; goto error;
...@@ -1770,7 +1757,6 @@ int __init init_dmars(void) ...@@ -1770,7 +1757,6 @@ int __init init_dmars(void)
iommu = drhd->iommu; iommu = drhd->iommu;
free_iommu(iommu); free_iommu(iommu);
} }
kfree(g_iommus);
return ret; return ret;
} }
...@@ -1927,7 +1913,10 @@ static void flush_unmaps(void) ...@@ -1927,7 +1913,10 @@ static void flush_unmaps(void)
/* just flush them all */ /* just flush them all */
for (i = 0; i < g_num_of_iommus; i++) { for (i = 0; i < g_num_of_iommus; i++) {
if (deferred_flush[i].next) { if (deferred_flush[i].next) {
iommu_flush_iotlb_global(&g_iommus[i], 0); struct intel_iommu *iommu =
deferred_flush[i].domain[0]->iommu;
iommu_flush_iotlb_global(iommu, 0);
for (j = 0; j < deferred_flush[i].next; j++) { for (j = 0; j < deferred_flush[i].next; j++) {
__free_iova(&deferred_flush[i].domain[j]->iovad, __free_iova(&deferred_flush[i].domain[j]->iovad,
deferred_flush[i].iova[j]); deferred_flush[i].iova[j]);
...@@ -1957,7 +1946,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) ...@@ -1957,7 +1946,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
if (list_size == HIGH_WATER_MARK) if (list_size == HIGH_WATER_MARK)
flush_unmaps(); flush_unmaps();
iommu_id = dom->iommu - g_iommus; iommu_id = dom->iommu->seq_id;
next = deferred_flush[iommu_id].next; next = deferred_flush[iommu_id].next;
deferred_flush[iommu_id].domain[next] = dom; deferred_flush[iommu_id].domain[next] = dom;
deferred_flush[iommu_id].iova[next] = iova; deferred_flush[iommu_id].iova[next] = iova;
......
...@@ -182,6 +182,7 @@ struct intel_iommu { ...@@ -182,6 +182,7 @@ struct intel_iommu {
int seg; int seg;
u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
spinlock_t register_lock; /* protect register handling */ spinlock_t register_lock; /* protect register handling */
int seq_id; /* sequence id of the iommu */
#ifdef CONFIG_DMAR #ifdef CONFIG_DMAR
unsigned long *domain_ids; /* bitmap of domains */ unsigned long *domain_ids; /* bitmap of domains */
...@@ -198,8 +199,7 @@ struct intel_iommu { ...@@ -198,8 +199,7 @@ struct intel_iommu {
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern struct intel_iommu *alloc_iommu(struct intel_iommu *iommu, extern struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd);
struct dmar_drhd_unit *drhd);
extern void free_iommu(struct intel_iommu *iommu); extern void free_iommu(struct intel_iommu *iommu);
#endif #endif
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册