提交 360eb3c5 编写于 作者: J Jiang Liu 提交者: Joerg Roedel

iommu/vt-d: use dedicated bitmap to track remapping entry allocation status

Currently the Intel interrupt remapping driver uses the "present" flag bit
in the remapping entry to track whether an entry is allocated or not.
It works as follow:
1) allocate a remapping entry and set its "present" flag bit to 1
2) compose other fields for the entry
3) update the remapping entry with the composed value

The remapping hardware may access the entry between step 1 and step 3,
which then observes an entry with the "present" flag set but random
values in all other fields.

This patch introduces a dedicated bitmap to track remapping entry
allocation status instead of sharing the "present" flag with hardware,
thus eliminating the race window. It also simplifies the implementation.
Tested-and-reviewed-by: Yijing Wang <wangyijing@huawei.com>
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Signed-off-by: Joerg Roedel <joro@8bytes.org>
上级 dbad0864
...@@ -72,7 +72,6 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) ...@@ -72,7 +72,6 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
u16 index, start_index; u16 index, start_index;
unsigned int mask = 0; unsigned int mask = 0;
unsigned long flags; unsigned long flags;
int i;
if (!count || !irq_iommu) if (!count || !irq_iommu)
return -1; return -1;
...@@ -96,32 +95,17 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) ...@@ -96,32 +95,17 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
} }
raw_spin_lock_irqsave(&irq_2_ir_lock, flags); raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
do { index = bitmap_find_free_region(table->bitmap,
for (i = index; i < index + count; i++) INTR_REMAP_TABLE_ENTRIES, mask);
if (table->base[i].present) if (index < 0) {
break; pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
/* empty index found */ } else {
if (i == index + count) cfg->remapped = 1;
break; irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
index = (index + count) % INTR_REMAP_TABLE_ENTRIES; irq_iommu->sub_handle = 0;
irq_iommu->irte_mask = mask;
if (index == start_index) { }
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
printk(KERN_ERR "can't allocate an IRTE\n");
return -1;
}
} while (1);
for (i = index; i < index + count; i++)
table->base[i].present = 1;
cfg->remapped = 1;
irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
irq_iommu->sub_handle = 0;
irq_iommu->irte_mask = mask;
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return index; return index;
...@@ -254,6 +238,8 @@ static int clear_entries(struct irq_2_iommu *irq_iommu) ...@@ -254,6 +238,8 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
set_64bit(&entry->low, 0); set_64bit(&entry->low, 0);
set_64bit(&entry->high, 0); set_64bit(&entry->high, 0);
} }
bitmap_release_region(iommu->ir_table->bitmap, index,
irq_iommu->irte_mask);
return qi_flush_iec(iommu, index, irq_iommu->irte_mask); return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
} }
...@@ -453,6 +439,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode) ...@@ -453,6 +439,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{ {
struct ir_table *ir_table; struct ir_table *ir_table;
struct page *pages; struct page *pages;
unsigned long *bitmap;
ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
GFP_ATOMIC); GFP_ATOMIC);
...@@ -464,13 +451,23 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode) ...@@ -464,13 +451,23 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
INTR_REMAP_PAGE_ORDER); INTR_REMAP_PAGE_ORDER);
if (!pages) { if (!pages) {
printk(KERN_ERR "failed to allocate pages of order %d\n", pr_err("IR%d: failed to allocate pages of order %d\n",
INTR_REMAP_PAGE_ORDER); iommu->seq_id, INTR_REMAP_PAGE_ORDER);
kfree(iommu->ir_table); kfree(iommu->ir_table);
return -ENOMEM; return -ENOMEM;
} }
bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
sizeof(long), GFP_ATOMIC);
if (bitmap == NULL) {
pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
__free_pages(pages, INTR_REMAP_PAGE_ORDER);
kfree(ir_table);
return -ENOMEM;
}
ir_table->base = page_address(pages); ir_table->base = page_address(pages);
ir_table->bitmap = bitmap;
iommu_set_irq_remapping(iommu, mode); iommu_set_irq_remapping(iommu, mode);
return 0; return 0;
......
...@@ -288,6 +288,7 @@ struct q_inval { ...@@ -288,6 +288,7 @@ struct q_inval {
struct ir_table { struct ir_table {
struct irte *base; struct irte *base;
unsigned long *bitmap;
}; };
#endif #endif
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册