Commit 1b39b077 authored by FUJITA Tomonori, committed by Linus Torvalds

iommu sg: x86: convert calgary IOMMU to use the IOMMU helper

This patch converts the Calgary IOMMU to use the IOMMU helper
functions. With the helper, the IOMMU no longer allocates a memory
area that spans the LLD's (low-level driver's) segment boundary.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 383af952
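The change in iommu_range_alloc() below is the heart of the conversion: the device's DMA segment boundary mask is turned into a page count and handed to iommu_area_alloc(), which will not return a range that straddles a multiple of that boundary. For readers who have not seen the helper, here is a minimal sketch of that call pattern. It is not part of the patch; example_alloc() is a made-up wrapper, and everything else (the boundary_size arithmetic and the iommu_area_alloc() arguments) is taken from the diff below.

#include <linux/kernel.h>	/* ALIGN() */
#include <linux/dma-mapping.h>	/* dma_get_seg_boundary() */
#include <linux/iommu-helper.h>	/* iommu_area_alloc(), iommu_area_free() */

/* Hypothetical wrapper, for illustration only. */
static unsigned long example_alloc(struct device *dev, unsigned long *map,
				   unsigned long map_size, unsigned int npages)
{
	unsigned long boundary_size, offset;

	/*
	 * Turn the device's segment boundary mask (in bytes) into a page
	 * count; an allocation must not cross a multiple of it.
	 */
	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	/* Find and mark npages free IOMMU pages within that constraint. */
	offset = iommu_area_alloc(map, map_size, 0, npages, 0,
				  boundary_size, 0);

	return offset;	/* ~0UL means the IOMMU table is full */
}

Note that iommu_area_alloc() also sets the bitmap bits it hands out, which is why the explicit set_bit_string() call disappears from iommu_range_alloc(); iommu_area_free() is the matching release now used in iommu_free().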
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -465,6 +465,9 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
 	  Calgary anyway, pass 'iommu=calgary' on the kernel command line.
 	  If unsure, say Y.
 
+config IOMMU_HELPER
+	def_bool CALGARY_IOMMU
+
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
 	bool
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -35,6 +35,7 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/scatterlist.h>
+#include <linux/iommu-helper.h>
 #include <asm/gart.h>
 #include <asm/calgary.h>
 #include <asm/tce.h>
@@ -260,22 +261,28 @@ static void iommu_range_reserve(struct iommu_table *tbl,
 	spin_unlock_irqrestore(&tbl->it_lock, flags);
 }
 
-static unsigned long iommu_range_alloc(struct iommu_table *tbl,
-	unsigned int npages)
+static unsigned long iommu_range_alloc(struct device *dev,
+				       struct iommu_table *tbl,
+				       unsigned int npages)
 {
 	unsigned long flags;
 	unsigned long offset;
+	unsigned long boundary_size;
+
+	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+			      PAGE_SIZE) >> PAGE_SHIFT;
 
 	BUG_ON(npages == 0);
 
 	spin_lock_irqsave(&tbl->it_lock, flags);
 
-	offset = find_next_zero_string(tbl->it_map, tbl->it_hint,
-				       tbl->it_size, npages);
+	offset = iommu_area_alloc(tbl->it_map, tbl->it_size, tbl->it_hint,
+				  npages, 0, boundary_size, 0);
 	if (offset == ~0UL) {
 		tbl->chip_ops->tce_cache_blast(tbl);
-		offset = find_next_zero_string(tbl->it_map, 0,
-					       tbl->it_size, npages);
+
+		offset = iommu_area_alloc(tbl->it_map, tbl->it_size, 0,
+					  npages, 0, boundary_size, 0);
 		if (offset == ~0UL) {
 			printk(KERN_WARNING "Calgary: IOMMU full.\n");
 			spin_unlock_irqrestore(&tbl->it_lock, flags);
@@ -286,7 +293,6 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 		}
 	}
 
-	set_bit_string(tbl->it_map, offset, npages);
 	tbl->it_hint = offset + npages;
 	BUG_ON(tbl->it_hint > tbl->it_size);
 
@@ -295,13 +301,13 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
 	return offset;
 }
 
-static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr,
-	unsigned int npages, int direction)
+static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
+			      void *vaddr, unsigned int npages, int direction)
 {
 	unsigned long entry;
 	dma_addr_t ret = bad_dma_address;
 
-	entry = iommu_range_alloc(tbl, npages);
+	entry = iommu_range_alloc(dev, tbl, npages);
 
 	if (unlikely(entry == bad_dma_address))
 		goto error;
@@ -354,7 +360,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 		       badbit, tbl, dma_addr, entry, npages);
 	}
 
-	__clear_bit_string(tbl->it_map, entry, npages);
+	iommu_area_free(tbl->it_map, entry, npages);
 
 	spin_unlock_irqrestore(&tbl->it_lock, flags);
 }
@@ -438,7 +444,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 		vaddr = (unsigned long) sg_virt(s);
 		npages = num_dma_pages(vaddr, s->length);
 
-		entry = iommu_range_alloc(tbl, npages);
+		entry = iommu_range_alloc(dev, tbl, npages);
 		if (entry == bad_dma_address) {
 			/* makes sure unmap knows to stop */
 			s->dma_length = 0;
@@ -476,7 +482,7 @@ static dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
 	npages = num_dma_pages(uaddr, size);
 
 	if (translation_enabled(tbl))
-		dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
+		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction);
 	else
 		dma_handle = virt_to_bus(vaddr);
 
@@ -516,7 +522,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
 
 	if (translation_enabled(tbl)) {
 		/* set up tces to cover the allocated range */
-		mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
+		mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
 		if (mapping == bad_dma_address)
 			goto free;
 