Commit fde9a109 authored by FUJITA Tomonori, committed by Linus Torvalds

iommu sg: x86: convert gart IOMMU to use the IOMMU helper

This patch converts the GART IOMMU to use the IOMMU helper functions. With
this change the IOMMU no longer allocates a memory area that spans an LLD's
segment boundary.
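
[Note: the following sketch is not part of the patch.] The helper's core
guarantee is that an allocated run of pages never crosses the device's
segment boundary. A minimal stand-alone model of that check, patterned
after what lib/iommu-helper.c does (span_check() and the sample numbers
are illustrative):

    #include <stdio.h>

    /*
     * shift is the bus base in pages, index/nr are in bitmap page
     * units, boundary_size is the segment boundary in pages (a
     * power of two).  Nonzero return: the area would span a boundary.
     */
    static int span_check(unsigned long shift, unsigned long index,
                          unsigned long nr, unsigned long boundary_size)
    {
            unsigned long offset = (shift + index) & (boundary_size - 1);

            return offset + nr > boundary_size;
    }

    int main(void)
    {
            /* 16 pages ending exactly on a 4GB line (0x100000 pages): OK. */
            printf("%d\n", span_check(0, 0xffff0, 0x10, 0x100000)); /* 0 */
            /* One page more and the run straddles the line: rejected. */
            printf("%d\n", span_check(0, 0xffff0, 0x11, 0x100000)); /* 1 */
            return 0;
    }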
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 1b39b077
@@ -466,7 +466,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
 	  If unsure, say Y.
 
 config IOMMU_HELPER
-	def_bool CALGARY_IOMMU
+	def_bool (CALGARY_IOMMU || GART_IOMMU)
 
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
@@ -25,6 +25,7 @@
 #include <linux/bitops.h>
 #include <linux/kdebug.h>
 #include <linux/scatterlist.h>
+#include <linux/iommu-helper.h>
 #include <asm/atomic.h>
 #include <asm/io.h>
 #include <asm/mtrr.h>
@@ -82,17 +83,24 @@ AGPEXTERN __u32 *agp_gatt_table;
 static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
 static int need_flush;		/* global flush state. set for each gart wrap */
 
-static unsigned long alloc_iommu(int size)
+static unsigned long alloc_iommu(struct device *dev, int size)
 {
 	unsigned long offset, flags;
+	unsigned long boundary_size;
+	unsigned long base_index;
+
+	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
+			   PAGE_SIZE) >> PAGE_SHIFT;
+	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+			      PAGE_SIZE) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
-	offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
-					iommu_pages, size);
+	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
+				  size, base_index, boundary_size, 0);
 	if (offset == -1) {
 		need_flush = 1;
-		offset = find_next_zero_string(iommu_gart_bitmap, 0,
-						iommu_pages, size);
+		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
+					  size, base_index, boundary_size, 0);
 	}
 	if (offset != -1) {
 		set_bit_string(iommu_gart_bitmap, offset, size);
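
[Worked example, values assumed for illustration rather than taken from
the patch: the default 4GB segment boundary mask and 4KB pages, with a
hypothetical aperture bus base standing in for iommu_bus_base.]

    #include <stdio.h>

    #define PAGE_SHIFT	12
    #define PAGE_SIZE	(1UL << PAGE_SHIFT)
    #define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long seg_mask = 0xffffffffUL;  /* assumed: 4GB - 1 */
            unsigned long bus_base = 0xf0000000UL;  /* hypothetical base */

            unsigned long boundary_size =
                    ALIGN(seg_mask + 1, PAGE_SIZE) >> PAGE_SHIFT;
            unsigned long base_index =
                    ALIGN(bus_base & seg_mask, PAGE_SIZE) >> PAGE_SHIFT;

            /* 0x100000 pages: allocations may not cross a 4GB line. */
            printf("boundary_size = %#lx pages\n", boundary_size);
            /* 0xf0000: rebases the aperture into page units, so the
             * span check runs on bus addresses, not bitmap indices. */
            printf("base_index    = %#lx\n", base_index);
            return 0;
    }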
@@ -114,7 +122,7 @@ static void free_iommu(unsigned long offset, int size)
 	unsigned long flags;
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
-	__clear_bit_string(iommu_gart_bitmap, offset, size);
+	iommu_area_free(iommu_gart_bitmap, offset, size);
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }
@@ -235,7 +243,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 				size_t size, int dir)
 {
 	unsigned long npages = to_pages(phys_mem, size);
-	unsigned long iommu_page = alloc_iommu(npages);
+	unsigned long iommu_page = alloc_iommu(dev, npages);
 	int i;
 
 	if (iommu_page == -1) {
@@ -355,10 +363,11 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 }
 
 /* Map multiple scatterlist entries continuous into the first. */
-static int __dma_map_cont(struct scatterlist *start, int nelems,
-			  struct scatterlist *sout, unsigned long pages)
+static int __dma_map_cont(struct device *dev, struct scatterlist *start,
+			  int nelems, struct scatterlist *sout,
+			  unsigned long pages)
 {
-	unsigned long iommu_start = alloc_iommu(pages);
+	unsigned long iommu_start = alloc_iommu(dev, pages);
 	unsigned long iommu_page = iommu_start;
 	struct scatterlist *s;
 	int i;
@@ -394,8 +403,8 @@ static int __dma_map_cont(struct scatterlist *start, int nelems,
 }
 
 static inline int
-dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
-	     unsigned long pages, int need)
+dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
+	     struct scatterlist *sout, unsigned long pages, int need)
 {
 	if (!need) {
 		BUG_ON(nelems != 1);
@@ -403,7 +412,7 @@ dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
 		sout->dma_length = start->length;
 		return 0;
 	}
-	return __dma_map_cont(start, nelems, sout, pages);
+	return __dma_map_cont(dev, start, nelems, sout, pages);
 }
 
 /*
@@ -449,8 +458,8 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 		if (!iommu_merge || !nextneed || !need || s->offset ||
 		    (s->length + seg_size > max_seg_size) ||
 		    (ps->offset + ps->length) % PAGE_SIZE) {
-			if (dma_map_cont(start_sg, i - start, sgmap,
-					 pages, need) < 0)
+			if (dma_map_cont(dev, start_sg, i - start,
+					 sgmap, pages, need) < 0)
 				goto error;
 			out++;
 			seg_size = 0;
@@ -466,7 +475,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 		pages += to_pages(s->offset, s->length);
 		ps = s;
 	}
-	if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
+	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
 		goto error;
 	out++;
 	flush_gart();
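
[For completeness, the constraint honored above is the one a low-level
driver declares through dma_set_seg_boundary(). A sketch, not part of
this patch: example_probe() and the 0xffff mask are illustrative, while
dma_set_seg_boundary() is the real kernel API.]

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static int example_probe(struct pci_dev *pdev,
                             const struct pci_device_id *id)
    {
            int err = pci_enable_device(pdev);

            if (err)
                    return err;

            /* No mapped element may cross a 64KB bus-address line;
             * with this patch the GART respects that when carving
             * space out of the aperture. */
            return dma_set_seg_boundary(&pdev->dev, 0xffff);
    }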