提交 29111f57 编写于 作者: J Jesse Barnes 提交者: Jesse Barnes

Merge branch 'x86/iommu' of...

Merge branch 'x86/iommu' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip into for-linus
...@@ -29,9 +29,6 @@ ...@@ -29,9 +29,6 @@
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
#define to_pages(addr, size) \
(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
#define EXIT_LOOP_COUNT 10000000 #define EXIT_LOOP_COUNT 10000000
static DEFINE_RWLOCK(amd_iommu_devtable_lock); static DEFINE_RWLOCK(amd_iommu_devtable_lock);
...@@ -185,7 +182,7 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid, ...@@ -185,7 +182,7 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
u64 address, size_t size) u64 address, size_t size)
{ {
int s = 0; int s = 0;
unsigned pages = to_pages(address, size); unsigned pages = iommu_num_pages(address, size);
address &= PAGE_MASK; address &= PAGE_MASK;
...@@ -557,8 +554,8 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu, ...@@ -557,8 +554,8 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
if (iommu->exclusion_start && if (iommu->exclusion_start &&
iommu->exclusion_start < dma_dom->aperture_size) { iommu->exclusion_start < dma_dom->aperture_size) {
unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT; unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
int pages = to_pages(iommu->exclusion_start, int pages = iommu_num_pages(iommu->exclusion_start,
iommu->exclusion_length); iommu->exclusion_length);
dma_ops_reserve_addresses(dma_dom, startpage, pages); dma_ops_reserve_addresses(dma_dom, startpage, pages);
} }
...@@ -767,7 +764,7 @@ static dma_addr_t __map_single(struct device *dev, ...@@ -767,7 +764,7 @@ static dma_addr_t __map_single(struct device *dev,
unsigned int pages; unsigned int pages;
int i; int i;
pages = to_pages(paddr, size); pages = iommu_num_pages(paddr, size);
paddr &= PAGE_MASK; paddr &= PAGE_MASK;
address = dma_ops_alloc_addresses(dev, dma_dom, pages); address = dma_ops_alloc_addresses(dev, dma_dom, pages);
...@@ -802,7 +799,7 @@ static void __unmap_single(struct amd_iommu *iommu, ...@@ -802,7 +799,7 @@ static void __unmap_single(struct amd_iommu *iommu,
if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size)) if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
return; return;
pages = to_pages(dma_addr, size); pages = iommu_num_pages(dma_addr, size);
dma_addr &= PAGE_MASK; dma_addr &= PAGE_MASK;
start = dma_addr; start = dma_addr;
......
...@@ -67,9 +67,6 @@ static u32 gart_unmapped_entry; ...@@ -67,9 +67,6 @@ static u32 gart_unmapped_entry;
(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT) (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28)) #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
#define to_pages(addr, size) \
(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
#define EMERGENCY_PAGES 32 /* = 128KB */ #define EMERGENCY_PAGES 32 /* = 128KB */
#ifdef CONFIG_AGP #ifdef CONFIG_AGP
...@@ -241,7 +238,7 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size) ...@@ -241,7 +238,7 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
size_t size, int dir) size_t size, int dir)
{ {
unsigned long npages = to_pages(phys_mem, size); unsigned long npages = iommu_num_pages(phys_mem, size);
unsigned long iommu_page = alloc_iommu(dev, npages); unsigned long iommu_page = alloc_iommu(dev, npages);
int i; int i;
...@@ -304,7 +301,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr, ...@@ -304,7 +301,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
return; return;
iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT; iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
npages = to_pages(dma_addr, size); npages = iommu_num_pages(dma_addr, size);
for (i = 0; i < npages; i++) { for (i = 0; i < npages; i++) {
iommu_gatt_base[iommu_page + i] = gart_unmapped_entry; iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
CLEAR_LEAK(iommu_page + i); CLEAR_LEAK(iommu_page + i);
...@@ -387,7 +384,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start, ...@@ -387,7 +384,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
} }
addr = phys_addr; addr = phys_addr;
pages = to_pages(s->offset, s->length); pages = iommu_num_pages(s->offset, s->length);
while (pages--) { while (pages--) {
iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr); iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
SET_LEAK(iommu_page); SET_LEAK(iommu_page);
...@@ -470,7 +467,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) ...@@ -470,7 +467,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
seg_size += s->length; seg_size += s->length;
need = nextneed; need = nextneed;
pages += to_pages(s->offset, s->length); pages += iommu_num_pages(s->offset, s->length);
ps = s; ps = s;
} }
if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0) if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
......
...@@ -8,3 +8,4 @@ extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, ...@@ -8,3 +8,4 @@ extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long align_mask); unsigned long align_mask);
extern void iommu_area_free(unsigned long *map, unsigned long start, extern void iommu_area_free(unsigned long *map, unsigned long start,
unsigned int nr); unsigned int nr);
extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len);
...@@ -80,3 +80,11 @@ void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr) ...@@ -80,3 +80,11 @@ void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
} }
} }
EXPORT_SYMBOL(iommu_area_free); EXPORT_SYMBOL(iommu_area_free);
/*
 * iommu_num_pages - count the pages spanned by a DMA mapping
 * @addr: bus/physical address of the start of the mapping (need not be
 *        page aligned)
 * @len:  length of the mapping in bytes
 *
 * Returns how many whole pages the byte range [addr, addr + len) touches.
 * The offset of @addr within its page is folded into the length before
 * rounding up, so an unaligned mapping that straddles a page boundary is
 * counted correctly.
 */
unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
{
	/* bytes from the start of addr's page to the end of the mapping,
	 * rounded up to a whole number of pages */
	return roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(iommu_num_pages);
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册