Commit 51231740 authored by Christoph Hellwig

dma-mapping: always use VM_DMA_COHERENT for generic DMA remap

Currently the generic DMA remap allocator takes a vm_flags argument from
the caller, which is a little confusing.  We just introduced a generic
vmalloc-level flag (VM_DMA_COHERENT) to identify DMA-coherent allocations,
so use it everywhere and remove the now-pointless argument.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Parent fe9041c2
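
In short, the vm_flags parameter disappears from the dma_common_*_remap helpers and the allocator tags every area as VM_DMA_COHERENT itself. A minimal before/after sketch of the affected declarations, condensed from the include/linux/dma-mapping.h hunk below:

/* Before this commit: every caller had to pick a vm_flags value
 * (VM_DMA_COHERENT, VM_MAP, or VM_USERMAP, depending on the call site). */
void *dma_common_pages_remap(struct page **pages, size_t size,
		unsigned long vm_flags, pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/* After: the flag is implied; get_vm_area_caller() is always invoked with
 * VM_DMA_COHERENT, and the free path checks for exactly that flag. */
void *dma_common_pages_remap(struct page **pages, size_t size,
		pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);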
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -343,13 +343,12 @@ static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
 {
-	return dma_common_contiguous_remap(page, size, VM_DMA_COHERENT,
-			prot, caller);
+	return dma_common_contiguous_remap(page, size, prot, caller);
 }
 
 static void __dma_free_remap(void *cpu_addr, size_t size)
 {
-	dma_common_free_remap(cpu_addr, size, VM_DMA_COHERENT);
+	dma_common_free_remap(cpu_addr, size);
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
@@ -1365,8 +1364,7 @@ static void *
 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
 {
-	return dma_common_pages_remap(pages, size, VM_DMA_COHERENT, prot,
-			caller);
+	return dma_common_pages_remap(pages, size, prot, caller);
 }
 
 /*
@@ -1609,7 +1607,7 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	}
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
-		dma_common_free_remap(cpu_addr, size, VM_DMA_COHERENT);
+		dma_common_free_remap(cpu_addr, size);
 
 	__iommu_remove_mapping(dev, handle, size);
 	__iommu_free_buffer(dev, pages, size, attrs);
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -167,7 +167,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	if (PageHighMem(page)) {
 		void *p;
 
-		p = dma_common_contiguous_remap(page, size, VM_MAP,
+		p = dma_common_contiguous_remap(page, size,
				pgprot_noncached(PAGE_KERNEL),
				__builtin_return_address(0));
 		if (!p) {
@@ -192,7 +192,7 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		page = virt_to_page(platform_vaddr_to_cached(vaddr));
 	} else {
 #ifdef CONFIG_MMU
-		dma_common_free_remap(vaddr, size, VM_MAP);
+		dma_common_free_remap(vaddr, size);
 #endif
 		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
 	}
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -617,7 +617,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
			< size)
 		goto out_free_sg;
 
-	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
 	if (!vaddr)
 		goto out_unmap;
@@ -941,7 +941,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 		pages = __iommu_dma_get_pages(cpu_addr);
 		if (!pages)
 			page = vmalloc_to_page(cpu_addr);
-		dma_common_free_remap(cpu_addr, alloc_size, VM_USERMAP);
+		dma_common_free_remap(cpu_addr, alloc_size);
 	} else {
 		/* Lowmem means a coherent atomic or CMA allocation */
 		page = virt_to_page(cpu_addr);
@@ -979,7 +979,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 
 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
-				VM_USERMAP, prot, __builtin_return_address(0));
+				prot, __builtin_return_address(0));
 		if (!cpu_addr)
 			goto out_free_pages;
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -627,13 +627,11 @@ extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		unsigned long attrs);
 
 void *dma_common_contiguous_remap(struct page *page, size_t size,
-			unsigned long vm_flags,
			pgprot_t prot, const void *caller);
 
 void *dma_common_pages_remap(struct page **pages, size_t size,
-			unsigned long vm_flags, pgprot_t prot,
-			const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
+			pgprot_t prot, const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size);
 
 bool dma_in_atomic_pool(void *start, size_t size);
 void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -12,12 +12,11 @@
 #include <linux/vmalloc.h>
 
 static struct vm_struct *__dma_common_pages_remap(struct page **pages,
-			size_t size, unsigned long vm_flags, pgprot_t prot,
-			const void *caller)
+			size_t size, pgprot_t prot, const void *caller)
 {
 	struct vm_struct *area;
 
-	area = get_vm_area_caller(size, vm_flags, caller);
+	area = get_vm_area_caller(size, VM_DMA_COHERENT, caller);
 	if (!area)
 		return NULL;
@@ -34,12 +33,11 @@ static struct vm_struct *__dma_common_pages_remap(struct page **pages,
  * Cannot be used in non-sleeping contexts
  */
 void *dma_common_pages_remap(struct page **pages, size_t size,
-			unsigned long vm_flags, pgprot_t prot,
-			const void *caller)
+			pgprot_t prot, const void *caller)
 {
 	struct vm_struct *area;
 
-	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+	area = __dma_common_pages_remap(pages, size, prot, caller);
 	if (!area)
 		return NULL;
@@ -53,7 +51,6 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
  * Cannot be used in non-sleeping contexts
  */
 void *dma_common_contiguous_remap(struct page *page, size_t size,
-			unsigned long vm_flags,
			pgprot_t prot, const void *caller)
 {
 	int i;
@@ -67,7 +64,7 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
 	for (i = 0; i < (size >> PAGE_SHIFT); i++)
 		pages[i] = nth_page(page, i);
 
-	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+	area = __dma_common_pages_remap(pages, size, prot, caller);
 
 	kfree(pages);
@@ -79,11 +76,11 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
 /*
  * Unmaps a range previously mapped by dma_common_*_remap
  */
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+void dma_common_free_remap(void *cpu_addr, size_t size)
 {
 	struct vm_struct *area = find_vm_area(cpu_addr);
 
-	if (!area || (area->flags & vm_flags) != vm_flags) {
+	if (!area || area->flags != VM_DMA_COHERENT) {
 		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
 		return;
 	}
@@ -136,7 +133,7 @@ static int __init dma_atomic_pool_init(void)
 	if (!atomic_pool)
 		goto free_page;
 
-	addr = dma_common_contiguous_remap(page, atomic_pool_size, VM_USERMAP,
+	addr = dma_common_contiguous_remap(page, atomic_pool_size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
 	if (!addr)
@@ -153,7 +150,7 @@ static int __init dma_atomic_pool_init(void)
 	return 0;
 
 remove_mapping:
-	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
+	dma_common_free_remap(addr, atomic_pool_size);
 destroy_genpool:
 	gen_pool_destroy(atomic_pool);
 	atomic_pool = NULL;
@@ -228,7 +225,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	arch_dma_prep_coherent(page, size);
 
 	/* create a coherent mapping */
-	ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
+	ret = dma_common_contiguous_remap(page, size,
			dma_pgprot(dev, PAGE_KERNEL, attrs),
			__builtin_return_address(0));
 	if (!ret) {