Commit 15237e1f authored by Marek Szyprowski

ARM: dma-mapping: move all dma bounce code to separate dma ops structure

This patch removes dma bounce hooks from the common dma mapping
implementation on ARM architecture and creates a separate set of
dma_map_ops for dma bounce devices.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
Parent 2a550e73
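As a rough illustration (not part of this commit), the sketch below shows how a board file might opt one device into dmabounce once this change is in place. The setup function, the DMA window limit, and the pool sizes are hypothetical assumptions; only dmabounce_register_dev() and its needs-bounce callback come from the existing dmabounce API. The point of the patch shows up in the last step: registration now also installs the new dmabounce_ops via set_dma_ops(), so only registered devices are routed through the bounce code while every other device keeps the common arm_dma_ops.

/* Illustrative sketch only, not from this commit: a hypothetical board
 * file opting one device into dmabounce. */
#include <linux/device.h>
#include <linux/dma-mapping.h>	/* pulls in the ARM dmabounce declarations */

/* Hypothetical DMA window: bounce any buffer that ends above 64 MiB. */
static int example_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) > (64 << 20);
}

static int example_board_dma_init(struct device *dev)
{
	/* 512-byte and 4096-byte bounce pools; larger buffers are allocated on demand. */
	int ret = dmabounce_register_dev(dev, 512, 4096, example_needs_bounce);

	/*
	 * After this patch, dmabounce_register_dev() also calls
	 * set_dma_ops(dev, &dmabounce_ops), so dma_map_page(),
	 * dma_sync_single_for_cpu() and friends dispatch to the
	 * bounce implementation for this device only.
	 */
	return ret;
}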
@@ -308,8 +308,9 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir)
+static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
 {
 	dma_addr_t dma_addr;
 	int ret;
@@ -324,7 +325,7 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 		return DMA_ERROR_CODE;

 	if (ret == 0) {
-		__dma_page_cpu_to_dev(page, offset, size, dir);
+		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
 		return dma_addr;
 	}

@@ -335,7 +336,6 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 	return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(__dma_map_page);

 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -343,8 +343,8 @@ EXPORT_SYMBOL(__dma_map_page);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
+static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct safe_buffer *buf;

@@ -353,16 +353,14 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
 	if (!buf) {
-		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
-			dma_addr & ~PAGE_MASK, size, dir);
+		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
 		return;
 	}

 	unmap_single(dev, buf, size, dir);
 }
-EXPORT_SYMBOL(__dma_unmap_page);

-int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
@@ -392,9 +390,17 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_cpu);

-int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+static void dmabounce_sync_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
+		return;
+
+	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
+}
+
+static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
@@ -424,7 +430,35 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_device);

+static void dmabounce_sync_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
+		return;
+
+	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+}
+
+static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (dev->archdata.dmabounce)
+		return 0;
+
+	return arm_dma_ops.set_dma_mask(dev, dma_mask);
+}
+
+static struct dma_map_ops dmabounce_ops = {
+	.map_page		= dmabounce_map_page,
+	.unmap_page		= dmabounce_unmap_page,
+	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
+	.sync_single_for_device	= dmabounce_sync_for_device,
+	.map_sg			= arm_dma_map_sg,
+	.unmap_sg		= arm_dma_unmap_sg,
+	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
+	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
+	.set_dma_mask		= dmabounce_set_mask,
+};

 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 		const char *name, unsigned long size)
@@ -486,6 +520,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 #endif

 	dev->archdata.dmabounce = device_info;
+	set_dma_ops(dev, &dmabounce_ops);

 	dev_info(dev, "dmabounce: registered device\n");

@@ -504,6 +539,7 @@ void dmabounce_unregister_dev(struct device *dev)
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

 	dev->archdata.dmabounce = NULL;
+	set_dma_ops(dev, NULL);

 	if (!device_info) {
 		dev_warn(dev,
......
@@ -84,62 +84,6 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 }
 #endif

-/*
- * The DMA API is built upon the notion of "buffer ownership". A buffer
- * is either exclusively owned by the CPU (and therefore may be accessed
- * by it) or exclusively owned by the DMA device. These helper functions
- * represent the transitions between these two ownership states.
- *
- * Note, however, that on later ARMs, this notion does not work due to
- * speculative prefetches. We model our approach on the assumption that
- * the CPU does do speculative prefetches, which means we clean caches
- * before transfers and delay cache invalidation until transfer completion.
- *
- * Private support functions: these are not part of the API and are
- * liable to change. Drivers must not use these.
- */
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_cpu_to_dev(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_dev_to_cpu(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
-		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
-		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-extern int dma_supported(struct device *, u64);
-extern int dma_set_mask(struct device *, u64);
-
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
@@ -163,6 +107,8 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
 {
 }

+extern int dma_supported(struct device *dev, u64 mask);
+
 /**
  * dma_alloc_coherent - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -235,7 +181,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,

 extern void __init init_consistent_dma_size(unsigned long size);

-#ifdef CONFIG_DMABOUNCE
 /*
  * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
  * and utilize bounce buffers as needed to work around limited DMA windows.
@@ -275,47 +220,7 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
  */
 extern void dmabounce_unregister_dev(struct device *);

-/*
- * The DMA API, implemented by dmabounce.c. See below for descriptions.
- */
-extern dma_addr_t __dma_map_page(struct device *, struct page *,
-		unsigned long, size_t, enum dma_data_direction);
-extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
-		enum dma_data_direction);
-
-/*
- * Private functions
- */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-#else
-static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
-	size_t size, enum dma_data_direction dir)
-{
-	return 1;
-}
-
-static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
-	size_t size, enum dma_data_direction dir)
-{
-	return 1;
-}
-
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(page, offset, size, dir);
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-		handle & ~PAGE_MASK, size, dir);
-}
-#endif /* CONFIG_DMABOUNCE */
-
 /*
  * The scatter list versions of the above methods.
......
@@ -29,6 +29,75 @@
 #include "mm.h"

+/*
+ * The DMA API is built upon the notion of "buffer ownership". A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device. These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches. We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ * Private support functions: these are not part of the API and are
+ * liable to change. Drivers must not use these.
+ */
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_cpu_to_dev(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_cpu_to_dev(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_dev_to_cpu(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_dev_to_cpu(kaddr, size, dir);
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_cpu_to_dev(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_dev_to_cpu(page, off, size, dir);
+}
+
+static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+	__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+		handle & ~PAGE_MASK, size, dir);
+}
+
 /**
  * arm_dma_map_page - map a portion of a page for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -76,9 +145,6 @@ static inline void arm_dma_sync_single_for_cpu(struct device *dev,
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
-		return;
-
 	__dma_page_dev_to_cpu(page, offset, size, dir);
 }

@@ -87,9 +153,6 @@ static inline void arm_dma_sync_single_for_device(struct device *dev,
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	if (!dmabounce_sync_for_device(dev, handle, size, dir))
-		return;
-
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }

@@ -599,7 +662,6 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	}
 	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
-EXPORT_SYMBOL(___dma_page_cpu_to_dev);

 void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
@@ -619,7 +681,6 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
 		set_bit(PG_dcache_clean, &page->flags);
 }
-EXPORT_SYMBOL(___dma_page_dev_to_cpu);

 /**
  * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
@@ -737,9 +798,7 @@ static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
 	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
 		return -EIO;

-#ifndef CONFIG_DMABOUNCE
 	*dev->dma_mask = dma_mask;
-#endif

 	return 0;
 }
......
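The new dmabounce_ops structure relies on the per-device dma_map_ops dispatch introduced earlier in this series. A simplified sketch of that dispatch is shown below for illustration only; it is not the exact ARM code, and the wrapper omits the debugging hooks of the real generic helpers.

/* Simplified sketch of per-device dma_map_ops dispatch, for illustration only. */
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* A device that called set_dma_ops() -- e.g. via dmabounce_register_dev() --
	 * uses its private ops; all other devices fall back to the common arm_dma_ops. */
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return &arm_dma_ops;
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	/* With this commit, a dmabounce device ends up in dmabounce_map_page();
	 * every other device goes through the regular arm_dma_ops .map_page. */
	return get_dma_ops(dev)->map_page(dev, page, offset, size, dir, NULL);
}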