Commit 24056f52 authored by Russell King

ARM: DMA: add support for DMA debugging

Add ARM support for the DMA debug infrastructure, which allows
DMA API usage to be debugged.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Parent 9eedd963
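In short: the patch renames ARM's low-level mapping helpers to __dma_map_single(), __dma_unmap_single(), __dma_map_page() and __dma_unmap_page(), and turns the public dma_* entry points into common wrappers that call the debug_dma_* hooks from lib/dma-debug.c. With CONFIG_DMA_API_DEBUG enabled, those hooks track every active mapping and warn on misuse. A hypothetical driver fragment (not part of this commit; device, size and function name invented) showing the kind of bug the checks catch, where the unmap direction disagrees with the map:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

/*
 * Hypothetical example: map a buffer for device-to-memory DMA, then
 * unmap it with the wrong direction.  With CONFIG_DMA_API_DEBUG=y,
 * the unmap is compared against the tracking entry created at map
 * time and the direction mismatch is reported.
 */
static int demo_dma_direction_bug(struct device *dev)
{
	size_t size = 4096;
	void *buf = kmalloc(size, GFP_KERNEL);
	dma_addr_t handle;

	if (!buf)
		return -ENOMEM;

	handle = dma_map_single(dev, buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		kfree(buf);
		return -ENOMEM;
	}

	/* ... device DMA would happen here ... */

	/* BUG: direction differs from the dma_map_single() above. */
	dma_unmap_single(dev, handle, size, DMA_TO_DEVICE);

	kfree(buf);
	return 0;
}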
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2,6 +2,7 @@ config ARM
 	bool
 	default y
 	select HAVE_AOUT
+	select HAVE_DMA_API_DEBUG
 	select HAVE_IDE
 	select HAVE_MEMBLOCK
 	select RTC_LIB
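Selecting HAVE_DMA_API_DEBUG only advertises that this architecture implements the debug hooks; the checks themselves are switched on by the generic DMA_API_DEBUG option that lib/Kconfig.debug offers under "Kernel hacking". A minimal .config fragment (a sketch, assuming the generic option names):

CONFIG_HAVE_DMA_API_DEBUG=y
CONFIG_DMA_API_DEBUG=y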
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -328,7 +328,7 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
 		enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -338,7 +338,7 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 
 	return map_single(dev, ptr, size, dir);
 }
-EXPORT_SYMBOL(dma_map_single);
+EXPORT_SYMBOL(__dma_map_single);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -346,7 +346,7 @@ EXPORT_SYMBOL(dma_map_single);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -354,9 +354,9 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	unmap_single(dev, dma_addr, size, dir);
 }
-EXPORT_SYMBOL(dma_unmap_single);
+EXPORT_SYMBOL(__dma_unmap_single);
 
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
+dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
@@ -372,7 +372,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
 
 	return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(dma_map_page);
+EXPORT_SYMBOL(__dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -380,7 +380,7 @@ EXPORT_SYMBOL(dma_map_page);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -388,7 +388,7 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	unmap_single(dev, dma_addr, size, dir);
 }
-EXPORT_SYMBOL(dma_unmap_page);
+EXPORT_SYMBOL(__dma_unmap_page);
 
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		unsigned long off, size_t sz, enum dma_data_direction dir)
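Only the names change in dmabounce.c: the bounce-buffer implementations become __dma_map_single() and friends, freeing the dma_* names so that the common wrappers in asm/dma-mapping.h (next file) can layer the debug hooks on top of both the dmabounce path and the plain cache-maintenance path.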
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,6 +5,7 @@
 
 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
 
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
@@ -297,13 +298,13 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
 /*
  * The DMA API, implemented by dmabounce.c.  See below for descriptions.
  */
-extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
 		enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
 		enum dma_data_direction);
-extern dma_addr_t dma_map_page(struct device *, struct page *,
+extern dma_addr_t __dma_map_page(struct device *, struct page *,
 		unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
 		enum dma_data_direction);
 
 /*
@@ -327,6 +328,34 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 	return 1;
 }
 
+
+static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_single_cpu_to_dev(cpu_addr, size, dir);
+	return virt_to_dma(dev, cpu_addr);
+}
+
+static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+	__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+}
+
+static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+		handle & ~PAGE_MASK, size, dir);
+}
+#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -344,11 +373,16 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t addr;
+
 	BUG_ON(!valid_dma_direction(dir));
 
-	__dma_single_cpu_to_dev(cpu_addr, size, dir);
+	addr = __dma_map_single(dev, cpu_addr, size, dir);
+	debug_dma_map_page(dev, virt_to_page(cpu_addr),
+			(unsigned long)cpu_addr & ~PAGE_MASK, size,
+			dir, addr, true);
 
-	return virt_to_dma(dev, cpu_addr);
+	return addr;
 }
 
 /**
@@ -368,11 +402,14 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t addr;
+
 	BUG_ON(!valid_dma_direction(dir));
 
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	addr = __dma_map_page(dev, page, offset, size, dir);
+	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+	return addr;
 }
 
 /**
@@ -392,7 +429,8 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+	debug_dma_unmap_page(dev, handle, size, dir, true);
+	__dma_unmap_single(dev, handle, size, dir);
 }
 
 /**
@@ -412,10 +450,9 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-		handle & ~PAGE_MASK, size, dir);
+	debug_dma_unmap_page(dev, handle, size, dir, false);
+	__dma_unmap_page(dev, handle, size, dir);
 }
-#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_sync_single_range_for_cpu
@@ -441,6 +478,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
+	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
+
 	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
 		return;
 
@@ -453,6 +492,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
+	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
+
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;
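Note why the #endif /* CONFIG_DMABOUNCE */ moved: previously the inline dma_* functions existed only in the non-dmabounce build, with dmabounce.c exporting its own dma_map_single() and friends. Now each configuration supplies only the __dma_* backend, and a single debug-instrumented wrapper is built for both. An abridged sketch of the resulting header structure, condensed from the hunks above to one function:

/*
 * Abridged sketch, not the verbatim header: one common wrapper over
 * a per-configuration backend.
 */
#ifdef CONFIG_DMABOUNCE
extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);	/* bounce-buffer version */
#else
static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	__dma_single_cpu_to_dev(cpu_addr, size, dir);	/* cache maintenance */
	return virt_to_dma(dev, cpu_addr);
}
#endif

/* Common wrapper, built for both configurations. */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr = __dma_map_single(dev, cpu_addr, size, dir);

	debug_dma_map_page(dev, virt_to_page(cpu_addr),
			(unsigned long)cpu_addr & ~PAGE_MASK, size,
			dir, addr, true);
	return addr;
}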
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -554,17 +554,20 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	struct scatterlist *s;
 	int i, j;
 
+	BUG_ON(!valid_dma_direction(dir));
+
 	for_each_sg(sg, s, nents, i) {
-		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
 						s->length, dir);
 		if (dma_mapping_error(dev, s->dma_address))
 			goto bad_mapping;
 	}
+	debug_dma_map_sg(dev, sg, nents, nents, dir);
 	return nents;
 
  bad_mapping:
 	for_each_sg(sg, s, i, j)
-		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 	return 0;
 }
 EXPORT_SYMBOL(dma_map_sg);
@@ -585,8 +588,10 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 	struct scatterlist *s;
 	int i;
 
+	debug_dma_unmap_sg(dev, sg, nents, dir);
+
 	for_each_sg(sg, s, nents, i)
-		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 }
 EXPORT_SYMBOL(dma_unmap_sg);
 
@@ -611,6 +616,8 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		__dma_page_dev_to_cpu(sg_page(s), s->offset,
 				      s->length, dir);
 	}
+
+	debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
 }
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 
@@ -635,5 +642,16 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		__dma_page_cpu_to_dev(sg_page(s), s->offset,
 				      s->length, dir);
 	}
+
+	debug_dma_sync_sg_for_device(dev, sg, nents, dir);
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+#define PREALLOC_DMA_DEBUG_ENTRIES	4096
+
+static int __init dma_debug_do_init(void)
+{
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+	return 0;
+}
+fs_initcall(dma_debug_do_init);