Commit 4ac1c68e authored by Christoph Hellwig

nds32: consolidate DMA cache maintenance routines

Make sure all other DMA methods call nds32_dma_sync_single_for_{device,cpu}
to perform cache maintenance, and remove the consistent_sync helper that
implemented both with entirely separate code based on an argument.

Also make sure these helpers handle highmem properly, for which the code
is copied and pasted from mips.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Greentime Hu <greentime@andestech.com>
Tested-by: Greentime Hu <greentime@andestech.com>
Parent 0ead51c3
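The change below boils down to one pattern: a single cache_op() walker applies a writeback or invalidate primitive to a physical range, and two direction-aware helpers, nds32_dma_sync_single_for_{device,cpu}, pick the primitive; the map/unmap and scatter-gather paths then just call those helpers. As a rough orientation before reading the diff, here is a minimal user-space C sketch of that call pattern; the cpu_dma_wb_range()/cpu_dma_inval_range() stubs and the flat-address cache_op() below are editorial stand-ins for the real nds32 cache primitives and the page-walking, highmem-aware version in the patch, not kernel code.

/* Editorial sketch only -- not the kernel code from this commit. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* Stand-ins for the nds32 cache primitives used in the patch. */
static void cpu_dma_wb_range(unsigned long start, unsigned long end)
{
        printf("writeback  [%#lx, %#lx)\n", start, end);
}

static void cpu_dma_inval_range(unsigned long start, unsigned long end)
{
        printf("invalidate [%#lx, %#lx)\n", start, end);
}

/* One walker applies the chosen range operation to a physical span.
 * The real cache_op() walks struct pages and kmaps highmem instead. */
static void cache_op(uintptr_t paddr, size_t size,
                     void (*fn)(unsigned long, unsigned long))
{
        fn((unsigned long)paddr, (unsigned long)(paddr + size));
}

/* Device is about to own the buffer: write dirty lines back first. */
static void sync_single_for_device(uintptr_t paddr, size_t size,
                                   enum dma_data_direction dir)
{
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                cache_op(paddr, size, cpu_dma_wb_range);
}

/* CPU is about to read what the device wrote: drop stale lines. */
static void sync_single_for_cpu(uintptr_t paddr, size_t size,
                                enum dma_data_direction dir)
{
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                cache_op(paddr, size, cpu_dma_inval_range);
}

int main(void)
{
        /* map_page-style path: make the buffer visible to the device. */
        sync_single_for_device(0x8000, 256, DMA_TO_DEVICE);
        /* unmap_page-style path: make device writes visible to the CPU. */
        sync_single_for_cpu(0x8000, 256, DMA_FROM_DEVICE);
        return 0;
}

The direction rules match the diff: before the device touches the buffer, dirty CPU cache lines are written back (DMA_TO_DEVICE and DMA_BIDIRECTIONAL); before the CPU reads data the device produced, stale lines are invalidated (DMA_FROM_DEVICE and DMA_BIDIRECTIONAL).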
@@ -22,11 +22,6 @@
 
 static pte_t *consistent_pte;
 static DEFINE_RAW_SPINLOCK(consistent_lock);
 
-enum master_type {
-        FOR_CPU = 0,
-        FOR_DEVICE = 1,
-};
-
 /*
  * VM region handling support.
  *
@@ -333,106 +328,105 @@ static int __init consistent_init(void)
 }
 
 core_initcall(consistent_init);
-static void consistent_sync(void *vaddr, size_t size, int direction, int master_type);
-
-static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page,
-                                     unsigned long offset, size_t size,
-                                     enum dma_data_direction dir,
-                                     unsigned long attrs)
-{
-        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-                consistent_sync((void *)(page_address(page) + offset), size, dir, FOR_DEVICE);
-
-        return page_to_phys(page) + offset;
-}
-
-static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle,
-                                 size_t size, enum dma_data_direction dir,
-                                 unsigned long attrs)
-{
-        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-                consistent_sync(phys_to_virt(handle), size, dir, FOR_CPU);
-}
-
-/*
- * Make an area consistent for devices.
- */
-static void consistent_sync(void *vaddr, size_t size, int direction, int master_type)
+static inline void cache_op(phys_addr_t paddr, size_t size,
+                void (*fn)(unsigned long start, unsigned long end))
 {
-        unsigned long start = (unsigned long)vaddr;
-        unsigned long end = start + size;
+        struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
+        unsigned offset = paddr & ~PAGE_MASK;
+        size_t left = size;
+        unsigned long start;
 
-        if (master_type == FOR_CPU) {
-                switch (direction) {
-                case DMA_TO_DEVICE:
-                        break;
-                case DMA_FROM_DEVICE:
-                case DMA_BIDIRECTIONAL:
-                        cpu_dma_inval_range(start, end);
-                        break;
-                default:
-                        BUG();
-                }
-        } else {
-                /* FOR_DEVICE */
-                switch (direction) {
-                case DMA_FROM_DEVICE:
-                        break;
-                case DMA_TO_DEVICE:
-                case DMA_BIDIRECTIONAL:
-                        cpu_dma_wb_range(start, end);
-                        break;
-                default:
-                        BUG();
-                }
-        }
-}
+        do {
+                size_t len = left;
 
-static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                            int nents, enum dma_data_direction dir,
-                            unsigned long attrs)
-{
-        int i;
-
-        for (i = 0; i < nents; i++, sg++) {
-                void *virt;
-                unsigned long pfn;
-                struct page *page = sg_page(sg);
-
-                sg->dma_address = sg_phys(sg);
-                pfn = page_to_pfn(page) + sg->offset / PAGE_SIZE;
-                page = pfn_to_page(pfn);
                 if (PageHighMem(page)) {
-                        virt = kmap_atomic(page);
-                        consistent_sync(virt, sg->length, dir, FOR_CPU);
-                        kunmap_atomic(virt);
+                        void *addr;
+
+                        if (offset + len > PAGE_SIZE) {
+                                if (offset >= PAGE_SIZE) {
+                                        page += offset >> PAGE_SHIFT;
+                                        offset &= ~PAGE_MASK;
+                                }
+                                len = PAGE_SIZE - offset;
+                        }
+
+                        addr = kmap_atomic(page);
+                        start = (unsigned long)(addr + offset);
+                        fn(start, start + len);
+                        kunmap_atomic(addr);
                 } else {
-                        if (sg->offset > PAGE_SIZE)
-                                panic("sg->offset:%08x > PAGE_SIZE\n",
-                                      sg->offset);
-                        virt = page_address(page) + sg->offset;
-                        consistent_sync(virt, sg->length, dir, FOR_CPU);
+                        start = (unsigned long)phys_to_virt(paddr);
+                        fn(start, start + size);
                 }
+                offset = 0;
+                page++;
+                left -= len;
+        } while (left);
+}
+
+static void
+nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
+                                 size_t size, enum dma_data_direction dir)
+{
+        switch (dir) {
+        case DMA_FROM_DEVICE:
+                break;
+        case DMA_TO_DEVICE:
+        case DMA_BIDIRECTIONAL:
+                cache_op(handle, size, cpu_dma_wb_range);
+                break;
+        default:
+                BUG();
         }
-        return nents;
 }
 
-static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-                               int nhwentries, enum dma_data_direction dir,
-                               unsigned long attrs)
+static void
+nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
                              size_t size, enum dma_data_direction dir)
 {
+        switch (dir) {
+        case DMA_TO_DEVICE:
+                break;
+        case DMA_FROM_DEVICE:
+        case DMA_BIDIRECTIONAL:
+                cache_op(handle, size, cpu_dma_inval_range);
+                break;
+        default:
+                BUG();
+        }
 }
 
-static void
-nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
-                              size_t size, enum dma_data_direction dir)
+static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     unsigned long attrs)
 {
-        consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_CPU);
+        dma_addr_t dma_addr = page_to_phys(page) + offset;
+
+        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                nds32_dma_sync_single_for_device(dev, handle, size, dir);
+        return dma_addr;
 }
 
-static void
-nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
-                                 size_t size, enum dma_data_direction dir)
+static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle,
                                 size_t size, enum dma_data_direction dir,
                                 unsigned long attrs)
 {
-        consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_DEVICE);
+        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                nds32_dma_sync_single_for_cpu(dev, handle, size, dir);
 }
 
 static void
+nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                             int nents, enum dma_data_direction dir)
+{
+        int i;
+
+        for (i = 0; i < nents; i++, sg++) {
+                nds32_dma_sync_single_for_device(dev, sg_dma_address(sg),
+                                sg->length, dir);
+        }
+}
+
+static void
@@ -442,23 +436,28 @@ nds32_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
         int i;
 
         for (i = 0; i < nents; i++, sg++) {
-                char *virt =
-                    page_address((struct page *)sg->page_link) + sg->offset;
-                consistent_sync(virt, sg->length, dir, FOR_CPU);
+                nds32_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+                                sg->length, dir);
         }
 }
 
-static void
-nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-                             int nents, enum dma_data_direction dir)
+static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg,
+                            int nents, enum dma_data_direction dir,
+                            unsigned long attrs)
 {
         int i;
 
         for (i = 0; i < nents; i++, sg++) {
-                char *virt =
-                    page_address((struct page *)sg->page_link) + sg->offset;
-                consistent_sync(virt, sg->length, dir, FOR_DEVICE);
+                nds32_dma_sync_single_for_device(dev, sg_dma_address(sg),
+                                sg->length, dir);
         }
+        return nents;
+}
+
+static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+                               int nhwentries, enum dma_data_direction dir,
+                               unsigned long attrs)
+{
 }
 
 struct dma_map_ops nds32_dma_ops = {