Commit c9eb6172 authored by Christoph Hellwig

dma-mapping: turn dma_cache_sync into a dma_map_ops method

After we removed all the dead wood it turns out only two architectures
actually implement dma_cache_sync as a real op: mips and parisc.  Add
a cache_sync method to struct dma_map_ops and implement it for the
mips default DMA ops, and the parisc pa11 ops.

Note that arm, arc and openrisc support DMA_ATTR_NON_CONSISTENT, but
never provided a functional dma_cache_sync implementation, which
seems somewhat odd.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Parent e0c6584d
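For context, a minimal usage sketch (hypothetical driver code, not part of this commit; the function name, device, buffer size, and error handling are illustrative) of how a consumer of DMA_ATTR_NON_CONSISTENT memory is expected to call dma_cache_sync(), which after this change dispatches to the architecture's ->cache_sync() method and is a no-op when the method is absent:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>

/* Hypothetical driver code, for illustration only. */
static int foo_fill_and_sync(struct device *dev)
{
	dma_addr_t dma_handle;
	size_t size = 4096;
	void *vaddr;

	/* May return cacheable memory; the CPU has to sync caches by hand. */
	vaddr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
				DMA_ATTR_NON_CONSISTENT);
	if (!vaddr)
		return -ENOMEM;

	memset(vaddr, 0, size);		/* CPU writes to the buffer */

	/*
	 * Make the CPU writes visible to the device.  With this commit the
	 * call goes through get_dma_ops(dev)->cache_sync() and is a no-op
	 * on architectures that do not implement the method.
	 */
	dma_cache_sync(dev, vaddr, size, DMA_TO_DEVICE);

	/* ... program dma_handle into the device ... */

	dma_free_attrs(dev, size, vaddr, dma_handle, DMA_ATTR_NON_CONSISTENT);
	return 0;
}

dma_alloc_attrs(), dma_free_attrs(), DMA_ATTR_NON_CONSISTENT and DMA_TO_DEVICE are the existing kernel APIs; only the routing of dma_cache_sync() changes with this commit.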
@@ -8,6 +8,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return dma_ops;
 }
-#define dma_cache_sync(dev, va, size, dir)	((void)0)
 #endif /* _ALPHA_DMA_MAPPING_H */
@@ -16,10 +16,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 }
 #endif
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-}
 #endif
@@ -14,10 +14,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &frv_dma_ops;
 }
-static inline
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		    enum dma_data_direction direction)
-{
-}
 #endif /* _ASM_DMA_MAPPING_H */
@@ -37,9 +37,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return dma_ops;
 }
-extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-			   enum dma_data_direction direction);
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
@@ -44,10 +44,4 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 	return daddr;
 }
-static inline void
-dma_cache_sync (struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction dir)
-{
-}
 #endif /* _ASM_IA64_DMA_MAPPING_H */
@@ -13,11 +13,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &dma_noop_ops;
 }
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-}
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
@@ -8,10 +8,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &m68k_dma_ops;
 }
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction dir)
-{
-	/* we use coherent allocation, so not much to do here. */
-}
 #endif /* _M68K_DMA_MAPPING_H */
@@ -8,14 +8,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &metag_dma_ops;
 }
-/*
- * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
- * do any flushing here.
- */
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-}
 #endif
@@ -25,9 +25,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &dma_direct_ops;
 }
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-}
 #endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
@@ -26,9 +26,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 static inline void dma_mark_clean(void *addr, size_t size) {}
-extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-			   enum dma_data_direction direction);
 #define arch_setup_dma_ops arch_setup_dma_ops
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
 				      u64 size, const struct iommu_ops *iommu,
@@ -383,7 +383,7 @@ static int mips_dma_supported(struct device *dev, u64 mask)
 	return plat_dma_supported(dev, mask);
 }
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+static void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
@@ -392,8 +392,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		__dma_sync_virtual(vaddr, size, direction);
 }
-EXPORT_SYMBOL(dma_cache_sync);
 static const struct dma_map_ops mips_default_dma_map_ops = {
 	.alloc = mips_dma_alloc_coherent,
 	.free = mips_dma_free_coherent,
@@ -407,7 +405,8 @@ static const struct dma_map_ops mips_default_dma_map_ops = {
 	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
 	.sync_sg_for_device = mips_dma_sync_sg_for_device,
 	.mapping_error = mips_dma_mapping_error,
-	.dma_supported = mips_dma_supported
+	.dma_supported = mips_dma_supported,
+	.cache_sync = mips_dma_cache_sync,
 };
 const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
@@ -18,10 +18,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &mn10300_dma_ops;
 }
-static inline
-void dma_cache_sync(void *vaddr, size_t size,
-		    enum dma_data_direction direction)
-{
-}
 #endif
@@ -17,13 +17,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &nios2_dma_ops;
 }
-/*
- * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
- * do any flushing here.
- */
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-}
 #endif /* _ASM_NIOS2_DMA_MAPPING_H */
@@ -32,14 +32,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return hppa_dma_ops;
 }
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-	if (hppa_dma_ops->sync_single_for_cpu)
-		flush_kernel_dcache_range((unsigned long)vaddr, size);
-}
 static inline void *
 parisc_walk_tree(struct device *dev)
 {
@@ -571,6 +571,12 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *
 		flush_kernel_vmap_range(sg_virt(sg), sg->length);
 }
+static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	       enum dma_data_direction direction)
+{
+	flush_kernel_dcache_range((unsigned long)vaddr, size);
+}
 const struct dma_map_ops pcxl_dma_ops = {
 	.dma_supported = pa11_dma_supported,
 	.alloc = pa11_dma_alloc,
@@ -583,6 +589,7 @@ const struct dma_map_ops pcxl_dma_ops = {
 	.sync_single_for_device = pa11_dma_sync_single_for_device,
 	.sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
 	.sync_sg_for_device = pa11_dma_sync_sg_for_device,
+	.cache_sync = pa11_dma_cache_sync,
 };
 static void *pcx_dma_alloc(struct device *dev, size_t size,
@@ -619,4 +626,5 @@ const struct dma_map_ops pcx_dma_ops = {
 	.sync_single_for_device = pa11_dma_sync_single_for_device,
 	.sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
 	.sync_sg_for_device = pa11_dma_sync_sg_for_device,
+	.cache_sync = pa11_dma_cache_sync,
 };
@@ -141,10 +141,5 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 #define ARCH_HAS_DMA_MMAP_COHERENT
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-}
 #endif /* __KERNEL__ */
 #endif /* _ASM_DMA_MAPPING_H */
@@ -15,11 +15,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &dma_noop_ops;
 }
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-}
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
@@ -9,12 +9,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return dma_ops;
 }
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction dir)
-{
-}
-
-/* arch/sh/mm/consistent.c */
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 					 dma_addr_t *dma_addr, gfp_t flag,
 					 unsigned long attrs);
@@ -5,14 +5,6 @@
 #include <linux/mm.h>
 #include <linux/dma-debug.h>
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction dir)
-{
-	/* Since dma_{alloc,free}_noncoherent() allocated coherent memory, this
-	 * routine can be a nop.
-	 */
-}
 extern const struct dma_map_ops *dma_ops;
 extern const struct dma_map_ops pci32_dma_ops;
@@ -67,13 +67,4 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 #define HAVE_ARCH_DMA_SET_MASK 1
 int dma_set_mask(struct device *dev, u64 mask);
-/*
- * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
- * do any flushing here.
- */
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-}
 #endif /* _ASM_TILE_DMA_MAPPING_H */
@@ -45,10 +45,5 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 static inline void dma_mark_clean(void *addr, size_t size) {}
-static inline void dma_cache_sync(struct device *dev, void *vaddr,
-		size_t size, enum dma_data_direction direction)
-{
-}
 #endif /* __KERNEL__ */
 #endif
@@ -67,12 +67,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 }
 #endif /* CONFIG_X86_DMA_REMAP */
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction dir)
-{
-}
 static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
 						    gfp_t gfp)
 {
@@ -23,11 +23,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &xtensa_dma_map_ops;
 }
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-				  enum dma_data_direction direction)
-{
-}
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
 	return (dma_addr_t)paddr;
@@ -126,6 +126,8 @@ struct dma_map_ops {
 	void (*sync_sg_for_device)(struct device *dev,
 				   struct scatterlist *sg, int nents,
 				   enum dma_data_direction dir);
+	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
+			   enum dma_data_direction direction);
 	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
 	int (*dma_supported)(struct device *dev, u64 mask);
 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
@@ -436,6 +438,17 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops->cache_sync)
+		ops->cache_sync(dev, vaddr, size, dir);
+}
+
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
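The mips and parisc hunks above show the intended pattern for architectures that need real cache maintenance. A condensed sketch of that pattern with hypothetical names (foo_dma_cache_sync, foo_dma_ops and foo_cache_wbinv_range are placeholders, not real kernel symbols):

#include <linux/dma-mapping.h>

/*
 * Hypothetical architecture code, for illustration only;
 * foo_cache_wbinv_range() stands in for the architecture's real
 * cache write-back/invalidate helper.
 */
static void foo_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
			       enum dma_data_direction direction)
{
	/* Make the buffer coherent between the CPU caches and memory. */
	foo_cache_wbinv_range((unsigned long)vaddr, size);
}

static const struct dma_map_ops foo_dma_ops = {
	/* .alloc, .free, .map_page, ... as before */
	.cache_sync	= foo_dma_cache_sync,	/* the new op added by this commit */
};

An architecture that never returns cacheable memory for DMA_ATTR_NON_CONSISTENT allocations can simply leave .cache_sync unset, and the generic dma_cache_sync() wrapper above becomes a no-op for it.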