/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc and
 * the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       struct dma_attrs *attrs);
extern void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     struct dma_attrs *attrs);


#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
struct device;
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
				 size_t size, int direction);
extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
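
/*
 * Illustrative sketch, not part of the original header: a driver on a
 * non-coherent platform could back a small descriptor ring with uncached
 * memory like this ("ring" and "ring_bus" are hypothetical names):
 *
 *	dma_addr_t ring_bus;
 *	void *ring = __dma_alloc_coherent(dev, PAGE_SIZE, &ring_bus,
 *					  GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	__dma_free_coherent(PAGE_SIZE, ring);
 */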

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(dev, size, handle, gfp)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_map_ops dma_iommu_ops;
#endif
extern struct dma_map_ops dma_direct_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed yet. The
	 * only ISA DMA device we support is the floppy, and the floppy
	 * driver has a hack to hand us a device directly.
	 */
	if (unlikely(dev == NULL))
		return NULL;

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

/*
 * get_dma_offset()
 *
 * Get the dma offset on configurations where the dma address can be determined
 * from the physical address by looking at a simple offset.  Direct dma and
 * swiotlb use this function, but it is typically not used by implementations
 * with an iommu.
 */
static inline dma_addr_t get_dma_offset(struct device *dev)
{
	if (dev)
		return dev->archdata.dma_data.dma_offset;

	return PCI_DRAM_OFFSET;
}

static inline void set_dma_offset(struct device *dev, dma_addr_t off)
{
	if (dev)
		dev->archdata.dma_data.dma_offset = off;
}
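
/*
 * Worked example (hypothetical numbers): on a bridge that makes CPU
 * physical address 0 appear at bus address 0x80000000, the platform
 * would call set_dma_offset(dev, 0x80000000); phys_to_dma(dev, 0x1000)
 * below then yields 0x80001000, and dma_to_phys() undoes it.
 */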

/* this will be removed soon */
#define flush_write_buffers()

#include <asm-generic/dma-mapping-common.h>

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

extern int dma_set_mask(struct device *dev, u64 dma_mask);
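
/*
 * Illustrative sketch (hypothetical probe-time code): negotiate the
 * widest mask the platform accepts before creating any mapping.
 * DMA_BIT_MASK() comes from linux/dma-mapping.h.
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 * falling back from 64-bit to 32-bit addressing, and giving up if even
 * that is refused.
 */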

#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!dma_ops);

	cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);

	return cpu_addr;
}

#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);

	dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
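
/*
 * Illustrative sketch (hypothetical names): the usual allocate/use/free
 * pairing.  "buf" is the kernel virtual address for CPU accesses; "bus"
 * is what the device should be programmed with.
 *
 *	dma_addr_t bus;
 *	void *buf = dma_alloc_coherent(dev, size, &bus, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, size, buf, bus);
 */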

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dev, dma_addr);

#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}
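
/*
 * Illustrative sketch: streaming mappings (dma_map_single() and friends,
 * pulled in via asm-generic/dma-mapping-common.h above) should have the
 * returned address checked before it is handed to hardware:
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 */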

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
#ifdef CONFIG_SWIOTLB
	struct dev_archdata *sd = &dev->archdata;

	if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
		return 0;
#endif

	if (!dev->dma_mask)
		return 0;

	return addr + size - 1 <= *dev->dma_mask;
}
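
/*
 * Illustrative sketch: swiotlb-style code uses dma_capable() to decide
 * whether a buffer may be handed to the device directly or must be
 * bounced ("bounce_map" is a hypothetical helper):
 *
 *	dma_addr_t bus = phys_to_dma(dev, paddr);
 *	if (!dma_capable(dev, bus, size))
 *		bus = bounce_map(dev, paddr, size);
 */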

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr + get_dma_offset(dev);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr - get_dma_offset(dev);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
			     void *, dma_addr_t, size_t);
#define ARCH_HAS_DMA_MMAP_COHERENT
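
/*
 * Illustrative sketch (hypothetical driver code): exporting a coherent
 * buffer to user space from an mmap() file operation:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *p = file->private_data;
 *
 *		return dma_mmap_coherent(p->dev, vma, p->cpu_addr,
 *					 p->dma_handle, p->size);
 *	}
 */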


static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif	/* _ASM_DMA_MAPPING_H */