/* ARM DMA mapping helpers (dma-mapping.h) */
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

6
#include <linux/mm_types.h>
J
Jens Axboe 已提交
7
#include <linux/scatterlist.h>
8
#include <linux/dma-debug.h>
L
Linus Torvalds 已提交
9

10 11
#include <asm/memory.h>

12 13 14
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

15
/* All-ones device address marks a failed DMA mapping. */
#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
/* DMA operation tables for streaming (cached) and coherent devices. */
extern const struct dma_map_ops arm_dma_ops;
extern const struct dma_map_ops arm_coherent_dma_ops;
18

19
/* Default DMA ops for any bus: ARM always starts with arm_dma_ops. */
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	return &arm_dma_ops;
}

24 25 26
/* ARM supplies its own dma_supported() implementation. */
#define HAVE_ARCH_DMA_SUPPORTED 1
extern int dma_supported(struct device *dev, u64 mask);

27 28 29 30
/* Fail the build if a platform still defines the old page-based macros. */
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

31
/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
36 37
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
38
{
39 40
	if (dev)
		pfn -= dev->dma_pfn_offset;
41
	return (dma_addr_t)__pfn_to_bus(pfn);
42
}
43

44
/*
 * Default device address -> pfn translation: convert from the bus
 * address, then re-apply the per-device pfn offset when known.
 */
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	unsigned long bus_pfn = __bus_to_pfn(addr);

	return dev ? bus_pfn + dev->dma_pfn_offset : bus_pfn;
}

54 55
/*
 * Translate a device DMA address back to a kernel virtual address.
 * With a device, go via the pfn translation; otherwise fall back to
 * the legacy bus -> virtual conversion.
 */
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	if (!dev)
		return (void *)__bus_to_virt((unsigned long)addr);

	return phys_to_virt(__pfn_to_phys(dma_to_pfn(dev, addr)));
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
67 68 69
	if (dev)
		return pfn_to_dma(dev, virt_to_pfn(addr));

70 71
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
72

73
#else
/* Platform supplies its own translation macros; wrap them in inlines. */
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

79
/* Platform-specific device address -> pfn translation. */
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

84 85 86 87 88 89 90 91 92 93
/* Platform-specific device address -> virtual translation. */
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

/* Platform-specific virtual -> device address translation. */
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif
94

95 96 97
/* The ARM override for dma_max_pfn() */
static inline unsigned long dma_max_pfn(struct device *dev)
{
	/* Highest CPU pfn addressable under the device's DMA mask. */
	return dma_to_pfn(dev, *dev->dma_mask);
}
#define dma_max_pfn(dev) dma_max_pfn(dev)

102
/* ARM implements the per-device DMA ops setup/teardown hooks. */
#define arch_setup_dma_ops arch_setup_dma_ops
extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			       const struct iommu_ops *iommu, bool coherent);

#define arch_teardown_dma_ops arch_teardown_dma_ops
extern void arch_teardown_dma_ops(struct device *dev);
108

109 110 111 112 113 114
/* do not use this function in a driver */
/* Report whether the device was set up as cache-coherent for DMA. */
static inline bool is_device_dma_coherent(struct device *dev)
{
	return dev->archdata.dma_coherent;
}

S
Stefano Stabellini 已提交
115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147
/* Convert a CPU physical address to a device DMA address. */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	/* Translate the page frame, then restore the intra-page offset. */
	dma_addr_t dev_addr = pfn_to_dma(dev, __phys_to_pfn(paddr));

	return dev_addr + (paddr & ~PAGE_MASK);
}

/* Convert a device DMA address back to a CPU physical address. */
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	/* Translate the page frame, then restore the intra-page offset. */
	phys_addr_t paddr = __pfn_to_phys(dma_to_pfn(dev, dev_addr));

	return paddr + (dev_addr & ~PAGE_MASK);
}

/*
 * Check whether a buffer [addr, addr + size) is fully addressable by
 * the device, given its DMA mask. Returns false when the device has no
 * mask, the buffer exceeds the window implied by the mask, or any byte
 * of the buffer lies above the mask.
 */
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	u64 mask, limit;

	if (!dev->dma_mask)
		return 0;

	mask = *dev->dma_mask;

	/*
	 * For a mask of the form 2^n - 1, limit is 2^n, the size of the
	 * addressable window; limit == 0 means no window-size restriction.
	 */
	limit = (mask + 1) & ~mask;
	if (limit && size > limit)
		return 0;

	/* Both the first and the last byte must fall under the mask. */
	return !((addr | (addr + size - 1)) & ~mask);
}

/* No-op stub on ARM. */
static inline void dma_mark_clean(void *addr, size_t size) { }

L
Linus Torvalds 已提交
148
/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @gfp: allocation flags
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA.  This function
 * allocates pages, and will return the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp, unsigned long attrs);
161

L
Linus Torvalds 已提交
162
/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * arm_dma_alloc().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle, unsigned long attrs);
178

L
Linus Torvalds 已提交
179
/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @dma_addr: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);
L
Linus Torvalds 已提交
195

196 197 198 199 200 201 202
/*
 * This can be called during early boot to increase the size of the atomic
 * coherent DMA pool above the default value of 256KiB. It must be called
 * before postcore_initcall.
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);

203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
229 230 231 232 233 234 235 236 237 238 239 240 241 242

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);


243

244 245
/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, unsigned long attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, unsigned long attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
L
Linus Torvalds 已提交
258 259 260

#endif /* __KERNEL__ */
#endif