#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change.  Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}
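
/*
 * A minimal sketch of these ownership transitions as a driver sees them
 * through the public API ("dev", "buf", "handle" and "len" below are
 * hypothetical, for illustration only):
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *						CPU -> device
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_TO_DEVICE);
 *						device -> CPU
 *	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
 *						CPU -> device
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *						device -> CPU
 */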

extern int dma_supported(struct device *, u64);
extern int dma_set_mask(struct device *, u64);

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}
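
/*
 * Example (a sketch; "dev", "buf" and "len" are hypothetical): every
 * freshly created mapping should be checked before it is handed to the
 * hardware:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */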

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages and returns the
 * CPU-viewed address, setting @handle to the device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call are illegal.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
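
/*
 * Example (a sketch; "priv" and RING_BYTES are hypothetical): a
 * descriptor ring that both the CPU and the device access concurrently:
 *
 *	priv->desc = dma_alloc_coherent(dev, RING_BYTES,
 *					&priv->desc_dma, GFP_KERNEL);
 *	if (!priv->desc)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, priv->desc, priv->desc_dma);
 */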

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
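
/*
 * Example (a sketch; all "foo" names are hypothetical): a driver's mmap
 * file operation handing a previously allocated coherent buffer to user
 * space:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *fd = file->private_data;
 *
 *		return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr,
 *					 fd->dma_handle, fd->size);
 *	}
 */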


/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages and returns the
 * CPU-viewed address, setting @handle to the device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)
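
/*
 * Example (a sketch; the "fb" names are hypothetical): framebuffer-style
 * memory, where write buffering is acceptable but caching is not:
 *
 *	fb->screen_base = dma_alloc_writecombine(dev, fb->size,
 *						 &fb->screen_dma, GFP_KERNEL);
 *	...
 *	dma_free_writecombine(dev, fb->size, fb->screen_base, fb->screen_dma);
 */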

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);


#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
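
/*
 * Example (a sketch; foo_needs_bounce, the pool sizes and the 64MB
 * window check are illustrative only): platform code for a device with
 * a limited DMA window might register it as follows:
 *
 *	static int foo_needs_bounce(struct device *dev, dma_addr_t addr,
 *				    size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	dmabounce_register_dev(dev, 2048, 4096, foo_needs_bounce);
 */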

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t __dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}


static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
		handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	unsigned long offset;
	struct page *page;
	dma_addr_t addr;

	BUG_ON(!virt_addr_valid(cpu_addr));
	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
	BUG_ON(!valid_dma_direction(dir));

	page = virt_to_page(cpu_addr);
	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, true);

	return addr;
}
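
/*
 * Example (a sketch; "dev", "buf" and "len" are hypothetical): a
 * streaming receive buffer.  The CPU must not touch the buffer between
 * map and unmap:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... point the hardware at "handle" and wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */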

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}
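
/*
 * Example (a sketch; the "frag" fields are hypothetical): mapping one
 * fragment of a page, e.g. part of a network buffer:
 *
 *	dma_addr_t handle = dma_map_page(dev, frag->page, frag->offset,
 *					 frag->size, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_page(dev, handle, frag->size, DMA_TO_DEVICE);
 */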

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, true);
	__dma_unmap_page(dev, handle, size, dir);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, false);
	__dma_unmap_page(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
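
/*
 * Example (a sketch; "dev", "handle" and "len" are hypothetical):
 * inspecting a mapped receive buffer without tearing the mapping down:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... the device owns the buffer again ...
 */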

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
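
/*
 * Example (a sketch; "sglist" and "nents" are hypothetical): the DMA API
 * allows dma_map_sg() to coalesce entries, so the hardware must be
 * programmed with the returned count, while dma_unmap_sg() takes the
 * original nents:
 *
 *	int count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	... program the hardware with "count" entries ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */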


#endif /* __KERNEL__ */
#endif