#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#define DMA_ERROR_CODE	(~0)

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change.  Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}

extern int dma_supported(struct device *, u64);
extern int dma_set_mask(struct device *, u64);
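
/*
 * Example (an illustrative sketch, not part of this header; "dev" is a
 * hypothetical device):
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		dev_warn(dev, "no suitable DMA mask available\n");
 */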

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * while this call is executing, or after it returns, are illegal.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
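
/*
 * Example (an illustrative sketch; "dev" is a hypothetical device, the
 * SZ_4K size is made up, and error handling is abbreviated):
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	... program the device with dma_handle, access data via cpu_addr ...
 *	dma_free_coherent(dev, SZ_4K, cpu_addr, dma_handle);
 */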

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
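
/*
 * Example (an illustrative sketch of a driver's mmap file operation;
 * "my_dev", "my_cpu_addr", "my_handle" and "my_size" are hypothetical
 * driver state):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(my_dev, vma, my_cpu_addr,
 *					 my_handle, my_size);
 *	}
 */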

/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
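
/*
 * Example (an illustrative sketch; a hypothetical frame buffer allocation,
 * with "dev" and the SZ_1M size made up):
 *
 *	dma_addr_t fb_handle;
 *	void *fb = dma_alloc_writecombine(dev, SZ_1M, &fb_handle, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	...
 *	dma_free_writecombine(dev, SZ_1M, fb, fb_handle);
 */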

/*
 * This can be called during boot to increase the size of the consistent
 * DMA region above its default value of 2MB. It must be called before the
 * memory allocator is initialised, i.e. before any core_initcall.
 */
extern void __init init_consistent_dma_size(unsigned long size);
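
/*
 * Example (an illustrative sketch; a hypothetical platform enlarging the
 * region from an early init hook, before any core_initcall runs):
 *
 *	static void __init my_board_init_early(void)
 *	{
 *		init_consistent_dma_size(SZ_8M);
 *	}
 */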

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems, the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB
 * (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
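
/*
 * Example (an illustrative sketch from hypothetical platform code; the
 * window check and the pool sizes are made up):
 *
 *	A made-up predicate: bounce any buffer extending above 64MB.
 *
 *	static int my_needs_bounce(struct device *dev, dma_addr_t addr,
 *				   size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	err = dmabounce_register_dev(dev, SZ_2K, SZ_64K, my_needs_bounce);
 */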

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t __dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
		handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	unsigned long offset;
	struct page *page;
	dma_addr_t addr;

	BUG_ON(!virt_addr_valid(cpu_addr));
	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
	BUG_ON(!valid_dma_direction(dir));

	page = virt_to_page(cpu_addr);
	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, true);

	return addr;
}

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
347
 * can regain ownership by calling dma_unmap_page().
L
Linus Torvalds 已提交
348
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, true);
	__dma_unmap_page(dev, handle, size, dir);
}
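
/*
 * Example (an illustrative sketch; "dev", "buf" and "len" are hypothetical,
 * and error handling is abbreviated):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... start the transfer and wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */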

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, false);
	__dma_unmap_page(dev, handle, size, dir);
}
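
/*
 * Example (an illustrative sketch; "dev" and "page" are hypothetical, with
 * the transfer covering a whole page):
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 0, PAGE_SIZE,
 *					 DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... run the transfer ...
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
 */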

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the PCI DMA
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI DMA address back to the card, you
 * must first perform a dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
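
/*
 * Example (an illustrative sketch; "dev", "handle" and "len" come from a
 * hypothetical earlier dma_map_single(..., DMA_FROM_DEVICE) call):
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... inspect the received data with the CPU ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... the device may now DMA into the buffer again ...
 */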

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
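
/*
 * Example (an illustrative sketch; "dev", "sgl" and "nents" describe a
 * hypothetical scatterlist built with sg_init_table()/sg_set_page()):
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	... program the device with the "count" mapped segments ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */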

#endif /* __KERNEL__ */
#endif