#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change.  Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}
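
/*
 * Illustrative sketch (not part of this header): the ownership hand-off
 * as seen through the public streaming API, which invokes the private
 * helpers above internally.  foo_start_dma() and foo_wait_dma() are
 * hypothetical driver routines.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	(the device now owns the buffer; the CPU must not touch it)
 *	foo_start_dma(dev, dma, len);
 *	foo_wait_dma(dev);
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *	(the CPU owns the buffer again)
 */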

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	if (mask < ISA_DMA_THRESHOLD)
		return 0;
	return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_DMABOUNCE
	if (dev->archdata.dmabounce) {
		if (dma_mask >= ISA_DMA_THRESHOLD)
			return 0;
		else
			return -EIO;
	}
#endif
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
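
/*
 * Illustrative sketch (not part of this header): a driver limiting its
 * device to 32-bit DMA addresses at probe time.  foo_probe() and pdev
 * are hypothetical; DMA_BIT_MASK() comes from <linux/dma-mapping.h>.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *			return -EIO;
 *		...
 *	}
 */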

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}
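
/*
 * Illustrative sketch (not part of this header): checking a streaming
 * mapping for failure before handing the address to the hardware.
 * "buf" and "len" are hypothetical.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */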

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
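
/*
 * Illustrative sketch (not part of this header): allocating a small
 * descriptor ring that both the CPU and the device access, then
 * releasing it.  FOO_RING_BYTES is a hypothetical size.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, FOO_RING_BYTES,
 *					&ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	(program ring_dma into the device; use "ring" from the CPU)
 *	dma_free_coherent(dev, FOO_RING_BYTES, ring, ring_dma);
 */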

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
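
/*
 * Illustrative sketch (not part of this header): exposing a coherent
 * buffer to user space from a driver's mmap() handler.  struct foo_dev
 * and its fields are hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_handle, foo->buf_size);
 *	}
 */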


/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
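
/*
 * Illustrative sketch (not part of this header): a framebuffer-style
 * driver allocating writecombining memory and mapping it to user space.
 * "fb_size", "fb_cpu" and "fb_dma" are hypothetical.
 *
 *	fb_cpu = dma_alloc_writecombine(dev, fb_size, &fb_dma, GFP_KERNEL);
 *	if (!fb_cpu)
 *		return -ENOMEM;
 *	...
 *	ret = dma_mmap_writecombine(dev, vma, fb_cpu, fb_dma, fb_size);
 *	...
 *	dma_free_writecombine(dev, fb_size, fb_cpu, fb_dma);
 */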


#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 *
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long);
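
/*
 * Illustrative sketch (not part of this header): platform code marking
 * a newly added device as needing bounce buffers, with 2KB and 64KB
 * pools.  The pool sizes here are arbitrary examples.
 *
 *	if (dmabounce_register_dev(dev, 2048, 65536))
 *		dev_err(dev, "failed to register with dmabounce\n");
 */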

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 *
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
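
/*
 * Illustrative sketch (not part of this header): a platform whose DMA
 * window covers only the low part of RAM might implement the hook
 * roughly as follows.  FOO_DMA_WINDOW_END is hypothetical.
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > FOO_DMA_WINDOW_END;
 *	}
 */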

/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t __dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}


static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
		handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	unsigned long offset;
	struct page *page;
	dma_addr_t addr;

	BUG_ON(!virt_addr_valid(cpu_addr));
	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
	BUG_ON(!valid_dma_direction(dir));

	page = virt_to_page(cpu_addr);
	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, true);

	return addr;
}
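
/*
 * Illustrative sketch (not part of this header): a full streaming-DMA
 * round trip for a kmalloc'd buffer being sent to a device.
 * foo_start_tx() is a hypothetical driver routine.
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	(fill buf with the data to transmit)
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	foo_start_tx(dev, dma, len);
 *	(once the transfer has completed:)
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *	kfree(buf);
 */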

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, true);
	__dma_unmap_page(dev, handle, size, dir);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, false);
	__dma_unmap_page(dev, handle, size, dir);
}
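
/*
 * Illustrative sketch (not part of this header): mapping part of a page
 * (for example one element of a driver's receive pool) and unmapping it
 * once the device has filled it.  "rx_page" and RX_BUF_SIZE are
 * hypothetical.
 *
 *	dma = dma_map_page(dev, rx_page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	(hand dma to the device; later, on completion:)
 *	dma_unmap_page(dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
 */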

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA mapping,
 * you must call this function before doing so.  At the next point you
 * give the DMA address back to the device, you must first perform a
 * dma_sync_single_for_device(), and then the device again owns the
 * buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
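
/*
 * Illustrative sketch (not part of this header): peeking at a long-lived
 * streaming mapping from the CPU between transfers without unmapping it.
 * foo_check_status() is a hypothetical driver routine.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	foo_check_status(buf);
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	(the device may now DMA into the buffer again)
 */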

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
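
/*
 * Illustrative sketch (not part of this header): mapping a two-element
 * scatterlist and walking the device addresses.  sg_init_table(),
 * sg_set_buf() and for_each_sg() come from <linux/scatterlist.h>;
 * foo_queue_desc(), "buf1"/"buf2" and their lengths are hypothetical.
 *
 *	struct scatterlist sgl[2], *sg;
 *	int i, count;
 *
 *	sg_init_table(sgl, 2);
 *	sg_set_buf(&sgl[0], buf1, len1);
 *	sg_set_buf(&sgl[1], buf2, len2);
 *
 *	count = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
 *	for_each_sg(sgl, sg, count, i)
 *		foo_queue_desc(dev, sg_dma_address(sg), sg_dma_len(sg));
 *	(after the transfer completes:)
 *	dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
 */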


#endif /* __KERNEL__ */
#endif