#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
18 19 20 21
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}
22

23 24 25 26 27
static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
	return pfn_to_page(__bus_to_pfn(addr));
}

28 29 30 31 32 33 34 35 36 37 38 39 40 41 42
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return __arch_page_to_dma(dev, page);
}

43 44 45 46 47
static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_page(dev, addr);
}

48 49 50 51 52 53 54 55 56 57
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change.  Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	/* Out-of-line cache maintenance implementation. */
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	/* Cache-coherent systems need no maintenance before DMA. */
	if (arch_is_coherent())
		return;

	___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	/* Out-of-line cache maintenance implementation. */
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	/* Cache-coherent systems need no maintenance after DMA. */
	if (arch_is_coherent())
		return;

	___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	/* Out-of-line cache maintenance implementation. */
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	/* Cache-coherent systems need no maintenance before DMA. */
	if (arch_is_coherent())
		return;

	___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	/* Out-of-line cache maintenance implementation. */
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	/* Cache-coherent systems need no maintenance after DMA. */
	if (arch_is_coherent())
		return;

	___dma_page_dev_to_cpu(page, off, size, dir);
}


/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	/* Any mask below the ISA DMA threshold cannot be satisfied. */
	return mask >= ISA_DMA_THRESHOLD;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	/* The device must have a mask pointer and a supportable mask. */
	if (dev->dma_mask && dma_supported(dev, dma_mask)) {
		*dev->dma_mask = dma_mask;
		return 0;
	}

	return -EIO;
}

/* Report the alignment (in bytes) that DMA buffers must respect. */
static inline int dma_get_cache_alignment(void)
{
	/* 32 bytes covers the cache line size of the supported cores. */
	return 32;
}

144
static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
	/* Memory is DMA-consistent only on cache-coherent systems. */
	return arch_is_coherent() ? 1 : 0;
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
152
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	/* A failed mapping is signalled by an all-bits-set bus address. */
	if (dma_addr == ~0)
		return 1;

	return 0;
}

157 158 159 160
/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
R
Russell King 已提交
161 162
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	/* Deliberately unimplemented dummy: always fails. */
	return NULL;
}

R
Russell King 已提交
167 168
static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
	/* Nothing to free: dma_alloc_noncoherent() never allocates. */
}

L
Linus Torvalds 已提交
172 173 174 175 176 177 178 179 180 181 182
/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
R
Russell King 已提交
183
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
L
Linus Torvalds 已提交
184 185 186 187 188 189 190 191 192 193 194 195 196 197

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
R
Russell King 已提交
198
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
L
Linus Torvalds 已提交
199 200 201 202 203 204 205 206 207 208 209 210 211

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
R
Russell King 已提交
212 213
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
L
Linus Torvalds 已提交
214 215 216 217 218 219 220 221 222 223 224 225 226


/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
R
Russell King 已提交
227 228
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);
L
Linus Torvalds 已提交
229 230 231 232

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

R
Russell King 已提交
233 234
int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
L
Linus Torvalds 已提交
235 236


237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261
#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems  the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requireing DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 *
 */
R
Russell King 已提交
262 263
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long);
264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 *
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);

295 296 297
/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
R
Russell King 已提交
298 299
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
300 301
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);
R
Russell King 已提交
302 303
extern dma_addr_t dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
304
extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
R
Russell King 已提交
305
		enum dma_data_direction);
306

307 308 309 310
/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
R
Russell King 已提交
311
		size_t, enum dma_data_direction);
312
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
R
Russell King 已提交
313
		size_t, enum dma_data_direction);
314
#else
315 316 317 318 319 320 321 322 323 324 325
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	/* No dmabounce: tell the caller to perform the sync itself. */
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	/* No dmabounce: tell the caller to perform the sync itself. */
	return 1;
}
326 327


L
Linus Torvalds 已提交
328 329 330 331 332 333 334 335 336 337 338 339 340 341
/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
R
Russell King 已提交
342 343
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Hand buffer ownership from the CPU to the device. */
	__dma_single_cpu_to_dev(cpu_addr, size, dir);

	dma_addr = virt_to_dma(dev, cpu_addr);

	return dma_addr;
}
351

L
Linus Torvalds 已提交
352 353 354 355 356 357 358 359 360 361 362 363
/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
364
 * can regain ownership by calling dma_unmap_page().
L
Linus Torvalds 已提交
365
 */
R
Russell King 已提交
366 367
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t base;

	BUG_ON(!valid_dma_direction(dir));

	/* Hand the page region over to the device. */
	__dma_page_cpu_to_dev(page, offset, size, dir);

	base = page_to_dma(dev, page);

	return base + offset;
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
380 381
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
L
Linus Torvalds 已提交
382 383 384 385 386 387 388 389
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
R
Russell King 已提交
390 391
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	void *kaddr = dma_to_virt(dev, handle);

	/* Return buffer ownership to the CPU. */
	__dma_single_dev_to_cpu(kaddr, size, dir);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
400 401
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
L
Linus Torvalds 已提交
402
 *
403 404
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
L
Linus Torvalds 已提交
405 406 407 408 409
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
R
Russell King 已提交
410 411
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	struct page *page = dma_to_page(dev, handle);
	unsigned long offset = handle & ~PAGE_MASK;

	/* Return buffer ownership to the CPU. */
	__dma_page_dev_to_cpu(page, offset, size, dir);
}
416
#endif /* CONFIG_DMABOUNCE */
L
Linus Torvalds 已提交
417 418

/**
419
 * dma_sync_single_range_for_cpu
L
Linus Torvalds 已提交
420 421
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
422 423 424
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
L
Linus Torvalds 已提交
425 426 427 428 429 430 431 432 433 434 435
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first the perform a dma_sync_for_device, and then the
 * device again owns the buffer.
 */
R
Russell King 已提交
436 437 438
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	void *kaddr;

	BUG_ON(!valid_dma_direction(dir));

	/* A zero return means dmabounce performed the whole sync. */
	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

	kaddr = dma_to_virt(dev, handle);
	__dma_single_dev_to_cpu(kaddr + offset, size, dir);
}

R
Russell King 已提交
448 449 450
static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	void *kaddr;

	BUG_ON(!valid_dma_direction(dir));

	/* A zero return means dmabounce performed the whole sync. */
	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	kaddr = dma_to_virt(dev, handle);
	__dma_single_cpu_to_dev(kaddr + offset, size, dir);
}

R
Russell King 已提交
460 461
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	/* A whole-buffer sync is a range sync starting at offset 0. */
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

R
Russell King 已提交
466 467
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	/* A whole-buffer sync is a range sync starting at offset 0. */
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}

472 473
/*
 * The scatter list versions of the above methods.
L
Linus Torvalds 已提交
474
 */
R
Russell King 已提交
475 476 477 478 479 480 481 482
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
483

L
Linus Torvalds 已提交
484 485 486

#endif /* __KERNEL__ */
#endif