/* arch/arm/include/asm/dma-mapping.h — ARM DMA mapping API */
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
/* Generic translations: the bus address is the direct-mapped physical one. */
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	unsigned long virt = (unsigned long)page_address(page);

	return (dma_addr_t)__virt_to_bus(virt);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)addr);
}
#else
/* Platform supplies its own page/virt <-> DMA address translations. */
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return __arch_page_to_dma(dev, page);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, unwrite-buffered mapped memory space for use with DMA
 * devices.  This is the "generic" version.  The PCI specific version
 * is in pci.h
 *
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
extern void dma_cache_maint_page(struct page *page, unsigned long offset,
				 size_t size, int rw);
L
Linus Torvalds 已提交
62 63 64 65 66 67

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	/* Anything narrower than the ISA DMA limit cannot be satisfied. */
	return mask >= ISA_DMA_THRESHOLD;
}

/*
 * Install a new DMA mask on the device, provided the device actually
 * has a mask pointer and the requested mask is usable on this platform.
 * Returns 0 on success, -EIO otherwise.
 */
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (dev->dma_mask && dma_supported(dev, dma_mask)) {
		*dev->dma_mask = dma_mask;
		return 0;
	}

	return -EIO;
}

/*
 * Alignment (in bytes) that streaming DMA buffers must honour so they
 * never share a cache line with unrelated data.
 */
static inline int dma_get_cache_alignment(void)
{
	return 32;
}

94
/*
 * A mapping is DMA-consistent exactly when the whole architecture is
 * cache-coherent; the individual handle makes no difference here.
 */
static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
	return arch_is_coherent() ? 1 : 0;
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
102
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
L
Linus Torvalds 已提交
103 104 105 106
{
	return dma_addr == ~0;
}

107 108 109 110
/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
R
Russell King 已提交
111 112
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
113 114 115 116
{
	return NULL;
}

R
Russell King 已提交
117 118
static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
	/* Nothing to release: dma_alloc_noncoherent() never succeeds. */
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);


/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

/* Writecombine allocations are released exactly like coherent ones. */
#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);


#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems  the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 *
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long);

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 *
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);

/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
/* No DMABOUNCE: nothing was bounced, so the caller proceeds normally. */
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

/* No DMABOUNCE: tell the caller to carry on with cache maintenance. */
static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}
274 275


L
Linus Torvalds 已提交
276 277 278 279 280 281 282 283 284 285 286 287 288 289
/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
R
Russell King 已提交
290 291
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
L
Linus Torvalds 已提交
292
{
293 294
	BUG_ON(!valid_dma_direction(dir));

295
	if (!arch_is_coherent())
296
		dma_cache_maint(cpu_addr, size, dir);
297

298
	return virt_to_dma(dev, cpu_addr);
L
Linus Torvalds 已提交
299
}
300

L
Linus Torvalds 已提交
301 302 303 304 305 306 307 308 309 310 311 312
/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
313
 * can regain ownership by calling dma_unmap_page().
L
Linus Torvalds 已提交
314
 */
R
Russell King 已提交
315 316
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir)
L
Linus Torvalds 已提交
317
{
318 319
	BUG_ON(!valid_dma_direction(dir));

320
	if (!arch_is_coherent())
321
		dma_cache_maint_page(page, offset, size, dir);
322 323

	return page_to_dma(dev, page) + offset;
L
Linus Torvalds 已提交
324 325 326 327 328 329
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
330 331
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
L
Linus Torvalds 已提交
332 333 334 335 336 337 338 339
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
R
Russell King 已提交
340 341
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
L
Linus Torvalds 已提交
342 343 344
{
	/* nothing to do */
}
#endif /* CONFIG_DMABOUNCE */
L
Linus Torvalds 已提交
346 347 348 349 350

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
351 352
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
L
Linus Torvalds 已提交
353
 *
354 355
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
L
Linus Torvalds 已提交
356 357 358 359 360
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
R
Russell King 已提交
361 362
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
L
Linus Torvalds 已提交
363
{
364
	dma_unmap_single(dev, handle, size, dir);
L
Linus Torvalds 已提交
365 366 367
}

/**
368
 * dma_sync_single_range_for_cpu
L
Linus Torvalds 已提交
369 370
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
371 372 373
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
L
Linus Torvalds 已提交
374 375 376 377 378 379 380 381 382 383 384
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first the perform a dma_sync_for_device, and then the
 * device again owns the buffer.
 */
R
Russell King 已提交
385 386 387
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
L
Linus Torvalds 已提交
388
{
389 390
	BUG_ON(!valid_dma_direction(dir));

391
	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
L
Linus Torvalds 已提交
392 393
}

R
Russell King 已提交
394 395 396
/*
 * Hand a sub-range of a streaming mapping back to the device: let the
 * dmabounce layer copy into its bounce buffer first; when no bounce is
 * needed, perform ordinary cache maintenance on non-coherent cores.
 */
static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	if (!arch_is_coherent())
		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

R
Russell King 已提交
407 408
/* Whole-buffer convenience wrapper: sync from offset 0. */
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

R
Russell King 已提交
413 414
/* Whole-buffer convenience wrapper: sync from offset 0. */
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);

#endif /* __KERNEL__ */
#endif