dma-mapping.h 13.5 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

6
#include <linux/mm_types.h>
J
Jens Axboe 已提交
7
#include <linux/scatterlist.h>
L
Linus Torvalds 已提交
8

9
#include <asm-generic/dma-coherent.h>
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
/*
 * Default translation of a struct page to a device/bus DMA address,
 * used when the platform does not provide __arch_page_to_dma.
 */
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	void *cpu_addr = page_address(page);

	return (dma_addr_t)__virt_to_bus((unsigned long)cpu_addr);
}

/*
 * Default translation of a DMA (bus) address back to a kernel virtual
 * address, used when the platform does not provide __arch_dma_to_virt.
 */
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	unsigned long cpu_addr = __bus_to_virt(addr);

	return (void *)cpu_addr;
}

/*
 * Default translation of a kernel virtual address to a DMA (bus)
 * address, used when the platform does not provide __arch_virt_to_dma.
 */
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	unsigned long cpu_addr = (unsigned long)addr;

	return (dma_addr_t)__virt_to_bus(cpu_addr);
}
#else
/* Platform override in effect: delegate to __arch_page_to_dma(). */
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return __arch_page_to_dma(dev, page);
}

/* Platform override in effect: delegate to __arch_dma_to_virt(). */
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

/* Platform override in effect: delegate to __arch_virt_to_dma(). */
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif
48

L
Linus Torvalds 已提交
49 50 51 52 53
/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, unwrite-buffered mapped memory space for use with DMA
 * devices.  This is the "generic" version.  The PCI specific version
 * is in pci.h
54 55 56 57
 *
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
L
Linus Torvalds 已提交
58
 */
59
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
L
Linus Torvalds 已提交
60 61 62 63 64 65

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
66 67 68
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
L
Linus Torvalds 已提交
69 70 71
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	/* A mask narrower than the ISA DMA threshold cannot be satisfied. */
	return mask >= ISA_DMA_THRESHOLD;
}

/*
 * Install a new DMA address mask on the device, provided the device has
 * a mask field and the requested mask is supportable.  Returns 0 on
 * success or -EIO if the mask cannot be honoured.
 */
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (dev->dma_mask && dma_supported(dev, dma_mask)) {
		*dev->dma_mask = dma_mask;
		return 0;
	}

	return -EIO;
}

/*
 * Alignment that DMA buffers should honour to avoid sharing a cache
 * line with unrelated data.  Fixed at 32 bytes here — presumably the
 * cache line size of the ARM cores this header targets; confirm before
 * relying on it for a specific CPU.
 */
static inline int dma_get_cache_alignment(void)
{
	enum { DMA_CACHE_ALIGN = 32 };

	return DMA_CACHE_ALIGN;
}

92
/*
 * Report whether the mapping at @handle is cache-coherent.  On this
 * architecture that is a global property of the platform, not of the
 * individual mapping.
 */
static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
	return arch_is_coherent() ? 1 : 0;
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
100
/* An all-bits-set DMA address marks a failed mapping (see note above). */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == ~0) ? 1 : 0;
}

105 106 107 108
/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
R
Russell King 已提交
109 110
/*
 * Deliberate dummy: noncoherent allocations always fail (NULL) on ARM,
 * steering drivers towards the coherent API instead.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

R
Russell King 已提交
115 116
/*
 * Counterpart of the dummy dma_alloc_noncoherent(); since that never
 * allocates anything, there is nothing to free here.
 */
static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

L
Linus Torvalds 已提交
120 121 122 123 124 125 126 127 128 129 130
/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
R
Russell King 已提交
131
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
L
Linus Torvalds 已提交
132 133 134 135 136 137 138 139 140 141 142 143 144 145

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
R
Russell King 已提交
146
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
L
Linus Torvalds 已提交
147 148 149 150 151 152 153 154 155 156 157 158 159

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
R
Russell King 已提交
160 161
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
L
Linus Torvalds 已提交
162 163 164 165 166 167 168 169 170 171 172 173 174


/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
R
Russell King 已提交
175 176
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);
L
Linus Torvalds 已提交
177 178 179 180

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

R
Russell King 已提交
181 182
int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
L
Linus Torvalds 已提交
183 184


185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209
#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems  the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 *
 */
R
Russell King 已提交
210 211
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long);
212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 *
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);

243 244 245
/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
R
Russell King 已提交
246 247 248 249 250 251
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);
252

253 254 255 256
/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
R
Russell King 已提交
257
		size_t, enum dma_data_direction);
258
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
R
Russell King 已提交
259
		size_t, enum dma_data_direction);
260
#else
261 262 263 264 265 266 267 268 269 270 271
/*
 * CONFIG_DMABOUNCE disabled: no buffer is ever bounced.  Returning 1
 * tells dma_sync_single_range_for_device() to go ahead with its own
 * cache maintenance on the real buffer.
 */
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

/*
 * CONFIG_DMABOUNCE disabled: nothing was bounced, so report 1
 * ("not handled") and let the caller perform cache maintenance.
 */
static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}
272 273


L
Linus Torvalds 已提交
274 275 276 277 278 279 280 281 282 283 284 285 286 287
/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
R
Russell King 已提交
288 289
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	BUG_ON(!valid_dma_direction(dir));

	/*
	 * Non-coherent hardware: perform the CPU cache maintenance
	 * appropriate to @dir before handing the buffer to the device.
	 */
	if (!arch_is_coherent())
		dma_cache_maint(cpu_addr, size, dir);

	dma_addr = virt_to_dma(dev, cpu_addr);

	return dma_addr;
}
298

L
Linus Torvalds 已提交
299 300 301 302 303 304 305 306 307 308 309 310
/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
311
 * can regain ownership by calling dma_unmap_page().
L
Linus Torvalds 已提交
312
 */
R
Russell King 已提交
313 314
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	/*
	 * Non-coherent hardware: maintain the CPU cache over the mapped
	 * portion of the page before the device touches it.
	 */
	if (!arch_is_coherent()) {
		void *kaddr = page_address(page) + offset;

		dma_cache_maint(kaddr, size, dir);
	}

	return page_to_dma(dev, page) + offset;
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
328 329
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
L
Linus Torvalds 已提交
330 331 332 333 334 335 336 337
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
R
Russell King 已提交
338 339
/*
 * CONFIG_DMABOUNCE disabled: no bounce buffer to copy back or release,
 * so unmapping a single buffer requires no work.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	/* nothing to do */
}
343
#endif /* CONFIG_DMABOUNCE */
L
Linus Torvalds 已提交
344 345 346 347 348

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
349 350
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
L
Linus Torvalds 已提交
351
 *
352 353
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
L
Linus Torvalds 已提交
354 355 356 357 358
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
R
Russell King 已提交
359 360
/* Page unmap is identical to single-buffer unmap; just delegate. */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, size, dir);
}

/**
366
 * dma_sync_single_range_for_cpu
L
Linus Torvalds 已提交
367 368
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
369 370 371
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
L
Linus Torvalds 已提交
372 373 374 375 376 377 378 379 380 381 382
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a dma_sync_for_device, and then the
 * device again owns the buffer.
 */
R
Russell King 已提交
383 384 385
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	/*
	 * Give dmabounce a chance to act on the buffer (presumably copying
	 * bounced data back for the CPU — see dmabounce.c).  The return
	 * value is intentionally ignored: unlike the _for_device variant,
	 * no CPU cache maintenance is performed here.
	 */
	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
}

R
Russell King 已提交
392 393 394
static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	/*
	 * Cache maintenance is needed only when dmabounce did not handle
	 * the buffer (non-zero return) and the hardware is not coherent.
	 */
	if (dmabounce_sync_for_device(dev, handle, offset, size, dir) &&
	    !arch_is_coherent())
		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

R
Russell King 已提交
405 406
/* Whole-buffer CPU sync: the range variant with a zero offset. */
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

R
Russell King 已提交
411 412
/* Whole-buffer device sync: the range variant with a zero offset. */
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}

417 418
/*
 * The scatter list versions of the above methods.
L
Linus Torvalds 已提交
419
 */
R
Russell King 已提交
420 421 422 423 424 425 426 427
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
428

L
Linus Torvalds 已提交
429 430 431

#endif /* __KERNEL__ */
#endif