/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc,
 * used by both the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
				 size_t size, int direction);

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(size, handle, gfp)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

#ifdef CONFIG_PPC64

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
				size_t size, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
};

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
		return NULL;
	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}
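
/*
 * Sketch of how platform/bus code would wire a device up (assumes a
 * struct device *dev obtained from the bus probe path):
 *
 *	set_dma_ops(dev, &dma_direct_ops);
 *
 * after which the dma_* wrappers below dispatch through
 * dev->archdata.dma_ops.
 */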

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
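
/*
 * A hypothetical driver negotiates its mask before any mapping and
 * falls back when the wider mask is refused:
 *
 *	if (dma_set_mask(dev, 0xffffffffffffffffULL) != 0 &&
 *	    dma_set_mask(dev, 0xffffffffULL) != 0)
 *		return -EIO;
 */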

static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *cpu_addr,
					      size_t size,
					      enum dma_data_direction direction,
					      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, cpu_addr, size, direction, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev,
					  dma_addr_t dma_addr,
					  size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction direction,
					    struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, page_address(page) + offset, size,
			direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}
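
/*
 * Sketch of the _attrs variants in use (assumes the DEFINE_DMA_ATTRS()
 * and dma_set_attr() helpers from <linux/dma-attrs.h>; buf/size are
 * hypothetical driver state):
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 *	addr = dma_map_single_attrs(dev, buf, size, DMA_TO_DEVICE, &attrs);
 *	...
 *	dma_unmap_single_attrs(dev, addr, size, DMA_TO_DEVICE, &attrs);
 */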

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}
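
/*
 * Typical streaming use of the wrappers above, checked with
 * dma_mapping_error() defined later in this file (a sketch only):
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		goto fail;
 *	... let the device perform the transfer ...
 *	dma_unmap_single(dev, addr, size, DMA_TO_DEVICE);
 */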

/*
 * Available generic sets of operations
 */
extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle,
				       gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
	}

	return nents;
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	/* We don't do anything here. */
}

#endif /* CONFIG_PPC64 */

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
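
/*
 * Hypothetical use of the sync pair: a driver that inspects a buffer
 * which stays mapped brackets the CPU access like so:
 *
 *	dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);
 *	... examine the data on the CPU side ...
 *	dma_sync_single_for_device(dev, addr, size, DMA_FROM_DEVICE);
 */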

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif	/* _ASM_DMA_MAPPING_H */