/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
28
#include <linux/page-flags.h>
L
Linus Torvalds 已提交
29 30 31 32
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
F
FUJITA Tomonori 已提交
33
#include <linux/scatterlist.h>
L
Linus Torvalds 已提交
34

35 36
#include <asm/cacheflush.h>

L
Linus Torvalds 已提交
37
/* Define STATS to collect per-device bounce statistics (sysfs attribute). */
#undef STATS

/*
 * DO_STATS(X) executes X only when STATS is defined, so statistics
 * bookkeeping compiles away entirely in production builds.
 */
#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

/*
 * Tracks one bounce mapping: the caller's original (unsafe) buffer and
 * the DMA-safe buffer substituted for it.  Linked into the owning
 * device's dmabounce_device_info::safe_buffers list.
 */
struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;		/* caller's buffer (virtual address) */
	size_t		size;		/* length of the mapping */
	int		direction;	/* enum dma_data_direction at map time */

	/* safe buffer info */
	struct dmabounce_pool *pool;	/* pool it came from, or NULL if
					 * allocated via dma_alloc_coherent() */
	void		*safe;		/* bounce buffer (virtual address) */
	dma_addr_t	safe_dma_addr;	/* bounce buffer bus address */
};

R
Russell King 已提交
61 62 63 64 65 66 67 68
/*
 * A dma_pool of fixed-size bounce buffers; requests up to 'size' bytes
 * are satisfied from 'pool' instead of dma_alloc_coherent().
 */
struct dmabounce_pool {
	unsigned long	size;	/* maximum request size this pool serves */
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;	/* number of buffers handed out */
#endif
};

L
Linus Torvalds 已提交
69 70 71 72 73 74 75
/*
 * Per-device dmabounce state, hung off dev->archdata.dmabounce by
 * dmabounce_register_dev().
 */
struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;	/* live safe_buffer mappings */
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;		/* device_create_file() result; 0 => attribute
				 * exists and must be removed on unregister */
#endif
	struct dmabounce_pool	small;	/* pool for small requests */
	struct dmabounce_pool	large;	/* pool for medium requests */

	rwlock_t lock;		/* protects safe_buffers list */
};

#ifdef STATS
85 86
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
L
Linus Torvalds 已提交
87
{
88 89 90 91
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
R
Russell King 已提交
92 93
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
94 95 96
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
L
Linus Torvalds 已提交
97
}
98 99

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
L
Linus Torvalds 已提交
100 101 102 103 104 105
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
R
Russell King 已提交
106
		  size_t size, enum dma_data_direction dir)
L
Linus Torvalds 已提交
107 108
{
	struct safe_buffer *buf;
R
Russell King 已提交
109
	struct dmabounce_pool *pool;
L
Linus Torvalds 已提交
110
	struct device *dev = device_info->dev;
111
	unsigned long flags;
L
Linus Torvalds 已提交
112 113 114 115

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

R
Russell King 已提交
116 117 118 119 120 121 122
	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}
L
Linus Torvalds 已提交
123 124 125 126 127 128 129

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

R
Russell King 已提交
130 131 132 133
	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;
L
Linus Torvalds 已提交
134

R
Russell King 已提交
135 136 137
	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
L
Linus Torvalds 已提交
138
	} else {
R
Russell King 已提交
139 140
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
L
Linus Torvalds 已提交
141 142
	}

R
Russell King 已提交
143 144 145 146
	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
L
Linus Torvalds 已提交
147 148 149 150 151
		kfree(buf);
		return NULL;
	}

#ifdef STATS
R
Russell King 已提交
152 153 154
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
L
Linus Torvalds 已提交
155 156
#endif

157
	write_lock_irqsave(&device_info->lock, flags);
L
Linus Torvalds 已提交
158
	list_add(&buf->node, &device_info->safe_buffers);
159 160
	write_unlock_irqrestore(&device_info->lock, flags);

L
Linus Torvalds 已提交
161 162 163 164 165 166 167
	return buf;
}

/*
 * Look up the safe_buffer whose bounce buffer lives at @safe_dma_addr.
 * Returns NULL when the address does not belong to one of our bounce
 * buffers (i.e. the mapping was direct).  Takes the read side of the
 * device lock while walking the list.
 */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *entry, *found = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(entry, &device_info->safe_buffers, node) {
		if (entry->safe_dma_addr == safe_dma_addr) {
			found = entry;
			break;
		}
	}

	read_unlock_irqrestore(&device_info->lock, flags);

	return found;
}

/*
 * Unlink @buf from the device's safe-buffer list and release both the
 * bounce buffer (back to its pool, or via dma_free_coherent()) and the
 * tracking structure itself.
 */
static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	/* exclusive access: we are removing a list node */
	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	/* buf->pool was recorded at allocation time; mirror that choice */
	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				    buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

207 208 209 210 211 212 213 214 215 216 217 218 219 220 221
/*
 * Validate @dev/@dma_addr and look up the corresponding bounce buffer.
 * Returns NULL when the device is not registered with dmabounce, when
 * the address is an error cookie (logged with @where for context), or
 * when the mapping was direct (not bounced).
 */
static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		/* dev is known non-NULL here: the guard above returned
		 * already, so the old NULL-dev pr_err() branch was dead. */
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

R
Russell King 已提交
222
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
L
Linus Torvalds 已提交
223 224
		enum dma_data_direction dir)
{
225
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
L
Linus Torvalds 已提交
226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			       __func__, ptr);
			return 0;
		}

		dev_dbg(dev,
262 263 264
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);
L
Linus Torvalds 已提交
265 266 267 268 269 270 271

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
R
Russell King 已提交
272
		ptr = buf->safe;
L
Linus Torvalds 已提交
273 274

		dma_addr = buf->safe_dma_addr;
275 276 277 278 279
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
280
		dma_cache_maint(ptr, size, dir);
L
Linus Torvalds 已提交
281 282 283 284 285
	}

	return dma_addr;
}

R
Russell King 已提交
286 287
/*
 * Core unmapping routine.  If @dma_addr was a bounce buffer, copy any
 * inbound data back to the caller's original buffer, perform the cache
 * maintenance needed to keep the original buffer's cache lines coherent
 * with RAM, and release the bounce buffer.  Direct mappings need no
 * work here.
 */
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		/* unmap must mirror the original map call exactly */
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			dmac_clean_range(ptr, ptr + size);
			outer_clean_range(__pa(ptr), __pa(ptr) + size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	}
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	/* DMA_NONE is never a valid mapping direction */
	BUG_ON(!valid_dma_direction(dir));

	/* map_single() does the bounce decision and any needed copy */
	return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(dma_map_single);
L
Linus Torvalds 已提交
344

345
/*
 * Page-based variant of dma_map_single().  Highmem pages are rejected:
 * bouncing needs a permanent kernel mapping to memcpy() from/to, which
 * highmem pages do not have.
 */
dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
			     "is not supported\n");
		/* ~0 is the error cookie tested by dma_mapping_error() */
		return ~0;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(dma_map_page);

L
Linus Torvalds 已提交
363 364 365 366 367 368 369
/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	/* unmap_single() handles copy-back, cache maintenance and free */
	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_single);
L
Linus Torvalds 已提交
379

380 381
/*
 * Partial-sync hook called by the arch dma_sync_*_for_cpu() wrappers.
 * If @addr is a bounced mapping, copy the device-written window
 * [@off, @off + @sz) from the safe buffer back to the original buffer.
 *
 * Returns 0 when the buffer was bounced (sync handled here), 1 when the
 * mapping was direct and the caller must do normal cache maintenance.
 */
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		/* %zu: sz is a size_t (was %d - format/arg mismatch) */
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %zu\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);
L
Linus Torvalds 已提交
408

409 410
/*
 * Partial-sync hook called by the arch dma_sync_*_for_device() wrappers.
 * If @addr is a bounced mapping, copy the CPU-written window
 * [@off, @off + @sz) from the original buffer into the safe buffer so
 * the device sees it.
 *
 * Returns 0 when the buffer was bounced (sync handled here), 1 when the
 * mapping was direct and the caller must do normal cache maintenance.
 */
int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		/* %zu: sz is a size_t (was %d - format/arg mismatch) */
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %zu\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);
L
Linus Torvalds 已提交
437

R
Russell King 已提交
438 439
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
R
Russell King 已提交
440 441 442 443 444 445 446 447 448 449
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

R
Russell King 已提交
450 451
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size)
L
Linus Torvalds 已提交
452 453
{
	struct dmabounce_device_info *device_info;
R
Russell King 已提交
454
	int ret;
L
Linus Torvalds 已提交
455 456 457

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
458 459
		dev_err(dev,
			"Could not allocated dmabounce_device_info\n");
L
Linus Torvalds 已提交
460 461 462
		return -ENOMEM;
	}

R
Russell King 已提交
463 464 465 466 467 468 469
	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
L
Linus Torvalds 已提交
470 471 472
	}

	if (large_buffer_size) {
R
Russell King 已提交
473 474 475 476 477 478 479 480
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
L
Linus Torvalds 已提交
481 482 483 484 485
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
486
	rwlock_init(&device_info->lock);
L
Linus Torvalds 已提交
487 488 489 490 491

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
492
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
L
Linus Torvalds 已提交
493 494
#endif

495
	dev->archdata.dmabounce = device_info;
L
Linus Torvalds 已提交
496

497
	dev_info(dev, "dmabounce: registered device\n");
L
Linus Torvalds 已提交
498 499

	return 0;
R
Russell King 已提交
500 501 502 503 504 505

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
L
Linus Torvalds 已提交
506
}
R
Russell King 已提交
507
EXPORT_SYMBOL(dmabounce_register_dev);
L
Linus Torvalds 已提交
508

R
Russell King 已提交
509
void dmabounce_unregister_dev(struct device *dev)
L
Linus Torvalds 已提交
510
{
511 512 513
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;
L
Linus Torvalds 已提交
514 515

	if (!device_info) {
516 517 518
		dev_warn(dev,
			 "Never registered with dmabounce but attempting"
			 "to unregister!\n");
L
Linus Torvalds 已提交
519 520 521 522
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
523 524
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
L
Linus Torvalds 已提交
525 526 527
		BUG();
	}

R
Russell King 已提交
528 529 530 531
	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);
L
Linus Torvalds 已提交
532 533

#ifdef STATS
534 535
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
L
Linus Torvalds 已提交
536 537 538 539
#endif

	kfree(device_info);

540
	dev_info(dev, "dmabounce: device unregistered\n");
L
Linus Torvalds 已提交
541 542 543 544 545 546
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

/* Module metadata. */
MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");