dmabounce.c 14.4 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7
/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
8
 *  RAM, the remainder of memory is at the top and the DMA memory
S
Simon Arlott 已提交
9
 *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
L
Linus Torvalds 已提交
10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
28
#include <linux/page-flags.h>
L
Linus Torvalds 已提交
29 30 31 32
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
F
FUJITA Tomonori 已提交
33
#include <linux/scatterlist.h>
L
Linus Torvalds 已提交
34

35 36
#include <asm/cacheflush.h>

L
Linus Torvalds 已提交
37
#undef STATS
R
Russell King 已提交
38

L
Linus Torvalds 已提交
39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55
#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

/*
 * Book-keeping for one bounced mapping: the caller's original buffer
 * and the DMA-safe copy substituted for it.
 */
struct safe_buffer {
	struct list_head node;		/* entry in dmabounce_device_info::safe_buffers */

	/* original request */
	void		*ptr;		/* CPU address the caller asked to map */
	size_t		size;		/* length of the original buffer */
	int		direction;	/* enum dma_data_direction of the mapping */

	/* safe buffer info */
	struct dmabounce_pool *pool;	/* pool the safe buffer came from, or NULL
					 * when it was dma_alloc_coherent()'d */
	void		*safe;		/* CPU address of the bounce copy */
	dma_addr_t	safe_dma_addr;	/* DMA address handed to the device */
};

R
Russell King 已提交
61 62 63 64 65 66 67 68
/*
 * A fixed-object-size dma_pool used to satisfy bounce-buffer
 * allocations up to 'size' bytes, plus an allocation counter
 * when STATS is defined.
 */
struct dmabounce_pool {
	unsigned long	size;	/* largest request this pool can satisfy */
	struct dma_pool	*pool;	/* backing dma_pool of 'size'-byte objects */
#ifdef STATS
	unsigned long	allocs;	/* buffers handed out from this pool */
#endif
};

L
Linus Torvalds 已提交
69 70 71 72 73 74 75
/*
 * Per-device dmabounce state, installed in dev->archdata.dmabounce
 * by dmabounce_register_dev().
 */
struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;	/* live struct safe_buffer entries */
#ifdef STATS
	unsigned long total_allocs;	/* all bounce allocations */
	unsigned long map_op_count;	/* calls into map_single() */
	unsigned long bounce_count;	/* copies between safe/unsafe buffers */
	int attr_res;			/* device_create_file() result; gates removal */
#endif
	struct dmabounce_pool	small;	/* pool for small requests */
	struct dmabounce_pool	large;	/* pool for larger requests */

	rwlock_t lock;			/* protects safe_buffers list */
};

#ifdef STATS
85 86
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
L
Linus Torvalds 已提交
87
{
88 89 90 91
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
R
Russell King 已提交
92 93
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
94 95 96
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
L
Linus Torvalds 已提交
97
}
98 99

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
L
Linus Torvalds 已提交
100 101 102 103 104 105
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
R
Russell King 已提交
106
		  size_t size, enum dma_data_direction dir)
L
Linus Torvalds 已提交
107 108
{
	struct safe_buffer *buf;
R
Russell King 已提交
109
	struct dmabounce_pool *pool;
L
Linus Torvalds 已提交
110
	struct device *dev = device_info->dev;
111
	unsigned long flags;
L
Linus Torvalds 已提交
112 113 114 115

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

R
Russell King 已提交
116 117 118 119 120 121 122
	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}
L
Linus Torvalds 已提交
123 124 125 126 127 128 129

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

R
Russell King 已提交
130 131 132 133
	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;
L
Linus Torvalds 已提交
134

R
Russell King 已提交
135 136 137
	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
L
Linus Torvalds 已提交
138
	} else {
R
Russell King 已提交
139 140
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
L
Linus Torvalds 已提交
141 142
	}

R
Russell King 已提交
143 144 145 146
	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
L
Linus Torvalds 已提交
147 148 149 150 151
		kfree(buf);
		return NULL;
	}

#ifdef STATS
R
Russell King 已提交
152 153 154
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
L
Linus Torvalds 已提交
155 156
#endif

157
	write_lock_irqsave(&device_info->lock, flags);
L
Linus Torvalds 已提交
158
	list_add(&buf->node, &device_info->safe_buffers);
159 160
	write_unlock_irqrestore(&device_info->lock, flags);

L
Linus Torvalds 已提交
161 162 163 164 165 166 167
	return buf;
}

/*
 * Determine whether a DMA address belongs to one of our "safe"
 * bounce buffers.  Returns the tracking structure, or NULL when the
 * address was never bounced by this device.
 */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *found = NULL;
	struct safe_buffer *cursor;
	unsigned long flags;

	/* Reader lock: lookups may run concurrently; only add/del excludes. */
	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(cursor, &device_info->safe_buffers, node) {
		if (cursor->safe_dma_addr == safe_dma_addr) {
			found = cursor;
			break;
		}
	}

	read_unlock_irqrestore(&device_info->lock, flags);

	return found;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
186 187
	unsigned long flags;

L
Linus Torvalds 已提交
188 189
	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

190 191
	write_lock_irqsave(&device_info->lock, flags);

L
Linus Torvalds 已提交
192 193
	list_del(&buf->node);

194 195
	write_unlock_irqrestore(&device_info->lock, flags);

L
Linus Torvalds 已提交
196
	if (buf->pool)
R
Russell King 已提交
197
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
L
Linus Torvalds 已提交
198 199 200 201 202 203 204 205 206
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				    buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

207 208 209 210 211 212 213 214 215 216 217 218 219 220 221
/*
 * Resolve a dma_addr_t to its safe_buffer for this device, if any.
 * Returns NULL when the device is not registered with dmabounce or
 * when the address is an error cookie (logged with 'where' context).
 *
 * Fix: removed the dead 'if (dev) ... else pr_err(...)' branch —
 * 'dev' is guaranteed non-NULL once the first guard has passed, so
 * the pr_err() arm could never execute.
 */
static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

R
Russell King 已提交
222
/*
 * Create a streaming mapping for ptr/size, substituting a bounce
 * buffer when the memory is outside the device's DMA mask or when the
 * platform's dma_needs_bounce() demands it.  Returns the dma_addr_t
 * the device should use, or ~0 on failure.
 *
 * Fixes: 'buf == 0' pointer test replaced with '!buf'; size_t printed
 * with %#zx/%zu instead of the mismatched %#x/%d.
 */
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		/*
		 * For a mask of 2^n - 1 this yields 2^n, i.e. the largest
		 * mappable size; it is 0 (no limit) when mask + 1 wraps to 0.
		 */
		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#zx "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (!buf) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			       __func__, ptr);
			return ~0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		/* Device will read this memory: pre-fill the bounce copy. */
		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %zu\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		__dma_single_cpu_to_dev(ptr, size, dir);
	}

	return dma_addr;
}

R
Russell King 已提交
286 287
/*
 * Tear down a mapping created by map_single().  If the address was
 * bounced, copy the data back to the caller's buffer (for
 * DMA_FROM_DEVICE / DMA_BIDIRECTIONAL), flush the D-cache so user
 * mappings of page-cache pages stay coherent, and free the safe
 * buffer.  Otherwise perform the normal dev-to-cpu cache maintenance.
 *
 * Fix: size_t printed with %zu instead of the mismatched %d.
 */
static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		/* Unmap parameters must match the original mapping exactly. */
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %zu\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * Since we may have written to a page cache page,
			 * we need to ensure that the data will be coherent
			 * with user mappings.
			 */
			__cpuc_flush_dcache_area(ptr, size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	} else {
		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
	}
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
331
dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
L
Linus Torvalds 已提交
332 333 334 335 336
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

337
	BUG_ON(!valid_dma_direction(dir));
L
Linus Torvalds 已提交
338

R
Russell King 已提交
339
	return map_single(dev, ptr, size, dir);
L
Linus Torvalds 已提交
340
}
341
EXPORT_SYMBOL(__dma_map_single);
L
Linus Torvalds 已提交
342

343 344 345 346 347 348
/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
349
void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
350 351 352 353 354 355 356
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_single(dev, dma_addr, size, dir);
}
357
EXPORT_SYMBOL(__dma_unmap_single);
358

359
/*
 * Map a page for streaming DMA, bouncing through a safe buffer when
 * required.  Highmem pages are rejected with ~0 (mapping error):
 * bouncing needs a kernel virtual address (page_address()) for the
 * memcpy, which highmem pages do not reliably have.
 */
dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
			     "is not supported\n");
		return ~0;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(__dma_map_page);
376

L
Linus Torvalds 已提交
377 378 379 380 381 382
/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
383
void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
R
Russell King 已提交
384
		enum dma_data_direction dir)
L
Linus Torvalds 已提交
385 386 387 388 389 390
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_single(dev, dma_addr, size, dir);
}
391
EXPORT_SYMBOL(__dma_unmap_page);
L
Linus Torvalds 已提交
392

393 394
/*
 * Sync part of a (possibly bounced) streaming mapping for CPU access.
 * Returns 0 when the region was bounced (data has been copied back
 * from the safe buffer); returns 1 when no bounce buffer is involved,
 * leaving any further cache maintenance to the caller.
 *
 * Fix: size_t printed with %zu instead of the mismatched %d.
 */
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %zu\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);
L
Linus Torvalds 已提交
421

422 423
/*
 * Sync part of a (possibly bounced) streaming mapping for device
 * access.  Returns 0 when the region was bounced (fresh data has been
 * copied into the safe buffer); returns 1 when no bounce buffer is
 * involved, leaving any further cache maintenance to the caller.
 *
 * Fixes: size_t printed with %zu instead of the mismatched %d;
 * missing space after the __func__ argument.
 */
int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %zu\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);
L
Linus Torvalds 已提交
450

R
Russell King 已提交
451 452
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
R
Russell King 已提交
453 454 455 456 457 458 459 460 461 462
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

R
Russell King 已提交
463 464
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size)
L
Linus Torvalds 已提交
465 466
{
	struct dmabounce_device_info *device_info;
R
Russell King 已提交
467
	int ret;
L
Linus Torvalds 已提交
468 469 470

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
471 472
		dev_err(dev,
			"Could not allocated dmabounce_device_info\n");
L
Linus Torvalds 已提交
473 474 475
		return -ENOMEM;
	}

R
Russell King 已提交
476 477 478 479 480 481 482
	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
L
Linus Torvalds 已提交
483 484 485
	}

	if (large_buffer_size) {
R
Russell King 已提交
486 487 488 489 490 491 492 493
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
L
Linus Torvalds 已提交
494 495 496 497 498
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
499
	rwlock_init(&device_info->lock);
L
Linus Torvalds 已提交
500 501 502 503 504

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
505
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
L
Linus Torvalds 已提交
506 507
#endif

508
	dev->archdata.dmabounce = device_info;
L
Linus Torvalds 已提交
509

510
	dev_info(dev, "dmabounce: registered device\n");
L
Linus Torvalds 已提交
511 512

	return 0;
R
Russell King 已提交
513 514 515 516 517 518

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
L
Linus Torvalds 已提交
519
}
R
Russell King 已提交
520
EXPORT_SYMBOL(dmabounce_register_dev);
L
Linus Torvalds 已提交
521

R
Russell King 已提交
522
void dmabounce_unregister_dev(struct device *dev)
L
Linus Torvalds 已提交
523
{
524 525 526
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;
L
Linus Torvalds 已提交
527 528

	if (!device_info) {
529 530 531
		dev_warn(dev,
			 "Never registered with dmabounce but attempting"
			 "to unregister!\n");
L
Linus Torvalds 已提交
532 533 534 535
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
536 537
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
L
Linus Torvalds 已提交
538 539 540
		BUG();
	}

R
Russell King 已提交
541 542 543 544
	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);
L
Linus Torvalds 已提交
545 546

#ifdef STATS
547 548
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
L
Linus Torvalds 已提交
549 550 551 552
#endif

	kfree(device_info);

553
	dev_info(dev, "dmabounce: device unregistered\n");
L
Linus Torvalds 已提交
554 555 556 557 558 559
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");