/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

/*
 * Translate a bus (DMA) address back into the cached kernel virtual
 * address it maps, using the platform's address translation hook.
 */
static inline unsigned long dma_addr_to_virt(struct device *dev,
	dma_addr_t dma_addr)
{
	unsigned long paddr = plat_dma_addr_to_phys(dev, dma_addr);

	return (unsigned long)phys_to_virt(paddr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
41 42
	       (current_cpu_type() == CPU_R10000 ||
	       current_cpu_type() == CPU_R12000);
43 44
}

45 46 47 48 49
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

50
#ifdef CONFIG_ZONE_DMA
51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp;
}

L
Linus Torvalds 已提交
70
void *dma_alloc_noncoherent(struct device *dev, size_t size,
A
Al Viro 已提交
71
	dma_addr_t * dma_handle, gfp_t gfp)
L
Linus Torvalds 已提交
72 73
{
	void *ret;
74

75
	gfp = massage_gfp_flags(dev, gfp);
L
Linus Torvalds 已提交
76 77 78 79 80

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
81
		*dma_handle = plat_map_dma_mem(dev, ret, size);
L
Linus Torvalds 已提交
82 83 84 85 86 87 88 89
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
A
Al Viro 已提交
90
	dma_addr_t * dma_handle, gfp_t gfp)
L
Linus Torvalds 已提交
91 92 93
{
	void *ret;

94 95 96
	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

97
	gfp = massage_gfp_flags(dev, gfp);
98 99 100

	ret = (void *) __get_free_pages(gfp, get_order(size));

L
Linus Torvalds 已提交
101
	if (ret) {
102 103 104 105 106 107 108
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
L
Linus Torvalds 已提交
109 110 111 112 113 114 115 116 117 118
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Free memory obtained from dma_alloc_noncoherent().
 */
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

/*
 * Free memory obtained from dma_alloc_coherent(), undoing the uncached
 * mapping first on non-coherent platforms.
 */
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	/* Memory from a per-device coherent pool is released there. */
	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);	/* back to the cached view */

	/* Reuse the order computed above instead of a second get_order(). */
	free_pages(addr, order);
}

EXPORT_SYMBOL(dma_free_coherent);

/*
 * Perform the cache maintenance a DMA transfer requires on a
 * non-coherent platform.  @addr is a cached kernel virtual address.
 * BUGs on an invalid direction.
 */
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	if (direction == DMA_TO_DEVICE)
		dma_cache_wback(addr, size);
	else if (direction == DMA_FROM_DEVICE)
		dma_cache_inv(addr, size);
	else if (direction == DMA_BIDIRECTIONAL)
		dma_cache_wback_inv(addr, size);
	else
		BUG();
}

/*
 * Map a single CPU buffer for DMA: sync the caches when the device is
 * not hardware-coherent, then return the bus address.
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long) ptr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

/*
 * Tear down a dma_map_single() mapping.  Non-coherent R10000/R12000
 * systems need a cache sync before the CPU reads the data.
 */
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr = dma_addr_to_virt(dev, dma_addr);

		__dma_sync(addr, size, direction);
	}

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;
199

J
Jens Axboe 已提交
200
		addr = (unsigned long) sg_virt(sg);
201
		if (!plat_device_is_coherent(dev) && addr)
J
Jens Axboe 已提交
202
			__dma_sync(addr, sg->length, direction);
203
		sg->dma_address = plat_map_dma_mem(dev,
J
Jens Axboe 已提交
204
				                   (void *)addr, sg->length);
L
Linus Torvalds 已提交
205 206 207 208 209 210 211 212 213 214 215 216
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

/*
 * Map one page (plus offset) for DMA and return its bus address.
 */
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long) page_address(page) + offset,
			   size, direction);

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
238 239
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
J
Jens Axboe 已提交
240
			addr = (unsigned long) sg_virt(sg);
241
			if (addr)
J
Jens Axboe 已提交
242
				__dma_sync(addr, sg->length, direction);
243
		}
244
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
L
Linus Torvalds 已提交
245 246 247 248 249 250 251 252 253
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

/*
 * Make a mapped buffer visible to the CPU after the device wrote it.
 * Only needed on non-coherent R10000/R12000 systems.
 */
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dev, dma_handle), size,
			   direction);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

/*
 * Flush CPU-side writes to a mapped buffer so the device sees them.
 */
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_virt(dev, dma_handle), size,
			   direction);
}

EXPORT_SYMBOL(dma_sync_single_for_device);

/*
 * Partial-buffer variant of dma_sync_single_for_cpu(): sync only
 * @size bytes starting at @offset into the mapping.
 */
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dev, dma_handle) + offset,
			   size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

/*
 * Partial-buffer variant of dma_sync_single_for_device(): sync only
 * @size bytes starting at @offset into the mapping.
 */
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_virt(dev, dma_handle) + offset,
			   size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;
316

L
Linus Torvalds 已提交
317
	BUG_ON(direction == DMA_NONE);
318

L
Linus Torvalds 已提交
319
	/* Make sure that gcc doesn't leave the empty loop body.  */
320
	for (i = 0; i < nelems; i++, sg++) {
321
		if (cpu_is_noncoherent_r10000(dev))
J
Jens Axboe 已提交
322
			__dma_sync((unsigned long)page_address(sg_page(sg)),
323 324
			           sg->length, direction);
	}
L
Linus Torvalds 已提交
325 326 327 328 329 330 331 332 333 334 335 336
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
337 338
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
J
Jens Axboe 已提交
339
			__dma_sync((unsigned long)page_address(sg_page(sg)),
340 341
			           sg->length, direction);
	}
L
Linus Torvalds 已提交
342 343 344 345
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

346
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
L
Linus Torvalds 已提交
347
{
348
	return plat_dma_mapping_error(dev, dma_addr);
L
Linus Torvalds 已提交
349 350 351 352 353 354
}

EXPORT_SYMBOL(dma_mapping_error);

/*
 * Report whether the device can DMA to every address covered by @mask;
 * delegated to the platform.
 */
int dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);

360
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
361
	       enum dma_data_direction direction)
L
Linus Torvalds 已提交
362
{
363
	BUG_ON(direction == DMA_NONE);
L
Linus Torvalds 已提交
364

365
	plat_extra_sync_for_device(dev);
366
	if (!plat_device_is_coherent(dev))
367
		__dma_sync((unsigned long)vaddr, size, direction);
L
Linus Torvalds 已提交
368 369 370
}

EXPORT_SYMBOL(dma_cache_sync);