/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

static inline unsigned long dma_addr_to_virt(struct device *dev,
	dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

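/*
 * To make the distinction concrete (illustrative, not part of the code):
 * on a machine without hardware coherency, dma_alloc_coherent() below
 * returns an uncached alias created with UNCAC_ADDR(), which is what
 * Linux calls "coherent" memory; a platform where plat_device_is_coherent()
 * returns 1 maintains coherency in hardware and is what MIPS documentation
 * calls a coherent system, and there no cache maintenance is needed at all.
 */
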
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
	       current_cpu_type() == CPU_R12000);
}

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
			dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
			dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}

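/*
 * Worked example for the zone selection above (assuming a kernel with
 * both CONFIG_ZONE_DMA and CONFIG_ZONE_DMA32): a device advertising
 * coherent_dma_mask = DMA_BIT_MASK(30) is below the 32-bit cutoff and
 * gets __GFP_DMA; a 32-bit master gets __GFP_DMA32; a full 64-bit
 * master gets no zone flag at all:
 *
 *	gfp = massage_gfp_flags(dev, GFP_KERNEL);
 *	ret = (void *) __get_free_pages(gfp, get_order(size));
 */
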
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

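/*
 * Minimal usage sketch ("ring", "ring_dma" and RING_BYTES are made-up
 * driver-side names, not part of this file):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	... hand ring_dma to the device, use ring from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
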
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

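/*
 * Cache maintenance chosen per direction in __dma_sync() below: a
 * CPU-to-device transfer needs a writeback so the device sees data still
 * sitting in the cache; a device-to-CPU transfer needs an invalidate so
 * the CPU does not read stale lines; a bidirectional transfer needs both.
 */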
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

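/*
 * Typical streaming use (sketch; "buf" and "len" are illustrative):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... start the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
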
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
		           direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
				                   (void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

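/*
 * Scatter-gather sketch (illustrative): each segment is synced and
 * mapped individually by dma_map_sg() above; pass the original nents,
 * not the returned count, back to dma_unmap_sg():
 *
 *	int mapped = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	... program the device from sg_dma_address()/sg_dma_len() ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */
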
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		__dma_sync(addr, size, direction);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

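/*
 * Ownership ping-pong sketch (illustrative): a driver that keeps one
 * long-lived mapping passes the buffer back and forth instead of
 * remapping it for every transfer:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now look at the buffer ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... the device may now write to it again ...
 */
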
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
			           sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
			           sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);