/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06	 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

/* Command-line override of the DMA coherency mode; 0 selects software
   cache maintenance until a "coherentio" boot parameter says otherwise. */
int coherentio = 0;	/* User defined DMA coherency from command line. */
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */

/* "coherentio" boot parameter: rely on hardware-maintained DMA coherency. */
static int __init setcoherentio(char *str)
{
	pr_info("Hardware DMA cache coherency (command line)\n");
	coherentio = 1;

	return 0;
}
early_param("coherentio", setcoherentio);

/* "nocoherentio" boot parameter: force software cache maintenance for DMA. */
static int __init setnocoherentio(char *str)
{
	pr_info("Software DMA cache coherency (command line)\n");
	coherentio = 0;

	return 0;
}
early_param("nocoherentio", setnocoherentio);

45
static inline struct page *dma_addr_to_page(struct device *dev,
46
	dma_addr_t dma_addr)
47
{
48 49
	return pfn_to_page(
		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
50 51
}

L
Linus Torvalds 已提交
52 53 54 55 56 57
/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

58 59 60
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
61 62
	       (current_cpu_type() == CPU_R10000 ||
	       current_cpu_type() == CPU_R12000);
63 64
}

65 66
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
67 68
	gfp_t dma_flag;

69 70 71
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

72
#ifdef CONFIG_ISA
73
	if (dev == NULL)
74
		dma_flag = __GFP_DMA;
75 76
	else
#endif
77
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
78
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
79 80 81 82 83 84 85 86 87 88 89 90 91
			dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
			dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
92 93
	else
#endif
94
		dma_flag = 0;
95 96 97 98

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

99
	return gfp | dma_flag;
100 101
}

L
Linus Torvalds 已提交
102
void *dma_alloc_noncoherent(struct device *dev, size_t size,
A
Al Viro 已提交
103
	dma_addr_t * dma_handle, gfp_t gfp)
L
Linus Torvalds 已提交
104 105
{
	void *ret;
106

107
	gfp = massage_gfp_flags(dev, gfp);
L
Linus Torvalds 已提交
108 109 110 111 112

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
113
		*dma_handle = plat_map_dma_mem(dev, ret, size);
L
Linus Torvalds 已提交
114 115 116 117 118 119
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_noncoherent);

120
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
121
	dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
L
Linus Torvalds 已提交
122 123 124
{
	void *ret;

125 126 127
	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

128
	gfp = massage_gfp_flags(dev, gfp);
129 130 131

	ret = (void *) __get_free_pages(gfp, get_order(size));

L
Linus Torvalds 已提交
132
	if (ret) {
133 134 135 136 137
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
138 139
			if (!hw_coherentio)
				ret = UNCAC_ADDR(ret);
140
		}
L
Linus Torvalds 已提交
141 142 143 144 145 146 147 148 149
	}

	return ret;
}


/* Release memory obtained from dma_alloc_noncoherent(). */
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	free_pages((unsigned long) vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_noncoherent);

155
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
156
	dma_addr_t dma_handle, struct dma_attrs *attrs)
L
Linus Torvalds 已提交
157 158
{
	unsigned long addr = (unsigned long) vaddr;
159 160 161 162
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;
L
Linus Torvalds 已提交
163

164
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
165

166
	if (!plat_device_is_coherent(dev) && !hw_coherentio)
167 168
		addr = CAC_ADDR(addr);

L
Linus Torvalds 已提交
169 170 171
	free_pages(addr, get_order(size));
}

172
static inline void __dma_sync_virtual(void *addr, size_t size,
L
Linus Torvalds 已提交
173 174 175 176
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
177
		dma_cache_wback((unsigned long)addr, size);
L
Linus Torvalds 已提交
178 179 180
		break;

	case DMA_FROM_DEVICE:
181
		dma_cache_inv((unsigned long)addr, size);
L
Linus Torvalds 已提交
182 183 184
		break;

	case DMA_BIDIRECTIONAL:
185
		dma_cache_wback_inv((unsigned long)addr, size);
L
Linus Torvalds 已提交
186 187 188 189 190 191 192
		break;

	default:
		BUG();
	}
}

193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229
/*
 * A single sg entry may refer to multiple physically contiguous
 * pages. But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			__dma_sync_virtual(addr + offset, len, direction);
			kunmap_atomic(addr);
		} else
			__dma_sync_virtual(page_address(page) + offset,
					   size, direction);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

230 231
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
L
Linus Torvalds 已提交
232
{
233
	if (cpu_is_noncoherent_r10000(dev))
234 235
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);
L
Linus Torvalds 已提交
236

237
	plat_unmap_dma_mem(dev, dma_addr, size, direction);
L
Linus Torvalds 已提交
238 239
}

240 241
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
L
Linus Torvalds 已提交
242 243 244 245
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
246 247 248
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
249 250 251
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
252 253
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
L
Linus Torvalds 已提交
254 255 256 257 258
	}

	return nents;
}

259 260 261
static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
L
Linus Torvalds 已提交
262
{
263
	if (!plat_device_is_coherent(dev))
264
		__dma_sync(page, offset, size, direction);
L
Linus Torvalds 已提交
265

266
	return plat_map_dma_mem_page(dev, page) + offset;
L
Linus Torvalds 已提交
267 268
}

269 270 271
static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nhwentries, enum dma_data_direction direction,
	struct dma_attrs *attrs)
L
Linus Torvalds 已提交
272 273 274 275
{
	int i;

	for (i = 0; i < nhwentries; i++, sg++) {
276
		if (!plat_device_is_coherent(dev) &&
277 278 279
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
280
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
L
Linus Torvalds 已提交
281 282 283
	}
}

284 285
static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
L
Linus Torvalds 已提交
286
{
287 288 289
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
L
Linus Torvalds 已提交
290 291
}

292 293
static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
L
Linus Torvalds 已提交
294
{
295
	plat_extra_sync_for_device(dev);
296 297 298
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
L
Linus Torvalds 已提交
299 300
}

301 302
static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
L
Linus Torvalds 已提交
303 304
{
	int i;
305

L
Linus Torvalds 已提交
306
	/* Make sure that gcc doesn't leave the empty loop body.  */
307
	for (i = 0; i < nelems; i++, sg++) {
308
		if (cpu_is_noncoherent_r10000(dev))
309 310
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
311
	}
L
Linus Torvalds 已提交
312 313
}

314 315
static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
L
Linus Torvalds 已提交
316 317 318 319
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body.  */
320 321
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
322 323
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
324
	}
L
Linus Torvalds 已提交
325 326
}

327
int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
L
Linus Torvalds 已提交
328
{
329
	return plat_dma_mapping_error(dev, dma_addr);
L
Linus Torvalds 已提交
330 331
}

332
int mips_dma_supported(struct device *dev, u64 mask)
L
Linus Torvalds 已提交
333
{
334
	return plat_dma_supported(dev, mask);
L
Linus Torvalds 已提交
335 336
}

337
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
338
			 enum dma_data_direction direction)
L
Linus Torvalds 已提交
339
{
340
	BUG_ON(direction == DMA_NONE);
L
Linus Torvalds 已提交
341

342
	plat_extra_sync_for_device(dev);
343
	if (!plat_device_is_coherent(dev))
344
		__dma_sync_virtual(vaddr, size, direction);
L
Linus Torvalds 已提交
345 346
}

347 348
EXPORT_SYMBOL(dma_cache_sync);

349
static struct dma_map_ops mips_default_dma_map_ops = {
350 351
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);

/* Number of dma-debug tracking entries preallocated at boot. */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

/* Initialise the DMA-API debugging facility early in boot. */
static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);