/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06	 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/dma-contiguous.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

#include <dma-coherence.h>

#ifdef CONFIG_DMA_MAYBE_COHERENT
int coherentio = 0;	/* User defined DMA coherency from command line. */
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
	coherentio = 1;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	coherentio = 0;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif

static inline struct page *dma_addr_to_page(struct device *dev,
	dma_addr_t dma_addr)
{
	return pfn_to_page(
		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However this function is only called on non-I/O-coherent
 * systems and only the R10000 and R12000 are used in such systems, the
 * SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline int cpu_needs_post_dma_flush(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (boot_cpu_type() == CPU_R10000 ||
		boot_cpu_type() == CPU_R12000 ||
		boot_cpu_type() == CPU_BMIPS5000);
}

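/*
 * Rework the caller's gfp mask for a DMA allocation: zone modifiers are
 * stripped and __GFP_DMA/__GFP_DMA32 is chosen from the device's
 * coherent DMA mask and the zones the kernel was configured with; the
 * OOM killer is suppressed via __GFP_NORETRY.
 */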
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
			dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
			dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}

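/*
 * Allocate a zeroed buffer and return its cached kernel virtual
 * address; the device-visible address is returned through *dma_handle.
 * Cache maintenance is left to the caller (see dma_cache_sync()).
 */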
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_noncoherent);

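/*
 * ->alloc handler: try the per-device coherent pool first, then CMA for
 * non-atomic requests, then the page allocator.  For non-coherent
 * devices the buffer is written back and invalidated and, unless the
 * hardware maintains coherency, an uncached mapping is returned via
 * UNCAC_ADDR().
 */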
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;
	struct page *page = NULL;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
		page = dma_alloc_from_contiguous(dev,
					count, get_order(size));
	if (!page)
		page = alloc_pages(gfp, get_order(size));

	if (!page)
		return NULL;

	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = plat_map_dma_mem(dev, ret, size);
	if (!plat_device_is_coherent(dev)) {
		dma_cache_wback_inv((unsigned long) ret, size);
		if (!hw_coherentio)
			ret = UNCAC_ADDR(ret);
	}

	return ret;
}


void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_noncoherent);

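/*
 * ->free handler: the inverse of mips_dma_alloc_coherent().  Buffers
 * from the per-device coherent pool are released there; otherwise an
 * uncached address is converted back to its cached alias before the
 * pages are returned to CMA or the page allocator.
 */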
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev) && !hw_coherentio)
		addr = CAC_ADDR(addr);

	page = virt_to_page((void *) addr);

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}

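/*
 * Cache maintenance for one virtually contiguous region: writeback for
 * DMA_TO_DEVICE, invalidate for DMA_FROM_DEVICE and writeback plus
 * invalidate for DMA_BIDIRECTIONAL.
 */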
static inline void __dma_sync_virtual(void *addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages. But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			__dma_sync_virtual(addr + offset, len, direction);
			kunmap_atomic(addr);
		} else
			__dma_sync_virtual(page_address(page) + offset,
					   size, direction);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

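/*
 * ->unmap_page handler: CPUs that speculatively fill cachelines need an
 * extra flush once the transfer is done; afterwards the platform hooks
 * tear down the mapping.
 */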
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);
	plat_post_dma_flush(dev);
	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

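/*
 * ->map_sg handler: on non-coherent devices do the cache maintenance
 * for every scatterlist entry, then record each entry's device address.
 */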
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
	}

	return nents;
}

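/*
 * ->map_page handler: flush the region as needed on non-coherent
 * devices and return the device address for it.
 */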
static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, direction);

	return plat_map_dma_mem_page(dev, page) + offset;
}

static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
	int nhwentries, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nhwentries, i) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

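/*
 * ->sync_single_for_cpu handler: make a buffer written by the device
 * visible to the CPU.  Only CPUs that may have speculatively refilled
 * cachelines during the DMA need extra maintenance here.
 */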
static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
	plat_post_dma_flush(dev);
}

320 321
static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
L
323 324 325
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
L

328
static void mips_dma_sync_sg_for_cpu(struct device *dev,
A
	enum dma_data_direction direction)
L
	int i;
A
334

A
		for_each_sg(sglist, sg, nelems, i) {
337 338
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
A
	}
341
	plat_post_dma_flush(dev);
L

344
static void mips_dma_sync_sg_for_device(struct device *dev,
A
	enum dma_data_direction direction)
L
	int i;
A
L
A
		for_each_sg(sglist, sg, nelems, i) {
353 354
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
A
	}
L

359
int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
L
361
	return 0;
L

364
int mips_dma_supported(struct device *dev, u64 mask)
L
366
	return plat_dma_supported(dev, mask);
L

369
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
370
			 enum dma_data_direction direction)
L
372
	BUG_ON(direction == DMA_NONE);
L
374
	if (!plat_device_is_coherent(dev))
375
		__dma_sync_virtual(vaddr, size, direction);
L

378 379
EXPORT_SYMBOL(dma_cache_sync);

380
static struct dma_map_ops mips_default_dma_map_ops = {
381 382
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);