/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/dma-contiguous.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

#include <dma-coherence.h>

#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
/* User defined DMA coherency from command line. */
enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
	coherentio = IO_COHERENCE_ENABLED;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	coherentio = IO_COHERENCE_DISABLED;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif

static inline struct page *dma_addr_to_page(struct device *dev,
	dma_addr_t dma_addr)
{
	return pfn_to_page(
		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However this function is only called on non-I/O-coherent
 * systems and only the R10000 and R12000 are used in such systems, the
 * SGI IP28 Indigo² resp. the SGI IP32 aka O2.
 */
static inline int cpu_needs_post_dma_flush(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (boot_cpu_type() == CPU_R10000 ||
		boot_cpu_type() == CPU_R12000 ||
		boot_cpu_type() == CPU_BMIPS5000);
}

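/*
 * Pick the GFP zone for an allocation from the device's coherent DMA mask:
 * fall back to ZONE_DMA or ZONE_DMA32 when the device cannot reach all of
 * memory, and strip whatever zone specifiers the caller passed in.
 */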
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	     if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
			dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
			dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	     if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	     if (dev == NULL ||
		 dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}

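/*
 * Allocate zeroed pages for a DMA_ATTR_NON_CONSISTENT request and return
 * their cached kernel address.  The caller owns coherency and is expected
 * to bracket CPU accesses with dma_cache_sync(); a rough sketch, with a
 * hypothetical driver buffer "buf" of "len" bytes:
 *
 *	memcpy(buf, data, len);
 *	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
 *	... start the device transfer ...
 */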
static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

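/*
 * Coherent allocation: try CMA first when it is available and the caller
 * may block, then fall back to the page allocator.  For non-coherent
 * devices the buffer is written back and invalidated, and the returned
 * pointer is switched to its uncached alias via UNCAC_ADDR().
 */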
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;
	struct page *page = NULL;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/*
	 * XXX: seems like the coherent and non-coherent implementations could
	 * be consolidated.
	 */
	if (attrs & DMA_ATTR_NON_CONSISTENT)
		return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp);

	gfp = massage_gfp_flags(dev, gfp);

	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 gfp);
	if (!page)
		page = alloc_pages(gfp, get_order(size));

	if (!page)
		return NULL;

	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = plat_map_dma_mem(dev, ret, size);
	if (!plat_device_is_coherent(dev)) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}


static void mips_dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}

static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	if (attrs & DMA_ATTR_NON_CONSISTENT) {
		mips_dma_free_noncoherent(dev, size, vaddr, dma_handle);
		return;
	}

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	page = virt_to_page((void *) addr);

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}

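/*
 * Map a coherent buffer into user space.  The uncached kernel alias is
 * converted back with CAC_ADDR() so the backing struct page can be found,
 * while the user mapping itself is made uncached (or write-combined when
 * DMA_ATTR_WRITE_COMBINE is requested).
 */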
static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
	void *cpu_addr, dma_addr_t dma_addr, size_t size,
	unsigned long attrs)
{
	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)cpu_addr;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	pfn = page_to_pfn(virt_to_page((void *)addr));

	if (attrs & DMA_ATTR_WRITE_COMBINE)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}

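/*
 * Cache maintenance for a virtually addressed buffer: write back before
 * the device reads it (DMA_TO_DEVICE), invalidate before the CPU reads
 * what the device wrote (DMA_FROM_DEVICE), and do both for
 * DMA_BIDIRECTIONAL.
 */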
static inline void __dma_sync_virtual(void *addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages. But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			__dma_sync_virtual(addr + offset, len, direction);
			kunmap_atomic(addr);
		} else
			__dma_sync_virtual(page_address(page) + offset,
					   size, direction);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

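/*
 * Tear down a streaming mapping.  On CPUs that may speculatively refill
 * cache lines while the transfer is in flight (see
 * cpu_needs_post_dma_flush()) the buffer must be invalidated once more
 * after the DMA has completed.
 */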
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, unsigned long attrs)
{
	if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);
	plat_post_dma_flush(dev);
	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

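/*
 * Map a scatterlist for streaming DMA: flush each entry on non-coherent
 * devices (unless the caller asked to skip CPU syncs) and translate its
 * page into a device address with plat_map_dma_mem_page().
 */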
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
	int nents, enum dma_data_direction direction, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		if (!plat_device_is_coherent(dev) &&
		    !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
	}

	return nents;
}

static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	unsigned long attrs)
{
	if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(page, offset, size, direction);

	return plat_map_dma_mem_page(dev, page) + offset;
}

static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
	int nhwentries, enum dma_data_direction direction,
	unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nhwentries, i) {
		if (!plat_device_is_coherent(dev) &&
		    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

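/*
 * The sync_*_for_cpu hooks only need cache maintenance on CPUs that can
 * speculatively refill lines during DMA; everyone else relies on the
 * invalidation done when the buffer was mapped.
 */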
static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
	plat_post_dma_flush(dev);
}

static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sglist, int nelems,
	enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	if (cpu_needs_post_dma_flush(dev)) {
		for_each_sg(sglist, sg, nelems, i) {
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		}
	}
	plat_post_dma_flush(dev);
}

static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sglist, int nelems,
	enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	if (!plat_device_is_coherent(dev)) {
		for_each_sg(sglist, sg, nelems, i) {
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		}
	}
}

int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

int mips_dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

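/*
 * Explicit coherency hook for buffers obtained with
 * DMA_ATTR_NON_CONSISTENT; a no-op on I/O-coherent platforms.
 */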
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
			 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		__dma_sync_virtual(vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);

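/*
 * Default dma_map_ops for MIPS.  Platforms with their own DMA address
 * translation can repoint mips_dma_map_ops during early setup.
 */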
static struct dma_map_ops mips_default_dma_map_ops = {
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
	.mmap = mips_dma_mmap,
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);