/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06	 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/dma-contiguous.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

#include <dma-coherence.h>

#ifdef CONFIG_DMA_MAYBE_COHERENT
int coherentio = 0;	/* User defined DMA coherency from command line. */
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
	coherentio = 1;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	coherentio = 0;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif

static inline struct page *dma_addr_to_page(struct device *dev,
	dma_addr_t dma_addr)
{
	return pfn_to_page(
		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However this function is only called on non-I/O-coherent
 * systems and only the R10000 and R12000 are used in such systems, the
 * SGI IP28 Indigo² and SGI IP32 aka O2 respectively.
 */
static inline int cpu_needs_post_dma_flush(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (boot_cpu_type() == CPU_R10000 ||
		boot_cpu_type() == CPU_R12000 ||
		boot_cpu_type() == CPU_BMIPS5000);
}

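/*
 * Pick GFP zone modifiers that match the device's coherent DMA mask:
 * allocations are steered into ZONE_DMA or ZONE_DMA32 for devices that
 * cannot address all of memory.  Caller-supplied zone specifiers are
 * ignored and __GFP_NORETRY is added so a failed allocation does not
 * invoke the OOM killer.
 */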
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	     if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
			dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
			dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	     if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	     if (dev == NULL ||
		 dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}

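/*
 * Allocate cacheable memory for DMA_ATTR_NON_CONSISTENT requests; the
 * caller is then responsible for cache maintenance, typically through
 * dma_cache_sync().
 */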
static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

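/*
 * Allocate a coherent buffer: try CMA first when blocking is allowed,
 * fall back to the page allocator, and on non-coherent devices without
 * hardware-maintained coherency return an uncached alias of the buffer
 * obtained via UNCAC_ADDR().
 */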
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;
	struct page *page = NULL;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/*
	 * XXX: seems like the coherent and non-coherent implementations could
	 * be consolidated.
	 */
	if (attrs & DMA_ATTR_NON_CONSISTENT)
		return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp);

	gfp = massage_gfp_flags(dev, gfp);

	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev,
					count, get_order(size));
	if (!page)
		page = alloc_pages(gfp, get_order(size));

	if (!page)
		return NULL;

	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = plat_map_dma_mem(dev, ret, size);
	if (!plat_device_is_coherent(dev)) {
		dma_cache_wback_inv((unsigned long) ret, size);
		if (!hw_coherentio)
			ret = UNCAC_ADDR(ret);
	}

	return ret;
}


static void mips_dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}

static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	if (attrs & DMA_ATTR_NON_CONSISTENT) {
		mips_dma_free_noncoherent(dev, size, vaddr, dma_handle);
		return;
	}

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev) && !hw_coherentio)
		addr = CAC_ADDR(addr);

	page = virt_to_page((void *) addr);

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}

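/*
 * Map a coherent allocation into user space.  The user pages are mapped
 * uncached, or write-combined when DMA_ATTR_WRITE_COMBINE is requested;
 * CAC_ADDR() recovers the cached kernel address so the backing pfn can be
 * looked up.
 */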
static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
	void *cpu_addr, dma_addr_t dma_addr, size_t size,
	unsigned long attrs)
{
	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)cpu_addr;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	if (!plat_device_is_coherent(dev) && !hw_coherentio)
		addr = CAC_ADDR(addr);

	pfn = page_to_pfn(virt_to_page((void *)addr));

	if (attrs & DMA_ATTR_WRITE_COMBINE)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}

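/*
 * Cache maintenance for a streaming mapping on a virtually addressed
 * buffer: writeback before the device reads (DMA_TO_DEVICE), invalidate
 * before the CPU reads what the device wrote (DMA_FROM_DEVICE), and
 * writeback+invalidate for DMA_BIDIRECTIONAL.
 */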
static inline void __dma_sync_virtual(void *addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages. But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			__dma_sync_virtual(addr + offset, len, direction);
			kunmap_atomic(addr);
		} else
			__dma_sync_virtual(page_address(page) + offset,
					   size, direction);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

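/*
 * Tear down a single streaming mapping.  CPUs that speculatively fill
 * cache lines (see cpu_needs_post_dma_flush() above) need their caches
 * flushed once more after the device has finished with the buffer.
 */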
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, unsigned long attrs)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);
	plat_post_dma_flush(dev);
	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

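/*
 * Map a scatterlist for streaming DMA: flush each entry out of the CPU
 * caches on non-coherent devices and fill in the bus address the device
 * should use.  Entries are never merged, so nents is returned unchanged.
 */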
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
	int nents, enum dma_data_direction direction, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
	}

	return nents;
}

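/*
 * Map a single page for streaming DMA; dma_map_single() and dma_map_page()
 * reach this through the mips_dma_map_ops table below.
 */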
static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	unsigned long attrs)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, direction);

	return plat_map_dma_mem_page(dev, page) + offset;
}

static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
	int nhwentries, enum dma_data_direction direction,
	unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nhwentries, i) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

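/*
 * dma_sync_*_for_cpu() returns buffer ownership to the CPU and only needs
 * cache work on CPUs that speculatively fill lines from DMA'd memory;
 * dma_sync_*_for_device() returns ownership to the device and performs
 * the maintenance required for the transfer direction.
 */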
static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
	plat_post_dma_flush(dev);
}

static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sglist, int nelems,
	enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	if (cpu_needs_post_dma_flush(dev)) {
		for_each_sg(sglist, sg, nelems, i) {
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		}
	}
	plat_post_dma_flush(dev);
}

static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sglist, int nelems,
	enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	if (!plat_device_is_coherent(dev)) {
		for_each_sg(sglist, sg, nelems, i) {
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		}
	}
}

int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

int mips_dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

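/*
 * Explicit cache maintenance for memory obtained with
 * DMA_ATTR_NON_CONSISTENT, see mips_dma_alloc_noncoherent() above.
 */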
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
			 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		__dma_sync_virtual(vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);

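/*
 * The generic dma-mapping API dispatches through this table; platform code
 * may substitute its own ops by overriding the mips_dma_map_ops pointer
 * below.  A driver's dma_map_single() call, for example, ends up in
 * mips_dma_map_page() and dma_unmap_single() in mips_dma_unmap_page().
 */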
static struct dma_map_ops mips_default_dma_map_ops = {
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
	.mmap = mips_dma_mmap,
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);

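/*
 * Entries preallocated for the DMA-API debugging facility; dma_debug_init()
 * is a no-op on kernels built without CONFIG_DMA_API_DEBUG.
 */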
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);