/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06	 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/dma-contiguous.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

#include <dma-coherence.h>

#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
/* User defined DMA coherency from command line. */
enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
	coherentio = IO_COHERENCE_ENABLED;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	coherentio = IO_COHERENCE_DISABLED;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif

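/*
 * Translate a device (bus) address back to the struct page backing it,
 * via the platform's bus-to-physical conversion.  The cache maintenance
 * helpers below operate on pages, so the unmap/sync paths need this to
 * flush after a device has written to memory.
 */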
static inline struct page *dma_addr_to_page(struct device *dev,
	dma_addr_t dma_addr)
{
	return pfn_to_page(
		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * A warning on terminology: Linux calls an uncached area coherent,
 * while MIPS terminology reserves "coherent" for memory areas whose
 * coherency is maintained by hardware.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However, this function is only called on non-I/O-coherent
 * systems, and only the R10000 and R12000 are used in such systems:
 * the SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline bool cpu_needs_post_dma_flush(struct device *dev)
{
	if (plat_device_is_coherent(dev))
		return false;

	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
		return true;

	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}

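/*
 * Pick a GFP zone that the device's coherent DMA mask can reach.  Any
 * zone specifier the caller passed in is discarded; the device's
 * coherent_dma_mask (or the ISA default when no device is given)
 * decides whether the allocation must come from ZONE_DMA or ZONE_DMA32
 * on this platform.
 */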
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	     if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
			dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
			dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	     if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	     if (dev == NULL ||
		 dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}

static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;
	struct page *page = NULL;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	gfp = massage_gfp_flags(dev, gfp);

	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 gfp);
	if (!page)
		page = alloc_pages(gfp, get_order(size));

	if (!page)
		return NULL;

	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = plat_map_dma_mem(dev, ret, size);
	if (!(attrs & DMA_ATTR_NON_CONSISTENT) &&
	    !plat_device_is_coherent(dev)) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}
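
/*
 * For illustration, a minimal (hypothetical) driver sequence that ends
 * up in the .alloc/.free hooks implemented here via the generic DMA
 * API; "dev" is assumed to be a device already known to the DMA core:
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &bus, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	... program the device with "bus", share data through "cpu" ...
 *	dma_free_coherent(dev, SZ_4K, cpu, bus);
 */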

static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	page = virt_to_page((void *) addr);

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}

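/*
 * Map a coherent allocation into userspace.  The CAC_ADDR conversion
 * undoes the UNCAC_ADDR step from the allocator so the backing page can
 * be found, and the vma's protection is made uncached (or write-combined
 * on request) so the user mapping sees the same view as the device.
 */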
static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
	void *cpu_addr, dma_addr_t dma_addr, size_t size,
	unsigned long attrs)
{
	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)cpu_addr;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	pfn = page_to_pfn(virt_to_page((void *)addr));

	if (attrs & DMA_ATTR_WRITE_COMBINE)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}

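/*
 * Cache maintenance for one virtually contiguous region: write back
 * dirty lines before the device reads (DMA_TO_DEVICE), invalidate
 * before the CPU reads what the device wrote (DMA_FROM_DEVICE), or
 * both for DMA_BIDIRECTIONAL.
 */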
static inline void __dma_sync_virtual(void *addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages. But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			__dma_sync_virtual(addr + offset, len, direction);
			kunmap_atomic(addr);
		} else
			__dma_sync_virtual(page_address(page) + offset,
					   size, direction);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, unsigned long attrs)
{
	if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);
	plat_post_dma_flush(dev);
	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
	int nents, enum dma_data_direction direction, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		if (!plat_device_is_coherent(dev) &&
		    !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
	}

	return nents;
}
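
/*
 * A hypothetical sketch of the streaming scatterlist usage this hook
 * serves, as driven through the generic DMA API (names illustrative):
 *
 *	struct scatterlist sgl[2];
 *	int count;
 *
 *	sg_init_table(sgl, 2);
 *	sg_set_buf(&sgl[0], buf0, len0);
 *	sg_set_buf(&sgl[1], buf1, len1);
 *	count = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
 */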

static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	unsigned long attrs)
{
	if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(page, offset, size, direction);

	return plat_map_dma_mem_page(dev, page) + offset;
}
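
/*
 * The single-buffer streaming case, for illustration; dma_map_single()
 * resolves to the .map_page hook above ("dev" and "buf" hypothetical):
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 *	if (dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 *	... let the device fill the buffer ...
 *	dma_unmap_single(dev, bus, len, DMA_FROM_DEVICE);
 */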

static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
	int nhwentries, enum dma_data_direction direction,
	unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nhwentries, i) {
		if (!plat_device_is_coherent(dev) &&
		    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

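/*
 * Note the asymmetry between the sync_*_for_cpu and sync_*_for_device
 * hooks below: handing a buffer back to the CPU only needs work on CPUs
 * that speculatively fill cachelines (cpu_needs_post_dma_flush), while
 * handing it to the device needs a writeback/invalidate on every
 * non-coherent device.
 */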
static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
	plat_post_dma_flush(dev);
}

static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sglist, int nelems,
	enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	if (cpu_needs_post_dma_flush(dev)) {
		for_each_sg(sglist, sg, nelems, i) {
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		}
	}
	plat_post_dma_flush(dev);
}

static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sglist, int nelems,
	enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	if (!plat_device_is_coherent(dev)) {
		for_each_sg(sglist, sg, nelems, i) {
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		}
	}
}

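/*
 * plat_map_dma_mem_page() has no way to report failure on MIPS, so no
 * dma_addr_t value is reserved as an error cookie and mapping_error
 * always reports success.
 */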
int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

int mips_dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
			 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		__dma_sync_virtual(vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);

static const struct dma_map_ops mips_default_dma_map_ops = {
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
	.mmap = mips_dma_mmap,
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);