/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06	 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

static inline struct page *dma_addr_to_page(struct device *dev,
	dma_addr_t dma_addr)
{
	return pfn_to_page(
		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

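/*
 * Added note (not in the original source): the R10000 and R12000 can
 * speculatively refill cache lines while a DMA transfer is still in
 * flight, so on noncoherent platforms these CPUs need cache maintenance
 * again when ownership of a buffer returns to the CPU (at
 * unmap/sync_for_cpu time), not only when it is mapped.
 */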
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
	       current_cpu_type() == CPU_R12000);
}

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
			dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
			dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}
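
/*
 * Illustrative example (not part of the original source): with both
 * CONFIG_ZONE_DMA and CONFIG_ZONE_DMA32 enabled, a device whose
 * coherent_dma_mask is DMA_BIT_MASK(24) is steered to __GFP_DMA, one
 * with DMA_BIT_MASK(32) to __GFP_DMA32, and a full 64-bit mask falls
 * through to dma_flag = 0, i.e. any zone is acceptable.
 */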

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_noncoherent);

static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}


void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_noncoherent);
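
/*
 * Sketch of typical driver usage of the noncoherent pair above; a
 * hypothetical example, not part of the original file:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_noncoherent(dev, size, &handle, GFP_KERNEL);
 *
 *	if (buf) {
 *		... CPU touches buf, the device DMAs via handle ...
 *		dma_free_noncoherent(dev, size, buf, handle);
 *	}
 */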

static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, order);
}

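/*
 * Per-direction cache maintenance on the CPU side of a transfer:
 * write back dirty lines before the device reads (DMA_TO_DEVICE),
 * invalidate before the CPU reads what the device wrote
 * (DMA_FROM_DEVICE), and do both when data moves both ways
 * (DMA_BIDIRECTIONAL).
 */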
static inline void __dma_sync_virtual(void *addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages. But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			__dma_sync_virtual(addr + offset, len, direction);
			kunmap_atomic(addr);
		} else {
			__dma_sync_virtual(page_address(page) + offset,
					   left, direction);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
	}

	return nents;
}

static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, direction);

	return plat_map_dma_mem_page(dev, page) + offset;
}

static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nhwentries, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
	}
}

static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
	}
}

int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

int mips_dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
			 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync_virtual(vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);

static struct dma_map_ops mips_default_dma_map_ops = {
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);
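
/*
 * Added note (not in the original source): platforms with special
 * coherency requirements may repoint mips_dma_map_ops at their own
 * struct dma_map_ops during early setup; the generic dma_map_* entry
 * points then dispatch through whichever ops table is installed here.
 */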

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);