/*
 * DMA coherent memory allocation.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * Copyright (C) 2002 - 2005 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Based on version for i386.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/dma-contiguous.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

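/*
 * Helper exported to drivers: apply the cache maintenance that matches
 * the given DMA direction to a kernel virtual address range.
 */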
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		__flush_invalidate_dcache_range((unsigned long)vaddr, size);
		break;

	case DMA_FROM_DEVICE:
		__invalidate_dcache_range((unsigned long)vaddr, size);
		break;

	case DMA_TO_DEVICE:
		__flush_dcache_range((unsigned long)vaddr, size);
		break;

	case DMA_NONE:
		BUG();
		break;
	}
}
EXPORT_SYMBOL(dma_cache_sync);

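/*
 * Apply a cache maintenance function to a DMA buffer that may span
 * several pages.  Lowmem buffers are handled through their permanent
 * kernel mapping; highmem pages are mapped one at a time with
 * kmap_atomic().
 */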
static void do_cache_op(dma_addr_t dma_handle, size_t size,
			void (*fn)(unsigned long, unsigned long))
{
	unsigned long off = dma_handle & (PAGE_SIZE - 1);
	unsigned long pfn = PFN_DOWN(dma_handle);
	struct page *page = pfn_to_page(pfn);

	if (!PageHighMem(page))
		fn((unsigned long)bus_to_virt(dma_handle), size);
	else
		while (size > 0) {
			size_t sz = min_t(size_t, size, PAGE_SIZE - off);
			void *vaddr = kmap_atomic(page);

			fn((unsigned long)vaddr + off, sz);
			kunmap_atomic(vaddr);
			off = 0;
			++page;
			size -= sz;
		}
}

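/*
 * Hand a streaming mapping back to the CPU: invalidate the data cache
 * over the buffer so the CPU sees what the device wrote.
 */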
static void xtensa_sync_single_for_cpu(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		do_cache_op(dma_handle, size, __invalidate_dcache_range);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

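/*
 * Hand a streaming mapping to the device: write back the data cache,
 * but only when the cache actually operates in writeback mode.
 */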
static void xtensa_sync_single_for_device(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		if (XCHAL_DCACHE_IS_WRITEBACK)
			do_cache_op(dma_handle, size, __flush_dcache_range);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

static void xtensa_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		xtensa_sync_single_for_cpu(dev, sg_dma_address(s),
					   sg_dma_len(s), dir);
	}
}

static void xtensa_sync_sg_for_device(struct device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		xtensa_sync_single_for_device(dev, sg_dma_address(s),
					      sg_dma_len(s), dir);
	}
}

/*
 * Note: We assume that the full memory space is always mapped to 'kseg'.
 *	 Otherwise we have to use page attributes (not implemented).
 */

static void *xtensa_dma_alloc(struct device *dev, size_t size,
			      dma_addr_t *handle, gfp_t flag,
			      unsigned long attrs)
{
	unsigned long ret;
	unsigned long uncached = 0;
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	/* ignore region specifiers */

	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		flag |= GFP_DMA;

	if (gfpflags_allow_blocking(flag))
		page = dma_alloc_from_contiguous(dev, count, get_order(size));

	if (!page)
		page = alloc_pages(flag, get_order(size));

	if (!page)
		return NULL;

	ret = (unsigned long)page_address(page);

	/* We currently don't support coherent memory outside KSEG */

	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);

	uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
	*handle = virt_to_bus((void *)ret);
	__invalidate_dcache_range(ret, size);

	return (void *)uncached;
}

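/*
 * Undo xtensa_dma_alloc(): translate the uncached (bypass) address back
 * to its cached KSEG alias and return the pages to CMA or the page
 * allocator.
 */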
static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
			    dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long)vaddr +
		XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
	struct page *page = virt_to_page(addr);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
	       addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}

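/*
 * Map one page for streaming DMA.  The bus address is simply the
 * physical address; cache maintenance for the requested direction is
 * done by xtensa_sync_single_for_device().
 */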
static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  unsigned long attrs)
{
	dma_addr_t dma_handle = page_to_phys(page) + offset;

	xtensa_sync_single_for_device(dev, dma_handle, size, dir);
	return dma_handle;
}

static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
			      size_t size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
}

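/* Map a scatterlist by mapping each entry with xtensa_map_page(). */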
static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
			 int nents, enum dma_data_direction dir,
			 unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset,
						 s->length, dir, attrs);
	}
	return nents;
}

static void xtensa_unmap_sg(struct device *dev,
			    struct scatterlist *sg, int nents,
			    enum dma_data_direction dir,
			    unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		xtensa_unmap_page(dev, sg_dma_address(s),
				  sg_dma_len(s), dir, attrs);
	}
}

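/* DMA addresses are plain physical addresses here; mapping cannot fail. */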
int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

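/* The xtensa implementation of the generic DMA mapping operations. */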
struct dma_map_ops xtensa_dma_map_ops = {
	.alloc = xtensa_dma_alloc,
	.free = xtensa_dma_free,
	.map_page = xtensa_map_page,
	.unmap_page = xtensa_unmap_page,
	.map_sg = xtensa_map_sg,
	.unmap_sg = xtensa_unmap_sg,
	.sync_single_for_cpu = xtensa_sync_single_for_cpu,
	.sync_single_for_device = xtensa_sync_single_for_device,
	.sync_sg_for_cpu = xtensa_sync_sg_for_cpu,
	.sync_sg_for_device = xtensa_sync_sg_for_device,
	.mapping_error = xtensa_dma_mapping_error,
};
EXPORT_SYMBOL(xtensa_dma_map_ops);

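/* Preallocate this many dma-debug tracking entries at boot. */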
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init xtensa_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(xtensa_dma_init);