/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * Cache bit off in the TLB entry.
 *
 * The default DMA address == Phy address which is 0x8000_0000 based.
 */
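
/*
 * Usage pointer (generic DMA API, not ARC specific): a driver call such as
 * dma_alloc_coherent(dev, size, &handle, GFP_KERNEL) typically ends up in
 * arc_dma_alloc() below via arc_dma_ops.
 */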

#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>


static void *arc_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	int need_coh = 1, need_kvaddr = 0;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache.
	 * Thus allocate normal cached memory.
	 *
	 * The gains with IOC are two-pronged:
	 *   -For streaming data, it elides the need for cache maintenance,
	 *    saving cycles in flush code and bus bandwidth, since otherwise
	 *    all the lines of a buffer would have to be flushed out to memory
	 *   -For coherent data, Read/Write to buffers terminate early in cache
	 *   (vs. always going to memory - thus are faster)
	 */
	if ((is_isa_arcv2() && ioc_exists) ||
	    dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
		need_coh = 0;

	/*
	 * - A coherent buffer needs MMU mapping to enforce non-cacheability
	 * - A highmem page needs a virtual handle (hence MMU mapping)
	 *   independent of cacheability
	 */
	if (PageHighMem(page) || need_coh)
		need_kvaddr = 1;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = plat_phys_to_dma(dev, paddr);

	/* This is kernel Virtual address (0x7000_0000 based) */
	if (need_kvaddr) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr, hence
	 * it can't be used to efficiently flush L1 and/or L2 which need paddr.
	 * Currently flush_cache_vmap nukes the L1 cache completely, which
	 * will be optimized as a separate commit.
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}

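/*
 * Undo arc_dma_alloc(): iounmap() is needed only if a kernel virtual
 * mapping was created at alloc time, i.e. for a highmem page or a
 * (non IOC) coherent buffer.
 */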
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	struct page *page = virt_to_page(dma_handle);
	int is_non_coh = 1;

	is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
			(is_isa_arcv2() && ioc_exists);

	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

/*
 * streaming DMA Mapping API...
 * CPU accesses the page via its normal paddr, so it needs to be explicitly
 * made consistent before each use
 */
static void _dma_cache_sync(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
	}
}

static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	phys_addr_t paddr = page_to_phys(page) + offset;
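
	/*
	 * Make the buffer consistent for @dir before handing the bus address
	 * to the device. Note: arc_dma_ops below registers no .unmap_page
	 * callback, so no cache maintenance is done at unmap time.
	 */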
	_dma_cache_sync(paddr, size, dir);
	return plat_phys_to_dma(dev, paddr);
}

static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

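	/* dma_map_page() does the per-entry cache maintenance for @dir */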
	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					       s->length, dir);

	return nents;
}

static void arc_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
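	/* @dir is ignored: syncing for the CPU always invalidates the lines */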
	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE);
}

static void arc_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
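	/* @dir is ignored: syncing for the device always writes back */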
	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE);
}

static void arc_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

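	/* unlike the _single variants above, @dir is passed through as-is */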
	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static void arc_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}

struct dma_map_ops arc_dma_ops = {
	.alloc			= arc_dma_alloc,
	.free			= arc_dma_free,
	.map_page		= arc_dma_map_page,
	.map_sg			= arc_dma_map_sg,
	.sync_single_for_device	= arc_dma_sync_single_for_device,
	.sync_single_for_cpu	= arc_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= arc_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arc_dma_sync_sg_for_device,
	.dma_supported		= arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);