/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>

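/*
 * The MicroBlaze data cache is not coherent with respect to DMA, so
 * NOT_COHERENT_CACHE is defined to route coherent allocations through
 * an uncached consistent mapping in the helpers below.
 */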
#define NOT_COHERENT_CACHE

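/*
 * Allocate a coherent buffer.  With a non-coherent cache the buffer
 * comes from consistent_alloc(), which returns an uncached mapping;
 * otherwise plain zeroed pages are used and the physical address
 * doubles as the DMA handle.
 */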
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret);

	return ret;
#endif
}

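/*
 * Release a buffer obtained from dma_direct_alloc_coherent(), using
 * whichever allocator produced it.
 */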
static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

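/*
 * Map a scatterlist for DMA.  The bus is directly mapped, so each
 * segment's DMA address is simply its physical address; the CPU cache
 * is synced per segment unless DMA_ATTR_SKIP_CPU_SYNC is set.
 */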
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		__dma_sync(sg_phys(sg), sg->length, direction);
	}

	return nents;
}

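/* Directly mapped bus with no addressing restrictions: any mask works. */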
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

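/*
 * Map a single page: perform cache maintenance (unless the caller asked
 * to skip it) and return the physical address as the bus address.
 */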
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset;
}

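/*
 * Unmap a single page.  Only cache maintenance is needed; there is no
 * mapping state to tear down on a directly mapped bus.
 */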
static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
{
	/*
	 * No cache cleanup is required beyond the sync below.
	 *
	 * dma_address is already a physical address, which is what
	 * __dma_sync() expects, so no phys_to_virt() conversion is needed
	 * here (__dma_sync_page() differs: it applies __virt_to_phys()
	 * internally).
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(dma_address, size, direction);
}

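/*
 * Streaming-DMA sync helpers: cache maintenance is only needed in the
 * direction data actually flows, so each helper checks the transfer
 * direction before touching the cache.
 */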
static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * It's pointless to flush the cache as the memory segment
	 * is given to the CPU
	 */

	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region
	 */

	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

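/*
 * mmap a coherent buffer into user space.  In the non-coherent case the
 * user mapping must be uncached as well, and the pfn is recovered from
 * the consistent-allocation bookkeeping instead of virt_to_pfn().
 */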
static
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t handle, size_t size,
			     unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;

	if (off >= count || user_count > (count - off))
		return -ENXIO;

#ifdef NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = consistent_virt_to_pfn(cpu_addr);
#else
	pfn = virt_to_pfn(cpu_addr);
#endif
	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}

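/* Callback table exported as this platform's default set of DMA ops. */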
const struct dma_map_ops dma_direct_ops = {
	.alloc		= dma_direct_alloc_coherent,
	.free		= dma_direct_free_coherent,
	.mmap		= dma_direct_mmap_coherent,
	.map_sg		= dma_direct_map_sg,
	.dma_supported	= dma_direct_dma_supported,
	.map_page	= dma_direct_map_page,
	.unmap_page	= dma_direct_unmap_page,
	.sync_single_for_cpu		= dma_direct_sync_single_for_cpu,
	.sync_single_for_device		= dma_direct_sync_single_for_device,
	.sync_sg_for_cpu		= dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device		= dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);
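
/*
 * Usage sketch (hypothetical driver code, not part of this file):
 * drivers do not call these ops directly; they use the generic DMA API,
 * which dispatches here on this platform.  For example:
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... program the device with 'handle', then on teardown ...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */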