/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#undef DEBUG

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/pgalloc.h>

#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)

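/*
 * With an MMU (and not on ColdFire), a coherent buffer is built from
 * individual pages that are remapped non-cached with vmap(), since the
 * regular kernel mapping of those pages stays cacheable.
 */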
static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t flag, unsigned long attrs)
{
	struct page *page, **map;
	pgprot_t pgprot;
	void *addr;
	int i, order;

	pr_debug("dma_alloc_coherent: %zu,%x\n", size, flag);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(flag, order);
	if (!page)
		return NULL;

	*handle = page_to_phys(page);
	map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
	if (!map) {
		__free_pages(page, order);
		return NULL;
	}
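	/*
	 * Split the high-order allocation into single pages so that the
	 * tail beyond the requested size can be returned to the page
	 * allocator below.
	 */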
	split_page(page, order);

	order = 1 << order;
	size >>= PAGE_SHIFT;
	map[0] = page;
	for (i = 1; i < size; i++)
		map[i] = page + i;
	for (; i < order; i++)
		__free_page(page + i);
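	/*
	 * Map the remaining pages non-cached: '040/'060 use the
	 * serialized non-cacheable mode, '030 its plain non-cache bit.
	 */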
	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	else
		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
	addr = vmap(map, size, VM_MAP, pgprot);
	kfree(map);

	return addr;
}

static void m68k_dma_free(struct device *dev, size_t size, void *addr,
		dma_addr_t handle, unsigned long attrs)
{
	pr_debug("dma_free_coherent: %p, %pad\n", addr, &handle);
	vfree(addr);
}

#else

#include <asm/cacheflush.h>

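/*
 * On nommu and ColdFire configurations a coherent buffer is simply a
 * zeroed, physically contiguous chunk from the page allocator, handed
 * out by its physical address.
 */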
static void *m68k_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

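	/* devices that cannot reach all of memory get DMA zone pages */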
	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */

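/*
 * Streaming mappings: before the device touches a buffer, dirty cache
 * lines are pushed out for DMA_TO_DEVICE and DMA_BIDIRECTIONAL, and the
 * cached copy is invalidated for DMA_FROM_DEVICE.
 */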
static void m68k_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);
		break;
	default:
		if (printk_ratelimit())
			pr_err("dma_sync_single_for_device: unsupported dir %u\n", dir);
		break;
	}
}

static void m68k_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nents, enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		dma_sync_single_for_device(dev, sg->dma_address, sg->length,
					   dir);
	}
}

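/*
 * There is no IOMMU: a mapping is simply the physical address of the
 * page, with the cache made consistent for the device up front.
 */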
static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}

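/*
 * Scatterlists are mapped the same way, one entry at a time, using the
 * physical address of each segment.
 */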
static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		dma_sync_single_for_device(dev, sg->dma_address, sg->length,
					   dir);
	}
	return nents;
}

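/*
 * The architecture's dma_map_ops.  No unmap or sync_*_for_cpu hooks are
 * provided; all cache maintenance happens when a buffer is handed to
 * the device.
 */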
struct dma_map_ops m68k_dma_ops = {
	.alloc			= m68k_dma_alloc,
	.free			= m68k_dma_free,
	.map_page		= m68k_dma_map_page,
	.map_sg			= m68k_dma_map_sg,
	.sync_single_for_device	= m68k_dma_sync_single_for_device,
	.sync_sg_for_device	= m68k_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(m68k_dma_ops);