/* Fallback functions when the main IOMMU code is not compiled in. This
   code is roughly equivalent to i386. */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <asm/iommu.h>
#include <asm/processor.h>
#include <asm/dma.h>

/*
 * Check that the bus address range [bus, bus + size) is reachable
 * through the device's DMA mask.  Returns 1 if the device can address
 * the whole range, 0 otherwise.  An overflow is logged only for
 * devices with at least a 32-bit mask (smaller masks, e.g. ISA, are
 * expected to bounce and would just spam the log).
 */
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
	if (!hwdev || bus + size <= *hwdev->dma_mask)
		return 1;

	if (*hwdev->dma_mask >= DMA_32BIT_MASK)
		printk(KERN_ERR
		    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
			name, (long long)bus, size,
			(long long)*hwdev->dma_mask);
	return 0;
}

static dma_addr_t
I
Ingo Molnar 已提交
29
nommu_map_single(struct device *hwdev, phys_addr_t paddr, size_t size,
30 31
	       int direction)
{
I
Ingo Molnar 已提交
32
	dma_addr_t bus = paddr;
33
	WARN_ON(size == 0);
34 35
	if (!check_addr("map_single", hwdev, bus, size))
				return bad_dma_address;
36
	flush_write_buffers();
37
	return bus;
L
Linus Torvalds 已提交
38 39 40
}


/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
56
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
57
	       int nents, int direction)
L
Linus Torvalds 已提交
58
{
J
Jens Axboe 已提交
59
	struct scatterlist *s;
60
	int i;
L
Linus Torvalds 已提交
61

62 63
	WARN_ON(nents == 0 || sg[0].length == 0);

J
Jens Axboe 已提交
64
	for_each_sg(sg, s, nents, i) {
J
Jens Axboe 已提交
65
		BUG_ON(!sg_page(s));
G
Glauber Costa 已提交
66
		s->dma_address = sg_phys(s);
67 68 69 70
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;
		s->dma_length = s->length;
	}
71
	flush_write_buffers();
72 73
	return nents;
}

/*
 * Allocate size bytes of zeroed, coherent DMA memory for hwdev.
 * On success returns the kernel virtual address and stores the bus
 * (== physical) address in *dma_addr; returns NULL on failure.
 */
static void *
nommu_alloc_coherent(struct device *hwdev, size_t size,
		     dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long dma_mask;
	int node;
	struct page *page;

	/* The zone is derived from the DMA mask below, so strip any
	 * zone flags the caller passed in; coherent memory is always
	 * handed out zeroed. */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
	gfp |= __GFP_ZERO;

	/* Fall back to the streaming DMA mask when no coherent mask
	 * has been set for this device. */
	dma_mask = hwdev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = *(hwdev->dma_mask);

	/* Masks below 24 bits cannot be satisfied by any zone. */
	if (dma_mask < DMA_24BIT_MASK)
		return NULL;

	node = dev_to_node(hwdev);	/* prefer memory local to the device */

#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_32BIT_MASK)
		gfp |= GFP_DMA32;
#endif

	/* No alloc-free penalty for ISA devices */
	if (dma_mask == DMA_24BIT_MASK)
		gfp |= GFP_DMA;

again:
	page = alloc_pages_node(node, gfp, get_order(size));
	if (!page)
		return NULL;

	/* If the allocation landed above the mask, retry exactly once
	 * from ZONE_DMA; the !(gfp & GFP_DMA) test prevents looping. */
	if ((page_to_phys(page) + size > dma_mask) && !(gfp & GFP_DMA)) {
		free_pages((unsigned long)page_address(page), get_order(size));
		gfp |= GFP_DMA;
		goto again;
	}

	/* Identity mapping: the bus address is the physical address. */
	*dma_addr = page_to_phys(page);
	if (check_addr("alloc_coherent", hwdev, *dma_addr, size)) {
		flush_write_buffers();
		return page_address(page);
	}

	/* Still outside the device's reach even from ZONE_DMA. */
	free_pages((unsigned long)page_address(page), get_order(size));

	return NULL;
}

/* Release coherent memory obtained from nommu_alloc_coherent(). */
static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_addr)
{
	unsigned long addr = (unsigned long)vaddr;

	free_pages(addr, get_order(size));
}

/*
 * DMA operations used when no hardware IOMMU is present.  is_phys
 * marks the mapping as a pure identity (bus == physical) mapping.
 */
struct dma_mapping_ops nommu_dma_ops = {
	.alloc_coherent = nommu_alloc_coherent,
	.free_coherent = nommu_free_coherent,
	.map_single = nommu_map_single,
	.map_sg = nommu_map_sg,
	.is_phys = 1,
};

/*
 * Install the nommu DMA operations as the system fallback, unless
 * another IOMMU implementation has already registered dma_ops.
 */
void __init no_iommu_init(void)
{
	if (!dma_ops) {
		force_iommu = 0; /* no HW IOMMU */
		dma_ops = &nommu_dma_ops;
	}
}