pci-dma_64.c

/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <asm/proto.h>
#include <asm/io.h>
#include <asm/gart.h>
#include <asm/calgary.h>

/* Dummy device used for NULL arguments (normally ISA). A smaller
   DMA mask would probably be better, but this is bug-to-bug
   compatible with i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};

/* Allocate DMA memory on node near device */
static noinline void *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	int node;

	node = dev_to_node(dev);

	return alloc_pages_node(node, gfp, order);
}

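/* No per-device coherent memory pool support here: both helpers are
   stubs that report "not handled", so the generic allocation paths
   below always run. */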
#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
#define dma_release_coherent(dev, order, vaddr) (0)
/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory;
	struct page *page;
	unsigned long dma_mask = 0;
	u64 bus;

	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
		return memory;

	if (!dev)
		dev = &fallback_dev;
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = DMA_32BIT_MASK;

	/* Device not DMA able */
	if (dev->dma_mask == NULL)
		return NULL;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	/* Kludge to make it bug-to-bug compatible with i386. i386
	   uses the normal dma_mask for alloc_coherent. */
	dma_mask &= *dev->dma_mask;

	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB, and in that case we have a chance of
	   finding fitting memory in the next higher zone first. If
	   not, retry with true GFP_DMA. -AK */
	if (dma_mask <= DMA_32BIT_MASK)
		gfp |= GFP_DMA32;

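	/* Retry target: we come back here with a more restrictive GFP
	   zone when the first allocation landed above the DMA mask. */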
 again:
	page = dma_alloc_pages(dev, gfp, get_order(size));
	if (page == NULL)
		return NULL;

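	/* Classify the allocation: "high" means it ends at or above the
	   DMA mask; "mmu" means an IOMMU remapping will be needed. */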
	{
		int high, mmu;
		bus = page_to_phys(page);
		memory = page_address(page);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							   dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = bus;
			return memory;
		}
	}

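	/* force_iommu path: give the pages back and let the IOMMU
	   driver allocate a remapped buffer in the zone it prefers. */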
	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

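	/* No alloc_coherent hook: fall back to mapping the pages we
	   already allocated through the IOMMU's simple map helper. */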
	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
					      size,
					      PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

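	/* Mapping failed: optionally panic, otherwise free the pages
	   and report the failure to the caller. */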
	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n", size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t bus)
{
	int order = get_order(size);
	WARN_ON(irqs_disabled());	/* for portability */
	if (dma_release_coherent(dev, order, vaddr))
		return;
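	/* Tear down any IOMMU mapping that dma_alloc_coherent set up. */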
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
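
/*
 * Typical driver usage (an illustrative sketch, not part of this file;
 * "pdev", "ring" and the 4096-byte size are hypothetical):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&pdev->dev, 4096, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	(program ring_dma into the device; access "ring" from the CPU)
 *	dma_free_coherent(&pdev->dev, 4096, ring, ring_dma);
 */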