dma-iommu.c
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

#include <asm/iommu.h>

/*
 * Generic iommu implementation
 */

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag)
{
	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				    dma_handle, dev->coherent_dma_mask, flag,
				    dev_to_node(dev));
}
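
/*
 * Example (editor's sketch, not part of the original file): drivers do
 * not call dma_iommu_alloc_coherent() directly; they go through the
 * generic API in <linux/dma-mapping.h>, which dispatches through the
 * device's dma_map_ops once dma_iommu_ops is installed. "pdev" below
 * is a hypothetical struct pci_dev:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &handle,
 *				       GFP_KERNEL);
 *	if (buf)
 *		dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, handle);
 */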

static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle)
{
	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
			      size, device_to_mask(dev), direction, attrs);
}
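
/*
 * Example (editor's sketch): a caller maps part of an existing page for
 * streaming DMA with dma_map_page(), which lands here; "dev" and "page"
 * are assumed to exist in the caller:
 *
 *	dma_addr_t busaddr = dma_map_page(dev, page, 0, PAGE_SIZE,
 *					  DMA_TO_DEVICE);
 *	...device reads the buffer...
 *	dma_unmap_page(dev, busaddr, PAGE_SIZE, DMA_TO_DEVICE);
 */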


static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
			 attrs);
}


static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	return iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
			    device_to_mask(dev), direction, attrs);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, direction,
		       attrs);
}
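
/*
 * Example (editor's sketch): scatter/gather usage from a driver's point
 * of view; "sg" and "nents" are hypothetical. Note that iommu_map_sg()
 * can merge adjacent entries, so the mapped count may be less than
 * nents:
 *
 *	int mapped = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
 *	if (mapped)
 *		dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
 */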

/* We support DMA to/from any memory page via the iommu */
static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (!tbl) {
		dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx"
			", table unavailable\n", mask);
		return 0;
	}

	if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) {
		dev_info(dev, "Warning: IOMMU window too big for device mask\n");
		dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n",
				mask, (tbl->it_offset + tbl->it_size) <<
				IOMMU_PAGE_SHIFT);
		return 0;
	} else
		return 1;
}
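
/*
 * Worked example (editor's addition): with 4K IOMMU pages
 * (IOMMU_PAGE_SHIFT == 12) and a 32-bit mask, the check above demands
 * it_offset + it_size <= 0xffffffff >> 12 = 0xfffff IOMMU pages, i.e.
 * the DMA window must end within (roughly) the first 4GB of bus space.
 */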

u64 dma_iommu_get_required_mask(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);
	u64 mask;
	if (!tbl)
		return 0;

	/* Smallest all-ones mask that reaches the end of the table */
	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
	mask += mask - 1;

	return mask;
}
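
/*
 * Worked example (editor's addition): if it_offset + it_size is 0x90000
 * IOMMU pages, fls_long() returns 20, so mask becomes 1ULL << 19 =
 * 0x80000, and 0x80000 + 0x7ffff = 0xfffff: the smallest all-ones mask
 * reaching past the end of the table.
 */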

struct dma_map_ops dma_iommu_ops = {
	.alloc_coherent	= dma_iommu_alloc_coherent,
	.free_coherent	= dma_iommu_free_coherent,
	.map_sg		= dma_iommu_map_sg,
	.unmap_sg	= dma_iommu_unmap_sg,
	.dma_supported	= dma_iommu_dma_supported,
	.map_page	= dma_iommu_map_page,
	.unmap_page	= dma_iommu_unmap_page,
};
EXPORT_SYMBOL(dma_iommu_ops);
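
/*
 * Example (editor's sketch): platform code typically attaches a device
 * to these ops after wiring up its iommu table; the helper names below
 * follow the powerpc conventions used above but are an assumption here:
 *
 *	set_iommu_table_base(&pdev->dev, tbl);
 *	set_dma_ops(&pdev->dev, &dma_iommu_ops);
 */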