Commit 4f3dd8a0 authored by Mark Nelson, committed by Benjamin Herrenschmidt

powerpc/dma: Use the struct dma_attrs in iommu code

Update iommu_alloc() to take the struct dma_attrs and pass them on to
tce_build(). This change propagates down to the tce_build functions of
all the platforms.
Signed-off-by: Mark Nelson <markn@au1.ibm.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Parent 4795b780
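For context, a minimal caller-side sketch (not part of this commit) of how a dma_attrs set reaches the new parameter. It assumes the generic *_attrs DMA API of this era (linux/dma-attrs.h, init_dma_attrs(), dma_set_attr(), dma_map_single_attrs()); the device, buffer, and length names are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

/* Hypothetical caller: build a dma_attrs set and hand it to the
 * attrs-aware mapping API.  On powerpc the pointer now flows
 * dma_map_single_attrs() -> iommu_map_single() -> iommu_alloc()
 * -> ppc_md.tce_build(), as the hunks below show. */
static dma_addr_t map_with_attrs(struct device *dev, void *buf, size_t len)
{
	struct dma_attrs attrs;

	init_dma_attrs(&attrs);
	/* DMA_ATTR_WRITE_BARRIER is an attribute already defined at this
	 * point; platform tce_build() hooks may honour or ignore it. */
	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

	return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, &attrs);
}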
@@ -186,7 +186,8 @@ static unsigned long iommu_range_alloc(struct device *dev,
 static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 			      void *page, unsigned int npages,
 			      enum dma_data_direction direction,
-			      unsigned long mask, unsigned int align_order)
+			      unsigned long mask, unsigned int align_order,
+			      struct dma_attrs *attrs)
 {
 	unsigned long entry, flags;
 	dma_addr_t ret = DMA_ERROR_CODE;
@@ -205,7 +206,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	/* Put the TCEs in the HW table */
 	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
-			 direction);
+			 direction, attrs);
 	/* Flush/invalidate TLB caches if necessary */
@@ -336,7 +337,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			    npages, entry, dma_addr);
 		/* Insert into HW table */
-		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);
+		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK,
+				 direction, attrs);
 		/* If we are in an open segment, try merging */
 		if (segstart != s) {
@@ -573,7 +575,8 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
 			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
 		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
-					 mask >> IOMMU_PAGE_SHIFT, align);
+					 mask >> IOMMU_PAGE_SHIFT, align,
+					 attrs);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit()) {
 				printk(KERN_INFO "iommu_alloc failed, "
@@ -642,7 +645,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	nio_pages = size >> IOMMU_PAGE_SHIFT;
 	io_order = get_iommu_order(size);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
-			      mask >> IOMMU_PAGE_SHIFT, io_order);
+			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
 		return NULL;
...
@@ -173,7 +173,8 @@ static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
 }
 static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
-		unsigned long uaddr, enum dma_data_direction direction)
+		unsigned long uaddr, enum dma_data_direction direction,
+		struct dma_attrs *attrs)
 {
 	int i;
 	unsigned long *io_pte, base_pte;
@@ -519,7 +520,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
 	__set_bit(0, window->table.it_map);
 	tce_build_cell(&window->table, window->table.it_offset, 1,
-		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE);
+		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL);
 	window->table.it_hint = window->table.it_blocksize;
 	return window;
...
@@ -42,7 +42,8 @@
 #include <asm/iseries/iommu.h>
 static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
-		unsigned long uaddr, enum dma_data_direction direction)
+		unsigned long uaddr, enum dma_data_direction direction,
+		struct dma_attrs *attrs)
 {
 	u64 rc;
 	u64 tce, rpn;
...
@@ -85,7 +85,8 @@ static int iommu_table_iobmap_inited;
 static void iobmap_build(struct iommu_table *tbl, long index,
 			 long npages, unsigned long uaddr,
-			 enum dma_data_direction direction)
+			 enum dma_data_direction direction,
+			 struct dma_attrs *attrs)
 {
 	u32 *ip;
 	u32 rpn;
...
@@ -50,7 +50,8 @@
 static void tce_build_pSeries(struct iommu_table *tbl, long index,
 			      long npages, unsigned long uaddr,
-			      enum dma_data_direction direction)
+			      enum dma_data_direction direction,
+			      struct dma_attrs *attrs)
 {
 	u64 proto_tce;
 	u64 *tcep;
@@ -95,7 +96,8 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
 static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
 				long npages, unsigned long uaddr,
-				enum dma_data_direction direction)
+				enum dma_data_direction direction,
+				struct dma_attrs *attrs)
 {
 	u64 rc;
 	u64 proto_tce, tce;
@@ -127,7 +129,8 @@ static DEFINE_PER_CPU(u64 *, tce_page) = NULL;
 static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 				     long npages, unsigned long uaddr,
-				     enum dma_data_direction direction)
+				     enum dma_data_direction direction,
+				     struct dma_attrs *attrs)
 {
 	u64 rc;
 	u64 proto_tce;
@@ -136,7 +139,8 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	long l, limit;
 	if (npages == 1) {
-		tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, direction);
+		tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
+				    direction, attrs);
 		return;
 	}
@@ -150,7 +154,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	/* If allocation fails, fall back to the loop implementation */
 	if (!tcep) {
 		tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
-				    direction);
+				    direction, attrs);
 		return;
 	}
 	__get_cpu_var(tce_page) = tcep;
...
@@ -149,7 +149,8 @@ static void dart_flush(struct iommu_table *tbl)
 static void dart_build(struct iommu_table *tbl, long index,
 		       long npages, unsigned long uaddr,
-		       enum dma_data_direction direction)
+		       enum dma_data_direction direction,
+		       struct dma_attrs *attrs)
 {
 	unsigned int *dp;
 	unsigned int rpn;
...
@@ -80,7 +80,8 @@ struct machdep_calls {
 			     long index,
 			     long npages,
 			     unsigned long uaddr,
-			     enum dma_data_direction direction);
+			     enum dma_data_direction direction,
+			     struct dma_attrs *attrs);
 	void		(*tce_free)(struct iommu_table *tbl,
 			    long index,
 			    long npages);
...
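The machdep.h hunk above anchors the whole change: every platform hook must now match the new tce_build prototype. A hedged sketch (not from this commit) of how a hook can consult the argument; dma_get_attr() returns 0 for a NULL attrs, so the NULL callers added above remain valid. The weak-ordering use case lands later in this series, so DMA_ATTR_WRITE_BARRIER stands in as an attribute that already exists:

#include <linux/dma-attrs.h>
#include <asm/iommu.h>

/* Illustrative only: an attrs-aware tce_build hook. */
static void tce_build_example(struct iommu_table *tbl, long index,
			      long npages, unsigned long uaddr,
			      enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	/* dma_get_attr() is NULL-safe; nonzero means the attribute is set. */
	int barrier = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

	/* ... install npages TCEs starting at index for uaddr, applying
	 * extra ordering in the TCE bits when 'barrier' is set ... */
	(void)barrier;
}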