Commit ca1588e7 authored by Anton Blanchard, committed by Paul Mackerras

[POWERPC] node local IOMMU tables

Allocate IOMMU tables local to the relevant node.
Signed-off-by: Anton Blanchard <anton@samba.org>
Acked-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Parent 357518fa
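The change itself is small: the bitmap behind each iommu_table, and the iommu_table structures themselves, are allocated with the node-aware allocators (alloc_pages_node() and kmalloc_node()) on the NUMA node of the owning PCI host bridge (phb->node), rather than on whichever node the boot CPU happens to be running. Callers with no meaningful node (VIO, iSeries, DART) pass -1 and leave placement to the allocator. The fragment below is a minimal sketch of that allocation pattern, not code taken from the patch; the helper name alloc_table_bitmap is invented for illustration.

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Sketch only: allocate the page-backed bitmap for an IOMMU table on a
 * specific NUMA node.  A nid of -1 leaves the node choice to the allocator,
 * matching how the VIO/iSeries/DART callers use the new
 * iommu_init_table(tbl, nid) signature.
 */
static unsigned long *alloc_table_bitmap(unsigned long bitmap_bytes, int nid)
{
	struct page *page;

	/* Round up to whole pages and prefer memory on 'nid'. */
	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(bitmap_bytes));
	if (!page)
		return NULL;

	/* The bitmap is used through its kernel virtual address. */
	return page_address(page);
}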
@@ -418,10 +418,11 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
  * Build a iommu_table structure. This contains a bit map which
  * is used to manage allocation of the tce space.
  */
-struct iommu_table *iommu_init_table(struct iommu_table *tbl)
+struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 {
 	unsigned long sz;
 	static int welcomed = 0;
+	struct page *page;

 	/* Set aside 1/4 of the table for large allocations. */
 	tbl->it_halfpoint = tbl->it_size * 3 / 4;
@@ -429,10 +430,10 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl)
 	/* number of bytes needed for the bitmap */
 	sz = (tbl->it_size + 7) >> 3;

-	tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
-	if (!tbl->it_map)
+	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
+	if (!page)
 		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
+	tbl->it_map = page_address(page);
 	memset(tbl->it_map, 0, sz);

 	tbl->it_hint = 0;
@@ -60,9 +60,9 @@ static void __init iommu_vio_init(void)
 	vio_iommu_table = veth_iommu_table;
 	vio_iommu_table.it_offset += veth_iommu_table.it_size;

-	if (!iommu_init_table(&veth_iommu_table))
+	if (!iommu_init_table(&veth_iommu_table, -1))
 		printk("Virtual Bus VETH TCE table failed.\n");
-	if (!iommu_init_table(&vio_iommu_table))
+	if (!iommu_init_table(&vio_iommu_table, -1))
 		printk("Virtual Bus VIO TCE table failed.\n");
 }
 #endif
@@ -98,7 +98,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
 	tbl->it_busno = 0;
 	tbl->it_type = TCE_VB;

-	return iommu_init_table(tbl);
+	return iommu_init_table(tbl, -1);
 	}
 }
@@ -173,7 +173,7 @@ void iommu_devnode_init_iSeries(struct device_node *dn)
 	/* Look for existing tce table */
 	pdn->iommu_table = iommu_table_find(tbl);
 	if (pdn->iommu_table == NULL)
-		pdn->iommu_table = iommu_init_table(tbl);
+		pdn->iommu_table = iommu_init_table(tbl, -1);
 	else
 		kfree(tbl);
 }
@@ -368,10 +368,11 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
 	pci->phb->dma_window_size = 0x8000000ul;
 	pci->phb->dma_window_base_cur = 0x8000000ul;

-	tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
+	tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+			   pci->phb->node);

 	iommu_table_setparms(pci->phb, dn, tbl);
-	pci->iommu_table = iommu_init_table(tbl);
+	pci->iommu_table = iommu_init_table(tbl, pci->phb->node);

 	/* Divide the rest (1.75GB) among the children */
 	pci->phb->dma_window_size = 0x80000000ul;
@@ -414,12 +415,12 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
 		ppci->bussubno = bus->number;

-		tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table),
-				GFP_KERNEL);
+		tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+				   ppci->phb->node);

 		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);

-		ppci->iommu_table = iommu_init_table(tbl);
+		ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
 	}

 	if (pdn != dn)
@@ -442,9 +443,11 @@ static void iommu_dev_setup_pSeries(struct pci_dev *dev)
 	 */
 	if (!dev->bus->self) {
 		DBG(" --> first child, no bridge. Allocating iommu table.\n");
-		tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
+		tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+				   PCI_DN(dn)->phb->node);
 		iommu_table_setparms(PCI_DN(dn)->phb, dn, tbl);
-		PCI_DN(mydn)->iommu_table = iommu_init_table(tbl);
+		PCI_DN(dn)->iommu_table = iommu_init_table(tbl,
+						PCI_DN(dn)->phb->node);
 		return;
 	}
@@ -526,12 +529,12 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
 		/* iommu_table_setparms_lpar needs bussubno. */
 		pci->bussubno = pci->phb->bus->number;

-		tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table),
-				GFP_KERNEL);
+		tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
+				   pci->phb->node);

 		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);

-		pci->iommu_table = iommu_init_table(tbl);
+		pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
 	}

 	if (pdn != dn)
@@ -246,7 +246,7 @@ static void iommu_table_dart_setup(void)
 	iommu_table_dart.it_base = (unsigned long)dart_vbase;
 	iommu_table_dart.it_index = 0;
 	iommu_table_dart.it_blocksize = 1;
-	iommu_init_table(&iommu_table_dart);
+	iommu_init_table(&iommu_table_dart, -1);

 	/* Reserve the last page of the DART to avoid possible prefetch
 	 * past the DART mapped area
@@ -67,7 +67,8 @@ extern void iommu_free_table(struct device_node *dn);
 /* Initializes an iommu_table based in values set in the passed-in
  * structure
  */
-extern struct iommu_table *iommu_init_table(struct iommu_table * tbl);
+extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
+					    int nid);

 extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		struct scatterlist *sglist, int nelems, unsigned long mask,