Commit 56997559 authored by Jake Moilanen, committed by Paul Mackerras

[POWERPC] DMA 4GB boundary protection

Many adapters cannot handle DMAing across any 4 GB boundary, for instance
the latest Emulex adapters.

This normally is not an issue, as firmware gives DMA windows below 4 GB.
However, some of the new System-P boxes have DMA windows above 4 GB, and
this presents a problem.

During initialization of the IOMMU tables, the last entry at each 4GB
boundary is marked as used.  Thus no mappings can cross the boundary.
If a table ends at a 4GB boundary, the entry is not marked as used.
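
The index arithmetic is compact, so a worked example may help. Below is a
minimal standalone sketch, not the kernel code itself; IOMMU_PAGE_SHIFT of 12
(4 KB IOMMU pages), 64-bit unsigned long, the table_offset/table_size
parameters, and the example window in main() are all illustrative assumptions.

/*
 * Minimal standalone sketch of the reservation arithmetic, NOT the kernel
 * code itself.  Assumptions: 4 KB IOMMU pages (IOMMU_PAGE_SHIFT == 12) and
 * 64-bit unsigned long; table_offset/table_size stand in for the table's
 * it_offset/it_size fields (both counted in IOMMU pages).
 */
#include <stdio.h>

#define IOMMU_PAGE_SHIFT 12     /* assumed 4 KB IOMMU page size */

static void reserve_4gb_entries(unsigned long table_offset,
                                unsigned long table_size)
{
        unsigned long entries_per_4g = 0x100000000UL >> IOMMU_PAGE_SHIFT;
        unsigned long index;

        /* Index (relative to the table start) of the last entry that sits
         * just below the next 4 GB boundary after table_offset. */
        unsigned long start_index =
                (table_offset | (entries_per_4g - 1)) - table_offset;

        /* Stopping at table_size - 1 leaves the very last entry untouched
         * when the table itself ends exactly on a 4 GB boundary. */
        for (index = start_index; index < table_size - 1; index += entries_per_4g)
                printf("reserve entry %lu (page just below a 4 GB boundary)\n",
                       index);
}

int main(void)
{
        /* Hypothetical window: starts at 4 GB, spans 8 GB of DMA space */
        reserve_4gb_entries(0x100000000UL >> IOMMU_PAGE_SHIFT,
                            0x200000000UL >> IOMMU_PAGE_SHIFT);
        return 0;
}

With that hypothetical window the sketch reserves only the entry just below
the 8 GB boundary; the entry at the table's own end (the 12 GB boundary) is
left usable, matching the rule above.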

A boot option, protect4gb=off, is provided to remove this 4 GB protection.
This exposes the potential issue for driver and hardware development
purposes.
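
For example, appending the option to the kernel boot parameters disables the
reservation (the surrounding parameters here are illustrative, not from this
patch):

    root=/dev/sda2 console=hvc0 protect4gb=off

Omitting the option, or passing protect4gb=on, keeps the default behaviour of
reserving the boundary entries, since protect4gb defaults to 1 in the patch
below.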
Signed-off-by: Jake Moilanen <moilanen@austin.ibm.com>
Acked-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Parent 1f9209cf
@@ -47,6 +47,8 @@ static int novmerge = 0;
 static int novmerge = 1;
 #endif
 
+static int protect4gb = 1;
+
 static inline unsigned long iommu_num_pages(unsigned long vaddr,
                                             unsigned long slen)
 {
@@ -58,6 +60,16 @@ static inline unsigned long iommu_num_pages(unsigned long vaddr,
         return npages;
 }
 
+static int __init setup_protect4gb(char *str)
+{
+        if (strcmp(str, "on") == 0)
+                protect4gb = 1;
+        else if (strcmp(str, "off") == 0)
+                protect4gb = 0;
+
+        return 1;
+}
+
 static int __init setup_iommu(char *str)
 {
         if (!strcmp(str, "novmerge"))
@@ -67,6 +79,7 @@ static int __init setup_iommu(char *str)
         return 1;
 }
 
+__setup("protect4gb=", setup_protect4gb);
 __setup("iommu=", setup_iommu);
 
 static unsigned long iommu_range_alloc(struct iommu_table *tbl,
@@ -439,6 +452,9 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 {
         unsigned long sz;
+        unsigned long start_index, end_index;
+        unsigned long entries_per_4g;
+        unsigned long index;
         static int welcomed = 0;
         struct page *page;
@@ -460,7 +476,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 #ifdef CONFIG_CRASH_DUMP
         if (ppc_md.tce_get) {
-                unsigned long index, tceval;
+                unsigned long tceval;
                 unsigned long tcecount = 0;
 
                 /*
@@ -490,6 +506,23 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
         ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
 #endif
 
+        /*
+         * DMA cannot cross 4 GB boundary.  Mark last entry of each 4
+         * GB chunk as reserved.
+         */
+        if (protect4gb) {
+                entries_per_4g = 0x100000000l >> IOMMU_PAGE_SHIFT;
+
+                /* Mark the last bit before a 4GB boundary as used */
+                start_index = tbl->it_offset | (entries_per_4g - 1);
+                start_index -= tbl->it_offset;
+
+                end_index = tbl->it_size;
+
+                for (index = start_index; index < end_index - 1; index += entries_per_4g)
+                        __set_bit(index, tbl->it_map);
+        }
+
         if (!welcomed) {
                 printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                        novmerge ? "disabled" : "enabled");