Commit 47952d5e authored by Arnd Bergmann, committed by Paul Mackerras

[PATCH] powerpc: use guarded ioremap for cell on-chip mappings

I'm not sure where the information came from, but I assumed
that doing cache-inhibited mappings for mmio regions was
sufficient.

It seems we also need the guarded bit set, like everyone
else, which is the default for ioremap.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Parent 2fa68747
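For context: on 64-bit powerpc of this generation, plain ioremap() maps a region both cache-inhibited and guarded, while the converted call sites passed only _PAGE_NO_CACHE to __ioremap() and so never set _PAGE_GUARDED. Below is a rough sketch of that relationship, assuming the three-argument __ioremap() and the _PAGE_* flag names of this kernel era; the wrapper name guarded_ioremap_sketch is illustrative, not the kernel's own code.

#include <asm/io.h>       /* __ioremap() */
#include <asm/pgtable.h>  /* _PAGE_NO_CACHE, _PAGE_GUARDED */

/*
 * Sketch only, not the kernel's implementation: ioremap() on ppc64 of
 * this era behaves roughly like this wrapper, i.e. it adds the guarded
 * (G) bit on top of the cache-inhibited (I) bit.  The G bit is what
 * keeps the CPU from issuing speculative or out-of-order accesses to
 * the MMIO range, which cache inhibition alone does not guarantee.
 */
static void __iomem *guarded_ioremap_sketch(unsigned long addr,
					    unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
}

Switching the cell call sites from __ioremap(..., _PAGE_NO_CACHE) to ioremap(...) therefore picks up the guarded bit without changing the mapped address or size.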
......
@@ -226,9 +226,7 @@ static int setup_iic_hardcoded(void)
 regs += 0x20;
 printk(KERN_INFO "IIC for CPU %d at %lx\n", cpu, regs);
-iic->regs = __ioremap(regs, sizeof(struct iic_regs),
-_PAGE_NO_CACHE);
+iic->regs = ioremap(regs, sizeof(struct iic_regs));
 iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe);
 }
......
@@ -269,14 +267,12 @@ static int setup_iic(void)
 }
 iic = &per_cpu(iic, np[0]);
-iic->regs = __ioremap(regs[0], sizeof(struct iic_regs),
-_PAGE_NO_CACHE);
+iic->regs = ioremap(regs[0], sizeof(struct iic_regs));
 iic->target_id = ((np[0] & 2) << 3) + ((np[0] & 1) ? 0xf : 0xe);
 printk("IIC for CPU %d at %lx mapped to %p\n", np[0], regs[0], iic->regs);
 iic = &per_cpu(iic, np[1]);
-iic->regs = __ioremap(regs[2], sizeof(struct iic_regs),
-_PAGE_NO_CACHE);
+iic->regs = ioremap(regs[2], sizeof(struct iic_regs));
 iic->target_id = ((np[1] & 2) << 3) + ((np[1] & 1) ? 0xf : 0xe);
 printk("IIC for CPU %d at %lx mapped to %p\n", np[1], regs[2], iic->regs);
......
......
@@ -344,8 +344,8 @@ static int cell_map_iommu_hardcoded(int num_nodes)
 /* node 0 */
 iommu = &cell_iommus[0];
-iommu->mapped_base = __ioremap(0x20000511000, 0x1000, _PAGE_NO_CACHE);
-iommu->mapped_mmio_base = __ioremap(0x20000510000, 0x1000, _PAGE_NO_CACHE);
+iommu->mapped_base = ioremap(0x20000511000, 0x1000);
+iommu->mapped_mmio_base = ioremap(0x20000510000, 0x1000);
 enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
......
@@ -357,8 +357,8 @@ static int cell_map_iommu_hardcoded(int num_nodes)
 /* node 1 */
 iommu = &cell_iommus[1];
-iommu->mapped_base = __ioremap(0x30000511000, 0x1000, _PAGE_NO_CACHE);
-iommu->mapped_mmio_base = __ioremap(0x30000510000, 0x1000, _PAGE_NO_CACHE);
+iommu->mapped_base = ioremap(0x30000511000, 0x1000);
+iommu->mapped_mmio_base = ioremap(0x30000510000, 0x1000);
 enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
......
@@ -407,8 +407,8 @@ static int cell_map_iommu(void)
 iommu->base = *base;
 iommu->mmio_base = *mmio_base;
-iommu->mapped_base = __ioremap(*base, 0x1000, _PAGE_NO_CACHE);
-iommu->mapped_mmio_base = __ioremap(*mmio_base, 0x1000, _PAGE_NO_CACHE);
+iommu->mapped_base = ioremap(*base, 0x1000);
+iommu->mapped_mmio_base = ioremap(*mmio_base, 0x1000);
 enable_mapping(iommu->mapped_base,
 iommu->mapped_mmio_base);
......
......
@@ -203,7 +203,7 @@ static int __init cbe_find_pmd_mmio(int cpu, struct cbe_pervasive *p)
 pr_debug("pervasive area for CPU %d at %lx, size %x\n",
 cpu, real_address, size);
-p->regs = __ioremap(real_address, size, _PAGE_NO_CACHE);
+p->regs = ioremap(real_address, size);
 p->thread = thread;
 return 0;
 }
......
......
@@ -159,7 +159,7 @@ void spider_init_IRQ_hardcoded(void)
 for (node = 0; node < num_present_cpus()/2; node++) {
 spiderpic = pics[node];
 printk(KERN_DEBUG "SPIDER addr: %lx\n", spiderpic);
-spider_pics[node] = __ioremap(spiderpic, 0x800, _PAGE_NO_CACHE);
+spider_pics[node] = ioremap(spiderpic, 0x800);
 for (n = 0; n < IIC_NUM_EXT; n++) {
 int irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE;
 get_irq_desc(irq)->handler = &spider_pic;
......
@@ -210,7 +210,7 @@ void spider_init_IRQ(void)
 if ( n != 2)
 printk("reg property with invalid number of elements \n");
-spider_pics[node] = __ioremap(spider_reg, 0x800, _PAGE_NO_CACHE);
+spider_pics[node] = ioremap(spider_reg, 0x800);
 printk("SPIDER addr: %lx with %i addr_cells mapped to %p\n",
 spider_reg, n, spider_pics[node]);
......