Commit 4c5107e4 authored by Paul Mundt

sh: pci: Split out new-style PCI core.

This splits off a 'pci-new.c' which is aimed at gradually replacing the
pci-auto backend and the arch/sh/drivers/pci/pci.c core respectively.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Parent 9ade1217
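For orientation, the pcibios_init() routine in the new pci-new.c below walks a board-supplied board_pci_channels[] table: each channel is initialized through its ->init() hook and, if that succeeds, scanned and given resources. The sketch below shows what a board-side entry might look like; it uses only the struct pci_channel fields this file dereferences (init, pci_ops, io_resource, mem_resource, io_base), and every sh7780_*-style identifier and address is a hypothetical placeholder, not something added by this commit.

/*
 * Hypothetical board-side registration, sketched only from the fields that
 * pci-new.c uses. Real boards define this in their own pci-<chip>.c; all
 * names and addresses below are placeholders.
 */
static struct resource sh7780_io_resource = {
	.name	= "SH7780 IO",
	.start	= 0x2000,
	.end	= 0x2000 + 0x00400000 - 1,
	.flags	= IORESOURCE_IO,
};

static struct resource sh7780_mem_resource = {
	.name	= "SH7780 mem",
	.start	= 0xfd000000,
	.end	= 0xfd000000 + 0x01000000 - 1,
	.flags	= IORESOURCE_MEM,
};

struct pci_channel board_pci_channels[] = {
	{
		.init		= sh7780_pci_init,	/* must return 0 on success */
		.pci_ops	= &sh7780_pci_ops,
		.io_resource	= &sh7780_io_resource,
		.mem_resource	= &sh7780_mem_resource,
		.io_base	= 0,			/* CPU-to-bus offset for I/O BARs */
	},
	{ .init = NULL, },	/* a NULL ->init terminates pcibios_init()'s loops */
};

With such a table in place, the new core below handles bus scanning, resource sizing and assignment, and IRQ fixups generically.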
arch/sh/drivers/pci/Kconfig
@@ -18,10 +18,17 @@ config SH_PCIDMA_NONCOHERENT
 	  bridge integrated with your SH CPU, refer carefully to the chip specs
 	  to see if you can say 'N' here. Otherwise, leave it as 'Y'.
 
+# Temporary config option for transitioning off of PCI_AUTO
+config PCI_NEW
+	bool
+	depends on PCI
+	default y if CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7780 || \
+		     CPU_SUBTYPE_SH7785
+
 # This is also board-specific
 config PCI_AUTO
 	bool
-	depends on PCI
+	depends on PCI && !PCI_NEW
 	default y
 
 config PCI_AUTO_UPDATE_RESOURCES
@@ -34,4 +41,3 @@ config PCI_AUTO_UPDATE_RESOURCES
 	  for some reason, you have a board that simply refuses to work
 	  with its resources updated beyond what they are when the device
 	  is powered up, set this to N. Everyone else will want this as Y.
-
arch/sh/drivers/pci/Makefile
@@ -2,8 +2,8 @@
 # Makefile for the PCI specific kernel interface routines under Linux.
 #
 
-obj-y					+= pci.o
-obj-$(CONFIG_PCI_AUTO)			+= pci-auto.o
+obj-$(CONFIG_PCI_AUTO)			:= pci.o pci-auto.o
+obj-$(CONFIG_PCI_NEW)			:= pci-new.o
 
 obj-$(CONFIG_CPU_SUBTYPE_SH7751)	+= pci-sh7751.o ops-sh4.o
 obj-$(CONFIG_CPU_SUBTYPE_SH7751R)	+= pci-sh7751.o ops-sh4.o
arch/sh/drivers/pci/pci-new.c (new file)
/*
* New-style PCI core.
*
* Copyright (c) 2002 M. R. Brown
* Copyright (c) 2004 - 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
static int __init pcibios_init(void)
{
struct pci_channel *p;
struct pci_bus *bus;
int busno;
/* init channels */
busno = 0;
for (p = board_pci_channels; p->init; p++) {
if (p->init(p) == 0)
p->enabled = 1;
else
pr_err("Unable to init pci channel %d\n", busno);
busno++;
}
/* scan the buses */
busno = 0;
for (p = board_pci_channels; p->init; p++) {
if (p->enabled) {
bus = pci_scan_bus(busno, p->pci_ops, p);
busno = bus->subordinate + 1;
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
pci_enable_bridges(bus);
}
}
pci_fixup_irqs(pci_common_swizzle, pcibios_map_platform_irq);
dma_debug_add_bus(&pci_bus_type);
return 0;
}
subsys_initcall(pcibios_init);
static void pcibios_fixup_device_resources(struct pci_dev *dev,
struct pci_bus *bus)
{
/* Update device resources. */
struct pci_channel *chan = bus->sysdata;
unsigned long offset = 0;
int i;
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
if (!dev->resource[i].start)
continue;
if (dev->resource[i].flags & IORESOURCE_PCI_FIXED)
continue;
if (dev->resource[i].flags & IORESOURCE_IO)
offset = chan->io_base;
else if (dev->resource[i].flags & IORESOURCE_MEM)
offset = 0;
dev->resource[i].start += offset;
dev->resource[i].end += offset;
}
}
/*
* Called after each bus is probed, but before its children
* are examined.
*/
void __devinit __weak pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_dev *dev = bus->self;
struct list_head *ln;
struct pci_channel *chan = bus->sysdata;
if (!dev) {
bus->resource[0] = chan->io_resource;
bus->resource[1] = chan->mem_resource;
}
for (ln = bus->devices.next; ln != &bus->devices; ln = ln->next) {
dev = pci_dev_b(ln);
if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
pcibios_fixup_device_resources(dev, bus);
}
}
void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
struct resource *res)
{
struct pci_channel *chan = dev->sysdata;
unsigned long offset = 0;
if (res->flags & IORESOURCE_IO)
offset = chan->io_base;
else if (res->flags & IORESOURCE_MEM)
offset = 0;
region->start = res->start - offset;
region->end = res->end - offset;
}
void __devinit
pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
struct pci_bus_region *region)
{
struct pci_channel *chan = dev->sysdata;
unsigned long offset = 0;
if (res->flags & IORESOURCE_IO)
offset = chan->io_base;
else if (res->flags & IORESOURCE_MEM)
offset = 0;
res->start = region->start + offset;
res->end = region->end + offset;
}
void pcibios_align_resource(void *data, struct resource *res,
resource_size_t size, resource_size_t align)
__attribute__ ((weak));
/*
* We need to avoid collisions with `mirrored' VGA ports
* and other strange ISA hardware, so we always want the
* addresses to be allocated in the 0x000-0x0ff region
* modulo 0x400.
*/
void pcibios_align_resource(void *data, struct resource *res,
resource_size_t size, resource_size_t align)
{
if (res->flags & IORESOURCE_IO) {
resource_size_t start = res->start;
if (start & 0x300) {
start = (start + 0x3ff) & ~0x3ff;
res->start = start;
}
}
}
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
u16 cmd, old_cmd;
int idx;
struct resource *r;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
for(idx=0; idx<6; idx++) {
if (!(mask & (1 << idx)))
continue;
r = &dev->resource[idx];
if (!r->start && r->end) {
printk(KERN_ERR "PCI: Device %s not available because "
"of resource collisions\n", pci_name(dev));
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
if (r->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
if (dev->resource[PCI_ROM_RESOURCE].start)
cmd |= PCI_COMMAND_MEMORY;
if (cmd != old_cmd) {
printk(KERN_INFO "PCI: Enabling device %s (%04x -> %04x)\n",
pci_name(dev), old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
}
/*
* If we set up a device for bus mastering, we need to check and set
* the latency timer as it may not be properly set.
*/
static unsigned int pcibios_max_latency = 255;
void pcibios_set_master(struct pci_dev *dev)
{
u8 lat;
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
if (lat < 16)
lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
else if (lat > pcibios_max_latency)
lat = pcibios_max_latency;
else
return;
printk(KERN_INFO "PCI: Setting latency timer of device %s to %d\n",
pci_name(dev), lat);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
void __init pcibios_update_irq(struct pci_dev *dev, int irq)
{
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
resource_size_t start = pci_resource_start(dev, bar);
resource_size_t len = pci_resource_len(dev, bar);
unsigned long flags = pci_resource_flags(dev, bar);
if (unlikely(!len || !start))
return NULL;
if (maxlen && len > maxlen)
len = maxlen;
/*
* Presently the IORESOURCE_MEM case is a bit special, most
* SH7751 style PCI controllers have PCI memory at a fixed
* location in the address space where no remapping is desired.
* With the IORESOURCE_MEM case more care has to be taken
* to inhibit page table mapping for legacy cores, but this is
* punted off to __ioremap().
* -- PFM.
*/
if (flags & IORESOURCE_IO)
return ioport_map(start, len);
if (flags & IORESOURCE_MEM)
return ioremap(start, len);
return NULL;
}
EXPORT_SYMBOL(pci_iomap);
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);
EXPORT_SYMBOL(board_pci_channels);
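As a quick illustration of the io_base offsetting done by pcibios_resource_to_bus()/pcibios_bus_to_resource() above, here is a small standalone model (plain userspace C with simplified stand-in structs, not kernel code): I/O windows are shifted by a channel's io_base, memory windows pass through unchanged, and the two conversions are inverses of each other.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures used above. */
struct res    { unsigned long start, end, flags; };
struct region { unsigned long start, end; };
#define FLAG_IO  1
#define FLAG_MEM 2

/* Mirror of the offset logic in pci-new.c: I/O resources are shifted by the
 * channel's io_base, memory resources are identity-mapped. */
static void res_to_bus(struct region *r, const struct res *res, unsigned long io_base)
{
	unsigned long offset = (res->flags & FLAG_IO) ? io_base : 0;
	r->start = res->start - offset;
	r->end   = res->end   - offset;
}

static void bus_to_res(struct res *res, const struct region *r, unsigned long io_base)
{
	unsigned long offset = (res->flags & FLAG_IO) ? io_base : 0;
	res->start = r->start + offset;
	res->end   = r->end   + offset;
}

int main(void)
{
	unsigned long io_base = 0xfe240000;	/* hypothetical channel io_base */
	struct res io = { io_base + 0x100, io_base + 0x1ff, FLAG_IO };
	struct region bus;

	res_to_bus(&bus, &io, io_base);		/* CPU view -> bus view */
	printf("bus window: 0x%lx-0x%lx\n", bus.start, bus.end);	/* 0x100-0x1ff */

	bus_to_res(&io, &bus, io_base);		/* and back again */
	printf("cpu window: 0x%lx-0x%lx\n", io.start, io.end);
	return 0;
}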