Commit 72a73a69 authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/gregkh/pci-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/gregkh/pci-2.6: (28 commits)
  PCI: make arch/i386/pci/common.c:pci_bf_sort static
  PCI: ibmphp_pci.c: fix NULL dereference
  pciehp: remove unnecessary pci_disable_msi
  pciehp: remove unnecessary free_irq
  PCI: rpaphp: change device tree examination
  PCI: Change memory allocation for acpiphp slots
  i2c-i801: SMBus patch for Intel ICH9
  PCI: irq: irq and pci_ids patch for Intel ICH9
  PCI: pci_{enable,disable}_device() nestable ports
  PCI: switch pci_{enable,disable}_device() to be nestable
  PCI: arch/i386/kernel/pci-dma.c: ioremap balanced with iounmap
  pci/i386: style cleanups
  PCI: Block on access to temporarily unavailable pci device
  pci: fix __pci_register_driver error handling
  pci: clear osc support flags if no _OSC method
  acpiphp: fix missing acpiphp_glue_exit()
  acpiphp: fix use of list_for_each macro
  Altix: Initial ACPI support - ROM shadowing.
  Altix: SN ACPI hotplug support.
  Altix: Add initial ACPI IO support
  ...
......@@ -75,7 +75,7 @@ EXPORT_SYMBOL(dma_free_coherent);
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dma_addr_t device_addr, size_t size, int flags)
{
void __iomem *mem_base;
void __iomem *mem_base = NULL;
int pages = size >> PAGE_SHIFT;
int bitmap_size = (pages + 31)/32;
......@@ -114,6 +114,8 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
free1_out:
kfree(dev->dma_mem->bitmap);
out:
if (mem_base)
iounmap(mem_base);
return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
......
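For context, a minimal caller-side sketch of the API fixed above; the device, addresses and size are made-up placeholders, and the point is only that a failed declaration no longer leaks the ioremap() done inside dma_declare_coherent_memory():

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical device-local SRAM window; all values are placeholders. */
#define MYDEV_SRAM_BUS_ADDR	0xfe000000UL
#define MYDEV_SRAM_DEV_ADDR	0x00000000UL
#define MYDEV_SRAM_SIZE		(64 * 1024)

static int mydev_setup_coherent(struct pci_dev *pdev)
{
	/* Returns a nonzero DMA_MEMORY_* value on success, 0 on failure. */
	if (!dma_declare_coherent_memory(&pdev->dev,
					 MYDEV_SRAM_BUS_ADDR,
					 MYDEV_SRAM_DEV_ADDR,
					 MYDEV_SRAM_SIZE,
					 DMA_MEMORY_MAP))
		return -ENOMEM;

	/* dma_alloc_coherent(&pdev->dev, ...) now allocates from the SRAM. */
	return 0;
}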
......@@ -20,7 +20,7 @@
unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
PCI_PROBE_MMCONF;
int pci_bf_sort;
static int pci_bf_sort;
int pci_routeirq;
int pcibios_last_bus = -1;
unsigned long pirq_table_addr;
......
......@@ -74,52 +74,6 @@ static void __devinit pci_fixup_ncr53c810(struct pci_dev *d)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, pci_fixup_ncr53c810);
static void __devinit pci_fixup_ide_bases(struct pci_dev *d)
{
int i;
/*
* PCI IDE controllers use non-standard I/O port decoding, respect it.
*/
if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE)
return;
DBG("PCI: IDE base address fixup for %s\n", pci_name(d));
for(i=0; i<4; i++) {
struct resource *r = &d->resource[i];
if ((r->start & ~0x80) == 0x374) {
r->start |= 2;
r->end = r->start;
}
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
static void __devinit pci_fixup_ide_trash(struct pci_dev *d)
{
int i;
/*
* Runs the fixup only for the first IDE controller
* (Shai Fultheim - shai@ftcon.com)
*/
static int called = 0;
if (called)
return;
called = 1;
/*
* There exist PCI IDE controllers which have utter garbage
* in first four base registers. Ignore that.
*/
DBG("PCI: IDE base address trash cleared for %s\n", pci_name(d));
for(i=0; i<4; i++)
d->resource[i].start = d->resource[i].end = d->resource[i].flags = 0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5513, pci_fixup_ide_trash);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, pci_fixup_ide_trash);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_11, pci_fixup_ide_trash);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_9, pci_fixup_ide_trash);
static void __devinit pci_fixup_latency(struct pci_dev *d)
{
/*
......
......@@ -104,16 +104,24 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
/* Depth-First Search on bus tree */
list_for_each_entry(bus, bus_list, node) {
if ((dev = bus->self)) {
for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
for (idx = PCI_BRIDGE_RESOURCES;
idx < PCI_NUM_RESOURCES; idx++) {
r = &dev->resource[idx];
if (!r->flags)
continue;
pr = pci_find_parent_resource(dev, r);
if (!r->start || !pr || request_resource(pr, r) < 0) {
printk(KERN_ERR "PCI: Cannot allocate resource region %d of bridge %s\n", idx, pci_name(dev));
/* Something is wrong with the region.
Invalidate the resource to prevent child
resource allocations in this range. */
if (!r->start || !pr ||
request_resource(pr, r) < 0) {
printk(KERN_ERR "PCI: Cannot allocate "
"resource region %d "
"of bridge %s\n",
idx, pci_name(dev));
/*
* Something is wrong with the region.
* Invalidate the resource to prevent
* child resource allocations in this
* range.
*/
r->flags = 0;
}
}
......@@ -131,7 +139,7 @@ static void __init pcibios_allocate_resources(int pass)
for_each_pci_dev(dev) {
pci_read_config_word(dev, PCI_COMMAND, &command);
for(idx = 0; idx < 6; idx++) {
for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
r = &dev->resource[idx];
if (r->parent) /* Already allocated */
continue;
......@@ -142,11 +150,15 @@ static void __init pcibios_allocate_resources(int pass)
else
disabled = !(command & PCI_COMMAND_MEMORY);
if (pass == disabled) {
DBG("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n",
DBG("PCI: Resource %08lx-%08lx "
"(f=%lx, d=%d, p=%d)\n",
r->start, r->end, r->flags, disabled, pass);
pr = pci_find_parent_resource(dev, r);
if (!pr || request_resource(pr, r) < 0) {
printk(KERN_ERR "PCI: Cannot allocate resource region %d of device %s\n", idx, pci_name(dev));
printk(KERN_ERR "PCI: Cannot allocate "
"resource region %d "
"of device %s\n",
idx, pci_name(dev));
/* We'll assign a new address later */
r->end -= r->start;
r->start = 0;
......@@ -156,12 +168,16 @@ static void __init pcibios_allocate_resources(int pass)
if (!pass) {
r = &dev->resource[PCI_ROM_RESOURCE];
if (r->flags & IORESOURCE_ROM_ENABLE) {
/* Turn the ROM off, leave the resource region, but keep it unregistered. */
/* Turn the ROM off, leave the resource region,
* but keep it unregistered. */
u32 reg;
DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
DBG("PCI: Switching off ROM of %s\n",
pci_name(dev));
r->flags &= ~IORESOURCE_ROM_ENABLE;
pci_read_config_dword(dev, dev->rom_base_reg, &reg);
pci_write_config_dword(dev, dev->rom_base_reg, reg & ~PCI_ROM_ADDRESS_ENABLE);
pci_read_config_dword(dev,
dev->rom_base_reg, &reg);
pci_write_config_dword(dev, dev->rom_base_reg,
reg & ~PCI_ROM_ADDRESS_ENABLE);
}
}
}
......@@ -173,9 +189,11 @@ static int __init pcibios_assign_resources(void)
struct resource *r, *pr;
if (!(pci_probe & PCI_ASSIGN_ROMS)) {
/* Try to use BIOS settings for ROMs, otherwise let
pci_assign_unassigned_resources() allocate the new
addresses. */
/*
* Try to use BIOS settings for ROMs, otherwise let
* pci_assign_unassigned_resources() allocate the new
* addresses.
*/
for_each_pci_dev(dev) {
r = &dev->resource[PCI_ROM_RESOURCE];
if (!r->flags || !r->start)
......@@ -215,9 +233,9 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask)
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
for(idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
/* Only set up the requested stuff */
if (!(mask & (1<<idx)))
if (!(mask & (1 << idx)))
continue;
r = &dev->resource[idx];
......@@ -227,7 +245,9 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask)
(!(r->flags & IORESOURCE_ROM_ENABLE)))
continue;
if (!r->start && r->end) {
printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
printk(KERN_ERR "PCI: Device %s not available "
"because of resource collisions\n",
pci_name(dev));
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
......@@ -236,7 +256,8 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask)
cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != old_cmd) {
printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
printk("PCI: Enabling device %s (%04x -> %04x)\n",
pci_name(dev), old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
......@@ -258,7 +279,8 @@ void pcibios_set_master(struct pci_dev *dev)
lat = pcibios_max_latency;
else
return;
printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat);
printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n",
pci_name(dev), lat);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
......
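As a side note on the mask argument cleaned up above, each bit of mask selects one entry of dev->resource[]. A hedged sketch of how a caller might form it; the wrapper function is hypothetical, and pcibios_enable_resources() is arch-internal (normally reached through pcibios_enable_device()):

#include <linux/pci.h>

int pcibios_enable_resources(struct pci_dev *dev, int mask);	/* arch/i386/pci/pci.h */

/* Hypothetical helper: enable only BAR 0 and the expansion ROM. */
static int example_enable_bar0_and_rom(struct pci_dev *dev)
{
	int mask = (1 << 0) | (1 << PCI_ROM_RESOURCE);

	return pcibios_enable_resources(dev, mask);
}

/* Enabling everything uses an all-ones mask, as pci_enable_device() does:
 * pcibios_enable_resources(dev, (1 << PCI_NUM_RESOURCES) - 1);
 */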
......@@ -543,6 +543,12 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
case PCI_DEVICE_ID_INTEL_ICH8_2:
case PCI_DEVICE_ID_INTEL_ICH8_3:
case PCI_DEVICE_ID_INTEL_ICH8_4:
case PCI_DEVICE_ID_INTEL_ICH9_0:
case PCI_DEVICE_ID_INTEL_ICH9_1:
case PCI_DEVICE_ID_INTEL_ICH9_2:
case PCI_DEVICE_ID_INTEL_ICH9_3:
case PCI_DEVICE_ID_INTEL_ICH9_4:
case PCI_DEVICE_ID_INTEL_ICH9_5:
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;
......
......@@ -469,10 +469,11 @@ pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
}
}
static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
{
pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
static void __devinit pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
......@@ -493,6 +494,7 @@ pcibios_fixup_bus (struct pci_bus *b)
}
list_for_each_entry(dev, &b->devices, bus_list)
pcibios_fixup_device_resources(dev);
platform_pci_fixup_bus(b);
return;
}
......@@ -738,75 +740,44 @@ int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
return ret;
}
/* It's defined in drivers/pci/pci.c */
extern u8 pci_cache_line_size;
/**
* pci_cacheline_size - determine cacheline size for PCI devices
* @dev: void
* set_pci_cacheline_size - determine cacheline size for PCI devices
*
* We want to use the line-size of the outer-most cache. We assume
* that this line-size is the same for all CPUs.
*
* Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
*
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
static unsigned long
pci_cacheline_size (void)
static void __init set_pci_cacheline_size(void)
{
u64 levels, unique_caches;
s64 status;
pal_cache_config_info_t cci;
static u8 cacheline_size;
if (cacheline_size)
return cacheline_size;
status = ia64_pal_cache_summary(&levels, &unique_caches);
if (status != 0) {
printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
__FUNCTION__, status);
return SMP_CACHE_BYTES;
printk(KERN_ERR "%s: ia64_pal_cache_summary() failed "
"(status=%ld)\n", __FUNCTION__, status);
return;
}
status = ia64_pal_cache_config_info(levels - 1, /* cache_type (data_or_unified)= */ 2,
&cci);
status = ia64_pal_cache_config_info(levels - 1,
/* cache_type (data_or_unified)= */ 2, &cci);
if (status != 0) {
printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed (status=%ld)\n",
__FUNCTION__, status);
return SMP_CACHE_BYTES;
printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed "
"(status=%ld)\n", __FUNCTION__, status);
return;
}
cacheline_size = 1 << cci.pcci_line_size;
return cacheline_size;
pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
}
/**
* pcibios_prep_mwi - helper function for drivers/pci/pci.c:pci_set_mwi()
* @dev: the PCI device for which MWI is enabled
*
* For ia64, we can get the cacheline sizes from PAL.
*
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
int
pcibios_prep_mwi (struct pci_dev *dev)
{
unsigned long desired_linesize, current_linesize;
int rc = 0;
u8 pci_linesize;
desired_linesize = pci_cacheline_size();
pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &pci_linesize);
current_linesize = 4 * pci_linesize;
if (desired_linesize != current_linesize) {
printk(KERN_WARNING "PCI: slot %s has incorrect PCI cache line size of %lu bytes,",
pci_name(dev), current_linesize);
if (current_linesize > desired_linesize) {
printk(" expected %lu bytes instead\n", desired_linesize);
rc = -EINVAL;
} else {
printk(" correcting to %lu\n", desired_linesize);
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, desired_linesize / 4);
}
}
return rc;
static int __init pcibios_init(void)
{
set_pci_cacheline_size();
return 0;
}
subsys_initcall(pcibios_init);
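For illustration, the division by 4 above converts the PAL-reported line size from bytes to the 32-bit-word units used by pci_cache_line_size and the PCI_CACHE_LINE_SIZE register; a worked example with an assumed 128-byte outermost cache line:

/* Assumed example values, not taken from the patch:
 *   cci.pcci_line_size == 7   ->  line size = 1 << 7 = 128 bytes
 *   pci_cache_line_size       =  128 / 4    = 32 dwords
 * pci_set_mwi() would then program 32 (0x20) into PCI_CACHE_LINE_SIZE for
 * each device, which is what the removed pcibios_prep_mwi() used to check
 * per device.
 */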
......@@ -4,13 +4,14 @@
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All Rights Reserved.
# Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All Rights Reserved.
#
CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
huberror.o io_init.o iomv.o klconflib.o pio_phys.o \
huberror.o io_acpi_init.o io_common.o \
io_init.o iomv.o klconflib.o pio_phys.o \
sn2/
obj-$(CONFIG_IA64_GENERIC) += machvec.o
obj-$(CONFIG_SGI_TIOCX) += tiocx.o
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/sn_sal.h>
#include "xtalk/hubdev.h"
#include <linux/acpi.h>
/*
* The code in this file will only be executed when running with
* a PROM that has ACPI IO support. (i.e., SN_ACPI_BASE_SUPPORT() == 1)
*/
/*
* This value must match the UUID the PROM uses
* (io/acpi/defblk.c) when building a vendor descriptor.
*/
struct acpi_vendor_uuid sn_uuid = {
.subtype = 0,
.data = { 0x2c, 0xc6, 0xa6, 0xfe, 0x9c, 0x44, 0xda, 0x11,
0xa2, 0x7c, 0x08, 0x00, 0x69, 0x13, 0xea, 0x51 },
};
/*
* Perform the early IO init in PROM.
*/
static s64
sal_ioif_init(u64 *result)
{
struct ia64_sal_retval isrv = {0,0,0,0};
SAL_CALL_NOLOCK(isrv,
SN_SAL_IOIF_INIT, 0, 0, 0, 0, 0, 0, 0);
*result = isrv.v0;
return isrv.status;
}
/*
* sn_hubdev_add - The 'add' function of the acpi_sn_hubdev_driver.
* Called for every "SGIHUB" or "SGITIO" device defined
* in the ACPI namespace.
*/
static int __init
sn_hubdev_add(struct acpi_device *device)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
u64 addr;
struct hubdev_info *hubdev;
struct hubdev_info *hubdev_ptr;
int i;
u64 nasid;
struct acpi_resource *resource;
int ret = 0;
acpi_status status;
struct acpi_resource_vendor_typed *vendor;
extern void sn_common_hubdev_init(struct hubdev_info *);
status = acpi_get_vendor_resource(device->handle, METHOD_NAME__CRS,
&sn_uuid, &buffer);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR
"sn_hubdev_add: acpi_get_vendor_resource() failed: %d\n",
status);
return 1;
}
resource = buffer.pointer;
vendor = &resource->data.vendor_typed;
if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
sizeof(struct hubdev_info *)) {
printk(KERN_ERR
"sn_hubdev_add: Invalid vendor data length: %d\n",
vendor->byte_length);
ret = 1;
goto exit;
}
memcpy(&addr, vendor->byte_data, sizeof(struct hubdev_info *));
hubdev_ptr = __va((struct hubdev_info *) addr);
nasid = hubdev_ptr->hdi_nasid;
i = nasid_to_cnodeid(nasid);
hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
*hubdev = *hubdev_ptr;
sn_common_hubdev_init(hubdev);
exit:
kfree(buffer.pointer);
return ret;
}
/*
* sn_get_bussoft_ptr() - The pcibus_bussoft pointer is found in
* the ACPI Vendor resource for this bus.
*/
static struct pcibus_bussoft *
sn_get_bussoft_ptr(struct pci_bus *bus)
{
u64 addr;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_handle handle;
struct pcibus_bussoft *prom_bussoft_ptr;
struct acpi_resource *resource;
acpi_status status;
struct acpi_resource_vendor_typed *vendor;
handle = PCI_CONTROLLER(bus)->acpi_handle;
status = acpi_get_vendor_resource(handle, METHOD_NAME__CRS,
&sn_uuid, &buffer);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR "get_acpi_pcibus_ptr: "
"get_acpi_bussoft_info() failed: %d\n",
status);
return NULL;
}
resource = buffer.pointer;
vendor = &resource->data.vendor_typed;
if ((vendor->byte_length - sizeof(struct acpi_vendor_uuid)) !=
sizeof(struct pcibus_bussoft *)) {
printk(KERN_ERR
"get_acpi_bussoft_ptr: Invalid vendor data "
"length %d\n", vendor->byte_length);
kfree(buffer.pointer);
return NULL;
}
memcpy(&addr, vendor->byte_data, sizeof(struct pcibus_bussoft *));
prom_bussoft_ptr = __va((struct pcibus_bussoft *) addr);
kfree(buffer.pointer);
return prom_bussoft_ptr;
}
/*
* sn_acpi_bus_fixup
*/
void
sn_acpi_bus_fixup(struct pci_bus *bus)
{
struct pci_dev *pci_dev = NULL;
struct pcibus_bussoft *prom_bussoft_ptr;
extern void sn_common_bus_fixup(struct pci_bus *,
struct pcibus_bussoft *);
if (!bus->parent) { /* If root bus */
prom_bussoft_ptr = sn_get_bussoft_ptr(bus);
if (prom_bussoft_ptr == NULL) {
printk(KERN_ERR
"sn_pci_fixup_bus: 0x%04x:0x%02x Unable to "
"obtain prom_bussoft_ptr\n",
pci_domain_nr(bus), bus->number);
return;
}
sn_common_bus_fixup(bus, prom_bussoft_ptr);
}
list_for_each_entry(pci_dev, &bus->devices, bus_list) {
sn_pci_fixup_slot(pci_dev);
}
}
/*
* sn_acpi_slot_fixup - Perform any SN specific slot fixup.
* At present there does not appear to be
* any generic way to handle a ROM image
* that has been shadowed by the PROM, so
* we pass a pointer to it within the
* pcidev_info structure.
*/
void
sn_acpi_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
{
void __iomem *addr;
size_t size;
if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) {
/*
* A valid ROM image exists and has been shadowed by the
* PROM. Setup the pci_dev ROM resource to point to
* the shadowed copy.
*/
size = dev->resource[PCI_ROM_RESOURCE].end -
dev->resource[PCI_ROM_RESOURCE].start;
addr =
ioremap(pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE],
size);
dev->resource[PCI_ROM_RESOURCE].start = (unsigned long) addr;
dev->resource[PCI_ROM_RESOURCE].end =
(unsigned long) addr + size;
dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY;
}
}
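A hedged sketch of how a driver could consume the shadowed copy set up above; since IORESOURCE_ROM_BIOS_COPY is set, the generic ROM helpers return the shadowed image instead of touching the ROM BAR (the function name is hypothetical):

#include <linux/pci.h>

static void example_read_shadowed_rom(struct pci_dev *dev)
{
	size_t size;
	void __iomem *rom = pci_map_rom(dev, &size);

	if (!rom)
		return;
	/* ... parse the option ROM image ... */
	pci_unmap_rom(dev, rom);
}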
static struct acpi_driver acpi_sn_hubdev_driver = {
.name = "SGI HUBDEV Driver",
.ids = "SGIHUB,SGITIO",
.ops = {
.add = sn_hubdev_add,
},
};
/*
* sn_io_acpi_init - PROM has ACPI support for IO, defining at a minimum the
* nodes and root buses in the DSDT. As a result, bus scanning
* will be initiated by the Linux ACPI code.
*/
void __init
sn_io_acpi_init(void)
{
u64 result;
s64 status;
acpi_bus_register_driver(&acpi_sn_hubdev_driver);
status = sal_ioif_init(&result);
if (status || result)
panic("sal_ioif_init failed: [%lx] %s\n",
status, ia64_sal_strerror(status));
}
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/bootmem.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/sn_feature_sets.h>
#include <asm/sn/geo.h>
#include <asm/sn/io.h>
#include <asm/sn/l1.h>
#include <asm/sn/module.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/simulator.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/tioca_provider.h>
#include <asm/sn/tioce_provider.h>
#include "xtalk/hubdev.h"
#include "xtalk/xwidgetdev.h"
#include <linux/acpi.h>
#include <asm/sn/sn2/sn_hwperf.h>
#include <asm/sn/acpi.h>
extern void sn_init_cpei_timer(void);
extern void register_sn_procfs(void);
extern void sn_acpi_bus_fixup(struct pci_bus *);
extern void sn_bus_fixup(struct pci_bus *);
extern void sn_acpi_slot_fixup(struct pci_dev *, struct pcidev_info *);
extern void sn_more_slot_fixup(struct pci_dev *, struct pcidev_info *);
extern void sn_legacy_pci_window_fixup(struct pci_controller *, u64, u64);
extern void sn_io_acpi_init(void);
extern void sn_io_init(void);
static struct list_head sn_sysdata_list;
/* sysdata list struct */
struct sysdata_el {
struct list_head entry;
void *sysdata;
};
int sn_ioif_inited; /* SN I/O infrastructure initialized? */
struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
/*
* Hooks and struct for unsupported pci providers
*/
static dma_addr_t
sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
{
return 0;
}
static void
sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
{
return;
}
static void *
sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller)
{
return NULL;
}
static struct sn_pcibus_provider sn_pci_default_provider = {
.dma_map = sn_default_pci_map,
.dma_map_consistent = sn_default_pci_map,
.dma_unmap = sn_default_pci_unmap,
.bus_fixup = sn_default_pci_bus_fixup,
};
/*
* Retrieve the DMA Flush List given nasid, widget, and device.
* This list is needed to implement the WAR - Flush DMA data on PIO Reads.
*/
static inline u64
sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
u64 address)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
(u64) nasid, (u64) widget_num,
(u64) device_num, (u64) address, 0, 0, 0);
return ret_stuff.status;
}
/*
* Retrieve the pci device information given the bus and device|function number.
*/
static inline u64
sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
u64 sn_irq_info)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
(u64) segment, (u64) bus_number, (u64) devfn,
(u64) pci_dev,
sn_irq_info, 0, 0);
return ret_stuff.v0;
}
/*
* sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
* device.
*/
inline struct pcidev_info *
sn_pcidev_info_get(struct pci_dev *dev)
{
struct pcidev_info *pcidev;
list_for_each_entry(pcidev,
&(SN_PLATFORM_DATA(dev)->pcidev_info), pdi_list) {
if (pcidev->pdi_linux_pcidev == dev)
return pcidev;
}
return NULL;
}
/* Older PROM flush WAR
*
* 01/16/06 -- This war will be in place until a new official PROM is released.
* Additionally note that the struct sn_flush_device_war also has to be
* removed from arch/ia64/sn/include/xtalk/hubdev.h
*/
static u8 war_implemented = 0;
static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
struct sn_flush_device_common *common)
{
struct sn_flush_device_war *war_list;
struct sn_flush_device_war *dev_entry;
struct ia64_sal_retval isrv = {0,0,0,0};
if (!war_implemented) {
printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
"PROM flush WAR\n");
war_implemented = 1;
}
war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
if (!war_list)
BUG();
SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
nasid, widget, __pa(war_list), 0, 0, 0 ,0);
if (isrv.status)
panic("sn_device_fixup_war failed: %s\n",
ia64_sal_strerror(isrv.status));
dev_entry = war_list + device;
memcpy(common,dev_entry, sizeof(*common));
kfree(war_list);
return isrv.status;
}
/*
* sn_common_hubdev_init() - This routine is called to initialize the HUB data
* structure for each node in the system.
*/
void __init
sn_common_hubdev_init(struct hubdev_info *hubdev)
{
struct sn_flush_device_kernel *sn_flush_device_kernel;
struct sn_flush_device_kernel *dev_entry;
s64 status;
int widget, device, size;
/* Attach the error interrupt handlers */
if (hubdev->hdi_nasid & 1) /* If TIO */
ice_error_init(hubdev);
else
hub_error_init(hubdev);
for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
if (!hubdev->hdi_flush_nasid_list.widget_p)
return;
size = (HUB_WIDGET_ID_MAX + 1) *
sizeof(struct sn_flush_device_kernel *);
hubdev->hdi_flush_nasid_list.widget_p =
kzalloc(size, GFP_KERNEL);
if (!hubdev->hdi_flush_nasid_list.widget_p)
BUG();
for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
size = DEV_PER_WIDGET *
sizeof(struct sn_flush_device_kernel);
sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
if (!sn_flush_device_kernel)
BUG();
dev_entry = sn_flush_device_kernel;
for (device = 0; device < DEV_PER_WIDGET;
device++, dev_entry++) {
size = sizeof(struct sn_flush_device_common);
dev_entry->common = kzalloc(size, GFP_KERNEL);
if (!dev_entry->common)
BUG();
if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST))
status = sal_get_device_dmaflush_list(
hubdev->hdi_nasid, widget, device,
(u64)(dev_entry->common));
else
status = sn_device_fixup_war(hubdev->hdi_nasid,
widget, device,
dev_entry->common);
if (status != SALRET_OK)
panic("SAL call failed: %s\n",
ia64_sal_strerror(status));
spin_lock_init(&dev_entry->sfdl_flush_lock);
}
if (sn_flush_device_kernel)
hubdev->hdi_flush_nasid_list.widget_p[widget] =
sn_flush_device_kernel;
}
}
void sn_pci_unfixup_slot(struct pci_dev *dev)
{
struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev;
sn_irq_unfixup(dev);
pci_dev_put(host_pci_dev);
pci_dev_put(dev);
}
/*
* sn_pci_fixup_slot() - This routine sets up a slot's resources consistent
* with the Linux PCI abstraction layer. Resources
* acquired from our PCI provider include PIO maps
* to BAR space and interrupt objects.
*/
void sn_pci_fixup_slot(struct pci_dev *dev)
{
int segment = pci_domain_nr(dev->bus);
int status = 0;
struct pcibus_bussoft *bs;
struct pci_bus *host_pci_bus;
struct pci_dev *host_pci_dev;
struct pcidev_info *pcidev_info;
struct sn_irq_info *sn_irq_info;
unsigned int bus_no, devfn;
pci_dev_get(dev); /* for the sysdata pointer */
pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
if (!pcidev_info)
BUG(); /* Cannot afford to run out of memory */
sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
if (!sn_irq_info)
BUG(); /* Cannot afford to run out of memory */
/* Call to retrieve pci device information needed by kernel. */
status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
dev->devfn,
(u64) __pa(pcidev_info),
(u64) __pa(sn_irq_info));
if (status)
BUG(); /* Cannot get platform pci device information */
/* Add pcidev_info to list in pci_controller.platform_data */
list_add_tail(&pcidev_info->pdi_list,
&(SN_PLATFORM_DATA(dev->bus)->pcidev_info));
if (SN_ACPI_BASE_SUPPORT())
sn_acpi_slot_fixup(dev, pcidev_info);
else
sn_more_slot_fixup(dev, pcidev_info);
/*
* Using the PROMs values for the PCI host bus, get the Linux
* PCI host_pci_dev struct and set up host bus linkages
*/
bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff;
host_pci_bus = pci_find_bus(segment, bus_no);
host_pci_dev = pci_get_slot(host_pci_bus, devfn);
pcidev_info->host_pci_dev = host_pci_dev;
pcidev_info->pdi_linux_pcidev = dev;
pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev);
bs = SN_PCIBUS_BUSSOFT(dev->bus);
pcidev_info->pdi_pcibus_info = bs;
if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
} else {
SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
}
/* Only set up IRQ stuff if this device has a host bus context */
if (bs && sn_irq_info->irq_irq) {
pcidev_info->pdi_sn_irq_info = sn_irq_info;
dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq;
sn_irq_fixup(dev, sn_irq_info);
} else {
pcidev_info->pdi_sn_irq_info = NULL;
kfree(sn_irq_info);
}
}
/*
* sn_common_bus_fixup - Perform platform specific bus fixup.
* Execute the ASIC specific fixup routine
* for this bus.
*/
void
sn_common_bus_fixup(struct pci_bus *bus,
struct pcibus_bussoft *prom_bussoft_ptr)
{
int cnode;
struct pci_controller *controller;
struct hubdev_info *hubdev_info;
int nasid;
void *provider_soft;
struct sn_pcibus_provider *provider;
struct sn_platform_data *sn_platform_data;
controller = PCI_CONTROLLER(bus);
/*
* Per-provider fixup. Copies the bus soft structure from prom
* to local area and links SN_PCIBUS_BUSSOFT().
*/
if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) {
printk(KERN_WARNING "sn_common_bus_fixup: Unsupported asic type, %d",
prom_bussoft_ptr->bs_asic_type);
return;
}
if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
return; /* no further fixup necessary */
provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
if (provider == NULL)
panic("sn_common_bus_fixup: No provider registered for this asic type, %d",
prom_bussoft_ptr->bs_asic_type);
if (provider->bus_fixup)
provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr,
controller);
else
provider_soft = NULL;
/*
* Generic bus fixup goes here. Don't reference prom_bussoft_ptr
* after this point.
*/
controller->platform_data = kzalloc(sizeof(struct sn_platform_data),
GFP_KERNEL);
if (controller->platform_data == NULL)
BUG();
sn_platform_data =
(struct sn_platform_data *) controller->platform_data;
sn_platform_data->provider_soft = provider_soft;
INIT_LIST_HEAD(&((struct sn_platform_data *)
controller->platform_data)->pcidev_info);
nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
cnode = nasid_to_cnodeid(nasid);
hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
&(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
/*
* If the node information we obtained during the fixup phase is
* invalid then set controller->node to -1 (undetermined)
*/
if (controller->node >= num_online_nodes()) {
struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);
printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u"
"L_IO=%lx L_MEM=%lx BASE=%lx\n",
b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
printk(KERN_WARNING "on node %d but only %d nodes online."
"Association set to undetermined.\n",
controller->node, num_online_nodes());
controller->node = -1;
}
}
void sn_bus_store_sysdata(struct pci_dev *dev)
{
struct sysdata_el *element;
element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
if (!element) {
dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
return;
}
element->sysdata = SN_PCIDEV_INFO(dev);
list_add(&element->entry, &sn_sysdata_list);
}
void sn_bus_free_sysdata(void)
{
struct sysdata_el *element;
struct list_head *list, *safe;
list_for_each_safe(list, safe, &sn_sysdata_list) {
element = list_entry(list, struct sysdata_el, entry);
list_del(&element->entry);
list_del(&(((struct pcidev_info *)
(element->sysdata))->pdi_list));
kfree(element->sysdata);
kfree(element);
}
return;
}
/*
 * hubdev_init_node() - Creates the HUB data structure and links it to its
* own NODE specific data area.
*/
void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
{
struct hubdev_info *hubdev_info;
int size;
pg_data_t *pg;
size = sizeof(struct hubdev_info);
if (node >= num_online_nodes()) /* Headless/memless IO nodes */
pg = NODE_DATA(0);
else
pg = NODE_DATA(node);
hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size);
npda->pdinfo = (void *)hubdev_info;
}
geoid_t
cnodeid_get_geoid(cnodeid_t cnode)
{
struct hubdev_info *hubdev;
hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
return hubdev->hdi_geoid;
}
void sn_generate_path(struct pci_bus *pci_bus, char *address)
{
nasid_t nasid;
cnodeid_t cnode;
geoid_t geoid;
moduleid_t moduleid;
u16 bricktype;
nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
cnode = nasid_to_cnodeid(nasid);
geoid = cnodeid_get_geoid(cnode);
moduleid = geo_module(geoid);
sprintf(address, "module_%c%c%c%c%.2d",
'0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)),
'0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)),
'0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)),
MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid));
/* Tollhouse requires slot id to be displayed */
bricktype = MODULE_GET_BTYPE(moduleid);
if ((bricktype == L1_BRICKTYPE_191010) ||
(bricktype == L1_BRICKTYPE_1932))
sprintf(address, "%s^%d", address, geo_slot(geoid));
}
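For illustration, with made-up geo values (rack class 0, group 0, number 1, brick type character 'c', brick position 7), sn_generate_path() above produces "module_001c07"; for the Tollhouse brick types it appends the slot id, e.g. "module_001c07^2".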
/*
* sn_pci_fixup_bus() - Perform SN specific setup of software structs
* (pcibus_bussoft, pcidev_info) and hardware
* registers, for the specified bus and devices under it.
*/
void __devinit
sn_pci_fixup_bus(struct pci_bus *bus)
{
if (SN_ACPI_BASE_SUPPORT())
sn_acpi_bus_fixup(bus);
else
sn_bus_fixup(bus);
}
/*
* sn_io_early_init - Perform early IO (and some non-IO) initialization.
* In particular, setup the sn_pci_provider[] array.
* This needs to be done prior to any bus scanning
* (acpi_scan_init()) in the ACPI case, as the SN
* bus fixup code will reference the array.
*/
static int __init
sn_io_early_init(void)
{
int i;
if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
return 0;
/*
 * prime sn_pci_provider[]. Individual provider init routines will
* override their respective default entries.
*/
for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
sn_pci_provider[i] = &sn_pci_default_provider;
pcibr_init_provider();
tioca_init_provider();
tioce_init_provider();
/*
* This is needed to avoid bounce limit checks in the blk layer
*/
ia64_max_iommu_merge_mask = ~PAGE_MASK;
sn_irq_lh_init();
INIT_LIST_HEAD(&sn_sysdata_list);
sn_init_cpei_timer();
#ifdef CONFIG_PROC_FS
register_sn_procfs();
#endif
printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n",
acpi_gbl_DSDT->oem_revision);
if (SN_ACPI_BASE_SUPPORT())
sn_io_acpi_init();
else
sn_io_init();
return 0;
}
arch_initcall(sn_io_early_init);
/*
* sn_io_late_init() - Perform any final platform specific IO initialization.
*/
int __init
sn_io_late_init(void)
{
struct pci_bus *bus;
struct pcibus_bussoft *bussoft;
cnodeid_t cnode;
nasid_t nasid;
cnodeid_t near_cnode;
if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
return 0;
/*
* Setup closest node in pci_controller->node for
* PIC, TIOCP, TIOCE (TIOCA does it during bus fixup using
* info from the PROM).
*/
bus = NULL;
while ((bus = pci_find_next_bus(bus)) != NULL) {
bussoft = SN_PCIBUS_BUSSOFT(bus);
nasid = NASID_GET(bussoft->bs_base);
cnode = nasid_to_cnodeid(nasid);
if ((bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) ||
(bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCE)) {
/* TIO PCI Bridge: find nearest node with CPUs */
int e = sn_hwperf_get_nearest_node(cnode, NULL,
&near_cnode);
if (e < 0) {
near_cnode = (cnodeid_t)-1; /* use any node */
printk(KERN_WARNING "pcibr_bus_fixup: failed "
"to find near node with CPUs to TIO "
"node %d, err=%d\n", cnode, e);
}
PCI_CONTROLLER(bus)->node = near_cnode;
} else if (bussoft->bs_asic_type == PCIIO_ASIC_TYPE_PIC) {
PCI_CONTROLLER(bus)->node = cnode;
}
}
sn_ioif_inited = 1; /* SN I/O infrastructure now initialized */
return 0;
}
fs_initcall(sn_io_late_init);
EXPORT_SYMBOL(sn_pci_fixup_slot);
EXPORT_SYMBOL(sn_pci_unfixup_slot);
EXPORT_SYMBOL(sn_bus_store_sysdata);
EXPORT_SYMBOL(sn_bus_free_sysdata);
EXPORT_SYMBOL(sn_generate_path);
This diff has been collapsed.
......@@ -3,10 +3,11 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 2000-2003, 2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/acpi.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/vga.h>
......@@ -15,6 +16,7 @@
#include <asm/sn/pda.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/acpi.h>
#define IS_LEGACY_VGA_IOPORT(p) \
(((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df))
......@@ -31,11 +33,14 @@ void *sn_io_addr(unsigned long port)
{
if (!IS_RUNNING_ON_SIMULATOR()) {
if (IS_LEGACY_VGA_IOPORT(port))
port += vga_console_iobase;
return (__ia64_mk_io_addr(port));
/* On sn2, legacy I/O ports don't point at anything */
if (port < (64 * 1024))
return NULL;
return ((void *)(port | __IA64_UNCACHED_OFFSET));
if (SN_ACPI_BASE_SUPPORT())
return (__ia64_mk_io_addr(port));
else
return ((void *)(port | __IA64_UNCACHED_OFFSET));
} else {
/* but the simulator uses them... */
unsigned long addr;
......
......@@ -388,6 +388,14 @@ void __init sn_setup(char **cmdline_p)
ia64_sn_plat_set_error_handling_features(); // obsolete
ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES);
/*
* Note: The calls to notify the PROM of ACPI and PCI Segment
* support must be done prior to acpi_load_tables(), as
* an ACPI capable PROM will rebuild the DSDT as result
* of the call.
*/
ia64_sn_set_os_feature(OSF_PCISEGMENT_ENABLE);
ia64_sn_set_os_feature(OSF_ACPI_ENABLE);
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
......@@ -413,6 +421,16 @@ void __init sn_setup(char **cmdline_p)
if (! vga_console_membase)
sn_scan_pcdp();
/*
* Setup legacy IO space.
* vga_console_iobase maps to PCI IO Space address 0 on the
* bus containing the VGA console.
*/
if (vga_console_iobase) {
io_space[0].mmio_base = vga_console_iobase;
io_space[0].sparse = 0;
}
if (vga_console_membase) {
/* usable vga ... make tty0 the preferred default console */
if (!strstr(*cmdline_p, "console="))
......
......@@ -552,7 +552,7 @@ static void __exit tiocx_exit(void)
bus_unregister(&tiocx_bus_type);
}
subsys_initcall(tiocx_init);
fs_initcall(tiocx_init);
module_exit(tiocx_exit);
/************************************************************************
......
......@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 2001-2004, 2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/interrupt.h>
......@@ -109,7 +109,6 @@ void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
int nasid, cnode, j;
cnodeid_t near_cnode;
struct hubdev_info *hubdev_info;
struct pcibus_info *soft;
struct sn_flush_device_kernel *sn_flush_device_kernel;
......@@ -186,20 +185,6 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
return NULL;
}
if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) {
/* TIO PCI Bridge: find nearest node with CPUs */
int e = sn_hwperf_get_nearest_node(cnode, NULL, &near_cnode);
if (e < 0) {
near_cnode = (cnodeid_t)-1; /* use any node */
printk(KERN_WARNING "pcibr_bus_fixup: failed to find "
"near node with CPUs to TIO node %d, err=%d\n",
cnode, e);
}
controller->node = near_cnode;
}
else
controller->node = cnode;
return soft;
}
......
......@@ -15,7 +15,6 @@
#include <asm/sn/pcidev.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioce_provider.h>
#include <asm/sn/sn2/sn_hwperf.h>
/*
* 1/26/2006
......@@ -990,8 +989,6 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
static void *
tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
int my_nasid;
cnodeid_t my_cnode, mem_cnode;
struct tioce_common *tioce_common;
struct tioce_kernel *tioce_kern;
struct tioce __iomem *tioce_mmr;
......@@ -1035,21 +1032,6 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
tioce_common->ce_pcibus.bs_persist_segment,
tioce_common->ce_pcibus.bs_persist_busnum);
/*
* identify closest nasid for memory allocations
*/
my_nasid = NASID_GET(tioce_common->ce_pcibus.bs_base);
my_cnode = nasid_to_cnodeid(my_nasid);
if (sn_hwperf_get_nearest_node(my_cnode, &mem_cnode, NULL) < 0) {
printk(KERN_WARNING "tioce_bus_fixup: failed to find "
"closest node with MEM to TIO node %d\n", my_cnode);
mem_cnode = (cnodeid_t)-1; /* use any node */
}
controller->node = mem_cnode;
return tioce_common;
}
......
......@@ -48,7 +48,6 @@ static struct pci_controller *u3_ht;
static int has_second_ohare;
#endif /* CONFIG_PPC64 */
extern u8 pci_cache_line_size;
extern int pcibios_assign_bus_offset;
struct device_node *k2_skiplist[2];
......
......@@ -646,13 +646,4 @@ int pci_domain_nr(struct pci_bus *pbus)
}
EXPORT_SYMBOL(pci_domain_nr);
int pcibios_prep_mwi(struct pci_dev *dev)
{
/* We set correct PCI_CACHE_LINE_SIZE register values for every
* device probed on this platform. So there is nothing to check
* and this always succeeds.
*/
return 0;
}
#endif /* !(CONFIG_PCI) */
......@@ -125,6 +125,7 @@ config I2C_I801
ICH7
ESB2
ICH8
ICH9
This driver can also be built as a module. If so, the module
will be called i2c-i801.
......
......@@ -33,6 +33,7 @@
ICH7 27DA
ESB2 269B
ICH8 283E
ICH9 2930
This driver supports several versions of Intel's I/O Controller Hubs (ICH).
For SMBus support, they are similar to the PIIX4 and are part
of Intel's '810' and other chipsets.
......@@ -457,6 +458,7 @@ static struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_17) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_17) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) },
{ 0, }
};
......
......@@ -320,7 +320,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
struct i2o_controller *c;
int rc;
struct pci_dev *i960 = NULL;
int enabled = pdev->is_enabled;
printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n");
......@@ -330,12 +329,11 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
if (!enabled)
if ((rc = pci_enable_device(pdev))) {
printk(KERN_WARNING "i2o: couldn't enable device %s\n",
pci_name(pdev));
return rc;
}
if ((rc = pci_enable_device(pdev))) {
printk(KERN_WARNING "i2o: couldn't enable device %s\n",
pci_name(pdev));
return rc;
}
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
printk(KERN_WARNING "i2o: no suitable DMA found for %s\n",
......@@ -442,8 +440,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
i2o_iop_free(c);
disable:
if (!enabled)
pci_disable_device(pdev);
pci_disable_device(pdev);
return rc;
}
......
......@@ -19,7 +19,7 @@ config PCI_MSI
config PCI_MULTITHREAD_PROBE
bool "PCI Multi-threaded probe (EXPERIMENTAL)"
depends on PCI && EXPERIMENTAL && BROKEN
depends on PCI && EXPERIMENTAL
help
Say Y here if you want the PCI core to spawn a new thread for
every PCI device that is probed. This can cause a huge
......
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/wait.h>
#include "pci.h"
......@@ -63,30 +64,42 @@ EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
static u32 pci_user_cached_config(struct pci_dev *dev, int pos)
{
u32 data;
/*
* The following routines are to prevent the user from accessing PCI config
* space when it's unsafe to do so. Some devices require this during BIST and
* we're required to prevent it during D-state transitions.
*
* We have a bit per device to indicate it's blocked and a global wait queue
* for callers to sleep on until devices are unblocked.
*/
static DECLARE_WAIT_QUEUE_HEAD(pci_ucfg_wait);
data = dev->saved_config_space[pos/sizeof(dev->saved_config_space[0])];
data >>= (pos % sizeof(dev->saved_config_space[0])) * 8;
return data;
static noinline void pci_wait_ucfg(struct pci_dev *dev)
{
DECLARE_WAITQUEUE(wait, current);
__add_wait_queue(&pci_ucfg_wait, &wait);
do {
set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&pci_lock);
schedule();
spin_lock_irq(&pci_lock);
} while (dev->block_ucfg_access);
__remove_wait_queue(&pci_ucfg_wait, &wait);
}
#define PCI_USER_READ_CONFIG(size,type) \
int pci_user_read_config_##size \
(struct pci_dev *dev, int pos, type *val) \
{ \
unsigned long flags; \
int ret = 0; \
u32 data = -1; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
spin_lock_irqsave(&pci_lock, flags); \
if (likely(!dev->block_ucfg_access)) \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
pos, sizeof(type), &data); \
else if (pos < sizeof(dev->saved_config_space)) \
data = pci_user_cached_config(dev, pos); \
spin_unlock_irqrestore(&pci_lock, flags); \
spin_unlock_irq(&pci_lock); \
*val = (type)data; \
return ret; \
}
......@@ -95,14 +108,13 @@ int pci_user_read_config_##size \
int pci_user_write_config_##size \
(struct pci_dev *dev, int pos, type val) \
{ \
unsigned long flags; \
int ret = -EIO; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
spin_lock_irqsave(&pci_lock, flags); \
if (likely(!dev->block_ucfg_access)) \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
pos, sizeof(type), val); \
spin_unlock_irqrestore(&pci_lock, flags); \
spin_unlock_irq(&pci_lock); \
return ret; \
}
......@@ -117,21 +129,23 @@ PCI_USER_WRITE_CONFIG(dword, u32)
* pci_block_user_cfg_access - Block userspace PCI config reads/writes
* @dev: pci device struct
*
* This function blocks any userspace PCI config accesses from occurring.
* When blocked, any writes will be bit bucketed and reads will return the
* data saved using pci_save_state for the first 64 bytes of config
* space and return 0xff for all other config reads.
**/
* When user access is blocked, any reads or writes to config space will
* sleep until access is unblocked again. We don't allow nesting of
* block/unblock calls.
*/
void pci_block_user_cfg_access(struct pci_dev *dev)
{
unsigned long flags;
int was_blocked;
pci_save_state(dev);
/* spinlock to synchronize with anyone reading config space now */
spin_lock_irqsave(&pci_lock, flags);
was_blocked = dev->block_ucfg_access;
dev->block_ucfg_access = 1;
spin_unlock_irqrestore(&pci_lock, flags);
/* If we BUG() inside the pci_lock, we're guaranteed to hose
* the machine */
BUG_ON(was_blocked);
}
EXPORT_SYMBOL_GPL(pci_block_user_cfg_access);
......@@ -140,14 +154,19 @@ EXPORT_SYMBOL_GPL(pci_block_user_cfg_access);
* @dev: pci device struct
*
* This function allows userspace PCI config accesses to resume.
**/
*/
void pci_unblock_user_cfg_access(struct pci_dev *dev)
{
unsigned long flags;
/* spinlock to synchronize with anyone reading saved config space */
spin_lock_irqsave(&pci_lock, flags);
/* This indicates a problem in the caller, but we don't need
* to kill them, unlike a double-block above. */
WARN_ON(!dev->block_ucfg_access);
dev->block_ucfg_access = 0;
wake_up_all(&pci_ucfg_wait);
spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);
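A minimal sketch of the intended calling pattern for the blocking API above, from a hypothetical driver running BIST; the body of the blocked region is only indicated by comments, and the caller must be in sleepable context since blocked config accesses now sleep:

#include <linux/pci.h>

static void example_run_bist(struct pci_dev *pdev)
{
	/* Must be called from sleepable context. */
	pci_block_user_cfg_access(pdev);	/* BUGs if already blocked */

	/* ... start BIST via PCI_BIST and poll until it completes ... */

	pci_unblock_user_cfg_access(pdev);	/* wakes sleeping config accesses */
}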
......@@ -62,10 +62,10 @@ struct acpiphp_slot;
struct slot {
struct hotplug_slot *hotplug_slot;
struct acpiphp_slot *acpi_slot;
struct hotplug_slot_info info;
char name[SLOT_NAME_SIZE];
};
/**
* struct acpiphp_bridge - PCI bridge information
*
......
......@@ -303,25 +303,15 @@ static int __init init_acpi(void)
/* read initial number of slots */
if (!retval) {
num_slots = acpiphp_get_num_slots();
if (num_slots == 0)
if (num_slots == 0) {
acpiphp_glue_exit();
retval = -ENODEV;
}
}
return retval;
}
/**
* make_slot_name - make a slot name that appears in pcihpfs
* @slot: slot to name
*
*/
static void make_slot_name(struct slot *slot)
{
snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%u",
slot->acpi_slot->sun);
}
/**
* release_slot - free up the memory used by a slot
* @hotplug_slot: slot to free
......@@ -332,8 +322,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name);
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot->name);
kfree(slot->hotplug_slot);
kfree(slot);
}
......@@ -342,26 +330,19 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
{
struct slot *slot;
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *hotplug_slot_info;
int retval = -ENOMEM;
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
goto error;
slot->hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
slot->hotplug_slot = kzalloc(sizeof(*slot->hotplug_slot), GFP_KERNEL);
if (!slot->hotplug_slot)
goto error_slot;
slot->hotplug_slot->info = kzalloc(sizeof(*hotplug_slot_info),
GFP_KERNEL);
if (!slot->hotplug_slot->info)
goto error_hpslot;
slot->hotplug_slot->info = &slot->info;
slot->hotplug_slot->name = kzalloc(SLOT_NAME_SIZE, GFP_KERNEL);
if (!slot->hotplug_slot->name)
goto error_info;
slot->hotplug_slot->name = slot->name;
slot->hotplug_slot->private = slot;
slot->hotplug_slot->release = &release_slot;
......@@ -376,21 +357,17 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
acpiphp_slot->slot = slot;
make_slot_name(slot);
snprintf(slot->name, sizeof(slot->name), "%u", slot->acpi_slot->sun);
retval = pci_hp_register(slot->hotplug_slot);
if (retval) {
err("pci_hp_register failed with error %d\n", retval);
goto error_name;
goto error_hpslot;
}
info("Slot [%s] registered\n", slot->hotplug_slot->name);
return 0;
error_name:
kfree(slot->hotplug_slot->name);
error_info:
kfree(slot->hotplug_slot->info);
error_hpslot:
kfree(slot->hotplug_slot);
error_slot:
......
......@@ -1693,14 +1693,10 @@ void __exit acpiphp_glue_exit(void)
*/
int __init acpiphp_get_num_slots(void)
{
struct list_head *node;
struct acpiphp_bridge *bridge;
int num_slots;
num_slots = 0;
int num_slots = 0;
list_for_each (node, &bridge_list) {
bridge = (struct acpiphp_bridge *)node;
list_for_each_entry (bridge, &bridge_list, list) {
dbg("Bus %04x:%02x has %d slot%s\n",
pci_domain_nr(bridge->pci_bus),
bridge->pci_bus->number, bridge->nr_slots,
......
......@@ -1371,12 +1371,12 @@ static int unconfigure_boot_bridge (u8 busno, u8 device, u8 function)
}
bus = ibmphp_find_res_bus (sec_number);
debug ("bus->busno is %x\n", bus->busno);
debug ("sec_number is %x\n", sec_number);
if (!bus) {
err ("cannot find Bus structure for the bridged device\n");
return -EINVAL;
}
debug("bus->busno is %x\n", bus->busno);
debug("sec_number is %x\n", sec_number);
ibmphp_remove_bus (bus, busno);
......
......@@ -521,14 +521,9 @@ static void __exit unload_pciehpd(void)
}
static int hpdriver_context = 0;
static void pciehp_remove (struct pcie_device *device)
{
printk("%s ENTRY\n", __FUNCTION__);
printk("%s -> Call free_irq for irq = %d\n",
__FUNCTION__, device->irq);
free_irq(device->irq, &hpdriver_context);
/* XXX - Needs to be adapted to device driver model */
}
#ifdef CONFIG_PM
......
......@@ -718,8 +718,6 @@ static void hpc_release_ctlr(struct controller *ctrl)
if (php_ctlr->irq) {
free_irq(php_ctlr->irq, ctrl);
php_ctlr->irq = 0;
if (!pcie_mch_quirk)
pci_disable_msi(php_ctlr->pci_dev);
}
}
if (php_ctlr->pci_dev)
......
......@@ -63,7 +63,7 @@ static struct device_node *find_php_slot_pci_node(char *drc_name,
char *type;
int rc;
while ((np = of_find_node_by_type(np, "pci"))) {
while ((np = of_find_node_by_name(np, "pci"))) {
rc = rpaphp_get_drc_props(np, NULL, &name, &type, NULL);
if (rc == 0)
if (!strcmp(drc_name, name) && !strcmp(drc_type, type))
......
......@@ -356,7 +356,7 @@ static int __init rpaphp_init(void)
info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
init_MUTEX(&rpaphp_sem);
while ((dn = of_find_node_by_type(dn, "pci")))
while ((dn = of_find_node_by_name(dn, "pci")))
rpaphp_add_slot(dn);
return 0;
......
......@@ -205,21 +205,6 @@ static struct hotplug_slot * sn_hp_destroy(void)
return bss_hotplug_slot;
}
static void sn_bus_alloc_data(struct pci_dev *dev)
{
struct pci_bus *subordinate_bus;
struct pci_dev *child;
sn_pci_fixup_slot(dev);
/* Recursively sets up the sn_irq_info structs */
if (dev->subordinate) {
subordinate_bus = dev->subordinate;
list_for_each_entry(child, &subordinate_bus->devices, bus_list)
sn_bus_alloc_data(child);
}
}
static void sn_bus_free_data(struct pci_dev *dev)
{
struct pci_bus *subordinate_bus;
......@@ -337,6 +322,11 @@ static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
return rc;
}
/*
* Power up and configure the slot via a SAL call to PROM.
* Scan slot (and any children), do any platform specific fixup,
* and find device driver.
*/
static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
{
struct slot *slot = bss_hotplug_slot->private;
......@@ -345,6 +335,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
int func, num_funcs;
int new_ppb = 0;
int rc;
void pcibios_fixup_device_resources(struct pci_dev *);
/* Serialize the Linux PCI infrastructure */
mutex_lock(&sn_hotplug_mutex);
......@@ -367,9 +358,6 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
return -ENODEV;
}
sn_pci_controller_fixup(pci_domain_nr(slot->pci_bus),
slot->pci_bus->number,
slot->pci_bus);
/*
* Map SN resources for all functions on the card
* to the Linux PCI interface and tell the drivers
......@@ -380,6 +368,13 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
PCI_DEVFN(slot->device_num + 1,
PCI_FUNC(func)));
if (dev) {
/* Need to do slot fixup on PPB before fixup of children
* (PPB's pcidev_info needs to be in pcidev_info list
* before child's SN_PCIDEV_INFO() call to setup
* pdi_host_pcidev_info).
*/
pcibios_fixup_device_resources(dev);
sn_pci_fixup_slot(dev);
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
unsigned char sec_bus;
pci_read_config_byte(dev, PCI_SECONDARY_BUS,
......@@ -387,12 +382,8 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
new_bus = pci_add_new_bus(dev->bus, dev,
sec_bus);
pci_scan_child_bus(new_bus);
sn_pci_controller_fixup(pci_domain_nr(new_bus),
new_bus->number,
new_bus);
new_ppb = 1;
}
sn_bus_alloc_data(dev);
pci_dev_put(dev);
}
}
......
......@@ -6,14 +6,6 @@
#ifndef MSI_H
#define MSI_H
/*
* MSI-X Address Register
*/
#define PCI_MSIX_FLAGS_QSIZE 0x7FF
#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
#define PCI_MSIX_FLAGS_BITMASK (1 << 0)
#define PCI_MSIX_ENTRY_SIZE 16
#define PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET 0
#define PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET 4
......
......@@ -36,6 +36,7 @@ acpi_query_osc (
struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *out_obj;
u32 osc_dw0;
acpi_status *ret_status = (acpi_status *)retval;
/* Setting up input parameters */
......@@ -56,6 +57,7 @@ acpi_query_osc (
if (ACPI_FAILURE (status)) {
printk(KERN_DEBUG
"Evaluate _OSC Set fails. Status = 0x%04x\n", status);
*ret_status = status;
return status;
}
out_obj = output.pointer;
......@@ -90,6 +92,7 @@ acpi_query_osc (
query_osc_out:
kfree(output.pointer);
*ret_status = status;
return status;
}
......@@ -166,6 +169,7 @@ acpi_run_osc (
acpi_status pci_osc_support_set(u32 flags)
{
u32 temp;
acpi_status retval;
if (!(flags & OSC_SUPPORT_MASKS)) {
return AE_TYPE;
......@@ -179,9 +183,13 @@ acpi_status pci_osc_support_set(u32 flags)
acpi_get_devices ( PCI_ROOT_HID_STRING,
acpi_query_osc,
ctrlset_buf,
NULL );
(void **) &retval );
ctrlset_buf[OSC_QUERY_TYPE] = !OSC_QUERY_ENABLE;
ctrlset_buf[OSC_CONTROL_TYPE] = temp;
if (ACPI_FAILURE(retval)) {
/* no osc support at all */
ctrlset_buf[OSC_SUPPORT_TYPE] = 0;
}
return AE_OK;
}
EXPORT_SYMBOL(pci_osc_support_set);
......
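A hedged sketch of a typical caller of pci_osc_support_set(); the flag combination is only an example (constants from <linux/pci-acpi.h>), and with the change above a platform without _OSC ends up with the support flags cleared rather than left stale:

#include <linux/acpi.h>
#include <linux/pci-acpi.h>

/* Example only: advertise extended config space and MSI support. */
static void example_declare_osc_support(void)
{
	acpi_status status;

	status = pci_osc_support_set(OSC_EXT_PCI_CONFIG_SUPPORT | OSC_MSI_SUPPORT);
	if (ACPI_FAILURE(status))
		return;		/* platform did not accept the support bits */
}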
......@@ -329,8 +329,8 @@ static int pci_default_resume(struct pci_dev *pci_dev)
/* restore the PCI config space */
pci_restore_state(pci_dev);
/* if the device was enabled before suspend, reenable */
if (pci_dev->is_enabled)
retval = pci_enable_device(pci_dev);
if (atomic_read(&pci_dev->enable_cnt))
retval = __pci_enable_device(pci_dev);
/* if the device was busmaster before the suspend, make it busmaster again */
if (pci_dev->is_busmaster)
pci_set_master(pci_dev);
......@@ -445,9 +445,12 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner)
/* register with core */
error = driver_register(&drv->driver);
if (error)
return error;
if (!error)
error = pci_create_newid_file(drv);
error = pci_create_newid_file(drv);
if (error)
driver_unregister(&drv->driver);
return error;
}
......
......@@ -42,7 +42,6 @@ pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
pci_config_attr(class, "0x%06x\n");
pci_config_attr(irq, "%u\n");
pci_config_attr(is_enabled, "%u\n");
static ssize_t broken_parity_status_show(struct device *dev,
struct device_attribute *attr,
......@@ -112,26 +111,36 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
(u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
(u8)(pci_dev->class));
}
static ssize_t
is_enabled_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
static ssize_t is_enabled_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
ssize_t result = -EINVAL;
struct pci_dev *pdev = to_pci_dev(dev);
int retval = 0;
/* this can crash the machine when done on the "wrong" device */
if (!capable(CAP_SYS_ADMIN))
return count;
if (*buf == '0')
pci_disable_device(pdev);
if (*buf == '0') {
if (atomic_read(&pdev->enable_cnt) != 0)
pci_disable_device(pdev);
else
result = -EIO;
} else if (*buf == '1')
result = pci_enable_device(pdev);
return result < 0 ? result : count;
}
if (*buf == '1')
retval = pci_enable_device(pdev);
static ssize_t is_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev;
if (retval)
return retval;
return count;
pdev = to_pci_dev (dev);
return sprintf (buf, "%u\n", atomic_read(&pdev->enable_cnt));
}
static ssize_t
......
......@@ -490,6 +490,47 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
kfree(save_state);
}
static int pci_save_pcix_state(struct pci_dev *dev)
{
int pos, i = 0;
struct pci_cap_saved_state *save_state;
u16 *cap;
pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (pos <= 0)
return 0;
save_state = kzalloc(sizeof(*save_state) + sizeof(u16), GFP_KERNEL);
if (!save_state) {
dev_err(&dev->dev, "Out of memory in pci_save_pcix_state\n");
return -ENOMEM;
}
cap = (u16 *)&save_state->data[0];
pci_read_config_word(dev, pos + PCI_X_CMD, &cap[i++]);
pci_add_saved_cap(dev, save_state);
return 0;
}
static void pci_restore_pcix_state(struct pci_dev *dev)
{
int i = 0, pos;
struct pci_cap_saved_state *save_state;
u16 *cap;
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (!save_state || pos <= 0)
return;
cap = (u16 *)&save_state->data[0];
pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
pci_remove_saved_cap(save_state);
kfree(save_state);
}
/**
* pci_save_state - save the PCI configuration space of a device before suspending
* @dev: - PCI device that we're dealing with
......@@ -507,6 +548,8 @@ pci_save_state(struct pci_dev *dev)
return i;
if ((i = pci_save_pcie_state(dev)) != 0)
return i;
if ((i = pci_save_pcix_state(dev)) != 0)
return i;
return 0;
}
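With pci_save_pcix_state() and pci_restore_pcix_state() hooked into pci_save_state()/pci_restore_state(), a driver's existing power-management path preserves the PCI-X command register without any driver-side change. A hedged sketch of such a path (the foo_* names are illustrative, not from this patch):

static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);		/* now also saves the PCI-X/PCIe caps */
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int foo_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);	/* restores PCI_X_CMD as well */
	return pci_enable_device(pdev);
}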
......@@ -538,6 +581,7 @@ pci_restore_state(struct pci_dev *dev)
dev->saved_config_space[i]);
}
}
pci_restore_pcix_state(dev);
pci_restore_msi_state(dev);
pci_restore_msix_state(dev);
return 0;
......@@ -568,29 +612,50 @@ pci_enable_device_bars(struct pci_dev *dev, int bars)
}
/**
* pci_enable_device - Initialize device before it's used by a driver.
* __pci_enable_device - Initialize device before it's used by a driver.
* @dev: PCI device to be initialized
*
* Initialize device before it's used by a driver. Ask low-level code
* to enable I/O and memory. Wake up the device if it was suspended.
* Beware, this function can fail.
*
* Note this function is a backend and is not supposed to be called by
* normal code, use pci_enable_device() instead.
*/
int
pci_enable_device(struct pci_dev *dev)
__pci_enable_device(struct pci_dev *dev)
{
int err;
if (dev->is_enabled)
return 0;
err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
if (err)
return err;
pci_fixup_device(pci_fixup_enable, dev);
dev->is_enabled = 1;
return 0;
}
/**
* pci_enable_device - Initialize device before it's used by a driver.
* @dev: PCI device to be initialized
*
* Initialize device before it's used by a driver. Ask low-level code
* to enable I/O and memory. Wake up the device if it was suspended.
* Beware, this function can fail.
*
* Note we don't actually enable the device many times if we call
* this function repeatedly (we just increment the count).
*/
int pci_enable_device(struct pci_dev *dev)
{
int result;
if (atomic_add_return(1, &dev->enable_cnt) > 1)
return 0; /* already enabled */
result = __pci_enable_device(dev);
if (result < 0)
atomic_dec(&dev->enable_cnt);
return result;
}
/**
* pcibios_disable_device - disable arch specific PCI resources for device dev
* @dev: the PCI device to disable
......@@ -607,12 +672,18 @@ void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
*
* Signal to the system that the PCI device is not in use by the system
* anymore. This only involves disabling PCI bus-mastering, if active.
*
* Note we don't actually disable the device until all callers of
* pci_enable_device() have called pci_disable_device().
*/
void
pci_disable_device(struct pci_dev *dev)
{
u16 pci_command;
if (atomic_sub_return(1, &dev->enable_cnt) != 0)
return;
if (dev->msi_enabled)
disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
PCI_CAP_ID_MSI);
......@@ -628,7 +699,6 @@ pci_disable_device(struct pci_dev *dev)
dev->is_busmaster = 0;
pcibios_disable_device(dev);
dev->is_enabled = 0;
}
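With the atomic enable_cnt, enable/disable calls from independent users of one device now pair up, and the hardware is only touched on the first enable and the last disable. A hedged illustration of the counting (hypothetical call sequence, not from this patch):

	pci_enable_device(pdev);	/* enable_cnt 0 -> 1, BARs enabled */
	pci_enable_device(pdev);	/* enable_cnt 1 -> 2, hardware untouched */

	pci_disable_device(pdev);	/* enable_cnt 2 -> 1, device stays enabled */
	pci_disable_device(pdev);	/* enable_cnt 1 -> 0, device actually disabled */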
/**
......@@ -831,22 +901,38 @@ pci_set_master(struct pci_dev *dev)
pcibios_set_master(dev);
}
#ifndef HAVE_ARCH_PCI_MWI
#ifdef PCI_DISABLE_MWI
int pci_set_mwi(struct pci_dev *dev)
{
return 0;
}
void pci_clear_mwi(struct pci_dev *dev)
{
}
#else
#ifndef PCI_CACHE_LINE_BYTES
#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
#endif
/* This can be overridden by arch code. */
u8 pci_cache_line_size = L1_CACHE_BYTES >> 2;
/* Don't forget this is measured in 32-bit words, not bytes */
u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
/**
* pci_generic_prep_mwi - helper function for pci_set_mwi
* @dev: the PCI device for which MWI is enabled
* pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
* @dev: the PCI device for which MWI is to be enabled
*
* Helper function for generic implementation of pcibios_prep_mwi
* function. Originally copied from drivers/net/acenic.c.
* Helper function for pci_set_mwi.
* Originally copied from drivers/net/acenic.c.
* Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
static int
pci_generic_prep_mwi(struct pci_dev *dev)
pci_set_cacheline_size(struct pci_dev *dev)
{
u8 cacheline_size;
......@@ -872,7 +958,6 @@ pci_generic_prep_mwi(struct pci_dev *dev)
return -EINVAL;
}
#endif /* !HAVE_ARCH_PCI_MWI */
/**
* pci_set_mwi - enables memory-write-invalidate PCI transaction
......@@ -890,12 +975,7 @@ pci_set_mwi(struct pci_dev *dev)
int rc;
u16 cmd;
#ifdef HAVE_ARCH_PCI_MWI
rc = pcibios_prep_mwi(dev);
#else
rc = pci_generic_prep_mwi(dev);
#endif
rc = pci_set_cacheline_size(dev);
if (rc)
return rc;
......@@ -926,6 +1006,7 @@ pci_clear_mwi(struct pci_dev *dev)
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
}
#endif /* ! PCI_DISABLE_MWI */
/**
* pci_intx - enables/disables PCI INTx for device dev
......
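Driver usage is unchanged by this reorganization: pci_set_mwi() programs the cache line size through the renamed pci_set_cacheline_size() and then sets the MWI command bit, while on PCI_DISABLE_MWI platforms (ppc64, in a later hunk) the stubs make it a no-op. A hedged sketch of the usual probe-time call (foo_init_one is illustrative, not from this patch):

static int foo_init_one(struct pci_dev *pdev)
{
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;
	pci_set_master(pdev);
	if (pci_set_mwi(pdev))
		dev_warn(&pdev->dev, "Memory-Write-Invalidate not enabled\n");
	return 0;
}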
/* Functions internal to the PCI core code */
extern int __must_check __pci_enable_device(struct pci_dev *);
extern int pci_uevent(struct device *dev, char **envp, int num_envp,
char *buffer, int buffer_size);
extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
......
......@@ -679,6 +679,33 @@ static int pci_setup_device(struct pci_dev * dev)
pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
/*
* Do the ugly legacy mode stuff here rather than broken chip
* quirk code. Legacy mode ATA controllers have fixed
* addresses. These are not always echoed in BAR0-3, and
* BAR0-3 in a few cases contain junk!
*/
if (class == PCI_CLASS_STORAGE_IDE) {
u8 progif;
pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
if ((progif & 1) == 0) {
dev->resource[0].start = 0x1F0;
dev->resource[0].end = 0x1F7;
dev->resource[0].flags = IORESOURCE_IO;
dev->resource[1].start = 0x3F6;
dev->resource[1].end = 0x3F6;
dev->resource[1].flags = IORESOURCE_IO;
}
if ((progif & 4) == 0) {
dev->resource[2].start = 0x170;
dev->resource[2].end = 0x177;
dev->resource[2].flags = IORESOURCE_IO;
dev->resource[3].start = 0x376;
dev->resource[3].end = 0x376;
dev->resource[3].flags = IORESOURCE_IO;
}
}
break;
case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
......
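Because pci_setup_device() now fills in the fixed legacy ports itself, an IDE driver can read both channels' resources uniformly, whatever mode the programming-interface byte reports. A hedged sketch (illustrative only, not from this patch):

	unsigned long cmd0 = pci_resource_start(pdev, 0);	/* 0x1F0 in compatible mode */
	unsigned long ctl0 = pci_resource_start(pdev, 1);	/* 0x3F6 in compatible mode */
	unsigned long cmd1 = pci_resource_start(pdev, 2);	/* 0x170 in compatible mode */
	unsigned long ctl1 = pci_resource_start(pdev, 3);	/* 0x376 in compatible mode */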
......@@ -796,56 +796,6 @@ static void __init quirk_mediagx_master(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master );
/*
* As per PCI spec, ignore base address registers 0-3 of the IDE controllers
* running in Compatible mode (bits 0 and 2 in the ProgIf for primary and
* secondary channels respectively). If the device reports Compatible mode
* but does use BAR0-3 for address decoding, we assume that firmware has
* programmed these BARs with standard values (0x1f0,0x3f4 and 0x170,0x374).
* Exceptions (if they exist) must be handled in chip/architecture specific
* fixups.
*
* Note: for non x86 people. You may need an arch specific quirk to handle
* moving IDE devices to native mode as well. Some plug in card devices power
* up in compatible mode and assume the BIOS will adjust them.
*
* Q: should we load the 0x1f0,0x3f4 into the registers or zap them as
* we do now ? We don't want is pci_enable_device to come along
* and assign new resources. Both approaches work for that.
*/
static void __devinit quirk_ide_bases(struct pci_dev *dev)
{
struct resource *res;
int first_bar = 2, last_bar = 0;
if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
return;
res = &dev->resource[0];
/* primary channel: ProgIf bit 0, BAR0, BAR1 */
if (!(dev->class & 1) && (res[0].flags || res[1].flags)) {
res[0].start = res[0].end = res[0].flags = 0;
res[1].start = res[1].end = res[1].flags = 0;
first_bar = 0;
last_bar = 1;
}
/* secondary channel: ProgIf bit 2, BAR2, BAR3 */
if (!(dev->class & 4) && (res[2].flags || res[3].flags)) {
res[2].start = res[2].end = res[2].flags = 0;
res[3].start = res[3].end = res[3].flags = 0;
last_bar = 3;
}
if (!last_bar)
return;
printk(KERN_INFO "PCI: Ignoring BAR%d-%d of IDE controller %s\n",
first_bar, last_bar, pci_name(dev));
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_ide_bases);
/*
* Ensure C0 rev restreaming is off. This is normally done by
* the BIOS but in the odd case it is not the results are corruption
......@@ -880,11 +830,10 @@ static void __devinit quirk_svwks_csb5ide(struct pci_dev *pdev)
prog &= ~5;
pdev->class &= ~5;
pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
/* need to re-assign BARs for compat mode */
quirk_ide_bases(pdev);
/* PCI layer will sort out resources */
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide );
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide );
/*
* Intel 82801CAM ICH3-M datasheet says IDE modes must be the same
......@@ -900,11 +849,9 @@ static void __init quirk_ide_samemode(struct pci_dev *pdev)
prog &= ~5;
pdev->class &= ~5;
pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
/* need to re-assign BARs for compat mode */
quirk_ide_bases(pdev);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
/* This was originally an Alpha specific thing, but it really fits here.
* The i82375 PCI/EISA bridge appears as non-classified. Fix that.
......
......@@ -81,7 +81,8 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
start = (loff_t)0xC0000;
*size = 0x20000; /* cover C000:0 through E000:0 */
} else {
if (res->flags & IORESOURCE_ROM_COPY) {
if (res->flags &
(IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
return (void __iomem *)(unsigned long)
pci_resource_start(pdev, PCI_ROM_RESOURCE);
......@@ -165,7 +166,8 @@ void __iomem *pci_map_rom_copy(struct pci_dev *pdev, size_t *size)
if (!rom)
return NULL;
if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_SHADOW))
if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_SHADOW |
IORESOURCE_ROM_BIOS_COPY))
return rom;
res->start = (unsigned long)kmalloc(*size, GFP_KERNEL);
......@@ -191,7 +193,7 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
if (res->flags & IORESOURCE_ROM_COPY)
if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY))
return;
iounmap(rom);
......@@ -215,6 +217,7 @@ void pci_remove_rom(struct pci_dev *pdev)
sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
if (!(res->flags & (IORESOURCE_ROM_ENABLE |
IORESOURCE_ROM_SHADOW |
IORESOURCE_ROM_BIOS_COPY |
IORESOURCE_ROM_COPY)))
pci_disable_rom(pdev);
}
......
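For drivers the new IORESOURCE_ROM_BIOS_COPY flag is transparent: pci_map_rom() returns a pointer to the shadowed image and pci_unmap_rom() knows not to iounmap() it. A hedged usage sketch (illustrative only):

	size_t size;
	void __iomem *rom = pci_map_rom(pdev, &size);

	if (rom) {
		/* read up to size bytes of the option ROM image here */
		pci_unmap_rom(pdev, rom);
	}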
......@@ -32,7 +32,7 @@
*/
#define IO_SPACE_LIMIT 0xffffffffffffffffUL
#define MAX_IO_SPACES_BITS 4
#define MAX_IO_SPACES_BITS 8
#define MAX_IO_SPACES (1UL << MAX_IO_SPACES_BITS)
#define IO_SPACE_BITS 24
#define IO_SPACE_SIZE (1UL << IO_SPACE_BITS)
......
......@@ -36,6 +36,7 @@ typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
u8 size);
typedef void ia64_mv_migrate_t(struct task_struct * task);
typedef void ia64_mv_pci_fixup_bus_t (struct pci_bus *);
/* DMA-mapping interface: */
typedef void ia64_mv_dma_init (void);
......@@ -95,6 +96,11 @@ machvec_noop_task (struct task_struct *task)
{
}
static inline void
machvec_noop_bus (struct pci_bus *bus)
{
}
extern void machvec_setup (char **);
extern void machvec_timer_interrupt (int, void *);
extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
......@@ -159,6 +165,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
# define platform_migrate ia64_mv.migrate
# define platform_setup_msi_irq ia64_mv.setup_msi_irq
# define platform_teardown_msi_irq ia64_mv.teardown_msi_irq
# define platform_pci_fixup_bus ia64_mv.pci_fixup_bus
# endif
/* __attribute__((__aligned__(16))) is required to make size of the
......@@ -210,6 +217,7 @@ struct ia64_machine_vector {
ia64_mv_migrate_t *migrate;
ia64_mv_setup_msi_irq_t *setup_msi_irq;
ia64_mv_teardown_msi_irq_t *teardown_msi_irq;
ia64_mv_pci_fixup_bus_t *pci_fixup_bus;
} __attribute__((__aligned__(16))); /* align attrib? see above comment */
#define MACHVEC_INIT(name) \
......@@ -257,6 +265,7 @@ struct ia64_machine_vector {
platform_migrate, \
platform_setup_msi_irq, \
platform_teardown_msi_irq, \
platform_pci_fixup_bus, \
}
extern struct ia64_machine_vector ia64_mv;
......@@ -416,5 +425,8 @@ extern int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size
#ifndef platform_teardown_msi_irq
# define platform_teardown_msi_irq ((ia64_mv_teardown_msi_irq_t*)NULL)
#endif
#ifndef platform_pci_fixup_bus
# define platform_pci_fixup_bus machvec_noop_bus
#endif
#endif /* _ASM_IA64_MACHVEC_H */
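The new machvec slot lets a platform post-process each PCI bus after the generic ia64 code scans it, with machvec_noop_bus as the default. A hedged sketch of how the hook is wired and dispatched (the call site shown is an assumption; it sits outside this hunk):

/* platform side: SN/Altix provides the hook */
void sn_pci_fixup_bus(struct pci_bus *bus)
{
	/* attach SN-specific provider data to the freshly scanned bus */
}

/* generic ia64 side: dispatch through the machine vector */
void pcibios_fixup_bus(struct pci_bus *b)
{
	platform_pci_fixup_bus(b);
}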
......@@ -69,6 +69,7 @@ extern ia64_mv_dma_supported sn_dma_supported;
extern ia64_mv_migrate_t sn_migrate;
extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq;
extern ia64_mv_teardown_msi_irq_t sn_teardown_msi_irq;
extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
/*
......@@ -127,6 +128,7 @@ extern ia64_mv_teardown_msi_irq_t sn_teardown_msi_irq;
#define platform_setup_msi_irq ((ia64_mv_setup_msi_irq_t*)NULL)
#define platform_teardown_msi_irq ((ia64_mv_teardown_msi_irq_t*)NULL)
#endif
#define platform_pci_fixup_bus sn_pci_fixup_bus
#include <asm/sn/io.h>
......
......@@ -26,16 +26,18 @@ void pcibios_config_init(void);
struct pci_dev;
/*
* PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct correspondence
* between device bus addresses and CPU physical addresses. Platforms with a hardware I/O
* MMU _must_ turn this off to suppress the bounce buffer handling code in the block and
* network device layers. Platforms with separate bus address spaces _must_ turn this off
* and provide a device DMA mapping implementation that takes care of the necessary
* PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct
* correspondence between device bus addresses and CPU physical addresses.
* Platforms with a hardware I/O MMU _must_ turn this off to suppress the
* bounce buffer handling code in the block and network device layers.
* Platforms with separate bus address spaces _must_ turn this off and provide
* a device DMA mapping implementation that takes care of the necessary
* address translation.
*
* For now, the ia64 platforms which may have separate/multiple bus address spaces all
* have I/O MMUs which support the merging of physically discontiguous buffers, so we can
* use that as the sole factor to determine the setting of PCI_DMA_BUS_IS_PHYS.
* For now, the ia64 platforms which may have separate/multiple bus address
* spaces all have I/O MMUs which support the merging of physically
* discontiguous buffers, so we can use that as the sole factor to determine
* the setting of PCI_DMA_BUS_IS_PHYS.
*/
extern unsigned long ia64_max_iommu_merge_mask;
#define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL)
......@@ -52,9 +54,6 @@ pcibios_penalize_isa_irq (int irq, int active)
/* We don't do dynamic PCI IRQ allocation */
}
#define HAVE_ARCH_PCI_MWI 1
extern int pcibios_prep_mwi (struct pci_dev *);
#include <asm-generic/pci-dma-compat.h>
/* pci_unmap_{single,page} is not a nop, thus... */
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_ACPI_H
#define _ASM_IA64_SN_ACPI_H
#include "acpi/acglobal.h"
#define SN_ACPI_BASE_SUPPORT() (acpi_gbl_DSDT->oem_revision >= 0x20101)
#endif /* _ASM_IA64_SN_ACPI_H */
......@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PCIDEV_H
#define _ASM_IA64_SN_PCI_PCIDEV_H
......@@ -12,31 +12,29 @@
/*
* In ia64, pci_dev->sysdata must be a *pci_controller. To provide access to
* the pcidev_info structs for all devices under a controller, we extend the
* definition of pci_controller, via sn_pci_controller, to include a list
* of pcidev_info.
* the pcidev_info structs for all devices under a controller, we keep a
* list of pcidev_info under pci_controller->platform_data.
*/
struct sn_pci_controller {
struct pci_controller pci_controller;
struct sn_platform_data {
void *provider_soft;
struct list_head pcidev_info;
};
#define SN_PCI_CONTROLLER(dev) ((struct sn_pci_controller *) dev->sysdata)
#define SN_PLATFORM_DATA(busdev) \
((struct sn_platform_data *)(PCI_CONTROLLER(busdev)->platform_data))
#define SN_PCIDEV_INFO(dev) sn_pcidev_info_get(dev)
#define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \
(struct pcibus_info *)((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
/*
* Given a pci_bus, return the sn pcibus_bussoft struct. Note that
* this only works for root busses, not for busses represented by PPB's.
*/
#define SN_PCIBUS_BUSSOFT(pci_bus) \
((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
((struct pcibus_bussoft *)(SN_PLATFORM_DATA(pci_bus)->provider_soft))
#define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \
(struct pcibus_info *)((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
((struct pcibus_info *)(SN_PLATFORM_DATA(pci_bus)->provider_soft))
/*
* Given a struct pci_dev, return the sn pcibus_bussoft struct. Note
* that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due
......@@ -72,8 +70,6 @@ extern void sn_irq_fixup(struct pci_dev *pci_dev,
struct sn_irq_info *sn_irq_info);
extern void sn_irq_unfixup(struct pci_dev *pci_dev);
extern struct pcidev_info * sn_pcidev_info_get(struct pci_dev *);
extern void sn_pci_controller_fixup(int segment, int busnum,
struct pci_bus *bus);
extern void sn_bus_store_sysdata(struct pci_dev *dev);
extern void sn_bus_free_sysdata(void);
extern void sn_generate_path(struct pci_bus *pci_bus, char *address);
......
......@@ -44,8 +44,14 @@ extern int sn_prom_feature_available(int id);
* Once enabled, a feature cannot be disabled.
*
* By default, features are disabled unless explicitly enabled.
*
* These defines must be kept in sync with the corresponding
* PROM definitions in feature_sets.h.
*/
#define OSF_MCA_SLV_TO_OS_INIT_SLV 0
#define OSF_FEAT_LOG_SBES 1
#define OSF_ACPI_ENABLE 2
#define OSF_PCISEGMENT_ENABLE 3
#endif /* _ASM_IA64_SN_FEATURE_SETS_H */
......@@ -77,6 +77,7 @@
#define SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST 0x02000058 // deprecated
#define SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST 0x0200005a
#define SN_SAL_IOIF_INIT 0x0200005f
#define SN_SAL_HUB_ERROR_INTERRUPT 0x02000060
#define SN_SAL_BTE_RECOVER 0x02000061
#define SN_SAL_RESERVED_DO_NOT_USE 0x02000062
......
......@@ -62,19 +62,13 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
}
#ifdef CONFIG_PPC64
#define HAVE_ARCH_PCI_MWI 1
static inline int pcibios_prep_mwi(struct pci_dev *dev)
{
/*
* We would like to avoid touching the cacheline size or MWI bit
* but we can't do that with the current pcibios_prep_mwi
* interface. pSeries firmware sets the cacheline size (which is not
* the cpu cacheline size in all cases) and hardware treats MWI
* the same as memory write. So we don't touch the cacheline size
* here and allow the generic code to set the MWI bit.
*/
return 0;
}
/*
* We want to avoid touching the cacheline size or MWI bit.
* pSeries firmware sets the cacheline size (which is not the cpu cacheline
* size in all cases) and hardware treats MWI the same as memory write.
*/
#define PCI_DISABLE_MWI
extern struct dma_mapping_ops pci_dma_ops;
......
......@@ -18,6 +18,8 @@
#define PCI_IRQ_NONE 0xffffffff
#define PCI_CACHE_LINE_BYTES 64
static inline void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
......@@ -291,10 +293,6 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state,
int write_combine);
/* Platform specific MWI support. */
#define HAVE_ARCH_PCI_MWI
extern int pcibios_prep_mwi(struct pci_dev *dev);
extern void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
struct resource *res);
......
......@@ -89,6 +89,7 @@ struct resource_list {
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */
#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */
#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */
/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
extern struct resource ioport_resource;
......
......@@ -51,6 +51,7 @@
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <asm/atomic.h>
#include <linux/device.h>
/* File state for mmap()s on /proc/bus/pci/X/Y */
......@@ -159,7 +160,6 @@ struct pci_dev {
unsigned int transparent:1; /* Transparent PCI bridge */
unsigned int multifunction:1;/* Part of multi-function device */
/* keep track of device state */
unsigned int is_enabled:1; /* pci_enable_device has been called */
unsigned int is_busmaster:1; /* device is busmaster */
unsigned int no_msi:1; /* device may not use msi */
unsigned int no_d1d2:1; /* only allow d0 or d3 */
......@@ -167,6 +167,7 @@ struct pci_dev {
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int msi_enabled:1;
unsigned int msix_enabled:1;
atomic_t enable_cnt; /* pci_enable_device has been called */
u32 saved_config_space[16]; /* config space saved at suspend time */
struct hlist_head saved_cap_space;
......
......@@ -2211,6 +2211,13 @@
#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
#define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910
#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2911
#define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912
#define PCI_DEVICE_ID_INTEL_ICH9_3 0x2913
#define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914
#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2915
#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
......
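The new ICH9 constants are consumed by the IRQ-router and i2c-i801 updates elsewhere in this series; a hedged sketch of how a driver's match table would pick one up (the foo_* names are illustrative, not from this patch):

static struct pci_device_id foo_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_0) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, foo_pci_ids);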
......@@ -292,6 +292,12 @@
#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
#define PCI_MSI_MASK_BIT 16 /* Mask bits register */
/* MSI-X registers (these are at offset PCI_MSI_FLAGS) */
#define PCI_MSIX_FLAGS_QSIZE 0x7FF
#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
#define PCI_MSIX_FLAGS_BITMASK (1 << 0)
/* CompactPCI Hotswap Register */
#define PCI_CHSWP_CSR 2 /* Control and Status Register */
......
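These masks give the MSI core what it needs to size and locate the MSI-X vector table. A hedged sketch of reading the table size with the new PCI_MSIX_FLAGS_QSIZE mask (the message-control word lives at offset PCI_MSI_FLAGS within the MSI-X capability, and the field encodes the table size minus one):

	u16 control;
	int nr_entries;
	int pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);

	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		nr_entries = (control & PCI_MSIX_FLAGS_QSIZE) + 1;
	}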