Commit 861fe906 authored by David S. Miller

[SPARC64]: SUN4U PCI-E controller support.

Some minor refactoring in the generic code was necessary for
this:

1) This controller requires 8-byte access to the interrupt map
   and clear registers.  They are 64 bits wide on all the other
   SBUS and PCI controllers anyway, so this was easy to cure.

2) The IMAP register has a different layout and some bits that we
   need to preserve, so use a read/modify/write when making
   changes to the IMAP register in generic code.

3) Flushing the entire IOMMU TLB is best done with a single write
   to a register on this PCI controller, so add an
   iommu->iommu_flushinv for this.

Still lacks MSI support; that will come later.
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 4cad6917
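For illustration, a minimal standalone sketch of the read/modify/write pattern
that items (1) and (2) describe.  This is not part of the patch; the IMAP_*
mask values below are hypothetical placeholders, the real definitions live in
the kernel headers and in the diff further down.

/* Standalone sketch: 64-bit read/modify/write of the interrupt map register,
 * preserving the layout bits Fire cares about and rewriting only the target
 * id and the valid bit.
 */
#include <stdio.h>
#include <stdint.h>

#define IMAP_VALID     0x0000000080000000ULL   /* placeholder valid bit */
#define IMAP_TID_MASK  0x000000007c000000ULL   /* placeholder target-id field */

int main(void)
{
	uint64_t imap = 0x0000000000000a55ULL;  /* pretend register contents */
	uint64_t tid  = 0x0000000004000000ULL;  /* hypothetical new target id */

	imap &= ~IMAP_TID_MASK;                 /* clear the old target id */
	imap |= tid | IMAP_VALID;               /* install new target, mark valid */

	printf("imap now 0x%016llx\n",
	       (unsigned long long)imap);       /* prints 0x0000000084000a55 */
	return 0;
}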
......@@ -17,7 +17,7 @@ obj-y := process.o setup.o cpu.o idprom.o \
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \
pci_psycho.o pci_sabre.o pci_schizo.o \
pci_sun4v.o pci_sun4v_asm.o
pci_sun4v.o pci_sun4v_asm.o pci_fire.o
obj-$(CONFIG_SMP) += smp.o trampoline.o
obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o
obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
......
......@@ -279,7 +279,7 @@ static void sun4u_irq_enable(unsigned int virt_irq)
struct irq_handler_data *data = get_irq_chip_data(virt_irq);
if (likely(data)) {
unsigned long cpuid, imap;
unsigned long cpuid, imap, val;
unsigned int tid;
cpuid = irq_choose_cpu(virt_irq);
......@@ -287,7 +287,11 @@ static void sun4u_irq_enable(unsigned int virt_irq)
tid = sun4u_compute_tid(imap, cpuid);
upa_writel(tid | IMAP_VALID, imap);
val = upa_readq(imap);
val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
IMAP_AID_SAFARI | IMAP_NID_SAFARI);
val |= tid | IMAP_VALID;
upa_writeq(val, imap);
}
}
......@@ -297,10 +301,10 @@ static void sun4u_irq_disable(unsigned int virt_irq)
if (likely(data)) {
unsigned long imap = data->imap;
u32 tmp = upa_readl(imap);
u32 tmp = upa_readq(imap);
tmp &= ~IMAP_VALID;
upa_writel(tmp, imap);
upa_writeq(tmp, imap);
}
}
......@@ -309,7 +313,7 @@ static void sun4u_irq_end(unsigned int virt_irq)
struct irq_handler_data *data = get_irq_chip_data(virt_irq);
if (likely(data))
upa_writel(ICLR_IDLE, data->iclr);
upa_writeq(ICLR_IDLE, data->iclr);
}
static void sun4v_irq_enable(unsigned int virt_irq)
......@@ -465,7 +469,7 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
BUG_ON(tlb_type == hypervisor);
ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
bucket = &ivector_table[ino];
if (!bucket->virt_irq) {
bucket->virt_irq = virt_irq_alloc(__irq(bucket));
......
......@@ -190,6 +190,7 @@ extern void schizo_init(struct device_node *, const char *);
extern void schizo_plus_init(struct device_node *, const char *);
extern void tomatillo_init(struct device_node *, const char *);
extern void sun4v_pci_init(struct device_node *, const char *);
extern void fire_pci_init(struct device_node *, const char *);
static struct {
char *model_name;
......@@ -207,6 +208,7 @@ static struct {
{ "SUNW,tomatillo", tomatillo_init },
{ "pci108e,a801", tomatillo_init },
{ "SUNW,sun4v-pci", sun4v_pci_init },
{ "pciex108e,80f0", fire_pci_init },
};
#define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \
sizeof(pci_controller_table[0]))
......@@ -436,6 +438,13 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
printk(" class: 0x%x device name: %s\n",
dev->class, pci_name(dev));
/* I have seen IDE devices which will not respond to
* the bmdma simplex check reads if bus mastering is
* disabled.
*/
if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
pci_set_master(dev);
dev->current_state = 4; /* unknown power state */
dev->error_state = pci_channel_io_normal;
......
/* pci_fire.c: Sun4u platform PCI-E controller support.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <asm/pbm.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include "pci_impl.h"
#define fire_read(__reg) \
({ u64 __ret; \
__asm__ __volatile__("ldxa [%1] %2, %0" \
: "=r" (__ret) \
: "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
: "memory"); \
__ret; \
})
#define fire_write(__reg, __val) \
__asm__ __volatile__("stxa %0, [%1] %2" \
: /* no outputs */ \
: "r" (__val), "r" (__reg), \
"i" (ASI_PHYS_BYPASS_EC_E) \
: "memory")
/* Fire config space address format is nearly identical to
* that of SCHIZO and PSYCHO, except that in order to accommodate
* PCI-E extended config space the encoding can handle 12 bits
* of register address:
*
* 32 28 27 20 19 15 14 12 11 2 1 0
* -------------------------------------------------
* |0 0 0 0 0| bus | device | function | reg | 0 0 |
* -------------------------------------------------
*/
#define FIRE_CONFIG_BASE(PBM) ((PBM)->config_space)
#define FIRE_CONFIG_ENCODE(BUS, DEVFN, REG) \
(((unsigned long)(BUS) << 20) | \
((unsigned long)(DEVFN) << 12) | \
((unsigned long)(REG)))
static void *fire_pci_config_mkaddr(struct pci_pbm_info *pbm,
unsigned char bus,
unsigned int devfn,
int where)
{
if (!pbm)
return NULL;
return (void *)
(FIRE_CONFIG_BASE(pbm) |
FIRE_CONFIG_ENCODE(bus, devfn, where));
}
/* FIRE PCI configuration space accessors. */
static int fire_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
int where, int size, u32 *value)
{
struct pci_pbm_info *pbm = bus_dev->sysdata;
unsigned char bus = bus_dev->number;
u32 *addr;
u16 tmp16;
u8 tmp8;
if (bus_dev == pbm->pci_bus && devfn == 0x00)
return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where,
size, value);
switch (size) {
case 1:
*value = 0xff;
break;
case 2:
*value = 0xffff;
break;
case 4:
*value = 0xffffffff;
break;
}
addr = fire_pci_config_mkaddr(pbm, bus, devfn, where);
if (!addr)
return PCIBIOS_SUCCESSFUL;
switch (size) {
case 1:
pci_config_read8((u8 *)addr, &tmp8);
*value = tmp8;
break;
case 2:
if (where & 0x01) {
printk("pci_read_config_word: misaligned reg [%x]\n",
where);
return PCIBIOS_SUCCESSFUL;
}
pci_config_read16((u16 *)addr, &tmp16);
*value = tmp16;
break;
case 4:
if (where & 0x03) {
printk("pci_read_config_dword: misaligned reg [%x]\n",
where);
return PCIBIOS_SUCCESSFUL;
}
pci_config_read32(addr, value);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int fire_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
int where, int size, u32 value)
{
struct pci_pbm_info *pbm = bus_dev->sysdata;
unsigned char bus = bus_dev->number;
u32 *addr;
if (bus_dev == pbm->pci_bus && devfn == 0x00)
return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where,
size, value);
addr = fire_pci_config_mkaddr(pbm, bus, devfn, where);
if (!addr)
return PCIBIOS_SUCCESSFUL;
switch (size) {
case 1:
pci_config_write8((u8 *)addr, value);
break;
case 2:
if (where & 0x01) {
printk("pci_write_config_word: misaligned reg [%x]\n",
where);
return PCIBIOS_SUCCESSFUL;
}
pci_config_write16((u16 *)addr, value);
break;
case 4:
if (where & 0x03) {
printk("pci_write_config_dword: misaligned reg [%x]\n",
where);
return PCIBIOS_SUCCESSFUL;
}
pci_config_write32(addr, value);
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops pci_fire_ops = {
.read = fire_read_pci_cfg,
.write = fire_write_pci_cfg,
};
static void pbm_scan_bus(struct pci_controller_info *p,
struct pci_pbm_info *pbm)
{
pbm->pci_bus = pci_scan_one_pbm(pbm);
}
static void pci_fire_scan_bus(struct pci_controller_info *p)
{
struct device_node *dp;
if ((dp = p->pbm_A.prom_node) != NULL)
pbm_scan_bus(p, &p->pbm_A);
if ((dp = p->pbm_B.prom_node) != NULL)
pbm_scan_bus(p, &p->pbm_B);
/* XXX register error interrupt handlers XXX */
}
#define FIRE_IOMMU_CONTROL 0x40000UL
#define FIRE_IOMMU_TSBBASE 0x40008UL
#define FIRE_IOMMU_FLUSH 0x40100UL
#define FIRE_IOMMU_FLUSHINV 0x40108UL
static void pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
{
struct iommu *iommu = pbm->iommu;
u32 vdma[2], dma_mask;
u64 control;
int tsbsize;
/* No virtual-dma property on these guys, use largest size. */
vdma[0] = 0xc0000000; /* base */
vdma[1] = 0x40000000; /* size */
dma_mask = 0xffffffff;
tsbsize = 128;
/* Register addresses. */
iommu->iommu_control = pbm->pbm_regs + FIRE_IOMMU_CONTROL;
iommu->iommu_tsbbase = pbm->pbm_regs + FIRE_IOMMU_TSBBASE;
iommu->iommu_flush = pbm->pbm_regs + FIRE_IOMMU_FLUSH;
iommu->iommu_flushinv = pbm->pbm_regs + FIRE_IOMMU_FLUSHINV;
/* We use the main control/status register of FIRE as the write
* completion register.
*/
iommu->write_complete_reg = pbm->controller_regs + 0x410000UL;
/*
* Invalidate TLB Entries.
*/
fire_write(iommu->iommu_flushinv, ~(u64)0);
pci_iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
fire_write(iommu->iommu_tsbbase, __pa(iommu->page_table) | 0x7UL);
control = fire_read(iommu->iommu_control);
control |= (0x00000400 /* TSB cache snoop enable */ |
0x00000300 /* Cache mode */ |
0x00000002 /* Bypass enable */ |
0x00000001 /* Translation enable */);
fire_write(iommu->iommu_control, control);
}
/* Based at pbm->controller_regs */
#define FIRE_PARITY_CONTROL 0x470010UL
#define FIRE_PARITY_ENAB 0x8000000000000000UL
#define FIRE_FATAL_RESET_CTL 0x471028UL
#define FIRE_FATAL_RESET_SPARE 0x0000000004000000UL
#define FIRE_FATAL_RESET_MB 0x0000000002000000UL
#define FIRE_FATAL_RESET_CPE 0x0000000000008000UL
#define FIRE_FATAL_RESET_APE 0x0000000000004000UL
#define FIRE_FATAL_RESET_PIO 0x0000000000000040UL
#define FIRE_FATAL_RESET_JW 0x0000000000000004UL
#define FIRE_FATAL_RESET_JI 0x0000000000000002UL
#define FIRE_FATAL_RESET_JR 0x0000000000000001UL
#define FIRE_CORE_INTR_ENABLE 0x471800UL
/* Based at pbm->pbm_regs */
#define FIRE_TLU_CTRL 0x80000UL
#define FIRE_TLU_CTRL_TIM 0x00000000da000000UL
#define FIRE_TLU_CTRL_QDET 0x0000000000000100UL
#define FIRE_TLU_CTRL_CFG 0x0000000000000001UL
#define FIRE_TLU_DEV_CTRL 0x90008UL
#define FIRE_TLU_LINK_CTRL 0x90020UL
#define FIRE_TLU_LINK_CTRL_CLK 0x0000000000000040UL
#define FIRE_LPU_RESET 0xe2008UL
#define FIRE_LPU_LLCFG 0xe2200UL
#define FIRE_LPU_LLCFG_VC0 0x0000000000000100UL
#define FIRE_LPU_FCTRL_UCTRL 0xe2240UL
#define FIRE_LPU_FCTRL_UCTRL_N 0x0000000000000002UL
#define FIRE_LPU_FCTRL_UCTRL_P 0x0000000000000001UL
#define FIRE_LPU_TXL_FIFOP 0xe2430UL
#define FIRE_LPU_LTSSM_CFG2 0xe2788UL
#define FIRE_LPU_LTSSM_CFG3 0xe2790UL
#define FIRE_LPU_LTSSM_CFG4 0xe2798UL
#define FIRE_LPU_LTSSM_CFG5 0xe27a0UL
#define FIRE_DMC_IENAB 0x31800UL
#define FIRE_DMC_DBG_SEL_A 0x53000UL
#define FIRE_DMC_DBG_SEL_B 0x53008UL
#define FIRE_PEC_IENAB 0x51800UL
static void pci_fire_hw_init(struct pci_pbm_info *pbm)
{
u64 val;
fire_write(pbm->controller_regs + FIRE_PARITY_CONTROL,
FIRE_PARITY_ENAB);
fire_write(pbm->controller_regs + FIRE_FATAL_RESET_CTL,
(FIRE_FATAL_RESET_SPARE |
FIRE_FATAL_RESET_MB |
FIRE_FATAL_RESET_CPE |
FIRE_FATAL_RESET_APE |
FIRE_FATAL_RESET_PIO |
FIRE_FATAL_RESET_JW |
FIRE_FATAL_RESET_JI |
FIRE_FATAL_RESET_JR));
fire_write(pbm->controller_regs + FIRE_CORE_INTR_ENABLE, ~(u64)0);
val = fire_read(pbm->pbm_regs + FIRE_TLU_CTRL);
val |= (FIRE_TLU_CTRL_TIM |
FIRE_TLU_CTRL_QDET |
FIRE_TLU_CTRL_CFG);
fire_write(pbm->pbm_regs + FIRE_TLU_CTRL, val);
fire_write(pbm->pbm_regs + FIRE_TLU_DEV_CTRL, 0);
fire_write(pbm->pbm_regs + FIRE_TLU_LINK_CTRL,
FIRE_TLU_LINK_CTRL_CLK);
fire_write(pbm->pbm_regs + FIRE_LPU_RESET, 0);
fire_write(pbm->pbm_regs + FIRE_LPU_LLCFG,
FIRE_LPU_LLCFG_VC0);
fire_write(pbm->pbm_regs + FIRE_LPU_FCTRL_UCTRL,
(FIRE_LPU_FCTRL_UCTRL_N |
FIRE_LPU_FCTRL_UCTRL_P));
fire_write(pbm->pbm_regs + FIRE_LPU_TXL_FIFOP,
((0xffff << 16) | (0x0000 << 0)));
fire_write(pbm->pbm_regs + FIRE_LPU_LTSSM_CFG2, 3000000);
fire_write(pbm->pbm_regs + FIRE_LPU_LTSSM_CFG3, 500000);
fire_write(pbm->pbm_regs + FIRE_LPU_LTSSM_CFG4,
(2 << 16) | (140 << 8));
fire_write(pbm->pbm_regs + FIRE_LPU_LTSSM_CFG5, 0);
fire_write(pbm->pbm_regs + FIRE_DMC_IENAB, ~(u64)0);
fire_write(pbm->pbm_regs + FIRE_DMC_DBG_SEL_A, 0);
fire_write(pbm->pbm_regs + FIRE_DMC_DBG_SEL_B, 0);
fire_write(pbm->pbm_regs + FIRE_PEC_IENAB, ~(u64)0);
}
static void pci_fire_pbm_init(struct pci_controller_info *p,
struct device_node *dp, u32 portid)
{
const struct linux_prom64_registers *regs;
struct pci_pbm_info *pbm;
const u32 *ino_bitmap;
const unsigned int *busrange;
if ((portid & 1) == 0)
pbm = &p->pbm_A;
else
pbm = &p->pbm_B;
pbm->portid = portid;
pbm->parent = p;
pbm->prom_node = dp;
pbm->name = dp->full_name;
regs = of_get_property(dp, "reg", NULL);
pbm->pbm_regs = regs[0].phys_addr;
pbm->controller_regs = regs[1].phys_addr - 0x410000UL;
printk("%s: SUN4U PCIE Bus Module\n", pbm->name);
pci_determine_mem_io_space(pbm);
ino_bitmap = of_get_property(dp, "ino-bitmap", NULL);
pbm->ino_bitmap = (((u64)ino_bitmap[1] << 32UL) |
((u64)ino_bitmap[0] << 0UL));
busrange = of_get_property(dp, "bus-range", NULL);
pbm->pci_first_busno = busrange[0];
pbm->pci_last_busno = busrange[1];
pci_fire_hw_init(pbm);
pci_fire_pbm_iommu_init(pbm);
}
static inline int portid_compare(u32 x, u32 y)
{
if (x == (y ^ 1))
return 1;
return 0;
}
void fire_pci_init(struct device_node *dp, const char *model_name)
{
struct pci_controller_info *p;
u32 portid = of_getintprop_default(dp, "portid", 0xff);
struct iommu *iommu;
for (p = pci_controller_root; p; p = p->next) {
struct pci_pbm_info *pbm;
if (p->pbm_A.prom_node && p->pbm_B.prom_node)
continue;
pbm = (p->pbm_A.prom_node ?
&p->pbm_A :
&p->pbm_B);
if (portid_compare(pbm->portid, portid)) {
pci_fire_pbm_init(p, dp, portid);
return;
}
}
p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
if (!p)
goto fatal_memory_error;
iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
if (!iommu)
goto fatal_memory_error;
p->pbm_A.iommu = iommu;
iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
if (!iommu)
goto fatal_memory_error;
p->pbm_B.iommu = iommu;
p->next = pci_controller_root;
pci_controller_root = p;
p->index = pci_num_controllers++;
p->scan_bus = pci_fire_scan_bus;
/* XXX MSI support XXX */
p->pci_ops = &pci_fire_ops;
/* Like PSYCHO and SCHIZO we have a 2GB aligned area
* for memory space.
*/
pci_memspace_mask = 0x7fffffffUL;
pci_fire_pbm_init(p, dp, portid);
return;
fatal_memory_error:
prom_printf("PCI_FIRE: Fatal memory allocation error.\n");
prom_halt();
}
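For a concrete feel of the config-space encoding described in pci_fire.c
above, here is a small standalone sketch (not kernel code) that reproduces
FIRE_CONFIG_ENCODE with made-up bus/devfn/reg values.

#include <stdio.h>

#define FIRE_CONFIG_ENCODE(BUS, DEVFN, REG)	\
	(((unsigned long)(BUS) << 20) |		\
	 ((unsigned long)(DEVFN) << 12) |	\
	 ((unsigned long)(REG)))

int main(void)
{
	/* Hypothetical example: bus 0x02, device 3 function 1 (devfn 0x19),
	 * register 0x100 -- a PCI-E extended config register, which is why
	 * the encoding needs 12 bits for "reg".
	 */
	unsigned long off = FIRE_CONFIG_ENCODE(0x02, (3 << 3) | 1, 0x100);

	printf("config offset = 0x%lx\n", off);	/* prints 0x219100 */
	return 0;
}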
......@@ -37,17 +37,21 @@
/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct iommu *iommu)
{
unsigned long tag;
int entry;
if (iommu->iommu_flushinv) {
pci_iommu_write(iommu->iommu_flushinv, ~(u64)0);
} else {
unsigned long tag;
int entry;
tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
for (entry = 0; entry < 16; entry++) {
pci_iommu_write(tag, 0);
tag += 8;
}
tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
for (entry = 0; entry < 16; entry++) {
pci_iommu_write(tag, 0);
tag += 8;
}
/* Ensure completion of previous PIO writes. */
(void) pci_iommu_read(iommu->write_complete_reg);
/* Ensure completion of previous PIO writes. */
(void) pci_iommu_read(iommu->write_complete_reg);
}
}
#define IOPTE_CONSISTENT(CTX) \
......
......@@ -386,11 +386,9 @@ static unsigned int psycho_irq_build(struct device_node *dp,
/* Now build the IRQ bucket. */
imap = controller_regs + imap_off;
imap += 4;
iclr_off = psycho_iclr_offset(ino);
iclr = controller_regs + iclr_off;
iclr += 4;
if ((ino & 0x20) == 0)
inofixup = ino & 0x03;
......@@ -613,11 +611,9 @@ static unsigned int sabre_irq_build(struct device_node *dp,
/* Now build the IRQ bucket. */
imap = controller_regs + imap_off;
imap += 4;
iclr_off = sabre_iclr_offset(ino);
iclr = controller_regs + iclr_off;
iclr += 4;
if ((ino & 0x20) == 0)
inofixup = ino & 0x03;
......@@ -679,13 +675,14 @@ static unsigned long schizo_iclr_offset(unsigned long ino)
static unsigned long schizo_ino_to_iclr(unsigned long pbm_regs,
unsigned int ino)
{
return pbm_regs + schizo_iclr_offset(ino) + 4;
return pbm_regs + schizo_iclr_offset(ino);
}
static unsigned long schizo_ino_to_imap(unsigned long pbm_regs,
unsigned int ino)
{
return pbm_regs + schizo_imap_offset(ino) + 4;
return pbm_regs + schizo_imap_offset(ino);
}
#define schizo_read(__reg) \
......@@ -848,6 +845,85 @@ static void pci_sun4v_irq_trans_init(struct device_node *dp)
dp->irq_trans->data = (void *) (unsigned long)
((regs->phys_addr >> 32UL) & 0x0fffffff);
}
struct fire_irq_data {
unsigned long pbm_regs;
u32 portid;
};
#define FIRE_IMAP_BASE 0x001000
#define FIRE_ICLR_BASE 0x001400
static unsigned long fire_imap_offset(unsigned long ino)
{
return FIRE_IMAP_BASE + (ino * 8UL);
}
static unsigned long fire_iclr_offset(unsigned long ino)
{
return FIRE_ICLR_BASE + (ino * 8UL);
}
static unsigned long fire_ino_to_iclr(unsigned long pbm_regs,
unsigned int ino)
{
return pbm_regs + fire_iclr_offset(ino);
}
static unsigned long fire_ino_to_imap(unsigned long pbm_regs,
unsigned int ino)
{
return pbm_regs + fire_imap_offset(ino);
}
static unsigned int fire_irq_build(struct device_node *dp,
unsigned int ino,
void *_data)
{
struct fire_irq_data *irq_data = _data;
unsigned long pbm_regs = irq_data->pbm_regs;
unsigned long imap, iclr;
unsigned long int_ctrlr;
ino &= 0x3f;
/* Now build the IRQ bucket. */
imap = fire_ino_to_imap(pbm_regs, ino);
iclr = fire_ino_to_iclr(pbm_regs, ino);
/* Set the interrupt controller number. */
int_ctrlr = 1 << 6;
upa_writeq(int_ctrlr, imap);
/* The interrupt map registers do not have an INO field
* like other chips do. They return zero in the INO
* field, and the interrupt controller number is controlled
* in bits 6 thru 9. So in order for build_irq() to get
* the INO right we pass it in as part of the fixup
* which will get added to the map register zero value
* read by build_irq().
*/
ino |= (irq_data->portid << 6);
ino -= int_ctrlr;
return build_irq(ino, iclr, imap);
}
static void fire_irq_trans_init(struct device_node *dp)
{
const struct linux_prom64_registers *regs;
struct fire_irq_data *irq_data;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = fire_irq_build;
irq_data = prom_early_alloc(sizeof(struct fire_irq_data));
regs = of_get_property(dp, "reg", NULL);
dp->irq_trans->data = irq_data;
irq_data->pbm_regs = regs[0].phys_addr;
irq_data->portid = of_getintprop_default(dp, "portid", 0);
}
#endif /* CONFIG_PCI */
#ifdef CONFIG_SBUS
......@@ -1069,6 +1145,7 @@ static struct irq_trans pci_irq_trans_table[] = {
{ "SUNW,tomatillo", tomatillo_irq_trans_init },
{ "pci108e,a801", tomatillo_irq_trans_init },
{ "SUNW,sun4v-pci", pci_sun4v_irq_trans_init },
{ "pciex108e,80f0", fire_irq_trans_init },
};
#endif
......
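To make the INO fixup arithmetic in fire_irq_build() above concrete, here is
a standalone sketch with made-up portid/ino values.  It only mimics the
arithmetic, assuming (as the comment in the diff says) that the IMAP register
reads back just the interrupt controller number written into bits 6 thru 9.

#include <stdio.h>

int main(void)
{
	unsigned long portid = 1, ino = 0x15;	/* hypothetical hardware values */
	unsigned long int_ctrlr = 1UL << 6;	/* controller number written to IMAP */
	unsigned long inofixup, imap_ino_field, sysino;

	ino &= 0x3f;
	inofixup = (ino | (portid << 6)) - int_ctrlr;

	/* build_irq() reads the IMAP register, whose IGN/INO field holds only
	 * the controller number written above, then adds the fixup.
	 */
	imap_ino_field = int_ctrlr;
	sysino = imap_ino_field + inofixup;

	printf("system ino = 0x%lx\n", sysino);	/* 0x55 == (portid << 6) | 0x15 */
	return 0;
}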
......@@ -32,6 +32,7 @@ struct iommu {
unsigned long iommu_control;
unsigned long iommu_tsbbase;
unsigned long iommu_flush;
unsigned long iommu_flushinv;
unsigned long iommu_ctxflush;
unsigned long write_complete_reg;
unsigned long dummy_page;
......