Commit e18e2a00 authored by David S. Miller

[SPARC64]: Move over to GENERIC_HARDIRQS.

This is the long overdue conversion of sparc64 over to
the generic IRQ layer.

The kernel image is slightly larger, but the BSS is ~60K
smaller due to the reduced size of struct ino_bucket.

A lot of IRQ implementation details, including ino_bucket,
were moved out of asm-sparc64/irq.h and are now private to
arch/sparc64/kernel/irq.c, and most of the code in irq.c
totally disappeared.

One thing that's different at the moment is IRQ distribution,
we do it at enable_irq() time.  If the cpu mask is ALL then
we round-robin using a global rotating cpu counter, else
we pick the first cpu in the mask to support single cpu
targeting.  This is similar to what powerpc's XICS IRQ
support code does.
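
For illustration only: the per-IRQ CPU selection described above could be sketched roughly as below. The helper name irq_choose_cpu and the rover variable are assumptions, not necessarily the names used in the (collapsed) arch/sparc64/kernel/irq.c; locking around the static rover is omitted here.

#include <linux/cpumask.h>	/* cpumask_t, cpus_equal, first_cpu, cpu_online (2.6-era API) */
#include <linux/threads.h>	/* NR_CPUS */

/* Sketch: pick a target CPU for an interrupt at enable_irq() time.
 * If the affinity mask is "all CPUs", round-robin with a global rover;
 * otherwise target the first CPU named in the mask.
 */
static int irq_choose_cpu(cpumask_t mask)
{
	static int irq_rover;	/* global rotating counter (assumed name) */
	int cpuid;

	if (cpus_equal(mask, CPU_MASK_ALL)) {
		do {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		} while (!cpu_online(irq_rover));
		cpuid = irq_rover;
	} else {
		cpuid = first_cpu(mask);	/* single-CPU targeting */
	}
	return cpuid;
}

The chosen CPU id would then presumably be programmed into the interrupt's IMAP target-ID bits (see the IMAP_TID defines in asm-sparc64/irq.h further down).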

This works fine on my UP SB1000, and the SMP build goes
fine and runs on that machine, but lots of testing on
different setups is needed.
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 8047e247
......@@ -87,6 +87,10 @@ config SYSVIPC_COMPAT
depends on COMPAT && SYSVIPC
default y
config GENERIC_HARDIRQS
bool
default y
menu "General machine setup"
config SMP
......
......@@ -157,7 +157,7 @@ unsigned int sun4v_vdev_device_interrupt(unsigned int dev_node)
return 0;
}
return sun4v_build_irq(sun4v_vdev_devhandle, irq, 0);
return sun4v_build_irq(sun4v_vdev_devhandle, irq);
}
static const char *cpu_mid_prop(void)
......
......@@ -432,7 +432,7 @@ do_ivec:
membar #Sync
sethi %hi(ivector_table), %g2
sllx %g3, 5, %g3
sllx %g3, 3, %g3
or %g2, %lo(ivector_table), %g2
add %g2, %g3, %g3
......
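
The shift change above (sllx %g3, 5 becoming sllx %g3, 3) is the assembly-side view of the smaller ivector_table entries: the trap handler scales the incoming INO by the size of one table slot. A rough sketch of the assumed slimmed-down per-INO state, whose real definition now lives in the collapsed arch/sparc64/kernel/irq.c:

/* Assumed new per-INO state: two 32-bit words = 8 bytes per entry,
 * so the assembly now computes  &ivector_table[0] + (ino << 3)
 * instead of  &ivector_table[0] + (ino << 5)  for the old 32-byte
 * struct ino_bucket shown later in this diff.
 */
struct ino_bucket {
	unsigned int irq_chain;	/* next INO in the per-CPU IRQ worklist */
	unsigned int virt_irq;	/* virtual IRQ assigned to this INO */
};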
This diff is collapsed.
......@@ -47,12 +47,6 @@ struct pci_controller_info *pci_controller_root = NULL;
/* Each PCI controller found gets a unique index. */
int pci_num_controllers = 0;
/* At boot time the user can give the kernel a command
* line option which controls if and how PCI devices
* are reordered at PCI bus probing time.
*/
int pci_device_reorder = 0;
volatile int pci_poke_in_progress;
volatile int pci_poke_cpu = -1;
volatile int pci_poke_faulted;
......@@ -316,27 +310,6 @@ static void __init pci_scan_each_controller_bus(void)
p->scan_bus(p);
}
/* Reorder the pci_dev chain, so that onboard devices come first
* and then come the pluggable cards.
*/
static void __init pci_reorder_devs(void)
{
struct list_head *pci_onboard = &pci_devices;
struct list_head *walk = pci_onboard->next;
while (walk != pci_onboard) {
struct pci_dev *pdev = pci_dev_g(walk);
struct list_head *walk_next = walk->next;
if (pdev->irq && (__irq_ino(pdev->irq) & 0x20)) {
list_del(walk);
list_add(walk, pci_onboard);
}
walk = walk_next;
}
}
extern void clock_probe(void);
extern void power_init(void);
......@@ -348,9 +321,6 @@ static int __init pcibios_init(void)
pci_scan_each_controller_bus();
if (pci_device_reorder)
pci_reorder_devs();
isa_init();
ebus_init();
clock_probe();
......@@ -441,14 +411,6 @@ EXPORT_SYMBOL(pcibios_bus_to_resource);
char * __init pcibios_setup(char *str)
{
if (!strcmp(str, "onboardfirst")) {
pci_device_reorder = 1;
return NULL;
}
if (!strcmp(str, "noreorder")) {
pci_device_reorder = 0;
return NULL;
}
return str;
}
......
......@@ -308,7 +308,7 @@ static unsigned int psycho_irq_build(struct pci_pbm_info *pbm,
if ((ino & 0x20) == 0)
inofixup = ino & 0x03;
return build_irq(inofixup, iclr, imap, IBF_PCI);
return build_irq(inofixup, iclr, imap);
}
/* PSYCHO error handling support. */
......
......@@ -530,7 +530,7 @@ static unsigned long __onboard_imap_off[] = {
* side of the non-APB bridge, then perform a read of Sabre's DMA
* write-sync register.
*/
static void sabre_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
static void sabre_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
{
struct pci_dev *pdev = _arg1;
unsigned long sync_reg = (unsigned long) _arg2;
......@@ -573,7 +573,7 @@ static unsigned int sabre_irq_build(struct pci_pbm_info *pbm,
if ((ino & 0x20) == 0)
inofixup = ino & 0x03;
virt_irq = build_irq(inofixup, iclr, imap, IBF_PCI);
virt_irq = build_irq(inofixup, iclr, imap);
if (pdev) {
struct pcidev_cookie *pcp = pdev->sysdata;
......
......@@ -232,10 +232,10 @@ static unsigned long schizo_iclr_offset(unsigned long ino)
return SCHIZO_ICLR_BASE + (ino * 8UL);
}
static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
static void tomatillo_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
{
unsigned long sync_reg = (unsigned long) _arg2;
u64 mask = 1UL << (__irq_ino(__irq(bucket)) & IMAP_INO);
u64 mask = 1UL << (ino & IMAP_INO);
u64 val;
int limit;
......@@ -313,7 +313,7 @@ static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
ign_fixup = (1 << 6);
}
virt_irq = build_irq(ign_fixup, iclr, imap, IBF_PCI);
virt_irq = build_irq(ign_fixup, iclr, imap);
if (pdev && pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
irq_install_pre_handler(virt_irq,
......
......@@ -844,7 +844,7 @@ static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
{
u32 devhandle = pbm->devhandle;
return sun4v_build_irq(devhandle, devino, IBF_PCI);
return sun4v_build_irq(devhandle, devino);
}
static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
......
......@@ -821,7 +821,7 @@ unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
}
return build_irq(sbus_level, iclr, imap, 0);
return build_irq(sbus_level, iclr, imap);
}
/* Error interrupt handling. */
......
......@@ -175,10 +175,6 @@ EXPORT_SYMBOL(set_bit);
EXPORT_SYMBOL(clear_bit);
EXPORT_SYMBOL(change_bit);
EXPORT_SYMBOL(ivector_table);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(__flushw_user);
EXPORT_SYMBOL(tlb_type);
......
......@@ -103,7 +103,7 @@ sun4v_dev_mondo:
/* Get &ivector_table[IVEC] into %g4. */
sethi %hi(ivector_table), %g4
sllx %g3, 5, %g3
sllx %g3, 3, %g3
or %g4, %lo(ivector_table), %g4
add %g4, %g3, %g4
......
......@@ -12,6 +12,8 @@
#define local_softirq_pending() \
(local_cpu_data().__softirq_pending)
void ack_bad_irq(unsigned int irq);
#define HARDIRQ_BITS 8
#endif /* !(__SPARC64_HARDIRQ_H) */
#ifndef __ASM_SPARC64_HW_IRQ_H
#define __ASM_SPARC64_HW_IRQ_H
/* Dummy include. */
extern void hw_resend_irq(struct hw_interrupt_type *handler, unsigned int virt_irq);
#endif
......@@ -16,58 +16,6 @@
#include <asm/pil.h>
#include <asm/ptrace.h>
struct ino_bucket;
#define MAX_IRQ_DESC_ACTION 4
struct irq_desc {
void (*pre_handler)(struct ino_bucket *, void *, void *);
void *pre_handler_arg1;
void *pre_handler_arg2;
u32 action_active_mask;
struct irqaction action[MAX_IRQ_DESC_ACTION];
};
/* You should not mess with this directly. That's the job of irq.c.
*
* If you make changes here, please update hand coded assembler of
* the vectored interrupt trap handler in entry.S -DaveM
*
* This is currently one DCACHE line, two buckets per L2 cache
* line. Keep this in mind please.
*/
struct ino_bucket {
/* Next handler in per-CPU IRQ worklist. We know that
* bucket pointers have the high 32-bits clear, so to
* save space we only store the bits we need.
*/
/*0x00*/unsigned int irq_chain;
/* Virtual interrupt number assigned to this INO. */
/*0x04*/unsigned char virt_irq;
/* If an IVEC arrives while irq_info is NULL, we
* set this to notify request_irq() about the event.
*/
/*0x05*/unsigned char pending;
/* Miscellaneous flags. */
/*0x06*/unsigned char flags;
/* Currently unused. */
/*0x07*/unsigned char __pad;
/* Reference to IRQ descriptor for this bucket. */
/*0x08*/struct irq_desc *irq_info;
/* Sun5 Interrupt Clear Register. */
/*0x10*/unsigned long iclr;
/* Sun5 Interrupt Mapping Register. */
/*0x18*/unsigned long imap;
};
/* IMAP/ICLR register defines */
#define IMAP_VALID 0x80000000 /* IRQ Enabled */
#define IMAP_TID_UPA 0x7c000000 /* UPA TargetID */
......@@ -85,19 +33,6 @@ struct ino_bucket {
#define ICLR_TRANSMIT 0x00000001 /* Transmit state */
#define ICLR_PENDING 0x00000003 /* Pending state */
/* Only 8-bits are available, be careful. -DaveM */
#define IBF_PCI 0x02 /* PSYCHO/SABRE/SCHIZO PCI interrupt. */
#define IBF_ACTIVE 0x04 /* Interrupt is active and has a handler.*/
#define IBF_INPROGRESS 0x10 /* IRQ is being serviced. */
#define NUM_IVECS (IMAP_INR + 1)
extern struct ino_bucket ivector_table[NUM_IVECS];
#define __irq_ino(irq) \
(((struct ino_bucket *)(unsigned long)(irq)) - &ivector_table[0])
#define __bucket(irq) ((struct ino_bucket *)(unsigned long)(irq))
#define __irq(bucket) ((unsigned int)(unsigned long)(bucket))
/* The largest number of unique interrupt sources we support.
* If this needs to ever be larger than 255, you need to change
* the type of ino_bucket->virt_irq as appropriate.
......@@ -107,14 +42,11 @@ extern struct ino_bucket ivector_table[NUM_IVECS];
#define NR_IRQS 255
extern void irq_install_pre_handler(int virt_irq,
void (*func)(struct ino_bucket *, void *, void *),
void (*func)(unsigned int, void *, void *),
void *arg1, void *arg2);
#define irq_canonicalize(irq) (irq)
extern void disable_irq(unsigned int);
#define disable_irq_nosync disable_irq
extern void enable_irq(unsigned int);
extern unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap, unsigned char flags);
extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char flags);
extern unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
extern unsigned int sbus_build_irq(void *sbus, unsigned int ino);
static __inline__ void set_softint(unsigned long bits)
......@@ -140,8 +72,4 @@ static __inline__ unsigned long get_softint(void)
return retval;
}
struct irqaction;
struct pt_regs;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
#endif
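
For reference, a pre-handler under the new prototype above receives the raw INO instead of a struct ino_bucket pointer. A hedged sketch, assuming the usual sparc64 UPA register accessors from asm/upa.h; the polling body is illustrative, not the exact code from pci_sabre.c/pci_schizo.c:

/* Illustrative write-sync pre-handler matching the new prototype. */
static void example_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
{
	unsigned long sync_reg = (unsigned long) _arg2;
	u64 mask = 1UL << (ino & IMAP_INO);
	int limit = 100000;

	/* Set the INO's bit in the DMA write-sync register, then poll
	 * until the hardware clears it (bounded by 'limit').
	 */
	upa_writeq(mask, sync_reg);
	while (--limit && (upa_readq(sync_reg) & mask))
		cpu_relax();
}

/* Installed after build_irq(), along the lines of what the Tomatillo code does:
 *	irq_install_pre_handler(virt_irq, example_wsync_handler,
 *				pdev, (void *) sync_reg);
 */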