Commit e65e49d0 authored by Mike Travis

irq: update all arches for new irq_desc

Impact: cleanup, update to new cpumask API

irq_desc.affinity and irq_desc.pending_mask are now cpumask_var_t's,
so access to them should use the new cpumask API.
Signed-off-by: Mike Travis <travis@sgi.com>
Parent 28e08861
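Before the per-architecture hunks, a minimal sketch of the conversion pattern the patch applies everywhere may help: direct struct assignment on the affinity mask is replaced by cpumask accessor calls, which keep working once the field becomes a cpumask_var_t. Every accessor shown here appears in the hunks below; the wrapper function itself is hypothetical, for illustration only.

```c
#include <linux/cpumask.h>
#include <linux/irq.h>

/* Hypothetical helper, not part of the patch: shows the old-API to
 * new-API mapping used throughout the diff below. */
static void demo_set_affinity(struct irq_desc *desc, int cpu)
{
    /* Old style, valid only while desc->affinity was a plain cpumask_t:
     *     desc->affinity = cpumask_of_cpu(cpu);
     * New style, which also works when the field is a cpumask_var_t: */
    cpumask_copy(desc->affinity, cpumask_of(cpu));

    /* cpu_isset() becomes cpumask_test_cpu(),
     * cpus_setall() becomes cpumask_setall(): */
    if (!cpumask_test_cpu(cpu, desc->affinity))
        cpumask_setall(desc->affinity);
}
```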
@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
         cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
     last_cpu = cpu;
 
-    irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+    cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
     irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
     return 0;
 }
......
@@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = {
     .lock = SPIN_LOCK_UNLOCKED
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating bad_irq_desc.affinity or .pending_mask */
+#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 /*
  * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
  * come via this function.  Instead, they should provide their
@@ -161,7 +166,7 @@ void __init init_IRQ(void)
         irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 
 #ifdef CONFIG_SMP
-    bad_irq_desc.affinity = CPU_MASK_ALL;
+    cpumask_setall(bad_irq_desc.affinity);
     bad_irq_desc.cpu = smp_processor_id();
 #endif
     init_arch_irq();
@@ -191,15 +196,16 @@ void migrate_irqs(void)
         struct irq_desc *desc = irq_desc + i;
 
         if (desc->cpu == cpu) {
-            unsigned int newcpu = any_online_cpu(desc->affinity);
-
-            if (newcpu == NR_CPUS) {
+            unsigned int newcpu = cpumask_any_and(desc->affinity,
+                                                  cpu_online_mask);
+            if (newcpu >= nr_cpu_ids) {
                 if (printk_ratelimit())
                     printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
                            i, cpu);
 
-                cpus_setall(desc->affinity);
-                newcpu = any_online_cpu(desc->affinity);
+                cpumask_setall(desc->affinity);
+                newcpu = cpumask_any_and(desc->affinity,
+                                         cpu_online_mask);
             }
 
             route_irq(desc, i, newcpu);
......
@@ -263,7 +263,7 @@ static void em_route_irq(int irq, unsigned int cpu)
     const struct cpumask *mask = cpumask_of(cpu);
 
     spin_lock_irq(&desc->lock);
-    desc->affinity = *mask;
+    cpumask_copy(desc->affinity, mask);
     desc->chip->set_affinity(irq, mask);
     spin_unlock_irq(&desc->lock);
 }
......
@@ -69,6 +69,11 @@ static struct irq_desc bad_irq_desc = {
 #endif
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating a variable-sized bad_irq_desc.affinity */
+#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 int show_interrupts(struct seq_file *p, void *v)
 {
     int i = *(loff_t *) v, j;
......
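The #error guards added for ARM and Blackfin above exist because, under CONFIG_CPUMASK_OFFSTACK, a cpumask_var_t is a pointer that must be allocated before use, and the statically initialized bad_irq_desc never passes through any allocation path. A hedged sketch of what off-stack allocation looks like (the helper is hypothetical; alloc_cpumask_var()/free_cpumask_var() are the real API):

```c
#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Hypothetical helper for illustration only. */
static int demo_offstack_mask(void)
{
    cpumask_var_t mask;      /* a pointer when CONFIG_CPUMASK_OFFSTACK=y */

    if (!alloc_cpumask_var(&mask, GFP_KERNEL))
        return -ENOMEM;      /* off-stack allocation can fail */

    cpumask_setall(mask);    /* safe via the accessor API */
    free_cpumask_var(mask);
    return 0;
}
```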
@@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
     if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
         /* Clear affinity */
-        cpus_setall(idesc->affinity);
+        cpumask_setall(idesc->affinity);
 #endif
         /* Clear the interrupt information */
         iosapic_intr_info[irq].dest = 0;
......
@@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
     if (irq < NR_IRQS) {
-        cpumask_copy(&irq_desc[irq].affinity,
+        cpumask_copy(irq_desc[irq].affinity,
                      cpumask_of(cpu_logical_id(hwid)));
         irq_redir[irq] = (char) (redir & 0xff);
     }
@@ -148,7 +148,7 @@ static void migrate_irqs(void)
         if (desc->status == IRQ_PER_CPU)
             continue;
 
-        if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+        if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
             >= nr_cpu_ids) {
             /*
              * Save it for phase 2 processing
......
@@ -75,7 +75,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
     msg.data = data;
 
     write_msi_msg(irq, &msg);
-    irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+    cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 }
 
 #endif /* CONFIG_SMP */
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
     msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 
     dmar_msi_write(irq, &msg);
-    irq_desc[irq].affinity = *mask;
+    cpumask_copy(irq_desc[irq].affinity, mask);
 }
 
 #endif /* CONFIG_SMP */
......
@@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
     msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
 
     write_msi_msg(irq, &msg);
-    irq_desc[irq].affinity = *cpu_mask;
+    cpumask_copy(irq_desc[irq].affinity, cpu_mask);
 }
 
 #endif /* CONFIG_SMP */
......
@@ -66,7 +66,7 @@ extern void smtc_forward_irq(unsigned int irq);
  */
 #define IRQ_AFFINITY_HOOK(irq)                                          \
 do {                                                                    \
-    if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) {       \
+    if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
         smtc_forward_irq(irq);                                          \
         irq_exit();                                                     \
         return;                                                         \
......
@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
         set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
     }
-    irq_desc[irq].affinity = *cpumask;
+    cpumask_copy(irq_desc[irq].affinity, cpumask);
     spin_unlock_irqrestore(&gic_lock, flags);
 
 }
......
@@ -686,7 +686,7 @@ void smtc_forward_irq(unsigned int irq)
      * and efficiency, we just pick the easiest one to find.
      */
 
-    target = first_cpu(irq_desc[irq].affinity);
+    target = cpumask_first(irq_desc[irq].affinity);
 
     /*
      * We depend on the platform code to have correctly processed
......
@@ -116,7 +116,7 @@ struct plat_smp_ops msmtc_smp_ops = {
 
 void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
-    cpumask_t tmask = *affinity;
+    cpumask_t tmask;
     int cpu = 0;
     void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
 
@@ -139,11 +139,12 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
      * be made to forward to an offline "CPU".
      */
 
+    cpumask_copy(&tmask, affinity);
     for_each_cpu(cpu, affinity) {
         if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
             cpu_clear(cpu, tmask);
     }
-    irq_desc[irq].affinity = tmask;
+    cpumask_copy(irq_desc[irq].affinity, &tmask);
 
     if (cpus_empty(tmask))
         /*
......
@@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
     if (CHECK_IRQ_PER_CPU(irq)) {
         /* Bad linux design decision.  The mask has already
          * been set; we must reset it */
-        irq_desc[irq].affinity = CPU_MASK_ALL;
+        cpumask_setall(irq_desc[irq].affinity);
         return -EINVAL;
     }
 
@@ -136,7 +136,7 @@ static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
     if (cpu_check_affinity(irq, dest))
         return;
 
-    irq_desc[irq].affinity = *dest;
+    cpumask_copy(irq_desc[irq].affinity, dest);
 }
 #endif
 
@@ -295,7 +295,7 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-    irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+    cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 #endif
 
     return per_cpu(cpu_data, cpu).txn_addr;
 
@@ -352,7 +352,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
     irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-    dest = irq_desc[irq].affinity;
+    cpumask_copy(&dest, irq_desc[irq].affinity);
     if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
         !cpu_isset(smp_processor_id(), dest)) {
         int cpu = first_cpu(dest);
......
@@ -231,7 +231,7 @@ void fixup_irqs(cpumask_t map)
         if (irq_desc[irq].status & IRQ_PER_CPU)
             continue;
 
-        cpus_and(mask, irq_desc[irq].affinity, map);
+        cpumask_and(&mask, irq_desc[irq].affinity, &map);
         if (any_online_cpu(mask) == NR_CPUS) {
             printk("Breaking affinity for irq %i\n", irq);
             mask = map;
......
@@ -153,9 +153,10 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
 {
     int server;
     /* For the moment only implement delivery to all cpus or one cpu */
-    cpumask_t cpumask = irq_desc[virq].affinity;
+    cpumask_t cpumask;
     cpumask_t tmp = CPU_MASK_NONE;
 
+    cpumask_copy(&cpumask, irq_desc[virq].affinity);
     if (!distribute_irqs)
         return default_server;
 
@@ -869,7 +870,7 @@ void xics_migrate_irqs_away(void)
                virq, cpu);
 
         /* Reset affinity to all cpus */
-        irq_desc[virq].affinity = CPU_MASK_ALL;
+        cpumask_setall(irq_desc[virq].affinity);
         desc->chip->set_affinity(virq, cpu_all_mask);
 unlock:
         spin_unlock_irqrestore(&desc->lock, flags);
......
@@ -566,9 +566,10 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-    cpumask_t mask = irq_desc[virt_irq].affinity;
+    cpumask_t mask;
     int cpuid;
 
+    cpumask_copy(&mask, irq_desc[virt_irq].affinity);
     if (cpus_equal(mask, CPU_MASK_ALL)) {
         static int irq_rover;
         static DEFINE_SPINLOCK(irq_rover_lock);
......
@@ -247,9 +247,10 @@ struct irq_handler_data {
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-    cpumask_t mask = irq_desc[virt_irq].affinity;
+    cpumask_t mask;
     int cpuid;
 
+    cpumask_copy(&mask, irq_desc[virt_irq].affinity);
     if (cpus_equal(mask, CPU_MASK_ALL)) {
         static int irq_rover;
         static DEFINE_SPINLOCK(irq_rover_lock);
@@ -854,7 +855,7 @@ void fixup_irqs(void)
             !(irq_desc[irq].status & IRQ_PER_CPU)) {
             if (irq_desc[irq].chip->set_affinity)
                 irq_desc[irq].chip->set_affinity(irq,
-                    &irq_desc[irq].affinity);
+                    irq_desc[irq].affinity);
         }
         spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
     }
......
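A recurring pattern in the xics, mpic, and sparc64 hunks above is worth noting: instead of assigning the descriptor's mask into a local cpumask_t, the patch snapshots it with cpumask_copy() before testing or modifying it, since plain assignment no longer compiles once the field is a cpumask_var_t. A hedged sketch (the function is hypothetical; cpumask_any_and() is used the same way in the diff):

```c
#include <linux/cpumask.h>
#include <linux/irq.h>

/* Hypothetical illustration of the local-snapshot pattern. */
static int demo_choose_cpu(unsigned int virt_irq)
{
    cpumask_t mask;    /* on-stack copy; cheap while NR_CPUS is small */

    cpumask_copy(&mask, irq_desc[virt_irq].affinity);
    return cpumask_any_and(&mask, cpu_online_mask);
}
```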