diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 21f851179ff08a0ceed51d8577cc34529153f804..e17ce871bb19c86090ffca835609851e404186b5 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -187,6 +187,7 @@ config X86 select HAVE_HW_BREAKPOINT select HAVE_IDE select HAVE_IOREMAP_PROT + select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64 select HAVE_IRQ_TIME_ACCOUNTING select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_GZIP diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h index fa444c27772af48ab26b9aace91b5c232c9bffa3..1b82f923070962f31764a2d2a10bb824e1986c9c 100644 --- a/arch/x86/include/asm/irq_stack.h +++ b/arch/x86/include/asm/irq_stack.h @@ -189,19 +189,16 @@ "call %P[__func] \n" /* - * Macro to invoke __do_softirq on the irq stack. Contrary to the above - * the only check which is necessary is whether the interrupt stack is - * in use already. + * Macro to invoke __do_softirq on the irq stack. This is only called from + * task context when bottom halves are about to be reenabled and soft + * interrupts are pending to be processed. The interrupt stack cannot be in + * use here. 
*/ -#define run_softirq_on_irqstack_cond() \ +#define run_softirq_on_irqstack() \ { \ - if (__this_cpu_read(hardirq_stack_inuse)) { \ - __do_softirq(); \ - } else { \ - __this_cpu_write(hardirq_stack_inuse, true); \ - call_on_irqstack(__do_softirq, ASM_CALL_SOFTIRQ); \ - __this_cpu_write(hardirq_stack_inuse, false); \ - } \ + __this_cpu_write(hardirq_stack_inuse, true); \ + call_on_irqstack(__do_softirq, ASM_CALL_SOFTIRQ); \ + __this_cpu_write(hardirq_stack_inuse, false); \ } #else /* CONFIG_X86_64 */ diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 8d9f9a1b49e50644afd02caa33eb5ab3054b29a6..b88fdb9686e64484e393855a6709ad9be53b7a41 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -76,5 +76,5 @@ int irq_init_percpu_irqstack(unsigned int cpu) void do_softirq_own_stack(void) { - run_softirq_on_irqstack_cond(); + run_softirq_on_irqstack(); }