Commit cd1a41ce — Author: Thomas Gleixner

softirq: Move __ARCH_HAS_DO_SOFTIRQ to Kconfig

To prepare for inlining do_softirq_own_stack(), replace
__ARCH_HAS_DO_SOFTIRQ with a Kconfig switch and select it in the affected
architectures.

This allows the function prototype and the inline stub to be moved, in the
next step, into a separate asm-generic header file, which is required to
avoid include recursion.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20210210002513.181713427@linutronix.de
Parent 624db9ea
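For orientation before the diff: the Kconfig switch is what lets the follow-up patch carry the prototype and the inline fallback in one generic header. A minimal sketch of that end state is shown below; the header name softirq_stack.h and its exact contents are assumptions about the follow-up patch, not something introduced by this commit.

/* Sketch only: assumed shape of the later asm-generic header. */
#ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
#define __ASM_GENERIC_SOFTIRQ_STACK_H

#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
    __do_softirq();
}
#endif

#endif /* __ASM_GENERIC_SOFTIRQ_STACK_H */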
@@ -759,6 +759,12 @@ config HAVE_IRQ_EXIT_ON_IRQ_STACK
      This spares a stack switch and improves cache usage on softirq
      processing.

+config HAVE_SOFTIRQ_ON_OWN_STACK
+    bool
+    help
+      Architecture provides a function to run __do_softirq() on a
+      separate stack.
+
config PGTABLE_LEVELS
    int
    default 2
...
@@ -63,6 +63,7 @@ config PARISC
    select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE
    select HAVE_KPROBES_ON_FTRACE
    select HAVE_DYNAMIC_FTRACE_WITH_REGS
+   select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
    select SET_FS
    help
...
@@ -12,10 +12,6 @@
#include <linux/threads.h>
#include <linux/irq.h>

-#ifdef CONFIG_IRQSTACKS
-#define __ARCH_HAS_DO_SOFTIRQ
-#endif
-
typedef struct {
    unsigned int __softirq_pending;
    unsigned int kernel_stack_usage;
...
@@ -237,6 +237,7 @@ config PPC
    select MMU_GATHER_PAGE_SIZE
    select HAVE_REGS_AND_STACK_ACCESS_API
    select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
+   select HAVE_SOFTIRQ_ON_OWN_STACK
    select HAVE_SYSCALL_TRACEPOINTS
    select HAVE_VIRT_CPU_ACCOUNTING
    select HAVE_IRQ_TIME_ACCOUNTING
...
@@ -37,8 +37,6 @@ extern int distribute_irqs;
struct pt_regs;

-#define __ARCH_HAS_DO_SOFTIRQ
-
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
/*
 * Per-cpu stacks for handling critical, debug and machine check
...
@@ -182,6 +182,7 @@ config S390
    select HAVE_REGS_AND_STACK_ACCESS_API
    select HAVE_RELIABLE_STACKTRACE
    select HAVE_RSEQ
+   select HAVE_SOFTIRQ_ON_OWN_STACK
    select HAVE_SYSCALL_TRACEPOINTS
    select HAVE_VIRT_CPU_ACCOUNTING
    select HAVE_VIRT_CPU_ACCOUNTING_IDLE
...
@@ -18,7 +18,6 @@
#define or_softirq_pending(x) (S390_lowcore.softirq_pending |= (x))

#define __ARCH_IRQ_STAT
-#define __ARCH_HAS_DO_SOFTIRQ
#define __ARCH_IRQ_EXIT_IRQS_DISABLED

static inline void ack_bad_irq(unsigned int irq)
...
@@ -56,6 +56,7 @@ config SUPERH
    select HAVE_PERF_EVENTS
    select HAVE_REGS_AND_STACK_ACCESS_API
    select HAVE_UID16
+   select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
    select HAVE_STACKPROTECTOR
    select HAVE_SYSCALL_TRACEPOINTS
    select IRQ_FORCED_THREADING
...
@@ -51,7 +51,6 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs);
#ifdef CONFIG_IRQSTACKS
extern void irq_ctx_init(int cpu);
extern void irq_ctx_exit(int cpu);
-# define __ARCH_HAS_DO_SOFTIRQ
#else
# define irq_ctx_init(cpu) do { } while (0)
# define irq_ctx_exit(cpu) do { } while (0)
...
@@ -97,6 +97,7 @@ config SPARC64
    select ARCH_HAS_PTE_SPECIAL
    select PCI_DOMAINS if PCI
    select ARCH_HAS_GIGANTIC_PAGE
+   select HAVE_SOFTIRQ_ON_OWN_STACK

config ARCH_PROC_KCORE_TEXT
    def_bool y
...
@@ -93,7 +93,6 @@ void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
extern void *hardirq_stack[NR_CPUS];
extern void *softirq_stack[NR_CPUS];

-#define __ARCH_HAS_DO_SOFTIRQ

#define NO_IRQ 0xffffffff
...
@@ -221,6 +221,7 @@ config X86
    select HAVE_REGS_AND_STACK_ACCESS_API
    select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
    select HAVE_FUNCTION_ARG_ACCESS_API
+   select HAVE_SOFTIRQ_ON_OWN_STACK
    select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
    select HAVE_STACK_VALIDATION if X86_64
    select HAVE_STATIC_CALL
...
@@ -25,8 +25,6 @@ static inline int irq_canonicalize(int irq)
extern int irq_init_percpu_irqstack(unsigned int cpu);

-#define __ARCH_HAS_DO_SOFTIRQ
-
struct irq_desc;

extern void fixup_irqs(void);
...
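Every architecture that selects HAVE_SOFTIRQ_ON_OWN_STACK keeps supplying its own do_softirq_own_stack() implementation; only the opt-in mechanism changes from a macro to a Kconfig symbol. A rough, hypothetical sketch of the usual shape follows; call_on_softirq_stack() is a stand-in for whatever per-architecture stack-switching primitive (inline asm or an irq-stack helper) is actually used.

/*
 * Hypothetical sketch, not code from this patch: switch to the
 * dedicated softirq/irq stack and run softirq processing there.
 */
void do_softirq_own_stack(void)
{
    /* call_on_softirq_stack() is a stand-in for the arch primitive. */
    call_on_softirq_stack(__do_softirq);
}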
@@ -569,7 +569,7 @@ struct softirq_action
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

-#ifdef __ARCH_HAS_DO_SOFTIRQ
+#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
...
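For context, the generic softirq core is the consumer of this prototype/stub pair. Below is a rough sketch of the caller in kernel/softirq.c, which this patch does not touch (details such as the ksoftirqd check are omitted): when softirqs are pending and we are not already in interrupt context, processing is handed to do_softirq_own_stack(), which resolves to either the architecture's stack-switching version or the inline __do_softirq() fallback.

asmlinkage __visible void do_softirq(void)
{
    __u32 pending;
    unsigned long flags;

    if (in_interrupt())
        return;

    local_irq_save(flags);

    pending = local_softirq_pending();
    if (pending)
        do_softirq_own_stack();

    local_irq_restore(flags);
}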