Commit 52d743f3 authored by Thomas Gleixner

x86/softirq: Remove indirection in do_softirq_own_stack()

Use the new inline stack switching and remove the old ASM indirect call
implementation.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20210210002512.972714001@linutronix.de
Parent 359f01d1
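For context, the "new inline stack switching" named above is the call_on_irqstack() machinery added earlier in this series. The sketch below is paraphrased from the irq_stack.h of that era and trimmed (objtool instrumentation and part of the clobber list elided), so treat it as illustrative rather than verbatim kernel source. The first movq plants the old RSP at the top of the irq stack, preserving the back-link the ORC unwinder needs, and asm_call is a template such as ASM_CALL_SOFTIRQ that performs a direct call, which is what removes the CALL_NOSPEC indirection of the old asm_call_on_stack():

/*
 * Illustrative sketch only (paraphrased, trimmed). "stack" is the top
 * of the irq stack, "func" the function to run there, "asm_call" an
 * asm template like ASM_CALL_SOFTIRQ containing the actual call.
 */
#define call_on_stack(stack, func, asm_call)			\
{								\
	register void *tos asm("r11");				\
								\
	tos = ((void *)(stack));				\
								\
	asm volatile(						\
	"movq	%%rsp, (%[tos])	\n"	/* back-link for ORC */	\
	"movq	%[tos], %%rsp	\n"	/* switch stacks */	\
	asm_call						\
	"popq	%%rsp		\n"	/* switch back */	\
	: "+r" (tos)						\
	: [__func] "i" (func), [tos] "r" (tos)			\
	: "cc", "rbx", "rcx", "rdx", "rsi", "rdi",		\
	  "r8", "r9", "r10", "memory");				\
}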
arch/x86/entry/entry_64.S
@@ -756,45 +756,6 @@ SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
 SYM_CODE_END(.Lbad_gs)
 	.previous
 
-/*
- * rdi: New stack pointer points to the top word of the stack
- * rsi: Function pointer
- * rdx: Function argument (can be NULL if none)
- */
-SYM_FUNC_START(asm_call_on_stack)
-	/*
-	 * Save the frame pointer unconditionally. This allows the ORC
-	 * unwinder to handle the stack switch.
-	 */
-	pushq	%rbp
-	mov	%rsp, %rbp
-
-	/*
-	 * The unwinder relies on the word at the top of the new stack
-	 * page linking back to the previous RSP.
-	 */
-	mov	%rsp, (%rdi)
-	mov	%rdi, %rsp
-	/* Move the argument to the right place */
-	mov	%rdx, %rdi
-
-1:
-	.pushsection .discard.instr_begin
-	.long 1b - .
-	.popsection
-
-	CALL_NOSPEC	rsi
-
-2:
-	.pushsection .discard.instr_end
-	.long 2b - .
-	.popsection
-
-	/* Restore the previous stack pointer from RBP. */
-	leaveq
-	ret
-SYM_FUNC_END(asm_call_on_stack)
-
 #ifdef CONFIG_XEN_PV
 /*
  * A note on the "critical region" in our callback handler.
arch/x86/include/asm/irq_stack.h
@@ -185,20 +185,23 @@
 			      IRQ_CONSTRAINTS, regs, vector);		\
 }
 
-static __always_inline bool irqstack_active(void)
-{
-	return __this_cpu_read(hardirq_stack_inuse);
-}
-
-void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
+#define ASM_CALL_SOFTIRQ						\
+	"call %P[__func]				\n"
 
-static __always_inline void __run_on_irqstack(void (*func)(void))
-{
-	void *tos = __this_cpu_read(hardirq_stack_ptr);
-
-	__this_cpu_write(hardirq_stack_inuse, true);
-	asm_call_on_stack(tos, func, NULL);
-	__this_cpu_write(hardirq_stack_inuse, false);
-}
+/*
+ * Macro to invoke __do_softirq on the irq stack. Contrary to the above
+ * the only check which is necessary is whether the interrupt stack is
+ * in use already.
+ */
+#define run_softirq_on_irqstack_cond()					\
+{									\
+	if (__this_cpu_read(hardirq_stack_inuse)) {			\
+		__do_softirq();						\
+	} else {							\
+		__this_cpu_write(hardirq_stack_inuse, true);		\
+		call_on_irqstack(__do_softirq, ASM_CALL_SOFTIRQ);	\
+		__this_cpu_write(hardirq_stack_inuse, false);		\
+	}								\
+}
 
 #else /* CONFIG_X86_64 */
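A note on the ASM_CALL_SOFTIRQ template added above: with an "i" (immediate) operand, the "%P" modifier makes the compiler print the bare symbol, so the template expands to a direct "call __do_softirq" rather than an indirect call through a register, which is exactly the indirection this patch removes. Below is a small stand-alone demonstration of the idiom; it is hypothetical user-space code, not from the kernel tree:

/*
 * Demo of the "call %P[sym]" idiom (GCC/Clang, x86-64). Caveats,
 * hence the hedging: a call from inline asm may clobber the red zone
 * (the kernel avoids this by building with -mno-red-zone), and PIE
 * builds may reject the "i" constraint, so compile with -no-pie if
 * needed. All caller-saved registers are declared clobbered.
 */
#include <stdio.h>

static int hits;

static void bump(void)
{
	hits++;		/* kept trivial to sidestep ABI alignment concerns */
}

int main(void)
{
	/* "%P" with "i" prints the bare symbol: a direct "call bump". */
	asm volatile("call %P[sym]"
		     : : [sym] "i" (bump)
		     : "rax", "rcx", "rdx", "rsi", "rdi",
		       "r8", "r9", "r10", "r11", "memory");

	printf("hits = %d\n", hits);	/* prints: hits = 1 */
	return 0;
}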
@@ -219,29 +222,6 @@ static __always_inline void __run_on_irqstack(void (*func)(void))
 	irq_exit_rcu();							\
 }
 
-static inline bool irqstack_active(void) { return false; }
-static inline void __run_on_irqstack(void (*func)(void)) { }
-
 #endif /* !CONFIG_X86_64 */
 
-static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
-{
-	if (IS_ENABLED(CONFIG_X86_32))
-		return false;
-	if (!regs)
-		return !irqstack_active();
-
-	return !user_mode(regs) && !irqstack_active();
-}
-
-static __always_inline void run_on_irqstack_cond(void (*func)(void),
-						 struct pt_regs *regs)
-{
-	lockdep_assert_irqs_disabled();
-
-	if (irq_needs_irq_stack(regs))
-		__run_on_irqstack(func);
-	else
-		func();
-}
 #endif
arch/x86/kernel/irq_64.c
@@ -76,5 +76,5 @@ int irq_init_percpu_irqstack(unsigned int cpu)
 
 void do_softirq_own_stack(void)
 {
-	run_on_irqstack_cond(__do_softirq, NULL);
+	run_softirq_on_irqstack_cond();
 }
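For reference, do_softirq_own_stack() is the arch hook the generic softirq code uses when pending softirqs must be processed from task context on a dedicated stack. Its main caller in kernel/softirq.c looked roughly like this in the v5.11-era tree (abbreviated from memory, so consult the history for the exact form):

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();	/* on x86-64 now ends up in
					 * run_softirq_on_irqstack_cond() */

	local_irq_restore(flags);
}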