Commit c38e5038 authored by Ingo Molnar

x86/asm/entry/64: Rename 'old_rsp' to 'rsp_scratch'

Make clear that the usage of PER_CPU(old_rsp) is purely temporary,
by renaming it to 'rsp_scratch'.

Cc: Alexei Starovoitov <ast@plumgrid.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Will Drewry <wad@chromium.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 7fcb3bc3
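
For readers coming at this from the C side, here is a minimal sketch, not part of the patch (the helper name is made up for illustration), of how a per-CPU scratch word such as rsp_scratch is defined and accessed from C; the assembly hunks below reach the same storage through PER_CPU_VAR(rsp_scratch):

/* Illustrative sketch only, not from this patch. */
#include <linux/percpu.h>

/* One word of scratch space per CPU, matching the definition in the C hunk below. */
DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Hypothetical helper: park a value while interrupts stay off. */
static unsigned long rsp_scratch_demo(unsigned long val)
{
	this_cpu_write(rsp_scratch, val);	/* stash */
	/* ... short, non-preemptible work ... */
	return this_cpu_read(rsp_scratch);	/* retrieve */
}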
@@ -237,16 +237,16 @@ ENTRY(system_call)
 GLOBAL(system_call_after_swapgs)
 
 	/*
-	 * We use 'old_rsp' as a scratch register, hence this block must execute
+	 * We use 'rsp_scratch' as a scratch register, hence this block must execute
 	 * atomically in the face of possible interrupt-driven task preemption,
-	 * so we can enable interrupts only after we're done with using old_rsp:
+	 * so we can enable interrupts only after we're done with using rsp_scratch:
 	 */
-	movq	%rsp,PER_CPU_VAR(old_rsp)
+	movq	%rsp,PER_CPU_VAR(rsp_scratch)
 	/* kernel_stack is set so that 5 slots (iret frame) are preallocated */
 	movq	PER_CPU_VAR(kernel_stack),%rsp
 	ALLOC_PT_GPREGS_ON_STACK 8		/* +8: space for orig_ax */
 	movq	%rcx,RIP(%rsp)
-	movq	PER_CPU_VAR(old_rsp),%rcx
+	movq	PER_CPU_VAR(rsp_scratch),%rcx
 	movq	%r11,EFLAGS(%rsp)
 	movq	%rcx,RSP(%rsp)
 	/*
@@ -657,7 +657,7 @@ common_interrupt:
 	ASM_CLAC
 	addq	$-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
-	/* 0(%rsp): old_rsp */
+	/* 0(%rsp): rsp_scratch */
 ret_from_intr:
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
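The comment kept in the @@ -237 hunk above is the whole point of the rename: the per-CPU slot is live only between the two movq accesses, and only while interrupts are off. A hedged sketch of the ordering the system_call fast path relies on (the position of the interrupt re-enable is paraphrased from the surrounding entry code; it is not part of the hunk shown):

	movq	%rsp, PER_CPU_VAR(rsp_scratch)	/* park the user RSP per-CPU */
	movq	PER_CPU_VAR(kernel_stack), %rsp	/* switch to the kernel stack */
	/* ... build the pt_regs frame ... */
	movq	PER_CPU_VAR(rsp_scratch), %rcx
	movq	%rcx, RSP(%rsp)			/* user RSP now saved in pt_regs */
	ENABLE_INTERRUPTS(CLBR_NONE)		/* only now: an interrupt-driven
						 * preemption could let another
						 * task's syscall reuse rsp_scratch */
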
@@ -52,7 +52,7 @@
 
 asmlinkage extern void ret_from_fork(void);
 
-__visible DEFINE_PER_CPU(unsigned long, old_rsp);
+__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
 
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, int all)
@@ -68,11 +68,11 @@ ENTRY(xen_sysret64)
 	 * We're already on the usermode stack at this point, but
 	 * still with the kernel gs, so we can easily switch back
 	 */
-	movq %rsp, PER_CPU_VAR(old_rsp)
+	movq %rsp, PER_CPU_VAR(rsp_scratch)
 	movq PER_CPU_VAR(kernel_stack), %rsp
 
 	pushq $__USER_DS
-	pushq PER_CPU_VAR(old_rsp)
+	pushq PER_CPU_VAR(rsp_scratch)
 	pushq %r11
 	pushq $__USER_CS
 	pushq %rcx
@@ -87,11 +87,11 @@ ENTRY(xen_sysret32)
 	 * We're already on the usermode stack at this point, but
 	 * still with the kernel gs, so we can easily switch back
 	 */
-	movq %rsp, PER_CPU_VAR(old_rsp)
+	movq %rsp, PER_CPU_VAR(rsp_scratch)
 	movq PER_CPU_VAR(kernel_stack), %rsp
 
 	pushq $__USER32_DS
-	pushq PER_CPU_VAR(old_rsp)
+	pushq PER_CPU_VAR(rsp_scratch)
 	pushq %r11
 	pushq $__USER32_CS
 	pushq %rcx
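For orientation, a sketch (not part of the patch) of why rsp_scratch appears twice in each Xen hunk: the five pushq instructions rebuild an IRET-format return frame, and the slot holds the user RSP that was parked just before the stack switch, while SYSCALL itself left the user RIP in %rcx and RFLAGS in %r11:

	pushq	$__USER_DS			/* SS                             */
	pushq	PER_CPU_VAR(rsp_scratch)	/* RSP: user stack parked earlier */
	pushq	%r11				/* RFLAGS: saved by SYSCALL       */
	pushq	$__USER_CS			/* CS                             */
	pushq	%rcx				/* RIP: saved by SYSCALL          */
	/* the subsequent return path pops RIP, CS, RFLAGS, RSP, SS */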