diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index f1e65c2ead6e09e272edc616ac1461eab9a7532d..d49dbe8dc96b345efa52c579e96782db3ecc3b3b 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -82,53 +82,12 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
 	final_note(buf);
 }
 
-static void crash_get_current_regs(struct pt_regs *regs)
+static void crash_save_self(struct pt_regs *regs)
 {
-	__asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
-	__asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
-	__asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
-	__asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
-	__asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
-	__asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
-	__asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
-	__asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
-	__asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
-	__asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
-	__asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
-	__asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
-	__asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));
-
-	regs->eip = (unsigned long)current_text_addr();
-}
-
-/* CPU does not save ss and esp on stack if execution is already
- * running in kernel mode at the time of NMI occurrence. This code
- * fixes it.
- */
-static void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
-{
-	memcpy(newregs, oldregs, sizeof(*newregs));
-	newregs->esp = (unsigned long)&(oldregs->esp);
-	__asm__ __volatile__(
-		"xorl %%eax, %%eax\n\t"
-		"movw %%ss, %%ax\n\t"
-		:"=a"(newregs->xss));
-}
-
-/* We may have saved_regs from where the error came from
- * or it is NULL if via a direct panic().
- */
-static void crash_save_self(struct pt_regs *saved_regs)
-{
-	struct pt_regs regs;
 	int cpu;
 
 	cpu = smp_processor_id();
-	if (saved_regs)
-		crash_setup_regs(&regs, saved_regs);
-	else
-		crash_get_current_regs(&regs);
-	crash_save_this_cpu(&regs, cpu);
+	crash_save_this_cpu(regs, cpu);
 }
 
 #ifdef CONFIG_SMP
@@ -147,7 +106,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
 	local_irq_disable();
 
 	if (!user_mode(regs)) {
-		crash_setup_regs(&fixed_regs, regs);
+		crash_fixup_ss_esp(&fixed_regs, regs);
 		regs = &fixed_regs;
 	}
 	crash_save_this_cpu(regs, cpu);
diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h
index d80d446498fbca21a399e9e9b570a387951784d5..8fb1defd98c641b3f19f37a27f6f9ad77aac6ca7 100644
--- a/include/asm-i386/kexec.h
+++ b/include/asm-i386/kexec.h
@@ -2,6 +2,7 @@
 #define _I386_KEXEC_H
 
 #include <asm/fixmap.h>
+#include <asm/ptrace.h>
 
 /*
  * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
  */
@@ -27,4 +28,48 @@
 
 #define MAX_NOTE_BYTES 1024
 
+/* CPU does not save ss and esp on stack if execution is already
+ * running in kernel mode at the time of NMI occurrence. This code
+ * fixes it.
+ */
+static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
+				      struct pt_regs *oldregs)
+{
+	memcpy(newregs, oldregs, sizeof(*newregs));
+	newregs->esp = (unsigned long)&(oldregs->esp);
+	__asm__ __volatile__(
+		"xorl %%eax, %%eax\n\t"
+		"movw %%ss, %%ax\n\t"
+		:"=a"(newregs->xss));
+}
+
+/*
+ * This function is responsible for capturing register states if coming
+ * via panic; otherwise it just fixes up ss and esp if coming via a
+ * kernel-mode exception.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+				    struct pt_regs *oldregs)
+{
+	if (oldregs)
+		crash_fixup_ss_esp(newregs, oldregs);
+	else {
+		__asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->ebx));
+		__asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->ecx));
+		__asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->edx));
+		__asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->esi));
+		__asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->edi));
+		__asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->ebp));
+		__asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->eax));
+		__asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->esp));
+		__asm__ __volatile__("movw %%ss, %%ax;" :"=a"(newregs->xss));
+		__asm__ __volatile__("movw %%cs, %%ax;" :"=a"(newregs->xcs));
+		__asm__ __volatile__("movw %%ds, %%ax;" :"=a"(newregs->xds));
+		__asm__ __volatile__("movw %%es, %%ax;" :"=a"(newregs->xes));
+		__asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->eflags));
+
+		newregs->eip = (unsigned long)current_text_addr();
+	}
+}
+
 #endif /* _I386_KEXEC_H */
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 1197de8b2a949d1b8f8a8770d27b9b8e6a62cc3d..de1441656efdacf2d86a5db5226fb6e7f1469655 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1057,7 +1057,9 @@ void crash_kexec(struct pt_regs *regs)
 	if (!locked) {
 		image = xchg(&kexec_crash_image, NULL);
 		if (image) {
-			machine_crash_shutdown(regs);
+			struct pt_regs fixed_regs;
+			crash_setup_regs(&fixed_regs, regs);
+			machine_crash_shutdown(&fixed_regs);
 			machine_kexec(image);
 		}
 		xchg(&kexec_lock, 0);
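
The crash_setup_regs() helper above captures the calling CPU's registers one at a time, each with a single-instruction asm statement, so the capture itself disturbs as little machine state as possible; EFLAGS has no plain mov form, so it is pushed and popped into the save area instead. The fragment below is a minimal user-space sketch of that technique, not kernel code: the regs_snapshot structure is a hypothetical stand-in for the handful of struct pt_regs fields used here, and it must be built as 32-bit x86 (e.g. gcc -m32) because the asm uses i386 register names.

	/* user-space sketch -- build with: gcc -m32 -o capture capture.c */
	#include <stdio.h>

	/* Hypothetical stand-in for the struct pt_regs fields used here. */
	struct regs_snapshot {
		unsigned long ebx, ecx, edx, esi, edi, ebp, eax, esp, eflags;
	};

	static void capture_regs(struct regs_snapshot *r)
	{
		/* One register per statement, stored straight to memory,
		 * mirroring crash_setup_regs() in the patch above. */
		__asm__ __volatile__("movl %%ebx,%0" : "=m"(r->ebx));
		__asm__ __volatile__("movl %%ecx,%0" : "=m"(r->ecx));
		__asm__ __volatile__("movl %%edx,%0" : "=m"(r->edx));
		__asm__ __volatile__("movl %%esi,%0" : "=m"(r->esi));
		__asm__ __volatile__("movl %%edi,%0" : "=m"(r->edi));
		__asm__ __volatile__("movl %%ebp,%0" : "=m"(r->ebp));
		__asm__ __volatile__("movl %%eax,%0" : "=m"(r->eax));
		__asm__ __volatile__("movl %%esp,%0" : "=m"(r->esp));
		/* No mov from EFLAGS exists; push it, pop it into memory. */
		__asm__ __volatile__("pushfl; popl %0" : "=m"(r->eflags));
	}

	int main(void)
	{
		struct regs_snapshot r;

		capture_regs(&r);
		printf("esp=%#lx ebp=%#lx eflags=%#lx\n", r.esp, r.ebp, r.eflags);
		return 0;
	}

Writing each register with its own memory-destination instruction, rather than through C assignments, keeps the compiler from spilling or reshuffling the very registers being sampled.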
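The crash_fixup_ss_esp() trick relies on the i386 exception-frame layout: when a trap or NMI arrives while the CPU is already in kernel mode, no SS:ESP pair is pushed, so the esp and xss slots of struct pt_regs are never filled in, and the interrupted stack pointer is simply the address where those missing words would have sat, i.e. &oldregs->esp. The portable sketch below illustrates only that address computation; the structure is an abbreviated, hypothetical mock of the i386 struct pt_regs layout (field order is what matters), and the segment-register load is omitted.

	#include <stdio.h>
	#include <stddef.h>
	#include <string.h>

	/* Abbreviated mock of the i386 struct pt_regs layout; the real
	 * kernel definition differs in field types and detail. */
	struct mock_pt_regs {
		unsigned long ebx, ecx, edx, esi, edi, ebp, eax;
		unsigned long xds, xes, orig_eax, eip, xcs, eflags;
		unsigned long esp, xss;	/* pushed only for traps from user mode */
	};

	/* Same idea as crash_fixup_ss_esp(): copy the frame, then point esp
	 * at the first byte the CPU did not save -- which, for a kernel-mode
	 * trap, is exactly the pre-exception top of stack. */
	static void fixup_esp(struct mock_pt_regs *newregs,
			      struct mock_pt_regs *oldregs)
	{
		memcpy(newregs, oldregs, sizeof(*newregs));
		newregs->esp = (unsigned long)&(oldregs->esp);
	}

	int main(void)
	{
		struct mock_pt_regs frame = { 0 }, fixed;

		fixup_esp(&fixed, &frame);
		printf("frame at %p, recovered esp %#lx (offset %zu)\n",
		       (void *)&frame, fixed.esp,
		       offsetof(struct mock_pt_regs, esp));
		return 0;
	}

This is also why the patch can hand machine_crash_shutdown() a fixed-up copy in crash_kexec(): the original frame stays intact for debugging while the copy carries a usable stack pointer.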