diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 991d5998d6be6711a68e15ab8347b110ade13dd3..fe56a23e1ff0c1b79d3d66f2c2cf89b08b5eb9d0 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -240,6 +240,12 @@ struct machdep_calls {
 	 * claims to support kexec.
 	 */
 	int (*machine_kexec_prepare)(struct kimage *image);
+
+	/* Called to perform the _real_ kexec.
+	 * Do NOT allocate memory or fail here. We are past the point of
+	 * no return.
+	 */
+	void (*machine_kexec)(struct kimage *image);
 #endif /* CONFIG_KEXEC */
 
 #ifdef CONFIG_SUSPEND
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 49a170af81456ce722fa58cd70e5980370f5f70b..a5f8672eeff37d593c219ac7ac68a6296587b7f4 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image)
 
 	save_ftrace_enabled = __ftrace_enabled_save();
 
-	default_machine_kexec(image);
+	if (ppc_md.machine_kexec)
+		ppc_md.machine_kexec(image);
+	else
+		default_machine_kexec(image);
 
 	__ftrace_enabled_restore(save_ftrace_enabled);
 
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7a1d5cb76932312f7e15609fe5f84c077696eda3..8303a6c65ef7e85f230bac363bb722c49e460211 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
 			prime_debug_regs(new_thread);
 }
 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
 static void set_debug_reg_defaults(struct thread_struct *thread)
 {
 	if (thread->dabr) {
@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 		set_dabr(0);
 	}
 }
+#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
 
 int set_dabr(unsigned long dabr)
@@ -670,11 +672,11 @@ void flush_thread(void)
 {
 	discard_lazy_cpu_state();
 
-#ifdef CONFIG_HAVE_HW_BREAKPOINTS
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
 	flush_ptrace_hw_breakpoint(current);
-#else /* CONFIG_HAVE_HW_BREAKPOINTS */
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
 	set_debug_reg_defaults(&current->thread);
-#endif /* CONFIG_HAVE_HW_BREAKPOINTS */
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 }
 
 void
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 1ec06576f619bc8e3e73fecc9d04cabeb7263240..c14d09f614f362ef67e04744bc5db8f62b8947cc 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
  * neesd to be flushed. This function will either perform the flush
  * immediately or will batch it up if the current CPU has an active
  * batch on it.
- *
- * Must be called from within some kind of spinlock/non-preempt region...
  */
 void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, unsigned long pte, int huge)
 {
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
 	unsigned long vsid, vaddr;
 	unsigned int psize;
 	int ssize;
@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	 */
 	if (!batch->active) {
 		flush_hash_page(vaddr, rpte, psize, ssize, 0);
+		put_cpu_var(ppc64_tlb_batch);
 		return;
 	}
 
@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	batch->index = ++i;
 	if (i >= PPC64_TLB_BATCH_NR)
 		__flush_tlb_pending(batch);
+	put_cpu_var(ppc64_tlb_batch);
 }
 
 /*
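---

A note on the new machdep hook (commentary, not part of the patch): a
platform that wants to take over the final kexec step sets
ppc_md.machine_kexec, and the machine_kexec() wrapper above now prefers
it over default_machine_kexec().  A minimal sketch of the wiring is
below; "myplat" and myplat_machine_kexec() are made-up names for
illustration, not code from this series.

	#include <linux/kexec.h>
	#include <asm/machdep.h>
	#include <asm/kexec.h>

	#ifdef CONFIG_KEXEC
	/* Runs in place of default_machine_kexec().  Per the comment
	 * added to machdep.h, this is past the point of no return: it
	 * must not fail, sleep or allocate memory. */
	static void myplat_machine_kexec(struct kimage *image)
	{
		/* platform-specific quiescing would go here, then hand
		 * off to the generic implementation */
		default_machine_kexec(image);
	}
	#endif

	define_machine(myplat) {
		.name		= "My Platform",
	#ifdef CONFIG_KEXEC
		.machine_kexec	= myplat_machine_kexec,
	#endif
	};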
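A note on the tlb_hash64.c change (commentary, not part of the patch):
hpte_need_flush() can now be reached with preemption enabled, so the
bare __get_cpu_var() access is replaced by the balanced
get_cpu_var()/put_cpu_var() pair, which disables preemption for the
duration of the batch update -- including on the early !batch->active
return, hence the extra put_cpu_var() there.  A minimal sketch of the
pattern, using a hypothetical per-CPU counter in place of the TLB
batch:

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, my_counter);

	static void bump_my_counter(void)
	{
		/* get_cpu_var() disables preemption before handing out
		 * the per-CPU address, so the task cannot migrate to
		 * another CPU in the middle of the update */
		unsigned long *p = &get_cpu_var(my_counter);

		(*p)++;

		/* every exit path must balance it with put_cpu_var(),
		 * which re-enables preemption */
		put_cpu_var(my_counter);
	}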