Commit 9778b696, authored by Stuart Yoder, committed by Benjamin Herrenschmidt

powerpc: Use CURRENT_THREAD_INFO instead of open coded assembly

Signed-off-by: Stuart Yoder <stuart.yoder@freescale.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Parent db911217
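Background (a sketch, not part of the original commit message): the new helper simply centralizes the stack-pointer masking idiom that this patch removes from each call site. Below is the macro added in the header hunk that defines THREAD_SIZE, reproduced with explanatory comments that are mine rather than the author's, assuming the usual powerpc layout where struct thread_info sits at the base of a THREAD_SIZE-aligned kernel stack:

/*
 * CURRENT_THREAD_INFO(dest, sp): round the stack pointer in "sp" down to a
 * THREAD_SIZE boundary and put the result in "dest".  Because the kernel
 * stack is THREAD_SIZE (1 << THREAD_SHIFT) aligned and thread_info lives at
 * its base, the result is the current thread_info pointer.
 */
#ifdef CONFIG_PPC64
/* clrrdi clears the low THREAD_SHIFT bits of a 64-bit register */
#define CURRENT_THREAD_INFO(dest, sp) clrrdi dest, sp, THREAD_SHIFT
#else
/* rlwinm with mask bits 0..(31-THREAD_SHIFT) clears the same low bits on 32-bit */
#define CURRENT_THREAD_INFO(dest, sp) rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
#endif

A typical call site therefore changes from the open coded clrrdi r9,r1,THREAD_SHIFT (64-bit) or rlwinm r9,r1,0,0,31-THREAD_SHIFT (32-bit) to CURRENT_THREAD_INFO(r9, r1), as the hunks below show.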
@@ -293,7 +293,7 @@ label##_hv: \
 #define RUNLATCH_ON \
 BEGIN_FTR_SECTION \
-clrrdi r3,r1,THREAD_SHIFT; \
+CURRENT_THREAD_INFO(r3, r1); \
 ld r4,TI_LOCAL_FLAGS(r3); \
 andi. r0,r4,_TLF_RUNLATCH; \
 beql ppc64_runlatch_on_trampoline; \
@@ -332,7 +332,7 @@ label##_common: \
 #ifdef CONFIG_PPC_970_NAP
 #define FINISH_NAP \
 BEGIN_FTR_SECTION \
-clrrdi r11,r1,THREAD_SHIFT; \
+CURRENT_THREAD_INFO(r11, r1); \
 ld r9,TI_LOCAL_FLAGS(r11); \
 andi. r10,r9,_TLF_NAPPING; \
 bnel power4_fixup_nap; \
......
@@ -22,6 +22,12 @@
 #define THREAD_SIZE (1 << THREAD_SHIFT)
 
+#ifdef CONFIG_PPC64
+#define CURRENT_THREAD_INFO(dest, sp) clrrdi dest, sp, THREAD_SHIFT
+#else
+#define CURRENT_THREAD_INFO(dest, sp) rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
+#endif
+
 #ifndef __ASSEMBLY__
 #include <linux/cache.h>
 #include <asm/processor.h>
......
@@ -92,7 +92,7 @@ crit_transfer_to_handler:
 mfspr r8,SPRN_SPRG_THREAD
 lwz r0,KSP_LIMIT(r8)
 stw r0,SAVED_KSP_LIMIT(r11)
-rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
+CURRENT_THREAD_INFO(r0, r1)
 stw r0,KSP_LIMIT(r8)
 /* fall through */
 #endif
@@ -112,7 +112,7 @@ crit_transfer_to_handler:
 mfspr r8,SPRN_SPRG_THREAD
 lwz r0,KSP_LIMIT(r8)
 stw r0,saved_ksp_limit@l(0)
-rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
+CURRENT_THREAD_INFO(r0, r1)
 stw r0,KSP_LIMIT(r8)
 /* fall through */
 #endif
@@ -158,7 +158,7 @@ transfer_to_handler:
 tophys(r11,r11)
 addi r11,r11,global_dbcr0@l
 #ifdef CONFIG_SMP
-rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
+CURRENT_THREAD_INFO(r9, r1)
 lwz r9,TI_CPU(r9)
 slwi r9,r9,3
 add r11,r11,r9
@@ -179,7 +179,7 @@ transfer_to_handler:
 ble- stack_ovf /* then the kernel stack overflowed */
 5:
 #if defined(CONFIG_6xx) || defined(CONFIG_E500)
-rlwinm r9,r1,0,0,31-THREAD_SHIFT
+CURRENT_THREAD_INFO(r9, r1)
 tophys(r9,r9) /* check local flags */
 lwz r12,TI_LOCAL_FLAGS(r9)
 mtcrf 0x01,r12
@@ -333,7 +333,7 @@ _GLOBAL(DoSyscall)
 mtmsr r11
 1:
 #endif /* CONFIG_TRACE_IRQFLAGS */
-rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
+CURRENT_THREAD_INFO(r10, r1)
 lwz r11,TI_FLAGS(r10)
 andi. r11,r11,_TIF_SYSCALL_T_OR_A
 bne- syscall_dotrace
@@ -354,7 +354,7 @@ ret_from_syscall:
 bl do_show_syscall_exit
 #endif
 mr r6,r3
-rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
+CURRENT_THREAD_INFO(r12, r1)
 /* disable interrupts so current_thread_info()->flags can't change */
 LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
 /* Note: We don't bother telling lockdep about it */
@@ -815,7 +815,7 @@ ret_from_except:
 user_exc_return: /* r10 contains MSR_KERNEL here */
 /* Check current_thread_info()->flags */
-rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
+CURRENT_THREAD_INFO(r9, r1)
 lwz r9,TI_FLAGS(r9)
 andi. r0,r9,_TIF_USER_WORK_MASK
 bne do_work
@@ -835,7 +835,7 @@ restore_user:
 /* N.B. the only way to get here is from the beq following ret_from_except. */
 resume_kernel:
 /* check current_thread_info->preempt_count */
-rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
+CURRENT_THREAD_INFO(r9, r1)
 lwz r0,TI_PREEMPT(r9)
 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
 bne restore
@@ -852,7 +852,7 @@ resume_kernel:
 bl trace_hardirqs_off
 #endif
 1: bl preempt_schedule_irq
-rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
+CURRENT_THREAD_INFO(r9, r1)
 lwz r3,TI_FLAGS(r9)
 andi. r0,r3,_TIF_NEED_RESCHED
 bne- 1b
@@ -1122,7 +1122,7 @@ ret_from_debug_exc:
 lwz r10,SAVED_KSP_LIMIT(r1)
 stw r10,KSP_LIMIT(r9)
 lwz r9,THREAD_INFO-THREAD(r9)
-rlwinm r10,r1,0,0,(31-THREAD_SHIFT)
+CURRENT_THREAD_INFO(r10, r1)
 lwz r10,TI_PREEMPT(r10)
 stw r10,TI_PREEMPT(r9)
 RESTORE_xSRR(SRR0,SRR1);
@@ -1156,7 +1156,7 @@ load_dbcr0:
 lis r11,global_dbcr0@ha
 addi r11,r11,global_dbcr0@l
 #ifdef CONFIG_SMP
-rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
+CURRENT_THREAD_INFO(r9, r1)
 lwz r9,TI_CPU(r9)
 slwi r9,r9,3
 add r11,r11,r9
@@ -1197,7 +1197,7 @@ recheck:
 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 SYNC
 MTMSRD(r10) /* disable interrupts */
-rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
+CURRENT_THREAD_INFO(r9, r1)
 lwz r9,TI_FLAGS(r9)
 andi. r0,r9,_TIF_NEED_RESCHED
 bne- do_resched
......
@@ -146,7 +146,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 REST_2GPRS(7,r1)
 addi r9,r1,STACK_FRAME_OVERHEAD
 #endif
-clrrdi r11,r1,THREAD_SHIFT
+CURRENT_THREAD_INFO(r11, r1)
 ld r10,TI_FLAGS(r11)
 andi. r11,r10,_TIF_SYSCALL_T_OR_A
 bne- syscall_dotrace
@@ -181,7 +181,7 @@ syscall_exit:
 bl .do_show_syscall_exit
 ld r3,RESULT(r1)
 #endif
-clrrdi r12,r1,THREAD_SHIFT
+CURRENT_THREAD_INFO(r12, r1)
 ld r8,_MSR(r1)
 #ifdef CONFIG_PPC_BOOK3S
@@ -260,7 +260,7 @@ syscall_dotrace:
 ld r7,GPR7(r1)
 ld r8,GPR8(r1)
 addi r9,r1,STACK_FRAME_OVERHEAD
-clrrdi r10,r1,THREAD_SHIFT
+CURRENT_THREAD_INFO(r10, r1)
 ld r10,TI_FLAGS(r10)
 b .Lsyscall_dotrace_cont
@@ -500,7 +500,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 2:
 #endif /* !CONFIG_PPC_BOOK3S */
-clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
+CURRENT_THREAD_INFO(r7, r8) /* base of new stack */
 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
    because we don't need to leave the 288-byte ABI gap at the
    top of the kernel stack. */
@@ -559,7 +559,7 @@ _GLOBAL(ret_from_except_lite)
 mtmsrd r10,1 /* Update machine state */
 #endif /* CONFIG_PPC_BOOK3E */
-clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
+CURRENT_THREAD_INFO(r9, r1)
 ld r3,_MSR(r1)
 ld r4,TI_FLAGS(r9)
 andi. r3,r3,MSR_PR
@@ -602,7 +602,7 @@ resume_kernel:
 1: bl .preempt_schedule_irq
 /* Re-test flags and eventually loop */
-clrrdi r9,r1,THREAD_SHIFT
+CURRENT_THREAD_INFO(r9, r1)
 ld r4,TI_FLAGS(r9)
 andi. r0,r4,_TIF_NEED_RESCHED
 bne 1b
......
@@ -222,7 +222,7 @@ exc_##n##_bad_stack: \
  * interrupts happen before the wait instruction.
  */
 #define CHECK_NAPPING() \
-clrrdi r11,r1,THREAD_SHIFT; \
+CURRENT_THREAD_INFO(r11, r1); \
 ld r10,TI_LOCAL_FLAGS(r11); \
 andi. r9,r10,_TLF_NAPPING; \
 beq+ 1f; \
......
@@ -851,7 +851,7 @@ BEGIN_FTR_SECTION
 bne- do_ste_alloc /* If so handle it */
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
-clrrdi r11,r1,THREAD_SHIFT
+CURRENT_THREAD_INFO(r11, r1)
 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
 bne 77f /* then don't call hash_page now */
......
@@ -192,7 +192,7 @@ _ENTRY(__early_start)
 li r0,0
 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
-rlwinm r22,r1,0,0,31-THREAD_SHIFT /* current thread_info */
+CURRENT_THREAD_INFO(r22, r1)
 stw r24, TI_CPU(r22)
 bl early_init
......
@@ -135,7 +135,7 @@ BEGIN_FTR_SECTION
 DSSALL
 sync
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-rlwinm r9,r1,0,0,31-THREAD_SHIFT /* current thread_info */
+CURRENT_THREAD_INFO(r9, r1)
 lwz r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
 ori r8,r8,_TLF_NAPPING /* so when we take an exception */
 stw r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
@@ -158,7 +158,7 @@ _GLOBAL(power_save_ppc32_restore)
 stw r9,_NIP(r11) /* make it do a blr */
 #ifdef CONFIG_SMP
-rlwinm r12,r11,0,0,31-THREAD_SHIFT
+CURRENT_THREAD_INFO(r12, r11)
 lwz r11,TI_CPU(r12) /* get cpu number * 4 */
 slwi r11,r11,2
 #else
......
@@ -60,7 +60,7 @@ _GLOBAL(book3e_idle)
 1: /* Let's set the _TLF_NAPPING flag so interrupts make us return
    * to the right spot
    */
-clrrdi r11,r1,THREAD_SHIFT
+CURRENT_THREAD_INFO(r11, r1)
 ld r10,TI_LOCAL_FLAGS(r11)
 ori r10,r10,_TLF_NAPPING
 std r10,TI_LOCAL_FLAGS(r11)
......
@@ -21,7 +21,7 @@
 .text
 _GLOBAL(e500_idle)
-rlwinm r3,r1,0,0,31-THREAD_SHIFT /* current thread_info */
+CURRENT_THREAD_INFO(r3, r1)
 lwz r4,TI_LOCAL_FLAGS(r3) /* set napping bit */
 ori r4,r4,_TLF_NAPPING /* so when we take an exception */
 stw r4,TI_LOCAL_FLAGS(r3) /* it will return to our caller */
@@ -96,7 +96,7 @@ _GLOBAL(power_save_ppc32_restore)
 stw r9,_NIP(r11) /* make it do a blr */
 #ifdef CONFIG_SMP
-rlwinm r12,r1,0,0,31-THREAD_SHIFT
+CURRENT_THREAD_INFO(r12, r1)
 lwz r11,TI_CPU(r12) /* get cpu number * 4 */
 slwi r11,r11,2
 #else
......
@@ -59,7 +59,7 @@ BEGIN_FTR_SECTION
 DSSALL
 sync
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-clrrdi r9,r1,THREAD_SHIFT /* current thread_info */
+CURRENT_THREAD_INFO(r9, r1)
 ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
 ori r8,r8,_TLF_NAPPING /* so when we take an exception */
 std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
......
@@ -179,7 +179,7 @@ _GLOBAL(low_choose_750fx_pll)
 mtspr SPRN_HID1,r4
 /* Store new HID1 image */
-rlwinm r6,r1,0,0,(31-THREAD_SHIFT)
+CURRENT_THREAD_INFO(r6, r1)
 lwz r6,TI_CPU(r6)
 slwi r6,r6,2
 addis r6,r6,nap_save_hid1@ha
@@ -699,7 +699,7 @@ _GLOBAL(kernel_thread)
 #ifdef CONFIG_SMP
 _GLOBAL(start_secondary_resume)
 /* Reset stack */
-rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
+CURRENT_THREAD_INFO(r1, r1)
 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
 li r3,0
 stw r3,0(r1) /* Zero the stack frame pointer */
......
@@ -160,11 +160,7 @@
 mtspr SPRN_EPLC, r8
 /* disable preemption, so we are sure we hit the fixup handler */
-#ifdef CONFIG_PPC64
-clrrdi r8,r1,THREAD_SHIFT
-#else
-rlwinm r8,r1,0,0,31-THREAD_SHIFT /* current thread_info */
-#endif
+CURRENT_THREAD_INFO(r8, r1)
 li r7, 1
 stw r7, TI_PREEMPT(r8)
......
@@ -184,7 +184,7 @@ _GLOBAL(add_hash_page)
 add r3,r3,r0 /* note create_hpte trims to 24 bits */
 #ifdef CONFIG_SMP
-rlwinm r8,r1,0,0,(31-THREAD_SHIFT) /* use cpu number to make tag */
+CURRENT_THREAD_INFO(r8, r1) /* use cpu number to make tag */
 lwz r8,TI_CPU(r8) /* to go in mmu_hash_lock */
 oris r8,r8,12
 #endif /* CONFIG_SMP */
@@ -545,7 +545,7 @@ _GLOBAL(flush_hash_pages)
 #ifdef CONFIG_SMP
 addis r9,r7,mmu_hash_lock@ha
 addi r9,r9,mmu_hash_lock@l
-rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
+CURRENT_THREAD_INFO(r8, r1)
 add r8,r8,r7
 lwz r8,TI_CPU(r8)
 oris r8,r8,9
@@ -639,7 +639,7 @@ _GLOBAL(flush_hash_patch_B)
  */
 _GLOBAL(_tlbie)
 #ifdef CONFIG_SMP
-rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
+CURRENT_THREAD_INFO(r8, r1)
 lwz r8,TI_CPU(r8)
 oris r8,r8,11
 mfmsr r10
@@ -677,7 +677,7 @@ _GLOBAL(_tlbie)
  */
 _GLOBAL(_tlbia)
 #if defined(CONFIG_SMP)
-rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
+CURRENT_THREAD_INFO(r8, r1)
 lwz r8,TI_CPU(r8)
 oris r8,r8,10
 mfmsr r10
......
@@ -29,7 +29,7 @@ _GLOBAL(mpc6xx_enter_standby)
 ori r5, r5, ret_from_standby@l
 mtlr r5
-rlwinm r5, r1, 0, 0, 31-THREAD_SHIFT
+CURRENT_THREAD_INFO(r5, r1)
 lwz r6, TI_LOCAL_FLAGS(r5)
 ori r6, r6, _TLF_SLEEPING
 stw r6, TI_LOCAL_FLAGS(r5)
......