diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 9ef28da2c7fe3fb8ef3b0fe3cb7265ba91166c01..952eba6701f404c2d8ec1a69b2a6595d26250f94 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -389,8 +389,11 @@ BEGIN_FTR_SECTION
 	ld	r9,PACA_SLBSHADOWPTR(r13)
 	li	r12,0
 	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
+	eieio
 	std	r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */
+	eieio
 	std	r0,SLBSHADOW_STACKESID(r9) /* Save ESID */
+	eieio
 
 	slbie	r6
 	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index bc7b0cedae5e555ab711d2d5099059070766de17..f1789578747aa340c7cf3cc7cc006f3f8ef39a98 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -759,7 +759,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		    mmu_psize_defs[mmu_vmalloc_psize].sllp) {
 			get_paca()->vmalloc_sllp =
 				mmu_psize_defs[mmu_vmalloc_psize].sllp;
-			slb_flush_and_rebolt();
+			slb_vmalloc_update();
 		}
 #endif /* CONFIG_PPC_64K_PAGES */
 
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 304375a735747cfc868f67cfba4fc6715809b6c2..b0697017d0e8dcd10e8f80bf9e817cf53dc92f89 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -53,7 +53,8 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
 	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
 }
 
-static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
+static inline void slb_shadow_update(unsigned long ea,
+				     unsigned long flags,
 				     unsigned long entry)
 {
 	/*
@@ -61,11 +62,11 @@ static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
 	 * updating it.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	barrier();
-	get_slb_shadow()->save_area[entry].vsid = vsid;
-	barrier();
-	get_slb_shadow()->save_area[entry].esid = esid;
-
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
+	smp_wmb();
 }
 
 static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
@@ -76,8 +77,7 @@ static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
 	 * we don't get a stale entry here if we get preempted by PHYP
 	 * between these two statements.
 	 */
-	slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
-			  entry);
+	slb_shadow_update(ea, flags, entry);
 
 	asm volatile("slbmte %0,%1" :
 		     : "r" (mk_vsid_data(ea, flags)),
@@ -104,8 +104,7 @@ void slb_flush_and_rebolt(void)
 		ksp_esid_data &= ~SLB_ESID_V;
 
 	/* Only third entry (stack) may change here so only resave that */
-	slb_shadow_update(ksp_esid_data,
-			  mk_vsid_data(ksp_esid_data, lflags), 2);
+	slb_shadow_update(get_paca()->kstack, lflags, 2);
 
 	/* We need to do this all in asm, so we're sure we don't touch
 	 * the stack between the slbia and rebolting it. */
@@ -123,6 +122,15 @@ void slb_flush_and_rebolt(void)
 		     : "memory");
 }
 
+void slb_vmalloc_update(void)
+{
+	unsigned long vflags;
+
+	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	slb_shadow_update(VMALLOC_START, vflags, 1);
+	slb_flush_and_rebolt();
+}
+
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index 695962f020597bc5f668ca5c5d06e272eaa0a4f6..3112ad14ad9504b699f6c4e65cc41d4a76dc332d 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -262,6 +262,7 @@ extern void slb_initialize(void);
 extern void slb_flush_and_rebolt(void);
 extern void stab_initialize(unsigned long stab);
 
+extern void slb_vmalloc_update(void);
 #endif /* __ASSEMBLY__ */
 
 /*
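
A note on the update ordering the patch enforces, with a minimal C sketch. The SLB shadow buffer can be read by the hypervisor (PHYP) at any moment, so an entry must never look valid while its VSID is stale. Both the assembly path (the three eieio additions in entry_64.S) and slb_shadow_update() therefore follow the same sequence: clear the ESID, write the new VSID, write the new ESID, with a store barrier between each step; a compiler-only barrier() does not order the stores in hardware, hence smp_wmb() (which is eieio on this vintage of 64-bit powerpc). The sketch below restates that pattern outside the kernel: struct shadow_slbe, shadow_update() and store_barrier() are hypothetical stand-ins for the real paca/slb_shadow structures rather than kernel APIs, and the eieio asm assumes a powerpc build with a GCC-compatible compiler.

#include <stdint.h>

/* Hypothetical stand-in for one SLB shadow save-area entry. */
struct shadow_slbe {
	volatile uint64_t esid;	/* entry is valid only while the valid bit is set here */
	volatile uint64_t vsid;
};

/* powerpc store barrier, as the patch uses via smp_wmb()/eieio. */
#define store_barrier()	asm volatile("eieio" : : : "memory")

static void shadow_update(struct shadow_slbe *e,
			  uint64_t new_esid, uint64_t new_vsid)
{
	e->esid = 0;		/* 1. invalidate: a concurrent reader now skips this entry */
	store_barrier();	/* invalidation visible before the VSID changes */
	e->vsid = new_vsid;	/* 2. install the new VSID while the entry is invalid */
	store_barrier();	/* new VSID visible before the entry revalidates */
	e->esid = new_esid;	/* 3. revalidate with the new ESID */
	store_barrier();	/* entry complete before any later shadow stores */
}

The final barrier mirrors the trailing smp_wmb() the patch adds in place of the old blank line: it keeps the completed entry ordered ahead of whatever shadow update comes next.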