diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 952eba6701f404c2d8ec1a69b2a6595d26250f94..fbbd3f6f006453dc217dfa8e818a1860156edbc1 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -385,15 +385,15 @@ BEGIN_FTR_SECTION
 	oris	r0,r6,(SLB_ESID_V)@h
 	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
 
-	/* Update the last bolted SLB */
+	/* Update the last bolted SLB. No write barriers are needed
+	 * here, provided we only update the current CPU's SLB shadow
+	 * buffer.
+	 */
 	ld	r9,PACA_SLBSHADOWPTR(r13)
 	li	r12,0
 	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
-	eieio
 	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
-	eieio
 	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */
-	eieio
 
 	slbie	r6
 	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index ff1811ac6c81e837be9c7c5072a364bb32d5d877..4bee1cfa9dea553b49a50f513fb8f768fdf61640 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -59,14 +59,12 @@ static inline void slb_shadow_update(unsigned long ea,
 {
 	/*
 	 * Clear the ESID first so the entry is not valid while we are
-	 * updating it.
+	 * updating it. No write barriers are needed here, provided
+	 * we only update the current CPU's SLB shadow buffer.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	smp_wmb();
 	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
-	smp_wmb();
 	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
-	smp_wmb();
 }
 
 static inline void slb_shadow_clear(unsigned long entry)