提交 71bf08b6 编写于 作者: L Luke Browning 提交者: Paul Mackerras

[POWERPC] 64K page support for kexec

This fixes a couple of kexec problems related to 64K page
support in the kernel.  kexec issues a tlbie for each pte.  The
parameters for the tlbie are the page size and the virtual address.
Support was missing for the computation of these two parameters
for 64K pages.  This adds that support.
Signed-off-by: Luke Browning <lukebrowning@us.ibm.com>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
上级 9f90b997
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <asm/tlb.h> #include <asm/tlb.h>
#include <asm/cputable.h> #include <asm/cputable.h>
#include <asm/udbg.h> #include <asm/udbg.h>
#include <asm/kexec.h>
#ifdef DEBUG_LOW #ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt) #define DBG_LOW(fmt...) udbg_printf(fmt)
...@@ -340,31 +341,70 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va, ...@@ -340,31 +341,70 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
local_irq_restore(flags); local_irq_restore(flags);
} }
/*
 * LP (Large Page) encoding: when HPTE_V_LARGE is set, the low-order
 * bits of the HPTE's second doubleword carry a variable-length "penc"
 * value that identifies the actual page size.  LP_MASK(i) selects the
 * top i bits of that field.
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

/*
 * hpte_decode - recover the page size and virtual address from a HPTE.
 * @hpte:  hash page table entry to decode
 * @slot:  the entry's slot index in the hash table; used together with
 *         the AVPN to invert the hash and reconstruct the low VA bits
 * @psize: out: MMU_PAGE_* index of the decoded page size
 * @va:    out: virtual address the entry maps
 *
 * Used by native_hpte_clear() on the kexec path, where every valid
 * entry must be invalidated with a tlbie carrying the correct
 * (va, page size) pair.
 */
static void hpte_decode(hpte_t *hpte, unsigned long slot,
			int *psize, unsigned long *va)
{
	unsigned long hpte_r = hpte->r;
	unsigned long hpte_v = hpte->v;
	unsigned long avpn;
	int i, size, shift, penc, avpnm_bits;

	if (!(hpte_v & HPTE_V_LARGE))
		size = MMU_PAGE_4K;
	else {
		/*
		 * Find the length of the LP field: it is terminated by the
		 * first 0 bit scanning from the top, so the smallest i for
		 * which all of LP_MASK(i+1) is set gives the boundary.
		 */
		for (i = 0; i < LP_BITS; i++) {
			if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
				break;
		}

		penc = LP_MASK(i+1) >> LP_SHIFT;

		/* Match the penc value against the known page-size table. */
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* 4K pages are not represented by LP */
			if (size == MMU_PAGE_4K)
				continue;

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			if (penc == mmu_psize_defs[size].penc)
				break;
		}
	}

	/*
	 * FIXME, the code below works for 16M, 64K, and 4K pages as these
	 * fall under the p<=23 rules for calculating the virtual address.
	 * In the case of 16M pages, an extra bit is stolen from the AVPN
	 * field to achieve the requisite 24 bits.
	 *
	 * Does not work for 16G pages or 1 TB segments.
	 */
	shift = mmu_psize_defs[size].shift;
	if (mmu_psize_defs[size].avpnm)
		avpnm_bits = __ilog2_u64(mmu_psize_defs[size].avpnm) + 1;
	else
		avpnm_bits = 0;
	if (shift - avpnm_bits <= 23) {
		avpn = HPTE_V_AVPN_VAL(hpte_v) << 23;

		if (shift < 23) {
			unsigned long vpi, pteg;

			/*
			 * Invert the hash: slot identifies the PTEG, and
			 * (VA >> 28) ^ pteg yielded the hash, so xor the
			 * (possibly complemented, for secondary) PTEG back
			 * out to recover the VPI bits below the AVPN.
			 */
			pteg = slot / HPTES_PER_GROUP;
			if (hpte_v & HPTE_V_SECONDARY)
				pteg = ~pteg;
			vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask;
			avpn |= (vpi << mmu_psize_defs[size].shift);
		}
	}

	*va = avpn;
	*psize = size;
}
/* /*
...@@ -374,15 +414,14 @@ static unsigned long slot2va(unsigned long hpte_v, unsigned long slot) ...@@ -374,15 +414,14 @@ static unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
* *
* TODO: add batching support when enabled. remember, no dynamic memory here, * TODO: add batching support when enabled. remember, no dynamic memory here,
* athough there is the control page available... * athough there is the control page available...
*
* XXX FIXME: 4k only for now !
*/ */
static void native_hpte_clear(void) static void native_hpte_clear(void)
{ {
unsigned long slot, slots, flags; unsigned long slot, slots, flags;
hpte_t *hptep = htab_address; hpte_t *hptep = htab_address;
unsigned long hpte_v; unsigned long hpte_v, va;
unsigned long pteg_count; unsigned long pteg_count;
int psize;
pteg_count = htab_hash_mask + 1; pteg_count = htab_hash_mask + 1;
...@@ -408,8 +447,9 @@ static void native_hpte_clear(void) ...@@ -408,8 +447,9 @@ static void native_hpte_clear(void)
* already hold the native_tlbie_lock. * already hold the native_tlbie_lock.
*/ */
if (hpte_v & HPTE_V_VALID) { if (hpte_v & HPTE_V_VALID) {
hpte_decode(hptep, slot, &psize, &va);
hptep->v = 0; hptep->v = 0;
__tlbie(slot2va(hpte_v, slot), MMU_PAGE_4K); __tlbie(va, psize);
} }
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册