Commit d4c430a8 authored by Paul Brook

Large page TLB flush

QEMU uses a fixed page size for the CPU TLB.  If the guest uses large
pages then we effectively split these into multiple smaller pages, and
populate the corresponding TLB entries on demand.

When the guest invalidates the TLB by virtual address we must invalidate
all entries covered by the large page.  However the address used to
invalidate the entry may not be present in the QEMU TLB, so we do not
know which regions to clear.

Implementing a full variable size TLB is hard and slow, so just keep a
simple address/mask pair to record which addresses may have been mapped by
large pages.  If the guest invalidates this region then flush the
whole TLB.
Signed-off-by: Paul Brook <paul@codesourcery.com>
Parent 409dbce5
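As a rough illustration of the address/mask scheme described in the commit message, the standalone C sketch below mimics the tracking and the flush decision. It mirrors the tlb_add_large_page / tlb_flush_page logic added by this patch; the target_ulong typedef, the file-scope variables and the main() driver are stand-ins for demonstration only, not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the per-CPU fields this patch adds (tlb_flush_addr/mask). */
typedef uint32_t target_ulong;

static target_ulong tlb_flush_addr = (target_ulong)-1;
static target_ulong tlb_flush_mask;

/* Record that a large page of 'size' bytes was mapped at 'vaddr'.
   The tracked region is widened until it covers every large page seen,
   at the cost of over-approximating the area that forces a full flush. */
static void track_large_page(target_ulong vaddr, target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (tlb_flush_addr == (target_ulong)-1) {
        /* First large page: remember it exactly. */
        tlb_flush_addr = vaddr & mask;
        tlb_flush_mask = mask;
        return;
    }
    /* Widen the mask until the old and new addresses fall in the same region. */
    mask &= tlb_flush_mask;
    while (((tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    tlb_flush_addr &= mask;
    tlb_flush_mask = mask;
}

/* A per-page invalidation must become a full flush if it may hit the region. */
static int needs_full_flush(target_ulong addr)
{
    return (addr & tlb_flush_mask) == tlb_flush_addr;
}

int main(void)
{
    track_large_page(0x00100000, 0x100000);   /* 1 MB section mapped at 1 MB */
    track_large_page(0x00300000, 0x100000);   /* 1 MB section mapped at 3 MB */
    /* The tracked region now covers 0x000000-0x3fffff. */
    printf("flush 0x00250000 -> %s\n",
           needs_full_flush(0x00250000) ? "full TLB flush" : "single page");
    printf("flush 0x10000000 -> %s\n",
           needs_full_flush(0x10000000) ? "full TLB flush" : "single page");
    return 0;
}

Running the sketch shows that after the two 1 MB sections the region widens to 0x000000-0x3fffff, so a page invalidation at 0x00250000 escalates to a full flush while one at 0x10000000 stays a single-page flush, matching the behaviour the patch gives tlb_flush_page.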
@@ -111,6 +111,8 @@ typedef struct CPUTLBEntry {
     /* The meaning of the MMU modes is defined in the target code. */  \
     CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                 \
     target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE];              \
+    target_ulong tlb_flush_addr;                                       \
+    target_ulong tlb_flush_mask;
 #else
......
@@ -96,17 +96,9 @@ void tb_invalidate_page_range(target_ulong start, target_ulong end);
 void tlb_flush_page(CPUState *env, target_ulong addr);
 void tlb_flush(CPUState *env, int flush_global);
 #if !defined(CONFIG_USER_ONLY)
-int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
-                      target_phys_addr_t paddr, int prot,
-                      int mmu_idx, int is_softmmu);
-static inline int tlb_set_page(CPUState *env1, target_ulong vaddr,
-                               target_phys_addr_t paddr, int prot,
-                               int mmu_idx, int is_softmmu)
-{
-    if (prot & PAGE_READ)
-        prot |= PAGE_EXEC;
-    return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu);
-}
+void tlb_set_page(CPUState *env, target_ulong vaddr,
+                  target_phys_addr_t paddr, int prot,
+                  int mmu_idx, target_ulong size);
 #endif
 #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
......
@@ -1918,6 +1918,8 @@ void tlb_flush(CPUState *env, int flush_global)
     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
+    env->tlb_flush_addr = -1;
+    env->tlb_flush_mask = 0;
     tlb_flush_count++;
 }
@@ -1941,6 +1943,16 @@ void tlb_flush_page(CPUState *env, target_ulong addr)
 #if defined(DEBUG_TLB)
     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
 #endif
+    /* Check if we need to flush due to large pages. */
+    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
+#if defined(DEBUG_TLB)
+        printf("tlb_flush_page: forced full flush ("
+               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+               env->tlb_flush_addr, env->tlb_flush_mask);
+#endif
+        tlb_flush(env, 1);
+        return;
+    }
     /* must reset current TB so that interrupts cannot modify the
        links while we are modifying them */
     env->current_tb = NULL;
@@ -2090,13 +2102,35 @@ static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
         tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
 }
-/* add a new TLB entry. At most one entry for a given virtual address
-   is permitted. Return 0 if OK or 2 if the page could not be mapped
-   (can only happen in non SOFTMMU mode for I/O pages or pages
-   conflicting with the host address space). */
-int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
-                      target_phys_addr_t paddr, int prot,
-                      int mmu_idx, int is_softmmu)
+/* Our TLB does not support large pages, so remember the area covered by
+   large pages and trigger a full TLB flush if these are invalidated. */
+static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
+                               target_ulong size)
+{
+    target_ulong mask = ~(size - 1);
+
+    if (env->tlb_flush_addr == (target_ulong)-1) {
+        env->tlb_flush_addr = vaddr & mask;
+        env->tlb_flush_mask = mask;
+        return;
+    }
+    /* Extend the existing region to include the new page.
+       This is a compromise between unnecessary flushes and the cost
+       of maintaining a full variable size TLB. */
+    mask &= env->tlb_flush_mask;
+    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
+        mask <<= 1;
+    }
+    env->tlb_flush_addr &= mask;
+    env->tlb_flush_mask = mask;
+}
+
+/* Add a new TLB entry. At most one entry for a given virtual address
+   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
+   supplied size is only used by tlb_flush_page. */
+void tlb_set_page(CPUState *env, target_ulong vaddr,
+                  target_phys_addr_t paddr, int prot,
+                  int mmu_idx, target_ulong size)
 {
     PhysPageDesc *p;
     unsigned long pd;
@@ -2104,11 +2138,14 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
     target_ulong address;
     target_ulong code_address;
     target_phys_addr_t addend;
-    int ret;
     CPUTLBEntry *te;
     CPUWatchpoint *wp;
     target_phys_addr_t iotlb;
+    assert(size >= TARGET_PAGE_SIZE);
+    if (size != TARGET_PAGE_SIZE) {
+        tlb_add_large_page(env, vaddr, size);
+    }
     p = phys_page_find(paddr >> TARGET_PAGE_BITS);
     if (!p) {
         pd = IO_MEM_UNASSIGNED;
@@ -2120,7 +2157,6 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
            vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
 #endif
-    ret = 0;
     address = vaddr;
     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
         /* IO memory case (romd handled later) */
@@ -2190,7 +2226,6 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
     } else {
         te->addr_write = -1;
     }
-    return ret;
 }
 #else
......
@@ -1003,11 +1003,14 @@ int cpu_ppc_handle_mmu_fault (CPUState *env, uint32_t address, int rw,
             /* No fault */
             page_size = 1ULL << zbits;
             address &= ~(page_size - 1);
+            /* FIXME: page_size should probably be passed to tlb_set_page,
+               and this loop removed. */
             for (end = physical + page_size; physical < end; physical += 0x1000) {
-                ret = tlb_set_page(env, address, physical, prot,
-                                   mmu_idx, is_softmmu);
+                tlb_set_page(env, address, physical, prot, mmu_idx,
+                             TARGET_PAGE_SIZE);
                 address += 0x1000;
             }
+            ret = 0;
             break;
 #if 0
         case 1:
......
@@ -894,7 +894,8 @@ static uint32_t get_level1_table_address(CPUState *env, uint32_t address)
 }
 static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
-                            int is_user, uint32_t *phys_ptr, int *prot)
+                            int is_user, uint32_t *phys_ptr, int *prot,
+                            target_ulong *page_size)
 {
     int code;
     uint32_t table;
@@ -927,6 +928,7 @@ static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
         phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
         ap = (desc >> 10) & 3;
         code = 13;
+        *page_size = 1024 * 1024;
     } else {
         /* Lookup l2 entry. */
         if (type == 1) {
@@ -944,10 +946,12 @@ static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
         case 1: /* 64k page. */
             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
             ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
+            *page_size = 0x10000;
             break;
         case 2: /* 4k page. */
             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
             ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
+            *page_size = 0x1000;
             break;
         case 3: /* 1k page. */
             if (type == 1) {
@@ -962,6 +966,7 @@ static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
                 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
             }
             ap = (desc >> 4) & 3;
+            *page_size = 0x400;
             break;
         default:
             /* Never happens, but compiler isn't smart enough to tell. */
@@ -981,7 +986,8 @@ do_fault:
 }
 static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
-                            int is_user, uint32_t *phys_ptr, int *prot)
+                            int is_user, uint32_t *phys_ptr, int *prot,
+                            target_ulong *page_size)
 {
     int code;
     uint32_t table;
@@ -1021,9 +1027,11 @@ static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
         if (desc & (1 << 18)) {
             /* Supersection. */
             phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
+            *page_size = 0x1000000;
         } else {
             /* Section. */
             phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
+            *page_size = 0x100000;
         }
         ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
         xn = desc & (1 << 4);
@@ -1040,10 +1048,12 @@ static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
         case 1: /* 64k page. */
             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
             xn = desc & (1 << 15);
+            *page_size = 0x10000;
             break;
         case 2: case 3: /* 4k page. */
             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
             xn = desc & 1;
+            *page_size = 0x1000;
             break;
         default:
             /* Never happens, but compiler isn't smart enough to tell. */
@@ -1132,7 +1142,8 @@ static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
 static inline int get_phys_addr(CPUState *env, uint32_t address,
                                 int access_type, int is_user,
-                                uint32_t *phys_ptr, int *prot)
+                                uint32_t *phys_ptr, int *prot,
+                                target_ulong *page_size)
 {
     /* Fast Context Switch Extension. */
     if (address < 0x02000000)
@@ -1142,16 +1153,18 @@ static inline int get_phys_addr(CPUState *env, uint32_t address,
         /* MMU/MPU disabled. */
         *phys_ptr = address;
         *prot = PAGE_READ | PAGE_WRITE;
+        *page_size = TARGET_PAGE_SIZE;
         return 0;
     } else if (arm_feature(env, ARM_FEATURE_MPU)) {
+        *page_size = TARGET_PAGE_SIZE;
         return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                  prot);
     } else if (env->cp15.c1_sys & (1 << 23)) {
         return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
-                                prot);
+                                prot, page_size);
     } else {
         return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
-                                prot);
+                                prot, page_size);
     }
 }
@@ -1159,17 +1172,20 @@ int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
                               int access_type, int mmu_idx, int is_softmmu)
 {
     uint32_t phys_addr;
+    target_ulong page_size;
     int prot;
     int ret, is_user;
     is_user = mmu_idx == MMU_USER_IDX;
-    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
+    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
+                        &page_size);
     if (ret == 0) {
         /* Map a single [sub]page. */
         phys_addr &= ~(uint32_t)0x3ff;
         address &= ~(uint32_t)0x3ff;
-        return tlb_set_page (env, address, phys_addr, prot, mmu_idx,
-                             is_softmmu);
+        tlb_set_page (env, address, phys_addr, prot | PAGE_EXEC, mmu_idx,
+                      page_size);
+        return 0;
     }
     if (access_type == 2) {
@@ -1189,10 +1205,11 @@ int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
 {
     uint32_t phys_addr;
+    target_ulong page_size;
     int prot;
     int ret;
-    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);
+    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size);
     if (ret != 0)
         return -1;
@@ -1406,18 +1423,7 @@ void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
             tlb_flush(env, 0);
             break;
         case 1: /* Invalidate single TLB entry. */
-#if 0
-            /* ??? This is wrong for large pages and sections. */
-            /* As an ugly hack to make linux work we always flush a 4K
-               pages. */
-            val &= 0xfffff000;
-            tlb_flush_page(env, val);
-            tlb_flush_page(env, val + 0x400);
-            tlb_flush_page(env, val + 0x800);
-            tlb_flush_page(env, val + 0xc00);
-#else
-            tlb_flush(env, 1);
-#endif
+            tlb_flush_page(env, val & TARGET_PAGE_MASK);
             break;
         case 2: /* Invalidate on ASID. */
             tlb_flush(env, val == 0);
......
@@ -100,8 +100,9 @@ int cpu_cris_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                  */
                 phy = res.phy & ~0x80000000;
                 prot = res.prot;
-                r = tlb_set_page(env, address & TARGET_PAGE_MASK,
-                                 phy, prot, mmu_idx, is_softmmu);
+                tlb_set_page(env, address & TARGET_PAGE_MASK, phy,
+                             prot | PAGE_EXEC, mmu_idx, TARGET_PAGE_SIZE);
+                r = 0;
         }
         if (r > 0)
                 D_LOG("%s returns %d irqreq=%x addr=%x"
......
@@ -531,14 +531,13 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
    -1 = cannot handle fault
    0  = nothing more to do
    1  = generate PF fault
-   2  = soft MMU activation required for this block
 */
 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                              int is_write1, int mmu_idx, int is_softmmu)
 {
     uint64_t ptep, pte;
     target_ulong pde_addr, pte_addr;
-    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
+    int error_code, is_dirty, prot, page_size, is_write, is_user;
     target_phys_addr_t paddr;
     uint32_t page_offset;
     target_ulong vaddr, virt_addr;
@@ -799,8 +798,8 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
     paddr = (pte & TARGET_PAGE_MASK) + page_offset;
     vaddr = virt_addr + page_offset;
-    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
-    return ret;
+    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+    return 0;
  do_fault_protect:
     error_code = PG_ERROR_P_MASK;
  do_fault:
......
@@ -368,8 +368,9 @@ int cpu_m68k_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
     int prot;
     address &= TARGET_PAGE_MASK;
-    prot = PAGE_READ | PAGE_WRITE;
-    return tlb_set_page(env, address, address, prot, mmu_idx, is_softmmu);
+    prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+    tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
+    return 0;
 }
 /* Notify CPU of a pending interrupt. Prioritization and vectoring should
......
@@ -76,8 +76,8 @@ int cpu_mb_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
             DMMU(qemu_log("MMU map mmu=%d v=%x p=%x prot=%x\n",
                           mmu_idx, vaddr, paddr, lu.prot));
-            r = tlb_set_page(env, vaddr,
-                             paddr, lu.prot, mmu_idx, is_softmmu);
+            tlb_set_page(env, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
+            r = 0;
         } else {
             env->sregs[SR_EAR] = address;
             DMMU(qemu_log("mmu=%d miss v=%x\n", mmu_idx, address));
@@ -107,7 +107,8 @@ int cpu_mb_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
         /* MMU disabled or not available. */
         address &= TARGET_PAGE_MASK;
         prot = PAGE_BITS;
-        r = tlb_set_page(env, address, address, prot, mmu_idx, is_softmmu);
+        tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
+        r = 0;
     }
     return r;
 }
......
@@ -296,9 +296,10 @@ int cpu_mips_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
     qemu_log("%s address=" TARGET_FMT_lx " ret %d physical " TARGET_FMT_plx " prot %d\n",
              __func__, address, ret, physical, prot);
     if (ret == TLBRET_MATCH) {
-        ret = tlb_set_page(env, address & TARGET_PAGE_MASK,
-                           physical & TARGET_PAGE_MASK, prot,
-                           mmu_idx, is_softmmu);
+        tlb_set_page(env, address & TARGET_PAGE_MASK,
+                     physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
+                     mmu_idx, TARGET_PAGE_SIZE);
+        ret = 0;
     } else if (ret < 0)
 #endif
     {
......
@@ -1410,9 +1410,10 @@ int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
     }
     ret = get_physical_address(env, &ctx, address, rw, access_type);
     if (ret == 0) {
-        ret = tlb_set_page_exec(env, address & TARGET_PAGE_MASK,
-                                ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
-                                mmu_idx, is_softmmu);
+        tlb_set_page(env, address & TARGET_PAGE_MASK,
+                     ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
+                     mmu_idx, TARGET_PAGE_SIZE);
+        ret = 0;
     } else if (ret < 0) {
         LOG_MMU_STATE(env);
         if (access_type == ACCESS_CODE) {
......
@@ -69,10 +69,11 @@ int cpu_s390x_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
     /* XXX: implement mmu */
     phys = address;
-    prot = PAGE_READ | PAGE_WRITE;
-    return tlb_set_page(env, address & TARGET_PAGE_MASK,
-                        phys & TARGET_PAGE_MASK, prot,
-                        mmu_idx, is_softmmu);
+    prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+    tlb_set_page(env, address & TARGET_PAGE_MASK,
+                 phys & TARGET_PAGE_MASK, prot,
+                 mmu_idx, TARGET_PAGE_SIZE);
+    return 0;
 }
 #endif /* CONFIG_USER_ONLY */
@@ -470,7 +470,8 @@ int cpu_sh4_handle_mmu_fault(CPUState * env, target_ulong address, int rw,
     address &= TARGET_PAGE_MASK;
     physical &= TARGET_PAGE_MASK;
-    return tlb_set_page(env, address, physical, prot, mmu_idx, is_softmmu);
+    tlb_set_page(env, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
+    return 0;
 }
 target_phys_addr_t cpu_get_phys_page_debug(CPUState * env, target_ulong addr)
......
@@ -102,7 +102,8 @@ static const int perm_table[2][8] = {
 static int get_physical_address(CPUState *env, target_phys_addr_t *physical,
                                 int *prot, int *access_index,
-                                target_ulong address, int rw, int mmu_idx)
+                                target_ulong address, int rw, int mmu_idx,
+                                target_ulong *page_size)
 {
     int access_perms = 0;
     target_phys_addr_t pde_ptr;
@@ -113,6 +114,7 @@ static int get_physical_address(CPUState *env, target_phys_addr_t *physical,
     is_user = mmu_idx == MMU_USER_IDX;
     if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
+        *page_size = TARGET_PAGE_SIZE;
         // Boot mode: instruction fetches are taken from PROM
         if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) {
             *physical = env->prom_addr | (address & 0x7ffffULL);
@@ -175,13 +177,16 @@ static int get_physical_address(CPUState *env, target_phys_addr_t *physical,
                 page_offset = (address & TARGET_PAGE_MASK) &
                     (TARGET_PAGE_SIZE - 1);
             }
+            *page_size = TARGET_PAGE_SIZE;
             break;
         case 2: /* L2 PTE */
             page_offset = address & 0x3ffff;
+            *page_size = 0x40000;
         }
         break;
     case 2: /* L1 PTE */
         page_offset = address & 0xffffff;
+        *page_size = 0x1000000;
     }
 }
@@ -220,10 +225,11 @@ int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
 {
     target_phys_addr_t paddr;
     target_ulong vaddr;
-    int error_code = 0, prot, ret = 0, access_index;
+    target_ulong page_size;
+    int error_code = 0, prot, access_index;
     error_code = get_physical_address(env, &paddr, &prot, &access_index,
-                                      address, rw, mmu_idx);
+                                      address, rw, mmu_idx, &page_size);
     if (error_code == 0) {
         vaddr = address & TARGET_PAGE_MASK;
         paddr &= TARGET_PAGE_MASK;
@@ -231,8 +237,8 @@ int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
         printf("Translate at " TARGET_FMT_lx " -> " TARGET_FMT_plx ", vaddr "
                TARGET_FMT_lx "\n", address, paddr, vaddr);
 #endif
-        ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
-        return ret;
+        tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+        return 0;
     }
     if (env->mmuregs[3]) /* Fault status register */
@@ -247,8 +253,8 @@ int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
         // switching to normal mode.
         vaddr = address & TARGET_PAGE_MASK;
         prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
-        ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
-        return ret;
+        tlb_set_page(env, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
+        return 0;
     } else {
         if (rw & 2)
             env->exception_index = TT_TFAULT;
@@ -531,10 +537,14 @@ static int get_physical_address_code(CPUState *env,
 static int get_physical_address(CPUState *env, target_phys_addr_t *physical,
                                 int *prot, int *access_index,
-                                target_ulong address, int rw, int mmu_idx)
+                                target_ulong address, int rw, int mmu_idx,
+                                target_ulong *page_size)
 {
     int is_user = mmu_idx == MMU_USER_IDX;
+    /* ??? We treat everything as a small page, then explicitly flush
+       everything when an entry is evicted. */
+    *page_size = TARGET_PAGE_SIZE;
     if (rw == 2)
         return get_physical_address_code(env, physical, prot, address,
                                          is_user);
@@ -549,10 +559,11 @@ int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
 {
     target_ulong virt_addr, vaddr;
     target_phys_addr_t paddr;
-    int error_code = 0, prot, ret = 0, access_index;
+    target_ulong page_size;
+    int error_code = 0, prot, access_index;
     error_code = get_physical_address(env, &paddr, &prot, &access_index,
-                                      address, rw, mmu_idx);
+                                      address, rw, mmu_idx, &page_size);
     if (error_code == 0) {
         virt_addr = address & TARGET_PAGE_MASK;
         vaddr = virt_addr + ((address & TARGET_PAGE_MASK) &
@@ -561,8 +572,8 @@ int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
         printf("Translate at 0x%" PRIx64 " -> 0x%" PRIx64 ", vaddr 0x%" PRIx64
               "\n", address, paddr, vaddr);
 #endif
-        ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
-        return ret;
+        tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+        return 0;
     }
     // XXX
     return 1;
@@ -656,12 +667,13 @@ void dump_mmu(CPUState *env)
 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
 {
     target_phys_addr_t phys_addr;
+    target_ulong page_size;
     int prot, access_index;
     if (get_physical_address(env, &phys_addr, &prot, &access_index, addr, 2,
-                             MMU_KERNEL_IDX) != 0)
+                             MMU_KERNEL_IDX, &page_size) != 0)
         if (get_physical_address(env, &phys_addr, &prot, &access_index, addr,
-                                 0, MMU_KERNEL_IDX) != 0)
+                                 0, MMU_KERNEL_IDX, &page_size) != 0)
             return -1;
     if (cpu_get_physical_page_desc(phys_addr) == IO_MEM_UNASSIGNED)
         return -1;
......