/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

#include "exec/cputlb.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

//#define DEBUG_TLB
//#define DEBUG_TLB_CHECK

/* statistics */
int tlb_flush_count;

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUArchState *env, int flush_global)
{
    CPUState *cpu = ENV_GET_CPU(env);

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

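    /* Filling the table with 0xff bytes sets every address field to -1,
       which can never match a page-aligned guest address, so all entries
       become invalid; the TB jump cache is simply cleared to NULL.  */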
    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
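    /* The comparison masks include TLB_INVALID_MASK, so an entry that is
       already invalid never matches and is left untouched.  */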
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
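    /* The TLB is a direct-mapped table indexed by the low bits of the
       virtual page number, so only one slot per MMU mode can hold this
       page.  */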
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
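    /* Clearing the CODE dirty flag forces guest writes to this page
       through the slow path (TLB_NOTDIRTY), where self-modifying code
       can be detected.  */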
    cpu_physical_memory_reset_dirty(ram_addr, TARGET_PAGE_SIZE,
                                    DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
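        /* addr becomes a host virtual address (page base plus addend);
           the unsigned subtraction below folds the two-sided range check
           start <= addr < start + length into a single comparison.  */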
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr) == NULL) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length)
{
    CPUState *cpu;
    CPUArchState *env;

    CPU_FOREACH(cpu) {
        int mmu_idx;

        env = cpu->env_ptr;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            unsigned int i;

            for (i = 0; i < CPU_TLB_SIZE; i++) {
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
            }
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
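    /* Widen the covered region, dropping low bits from the mask, until the
       existing flush region and the new page agree on all masked bits.  */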
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    CPUState *cpu = ENV_GET_CPU(env);

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu->as, paddr,
                                                &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d\n",
           vaddr, paddr, prot, mmu_idx);
#endif

    address = vaddr;
    if (!memory_region_is_ram(section->mr)
        && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
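    /* The iotlb value is stored relative to vaddr so that the I/O slow
       path can recover it by adding back the faulting guest address.  */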
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
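    /* Likewise, addend is stored relative to vaddr: adding a guest virtual
       address within this page to te->addend yields the host address.  */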
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(section->mr->ram_addr
                                                   + xlat)) {
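            /* The page is still clean for at least one client (code, VGA
               or migration), so the first write must take the slow path to
               update the dirty bitmaps.  */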
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
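    /* If there is no valid executable mapping of this page in the TLB,
       load a code byte to force a TLB fill (this may raise a guest
       exception and not return).  */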
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu->as, pd);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x"
                      TARGET_FMT_lx "\n", addr);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS
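/* Instantiate the code-access load helpers (the _cmmu variants) for 1, 2,
   4 and 8 byte accesses by including the access template once per size.  */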

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"

#undef env