/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"

#include "exec/cputlb.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"

//#define DEBUG_TLB
//#define DEBUG_TLB_CHECK

/* statistics */
int tlb_flush_count;

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

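/* Flush all entries belonging to the MMU indexes passed in @argp, a list
 * terminated by a negative index.  Callers use the variadic wrapper below;
 * an illustrative (target-specific) call would be
 * tlb_flush_by_mmuidx(cpu, 0, 1, -1).
 */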
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

#if defined(DEBUG_TLB)
    printf("tlb_flush_by_mmuidx:");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

#if defined(DEBUG_TLB)
        printf(" %d", mmu_idx);
#endif

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

#if defined(DEBUG_TLB)
    printf("\n");
#endif

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

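/* Invalidate @tlb_entry if any of its read, write or code addresses
 * match the page at @addr. */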
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(cpu, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

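/* As tlb_flush_page(), but only for the MMU indexes passed as a
 * negative-terminated argument list after @addr. */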
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

#if defined(DEBUG_TLB)
    printf("tlb_flush_page_by_mmu_idx: " TARGET_FMT_lx, addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf(" forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

#if defined(DEBUG_TLB)
        printf(" %d", mmu_idx);
#endif

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

#if defined(DEBUG_TLB)
    printf("\n");
#endif

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

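/* True if the entry allows direct writes to RAM, i.e. none of
 * TLB_INVALID_MASK, TLB_MMIO or TLB_NOTDIRTY is set on addr_write. */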
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

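/* If @tlb_entry maps writable RAM inside [start, start + length), set
 * TLB_NOTDIRTY on its write address so that stores take the slow path
 * and dirty tracking sees them. */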
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

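/* Like qemu_ram_addr_from_host(), but treat a pointer that does not map
 * back into guest RAM as a fatal error. */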
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr) == NULL) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

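/* Update all TLB entries (every MMU mode, main and victim TLBs) whose
 * target lies in [start1, start1 + length) so that the next write to
 * those pages is trapped again (TLB_NOTDIRTY). */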
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;
    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

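/* Clear TLB_NOTDIRTY from @tlb_entry if it maps @vaddr, re-enabling the
 * fast write path for that page. */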
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

#if defined(DEBUG_TLB)
    qemu_log_mask(CPU_LOG_MMU,
           "tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d\n",
           vaddr, paddr, prot, mmu_idx);
#endif

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(section->mr->ram_addr
                                                   + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
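    /* If there is no valid entry for a code fetch from this page, force a
     * TLB fill by doing a byte load through the code-access slow path. */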
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    pd = env1->iotlb[mmu_idx][page_index].addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x"
                      TARGET_FMT_lx "\n", addr);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

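/* Instantiate the softmmu load/store helpers: MMUSUFFIX _mmu for data
 * accesses and _cmmu for code fetches; SHIFT selects the access size,
 * 1 << SHIFT bytes (1, 2, 4 and 8). */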
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
#undef MMUSUFFIX

#define MMUSUFFIX _cmmu
#undef GETPC_ADJ
#define GETPC_ADJ 0
#undef GETRA
#define GETRA() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"