/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"

#include "exec/cputlb.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

/* statistics */
int tlb_flush_count;

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("(%d)\n", flush_global);

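    /* memset()ing the tables to -1 sets every addr_read/addr_write/
     * addr_code field to all ones, which can never compare equal to a
     * page-aligned virtual address, so every entry becomes invalid.
     */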
    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
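    /* The tb_jmp_cache maps guest PCs to TBs; after a full flush those
     * mappings may be stale, so clear it as well.
     */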
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("start\n");

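    /* The caller passes a list of mmu_idx values terminated by a
     * negative entry.
     */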
    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

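/* Flush all entries for each mmu index in the argument list, which must
 * be terminated with a negative value, e.g.
 *   tlb_flush_by_mmuidx(cpu, 0, 2, -1);
 */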
void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

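/* Invalidate a single TLB entry if any of its addr_read, addr_write or
 * addr_code fields match the page-aligned address being flushed.
 * TLB_INVALID_MASK is kept in the comparison mask so that entries which
 * are already invalid never match a valid address.
 */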
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu, 1);
        return;
    }

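    /* The TLB is direct-mapped: the low bits of the page number select
     * the single entry per mmu mode that could map this address.
     */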
    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

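/* True if the entry maps ordinary RAM whose writes go through the fast
 * path: none of the invalid, MMIO or not-dirty status bits are set.
 */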
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

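    /* Note the unsigned comparison below: if addr is below start,
     * addr - start wraps to a large value, so the single compare tests
     * start <= addr < start + length.
     */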
    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr) == NULL) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

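/* Clear the TLB_NOTDIRTY flag for an entry that exactly matches vaddr,
 * re-enabling the fast write path for that page.
 */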
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr so that writes to
   it are no longer trapped for dirty-memory tracking */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
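    /* Widen the mask one bit at a time until the recorded region and
     * the new page share the same prefix under it.
     */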
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
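    /* Victim TLB slots are allocated round-robin. */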
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr)
        && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
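    /* Storing iotlb - vaddr means the slow path can recover the iotlb
     * value by adding the faulting guest address back in.
     */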
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
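    /* Likewise, addend - vaddr lets the fast path compute the host
     * address as guest address + te->addend.
     */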
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                        memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
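    /* If the TLB entry for this page is missing or stale, performing a
     * dummy code load fills it as a side effect.
     */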
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x"
                      TARGET_FMT_lx "\n", addr);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

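/* Instantiate the out-of-line load/store helpers.  SHIFT is the log2 of
 * the access size (0 = 8-bit ... 3 = 64-bit); each inclusion of
 * softmmu_template.h generates the helpers for one width.
 */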
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
#undef MMUSUFFIX

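/* Code-access variants (_cmmu).  These are not called from TCG-generated
 * code, so there is no host return address to adjust: GETRA/GETPC_ADJ
 * are stubbed out to zero.
 */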
#define MMUSUFFIX _cmmu
#undef GETPC_ADJ
#define GETPC_ADJ 0
#undef GETRA
#define GETRA() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"