/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"

#include "exec/cputlb.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

/* statistics */
int tlb_flush_count;

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("(%d)\n", flush_global);

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("start\n");

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}
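
/* Example (the mmu index names are target-specific and assumed here):
 * flush only the user- and kernel-mode TLBs, leaving other MMU modes
 * intact.  The argument list must be terminated with a negative index:
 *
 *     tlb_flush_by_mmuidx(cpu, MMU_USER_IDX, MMU_KERNEL_IDX, -1);
 */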

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}
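
/* Note that memset()ing an entry to all-ones marks it invalid: the
 * comparand above (and in the fast path) includes TLB_INVALID_MASK,
 * which is never set in the page-aligned address being looked up, so an
 * all-ones entry can never match.
 */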

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu, 1);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
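
/* TLB_NOTDIRTY forces subsequent guest writes to the page through the
 * slow path, where the dirty bitmap can be updated again; see the
 * PAGE_WRITE handling in tlb_set_page_with_attrs() below.
 */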

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;
    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
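
/* Worked example (assumed values): if the tracked region is currently
 * addr 0x40000000 with mask 0xffe00000 (one 2 MB page) and another 2 MB
 * page at 0x40400000 is added, the loop above widens the mask to
 * 0xff800000, so the region becomes the 8 MB block at 0x40000000.  Any
 * tlb_flush_page() falling inside that block now forces a full flush.
 */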

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                        memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}
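
/* Sketch of the usual caller (target-specific names assumed): a target's
 * tlb_fill() path ends by installing the translation it computed, e.g.:
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK, attrs,
 *                             prot, mmu_idx, page_size);
 */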

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        /* The code-access TLB entry is stale or missing: do a dummy
           instruction-byte load purely for its side effect of refilling
           the entry (this may raise a guest fault via tlb_fill).  */
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            report_bad_exec(cpu, addr);
            exit(1);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];
            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];

            tmptlb = *tlb; *tlb = *vtlb; *vtlb = tmptlb;
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}
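
/* The victim TLB exists because the main TLB is direct-mapped: two hot
 * pages whose virtual addresses map to the same slot would otherwise
 * evict each other on every access.  tlb_set_page_with_attrs() saves the
 * displaced entry into the victim table, and the swap above restores it
 * on a hit without paying for a full tlb_fill.
 */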

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}
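
/* Sketch of a typical call site (target helper code, details assumed):
 * before emulating a multi-word store that must not fault halfway, a
 * helper can validate the whole destination page up front:
 *
 *     probe_write(env, addr, cpu_mmu_index(env, false), GETPC());
 */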

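/* The repeated inclusions of softmmu_template.h below stamp out the
 * softmmu load/store helpers once per access size (1, 2, 4 and 8 bytes)
 * under the _mmu suffix; the second pass with SOFTMMU_CODE_ACCESS and
 * the _cmmu suffix generates the read-only variants used for instruction
 * fetch.  GETPC() is redefined to 0 for that pass because code fetches
 * do not come from a generated-code call site with a host return address
 * to unwind.
 */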
#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"
#undef MMUSUFFIX

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"