/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "sysemu/numa.h"
#include "sysemu/hw_accel.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace-root.h"

#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
#include <fcntl.h>
#include <linux/falloc.h>
#endif

#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

#include "monitor/monitor.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

#ifdef TARGET_PAGE_BITS_VARY
int target_page_bits;
bool target_page_bits_decided;
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

bool set_preferred_target_page_bits(int bits)
{
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger. And we can't make it smaller once we've committed to
     * a particular size.
     */
#ifdef TARGET_PAGE_BITS_VARY
    assert(bits >= TARGET_PAGE_BITS_MIN);
    if (target_page_bits == 0 || target_page_bits > bits) {
        if (target_page_bits_decided) {
            return false;
        }
        target_page_bits = bits;
    }
#endif
    return true;
}

#if !defined(CONFIG_USER_ONLY)

static void finalize_target_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    }
    target_page_bits_decided = true;
#endif
}

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
     /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
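/*
 * For a typical 4 KiB target page (TARGET_PAGE_BITS == 12) this works out to
 * ((64 - 12 - 1) / 9) + 1 = 6 levels, each node holding P_L2_SIZE == 512
 * PhysPageEntry slots; targets with larger pages need fewer levels.
 */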

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

struct DirtyBitmapSnapshot {
    ram_addr_t start;
    ram_addr_t end;
    unsigned long dirty[];
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
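
/*
 * In the walk above, i starts at P_L2_LEVELS and drops by lp.skip on each
 * step, so entries merged by phys_page_compact() are crossed in a single
 * hop; a leaf has skip == 0, which ends the loop with lp.ptr indexing into
 * the sections array.
 */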

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection address_space_do_translate(AddressSpace *as,
                                                      hwaddr addr,
                                                      hwaddr *xlat,
                                                      hwaddr *plen,
                                                      bool is_write,
                                                      bool is_mmio)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, is_mmio);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write ?
                                         IOMMU_WO : IOMMU_RO);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            goto translate_fail;
        }

        as = iotlb.target_as;
    }

    *xlat = addr;

    return *section;

translate_fail:
    return (MemoryRegionSection) { .mr = &io_mem_unassigned };
}

/* Called from RCU critical section */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write)
{
    MemoryRegionSection section;
    hwaddr xlat, plen;

    /* Try to get maximum page mask during translation. */
    plen = (hwaddr)-1;

    /* This can never be MMIO. */
    section = address_space_do_translate(as, addr, &xlat, &plen,
                                         is_write, false);

    /* Illegal translation */
    if (section.mr == &io_mem_unassigned) {
        goto iotlb_fail;
    }

    /* Convert memory region offset into address space offset */
    xlat += section.offset_within_address_space -
        section.offset_within_region;

    if (plen == (hwaddr)-1) {
        /*
         * We use default page size here. Logically it only happens
         * for identity mappings.
         */
        plen = TARGET_PAGE_SIZE;
    }

    /* Convert to address mask */
    plen -= 1;

    return (IOMMUTLBEntry) {
        .target_as = section.address_space,
        .iova = addr & ~plen,
        .translated_addr = xlat & ~plen,
        .addr_mask = plen,
        /* IOTLBs are for DMAs, and DMA only allows on RAMs. */
        .perm = IOMMU_RW,
    };

iotlb_fail:
    return (IOMMUTLBEntry) {0};
}
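
/*
 * Note on the helper above: after translation plen holds the size of the
 * translated range (or TARGET_PAGE_SIZE for identity mappings); subtracting
 * one turns that size into addr_mask (the code relies on it being a
 * power-of-two granule), and ~plen then aligns iova and translated_addr to
 * that granule.
 */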

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    MemoryRegion *mr;
    MemoryRegionSection section;

    /* This can be MMIO, so setup MMIO bit. */
    section = address_space_do_translate(as, addr, xlat, plen, is_write, true);
    mr = section.mr;

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

void cpu_exec_unrealizefn(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif
}

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);

    cpu_list_add(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    /* Flush the whole TB as this will not have race conditions
     * even if we don't have proper locking yet.
     * Ideally we would just invalidate the TBs for the
     * specified PC.
     */
    tb_flush(cpu);
}

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
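
/*
 * Example: a watchpoint with vaddr == 0xfffc and len == 4 has
 * wpend == 0xffff, so the comparison above still works for an access that
 * ends at the very top of the address space, where addr + len would wrap
 * around to zero.
 */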

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log_lock();
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_unlock();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    RAMBLOCK_FOREACH(block) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
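
/*
 * The dirty bitmap is not one flat array: ram_list.dirty_memory[client]
 * points to a DirtyMemoryBlocks structure whose blocks[] each cover
 * DIRTY_MEMORY_BLOCK_SIZE pages, which is why the loops above and below
 * split [page, end) at block boundaries before the atomic bitmap operations.
 */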

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
     (ram_addr_t start, ram_addr_t length, unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
    ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
    ram_addr_t last  = QEMU_ALIGN_UP(start + length, align);
    DirtyBitmapSnapshot *snap;
    unsigned long page, end, dest;

    snap = g_malloc0(sizeof(*snap) +
                     ((last - first) >> (TARGET_PAGE_BITS + 3)));
    snap->start = first;
    snap->end   = last;

    page = first >> TARGET_PAGE_BITS;
    end  = last  >> TARGET_PAGE_BITS;
    dest = 0;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL)));
        assert(QEMU_IS_ALIGNED(num,    (1 << BITS_PER_LEVEL)));
        offset >>= BITS_PER_LEVEL;

        bitmap_copy_and_clear_atomic(snap->dirty + dest,
                                     blocks->blocks[idx] + offset,
                                     num);
        page += num;
        dest += num >> BITS_PER_LEVEL;
    }

    rcu_read_unlock();

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return snap;
}

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length)
{
    unsigned long page, end;

    assert(start >= snap->start);
    assert(start + length <= snap->end);

    end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS;
    page = (start - snap->start) >> TARGET_PAGE_BITS;

    while (page < end) {
        if (test_bit(page, snap->dirty)) {
            return true;
        }
        page++;
    }
    return false;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
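
/*
 * The returned iotlb value is overloaded: for RAM it is the ram_addr of the
 * page with the PHYS_SECTION_NOTDIRTY / PHYS_SECTION_ROM flags OR-ed into
 * the low bits, while for MMIO it encodes the section index within the
 * dispatch map plus the offset, to be decoded again at TLB fill time.
 */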
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
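
/*
 * mem_add() splits a section at page boundaries: a head or tail fragment
 * that is not page aligned goes through register_subpage(), while the
 * page-aligned middle is mapped in bulk by register_multipage().
 */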

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

void ram_block_dump(Monitor *mon)
{
    RAMBlock *block;
    char *psize;

    rcu_read_lock();
    monitor_printf(mon, "%24s %8s  %18s %18s %18s\n",
                   "Block Name", "PSize", "Offset", "Used", "Total");
    RAMBLOCK_FOREACH(block) {
        psize = size_to_str(block->page_size);
        monitor_printf(mon, "%24s %8s  0x%016" PRIx64 " 0x%016" PRIx64
                       " 0x%016" PRIx64 "\n", block->idstr, psize,
                       (uint64_t)block->offset,
                       (uint64_t)block->used_length,
                       (uint64_t)block->max_length);
        g_free(psize);
    }
    rcu_read_unlock();
}

#ifdef __linux__
/*
 * FIXME TOCTTOU: this iterates over memory backends' mem-path, which
 * may or may not name the same files / on the same filesystem now as
 * when we actually open and map them.  Iterate over the file
 * descriptors instead, and use qemu_fd_getpagesize().
 */
static int find_max_supported_pagesize(Object *obj, void *opaque)
{
    char *mem_path;
    long *hpsize_min = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        mem_path = object_property_get_str(obj, "mem-path", NULL);
        if (mem_path) {
            long hpsize = qemu_mempath_getpagesize(mem_path);
            if (hpsize < *hpsize_min) {
                *hpsize_min = hpsize;
            }
        } else {
            *hpsize_min = getpagesize();
        }
    }

    return 0;
}

long qemu_getrampagesize(void)
{
    long hpsize = LONG_MAX;
    long mainrampagesize;
    Object *memdev_root;

    if (mem_path) {
        mainrampagesize = qemu_mempath_getpagesize(mem_path);
    } else {
        mainrampagesize = getpagesize();
    }

    /* it's possible we have memory-backend objects with
     * hugepage-backed RAM. these may get mapped into system
     * address space via -numa parameters or memory hotplug
     * hooks. we want to take these into account, but we
     * also want to make sure these supported hugepage
     * sizes are applicable across the entire range of memory
     * we may boot from, so we take the min across all
     * backends, and assume normal pages in cases where a
     * backend isn't backed by hugepages.
     */
    memdev_root = object_resolve_path("/objects", NULL);
    if (memdev_root) {
        object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
    }
    if (hpsize == LONG_MAX) {
        /* No additional memory regions found ==> Report main RAM page size */
        return mainrampagesize;
    }

    /* If NUMA is disabled or the NUMA nodes are not backed with a
     * memory-backend, then there is at least one node using "normal" RAM,
     * so if its page size is smaller we have got to report that size instead.
     */
    if (hpsize > mainrampagesize &&
        (nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL)) {
        static bool warned;
        if (!warned) {
            error_report("Huge page support disabled (n/a for main memory).");
            warned = true;
        }
        return mainrampagesize;
    }

    return hpsize;
}
#else
long qemu_getrampagesize(void)
{
    return getpagesize();
}
#endif

#ifdef __linux__
static int64_t get_file_size(int fd)
{
    int64_t size = lseek(fd, 0, SEEK_END);
    if (size < 0) {
        return -errno;
    }
    return size;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = MAP_FAILED;
    int fd = -1;
    int64_t file_size;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    block->page_size = qemu_fd_getpagesize(fd);
    block->mr->align = block->page_size;
#if defined(__s390x__)
    if (kvm_enabled()) {
        block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
    }
#endif

    file_size = get_file_size(fd);

    if (memory < block->page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%zx",
                   memory, block->page_size);
        goto error;
    }

    if (file_size > 0 && file_size < memory) {
        error_setg(errp, "backing store %s size 0x%" PRIx64
                   " does not match 'size' option 0x" RAM_ADDR_FMT,
                   path, file_size, memory);
        goto error;
    }

    memory = ROUND_UP(memory, block->page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     *
     * Do not truncate the non-empty backend file to avoid corrupting
     * the existing data in the file. Disabling shrinking is not
     * enough. For example, the current vNVDIMM implementation stores
     * the guest NVDIMM labels at the end of the backend file. If the
     * backend file is later extended, QEMU will not be able to find
     * those labels. Therefore, extending the non-empty backend file
     * is disabled as well.
     */
    if (!file_size && ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory, smp_cpus, errp);
        if (errp && *errp) {
            goto error;
        }
    }

    block->fd = fd;
    return area;

error:
    if (area != MAP_FAILED) {
        qemu_ram_munmap(area, memory);
    }
    if (unlink_on_error) {
        unlink(path);
    }
    if (fd != -1) {
        close(fd);
    }
    return NULL;
}
#endif

/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    RAMBLOCK_FOREACH(block) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        RAMBLOCK_FOREACH(next_block) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
A
Alex Williamson 已提交
1651
            offset = end;
A
Alex Williamson 已提交
1652 1653 1654
            mingap = next - end;
        }
    }
A
Alex Williamson 已提交
1655 1656 1657 1658 1659 1660 1661

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

A
Alex Williamson 已提交
1662 1663 1664
    return offset;
}
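
/*
 * Worked example (illustrative): with blocks already occupying
 * [0, 0x40000000) and [0x80000000, 0xc0000000), a request for 0x20000000
 * bytes returns offset 0x40000000, the smallest gap that still fits.
 */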

unsigned long last_ram_page(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last >> TARGET_PAGE_BITS;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

bool qemu_ram_is_shared(RAMBlock *rb)
{
    return rb->flags & RAM_SHARED;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
    RAMBlock *block;

    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(RAMBlock *block)
{
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
}

size_t qemu_ram_pagesize(RAMBlock *rb)
{
    return rb->page_size;
}

/* Returns the largest size of page in use */
size_t qemu_ram_pagesize_largest(void)
{
    RAMBlock *block;
    size_t largest = 0;

    RAMBLOCK_FOREACH(block) {
        largest = MAX(largest, qemu_ram_pagesize(block));
    }

    return largest;
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how the memory is accessed, it is up to
 * the resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
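
/*
 * Usage sketch (assumed, not a call made from this file): a device owning a
 * RAM_RESIZEABLE block, e.g. one holding firmware or ACPI blobs, may call
 * qemu_ram_resize(block, new_len, &err) during incoming migration when the
 * source reports a different used_length.
 */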

/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }
}
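
/*
 * Illustrative arithmetic: each DirtyMemoryBlocks entry covers
 * DIRTY_MEMORY_BLOCK_SIZE pages, so a new RAM top of new_ram_size pages
 * needs DIV_ROUND_UP(new_ram_size, DIRTY_MEMORY_BLOCK_SIZE) bitmap blocks,
 * one array per dirty-memory client (VGA, code, migration).
 */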

static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_page();

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    RAMBLOCK_FOREACH(block) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        ram_block_notify_add(new_block->host, new_block->max_length);
    }
}

#ifdef __linux__
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return NULL;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return NULL;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return NULL;
    }

    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
#endif
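
/*
 * Usage sketch (hypothetical path and size): a file-backed memory backend
 * ends up doing roughly
 *
 *     Error *err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_from_file(0x40000000, mr, true,
 *                                             "/dev/hugepages/guest0", &err);
 *
 * where "share" selects MAP_SHARED in file_ram_alloc()/qemu_ram_mmap().
 */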

static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                                  uint64_t length,
                                                  void *host),
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->page_size = getpagesize();
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
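
/*
 * Note: the three wrappers above only vary the qemu_ram_alloc_internal
 * arguments: a caller-provided host pointer (RAM_PREALLOC), plain anonymous
 * RAM, or a growable block (RAM_RESIZEABLE) with a resize callback.
 */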

static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(RAMBlock *block)
{
    if (!block) {
        return;
    }

    if (block->host) {
        ram_block_notify_remove(block->host, block->max_length);
    }

    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    RAMBLOCK_FOREACH(block) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */
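
/*
 * qemu_ram_remap() is a recovery path: it discards a damaged page range
 * (for example after a hardware memory error reported by the host) and maps
 * fresh pages at the same virtual address, matching the way the block was
 * originally allocated.
 */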

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0, false);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1, false);
    }
    return ramblock_ptr(block, addr);
}

/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;
    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }
    *size = MIN(*size, block->max_length - addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1, true);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1, true);
    }

    return ramblock_ptr(block, addr);
}

/*
 * Translates a host ptr back to a RAMBlock and an offset in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        ram_addr_t ram_addr;
        rcu_read_lock();
        ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(ram_addr);
        if (block) {
            *offset = ram_addr - block->offset;
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    RAMBLOCK_FOREACH(block) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    rcu_read_unlock();
    return block;
}

/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    RAMBLOCK_FOREACH(block) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    ram_addr_t offset;

    block = qemu_ram_block_from_host(ptr, false, &offset);
    if (!block) {
        return RAM_ADDR_INVALID;
    }

    return block->offset + offset;
}
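
/*
 * Example (illustrative): qemu_ram_addr_from_host(ptr) yields
 * RAM_ADDR_INVALID when ptr does not point into any RAMBlock; otherwise the
 * returned block->offset + offset is the ram_addr_t that the dirty-bitmap
 * and TLB code operate on.
 */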

/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    bool locked = false;

    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        locked = true;
        tb_lock();
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }

    if (locked) {
        tb_unlock();
    }

    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
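
/*
 * Design note: pages containing translated code are entered in the TLB as
 * "notdirty", so the first guest write lands here, invalidates any TBs for
 * the page, marks it dirty, and only then does the page revert to direct
 * RAM writes.
 */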

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    uint32_t cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    vaddr = cc->adjust_watchpoint_address(cpu, vaddr, len);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;

                /* Both tb_lock and iothread_mutex will be reset when
                 * cpu_loop_exit or cpu_loop_exit_noexc longjmp
                 * back into the cpu_exec main loop.
                 */
                tb_lock();
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_loop_exit_noexc(cpu);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
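
/*
 * Illustrative case: a subpage is needed when two sections share one target
 * page, e.g. a small MMIO window followed by RAM within the same 4 KiB page;
 * each access is then re-dispatched through subpage_read()/subpage_write()
 * to the section that actually covers the offset.
 */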

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);

    /* io_mem_notdirty calls tb_invalidate_phys_page_fast,
     * which can be called without the iothread mutex.
     */
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_clear_global_locking(&io_mem_notdirty);

    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    atomic_rcu_set(&cpuas->memory_dispatch, d);
    tlb_flush(cpuas->cpu);
}

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
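
/*
 * Caller sketch (user-mode, illustrative): the gdbstub reads and writes
 * guest memory through cpu_memory_rw_debug(), which is why the
 * PAGE_VALID/PAGE_READ/PAGE_WRITE checks above fail with -1 instead of
 * raising a guest fault.
 */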

#else

static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_lock();
        tb_invalidate_phys_range(addr, addr + length);
        tb_unlock();
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
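
/*
 * Worked example (illustrative): for a region declaring
 * valid.max_access_size = 4, an 8-byte access at an address aligned only to
 * 2 (addr & -addr == 2) is clamped to 4 by the maximum and then to 2 by the
 * alignment, so the access loops below end up issuing four 2-byte accesses.
 */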

static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}

/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = (uint32_t)ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}

MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
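
/*
 * Usage sketch (illustrative): most callers use the wrappers, e.g.
 *
 *     address_space_rw(&address_space_memory, addr,
 *                      MEMTXATTRS_UNSPECIFIED, buf, len, true);
 *
 * which is what cpu_physical_memory_rw() below does for the system
 * address space.
 */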
A
Avi Kivity 已提交
3045

A
Avi Kivity 已提交
3046
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
A
Avi Kivity 已提交
3047 3048
                            int len, int is_write)
{
3049 3050
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
A
Avi Kivity 已提交
3051 3052
}

3053 3054 3055 3056 3057
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

3058
static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
3059
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
B
bellard 已提交
3060
{
3061
    hwaddr l;
B
bellard 已提交
3062
    uint8_t *ptr;
3063
    hwaddr addr1;
3064
    MemoryRegion *mr;
3065

3066
    rcu_read_lock();
B
bellard 已提交
3067
    while (len > 0) {
3068
        l = len;
3069
        mr = address_space_translate(as, addr, &addr1, &l, true);
3070

3071 3072
        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
3073
            l = memory_access_size(mr, l, addr1);
B
bellard 已提交
3074 3075
        } else {
            /* ROM/RAM case */
3076
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
3077 3078 3079
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
3080
                invalidate_and_set_dirty(mr, addr1, l);
3081 3082 3083 3084 3085
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
B
bellard 已提交
3086 3087 3088 3089 3090
        }
        len -= l;
        buf += l;
        addr += l;
    }
3091
    rcu_read_unlock();
B
bellard 已提交
3092 3093
}

3094
/* used for ROM loading : can write in RAM and ROM */
3095
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
3096 3097
                                   const uint8_t *buf, int len)
{
3098
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

3113 3114
    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
3115 3116
}

3117
typedef struct {
3118
    MemoryRegion *mr;
3119
    void *buffer;
A
Avi Kivity 已提交
3120 3121
    hwaddr addr;
    hwaddr len;
F
Fam Zheng 已提交
3122
    bool in_use;
3123 3124 3125 3126
} BounceBuffer;

static BounceBuffer bounce;
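
/*
 * Note: there is a single global bounce buffer, so only one MMIO mapping
 * created by address_space_map() can be outstanding at a time; while it is
 * in use the function returns NULL and callers are expected to register a
 * map client callback to retry (see cpu_register_map_client() below).
 */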

3127
typedef struct MapClient {
3128
    QEMUBH *bh;
B
Blue Swirl 已提交
3129
    QLIST_ENTRY(MapClient) link;
3130 3131
} MapClient;

3132
QemuMutex map_client_list_lock;
B
Blue Swirl 已提交
3133 3134
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3135

3136 3137 3138 3139 3140 3141
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

3142 3143 3144 3145 3146 3147
static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
3148 3149
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
3150 3151 3152
    }
}

3153
void cpu_register_map_client(QEMUBH *bh)
3154
{
3155
    MapClient *client = g_malloc(sizeof(*client));
3156

3157
    qemu_mutex_lock(&map_client_list_lock);
3158
    client->bh = bh;
B
Blue Swirl 已提交
3159
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3160 3161 3162
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
3163
    qemu_mutex_unlock(&map_client_list_lock);
3164 3165
}

3166
void cpu_exec_init_all(void)
3167
{
3168
    qemu_mutex_init(&ram_list.mutex);
3169 3170 3171 3172 3173 3174 3175 3176
    /* The data structures we set up here depend on knowing the page size,
     * so no more changes can be made after this point.
     * In an ideal world, nothing we did before we had finished the
     * machine setup would care about the target page size, and we could
     * do this much later, rather than requiring board models to state
     * up front what their requirements are.
     */
    finalize_target_page_bits();
3177
    io_mem_init();
3178
    memory_map_init();
3179
    qemu_mutex_init(&map_client_list_lock);
3180 3181
}

3182
void cpu_unregister_map_client(QEMUBH *bh)
3183 3184 3185
{
    MapClient *client;

3186 3187 3188 3189 3190 3191
    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
3192
    }
3193
    qemu_mutex_unlock(&map_client_list_lock);
3194 3195 3196 3197
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
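
/*
 * Usage sketch (illustrative only, not compiled here): a device model can
 * probe a guest-supplied window before starting a transfer, instead of
 * failing halfway through.  "dev_addr", "dev_len" and "data" are
 * hypothetical names.
 *
 *     if (!address_space_access_valid(&address_space_memory,
 *                                     dev_addr, dev_len, true)) {
 *         return;   // reject the request up front
 *     }
 *     address_space_write(&address_space_memory, dev_addr,
 *                         MEMTXATTRS_UNSPECIFIED, data, dev_len);
 */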

static hwaddr
address_space_extend_translation(AddressSpace *as, hwaddr addr, hwaddr target_len,
                                 MemoryRegion *mr, hwaddr base, hwaddr len,
                                 bool is_write)
{
    hwaddr done = 0;
    hwaddr xlat;
    MemoryRegion *this_mr;

    for (;;) {
        target_len -= len;
        addr += len;
        done += len;
        if (target_len == 0) {
            return done;
        }

        len = target_len;
        this_mr = address_space_translate(as, addr, &xlat, &len, is_write);
        if (this_mr != mr || xlat != base + done) {
            return done;
        }
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr l, xlat;
    MemoryRegion *mr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    memory_region_ref(mr);
    *plen = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write);
    ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen);
    rcu_read_unlock();

    return ptr;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
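
/*
 * Usage sketch (illustrative only, not compiled here): the usual pattern is
 * map -> access -> unmap, with cpu_register_map_client() used to retry once
 * the single bounce buffer becomes free again.  "as", "dma_addr", "dma_len"
 * and "retry_bh" are hypothetical names.
 *
 *     hwaddr plen = dma_len;
 *     void *p = address_space_map(as, dma_addr, &plen, true);
 *     if (!p) {
 *         // Bounce buffer busy: ask to be notified when mapping may
 *         // succeed again, then retry from the bottom half.
 *         cpu_register_map_client(retry_bh);
 *         return;
 *     }
 *     // ... fill up to plen bytes at p ...
 *     address_space_unmap(as, p, plen, true, plen);
 */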

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

#define ARG1_DECL                AddressSpace *as
#define ARG1                     as
#define SUFFIX
#define TRANSLATE(...)           address_space_translate(as, __VA_ARGS__)
#define IS_DIRECT(mr, is_write)  memory_access_is_direct(mr, is_write)
#define MAP_RAM(mr, ofs)         qemu_map_ram_ptr((mr)->ram_block, ofs)
#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK(...)       rcu_read_lock()
#define RCU_READ_UNLOCK(...)     rcu_read_unlock()
#include "memory_ldst.inc.c"

int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write)
{
    cache->len = len;
    cache->as = as;
    cache->xlat = addr;
    return len;
}

void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len)
{
}

void address_space_cache_destroy(MemoryRegionCache *cache)
{
    cache->as = NULL;
}

#define ARG1_DECL                MemoryRegionCache *cache
#define ARG1                     cache
#define SUFFIX                   _cached
#define TRANSLATE(addr, ...)     \
    address_space_translate(cache->as, cache->xlat + (addr), __VA_ARGS__)
#define IS_DIRECT(mr, is_write)  true
#define MAP_RAM(mr, ofs)         qemu_map_ram_ptr((mr)->ram_block, ofs)
#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
#define RCU_READ_LOCK()          rcu_read_lock()
#define RCU_READ_UNLOCK()        rcu_read_unlock()
#include "memory_ldst.inc.c"
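
/*
 * Usage sketch (illustrative only, not compiled here): a MemoryRegionCache
 * is set up once over a region that is accessed repeatedly (for example a
 * ring in guest memory) and then used with the *_cached accessors generated
 * by the include above; offsets are relative to the cached base.  "as" and
 * "ring_pa" are hypothetical names.
 *
 *     MemoryRegionCache cache;
 *     address_space_cache_init(&cache, as, ring_pa, 4096, false);
 *     uint32_t head = address_space_ldl_cached(&cache, 0,
 *                                              MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_cache_destroy(&cache);
 */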

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    cpu_synchronize_state(cpu);
    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
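
/*
 * Usage sketch (illustrative only, not compiled here): this is the path the
 * gdbstub and monitor use to read or write guest virtual memory.  "cpu" and
 * "pc" are hypothetical names.
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(cpu, pc, insn, sizeof(insn), 0) < 0) {
 *         // no physical page mapped at this virtual address
 *     }
 */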

/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_size(void)
{
    return TARGET_PAGE_SIZE;
}

int qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

int qemu_target_page_bits_min(void)
{
    return TARGET_PAGE_BITS_MIN;
}
#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
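
/*
 * Usage sketch (illustrative only, not compiled here): the callback receives
 * each block's id string, host pointer, offset and used length, and stops
 * the walk by returning non-zero.  Parameter types here mirror the call
 * above; see the RAMBlockIterFunc typedef for the authoritative signature.
 *
 *     static int count_block(const char *idstr, void *host_addr,
 *                            ram_addr_t offset, ram_addr_t length,
 *                            void *opaque)
 *     {
 *         (*(int *)opaque)++;
 *         return 0;
 *     }
 *
 *     int nblocks = 0;
 *     qemu_ram_foreach_block(count_block, &nblocks);
 */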

/*
 * Unmap pages of memory from start to start+length such that
 * they a) read as 0, b) trigger whatever fault mechanism
 * the OS provides for postcopy.
 * The pages must be unmapped by the end of the function.
 * Returns: 0 on success, non-0 on failure.
 */
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
{
    int ret = -1;

    uint8_t *host_startaddr = rb->host + start;

    if ((uintptr_t)host_startaddr & (rb->page_size - 1)) {
        error_report("ram_block_discard_range: Unaligned start address: %p",
                     host_startaddr);
        goto err;
    }

    if ((start + length) <= rb->used_length) {
        uint8_t *host_endaddr = host_startaddr + length;
        if ((uintptr_t)host_endaddr & (rb->page_size - 1)) {
            error_report("ram_block_discard_range: Unaligned end address: %p",
                         host_endaddr);
            goto err;
        }

        errno = ENOTSUP; /* If we are missing MADVISE etc */

        if (rb->page_size == qemu_host_page_size) {
#if defined(CONFIG_MADVISE)
            /* Note: We need the madvise MADV_DONTNEED behaviour of definitely
             * freeing the page.
             */
            ret = madvise(host_startaddr, length, MADV_DONTNEED);
#endif
        } else {
            /* Huge page case - unfortunately it can't do DONTNEED, but
             * it can do the equivalent by FALLOC_FL_PUNCH_HOLE in the
             * huge page file.
             */
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
            ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                            start, length);
#endif
        }
        if (ret) {
            ret = -errno;
            error_report("ram_block_discard_range: Failed to discard range "
                         "%s:%" PRIx64 " +%zx (%d)",
                         rb->idstr, start, length, ret);
        }
    } else {
        error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
                     "/%zx/" RAM_ADDR_FMT")",
                     rb->idstr, start, length, rb->used_length);
    }

err:
    return ret;
}
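
/*
 * Usage sketch (illustrative only, not compiled here): postcopy migration
 * drops a page-aligned range of a RAMBlock so that the next guest access
 * faults and can be filled from the migration source.  "rb" and "offset"
 * are hypothetical; start and size must respect rb->page_size alignment.
 *
 *     if (ram_block_discard_range(rb, offset, rb->page_size)) {
 *         // discard failed; the range may still be backed by stale data
 *     }
 */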

#endif