/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

/* NOTE(review): presumably the nesting depth of open memory transactions,
 * with commits deferred until it returns to zero — confirm against the
 * transaction begin/commit functions elsewhere in this file. */
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

/* All registered MemoryListeners. */
static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

/* Every AddressSpace in the system. */
static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

/* Cache of generated FlatViews, keyed by their root MemoryRegion
 * (see generate_memory_topology()). */
static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;   /* first address covered by the range */
    Int128 size;    /* length; the range is the half-open [start, start+size) */
};

63
static AddrRange addrrange_make(Int128 start, Int128 size)
A
Avi Kivity 已提交
64 65 66 67 68 69
{
    return (AddrRange) { start, size };
}

/* Two ranges are equal iff both start and size match. */
static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    if (!int128_eq(r1.start, r2.start)) {
        return false;
    }
    return int128_eq(r1.size, r2.size);
}

73
static Int128 addrrange_end(AddrRange r)
A
Avi Kivity 已提交
74
{
75
    return int128_add(r.start, r.size);
A
Avi Kivity 已提交
76 77
}

78
static AddrRange addrrange_shift(AddrRange range, Int128 delta)
A
Avi Kivity 已提交
79
{
80
    int128_addto(&range.start, delta);
A
Avi Kivity 已提交
81 82 83
    return range;
}

84 85 86 87 88 89
static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

A
Avi Kivity 已提交
90 91
static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
92 93
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
A
Avi Kivity 已提交
94 95 96 97
}

/* Overlap of @r1 and @r2; only meaningful when the ranges intersect. */
static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 lo = int128_max(r1.start, r2.start);
    Int128 hi = int128_min(addrrange_end(r1), addrrange_end(r2));

    return addrrange_make(lo, int128_sub(hi, lo));
}

enum ListenerDirection { Forward, Reverse };

/* Invoke _callback(_listener, _args...) on every registered listener,
 * walking the global list in the requested direction and skipping
 * listeners that do not provide the callback. */
#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* As above, but only for listeners attached to address space _as, and
 * passing _section as the first callback argument. */
#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)

/* One coalesced-MMIO range, linked off its owning MemoryRegion. */
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

/* An ioeventfd registration: guest accesses to @addr (optionally only
 * those writing @data, when @match_data is set) signal notifier @e. */
struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

/* Strict ordering on ioeventfds: compare by start address, then size,
 * then the match_data flag, then (when matching) the data value, and
 * finally the notifier pointer.  Also used, via
 * memory_region_ioeventfd_equal(), to test equality. */
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else  if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    /* Addresses and data compare equal; fall back to the notifier. */
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

/* Equality in terms of the ordering above: neither element precedes the
 * other. */
static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    if (memory_region_ioeventfd_before(a, b)) {
        return false;
    }
    return !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;           /* region backing this range */
    hwaddr offset_in_region;    /* where the range starts inside @mr */
    AddrRange addr;             /* absolute guest-physical extent */
    uint8_t dirty_log_mask;     /* snapshot of the region's dirty-log mask */
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;        /* for deferred destruction via call_rcu() */
    unsigned ref;               /* refcount; last unref frees after RCU grace */
    FlatRange *ranges;          /* sorted, disjoint ranges */
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;  /* lookup structure for this view */
    MemoryRegion *root;         /* region the view was rendered from */
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

/* Build a MemoryRegionSection describing flat range @fr of view @fv. */
static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

A
Avi Kivity 已提交
256 257 258 259
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
260
        && a->offset_in_region == b->offset_in_region
261
        && a->romd_mode == b->romd_mode
262
        && a->readonly == b->readonly;
A
Avi Kivity 已提交
263 264
}

265
static FlatView *flatview_new(MemoryRegion *mr_root)
A
Avi Kivity 已提交
266
{
267 268 269
    FlatView *view;

    view = g_new0(FlatView, 1);
270
    view->ref = 1;
271 272
    view->root = mr_root;
    memory_region_ref(mr_root);
273 274

    return view;
A
Avi Kivity 已提交
275 276 277 278 279 280 281 282 283
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    /* Grow the array geometrically when full (minimum 10 slots). */
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    /* Shift the tail up by one slot to open a hole at @pos. */
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    /* The view holds a reference on each range's region. */
    memory_region_ref(range->mr);
    ++view->nr;
}

/* Free @view: its dispatch structure, the reference held on each range's
 * region, the ranges array, the reference on the root region, and finally
 * the view itself. */
static void flatview_destroy(FlatView *view)
{
    unsigned idx;

    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (idx = 0; idx < view->nr; idx++) {
        memory_region_unref(view->ranges[idx].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

/* Take a reference on @view.  Returns false if the refcount had already
 * reached zero (the view is being destroyed), in which case the caller
 * must not use it. */
static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

/* Drop a reference on @view; the last reference schedules destruction
 * after an RCU grace period, since concurrent readers may still hold
 * the view through address_space_to_flatview(). */
static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        call_rcu(view, flatview_destroy, rcu);
    }
}

/* Current flat view of @as; caller must be in an RCU read-side critical
 * section (or otherwise hold a reference) for the result to stay valid. */
FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}

/* Dispatch structure belonging to flat view @fv. */
AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
{
    AddressSpaceDispatch *dispatch = fv->dispatch;

    return dispatch;
}

/* Dispatch structure of the address space's current flat view. */
AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
{
    FlatView *fv = address_space_to_flatview(as);

    return flatview_to_dispatch(fv);
}

/* Two flat ranges can be merged iff they are adjacent in guest-physical
 * space, back the same region contiguously, and agree on all attributes. */
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        /* Absorb every mergeable successor of range i into it... */
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        /* ...then close the gap left by the absorbed ranges [i, j). */
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

/* True iff accesses to @mr must be performed big-endian, taking both the
 * target's and the device's declared endianness into account. */
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

/* True iff the device's declared endianness is the opposite of the
 * target's, so data must be byte-swapped on the way through. */
static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

/* Byte-swap *data in place when the region's endianness differs from the
 * target's; sizes other than 1/2/4/8 are a programming error. */
static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (!memory_region_wrong_endianness(mr)) {
        return;
    }
    switch (size) {
    case 1:
        break;
    case 2:
        *data = bswap16(*data);
        break;
    case 4:
        *data = bswap32(*data);
        break;
    case 8:
        *data = bswap64(*data);
        break;
    default:
        abort();
    }
}

407 408 409 410 411 412 413 414 415 416 417 418 419 420
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

421 422 423 424 425 426 427 428
static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

/* Read one naturally-sized unit via the legacy old_mmio callback table and
 * merge it into *value at bit position @shift under @mask. */
static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    /* ctz32(size) maps size 1/2/4/8 to table index 0/1/2/3. */
    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

/* Read one unit via ops->read (no transaction attributes) and merge it
 * into *value at bit position @shift under @mask. */
static MemTxResult  memory_region_read_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

/* Read one unit via ops->read_with_attrs, propagating the callback's
 * MemTxResult, and merge it into *value at @shift under @mask. */
static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

/* Extract one unit from *value (at @shift under @mask) and write it via
 * the legacy old_mmio callback table. */
static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    /* ctz32(size) maps size 1/2/4/8 to table index 0/1/2/3. */
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

/* Extract one unit from *value (at @shift under @mask) and write it via
 * ops->write (no transaction attributes). */
static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

/* Extract one unit from *value (at @shift under @mask) and write it via
 * ops->write_with_attrs, returning the callback's MemTxResult. */
static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

/* Split an access of @size bytes at @addr into pieces the device accepts
 * (clamped to [access_size_min, access_size_max], defaulting to [1, 4]),
 * invoking @access_fn once per piece with the shift/mask needed to
 * assemble the pieces into *value in the right byte order.  Returns the
 * OR of all partial MemTxResults. */
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      MemTxResult (*access_fn)
                                                  (MemoryRegion *mr,
                                                   hwaddr addr,
                                                   uint64_t *value,
                                                   unsigned size,
                                                   unsigned shift,
                                                   uint64_t mask,
                                                   MemTxAttrs attrs),
                                      MemoryRegion *mr,
                                      MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        /* Big endian: the lowest address holds the most significant piece. */
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}

630 631
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
632 633
    AddressSpace *as;

634 635
    while (mr->container) {
        mr = mr->container;
636
    }
637 638 639 640
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
641
    }
642
    return NULL;
643 644
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    /* @base becomes the absolute start of @mr; read-only-ness is
     * inherited from every enclosing region. */
    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        /* Redirect to the aliased region, shifting @base so that the
         * aliased contents land at this region's address. */
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        /* Skip existing ranges that end before our current position. */
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            /* Fill the gap before existing range i. */
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        /* Existing range i obscures us; advance past the overlap. */
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    /* Whatever is left extends past every existing range. */
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

/* Follow whole-region aliases down to the region that actually provides
 * the contents; used as the FlatView cache key so that address spaces
 * whose roots alias the same region share one view. */
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->alias && !mr->alias_offset &&
           int128_ge(mr->size, mr->alias->size)) {
        /* The alias is included in its entirety.  Use it as
         * the "real" root, so that we can share more FlatViews.
         */
        mr = mr->alias;
    }

    return mr;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        /* Render the whole 2^64 address space, starting unclipped and
         * writable; render_memory_region() narrows as it recurses. */
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    /* Build the dispatch structure from the flattened ranges. */
    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    /* Cache the view under its root region, replacing any stale entry. */
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

/* Reconcile the ioeventfd set registered with listeners for @as: walk the
 * sorted old and new arrays in lockstep and emit eventfd_del/eventfd_add
 * listener callbacks for entries present in only one of them.
 */
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            /* Only in the old set: delete. */
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            /* Only in the new set: add. */
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            /* Present in both sets: unchanged, advance past it. */
            ++iold;
            ++inew;
        }
    }
}

/* Return the current FlatView of @as with an extra reference held.
 * The caller must release it with flatview_unref().
 */
static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

/* Rebuild as->ioeventfds from the current flat view: collect every
 * region-level ioeventfd that intersects its FlatRange, translate it to
 * absolute addresses, then diff against the previous set via
 * address_space_add_del_ioeventfds().
 */
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            /* Shift the region-relative fd range into absolute
             * address-space coordinates.
             */
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                          ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    /* The old array has been diffed against; replace it. */
    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

/* One pass of the old-view/new-view diff.  Called twice per update:
 * first with adding=false (emits region_del for vanished ranges, in
 * Reverse order), then with adding=true (emits region_add/region_nop and
 * log_start/log_stop for new or retained ranges).  Both views' ranges
 * are sorted by address, so a lockstep merge finds the differences.
 */
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                /* Dirty-logging bits newly set: start logging. */
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                /* Dirty-logging bits cleared: stop logging. */
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968
static void flatviews_init(void)
{
    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
}

/* Throw away every cached FlatView and re-render one view per distinct
 * flatview root among the registered address spaces.
 */
static void flatviews_reset(void)
{
    AddressSpace *as;

    /* Dropping the table releases the table's reference on each view. */
    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (!g_hash_table_lookup(flat_views, physmr)) {
            generate_memory_topology(physmr);
        }
    }
}

/* Point @as at the (already rendered) FlatView cached for its root and
 * notify the address space's listeners of the differences.
 */
static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    /* flatviews_reset() must already have rendered a view for physmr. */
    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    /* Keep old_view alive across the listener passes below; matched by
     * the second flatview_unref() at the end of this function.
     */
    if (old_view) {
        flatview_ref(old_view);
    }

    /* Reference owned by as->current_map after the atomic_rcu_set(). */
    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        /* With no previous view, diff against an empty one. */
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL.  */
    atomic_rcu_set(&as->current_map, new_view);
    /* Drop the reference that as->current_map used to hold. */
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    /* Drop the extra reference taken at the top of this function. */
    if (old_view) {
        flatview_unref(old_view);
    }
}

A
Avi Kivity 已提交
1013 1014
void memory_region_transaction_begin(void)
{
1015
    qemu_flush_coalesced_mmio_buffer();
A
Avi Kivity 已提交
1016 1017 1018 1019 1020
    ++memory_region_transaction_depth;
}

/* Close one nesting level of a memory transaction.  When the outermost
 * level is closed, re-render the flat views and propagate any pending
 * topology or ioeventfd updates to all address spaces.  Must be called
 * with the iothread (BQL) lock held.
 */
void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            /* Re-render all FlatViews before notifying listeners. */
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            /* Only the ioeventfd set changed; no topology re-render. */
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
   }
}

/* Destructor for regions with no backing storage: nothing to free. */
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

/* Destructor for RAM-backed regions: release the region's RAMBlock. */
static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

/* True for characters that must be escaped in a QOM child property name:
 * path separator, brackets, and the escape character itself.
 */
static bool memory_region_need_escape(char c)
{
    switch (c) {
    case '/':
    case '[':
    case '\\':
    case ']':
        return true;
    default:
        return false;
    }
}

/* Return a freshly-allocated copy of @name with every character that
 * memory_region_need_escape() flags rewritten as "\xHH".  If nothing
 * needs escaping, a plain duplicate is returned.  Caller frees.
 */
static char *memory_region_escape_name(const char *name)
{
    static const char hex[] = "0123456789abcdef";
    const char *src;
    char *result, *dst;
    size_t out_len = 0;

    /* First pass: compute the output length ("\xHH" costs 4 bytes). */
    for (src = name; *src; src++) {
        out_len += memory_region_need_escape(*src) ? 4 : 1;
    }
    if (out_len == (size_t)(src - name)) {
        /* Nothing to escape: hand back a plain copy. */
        return g_memdup(name, out_len + 1);
    }

    result = g_malloc(out_len + 1);
    for (src = name, dst = result; *src; src++) {
        uint8_t byte = *src;

        if (memory_region_need_escape(byte)) {
            *dst++ = '\\';
            *dst++ = 'x';
            *dst++ = hex[byte >> 4];
            byte = hex[byte & 15];
        }
        *dst++ = byte;
    }
    *dst = 0;
    return result;
}

/* Common initialization shared by all memory_region_init_* variants:
 * record size/name/owner and attach the region as a QOM child of its
 * owner (or of /machine/unattached when @owner is NULL).
 */
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    /* UINT64_MAX is the conventional encoding for "the whole 2^64". */
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        /* "[*]" lets QOM auto-number same-named siblings. */
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        /* The child property now holds the only reference. */
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

/* Initialize @mr as a plain (container) memory region of @size bytes,
 * owned by @owner and registered as a QOM object.
 */
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

1128 1129
static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
1130 1131 1132 1133
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

1134
    visit_type_uint64(v, name, &value, errp);
1135 1136
}

/* QOM getter for the "container" link property: visits the canonical
 * QOM path of the containing region, or "" if the region has no
 * container.
 */
static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    /* Only the canonical path was allocated; the "" literal was not. */
    if (mr->container) {
        g_free(path);
    }
}

/* Link-property resolver: follow "container" to the parent region. */
static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    return OBJECT(MEMORY_REGION(obj)->container);
}

1161 1162 1163
static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
1164 1165 1166 1167
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

1168
    visit_type_int32(v, name, &value, errp);
1169 1170
}

1171 1172
static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
1173 1174 1175 1176
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

1177
    visit_type_uint64(v, name, &value, errp);
1178 1179
}

/* QOM instance_init for TYPE_MEMORY_REGION: set defaults and register
 * the read-only "container", "addr", "priority" and "size" properties.
 */
static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    /* Until configured otherwise, all accesses are unassigned. */
    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

1214 1215 1216 1217 1218 1219 1220
static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

/* Read handler for accesses that hit no memory region: reports the
 * access to the current CPU (if any) and reads as zero.
 */
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

/* Write handler for accesses that hit no memory region: reports the
 * access to the current CPU (if any); the data is discarded.
 */
static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

/* valid.accepts callback for unassigned memory: reject every access, so
 * dispatch falls back to the unassigned read/write handlers above.
 */
static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

/* Default ops installed by memory_region_initfn(). */
const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Read accessor for RAM device regions: load directly from the host
 * mapping at mr->ram_block->host + addr, sized per @size (1/2/4/8).
 * Unrecognized sizes return all-ones.
 */
static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

/* Write accessor for RAM device regions: store directly into the host
 * mapping at mr->ram_block->host + addr, sized per @size (1/2/4/8).
 * Unrecognized sizes are silently ignored.
 */
static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

/* Ops for RAM device regions (memory_region_init_ram_device_ptr):
 * host-endian, 1-8 byte accesses, unaligned allowed, backed directly by
 * host RAM via the accessors above.
 */
static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

/* Check whether an access of @size bytes at @addr is acceptable to @mr:
 * enforce the region's alignment rule, then (if the region provides a
 * valid.accepts callback) query it for each clamped sub-access.
 */
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* No accepts callback: everything (aligned) is valid. */
    if (!mr->ops->valid.accepts) {
        return true;
    }

    /* Unset min/max default to 1 and 4 bytes respectively. */
    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    /* Clamp @size into [min, max] and probe each sub-access. */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

/* Perform the actual read on @mr, choosing the accessor matching the
 * ops flavour: ->read, ->read_with_attrs, or the old_mmio fallback.
 * The result is accumulated into *pval with size adjustment.
 */
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        /* Legacy old_mmio callbacks: fixed 1..4 byte access window. */
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

/* Top-level read dispatch: validate the access (returning
 * MEMTX_DECODE_ERROR and the unassigned-read value on failure), perform
 * it, then fix up device endianness in *pval.
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}
A
Avi Kivity 已提交
1401

/* Return true if an eventfd was signalled */
/* Scan @mr's registered ioeventfds for one matching this write (address,
 * size, and - when match_data is set - the data value); signal it and
 * short-circuit the write if found.
 */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                    hwaddr addr,
                                                    uint64_t data,
                                                    unsigned size,
                                                    MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

/* Top-level write dispatch: validate the access (MEMTX_DECODE_ERROR on
 * failure), adjust endianness, give ioeventfds a chance to consume the
 * write (when KVM-side eventfds are unavailable), then invoke the
 * matching write accessor flavour.
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    /* Without in-kernel eventfd matching, emulate it here. */
    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        /* Legacy old_mmio callbacks: fixed 1..4 byte access window. */
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
1467
                           Object *owner,
A
Avi Kivity 已提交
1468 1469 1470 1471 1472
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
1473
    memory_region_init(mr, owner, name, size);
1474
    mr->ops = ops ? ops : &unassigned_mem_ops;
A
Avi Kivity 已提交
1475
    mr->opaque = opaque;
1476
    mr->terminates = true;
A
Avi Kivity 已提交
1477 1478
}

/* Initialize @mr as host-allocated RAM of @size bytes.  Not registered
 * for migration (hence "nomigrate").  On allocation failure *errp is
 * set.
 */
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    /* TCG needs to track self-modifying code in RAM. */
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

/* Initialize @mr as RAM that can later grow up to @max_size; @resized
 * is invoked when the backing RAMBlock changes length.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    /* TCG needs to track self-modifying code in RAM. */
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

#ifdef __linux__
/* Initialize @mr as RAM backed by the file at @path (mmap), optionally
 * shared with other processes.  Linux-only.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    /* TCG needs to track self-modifying code in RAM. */
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

/* Initialize @mr as RAM backed by an already-open file descriptor @fd,
 * optionally shared with other processes.  Linux-only.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
    /* TCG needs to track self-modifying code in RAM. */
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif
A
/* Initialize @mr as RAM backed by caller-provided host memory @ptr
 * (which must be non-NULL and remain valid for the region's lifetime).
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    /* TCG needs to track self-modifying code in RAM. */
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

/* Like memory_region_init_ram_ptr(), but mark the region as a RAM
 * device and route accesses through ram_device_mem_ops (reads/writes go
 * straight to the host mapping).
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    /* The ops receive the region itself as their opaque pointer. */
    mr->opaque = mr;
}
A
/* Initialize @mr as an alias: accesses are redirected to @orig starting
 * at @offset within it.  The alias has no storage of its own.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

1587 1588 1589 1590 1591
/* Initialize @mr as read-only RAM (ROM) with freshly allocated backing
 * memory.  "nomigrate" variant: the RAM contents are not registered
 * for migration.  On allocation failure, @errp is set.
 */
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

1602 1603 1604 1605 1606 1607 1608
/* Initialize @mr as a ROM device: reads hit the allocated RAM backing
 * directly (while in ROMD mode), writes go through @ops.  "nomigrate"
 * variant: contents are not registered for migration.
 */
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
}

1620 1621 1622
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
1623
                              Object *owner,
A
Avi Kivity 已提交
1624 1625 1626
                              const char *name,
                              uint64_t size)
{
1627
    struct IOMMUMemoryRegion *iommu_mr;
1628 1629
    struct MemoryRegion *mr;

1630 1631
    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
1632 1633
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
A
Avi Kivity 已提交
1634
    mr->terminates = true;  /* then re-forwards */
1635 1636
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
A
Avi Kivity 已提交
1637 1638
}

P
Peter Crosthwaite 已提交
1639
/* QOM instance_finalize hook: tear down a memory region that has no
 * remaining references — detach all subregions, run the destructor,
 * and free owned resources.
 */
static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references, so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

P
Paolo Bonzini 已提交
1665 1666
/* Return the QOM owner of @mr (its object parent), or NULL if it has
 * no owner.
 */
Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

P
Paolo Bonzini 已提交
1671 1672
void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA sensibly.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

/* Drop the reference taken by memory_region_ref() (unrefs the owner;
 * no-op for NULL or ownerless regions).
 */
void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

A
Avi Kivity 已提交
1695 1696
/* Return the region size in bytes.  A full 2^64-byte region is
 * reported as UINT64_MAX since the true size does not fit in 64 bits.
 */
uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

1703
const char *memory_region_name(const MemoryRegion *mr)
1704
{
1705 1706 1707 1708
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
1709
    return mr->name;
1710 1711
}

1712
bool memory_region_is_ram_device(MemoryRegion *mr)
1713
{
1714
    return mr->ram_device;
1715 1716
}

1717
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
1718
{
1719
    uint8_t mask = mr->dirty_log_mask;
1720
    if (global_dirty_log && mr->ram_block) {
1721 1722 1723
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
1724 1725
}

1726 1727 1728 1729 1730
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

1731
/* Recompute the union of all registered notifier flags for @iommu_mr
 * and inform the IOMMU implementation (if it cares) when the combined
 * set changes.
 */
static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        imrc->notify_flag_changed(iommu_mr,
                                  iommu_mr->iommu_notify_flags,
                                  flags);
    }

    iommu_mr->iommu_notify_flags = flags;
}

1750 1751
/* Register IOMMU notifier @n on @mr (following aliases to the real
 * IOMMU region) and refresh the aggregated notify flags.  @n must
 * request at least one event and have a valid [start, end] range.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_register_iommu_notifier(mr->alias, n);
        return;
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

1768
/* Return the IOMMU's minimum page size via its class hook, falling
 * back to TARGET_PAGE_SIZE when the hook is not implemented.
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}

1778
/* Replay existing IOMMU mappings to notifier @n: either delegate to
 * the implementation's replay hook, or walk the region at minimum
 * page-size granularity, translating each address and notifying for
 * every present mapping.
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

1807
/* Replay existing IOMMU mappings to every registered notifier.  */
void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifier *notifier;

    IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
        memory_region_iommu_replay(iommu_mr, notifier);
    }
}

1816 1817
/* Unregister IOMMU notifier @n from @mr (following aliases) and
 * refresh the aggregated notify flags.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

1830 1831
/* Deliver a single IOMMU TLB event to one notifier, filtering by the
 * notifier's registered address range and by whether it asked for MAP
 * or UNMAP events.
 */
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry->iova + entry->addr_mask + 1 ||
        notifier->end < entry->iova) {
        return;
    }

    /* A mapping with any access permission is a MAP event; a
     * permission-less entry is an UNMAP event.
     */
    if (entry->perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    if (notifier->notifier_flags & request_flags) {
        notifier->notify(notifier, entry);
    }
}

1855
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
1856 1857 1858 1859
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;

1860
    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
1861

1862
    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
1863
        memory_region_notify_one(iommu_notifier, &entry);
1864
    }
1865 1866
}

A
Avi Kivity 已提交
1867 1868
/* Enable or disable dirty logging on @mr for @client (only
 * DIRTY_MEMORY_VGA is supported).  Enable requests are counted, so
 * the log mask only changes on the 0<->nonzero transitions of the
 * VGA logging count.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

A
Avi Kivity 已提交
1885 1886
/* Return whether any page in [addr, addr+size) of the RAM-backed
 * region @mr is dirty for @client.
 */
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
                                         size, client);
}

A
Avi Kivity 已提交
1893 1894
/* Mark [addr, addr+size) of the RAM-backed region @mr dirty for all
 * clients currently in its effective dirty-log mask.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

1902 1903 1904
/* Atomically test whether [addr, addr+size) is dirty for @client and
 * clear the dirty state; returns the previous dirty status.
 */
bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_test_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}

1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926
/* Take a snapshot of the dirty bitmap for [addr, addr+size) of the
 * RAM-backed region @mr for @client, clearing the live bits in the
 * process.  Caller owns the returned snapshot.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}

/* Query a previously taken dirty-bitmap snapshot for the range
 * [addr, addr+size) of the RAM-backed region @mr.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}
1927

A
Avi Kivity 已提交
1928 1929
/* Invoke every listener's log_sync callback for each flat range that
 * maps @mr, so accelerator-side dirty bits are folded into the global
 * dirty bitmap.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MemoryRegionSection mrs = section_from_flat_range(fr, view);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

/* Toggle the read-only flag of @mr inside a memory transaction so the
 * topology is re-rendered; no-op when the flag is unchanged.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

1966
/* Switch a ROM device region between ROMD mode (direct reads) and
 * MMIO mode, re-rendering the topology when the mode changes.
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

A
Avi Kivity 已提交
1976 1977
/* Clear the dirty state of [addr, addr+size) for @client on the
 * RAM-backed region @mr (the test result is discarded).
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

1984 1985
/* Return the file descriptor backing @mr's RAM block, resolving
 * aliases first.  Alias chains are walked under the RCU read lock.
 */
int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}
1997

A
Avi Kivity 已提交
1998 1999
/* Return a host pointer into @mr's RAM backing, resolving aliases and
 * accumulating their offsets.  The alias chain is walked under the
 * RCU read lock.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}

2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026
/* Map a host pointer back to the memory region whose RAM block
 * contains it, storing the offset within the block in *@offset.
 * Returns NULL when @ptr is not inside any RAM block.
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

2027 2028 2029 2030 2031
/* Return the ram_addr_t offset of @mr's RAM block, or
 * RAM_ADDR_INVALID when the region has no RAM backing.
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

2032 2033
/* Resize the RAM block backing @mr to @newsize bytes; errors from the
 * underlying resize are reported through @errp.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}

2039
/* Re-publish @mr's coalesced-MMIO ranges into address space @as: for
 * every flat range backed by @mr, tear down the old coalescing and
 * re-add the intersection of each coalesced sub-range with the flat
 * range, shifted into address-space coordinates.
 */
static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .fv = view,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                /* Translate the coalesced range from region-relative to
                 * address-space-relative coordinates.
                 */
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

2076 2077 2078 2079 2080 2081 2082 2083 2084
/* Re-publish @mr's coalesced-MMIO ranges in every address space.  */
static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

A
Avi Kivity 已提交
2085 2086 2087
/* Coalesce MMIO for the entire region: drop any existing coalesced
 * sub-ranges, then add one covering [0, size).
 */
void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
A
Avi Kivity 已提交
2092
                                  hwaddr offset,
A
Avi Kivity 已提交
2093 2094
                                  uint64_t size)
{
2095
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
A
Avi Kivity 已提交
2096

2097
    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
A
Avi Kivity 已提交
2098 2099
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
2100
    memory_region_set_flush_coalesced(mr);
A
Avi Kivity 已提交
2101 2102 2103 2104 2105
}

/* Remove all coalesced-MMIO sub-ranges from @mr, flushing any pending
 * coalesced accesses first, and re-publish the (now empty) set only
 * if something was actually removed.
 */
void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135
/* Force the coalesced-MMIO buffer to be flushed before accesses to
 * @mr.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

/* Stop forcing flushes before accesses, unless the region still has
 * coalesced sub-ranges of its own (those require flushing).
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

2136 2137 2138 2139 2140 2141 2142 2143 2144 2145
/* Require the big QEMU lock to be held for accesses to @mr.  */
void memory_region_set_global_locking(MemoryRegion *mr)
{
    mr->global_locking = true;
}

/* Allow accesses to @mr without the big QEMU lock.  */
void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}

P
Pavel Fedin 已提交
2146 2147
static bool userspace_eventfd_warning;

A
Avi Kivity 已提交
2148
void memory_region_add_eventfd(MemoryRegion *mr,
A
Avi Kivity 已提交
2149
                               hwaddr addr,
A
Avi Kivity 已提交
2150 2151 2152
                               unsigned size,
                               bool match_data,
                               uint64_t data,
2153
                               EventNotifier *e)
A
Avi Kivity 已提交
2154 2155
{
    MemoryRegionIoeventfd mrfd = {
2156 2157
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
A
Avi Kivity 已提交
2158 2159
        .match_data = match_data,
        .data = data,
2160
        .e = e,
A
Avi Kivity 已提交
2161 2162 2163
    };
    unsigned i;

P
Pavel Fedin 已提交
2164 2165 2166 2167 2168 2169 2170
    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

2171 2172 2173
    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
2174
    memory_region_transaction_begin();
A
Avi Kivity 已提交
2175 2176 2177 2178 2179 2180
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
2181
    mr->ioeventfds = g_realloc(mr->ioeventfds,
A
Avi Kivity 已提交
2182 2183 2184 2185
                                  sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
2186
    ioeventfd_update_pending |= mr->enabled;
2187
    memory_region_transaction_commit();
A
Avi Kivity 已提交
2188 2189 2190
}

void memory_region_del_eventfd(MemoryRegion *mr,
A
Avi Kivity 已提交
2191
                               hwaddr addr,
A
Avi Kivity 已提交
2192 2193 2194
                               unsigned size,
                               bool match_data,
                               uint64_t data,
2195
                               EventNotifier *e)
A
Avi Kivity 已提交
2196 2197
{
    MemoryRegionIoeventfd mrfd = {
2198 2199
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
A
Avi Kivity 已提交
2200 2201
        .match_data = match_data,
        .data = data,
2202
        .e = e,
A
Avi Kivity 已提交
2203 2204 2205
    };
    unsigned i;

2206 2207 2208
    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
2209
    memory_region_transaction_begin();
A
Avi Kivity 已提交
2210 2211 2212 2213 2214 2215 2216 2217 2218
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
2219
    mr->ioeventfds = g_realloc(mr->ioeventfds,
A
Avi Kivity 已提交
2220
                                  sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
2221
    ioeventfd_update_pending |= mr->enabled;
2222
    memory_region_transaction_commit();
A
Avi Kivity 已提交
2223 2224
}

2225
/* Insert @subregion into its container's subregion list, keeping the
 * list sorted by descending priority, inside a memory transaction.
 * Takes a reference on @subregion for the container.
 */
static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

2245 2246 2247 2248
/* Attach @subregion to container @mr at @offset; @subregion must not
 * already have a container.
 */
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}
A
Avi Kivity 已提交
2254 2255

void memory_region_add_subregion(MemoryRegion *mr,
A
Avi Kivity 已提交
2256
                                 hwaddr offset,
A
Avi Kivity 已提交
2257 2258 2259 2260 2261 2262 2263
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
A
Avi Kivity 已提交
2264
                                         hwaddr offset,
A
Avi Kivity 已提交
2265
                                         MemoryRegion *subregion,
2266
                                         int priority)
A
Avi Kivity 已提交
2267 2268 2269 2270 2271 2272 2273 2274
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

/* Detach @subregion from container @mr (asserts on mismatch) and drop
 * the container's reference, inside a memory transaction.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

/* Enable or disable @mr, re-rendering the topology when the state
 * actually changes.
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
A
Avi Kivity 已提交
2294

2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310
/* Resize @mr to @size bytes (UINT64_MAX means the full 2^64-byte
 * range); the topology is re-rendered only if the size changes.
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 new_size = (size == UINT64_MAX) ? int128_2_64()
                                           : int128_make64(size);

    if (int128_eq(new_size, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = new_size;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

2311
/* Re-insert @mr into its container after one of its mapping
 * attributes (e.g. address) changed; holds a temporary reference so
 * del_subregion's unref cannot destroy the region mid-update.
 */
static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}
2325

2326 2327 2328 2329 2330 2331
/* Move @mr to offset @addr within its container; no-op when the
 * address is unchanged.
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

A
Avi Kivity 已提交
2334
/* Change the offset of alias region @mr into its target, re-rendering
 * the topology when the offset actually changes.  @mr must be an
 * alias.
 */
void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

2348 2349 2350 2351 2352
/* Return the minimum alignment requirement of @mr.  */
uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365
/* bsearch() comparator: order an AddrRange key against a FlatRange;
 * returns 0 when the two ranges overlap.
 */
static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *key = addr_;
    const FlatRange *range = fr_;

    if (int128_le(addrrange_end(*key), range->addr.start)) {
        return -1;
    }
    if (int128_ge(key->start, addrrange_end(range->addr))) {
        return 1;
    }
    return 0;
}

2366
/* Binary-search @view's sorted flat ranges for one overlapping @addr;
 * returns NULL when none overlaps.
 */
static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

2372 2373 2374 2375 2376
/* True if @mr is currently attached to a container region.  */
bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container != NULL;
}

2377 2378 2379 2380 2381
/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    /* Translate (mr, addr) up the container chain into root-absolute
     * coordinates.
     */
    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    /* bsearch may land on any overlapping range; rewind to the first
     * one that intersects the query.
     */
    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.fv = view;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}

/* Locate the section covering (@mr, @addr, @size) in @mr's address
 * space; on success a reference is taken on the returned region
 * (caller must unref).
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}

2437 2438 2439 2440 2441 2442 2443 2444 2445 2446
/* True if some region other than @container itself is mapped at
 * @addr within @container's address space.
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *found;

    rcu_read_lock();
    found = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();

    return found != NULL && found != container;
}

2447
void memory_global_dirty_log_sync(void)
2448
{
2449 2450
    MemoryListener *listener;
    AddressSpace *as;
2451
    FlatView *view;
2452 2453
    FlatRange *fr;

2454 2455 2456 2457
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
2458
        as = listener->address_space;
2459 2460
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
2461
            if (fr->dirty_log_mask) {
2462 2463
                MemoryRegionSection mrs = section_from_flat_range(fr, view);

2464 2465
                listener->log_sync(listener, &mrs);
            }
2466 2467
        }
        flatview_unref(view);
2468 2469 2470
    }
}

J
Jay Zhou 已提交
2471 2472
static VMChangeStateEntry *vmstate_change;

2473 2474
void memory_global_dirty_log_start(void)
{
J
Jay Zhou 已提交
2475 2476 2477 2478 2479
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

2480
    global_dirty_log = true;
2481

2482
    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
2483 2484 2485 2486 2487

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
2488 2489
}

J
Jay Zhou 已提交
2490
static void memory_global_dirty_log_do_stop(void)
2491 2492
{
    global_dirty_log = false;
2493 2494 2495 2496 2497 2498

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

2499
    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
2500 2501
}

J
Jay Zhou 已提交
2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528
static void memory_vm_change_state_handler(void *opaque, int running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_do_stop();

        if (vmstate_change) {
            qemu_del_vm_change_state_handler(vmstate_change);
            vmstate_change = NULL;
        }
    }
}

/*
 * Disable global dirty logging.  If the VM is not currently running,
 * the stop is postponed until it resumes, via a one-shot VM
 * state-change handler (at most one is ever installed).
 */
void memory_global_dirty_log_stop(void)
{
    if (runstate_is_running()) {
        memory_global_dirty_log_do_stop();
        return;
    }

    /* VM paused: defer the stop until the next transition to running. */
    if (!vmstate_change) {
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler, NULL);
    }
}

2529 2530 2531
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
2532
    FlatView *view;
2533 2534
    FlatRange *fr;

2535 2536 2537
    if (listener->begin) {
        listener->begin(listener);
    }
2538
    if (global_dirty_log) {
2539 2540 2541
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
2542
    }
2543

2544
    view = address_space_get_flatview(as);
2545
    FOR_EACH_FLAT_RANGE(fr, view) {
2546 2547
        MemoryRegionSection section = {
            .mr = fr->mr,
2548
            .fv = view,
2549
            .offset_within_region = fr->offset_in_region,
2550
            .size = fr->addr.size,
2551
            .offset_within_address_space = int128_get64(fr->addr.start),
2552
            .readonly = fr->readonly,
2553
        };
2554 2555 2556
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
2557 2558 2559
        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
2560
    }
2561 2562 2563
    if (listener->commit) {
        listener->commit(listener);
    }
2564
    flatview_unref(view);
2565 2566
}

2567
void memory_listener_register(MemoryListener *listener, AddressSpace *as)
2568
{
2569 2570
    MemoryListener *other = NULL;

2571
    listener->address_space = as;
2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }
2584

2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597
    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

2598
    listener_add_address_space(listener, as);
2599 2600 2601 2602
}

/*
 * Unregister @listener from both listener lists.  Safe to call on a
 * listener that was never registered (address_space is then NULL).
 */
void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    /* Mark as unregistered so a second call is a no-op. */
    listener->address_space = NULL;
}

2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720
bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
{
    void *host;
    unsigned size = 0;
    unsigned offset = 0;
    Object *new_interface;

    if (!mr || !mr->ops->request_ptr) {
        return false;
    }

    /*
     * Avoid an update if the request_ptr call
     * memory_region_invalidate_mmio_ptr which seems to be likely when we use
     * a cache.
     */
    memory_region_transaction_begin();

    host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);

    if (!host || !size) {
        memory_region_transaction_commit();
        return false;
    }

    new_interface = object_new("mmio_interface");
    qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
    qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
    qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
    qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
    qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
    object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);

    memory_region_transaction_commit();
    return true;
}

/* One pending MMIO-pointer invalidation request, handed to
 * async_safe_run_on_cpu().  Requests normally come from a small static
 * pool; when the pool is exhausted a heap-allocated one is used. */
typedef struct MMIOPtrInvalidate {
    MemoryRegion *mr;    /* region whose mapped pointer is stale */
    hwaddr offset;       /* start of the stale window */
    unsigned size;       /* length of the stale window */
    int busy;            /* pool slot in use (set via atomic_cmpxchg) */
    int allocated;       /* 1 if heap-allocated -> g_free after use */
} MMIOPtrInvalidate;

#define MAX_MMIO_INVALIDATE 10
/* Static pool of reusable invalidation requests. */
static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];

/* Runs on a vCPU (via async_safe_run_on_cpu) to tear down the
 * mmio_interface device that was overlaid on a region by
 * memory_region_request_mmio_ptr(), then release or recycle the
 * request structure. */
static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
                                                 run_on_cpu_data data)
{
    MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
    MemoryRegion *mr = invalidate_data->mr;
    hwaddr offset = invalidate_data->offset;
    unsigned size = invalidate_data->size;
    MemoryRegionSection section = memory_region_find(mr, offset, size);

    qemu_mutex_lock_iothread();

    /* Reset dirty so this doesn't happen later. */
    cpu_physical_memory_test_and_clear_dirty(offset, size, 1);

    /* section.mr != mr means some other region (presumably the
     * mmio_interface overlay) is mapped on top of @mr here. */
    if (section.mr != mr) {
        /* memory_region_find adds a ref on section.mr */
        memory_region_unref(section.mr);
        if (MMIO_INTERFACE(section.mr->owner)) {
            /* We found the interface just drop it. */
            object_property_set_bool(section.mr->owner, false, "realized",
                                     NULL);
            object_unref(section.mr->owner);
            object_unparent(section.mr->owner);
        }
    }

    qemu_mutex_unlock_iothread();

    /* Heap-allocated requests are freed; pool slots are marked free. */
    if (invalidate_data->allocated) {
        g_free(invalidate_data);
    } else {
        invalidate_data->busy = 0;
    }
}

/*
 * Schedule asynchronous teardown of a host pointer previously handed
 * out for @mr at [@offset, @offset + @size).  A request object is
 * taken from the static pool when possible, otherwise heap-allocated,
 * and processed later on a vCPU by memory_region_do_invalidate_mmio_ptr().
 */
void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
                                       unsigned size)
{
    MMIOPtrInvalidate *req = NULL;
    size_t slot;

    /* Try to claim a free slot from the static pool. */
    for (slot = 0; slot < MAX_MMIO_INVALIDATE; slot++) {
        MMIOPtrInvalidate *candidate = &mmio_ptr_invalidate_list[slot];

        if (atomic_cmpxchg(&candidate->busy, 0, 1) == 0) {
            req = candidate;
            break;
        }
    }

    /* Pool exhausted: fall back to a one-shot heap allocation. */
    if (req == NULL) {
        req = g_malloc0(sizeof(MMIOPtrInvalidate));
        req->allocated = 1;
    }

    req->mr = mr;
    req->offset = offset;
    req->size = size;

    async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
                          RUN_ON_CPU_HOST_PTR(req));
}

2721
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
A
Avi Kivity 已提交
2722
{
2723
    memory_region_ref(root);
2724
    memory_region_transaction_begin();
2725
    as->root = root;
2726
    as->current_map = NULL;
2727 2728
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
2729
    QTAILQ_INIT(&as->listeners);
2730
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
2731
    as->name = g_strdup(name ? name : "anonymous");
2732 2733
    memory_region_update_pending |= root->enabled;
    memory_region_transaction_commit();
A
Avi Kivity 已提交
2734
}
A
Avi Kivity 已提交
2735

2736
static void do_address_space_destroy(AddressSpace *as)
A
Avi Kivity 已提交
2737
{
2738
    assert(QTAILQ_EMPTY(&as->listeners));
2739

2740
    flatview_unref(as->current_map);
2741
    g_free(as->name);
2742
    g_free(as->ioeventfds);
2743
    memory_region_unref(as->root);
A
Avi Kivity 已提交
2744 2745
}

2746 2747
void address_space_destroy(AddressSpace *as)
{
2748 2749
    MemoryRegion *root = as->root;

2750 2751 2752 2753 2754 2755 2756 2757 2758 2759
    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
2760
    as->root = root;
2761 2762 2763
    call_rcu(as, do_address_space_destroy, rcu);
}

2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778
static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

B
Blue Swirl 已提交
2779 2780 2781 2782
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
2783
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
B
Blue Swirl 已提交
2784 2785
};

2786
typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;
B
Blue Swirl 已提交
2787

2788 2789 2790 2791
#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "

B
Blue Swirl 已提交
2792 2793
static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
A
Avi Kivity 已提交
2794
                           hwaddr base,
2795
                           MemoryRegionListHead *alias_print_queue)
B
Blue Swirl 已提交
2796
{
2797 2798
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
B
Blue Swirl 已提交
2799 2800
    const MemoryRegion *submr;
    unsigned int i;
2801
    hwaddr cur_start, cur_end;
B
Blue Swirl 已提交
2802

2803
    if (!mr) {
B
Blue Swirl 已提交
2804 2805 2806 2807
        return;
    }

    for (i = 0; i < level; i++) {
2808
        mon_printf(f, MTREE_INDENT);
B
Blue Swirl 已提交
2809 2810
    }

2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822
    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of memory region. This should never
     * happen normally. When it happens, we dump something to warn the
     * user who is observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        mon_printf(f, "[DETECTED OVERFLOW!] ");
    }

B
Blue Swirl 已提交
2823 2824 2825 2826 2827
    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
2828
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
P
Paolo Bonzini 已提交
2829
            if (ml->mr == mr->alias) {
B
Blue Swirl 已提交
2830 2831 2832 2833 2834 2835 2836
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
2837
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
B
Blue Swirl 已提交
2838
        }
2839
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
2840
                   " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
2841
                   "-" TARGET_FMT_plx "%s\n",
2842
                   cur_start, cur_end,
J
Jan Kiszka 已提交
2843
                   mr->priority,
2844
                   memory_region_type((MemoryRegion *)mr),
2845 2846
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
B
Blue Swirl 已提交
2847
                   mr->alias_offset,
2848
                   mr->alias_offset + MR_SIZE(mr->size),
2849
                   mr->enabled ? "" : " [disabled]");
B
Blue Swirl 已提交
2850
    } else {
2851
        mon_printf(f,
2852
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
2853
                   cur_start, cur_end,
J
Jan Kiszka 已提交
2854
                   mr->priority,
2855
                   memory_region_type((MemoryRegion *)mr),
2856 2857
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
B
Blue Swirl 已提交
2858
    }
2859 2860 2861

    QTAILQ_INIT(&submr_print_queue);

B
Blue Swirl 已提交
2862
    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
2863 2864
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
2865
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
2866 2867 2868
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
2869
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
2870 2871 2872 2873 2874
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
2875
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
2876 2877 2878
        }
    }

2879
    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
2880
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
2881 2882 2883
                       alias_print_queue);
    }

2884
    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
2885
        g_free(ml);
B
Blue Swirl 已提交
2886 2887 2888
    }
}

2889 2890 2891 2892 2893 2894 2895 2896 2897
struct FlatViewInfo {
    fprintf_function mon_printf;
    void *f;
    int counter;
    bool dispatch_tree;
};

static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
2898
{
2899 2900 2901 2902 2903
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    fprintf_function p = fvi->mon_printf;
    void *f = fvi->f;
2904 2905 2906
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923
    int i;
    AddressSpace *as;

    p(f, "FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace*, i);
        p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
        if (as->root->alias) {
            p(f, ", alias %s", memory_region_name(as->root->alias));
        }
        p(f, "\n");
    }

    p(f, " Root memory region: %s\n",
      view->root ? memory_region_name(view->root) : "(none)");
2924 2925

    if (n <= 0) {
2926
        p(f, MTREE_INDENT "No rendered FlatView\n\n");
2927 2928 2929 2930 2931
        return;
    }

    while (n--) {
        mr = range->mr;
2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949
        if (range->offset_in_region) {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr),
              range->offset_in_region);
        } else {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s\n",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr));
        }
2950 2951 2952
        range++;
    }

2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968
#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(p, f, view->dispatch, view->root);
    }
#endif

    p(f, "\n");
}

/* GHRFunc used with g_hash_table_foreach_remove(): drop the per-view
 * references taken while gathering the table in mtree_info().
 * Always returns true so every entry is removed. */
static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}

2974 2975
void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
                bool dispatch_tree)
B
Blue Swirl 已提交
2976 2977 2978
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
2979
    AddressSpace *as;
B
Blue Swirl 已提交
2980

2981
    if (flatview) {
2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992
        FlatView *view;
        struct FlatViewInfo fvi = {
            .mon_printf = mon_printf,
            .f = f,
            .counter = 0,
            .dispatch_tree = dispatch_tree
        };
        GArray *fv_address_spaces;
        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);

        /* Gather all FVs in one table */
2993
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2994 2995 2996 2997 2998 2999 3000 3001 3002
            view = address_space_get_flatview(as);

            fv_address_spaces = g_hash_table_lookup(views, view);
            if (!fv_address_spaces) {
                fv_address_spaces = g_array_new(false, false, sizeof(as));
                g_hash_table_insert(views, view, fv_address_spaces);
            }

            g_array_append_val(fv_address_spaces, as);
3003
        }
3004 3005 3006 3007 3008 3009 3010 3011

        /* Print */
        g_hash_table_foreach(views, mtree_print_flatview, &fvi);

        /* Free */
        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
        g_hash_table_unref(views);

3012 3013 3014
        return;
    }

B
Blue Swirl 已提交
3015 3016
    QTAILQ_INIT(&ml_head);

3017
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
G
Gerd Hoffmann 已提交
3018 3019 3020
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
        mon_printf(f, "\n");
3021 3022
    }

B
Blue Swirl 已提交
3023
    /* print aliased regions */
3024
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
G
Gerd Hoffmann 已提交
3025 3026 3027
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
        mon_printf(f, "\n");
B
Blue Swirl 已提交
3028 3029
    }

3030
    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
A
Avi Kivity 已提交
3031
        g_free(ml);
B
Blue Swirl 已提交
3032 3033
    }
}
P
Peter Crosthwaite 已提交
3034

3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

/*
 * Initialise @mr as migratable ROM of @size bytes: allocate via the
 * nomigrate variant, then register the RAM block for migration under
 * a name derived from @owner (which must be NULL or a DeviceState).
 */
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    Error *local_err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
    /* DEVICE() will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    vmstate_register_ram(mr, DEVICE(owner));
}

/* Initialise @mr as a migratable ROM device (reads go to RAM, writes
 * go through @ops): allocate via the nomigrate variant, then register
 * the RAM block for migration under a name derived from @owner. */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

P
Peter Crosthwaite 已提交
3110 3111 3112 3113 3114 3115 3116 3117
static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

3118 3119 3120
static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
3121
    .class_size         = sizeof(IOMMUMemoryRegionClass),
3122 3123
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
3124
    .abstract           = true,
3125 3126
};

P
Peter Crosthwaite 已提交
3127 3128 3129
static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
3130
    type_register_static(&iommu_memory_region_info);
P
Peter Crosthwaite 已提交
3131 3132 3133
}

type_init(memory_register_types)