memory.c 98.0 KB
Newer Older
A
Avi Kivity 已提交
1 2 3 4 5 6 7 8 9 10 11
/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
12 13
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
A
Avi Kivity 已提交
14 15
 */

P
Peter Maydell 已提交
16
#include "qemu/osdep.h"
17
#include "qapi/error.h"
18
#include "cpu.h"
19 20
#include "exec/memory.h"
#include "exec/address-spaces.h"
21
#include "qapi/visitor.h"
22
#include "qemu/bitops.h"
P
Pavel Fedin 已提交
23
#include "qemu/error-report.h"
24
#include "qemu/qemu-print.h"
25
#include "qom/object.h"
26
#include "trace-root.h"
A
Avi Kivity 已提交
27

28
#include "exec/memory-internal.h"
29
#include "exec/ram_addr.h"
P
Pavel Fedin 已提交
30
#include "sysemu/kvm.h"
31
#include "sysemu/sysemu.h"
32
#include "sysemu/tcg.h"
33
#include "hw/qdev-properties.h"
34
#include "migration/vmstate.h"
35

36 37
//#define DEBUG_UNASSIGNED

38 39
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
40
static bool ioeventfd_update_pending;
41 42
static bool global_dirty_log = false;

43
static QTAILQ_HEAD(, MemoryListener) memory_listeners
44
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);
A
Avi Kivity 已提交
45

46 47 48
static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

49 50
static GHashTable *flat_views;

A
Avi Kivity 已提交
51 52
typedef struct AddrRange AddrRange;

A
Avi Kivity 已提交
53
/*
54
 * Note that signed integers are needed for negative offsetting in aliases
A
Avi Kivity 已提交
55 56
 * (large MemoryRegion::alias_offset).
 */
A
Avi Kivity 已提交
57
struct AddrRange {
58 59
    Int128 start;
    Int128 size;
A
Avi Kivity 已提交
60 61
};

62
/* Build an AddrRange covering @size bytes starting at @start. */
static AddrRange addrrange_make(Int128 start, Int128 size)
{
    AddrRange r;

    r.start = start;
    r.size = size;
    return r;
}

/* Two ranges are equal iff both start and size match. */
static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    if (!int128_eq(r1.start, r2.start)) {
        return false;
    }
    return int128_eq(r1.size, r2.size);
}

72
/* Exclusive upper bound of @r, i.e. start + size. */
static Int128 addrrange_end(AddrRange r)
{
    Int128 end = int128_add(r.start, r.size);

    return end;
}

77
/* Return a copy of @range translated by @delta; the size is unchanged. */
static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    AddrRange shifted = range;

    int128_addto(&shifted.start, delta);
    return shifted;
}

83 84 85 86 87 88
/* True iff @addr lies in the half-open interval [start, start + size). */
static bool addrrange_contains(AddrRange range, Int128 addr)
{
    if (int128_lt(addr, range.start)) {
        return false;
    }
    return int128_lt(addr, addrrange_end(range));
}

A
Avi Kivity 已提交
89 90
/* Ranges overlap iff one contains the other's start point. */
static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    if (addrrange_contains(r1, r2.start)) {
        return true;
    }
    return addrrange_contains(r2, r1.start);
}

/* Overlap of @r1 and @r2; callers must ensure the ranges intersect. */
static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 lo = int128_max(r1.start, r2.start);
    Int128 hi = int128_min(addrrange_end(r1), addrrange_end(r2));

    return addrrange_make(lo, int128_sub(hi, lo));
}

102 103
enum ListenerDirection { Forward, Reverse };

104
/*
 * Invoke @_callback(listener, ##_args) on every listener on the global
 * memory_listeners list, skipping listeners that do not implement the
 * callback.  @_direction selects the walk order: Forward = head to
 * tail, Reverse = tail to head.  Any other direction aborts.
 */
#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

128
/*
 * Per-AddressSpace variant of MEMORY_LISTENER_CALL_GLOBAL: invoke
 * @_callback(listener, _section, ##_args) on every listener registered
 * with @_as (its ->listeners list, linked via link_as), in the order
 * selected by @_direction.  Listeners without the callback are skipped.
 */
#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

P
Paolo Bonzini 已提交
152
/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
/*
 * Build a MemoryRegionSection describing flat range @fr within the
 * current flat view of @as, then dispatch @callback to all of @as's
 * listeners in direction @dir.
 */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)
159

A
Avi Kivity 已提交
160 161 162 163 164
/* One address range on a coalesced-I/O list. */
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

A
Avi Kivity 已提交
165 166 167 168
/* An event notifier bound to a guest-visible address range. */
struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;    /* if set, @data participates in ordering/matching */
    uint64_t data;
    EventNotifier *e;   /* notifier signalled for this range */
};

172 173
/*
 * Strict ordering over MemoryRegionIoeventfd, used to keep ioeventfd
 * arrays sorted and comparable.  Keys are compared in sequence: start
 * address, size, match_data flag, then (only when both sides match on
 * data) the data value, and finally the notifier pointer.  Returns true
 * iff @a sorts strictly before @b.
 */
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        /* Both sides demand data matching: compare the payloads. */
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    /* Last resort tie-breaker: the notifier pointer itself. */
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

202 203
/* Equality derived from the total order: @a equals @b iff neither
 * sorts before the other. */
static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    bool a_first = memory_region_ioeventfd_before(a, b);
    bool b_first = memory_region_ioeventfd_before(b, a);

    return !a_first && !b_first;
}

A
Avi Kivity 已提交
209 210 211
/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;    /* where this range starts within mr */
    AddrRange addr;             /* absolute guest address range */
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
    int has_coalesced_range;    /* nesting count, see flat_range_coalesced_io_* */
};

/* Iterate @var over every FlatRange of @view. */
#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

224
/* Convert flat range @fr (owned by flat view @fv) into the equivalent
 * MemoryRegionSection.  The section does not take a reference on the
 * region; the FlatRange keeps it alive. */
static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}

A
Avi Kivity 已提交
238 239 240 241
/* Two flat ranges are equal when they map the same slice of the same
 * region with identical attributes.  dirty_log_mask is deliberately
 * not compared, matching the original behavior. */
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    if (a->mr != b->mr || a->offset_in_region != b->offset_in_region) {
        return false;
    }
    if (!addrrange_equal(a->addr, b->addr)) {
        return false;
    }
    return a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile;
}

248
/* Allocate an empty FlatView rooted at @mr_root, with refcount 1.
 * Takes a reference on @mr_root; it is dropped in flatview_destroy(). */
static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        /* Grow geometrically (and at least to 10 slots) to amortize
         * reallocation cost; g_realloc aborts on failure. */
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    /* Shift the tail up one slot to open a hole at @pos. */
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    /* The view holds a reference on every region it maps;
     * released in flatview_destroy(). */
    memory_region_ref(range->mr);
    ++view->nr;
}

/* Free @view and everything it owns: the dispatch structure, the
 * per-range region references taken in flatview_insert(), the ranges
 * array, and the reference on the root region. */
static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

294
/* Try to take a reference on @view.  Fails (returns false) when the
 * count has already dropped to zero, i.e. the view is being torn down
 * and must not be resurrected. */
static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

299
/* Drop a reference on @view.  The final release defers the actual
 * destruction past an RCU grace period so that concurrent lockless
 * readers remain safe. */
void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

308 309
/* True iff @r2 directly follows @r1 both in guest address space and
 * within the underlying region, with identical attributes — i.e. the
 * two flat ranges can be collapsed into one. */
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile;
}

P
Peter Crosthwaite 已提交
321
/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        /* Grow range i over every directly mergeable successor ... */
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        /* ... then close the gap left by the absorbed ranges [i, j). */
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

341 342 343 344 345 346 347 348 349
/* True when multi-byte accesses to @mr must be issued most-significant
 * part first.  NATIVE_ENDIAN regions follow the target's endianness. */
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

P
Paolo Bonzini 已提交
350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379
/* True when @mr's declared endianness is the opposite of the target's,
 * so data crossing the boundary needs byte swapping. */
static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

/* Byte-swap the @size-byte value at @data when @mr's endianness is
 * opposite to the target's.  Single bytes need no swap; any size other
 * than 1/2/4/8 is a programming error and aborts. */
static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

380
/* Merge the partial read result @tmp into @*value: mask it, then move
 * it into place by @shift bits (negative @shift moves right). */
static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    uint64_t masked = tmp & mask;

    *value |= (shift >= 0) ? (masked << shift) : (masked >> -shift);
}

/* Extract the partial value to write from @*value: move it down by
 * @shift bits (negative @shift moves left), then apply @mask. */
static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t field = (shift >= 0) ? (*value >> shift) : (*value << -shift);

    return field & mask;
}

407 408 409 410 411 412 413 414 415 416 417 418 419 420
/* Translate @offset inside @mr into an absolute address by summing the
 * offsets of @mr and every enclosing container up to the root. */
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *level;
    hwaddr abs_addr = offset;

    for (level = mr; level; level = level->container) {
        abs_addr += level->addr;
    }

    return abs_addr;
}

421 422 423 424 425 426 427 428
/* Index of the currently executing CPU, or -1 outside CPU context. */
static int get_cpu_index(void)
{
    return current_cpu ? current_cpu->cpu_index : -1;
}

429
/* Perform one device read via mr->ops->read and merge the result into
 * @*value at @shift under @mask.  Always reports MEMTX_OK; @attrs is
 * unused here and present only so the signature matches the
 * _with_attrs accessor used by access_with_adjusted_size(). */
static MemTxResult  memory_region_read_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

455 456 457 458
/* Like memory_region_read_accessor(), but goes through
 * mr->ops->read_with_attrs so the device sees the transaction
 * attributes, and propagates the device's MemTx result. */
static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

482 483 484 485
/* Extract one partial value from @*value (at @shift under @mask) and
 * write it to the device via mr->ops->write.  Always reports MEMTX_OK;
 * @attrs exists only for signature compatibility with the _with_attrs
 * accessor. */
static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

507 508 509 510
/* Like memory_region_write_accessor(), but goes through
 * mr->ops->write_with_attrs and returns the device's MemTx result. */
static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

/*
 * Split a @size-byte access at @addr into pieces the device accepts:
 * the piece size is @size clamped into [access_size_min,
 * access_size_max] (defaulting to [1, 4] when a bound is zero).  Each
 * piece is issued through @access_fn with the shift/mask that places it
 * correctly within @*value for the region's endianness; the pieces'
 * MemTx results are OR-ed together and returned.
 */
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      MemTxResult (*access_fn)
                                                  (MemoryRegion *mr,
                                                   hwaddr addr,
                                                   uint64_t *value,
                                                   unsigned size,
                                                   signed shift,
                                                   uint64_t mask,
                                                   MemTxAttrs attrs),
                                      MemoryRegion *mr,
                                      MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        /* Big endian: the first (lowest-address) piece is the most
         * significant, so its shift is the largest. */
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        /* Little endian: shift grows with the address offset. */
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}

576 577
/* Find the AddressSpace rooted at @mr's outermost container, or NULL
 * if no registered address space uses that root. */
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *aspace;
    MemoryRegion *root = mr;

    /* Only a root region (one without a container) can anchor an
     * address space, so climb to the top first. */
    while (root->container) {
        root = root->container;
    }

    QTAILQ_FOREACH(aspace, &address_spaces, address_spaces_link) {
        if (aspace->root == root) {
            return aspace;
        }
    }

    return NULL;
}

A
Avi Kivity 已提交
591 592 593 594 595
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 *
 * @base is the absolute guest address of @mr's container; @clip bounds
 * the rendering; @readonly/@nonvolatile accumulate the attributes
 * inherited from enclosing regions and aliases.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        /* Redirect rendering to the alias target, adjusting @base so
         * that target offsets land at this alias's guest addresses. */
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;
    fr.has_coalesced_range = 0;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            /* Fill the gap before existing range i. */
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        /* Skip the part obscured by existing range i. */
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    /* Whatever is left extends past all existing ranges. */
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

684 685
/* Walk down from @mr to the effective root used for rendering a flat
 * view, following fully-covering aliases and lone enabled children so
 * that equivalent topologies can share a FlatView.  Returns NULL when
 * nothing visible would be rendered. */
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        /* More than one enabled child: cannot descend. */
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way. This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                /* No enabled children: the view would be empty. */
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

A
Avi Kivity 已提交
728
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        /* Render the whole 2^64 guest address space, clipped by nothing. */
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false);
    }
    flatview_simplify(view);

    /* Build the address dispatch table used for lookups on this view. */
    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    /* Cache the view by root region so equal topologies can share it. */
    g_hash_table_replace(flat_views, mr, view);

    return view;
}

A
Avi Kivity 已提交
755 756 757 758 759 760 761
/* Notify listeners about ioeventfds that appear in @fds_new but not in
 * @fds_old (eventfd_add) and ones that disappeared (eventfd_del).
 * Both arrays must be sorted by memory_region_ioeventfd_before(). */
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            /* Present only in the old set: deleted. */
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            /* Present only in the new set: added. */
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            /* Present in both: unchanged, skip. */
            ++iold;
            ++inew;
        }
    }
}

804
/* Return the current flat view of @as with a reference held; the
 * caller must release it with flatview_unref(). */
FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

A
Avi Kivity 已提交
819 820
/* Recompute the list of ioeventfds visible in @as's flat view and tell
 * listeners (via add_del) which ones appeared or disappeared, then install
 * the new list in the AddressSpace.
 */
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            /* Translate the region-relative ioeventfd address into the
             * address space, using this flat range's mapping.
             */
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            /* Only the part of the region covered by this flat range
             * contributes an ioeventfd.
             */
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                          ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    /* Diff against the previous list; listeners see only the changes. */
    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

/* Drop one coalesced-MMIO reference from @fr; only the transition to zero
 * notifies listeners that the coalesced range is gone.
 */
static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    if (fr->has_coalesced_range == 0 || --fr->has_coalesced_range != 0) {
        return;
    }

    MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                  int128_get64(fr->addr.start),
                                  int128_get64(fr->addr.size));
}

/* Take one coalesced-MMIO reference on @fr; only the first reference
 * notifies listeners of the coalesced subranges that intersect @fr.
 */
static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;
    AddrRange shifted;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    if (fr->has_coalesced_range++) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        /* Translate the region-relative coalesced range into the address
         * space and clip it to this flat range.
         */
        shifted = addrrange_shift(cmr->addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
        if (addrrange_intersects(shifted, fr->addr)) {
            shifted = addrrange_intersection(shifted, fr->addr);
            MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                          int128_get64(shifted.start),
                                          int128_get64(shifted.size));
        }
    }
}

/* Walk @old_view and @new_view (both sorted by address) in lockstep and
 * notify @as's listeners about the differences.  Called twice per update:
 * once with adding=false to emit region_del/coalesced_io_del for vanished
 * ranges, then with adding=true to emit region_add/region_nop and the
 * log_start/log_stop transitions, so removals always precede additions.
 */
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        /* NULL means the corresponding view is exhausted. */
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                /* Retire coalesced MMIO before deleting the range itself. */
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                /* Dirty-logging clients that appear in new but not old. */
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                /* Dirty-logging clients that appear in old but not new. */
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}

static void flatviews_init(void)
{
967 968
    static FlatView *empty_view;

969 970 971 972 973 974
    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
975 976 977 978 979 980 981 982
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable.  */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007
}

/* Throw away every cached FlatView and re-render one unique view per
 * distinct address-space root.
 */
static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (!g_hash_table_lookup(flat_views, physmr)) {
            generate_memory_topology(physmr);
        }
    }
}

/* Switch @as to the pre-rendered FlatView for its current root, notifying
 * the address space's listeners of the delta between old and new views.
 * Caller must hold the BQL and have rendered the view into flat_views.
 */
static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    /* Extra reference keeps old_view alive while listeners run below. */
    if (old_view) {
        flatview_ref(old_view);
    }

    /* Reference held on behalf of as->current_map. */
    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        /* An empty stand-in view lets the diff passes treat "no previous
         * view" as "previous view with zero ranges".
         */
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        /* Deletions first, then additions (see update_topology_pass). */
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL.  */
    atomic_rcu_set(&as->current_map, new_view);
    /* Drop the reference that as->current_map used to hold on old_view. */
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    /* Drop the temporary reference taken at the top of this function. */
    if (old_view) {
        flatview_unref(old_view);
    }
}

/* Render (if necessary) and install the FlatView for @as's current root. */
static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *root_mr;

    flatviews_init();
    root_mr = memory_region_get_flatview_root(as->root);
    if (g_hash_table_lookup(flat_views, root_mr) == NULL) {
        generate_memory_topology(root_mr);
    }
    address_space_set_flatview(as);
}

/* Open a (nestable) memory-map transaction; changes are deferred until the
 * matching memory_region_transaction_commit() drops the depth to zero.
 */
void memory_region_transaction_begin(void)
{
    /* Flush buffered coalesced MMIO before the map changes under it. */
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

/* Close one level of transaction nesting; when the outermost level closes,
 * re-render flat views, update every address space, and run the listeners'
 * begin/commit hooks.  Must be called with the BQL held.
 */
void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            /* Topology changed: rebuild all FlatViews first. */
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                /* ioeventfds depend on the flat view just installed. */
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            /* Only ioeventfds changed; no need to re-render views. */
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

/* Default destructor: the region owns no backing storage to release. */
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

/* Destructor for RAM-backed regions: release the associated RAMBlock. */
static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

/* True if @c must be \xNN-escaped to be safe inside a QOM path component. */
static bool memory_region_need_escape(char c)
{
    switch (c) {
    case '/':
    case '[':
    case '\\':
    case ']':
        return true;
    default:
        return false;
    }
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
       return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

/* Common initialization shared by all memory_region_init_*() variants:
 * record size/name/owner and, when named, attach the region as a QOM
 * child of @owner (or of /machine/unattached when owner is NULL).
 */
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    /* UINT64_MAX is the conventional encoding for a full 2^64 region. */
    mr->size = (size == UINT64_MAX) ? int128_2_64() : int128_make64(size);
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped = memory_region_escape_name(name);
        /* "[*]" lets QOM auto-number regions sharing the same name. */
        char *prop_name = g_strdup_printf("%s[*]", escaped);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, prop_name, OBJECT(mr), &error_abort);
        /* The child property now holds the only long-term reference. */
        object_unref(OBJECT(mr));
        g_free(prop_name);
        g_free(escaped);
    }
}

/* Initialize @mr as a pure container region of @size bytes owned by
 * @owner; concrete backing (RAM, I/O ops, alias) is set by the caller.
 */
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

/* QOM getter for the read-only "addr" property. */
static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    uint64_t addr = MEMORY_REGION(obj)->addr;

    visit_type_uint64(v, name, &addr, errp);
}

/* QOM getter for the read-only "container" link property: visits the
 * canonical QOM path of the containing region, or "" when unparented.
 */
static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path;

    /* Always heap-allocate: the previous code aliased a string literal
     * through a non-const gchar * and freed it only conditionally, which
     * is fragile if a visitor ever replaces the pointer in place.
     */
    path = mr->container ? object_get_canonical_path(OBJECT(mr->container))
                         : g_strdup("");
    visit_type_str(v, name, &path, errp);
    g_free(path);
}

/* QOM link resolver for "container": returns the containing region. */
static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

/* QOM getter for the read-only "priority" property. */
static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    int32_t prio = MEMORY_REGION(obj)->priority;

    visit_type_int32(v, name, &prio, errp);
}

/* QOM getter for the read-only "size" property. */
static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    uint64_t sz = memory_region_size(MEMORY_REGION(obj));

    visit_type_uint64(v, name, &sz, errp);
}

/* QOM instance_init for TYPE_MEMORY_REGION: set safe defaults and
 * register the read-only introspection properties.
 */
static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    /* Accesses hit unassigned_mem_ops until a real backend is attached. */
    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    /* All properties below are read-only (no setters). */
    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

/* QOM instance_init for IOMMU memory regions: mark the region so the
 * memory core routes accesses through IOMMU translation.
 */
static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

/* Read handler for unmapped addresses: report the access to the CPU (which
 * may raise an exception) and return 0.
 */
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        /* Distinguish instruction fetches from data loads. */
        bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
        cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
    }
    return 0;
}

/* Write handler for unmapped addresses: report the access to the CPU
 * (which may raise an exception) and discard the data.
 */
static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

/* .valid.accepts for unassigned regions: reject every access so the
 * dispatchers fall back to the unassigned read/write handlers.
 */
static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

/* Ops installed on regions with no backend: accepts() always rejects, so
 * reads return 0 and writes are dropped with a decode error.
 */
const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Read from a ram-device region by dereferencing the host mapping at the
 * requested width; unknown widths yield all-ones.
 */
static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    const void *host = mr->ram_block->host + addr;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(const uint8_t *)host;
        break;
    case 2:
        data = *(const uint16_t *)host;
        break;
    case 4:
        data = *(const uint32_t *)host;
        break;
    case 8:
        data = *(const uint64_t *)host;
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

/* Write to a ram-device region by storing through the host mapping at the
 * requested width; unknown widths are ignored.
 */
static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;
    void *host = mr->ram_block->host + addr;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)host = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)host = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)host = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)host = data;
        break;
    }
}

/* Ops for ram-device regions (e.g. vfio-mapped device RAM): accesses go
 * through explicit host loads/stores rather than the fast RAM path.
 */
static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};

/* Check whether an access of @size bytes at @addr is permitted by @mr's
 * .valid constraints: alignment, and the accepts() callback applied per
 * access chunk (clamped to [min_access_size, max_access_size], with
 * defaults of 1 and 4 when unset).
 */
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    int access_size, i;
    int access_size_min = mr->ops->valid.min_access_size ?
        mr->ops->valid.min_access_size : 1;
    int access_size_max = mr->ops->valid.max_access_size ?
        mr->ops->valid.max_access_size : 4;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* No accepts() callback means everything is allowed. */
    if (!mr->ops->valid.accepts) {
        return true;
    }

    /* Query accepts() once per chunk of the (possibly split) access. */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write, attrs)) {
            return false;
        }
    }

    return true;
}

/* Perform a validated read on @mr, splitting/combining the access to fit
 * the implementation's access-size limits.  Picks the plain accessor when
 * .read is set, otherwise the MemTxAttrs-aware .read_with_attrs path.
 */
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    /* The accessors OR partial results into *pval, so start from zero. */
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    }
}

/* Top-level MMIO read entry point: validate the access, perform it, then
 * byte-swap the result into the device's declared endianness.
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult res;

    /* Invalid accesses behave like reads from unassigned memory. */
    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    res = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return res;
}
/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                    hwaddr addr,
                                                    uint64_t data,
                                                    unsigned size,
                                                    MemTxAttrs attrs)
{
    /* Candidate built from the write; match_data/e are filled per entry
     * below so the comparison only keys on addr/size (and data when the
     * entry requires matching data).
     */
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

/* Top-level MMIO write entry point: validate, byte-swap the data into the
 * device's endianness, service matching ioeventfds (when KVM does not
 * handle them in-kernel), and finally dispatch to the device's write op.
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    /* Invalid accesses behave like writes to unassigned memory. */
    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    /* With KVM eventfds the kernel signals the notifier; otherwise a
     * write that hits an ioeventfd is fully handled here.
     */
    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
1513
                           Object *owner,
A
Avi Kivity 已提交
1514 1515 1516 1517 1518
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
1519
    memory_region_init(mr, owner, name, size);
1520
    mr->ops = ops ? ops : &unassigned_mem_ops;
A
Avi Kivity 已提交
1521
    mr->opaque = opaque;
1522
    mr->terminates = true;
A
Avi Kivity 已提交
1523 1524
}

/* Initialize @mr as private (non-shared) RAM; thin wrapper over the
 * shared-capable variant.  The region is not registered for migration.
 */
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
}

/* Initialize @mr as RAM (optionally MAP_SHARED) backed by a freshly
 * allocated RAMBlock.  On allocation failure the region is unparented and
 * the error propagated to @errp.
 */
void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp)
{
    Error *local_err = NULL;

    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, share, mr, &local_err);
    /* TCG needs dirty tracking for self-modifying code detection. */
    if (tcg_enabled()) {
        mr->dirty_log_mask = 1 << DIRTY_MEMORY_CODE;
    } else {
        mr->dirty_log_mask = 0;
    }
    if (local_err != NULL) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, local_err);
    }
}

/* Initialize @mr as RAM that can later grow up to @max_size; @resized is
 * invoked when the block's used length changes.  On allocation failure the
 * region is unparented and the error propagated to @errp.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    Error *local_err = NULL;

    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, &local_err);
    /* TCG needs dirty tracking for self-modifying code detection. */
    if (tcg_enabled()) {
        mr->dirty_log_mask = 1 << DIRTY_MEMORY_CODE;
    } else {
        mr->dirty_log_mask = 0;
    }
    if (local_err != NULL) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, local_err);
    }
}

#ifdef CONFIG_POSIX
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
1585
                                      uint64_t align,
1586
                                      uint32_t ram_flags,
1587 1588
                                      const char *path,
                                      Error **errp)
1589
{
1590
    Error *err = NULL;
1591 1592 1593 1594
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
1595
    mr->align = align;
1596
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
1597
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1598 1599 1600 1601 1602
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
A
Avi Kivity 已提交
1603
}

/* Initialize @mr as RAM mapped from an open file descriptor @fd (POSIX
 * only), shared when @share is set.  On failure the region is unparented
 * and the error propagated to @errp.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    Error *local_err = NULL;

    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
                                           share ? RAM_SHARED : 0,
                                           fd, &local_err);
    /* TCG needs dirty tracking for self-modifying code detection. */
    if (tcg_enabled()) {
        mr->dirty_log_mask = 1 << DIRTY_MEMORY_CODE;
    } else {
        mr->dirty_log_mask = 0;
    }
    if (local_err != NULL) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, local_err);
    }
}
#endif

void memory_region_init_ram_ptr(MemoryRegion *mr,
1631
                                Object *owner,
A
Avi Kivity 已提交
1632 1633 1634 1635
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
1636
    memory_region_init(mr, owner, name, size);
A
Avi Kivity 已提交
1637
    mr->ram = true;
1638
    mr->terminates = true;
1639
    mr->destructor = memory_region_destructor_ram;
1640
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1641 1642 1643

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
F
Fam Zheng 已提交
1644
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
A
Avi Kivity 已提交
1645 1646
}

/* Initialize @mr as a "ram device" region backed by @ptr: RAM-like for
 * mapping purposes, but MMIO-dispatched via ram_device_mem_ops so accesses
 * go through explicit host loads/stores.  @ptr must be non-NULL.
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
    mr->destructor = memory_region_destructor_ram;
    /* TCG needs dirty tracking for self-modifying code detection. */
    if (tcg_enabled()) {
        mr->dirty_log_mask = 1 << DIRTY_MEMORY_CODE;
    } else {
        mr->dirty_log_mask = 0;
    }
    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

/*
 * Initialize @mr as an alias that redirects accesses to @orig,
 * starting @offset bytes into the original region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

1678 1679 1680 1681 1682
/*
 * Initialize @mr as read-only RAM (ROM) without registering it for
 * migration.  On allocation failure the half-initialized region is
 * unparented and the error propagated to the caller.
 */
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        /* Neutralize the region so finalization is safe, then report. */
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

1699 1700 1701 1702 1703 1704 1705
/*
 * Initialize @mr as a ROM device (reads hit RAM directly while in ROMD
 * mode, writes go through @ops), without migration registration.
 */
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    Error *err = NULL;
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false,  mr, &err);
    if (err) {
        /* Same failure path as memory_region_init_rom_nomigrate. */
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

1723 1724 1725
/*
 * Initialize an IOMMU memory region.  @_iommu_mr points to uninitialized
 * storage of @instance_size bytes for a QOM object of type @mrtypename
 * (a subtype of TYPE_IOMMU_MEMORY_REGION).
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    /* Instantiate the QOM object in caller-provided storage first. */
    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

P
Peter Crosthwaite 已提交
1742
/* QOM instance_finalize: tear down a MemoryRegion once its refcount hits 0. */
static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references, so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    /* Region-type-specific teardown (e.g. freeing the RAM block). */
    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    /* name was allocated by object_get_canonical_path_component(). */
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

P
Paolo Bonzini 已提交
1768 1769
/* Return the region's owner, i.e. its QOM parent object. */
Object *memory_region_owner(MemoryRegion *mr)
{
    return OBJECT(mr)->parent;
}

P
Paolo Bonzini 已提交
1774 1775
void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA sensibly.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

/* Drop the reference taken by memory_region_ref() on the region's owner. */
void memory_region_unref(MemoryRegion *mr)
{
    if (!mr || !mr->owner) {
        /* NULL regions and ownerless regions are never ref-counted. */
        return;
    }
    object_unref(mr->owner);
}

A
Avi Kivity 已提交
1798 1799
/*
 * Return the region's size in bytes.  A full 2^64-byte region cannot be
 * represented in a uint64_t, so it is reported as UINT64_MAX.
 */
uint64_t memory_region_size(MemoryRegion *mr)
{
    Int128 sz = mr->size;

    return int128_eq(sz, int128_2_64()) ? UINT64_MAX : int128_get64(sz);
}

1806
/*
 * Return the region's name, lazily deriving it from the QOM canonical
 * path component on first use.  The const cast is deliberate: the cached
 * name is the only mutation and is idempotent.
 */
const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

1815
/* True if @mr was initialized with memory_region_init_ram_device_ptr(). */
bool memory_region_is_ram_device(MemoryRegion *mr)
{
    if (mr->ram_device) {
        return true;
    }
    return false;
}

1820
/*
 * Return the effective dirty-logging client mask for @mr: the region's
 * own mask, plus DIRTY_MEMORY_MIGRATION while global dirty logging is
 * active (RAM-backed regions only).
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

1829 1830 1831 1832 1833
/* True if dirty logging is currently in effect for @client on @mr. */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    uint8_t mask = memory_region_get_dirty_log_mask(mr);

    return (mask & (1 << client)) != 0;
}

1834
/*
 * Recompute the union of all registered notifier flags for @iommu_mr and,
 * if it changed, inform the IOMMU implementation via notify_flag_changed.
 */
static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    /* OR together the flags requested by every registered notifier. */
    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        imrc->notify_flag_changed(iommu_mr,
                                  iommu_mr->iommu_notify_flags,
                                  flags);
    }

    iommu_mr->iommu_notify_flags = flags;
}

1853 1854
/*
 * Register @n to receive IOMMU translation-change notifications for @mr.
 * Aliases are resolved recursively so the notifier ends up on the real
 * IOMMU region.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_register_iommu_notifier(mr->alias, n);
        return;
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    /* The notifier must target a valid translation context of this IOMMU. */
    assert(n->iommu_idx >= 0 &&
           n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));

    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

1874
/*
 * Return the minimum page size supported by @iommu_mr; fall back to
 * TARGET_PAGE_SIZE when the implementation does not report one.
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->get_min_page_size) {
        return TARGET_PAGE_SIZE;
    }
    return imrc->get_min_page_size(iommu_mr);
}

1884
/*
 * Replay the current IOMMU mappings of @iommu_mr to notifier @n by walking
 * the region at its minimum page granularity and notifying every valid
 * translation.  IOMMU implementations may provide a faster custom replay.
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

1913
/* Replay current IOMMU mappings to every registered notifier of @iommu_mr. */
void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifier *notifier;

    IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
        memory_region_iommu_replay(iommu_mr, notifier);
    }
}

1922 1923
/*
 * Remove notifier @n from @mr (resolving aliases first) and recompute the
 * aggregate notifier flags.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

1936 1937
/*
 * Deliver one IOMMU TLB event to a single notifier, filtered by the
 * notifier's registered address range and requested event flags.
 */
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry->iova + entry->addr_mask ||
        notifier->end < entry->iova) {
        return;
    }

    /* A readable or writable entry is a MAP event, otherwise an UNMAP. */
    if (entry->perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    if (notifier->notifier_flags & request_flags) {
        notifier->notify(notifier, entry);
    }
}

1961
/*
 * Broadcast an IOMMU TLB event to all notifiers of @iommu_mr that are
 * registered on translation context @iommu_idx.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        if (iommu_notifier->iommu_idx == iommu_idx) {
            memory_region_notify_one(iommu_notifier, &entry);
        }
    }
}

1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988
/*
 * Query an implementation-defined attribute of @iommu_mr.  Returns
 * -EINVAL when the IOMMU does not implement attribute queries.
 */
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    return imrc->get_attr ? imrc->get_attr(iommu_mr, attr, data) : -EINVAL;
}

1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011
/*
 * Map transaction attributes to the IOMMU translation-context index.
 * IOMMUs that do not distinguish contexts use index 0.
 */
int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    return imrc->attrs_to_index ? imrc->attrs_to_index(iommu_mr, attrs) : 0;
}

/*
 * Number of translation contexts this IOMMU supports; defaults to a
 * single context when the implementation does not say otherwise.
 */
int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    return imrc->num_indexes ? imrc->num_indexes(iommu_mr) : 1;
}

A
Avi Kivity 已提交
2012 2013
/*
 * Enable or disable dirty logging for @client on @mr.  Only the VGA
 * client is supported here; requests are reference-counted so nested
 * enable/disable pairs only toggle the mask on the outermost transition.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    /* No 0<->nonzero transition: the effective state did not change. */
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

A
Avi Kivity 已提交
2030 2031
/*
 * Mark [addr, addr+size) of RAM-backed region @mr dirty for every
 * currently active logging client.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}

2039
/*
 * Invoke every listener's log_sync callback for the flat ranges that
 * belong to @mr, or for all ranges when @mr is NULL (global sync).
 */
static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
                MemoryRegionSection mrs = section_from_flat_range(fr, view);
                listener->log_sync(listener, &mrs);
            }
        }
        /* Balance the reference taken by address_space_get_flatview(). */
        flatview_unref(view);
    }
}

2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085
/*
 * Atomically snapshot and clear the dirty bitmap of @client for
 * [addr, addr+size) in @mr, after syncing outstanding dirty state.
 * Caller owns the returned snapshot.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    assert(mr->ram_block);
    memory_region_sync_dirty_bitmap(mr);
    return cpu_physical_memory_snapshot_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}

/* Test whether [addr, addr+size) of @mr was dirty in snapshot @snap. */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}

A
Avi Kivity 已提交
2086 2087
/* Change the read-only flag of @mr inside a memory transaction. */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly == readonly) {
        return;
    }
    memory_region_transaction_begin();
    mr->readonly = readonly;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

2096 2097 2098 2099 2100 2101 2102 2103 2104 2105
/* Change the non-volatile flag of @mr inside a memory transaction. */
void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
{
    if (mr->nonvolatile == nonvolatile) {
        return;
    }
    memory_region_transaction_begin();
    mr->nonvolatile = nonvolatile;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

2106
/* Toggle ROMD mode (direct-read vs. callback-read) of a ROM device. */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode == romd_mode) {
        return;
    }
    memory_region_transaction_begin();
    mr->romd_mode = romd_mode;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

A
Avi Kivity 已提交
2116 2117
/* Clear the dirty bits of @client for [addr, addr+size) in RAM region @mr. */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

2124 2125
/*
 * Return the file descriptor backing @mr's RAM block, resolving alias
 * chains under RCU so concurrent alias updates cannot be observed
 * mid-walk.
 */
int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}
2137

A
Avi Kivity 已提交
2138 2139
/*
 * Return a host pointer into @mr's RAM.  Alias chains are walked under
 * RCU, accumulating alias offsets so the pointer lands at the right
 * spot in the ultimately-targeted RAM block.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}

2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166
/*
 * Map a host pointer back to the RAM MemoryRegion containing it, storing
 * the offset within the region's RAM block in *@offset.  Returns NULL if
 * @ptr does not belong to any RAM block.
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block = qemu_ram_block_from_host(ptr, false, offset);

    return block ? block->mr : NULL;
}

2167 2168 2169 2170 2171
/* ram_addr_t offset of @mr's RAM block, or RAM_ADDR_INVALID if not RAM. */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    if (!mr->ram_block) {
        return RAM_ADDR_INVALID;
    }
    return mr->ram_block->offset;
}

2172 2173
/* Resize the resizeable RAM block backing @mr; errors go to @errp. */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}

2179
/*
 * Re-register the coalesced-MMIO ranges of @mr in address space @as by
 * deleting and re-adding them for every flat range that maps @mr.
 */
static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            flat_range_coalesced_io_del(fr, as);
            flat_range_coalesced_io_add(fr, as);
        }
    }
    flatview_unref(view);
}

2194 2195 2196 2197 2198 2199 2200 2201 2202
/* Propagate @mr's coalesced-MMIO range changes to every address space. */
static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

A
Avi Kivity 已提交
2203 2204 2205
/* Enable MMIO coalescing over the whole of @mr (replacing any prior setup). */
void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

/*
 * Add [offset, offset+size) of @mr to the set of coalesced-MMIO ranges
 * and enable flush-on-access for the region.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

/*
 * Remove all coalesced-MMIO ranges from @mr, flushing any buffered
 * coalesced MMIO first so no stale accesses remain queued.
 */
void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    /* Only touch the address spaces if something was actually removed. */
    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253
/* Make every access to @mr flush the coalesced MMIO buffer first. */
void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

/*
 * Stop flushing the coalesced MMIO buffer on access to @mr — but only if
 * the region has no coalesced ranges of its own, which still require it.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

2254 2255 2256 2257 2258
/* Allow MMIO dispatch to @mr without holding the big QEMU lock (BQL). */
void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}

P
Pavel Fedin 已提交
2259 2260
static bool userspace_eventfd_warning;

A
Avi Kivity 已提交
2261
/*
 * Register an ioeventfd on @mr: writes of @data (if @match_data) of
 * @size bytes at @addr signal @e instead of taking the normal MMIO
 * path.  The entry is inserted keeping mr->ioeventfds sorted.
 */
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    /* Warn once when KVM cannot handle the eventfd in-kernel. */
    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    /* Find the sorted insertion position. */
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                                  sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    /* Shift the tail up by one slot and drop the new entry in place. */
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

/*
 * Remove a previously registered ioeventfd from @mr.  The entry must
 * exist (asserted); the sorted array is compacted after removal.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    /* Close the gap left by the removed entry. */
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    /* +1 byte keeps the realloc size nonzero when the array empties. */
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                                  sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

2338
/*
 * Insert @subregion into its container's subregion list, keeping the
 * list sorted by descending priority, and schedule a topology update.
 */
static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    /* The container's list holds a reference to the subregion. */
    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

2358 2359 2360 2361
/*
 * Shared helper for the add_subregion variants: attach @subregion to
 * container @mr at @offset.  The subregion must not already be mapped.
 */
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}
A
Avi Kivity 已提交
2367 2368

/* Map @subregion into @mr at @offset with the default priority (0). */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

/*
 * Map @subregion into @mr at @offset with an explicit @priority; higher
 * priority wins where subregions overlap.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

/*
 * Unmap @subregion from container @mr, dropping the reference the
 * container held, inside a memory transaction.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

/* Enable or disable @mr; a disabled region is invisible in flat views. */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (mr->enabled == enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    /* Visibility changed either way, so a topology rebuild is required. */
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
A
Avi Kivity 已提交
2407

2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423
/*
 * Change the size of @mr.  UINT64_MAX is the encoding for a full
 * 2^64-byte region (see memory_region_size()).
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

2424
/*
 * Re-insert @mr into its container after a property change (e.g. a new
 * address), so the priority-ordered list and topology are refreshed.
 * The extra ref keeps @mr alive across the del/add pair.
 */
static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}
2438

2439 2440 2441 2442 2443 2444
/* Move @mr to offset @addr within its container (no-op if unchanged). */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr == mr->addr) {
        return;
    }
    mr->addr = addr;
    memory_region_readd_subregion(mr);
}

A
Avi Kivity 已提交
2447
/* Change the offset an alias region applies into its target region. */
void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

2461 2462 2463 2464 2465
/* Return the required host-address alignment of @mr's backing storage. */
uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478
/*
 * bsearch comparator: order an AddrRange key against a FlatRange.
 * Returns 0 on any overlap, so bsearch finds *a* matching range (the
 * caller rewinds to the first one).
 */
static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

2479
/* Binary-search @view's sorted ranges for one overlapping @addr. */
static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

2485 2486 2487 2488 2489
/* True if @mr is currently mapped into some container region. */
bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container != NULL;
}

2490 2491 2492 2493 2494
/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    /* Walk up to the root region, translating @addr to an absolute
     * address-space offset along the way.
     */
    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    /* bsearch found *some* overlapping range; rewind to the first one. */
    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.fv = view;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    ret.nonvolatile = fr->nonvolatile;
    return ret;
}

/*
 * Locate the section mapped at [addr, addr+size) relative to @mr.  The
 * returned section's region (if any) is referenced; the caller must
 * eventually drop it with memory_region_unref().
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        /* Take the reference while still inside the RCU section. */
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}

2551 2552 2553 2554 2555 2556 2557 2558 2559 2560
bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}

/* Synchronize the dirty bitmap for every memory region. */
void memory_global_dirty_log_sync(void)
{
    /* NULL selects all regions rather than a specific one. */
    memory_region_sync_dirty_bitmap(NULL);
}

/* Deferred "stop dirty logging" VM-state hook, armed while the VM is not
 * running; see memory_global_dirty_log_stop().
 */
static VMChangeStateEntry *vmstate_change;

/* Enable global dirty logging and notify all listeners. */
void memory_global_dirty_log_start(void)
{
    /* Cancel any pending deferred stop (see memory_global_dirty_log_stop). */
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_MEMORY_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

/* Actually disable global dirty logging and notify listeners; called
 * either directly or deferred via the VM change-state handler.
 */
static void memory_global_dirty_log_do_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_MEMORY_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

J
Jay Zhou 已提交
2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623
static void memory_vm_change_state_handler(void *opaque, int running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_do_stop();

        if (vmstate_change) {
            qemu_del_vm_change_state_handler(vmstate_change);
            vmstate_change = NULL;
        }
    }
}

/* Disable global dirty logging.  If the VM is not currently running,
 * defer the stop until it starts again by arming a change-state handler.
 */
void memory_global_dirty_log_stop(void)
{
    if (runstate_is_running()) {
        memory_global_dirty_log_do_stop();
        return;
    }

    /* Defer; if a handler is already armed there is nothing more to do. */
    if (!vmstate_change) {
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler, NULL);
    }
}

/* Replay the current state of @as to a newly registered @listener:
 * a begin/commit pair bracketing region_add (and log_start where dirty
 * logging is active) for every range in the flat view.
 */
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        /* Also replay per-range dirty logging state. */
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

/* Mirror of listener_add_address_space(): replay removal of the current
 * state of @as to @listener before it is unregistered (log_stop, then
 * region_del, for every range).
 */
static void listener_del_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

/* Register @listener on @as.  The listener is inserted into both the
 * global list and the per-address-space list, keeping each sorted by
 * ascending priority (ties go after existing entries), then the current
 * state of @as is replayed to it.
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    /* Same ordering rule for the per-address-space list. */
    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}

/* Unregister @listener.  Safe to call on a listener that was never
 * registered (address_space == NULL); replays region removal before
 * unlinking from both lists.
 */
void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    listener_del_address_space(listener, listener->address_space);
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    /* Mark as unregistered so a second call is a no-op. */
    listener->address_space = NULL;
}

2726 2727 2728 2729 2730 2731 2732
void address_space_remove_listeners(AddressSpace *as)
{
    while (!QTAILQ_EMPTY(&as->listeners)) {
        memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
    }
}

/* Initialize @as with root region @root and (optional) @name, link it
 * into the global address_spaces list, and render its initial topology.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    /* The address space holds a reference on its root region. */
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}

/* RCU callback that releases the resources of @as; scheduled by
 * address_space_destroy() once old readers can no longer see it.
 */
static void do_address_space_destroy(AddressSpace *as)
{
    /* All listeners must have been unregistered by now. */
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

/* Tear down @as: detach it from its root inside a transaction so
 * listeners see the removal, then free it after an RCU grace period.
 */
void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789
static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

/* Queue node used by mtree_print_mr() both to defer printing of aliased
 * regions and to sort subregions before recursing.
 */
struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;

/* Inclusive end offset of a region: size - 1, or 0 when size is zero. */
#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
/* One level of indentation in mtree output. */
#define MTREE_INDENT "  "

/* Print " label:{dev|obj ...}" identifying @obj, preferring the device
 * id, then the canonical QOM path, then the type name.
 */
static void mtree_expand_owner(const char *label, Object *obj)
{
    DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);

    qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
    if (dev && dev->id) {
        qemu_printf(" id=%s", dev->id);
    } else {
        gchar *canonical_path = object_get_canonical_path(obj);
        if (canonical_path) {
            qemu_printf(" path=%s", canonical_path);
            g_free(canonical_path);
        } else {
            qemu_printf(" type=%s", object_get_typename(obj));
        }
    }
    qemu_printf("}");
}

/* Print the owner (and, when different, the QOM parent) of @mr, or
 * " orphan" when it has neither.
 */
static void mtree_print_mr_owner(const MemoryRegion *mr)
{
    Object *owner = mr->owner;
    Object *parent = memory_region_owner((MemoryRegion *)mr);

    if (!owner && !parent) {
        qemu_printf(" orphan");
        return;
    }
    if (owner) {
        mtree_expand_owner("owner", owner);
    }
    if (parent && parent != owner) {
        mtree_expand_owner("parent", parent);
    }
}

/* Recursively print @mr and its subregions at indentation @level, with
 * addresses offset by @base.  Aliased regions are queued on
 * @alias_print_queue so the caller can print them once at top level;
 * @owner selects whether ownership info is appended to each line.
 */
static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue,
                           bool owner)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        qemu_printf(MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of memory region. This should never
     * happen normally. When it happens, we dump something to warn the
     * user who is observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        qemu_printf("[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                    " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
                    "-" TARGET_FMT_plx "%s",
                    cur_start, cur_end,
                    mr->priority,
                    mr->nonvolatile ? "nv-" : "",
                    memory_region_type((MemoryRegion *)mr),
                    memory_region_name(mr),
                    memory_region_name(mr->alias),
                    mr->alias_offset,
                    mr->alias_offset + MR_SIZE(mr->size),
                    mr->enabled ? "" : " [disabled]");
        if (owner) {
            mtree_print_mr_owner(mr);
        }
    } else {
        qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                    " (prio %d, %s%s): %s%s",
                    cur_start, cur_end,
                    mr->priority,
                    mr->nonvolatile ? "nv-" : "",
                    memory_region_type((MemoryRegion *)mr),
                    memory_region_name(mr),
                    mr->enabled ? "" : " [disabled]");
        if (owner) {
            mtree_print_mr_owner(mr);
        }
    }
    qemu_printf("\n");

    QTAILQ_INIT(&submr_print_queue);

    /* Insertion sort of subregions: by ascending address, then by
     * descending priority for equal addresses.
     */
    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(ml->mr, level + 1, cur_start,
                       alias_print_queue, owner);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}

/* Per-traversal state passed to mtree_print_flatview() via the hash
 * table foreach callback.
 */
struct FlatViewInfo {
    int counter;        /* sequential FlatView number in the output */
    bool dispatch_tree; /* also print the dispatch tree */
    bool owner;         /* append owner info to each line */
};

/* GHFunc callback: print one FlatView (@key) and the address spaces
 * sharing it (@value, a GArray of AddressSpace*); @user_data is the
 * struct FlatViewInfo for this traversal.
 */
static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    qemu_printf("FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace*, i);
        qemu_printf(" AS \"%s\", root: %s",
                    as->name, memory_region_name(as->root));
        if (as->root->alias) {
            qemu_printf(", alias %s", memory_region_name(as->root->alias));
        }
        qemu_printf("\n");
    }

    qemu_printf(" Root memory region: %s\n",
      view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            /* Variant with the "@offset" suffix for non-zero offsets. */
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s @" TARGET_FMT_plx,
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr),
                        range->offset_in_region);
        } else {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s",
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr));
        }
        if (fvi->owner) {
            mtree_print_mr_owner(mr);
        }
        qemu_printf("\n");
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(view->dispatch, view->root);
    }
#endif

    qemu_printf("\n");
}

/* GHRFunc callback used by mtree_info() to release the per-FlatView
 * table entries; always returns true so the entry is removed.
 */
static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                      gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    /* Drop the reference taken by address_space_get_flatview(). */
    flatview_unref(view);

    return true;
}

/* Entry point for the "info mtree" output.  With @flatview, group all
 * address spaces by their (possibly shared) FlatView and print each view
 * once; otherwise print the region tree of every address space followed
 * by any aliased regions collected along the way.
 */
void mtree_info(bool flatview, bool dispatch_tree, bool owner)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        FlatView *view;
        struct FlatViewInfo fvi = {
            .counter = 0,
            .dispatch_tree = dispatch_tree,
            .owner = owner,
        };
        GArray *fv_address_spaces;
        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);

        /* Gather all FVs in one table */
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            view = address_space_get_flatview(as);

            fv_address_spaces = g_hash_table_lookup(views, view);
            if (!fv_address_spaces) {
                fv_address_spaces = g_array_new(false, false, sizeof(as));
                g_hash_table_insert(views, view, fv_address_spaces);
            }

            g_array_append_val(fv_address_spaces, as);
        }

        /* Print */
        g_hash_table_foreach(views, mtree_print_flatview, &fvi);

        /* Free */
        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
        g_hash_table_unref(views);

        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        qemu_printf("address-space: %s\n", as->name);
        mtree_print_mr(as->root, 1, 0, &ml_head, owner);
        qemu_printf("\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(ml->mr, 1, 0, &ml_head, owner);
        qemu_printf("\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}

P
Peter Crosthwaite 已提交
3094

3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

/* Initialize @mr as ROM of @size bytes named @name, and register it for
 * migration.  Errors from the underlying init are propagated via @errp.
 */
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    Error *local_err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    /*
     * DEVICE() asserts unless owner is NULL or a DeviceState; the owner
     * is only needed here to derive a unique migration name.  TODO:
     * devise a naming scheme for Objects that are not DeviceStates, so
     * this restriction can be relaxed.
     */
    vmstate_register_ram(mr, DEVICE(owner));
}

/* Initialize @mr as a ROM device (reads go to RAM, writes to @ops) and
 * register it for migration.  Errors are propagated via @errp.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    Error *local_err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    /*
     * DEVICE() asserts unless owner is NULL or a DeviceState; the owner
     * is only needed here to derive a unique migration name.  TODO:
     * devise a naming scheme for Objects that are not DeviceStates, so
     * this restriction can be relaxed.
     */
    vmstate_register_ram(mr, DEVICE(owner));
}

/* QOM type registration for plain memory regions. */
static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

/* QOM type registration for IOMMU memory regions; abstract, so concrete
 * IOMMU implementations must subclass it.
 */
static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};

/* Register the memory region QOM types at startup (via type_init). */
static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)