/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qemu/bitops.h"
#include "qom/object.h"
#include "trace.h"
#include <assert.h>

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool global_dirty_log = false;

/* flat_view_mutex is taken around reading as->current_map; the critical
 * section is extremely short, so I'm using a single mutex for every AS.
 * We could also use RCU for the read side.
 *
 * The BQL is taken around transaction commits, hence both locks are taken
 * while writing to as->current_map (with the BQL taken outside).
 */
static QemuMutex flat_view_mutex;
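
/* Illustrative reader pattern (a sketch of the locking rule described
 * above; the real helper is address_space_get_flatview() later in this
 * file):
 *
 *     qemu_mutex_lock(&flat_view_mutex);
 *     view = as->current_map;
 *     flatview_ref(view);
 *     qemu_mutex_unlock(&flat_view_mutex);
 *     ... use view, then flatview_unref(view) when done ...
 */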

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static void memory_init(void)
{
    qemu_mutex_init(&flat_view_mutex);
}

typedef struct AddrRange AddrRange;

/*
 * Note using signed integers limits us to physical addresses at most
 * 63 bits wide.  They are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
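
/* Worked example (illustrative): [0x1000, 0x3000) and [0x2000, 0x4000)
 * intersect, and addrrange_intersection() of the two yields start
 * 0x2000, size 0x1000, i.e. the overlap [0x2000, 0x3000).
 */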

enum ListenerDirection { Forward, Reverse };

static bool memory_listener_match(MemoryListener *listener,
                                  MemoryRegionSection *section)
{
    return !listener->address_space_filter
        || listener->address_space_filter == section->address_space;
}

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr; the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback)            \
    MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) {       \
        .mr = (fr)->mr,                                                 \
        .address_space = (as),                                          \
        .offset_within_region = (fr)->offset_in_region,                 \
        .size = (fr)->addr.size,                                        \
        .offset_within_address_space = int128_get64((fr)->addr.start),  \
        .readonly = (fr)->readonly,                                     \
              }))

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else  if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}
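
/* memory_region_ioeventfd_before() orders ioeventfds lexicographically
 * by addr.start, addr.size, match_data, data (only when match_data is
 * set), and finally the notifier pointer e; equality above is derived
 * from it in the usual way, as !(a < b) && !(b < a).
 */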

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
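
/* Worked example (illustrative): if a view holds [0x0, 0x1000) and
 * [0x1000, 0x2000), both backed by the same mr at contiguous
 * offset_in_region values and with identical attributes, can_merge()
 * is true and the pair collapses into a single [0x0, 0x2000) range.
 */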

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
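
/* Worked example (illustrative): for a 4-byte access to a region whose
 * endianness is the opposite of the target's, adjust_endianness()
 * turns 0x12345678 into 0x78563412 via bswap32().
 */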

static void memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    trace_memory_region_ops_read(mr, addr, tmp, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_read_accessor(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *value,
                                        unsigned size,
                                        unsigned shift,
                                        uint64_t mask)
{
    uint64_t tmp;

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }
    tmp = mr->ops->read(mr->opaque, addr, size);
    trace_memory_region_ops_read(mr, addr, tmp, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                 hwaddr addr,
                                                 uint64_t *value,
                                                 unsigned size,
                                                 unsigned shift,
                                                 uint64_t mask)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    trace_memory_region_ops_write(mr, addr, tmp, size);
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
}

static void memory_region_write_accessor(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t *value,
                                         unsigned size,
                                         unsigned shift,
                                         uint64_t mask)
{
    uint64_t tmp;

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }
    tmp = (*value >> shift) & mask;
    trace_memory_region_ops_write(mr, addr, tmp, size);
    mr->ops->write(mr->opaque, addr, tmp, size);
}

static void access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      void (*access)(MemoryRegion *mr,
                                                     hwaddr addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask),
                                      MemoryRegion *mr)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            access(mr, addr + i, value, access_size,
                   (size - access_size - i) * 8, access_mask);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            access(mr, addr + i, value, access_size, i * 8, access_mask);
        }
    }
}
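
/* Worked example (illustrative): an 8-byte access to a device whose
 * impl.max_access_size is 4 is split into two 4-byte accesses.  On a
 * big-endian region the first access lands in bits 63..32 of *value
 * ((size - access_size - i) * 8 == 32 for i == 0); on a little-endian
 * region it lands in bits 31..0.
 */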

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->parent) {
        mr = mr->parent;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    abort();
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = mr->dirty_log_mask;
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
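
/* Rendering example (illustrative): a container mapped at 0x10000000
 * with an enabled leaf subregion at offset 0x1000 yields a FlatRange
 * whose addr.start is the absolute address 0x10001000.  For aliases,
 * the target's own address and the alias_offset are subtracted from
 * @base before recursing, so the alias window lands on the intended
 * part of the target region.
 */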

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}
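
/* Worked example (illustrative): assuming A < B < C under
 * memory_region_ioeventfd_before(), with fds_old = {A, C} and
 * fds_new = {B, C} the walk above emits eventfd_del for A,
 * eventfd_add for B, and leaves C untouched.
 */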

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    qemu_mutex_lock(&flat_view_mutex);
    view = as->current_map;
    flatview_ref(view);
    qemu_mutex_unlock(&flat_view_mutex);
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                          ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop);
                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}
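
/* The pass above is run twice by address_space_update_topology():
 * first with adding == false, issuing region_del (in Reverse) for
 * ranges that vanish, then with adding == true, issuing region_add or
 * region_nop, plus log_start/log_stop where only the dirty logging
 * state changed.
 */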


static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    qemu_mutex_lock(&flat_view_mutex);
    flatview_unref(as->current_map);
    as->current_map = new_view;
    qemu_mutex_unlock(&flat_view_mutex);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth && memory_region_update_pending) {
        memory_region_update_pending = false;
        MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            address_space_update_topology(as);
        }

        MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
    }
}
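
/* Typical usage (a sketch; DIRTY_MEMORY_VGA is one of the standard
 * dirty-memory clients):
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_readonly(mr, true);
 *     memory_region_set_log(mr, true, DIRTY_MEMORY_VGA);
 *     memory_region_transaction_commit();
 *
 * begin/commit pairs nest; the topology is rebuilt only when the
 * outermost commit brings memory_region_transaction_depth back to
 * zero.
 */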

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr);
}

static void memory_region_destructor_alias(MemoryRegion *mr)
{
    memory_region_unref(mr->alias);
}

static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
{
    qemu_ram_free_from_ptr(mr->ram_addr);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    mr->ops = &unassigned_mem_ops;
    mr->opaque = NULL;
    mr->owner = owner;
    mr->iommu_ops = NULL;
    mr->parent = NULL;
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->addr = 0;
    mr->subpage = false;
    mr->enabled = true;
    mr->terminates = false;
    mr->ram = false;
    mr->romd_mode = true;
    mr->readonly = false;
    mr->rom_device = false;
    mr->destructor = memory_region_destructor_none;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = g_strdup(name);
    mr->dirty_log_mask = 0;
    mr->ioeventfd_nb = 0;
    mr->ioeventfds = NULL;
    mr->flush_coalesced_mmio = false;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

static uint64_t memory_region_dispatch_read1(MemoryRegion *mr,
                                             hwaddr addr,
                                             unsigned size)
{
    uint64_t data = 0;

    if (mr->ops->read) {
        access_with_adjusted_size(addr, &data, size,
                                  mr->ops->impl.min_access_size,
                                  mr->ops->impl.max_access_size,
                                  memory_region_read_accessor, mr);
    } else {
        access_with_adjusted_size(addr, &data, size, 1, 4,
                                  memory_region_oldmmio_read_accessor, mr);
    }

    return data;
}

static bool memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size)
{
    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return true;
    }

    *pval = memory_region_dispatch_read1(mr, addr, size);
    adjust_endianness(mr, pval, size);
    return false;
}

static bool memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return true;
    }

    adjust_endianness(mr, &data, size);

    if (mr->ops->write) {
        access_with_adjusted_size(addr, &data, size,
                                  mr->ops->impl.min_access_size,
                                  mr->ops->impl.max_access_size,
                                  memory_region_write_accessor, mr);
    } else {
        access_with_adjusted_size(addr, &data, size, 1, 4,
                                  memory_region_oldmmio_write_accessor, mr);
    }
    return false;
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->ram_addr = ~(ram_addr_t)0;
}

void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc(size, mr);
}
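
/* Initialization sketch (illustrative; assumes a board-owned "sysmem"
 * root region and memory_region_add_subregion() from the public
 * memory API):
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *     memory_region_init_ram(ram, NULL, "ram", ram_size);
 *     memory_region_add_subregion(sysmem, 0, ram);
 */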

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram_from_ptr;
    mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr);
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    memory_region_ref(orig);
    mr->destructor = memory_region_destructor_alias;
    mr->alias = orig;
    mr->alias_offset = offset;
}
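
/* Alias example (illustrative): an alias of @orig with offset 0x1000
 * and size 0x1000 exposes bytes [0x1000, 0x2000) of @orig wherever
 * the alias itself is mapped.
 */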

void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_addr = qemu_ram_alloc(size, mr);
}

void memory_region_init_iommu(MemoryRegion *mr,
                              Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->iommu_ops = ops;
    mr->terminates = true;  /* then re-forwards */
    notifier_list_init(&mr->iommu_notify);
}

void memory_region_init_reservation(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size)
{
    memory_region_init_io(mr, owner, &unassigned_mem_ops, mr, name, size);
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    assert(memory_region_transaction_depth == 0);
    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    return mr->owner;
}

void memory_region_ref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(MemoryRegion *mr)
{
    return mr->name;
}

bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

bool memory_region_is_logging(MemoryRegion *mr)
{
    return mr->dirty_log_mask;
}

bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

bool memory_region_is_iommu(MemoryRegion *mr)
{
    return mr->iommu_ops;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
{
    notifier_list_add(&mr->iommu_notify, n);
}

void memory_region_unregister_iommu_notifier(Notifier *n)
{
    notifier_remove(n);
}

void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry)
{
    assert(memory_region_is_iommu(mr));
    notifier_list_notify(&mr->iommu_notify, &entry);
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->terminates);
    cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size);
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    bool ret;
    assert(mr->terminates);
    ret = cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client);
    if (ret) {
        cpu_physical_memory_reset_dirty(mr->ram_addr + addr, size, client);
    }
    return ret;
}


void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    AddressSpace *as;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        FlatView *view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr, size, client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
}

static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,