/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qemu/bitops.h"
#include "qom/object.h"
#include "trace.h"
#include <assert.h>

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool global_dirty_log = false;

/* flat_view_mutex is taken around reading as->current_map; the critical
 * section is extremely short, so I'm using a single mutex for every AS.
 * We could also use RCU for the read-side.
 *
 * The BQL is taken around transaction commits, hence both locks are taken
 * while writing to as->current_map (with the BQL taken outside).
 */
static QemuMutex flat_view_mutex;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static void memory_init(void)
{
    qemu_mutex_init(&flat_view_mutex);
}

typedef struct AddrRange AddrRange;

/*
 * Note that using signed integers limits us to physical addresses at most
 * 63 bits wide.  They are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

static bool memory_listener_match(MemoryListener *listener,
                                  MemoryRegionSection *section)
{
    return !listener->address_space_filter
        || listener->address_space_filter == section->address_space;
}

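/* Invoke @_callback on every listener that implements it, walking the
 * global listener list either front-to-back (Forward) or back-to-front
 * (Reverse).
 */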
#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback)            \
    MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) {       \
        .mr = (fr)->mr,                                                 \
        .address_space = (as),                                          \
        .offset_within_region = (fr)->offset_in_region,                 \
        .size = (fr)->addr.size,                                        \
        .offset_within_address_space = int128_get64((fr)->addr.start),  \
        .readonly = (fr)->readonly,                                     \
              }))

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

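/* Total order on ioeventfds: compare the address range first, then the
 * match_data flag, the matched value, and finally the notifier pointer.
 * memory_region_ioeventfd_equal() below is derived from this ordering.
 */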
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else  if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of the current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

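/* FlatViews are reference counted; this lets readers keep using a view
 * briefly after a commit has replaced as->current_map (see
 * address_space_get_flatview()).
 */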
static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}

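/* Two ranges can merge if they are adjacent in guest physical address
 * space, cover contiguous offsets of the same region, and agree on all
 * attributes.
 */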
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

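/* True if the device's endianness is the opposite of the target's, in
 * which case data must be byte-swapped on its way to or from the device.
 */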
static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}

static void memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    trace_memory_region_ops_read(mr, addr, tmp, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_read_accessor(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *value,
                                        unsigned size,
                                        unsigned shift,
                                        uint64_t mask)
{
    uint64_t tmp;

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }
    tmp = mr->ops->read(mr->opaque, addr, size);
    trace_memory_region_ops_read(mr, addr, tmp, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                 hwaddr addr,
                                                 uint64_t *value,
                                                 unsigned size,
                                                 unsigned shift,
                                                 uint64_t mask)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    trace_memory_region_ops_write(mr, addr, tmp, size);
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
}

static void memory_region_write_accessor(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t *value,
                                         unsigned size,
                                         unsigned shift,
                                         uint64_t mask)
{
    uint64_t tmp;

    if (mr->flush_coalesced_mmio) {
        qemu_flush_coalesced_mmio_buffer();
    }
    tmp = (*value >> shift) & mask;
    trace_memory_region_ops_write(mr, addr, tmp, size);
    mr->ops->write(mr->opaque, addr, tmp, size);
}

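/* Split an access into pieces the device supports, invoking @access once
 * per piece; the shift and mask arguments assemble the pieces into
 * *value in the right byte order.
 */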
static void access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      void (*access)(MemoryRegion *mr,
                                                     hwaddr addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask),
                                      MemoryRegion *mr)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            access(mr, addr + i, value, access_size,
                   (size - access_size - i) * 8, access_mask);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            access(mr, addr + i, value, access_size, i * 8, access_mask);
        }
    }
}

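/* Find the address space whose root contains @mr; aborts if the region
 * is not mapped into any address space.
 */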
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->parent) {
        mr = mr->parent;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    abort();
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = mr->dirty_log_mask;
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

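/* Grab a reference to the address space's current flat view; the caller
 * must drop it with flatview_unref() when done.
 */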
static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    qemu_mutex_lock(&flat_view_mutex);
    view = as->current_map;
    flatview_ref(view);
    qemu_mutex_unlock(&flat_view_mutex);
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                          ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop);
                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}


static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    qemu_mutex_lock(&flat_view_mutex);
    flatview_unref(as->current_map);
    as->current_map = new_view;
    qemu_mutex_unlock(&flat_view_mutex);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}

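/* Transactions nest; topology and listener updates triggered inside a
 * transaction are deferred until the outermost commit.
 */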
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth && memory_region_update_pending) {
        memory_region_update_pending = false;
        MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            address_space_update_topology(as);
        }

        MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr);
}

static void memory_region_destructor_alias(MemoryRegion *mr)
{
    memory_region_unref(mr->alias);
}

static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
{
    qemu_ram_free_from_ptr(mr->ram_addr);
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    mr->ops = &unassigned_mem_ops;
    mr->opaque = NULL;
    mr->owner = owner;
    mr->iommu_ops = NULL;
    mr->parent = NULL;
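    /* A size of UINT64_MAX is taken to mean 2^64, i.e. the whole space. */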
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->addr = 0;
    mr->subpage = false;
    mr->enabled = true;
    mr->terminates = false;
    mr->ram = false;
    mr->romd_mode = true;
    mr->readonly = false;
    mr->rom_device = false;
    mr->destructor = memory_region_destructor_none;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = g_strdup(name);
    mr->dirty_log_mask = 0;
    mr->ioeventfd_nb = 0;
    mr->ioeventfds = NULL;
    mr->flush_coalesced_mmio = false;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

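/* Validate alignment and, if the device provides an accepts hook, ask it
 * about each naturally-sized piece of the access.
 */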
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write)) {
            return false;
        }
    }

    return true;
}

static uint64_t memory_region_dispatch_read1(MemoryRegion *mr,
                                             hwaddr addr,
                                             unsigned size)
{
    uint64_t data = 0;

    if (mr->ops->read) {
        access_with_adjusted_size(addr, &data, size,
                                  mr->ops->impl.min_access_size,
                                  mr->ops->impl.max_access_size,
                                  memory_region_read_accessor, mr);
    } else {
        access_with_adjusted_size(addr, &data, size, 1, 4,
                                  memory_region_oldmmio_read_accessor, mr);
    }

    return data;
}

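/* Returns true if the access was rejected and routed to the unassigned
 * memory handlers instead of mr's own callbacks.
 */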
static bool memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size)
{
    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return true;
    }

    *pval = memory_region_dispatch_read1(mr, addr, size);
    adjust_endianness(mr, pval, size);
    return false;
}

static bool memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return true;
    }

    adjust_endianness(mr, &data, size);

    if (mr->ops->write) {
        access_with_adjusted_size(addr, &data, size,
                                  mr->ops->impl.min_access_size,
                                  mr->ops->impl.max_access_size,
                                  memory_region_write_accessor, mr);
    } else {
        access_with_adjusted_size(addr, &data, size, 1, 4,
                                  memory_region_oldmmio_write_accessor, mr);
    }
    return false;
}

void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->ram_addr = ~(ram_addr_t)0;
}

void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc(size, mr);
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram_from_ptr;
    mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr);
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    memory_region_ref(orig);
    mr->destructor = memory_region_destructor_alias;
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_addr = qemu_ram_alloc(size, mr);
}

void memory_region_init_iommu(MemoryRegion *mr,
                              Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->iommu_ops = ops;
    mr->terminates = true;  /* then re-forwards */
    notifier_list_init(&mr->iommu_notify);
}

void memory_region_init_reservation(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size)
{
    memory_region_init_io(mr, owner, &unassigned_mem_ops, mr, name, size);
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    assert(memory_region_transaction_depth == 0);
    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    return mr->owner;
}

void memory_region_ref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(MemoryRegion *mr)
{
    return mr->name;
}

bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

bool memory_region_is_logging(MemoryRegion *mr)
{
    return mr->dirty_log_mask;
}

bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

bool memory_region_is_iommu(MemoryRegion *mr)
{
    return mr->iommu_ops;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
{
    notifier_list_add(&mr->iommu_notify, n);
}

void memory_region_unregister_iommu_notifier(Notifier *n)
{
    notifier_remove(n);
}

void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry)
{
    assert(memory_region_is_iommu(mr));
    notifier_list_notify(&mr->iommu_notify, &entry);
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size,
                                         1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->terminates);
    cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size, -1);
}

bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client)
{
    bool ret;
    assert(mr->terminates);
    ret = cpu_physical_memory_get_dirty(mr->ram_addr + addr, size,
                                        1 << client);
    if (ret) {
        cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                        mr->ram_addr + addr + size,
                                        1 << client);
    }
    return ret;
}


void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    AddressSpace *as;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        FlatView *view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
}

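/* Replay mr's coalesced MMIO ranges to the listeners of @as: delete the
 * old registration for each flat range of mr, then re-add the
 * intersection of each coalesced range with that flat range.
 */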
static void memory_region_update_coalesced_range_as(MemoryRegion *mr,
                                                    AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));