/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "memory.h"
#include "exec-memory.h"
#include "ioport.h"
#include "bitops.h"
#include "kvm.h"
#include <assert.h>

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

unsigned memory_region_transaction_depth = 0;
static bool memory_region_update_pending = false;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

typedef struct AddrRange AddrRange;

/*
 * Note using signed integers limits us to physical addresses at most
 * 63 bits wide.  They are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
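
/*
 * Illustrative sketch (not part of the build): composing the helpers above.
 * Two ranges [0x1000, 0x3000) and [0x2000, 0x4000) intersect, and their
 * intersection is [0x2000, 0x3000):
 *
 *     AddrRange a = addrrange_make(int128_make64(0x1000), int128_make64(0x2000));
 *     AddrRange b = addrrange_make(int128_make64(0x2000), int128_make64(0x2000));
 *     assert(addrrange_intersects(a, b));
 *     AddrRange i = addrrange_intersection(a, b);  // start 0x2000, size 0x1000
 */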

enum ListenerDirection { Forward, Reverse };

static bool memory_listener_match(MemoryListener *listener,
                                  MemoryRegionSection *section)
{
    return !listener->address_space_filter
        || listener->address_space_filter == section->address_space;
}

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                _listener->_callback(_listener, ##_args);               \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                _listener->_callback(_listener, ##_args);               \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (memory_listener_match(_listener, _section)) {       \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (memory_listener_match(_listener, _section)) {       \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback)            \
    MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) {       \
        .mr = (fr)->mr,                                                 \
        .address_space = (as)->root,                                    \
        .offset_within_region = (fr)->offset_in_region,                 \
        .size = int128_get64((fr)->addr.size),                          \
        .offset_within_address_space = int128_get64((fr)->addr.start),  \
        .readonly = (fr)->readonly,                                     \
    }))
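
/*
 * Illustrative note: the Forward/Reverse distinction matters because the
 * listener list is kept sorted by ascending priority (see
 * memory_listener_register() below).  Constructive callbacks such as
 * region_add are dispatched Forward; destructive ones such as region_del
 * are dispatched in Reverse, so teardown runs in the opposite order of
 * setup.  Sketch with hypothetical listeners and priorities:
 *
 *     listeners by priority:  kvm (10) -> vhost (20)
 *     region_add dispatch:    kvm, then vhost     (Forward)
 *     region_del dispatch:    vhost, then kvm     (Reverse)
 */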

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool readable;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    MemoryRegion *root;
    FlatView current_map;
    int ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->readable == b->readable
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    g_free(view->ranges);
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->readable == r2->readable
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
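
/*
 * Example of what simplification does (sketch): if rendering produced two
 * adjacent ranges that satisfy can_merge() -- same region, contiguous both
 * in the address space and within the region, identical attributes -- they
 * collapse into one:
 *
 *     before:  {mr=ram, as=[0x0,0x1000),    offset_in_region=0x0}
 *              {mr=ram, as=[0x1000,0x2000), offset_in_region=0x1000}
 *     after:   {mr=ram, as=[0x0,0x2000),    offset_in_region=0x0}
 */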

static void memory_region_read_accessor(void *opaque,
                                        target_phys_addr_t addr,
                                        uint64_t *value,
                                        unsigned size,
                                        unsigned shift,
                                        uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    *value |= (tmp & mask) << shift;
}

static void memory_region_write_accessor(void *opaque,
                                         target_phys_addr_t addr,
                                         uint64_t *value,
                                         unsigned size,
                                         unsigned shift,
                                         uint64_t mask)
{
    MemoryRegion *mr = opaque;
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    mr->ops->write(mr->opaque, addr, tmp, size);
}

static void access_with_adjusted_size(target_phys_addr_t addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      void (*access)(void *opaque,
                                                     target_phys_addr_t addr,
                                                     uint64_t *value,
                                                     unsigned size,
                                                     unsigned shift,
                                                     uint64_t mask),
                                      void *opaque)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        access(opaque, addr + i, value, access_size, i * 8, access_mask);
    }
}
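
/*
 * Example (sketch): a 4-byte read from a device whose .impl limits accesses
 * to 1 byte (access_size_min = access_size_max = 1) is decomposed into four
 * byte reads, each masked and shifted into place in *value:
 *
 *     access(opaque, addr + 0, value, 1, 0,  0xff);
 *     access(opaque, addr + 1, value, 1, 8,  0xff);
 *     access(opaque, addr + 2, value, 1, 16, 0xff);
 *     access(opaque, addr + 3, value, 1, 24, 0xff);
 *
 * (little-endian composition only; see the FIXME above.)
 */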

static AddressSpace address_space_memory;

static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset,
                                             unsigned width, bool write)
{
    const MemoryRegionPortio *mrp;

    for (mrp = mr->ops->old_portio; mrp->size; ++mrp) {
        if (offset >= mrp->offset && offset < mrp->offset + mrp->len
            && width == mrp->size
            && (write ? (bool)mrp->write : (bool)mrp->read)) {
            return mrp;
        }
    }
    return NULL;
}

static void memory_region_iorange_read(IORange *iorange,
                                       uint64_t offset,
                                       unsigned width,
                                       uint64_t *data)
{
    MemoryRegionIORange *mrio
        = container_of(iorange, MemoryRegionIORange, iorange);
    MemoryRegion *mr = mrio->mr;

    offset += mrio->offset;
    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset - mrio->offset,
                                                    width, false);

        *data = ((uint64_t)1 << (width * 8)) - 1;
        if (mrp) {
            *data = mrp->read(mr->opaque, offset);
        } else if (width == 2) {
            mrp = find_portio(mr, offset - mrio->offset, 1, false);
            assert(mrp);
            *data = mrp->read(mr->opaque, offset) |
                    (mrp->read(mr->opaque, offset + 1) << 8);
        }
        return;
    }
    *data = 0;
    access_with_adjusted_size(offset, data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);
}

static void memory_region_iorange_write(IORange *iorange,
                                        uint64_t offset,
                                        unsigned width,
                                        uint64_t data)
{
    MemoryRegionIORange *mrio
        = container_of(iorange, MemoryRegionIORange, iorange);
    MemoryRegion *mr = mrio->mr;

    offset += mrio->offset;
    if (mr->ops->old_portio) {
        const MemoryRegionPortio *mrp = find_portio(mr, offset - mrio->offset,
                                                    width, true);

        if (mrp) {
            mrp->write(mr->opaque, offset, data);
        } else if (width == 2) {
            mrp = find_portio(mr, offset - mrio->offset, 1, true);
            assert(mrp);
            mrp->write(mr->opaque, offset, data & 0xff);
            mrp->write(mr->opaque, offset + 1, data >> 8);
        }
        return;
    }
    access_with_adjusted_size(offset, &data, width,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

static void memory_region_iorange_destructor(IORange *iorange)
{
    g_free(container_of(iorange, MemoryRegionIORange, iorange));
}

const IORangeOps memory_region_iorange_ops = {
    .read = memory_region_iorange_read,
    .write = memory_region_iorange_write,
    .destructor = memory_region_iorange_destructor,
};

static AddressSpace address_space_io;

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    while (mr->parent) {
        mr = mr->parent;
    }
    if (mr == address_space_memory.root) {
        return &address_space_memory;
    }
    if (mr == address_space_io.root) {
        return &address_space_io;
    }
    abort();
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            fr.readable = mr->readable;
            fr.readonly = readonly;
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        if (int128_eq(base, view->ranges[i].addr.start)) {
            now = int128_min(remain, view->ranges[i].addr.size);
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
    }
    if (int128_nz(remain)) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        fr.readable = mr->readable;
        fr.readonly = readonly;
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, int128_zero(),
                         addrrange_make(int128_zero(), int128_2_64()), false);
    flatview_simplify(&view);

    return view;
}
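
/*
 * Example (sketch, hypothetical regions): given a pure-container root with a
 * priority-0 RAM subregion at [0x0, 0x10000) and a priority-1 MMIO subregion
 * at [0x4000, 0x5000), subregions are rendered highest priority first, so
 * the flat view comes out as three disjoint ranges:
 *
 *     [0x0,    0x4000)  -> ram,  offset_in_region 0x0
 *     [0x4000, 0x5000)  -> mmio, offset_in_region 0x0
 *     [0x5000, 0x10000) -> ram,  offset_in_region 0x5000
 */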

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as->root,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = int128_get64(fd->addr.size),
            };
            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as->root,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = int128_get64(fd->addr.size),
            };
            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               FlatView old_view,
                                               FlatView new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed). */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop);
                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new. */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    as->current_map = new_view;
    flatview_destroy(&old_view);
    address_space_update_ioeventfds(as);
}

static void memory_region_update_topology(MemoryRegion *mr)
{
    if (memory_region_transaction_depth) {
        memory_region_update_pending |= !mr || mr->enabled;
        return;
    }

    if (mr && !mr->enabled) {
        return;
    }

    MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

    if (address_space_memory.root) {
        address_space_update_topology(&address_space_memory);
    }
    if (address_space_io.root) {
        address_space_update_topology(&address_space_io);
    }

    MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);

    memory_region_update_pending = false;
}

void memory_region_transaction_begin(void)
{
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth && memory_region_update_pending) {
        memory_region_update_topology(NULL);
    }
}
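
/*
 * Usage sketch: batching several topology changes so listeners see a single
 * update when the outermost commit runs ("dev" and "bar" are hypothetical;
 * nested begin/commit pairs are fine because the depth is a counter):
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(&dev->bar, false);
 *     memory_region_set_address(&dev->bar, new_addr);
 *     memory_region_set_enabled(&dev->bar, true);
 *     memory_region_transaction_commit();
 */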

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr);
}

static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
{
    qemu_ram_free_from_ptr(mr->ram_addr);
}

static void memory_region_destructor_iomem(MemoryRegion *mr)
{
}

static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->addr = 0;
    mr->subpage = false;
    mr->enabled = true;
    mr->terminates = false;
    mr->ram = false;
    mr->readable = true;
    mr->readonly = false;
    mr->rom_device = false;
    mr->destructor = memory_region_destructor_none;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = g_strdup(name);
    mr->dirty_log_mask = 0;
    mr->ioeventfd_nb = 0;
    mr->ioeventfds = NULL;
}

static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size,
                                       bool is_write)
{
    if (mr->ops->valid.accepts
        && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write)) {
        return false;
    }

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat a max_access_size of zero as "any size valid", for compatibility. */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}

static uint64_t memory_region_dispatch_read1(MemoryRegion *mr,
                                             target_phys_addr_t addr,
                                             unsigned size)
{
    uint64_t data = 0;

    if (!memory_region_access_valid(mr, addr, size, false)) {
        return -1U; /* FIXME: better signalling */
    }

    if (!mr->ops->read) {
        return mr->ops->old_mmio.read[bitops_ffsl(size)](mr->opaque, addr);
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_read_accessor, mr);

    return data;
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        default:
            abort();
        }
    }
}

static uint64_t memory_region_dispatch_read(MemoryRegion *mr,
                                            target_phys_addr_t addr,
                                            unsigned size)
{
    uint64_t ret;

    ret = memory_region_dispatch_read1(mr, addr, size);
    adjust_endianness(mr, &ret, size);
    return ret;
}

static void memory_region_dispatch_write(MemoryRegion *mr,
                                         target_phys_addr_t addr,
                                         uint64_t data,
                                         unsigned size)
{
    if (!memory_region_access_valid(mr, addr, size, true)) {
        return; /* FIXME: better signalling */
    }

    adjust_endianness(mr, &data, size);

    if (!mr->ops->write) {
        mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data);
        return;
    }

    /* FIXME: support unaligned access */
    access_with_adjusted_size(addr, &data, size,
                              mr->ops->impl.min_access_size,
                              mr->ops->impl.max_access_size,
                              memory_region_write_accessor, mr);
}

void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_iomem;
    mr->ram_addr = ~(ram_addr_t)0;
}
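
/*
 * Sketch of a caller (hypothetical device, not part of this file): an MMIO
 * region is backed by a MemoryRegionOps table whose read/write callbacks
 * receive the device's opaque pointer and a region-relative address:
 *
 *     static uint64_t mydev_read(void *opaque, target_phys_addr_t addr,
 *                                unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->regs[addr >> 2];
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *     };
 *
 *     memory_region_init_io(&s->mmio, &mydev_ops, s, "mydev-mmio", 0x1000);
 */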

void memory_region_init_ram(MemoryRegion *mr,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_addr = qemu_ram_alloc(size, mr);
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram_from_ptr;
    mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr);
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
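
/*
 * Sketch (hypothetical use): an alias lets part of one region appear at
 * another address without copying it.  For example, exposing the top 128KiB
 * of a 2MiB "ram" region a second time near the top of the address space:
 *
 *     memory_region_init_alias(&alias, "ram-mirror", &ram,
 *                              0x1e0000, 0x20000);
 *     memory_region_add_subregion(parent, 0xfffe0000, &alias);
 */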

void memory_region_init_rom_device(MemoryRegion *mr,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_rom_device;
    mr->ram_addr = qemu_ram_alloc(size, mr);
}

static uint64_t invalid_read(void *opaque, target_phys_addr_t addr,
                             unsigned size)
{
    MemoryRegion *mr = opaque;

    if (!mr->warning_printed) {
        fprintf(stderr, "Invalid read from memory region %s\n", mr->name);
        mr->warning_printed = true;
    }
    return -1U;
}

static void invalid_write(void *opaque, target_phys_addr_t addr, uint64_t data,
                          unsigned size)
{
    MemoryRegion *mr = opaque;

    if (!mr->warning_printed) {
        fprintf(stderr, "Invalid write to memory region %s\n", mr->name);
        mr->warning_printed = true;
    }
}

static const MemoryRegionOps reservation_ops = {
    .read = invalid_read,
    .write = invalid_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

void memory_region_init_reservation(MemoryRegion *mr,
                                    const char *name,
                                    uint64_t size)
{
    memory_region_init_io(mr, &reservation_ops, mr, name, size);
}

void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(MemoryRegion *mr)
{
    return mr->name;
}

bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

bool memory_region_is_logging(MemoryRegion *mr)
{
    return mr->dirty_log_mask;
}

bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology(mr);
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size,
                                         1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             target_phys_addr_t size)
{
    assert(mr->terminates);
    cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size, -1);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            MEMORY_LISTENER_UPDATE_REGION(fr, &address_space_memory,
                                          Forward, log_sync);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        mr->readonly = readonly;
        memory_region_update_topology(mr);
    }
}

void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable)
{
    if (mr->readable != readable) {
        mr->readable = readable;
        memory_region_update_topology(mr);
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}
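
/*
 * Sketch of the dirty-tracking flow, as a VGA-style framebuffer client
 * would use it ("vram" and redraw() are hypothetical; this assumes the
 * DIRTY_MEMORY_VGA client constant from the existing dirty-memory API):
 *
 *     memory_region_set_log(&vram, true, DIRTY_MEMORY_VGA);
 *     ...
 *     memory_region_sync_dirty_bitmap(&vram);
 *     if (memory_region_get_dirty(&vram, ofs, len, DIRTY_MEMORY_VGA)) {
 *         redraw(ofs, len);
 *         memory_region_reset_dirty(&vram, ofs, len, DIRTY_MEMORY_VGA);
 *     }
 */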

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->terminates);

    return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(int128_get64(fr->addr.start),
                                           int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(int128_get64(tmp.start),
                                             int128_get64(tmp.size));
            }
        }
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}

void memory_region_add_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    memory_region_update_topology(mr);
}
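
/*
 * Sketch of a virtio-style doorbell ("s", "bar", "notifier" hypothetical):
 * a 2-byte write of the queue index at the notify offset kicks an
 * EventNotifier directly, without returning to the device's write callback:
 *
 *     memory_region_add_eventfd(&s->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
 *                               true, vq_index, &s->notifier);
 *     ...
 *     memory_region_del_eventfd(&s->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
 *                               true, vq_index, &s->notifier);
 */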

void memory_region_del_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    memory_region_update_topology(mr);
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (int128_gt(int128_make64(offset),
                      int128_add(int128_make64(other->addr), other->size))
            || int128_le(int128_add(int128_make64(offset), subregion->size),
                         int128_make64(other->addr))) {
            continue;
        }
#if 0
        printf("warning: subregion collision %llx/%llx (%s) "
               "vs %llx/%llx (%s)\n",
               (unsigned long long)offset,
               (unsigned long long)int128_get64(subregion->size),
               subregion->name,
               (unsigned long long)other->addr,
               (unsigned long long)int128_get64(other->size),
               other->name);
#endif
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology(mr);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
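
/*
 * Sketch: overlapping subregions are resolved by priority when the view is
 * flattened.  For instance a PCI window that should win over system RAM
 * where the two overlap ("sysmem", "ram", "pci_window" hypothetical):
 *
 *     memory_region_add_subregion(sysmem, 0, &ram);            // prio 0
 *     memory_region_add_subregion_overlap(sysmem, 0xe0000000,
 *                                         &pci_window, 1);     // prio 1
 */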

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology(mr);
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    mr->enabled = enabled;
    memory_region_update_topology(NULL);
}

void memory_region_set_address(MemoryRegion *mr, target_phys_addr_t addr)
{
    MemoryRegion *parent = mr->parent;
    unsigned priority = mr->priority;
    bool may_overlap = mr->may_overlap;

    if (addr == mr->addr || !parent) {
        mr->addr = addr;
        return;
    }

    memory_region_transaction_begin();
    memory_region_del_subregion(parent, mr);
    if (may_overlap) {
        memory_region_add_subregion_overlap(parent, addr, mr, priority);
    } else {
        memory_region_add_subregion(parent, addr, mr);
    }
    memory_region_transaction_commit();
}

void memory_region_set_alias_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    target_phys_addr_t old_offset = mr->alias_offset;

    assert(mr->alias);
    mr->alias_offset = offset;

    if (offset == old_offset || !mr->parent) {
        return;
    }

    memory_region_update_topology(mr);
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_addr;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *address_space_lookup(AddressSpace *as, AddrRange addr)
{
    return bsearch(&addr, as->current_map.ranges, as->current_map.nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

MemoryRegionSection memory_region_find(MemoryRegion *address_space,
                                       target_phys_addr_t addr, uint64_t size)
{
    AddressSpace *as = memory_region_to_address_space(address_space);
    AddrRange range = addrrange_make(int128_make64(addr),
                                     int128_make64(size));
    FlatRange *fr = address_space_lookup(as, range);
    MemoryRegionSection ret = { .mr = NULL, .size = 0 };

    if (!fr) {
        return ret;
    }

    while (fr > as->current_map.ranges
           && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = int128_get64(range.size);
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}
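
/*
 * Usage sketch ("sysmem" and handle_section() are hypothetical): resolve an
 * absolute address to the terminating region that services it.  A miss is
 * signalled by .mr == NULL:
 *
 *     MemoryRegionSection sec = memory_region_find(sysmem, 0x4000, 4);
 *     if (sec.mr) {
 *         handle_section(&sec);
 *     }
 */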

void memory_global_sync_dirty_bitmap(MemoryRegion *address_space)
{
    AddressSpace *as = memory_region_to_address_space(address_space);
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
    }
}

void memory_global_dirty_log_start(void)
{
    global_dirty_log = true;
    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
}

void memory_global_dirty_log_stop(void)
{
    global_dirty_log = false;
    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatRange *fr;

    if (listener->address_space_filter
        && listener->address_space_filter != as->root) {
        return;
    }

    if (global_dirty_log) {
        listener->log_global_start(listener);
    }
    FOR_EACH_FLAT_RANGE(fr, &as->current_map) {
        MemoryRegionSection section = {
            .mr = fr->mr,
            .address_space = as->root,
            .offset_within_region = fr->offset_in_region,
            .size = int128_get64(fr->addr.size),
            .offset_within_address_space = int128_get64(fr->addr.start),
            .readonly = fr->readonly,
        };
        listener->region_add(listener, &section);
    }
}

void memory_listener_register(MemoryListener *listener, MemoryRegion *filter)
{
    MemoryListener *other = NULL;

    listener->address_space_filter = filter;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }
    listener_add_address_space(listener, &address_space_memory);
    listener_add_address_space(listener, &address_space_io);
}
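
/*
 * Sketch of a client (names hypothetical): a listener supplies every
 * callback (the dispatch macros above invoke them unconditionally, so
 * unused hooks need explicit no-op functions) and registers with an
 * optional address-space filter; registration immediately replays the
 * current flat views through region_add:
 *
 *     static MemoryListener my_listener = {
 *         .begin = my_nop, .commit = my_nop,
 *         .region_add = my_region_add,
 *         .region_del = my_region_del,
 *         .region_nop = my_nop_section,
 *         ... remaining hooks filled with no-ops ...
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, NULL);  // NULL: all address spaces
 */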

void memory_listener_unregister(MemoryListener *listener)
{
    QTAILQ_REMOVE(&memory_listeners, listener, link);
}

void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology(NULL);
}

void set_system_io_map(MemoryRegion *mr)
{
    address_space_io.root = mr;
    memory_region_update_topology(NULL);
}

uint64_t io_mem_read(MemoryRegion *mr, target_phys_addr_t addr, unsigned size)
{
    return memory_region_dispatch_read(mr, addr, size);
}

void io_mem_write(MemoryRegion *mr, target_phys_addr_t addr,
                  uint64_t val, unsigned size)
{
    memory_region_dispatch_write(mr, addr, val, size);
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    bool printed;
    QTAILQ_ENTRY(MemoryRegionList) queue;
};

typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;

static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           target_phys_addr_t base,
                           MemoryRegionListHead *alias_print_queue)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, "  ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
            if (ml->mr == mr->alias && !ml->printed) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            ml->printed = false;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "\n",
                   base + mr->addr,
                   base + mr->addr
                   + (target_phys_addr_t)int128_get64(mr->size) - 1,
                   mr->priority,
                   mr->readable ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->readable) ? 'W'
                                                                      : '-',
                   mr->name,
                   mr->alias->name,
                   mr->alias_offset,
                   mr->alias_offset
                   + (target_phys_addr_t)int128_get64(mr->size) - 1);
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s\n",
                   base + mr->addr,
                   base + mr->addr
                   + (target_phys_addr_t)int128_get64(mr->size) - 1,
                   mr->priority,
                   mr->readable ? 'R' : '-',
                   !mr->readonly && !(mr->rom_device && mr->readable) ? 'W'
                                                                      : '-',
                   mr->name);
    }

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
                       alias_print_queue);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
        g_free(ml);
    }
}

void mtree_info(fprintf_function mon_printf, void *f)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;

    QTAILQ_INIT(&ml_head);

    mon_printf(f, "memory\n");
    mtree_print_mr(mon_printf, f, address_space_memory.root, 0, 0, &ml_head);

    if (address_space_io.root &&
        !QTAILQ_EMPTY(&address_space_io.root->subregions)) {
        mon_printf(f, "I/O\n");
        mtree_print_mr(mon_printf, f, address_space_io.root, 0, 0, &ml_head);
    }

    mon_printf(f, "aliases\n");
    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, queue) {
        if (!ml->printed) {
            mon_printf(f, "%s\n", ml->mr->name);
            mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head);
        }
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
        g_free(ml);
    }
}