softmmu/memory_mapping.c
/*
 * QEMU memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"

#include "cpu.h"
#include "sysemu/memory_mapping.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

//#define DEBUG_GUEST_PHYS_REGION_ADD

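/*
 * Keep the list sorted by guest physical address: insert @mapping in front
 * of the first entry whose phys_addr is not below it, or at the tail if no
 * such entry exists.
 */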
static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list,
                                                   MemoryMapping *mapping)
{
    MemoryMapping *p;

    QTAILQ_FOREACH(p, &list->head, next) {
        if (p->phys_addr >= mapping->phys_addr) {
            QTAILQ_INSERT_BEFORE(p, mapping, next);
            return;
        }
    }
    QTAILQ_INSERT_TAIL(&list->head, mapping, next);
}

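/*
 * Allocate a new mapping, remember it as the most recently touched one and
 * insert it into the sorted list.
 */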
static void create_new_memory_mapping(MemoryMappingList *list,
                                      hwaddr phys_addr,
                                      hwaddr virt_addr,
                                      ram_addr_t length)
{
    MemoryMapping *memory_mapping;

    memory_mapping = g_malloc(sizeof(MemoryMapping));
    memory_mapping->phys_addr = phys_addr;
    memory_mapping->virt_addr = virt_addr;
    memory_mapping->length = length;
    list->last_mapping = memory_mapping;
    list->num++;
    memory_mapping_list_add_mapping_sorted(list, memory_mapping);
}

static inline bool mapping_contiguous(MemoryMapping *map,
                                      hwaddr phys_addr,
                                      hwaddr virt_addr)
{
    return phys_addr == map->phys_addr + map->length &&
           virt_addr == map->virt_addr + map->length;
}

/*
 * Do [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect?
 */
static inline bool mapping_have_same_region(MemoryMapping *map,
                                            hwaddr phys_addr,
                                            ram_addr_t length)
{
    return !(phys_addr + length < map->phys_addr ||
             phys_addr >= map->phys_addr + map->length);
}

/*
 * [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect.  Do the virtual addresses in
 * the intersection disagree, i.e. do the two mappings conflict?
 */
static inline bool mapping_conflict(MemoryMapping *map,
                                    hwaddr phys_addr,
                                    hwaddr virt_addr)
{
    return virt_addr - map->virt_addr != phys_addr - map->phys_addr;
}

/*
 * [map->virt_addr, map->virt_addr + map->length) and
 * [virt_addr, virt_addr + length) intersect, and the physical addresses in
 * the intersection are the same.  Grow @map so that it covers the union of
 * the two ranges.
 */
static inline void mapping_merge(MemoryMapping *map,
                                 hwaddr virt_addr,
                                 ram_addr_t length)
{
    if (virt_addr < map->virt_addr) {
        map->length += map->virt_addr - virt_addr;
        map->virt_addr = virt_addr;
    }

    if ((virt_addr + length) >
        (map->virt_addr + map->length)) {
        map->length = virt_addr + length - map->virt_addr;
    }
}

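/*
 * Add [phys_addr, phys_addr + length), mapped at virt_addr, to the list.
 * The range is merged into an existing mapping whenever the two are
 * contiguous or overlap with a consistent virtual/physical offset; the most
 * recently touched mapping is tried first, since callers tend to add
 * adjacent ranges.  If nothing can absorb the range, a new mapping is
 * created.
 */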
void memory_mapping_list_add_merge_sorted(MemoryMappingList *list,
                                          hwaddr phys_addr,
                                          hwaddr virt_addr,
                                          ram_addr_t length)
{
    MemoryMapping *memory_mapping, *last_mapping;

    if (QTAILQ_EMPTY(&list->head)) {
        create_new_memory_mapping(list, phys_addr, virt_addr, length);
        return;
    }

    last_mapping = list->last_mapping;
    if (last_mapping) {
        if (mapping_contiguous(last_mapping, phys_addr, virt_addr)) {
            last_mapping->length += length;
            return;
        }
    }

    QTAILQ_FOREACH(memory_mapping, &list->head, next) {
        if (mapping_contiguous(memory_mapping, phys_addr, virt_addr)) {
            memory_mapping->length += length;
            list->last_mapping = memory_mapping;
            return;
        }

        if (phys_addr + length < memory_mapping->phys_addr) {
            /* create a new region before memory_mapping */
            break;
        }

        if (mapping_have_same_region(memory_mapping, phys_addr, length)) {
            if (mapping_conflict(memory_mapping, phys_addr, virt_addr)) {
                continue;
            }

            /* merge this region into memory_mapping */
            mapping_merge(memory_mapping, virt_addr, length);
            list->last_mapping = memory_mapping;
            return;
        }
    }

    /* this region cannot be merged into any existing memory mapping. */
    create_new_memory_mapping(list, phys_addr, virt_addr, length);
}

void memory_mapping_list_free(MemoryMappingList *list)
{
    MemoryMapping *p, *q;

    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
        QTAILQ_REMOVE(&list->head, p, next);
        g_free(p);
    }

    list->num = 0;
    list->last_mapping = NULL;
}

void memory_mapping_list_init(MemoryMappingList *list)
{
    list->num = 0;
    list->last_mapping = NULL;
    QTAILQ_INIT(&list->head);
}

void guest_phys_blocks_free(GuestPhysBlockList *list)
{
    GuestPhysBlock *p, *q;

    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
        QTAILQ_REMOVE(&list->head, p, next);
        memory_region_unref(p->mr);
        g_free(p);
    }
    list->num = 0;
}

void guest_phys_blocks_init(GuestPhysBlockList *list)
{
    list->num = 0;
    QTAILQ_INIT(&list->head);
}

typedef struct GuestPhysListener {
    GuestPhysBlockList *list;
    MemoryListener listener;
} GuestPhysListener;

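/*
 * MemoryListener callback: turn each RAM MemoryRegionSection into a
 * GuestPhysBlock.  A section that is contiguous with the previous block in
 * both guest-physical and host-virtual address space is folded into that
 * block instead of creating a new one.
 */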
static void guest_phys_blocks_region_add(MemoryListener *listener,
                                         MemoryRegionSection *section)
{
    GuestPhysListener *g;
    uint64_t section_size;
    hwaddr target_start, target_end;
    uint8_t *host_addr;
    GuestPhysBlock *predecessor;

    /* we only care about RAM */
    if (!memory_region_is_ram(section->mr) ||
        memory_region_is_ram_device(section->mr) ||
        memory_region_is_nonvolatile(section->mr)) {
        return;
    }

    g            = container_of(listener, GuestPhysListener, listener);
    section_size = int128_get64(section->size);
    target_start = section->offset_within_address_space;
    target_end   = target_start + section_size;
    host_addr    = memory_region_get_ram_ptr(section->mr) +
                   section->offset_within_region;
    predecessor  = NULL;

    /* find continuity in guest physical address space */
    if (!QTAILQ_EMPTY(&g->list->head)) {
        hwaddr predecessor_size;

        predecessor = QTAILQ_LAST(&g->list->head);
        predecessor_size = predecessor->target_end - predecessor->target_start;

        /* the memory API guarantees monotonically increasing traversal */
        g_assert(predecessor->target_end <= target_start);

        /* we want continuity in both guest-physical and host-virtual memory */
        if (predecessor->target_end < target_start ||
            predecessor->host_addr + predecessor_size != host_addr) {
            predecessor = NULL;
        }
    }

    if (predecessor == NULL) {
        /* isolated mapping, allocate it and add it to the list */
        GuestPhysBlock *block = g_malloc0(sizeof *block);

        block->target_start = target_start;
        block->target_end   = target_end;
        block->host_addr    = host_addr;
        block->mr           = section->mr;
        memory_region_ref(section->mr);

        QTAILQ_INSERT_TAIL(&g->list->head, block, next);
        ++g->list->num;
    } else {
        /* expand predecessor until @target_end; predecessor's start doesn't
         * change
         */
        predecessor->target_end = target_end;
    }

#ifdef DEBUG_GUEST_PHYS_REGION_ADD
    fprintf(stderr, "%s: target_start=" TARGET_FMT_plx " target_end="
            TARGET_FMT_plx ": %s (count: %u)\n", __func__, target_start,
            target_end, predecessor ? "joined" : "added", g->list->num);
#endif
}

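/*
 * Snapshot the current RAM layout into @list.  Registering a MemoryListener
 * replays every existing section through its region_add callback, so a
 * register/unregister pair is all that is needed.
 *
 * Typical usage together with the mapping helpers in this file (a sketch
 * only; see the dump code for the real thing):
 *
 *     GuestPhysBlockList blocks;
 *     MemoryMappingList mappings;
 *     Error *err = NULL;
 *
 *     guest_phys_blocks_init(&blocks);
 *     guest_phys_blocks_append(&blocks);
 *     memory_mapping_list_init(&mappings);
 *     qemu_get_guest_memory_mapping(&mappings, &blocks, &err);
 *     ...
 *     memory_mapping_list_free(&mappings);
 *     guest_phys_blocks_free(&blocks);
 */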
void guest_phys_blocks_append(GuestPhysBlockList *list)
{
    GuestPhysListener g = { 0 };

    g.list = list;
    g.listener.region_add = &guest_phys_blocks_region_add;
    memory_listener_register(&g.listener, &address_space_memory);
    memory_listener_unregister(&g.listener);
}

static CPUState *find_paging_enabled_cpu(CPUState *start_cpu)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu_paging_enabled(cpu)) {
            return cpu;
        }
    }

    return NULL;
}

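/*
 * If at least one CPU has paging enabled, let every CPU from that one
 * onwards walk its page tables to populate @list; otherwise fall back to an
 * identity mapping of the guest-physical blocks.
 */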
void qemu_get_guest_memory_mapping(MemoryMappingList *list,
                                   const GuestPhysBlockList *guest_phys_blocks,
                                   Error **errp)
{
    CPUState *cpu, *first_paging_enabled_cpu;
    GuestPhysBlock *block;
    ram_addr_t offset, length;

    first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu);
    if (first_paging_enabled_cpu) {
        for (cpu = first_paging_enabled_cpu; cpu != NULL;
             cpu = CPU_NEXT(cpu)) {
            Error *err = NULL;
            cpu_get_memory_mapping(cpu, list, &err);
            if (err) {
                error_propagate(errp, err);
                return;
            }
        }
        return;
    }

    /*
     * If the guest doesn't use paging, the virtual address equals the
     * physical address.
     */
    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
        offset = block->target_start;
        length = block->target_end - block->target_start;
        create_new_memory_mapping(list, offset, offset, length);
    }
}

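/*
 * One mapping per guest-physical block, with the virtual address left at 0:
 * no CPU state is consulted at all.
 */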
void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list,
                                   const GuestPhysBlockList *guest_phys_blocks)
{
    GuestPhysBlock *block;

    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
        create_new_memory_mapping(list, block->target_start, 0,
                                  block->target_end - block->target_start);
    }
}

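/*
 * Restrict the list to the guest-physical window [begin, begin + length):
 * mappings entirely outside the window are dropped, mappings straddling
 * either edge are clamped to it.
 */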
void memory_mapping_filter(MemoryMappingList *list, int64_t begin,
                           int64_t length)
{
    MemoryMapping *cur, *next;

    QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) {
        if (cur->phys_addr >= begin + length ||
            cur->phys_addr + cur->length <= begin) {
            QTAILQ_REMOVE(&list->head, cur, next);
            g_free(cur);
            list->num--;
            continue;
        }

        if (cur->phys_addr < begin) {
            cur->length -= begin - cur->phys_addr;
            if (cur->virt_addr) {
                cur->virt_addr += begin - cur->phys_addr;
            }
            cur->phys_addr = begin;
        }

        if (cur->phys_addr + cur->length > begin + length) {
            cur->length -= cur->phys_addr + cur->length - begin - length;
        }
    }
}