/*
 * QEMU memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "cpu.h"
#include "cpu-all.h"
#include "memory_mapping.h"

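/*
 * Insert a mapping into the list, keeping the entries sorted by ascending
 * physical address.
 */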
static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list,
                                                   MemoryMapping *mapping)
{
    MemoryMapping *p;

    QTAILQ_FOREACH(p, &list->head, next) {
        if (p->phys_addr >= mapping->phys_addr) {
            QTAILQ_INSERT_BEFORE(p, mapping, next);
            return;
        }
    }
    QTAILQ_INSERT_TAIL(&list->head, mapping, next);
}

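/*
 * Allocate a new MemoryMapping, record it as the most recently used mapping
 * and insert it into the sorted list.
 */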
static void create_new_memory_mapping(MemoryMappingList *list,
                                      hwaddr phys_addr,
                                      hwaddr virt_addr,
                                      ram_addr_t length)
{
    MemoryMapping *memory_mapping;

    memory_mapping = g_malloc(sizeof(MemoryMapping));
    memory_mapping->phys_addr = phys_addr;
    memory_mapping->virt_addr = virt_addr;
    memory_mapping->length = length;
    list->last_mapping = memory_mapping;
    list->num++;
    memory_mapping_list_add_mapping_sorted(list, memory_mapping);
}

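/*
 * Return true if a region starting at phys_addr/virt_addr directly follows
 * map, i.e. both addresses continue exactly where map ends.
 */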
static inline bool mapping_contiguous(MemoryMapping *map,
                                      hwaddr phys_addr,
                                      hwaddr virt_addr)
{
    return phys_addr == map->phys_addr + map->length &&
           virt_addr == map->virt_addr + map->length;
}

/*
 * Do [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect?
 */
static inline bool mapping_have_same_region(MemoryMapping *map,
                                            hwaddr phys_addr,
                                            ram_addr_t length)
{
    return !(phys_addr + length < map->phys_addr ||
             phys_addr >= map->phys_addr + map->length);
}

/*
 * [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect. Do the two mappings assign
 * different virtual addresses to the intersection, i.e. do they conflict?
 */
static inline bool mapping_conflict(MemoryMapping *map,
                                    hwaddr phys_addr,
                                    hwaddr virt_addr)
{
    return virt_addr - map->virt_addr != phys_addr - map->phys_addr;
}

/*
 * [map->virt_addr, map->virt_addr + map->length) and
 * [virt_addr, virt_addr + length) intersect, and the physical addresses in
 * the intersection agree. Extend map so that it covers both ranges.
 */
static inline void mapping_merge(MemoryMapping *map,
                                 hwaddr virt_addr,
                                 ram_addr_t length)
{
    if (virt_addr < map->virt_addr) {
        map->length += map->virt_addr - virt_addr;
        map->virt_addr = virt_addr;
    }

    if ((virt_addr + length) >
        (map->virt_addr + map->length)) {
        map->length = virt_addr + length - map->virt_addr;
    }
}

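/*
 * Add a region to the list: extend or merge it into an existing mapping when
 * the physical and virtual addresses line up, otherwise insert it as a new
 * mapping in physical-address order.
 */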
void memory_mapping_list_add_merge_sorted(MemoryMappingList *list,
                                          hwaddr phys_addr,
                                          hwaddr virt_addr,
                                          ram_addr_t length)
{
    MemoryMapping *memory_mapping, *last_mapping;

    if (QTAILQ_EMPTY(&list->head)) {
        create_new_memory_mapping(list, phys_addr, virt_addr, length);
        return;
    }

    last_mapping = list->last_mapping;
    if (last_mapping) {
        if (mapping_contiguous(last_mapping, phys_addr, virt_addr)) {
            last_mapping->length += length;
            return;
        }
    }

    QTAILQ_FOREACH(memory_mapping, &list->head, next) {
        if (mapping_contiguous(memory_mapping, phys_addr, virt_addr)) {
            memory_mapping->length += length;
            list->last_mapping = memory_mapping;
            return;
        }

        if (phys_addr + length < memory_mapping->phys_addr) {
            /* create a new region before memory_mapping */
            break;
        }

        if (mapping_have_same_region(memory_mapping, phys_addr, length)) {
            if (mapping_conflict(memory_mapping, phys_addr, virt_addr)) {
                continue;
            }

            /* merge this region into memory_mapping */
            mapping_merge(memory_mapping, virt_addr, length);
            list->last_mapping = memory_mapping;
            return;
        }
    }

    /* This region cannot be merged into any existing memory mapping. */
    create_new_memory_mapping(list, phys_addr, virt_addr, length);
}

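/* Free every mapping in the list and reset it to the empty state. */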
void memory_mapping_list_free(MemoryMappingList *list)
{
    MemoryMapping *p, *q;

    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
        QTAILQ_REMOVE(&list->head, p, next);
        g_free(p);
    }

    list->num = 0;
    list->last_mapping = NULL;
}

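/* Initialize an empty mapping list. */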
void memory_mapping_list_init(MemoryMappingList *list)
{
    list->num = 0;
    list->last_mapping = NULL;
    QTAILQ_INIT(&list->head);
}

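/*
 * Starting from start_cpu, return the first CPU that has paging enabled,
 * or NULL if no such CPU exists.
 */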
static CPUArchState *find_paging_enabled_cpu(CPUArchState *start_cpu)
{
    CPUArchState *env;

    for (env = start_cpu; env != NULL; env = env->next_cpu) {
        if (cpu_paging_enabled(env)) {
            return env;
        }
    }

    return NULL;
}

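/*
 * Fill the list with the guest's memory mappings. If some CPU has paging
 * enabled, collect the mappings reported by that CPU and the CPUs after it;
 * otherwise identity-map every RAM block. Returns 0 on success, -1 on error.
 */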
int qemu_get_guest_memory_mapping(MemoryMappingList *list)
{
    CPUArchState *env, *first_paging_enabled_cpu;
    RAMBlock *block;
    ram_addr_t offset, length;
    int ret;

    first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu);
    if (first_paging_enabled_cpu) {
        for (env = first_paging_enabled_cpu; env != NULL; env = env->next_cpu) {
            ret = cpu_get_memory_mapping(list, env);
            if (ret < 0) {
                return -1;
            }
        }
        return 0;
    }

    /*
     * If the guest doesn't use paging, virtual addresses are equal to
     * physical addresses, so identity-map every RAM block.
     */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = block->offset;
        length = block->length;
        create_new_memory_mapping(list, offset, offset, length);
    }

    return 0;
}

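/*
 * Fill the list with one mapping per RAM block, using physical addresses
 * only (the virtual address of each mapping is left as 0).
 */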
void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        create_new_memory_mapping(list, block->offset, 0, block->length);
    }
}

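/*
 * Drop every mapping that lies completely outside [begin, begin + length)
 * and clip the remaining mappings so that they fit inside that window.
 */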
void memory_mapping_filter(MemoryMappingList *list, int64_t begin,
                           int64_t length)
{
    MemoryMapping *cur, *next;

    QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) {
        if (cur->phys_addr >= begin + length ||
            cur->phys_addr + cur->length <= begin) {
            /* Entirely outside the filtered window: unlink and free it. */
            QTAILQ_REMOVE(&list->head, cur, next);
            g_free(cur);
            list->num--;
            continue;
        }

        if (cur->phys_addr < begin) {
            cur->length -= begin - cur->phys_addr;
            if (cur->virt_addr) {
                cur->virt_addr += begin - cur->phys_addr;
            }
            cur->phys_addr = begin;
        }

        if (cur->phys_addr + cur->length > begin + length) {
            cur->length -= cur->phys_addr + cur->length - begin - length;
        }
    }
}