tcg: synchronize exit_request and tcg_current_cpu accesses
[sdk/emulator/qemu.git] / cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory-internal.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;
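
/* How the fields are used by init_delay_params() and align_clocks() below:
 *   diff_clk        - how far the guest clock is ahead of the host clock,
 *                     in nanoseconds; positive means the guest runs early.
 *   last_cpu_icount - icount snapshot taken at the previous call to
 *                     align_clocks(), used to turn the number of executed
 *                     instructions into elapsed guest time.
 *   realtime_clock  - QEMU_CLOCK_VIRTUAL_RT value at initialization, used
 *                     only to rate-limit the "guest is late" warnings.
 */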

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to run at most 3 ms ahead of real time.
 * The difference between the two clocks can therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

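/* Sleep off any advance the guest clock has gained over the host clock.
 * The guest's progress since the last call is measured in executed
 * instructions and converted to nanoseconds with cpu_icount_to_ns(); once
 * the accumulated advance exceeds VM_CLOCK_ADVANCE, the VCPU thread
 * sleeps until the two clocks are level again.
 */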
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print a warning every 2 s at most if the guest is late. We limit the
       number of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}

void cpu_reload_memory_map(CPUState *cpu)
{
    AddressSpaceDispatch *d;

    if (qemu_in_vcpu_thread()) {
        /* Do not let the guest prolong the critical section as much as it
         * desires.
         *
         * Currently, this is prevented by the I/O thread's periodic kicking
         * of the VCPU thread (iothread_requesting_mutex, qemu_cpu_kick_thread)
         * but this will go away once TCG's execution moves out of the global
         * mutex.
         *
         * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
         * only protects cpu->as->dispatch.  Since we reload it below, we can
         * split the critical section.
         */
        rcu_read_unlock();
        rcu_read_lock();
    }

    /* The CPU and TLB are protected by the iothread lock.  */
    d = atomic_rcu_read(&cpu->as->dispatch);
    cpu->memory_dispatch = d;
    tlb_flush(cpu, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
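/* The value returned by tcg_qemu_tb_exec() packs a TranslationBlock
 * pointer with an exit reason in its low TB_EXIT_MASK bits (two bits in
 * this tree's tcg.h): TB_EXIT_IDX0/IDX1 identify the jump slot through
 * which the TB was left, TB_EXIT_REQUESTED reports that tcg_exit_req was
 * set, and TB_EXIT_ICOUNT_EXPIRED reports an exhausted instruction
 * budget.
 */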
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb)
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE);
    tb->orig_tb = tcg_ctx.tb_ctx.tb_invalidated_flag ? NULL : orig_tb;
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
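
/* The CF_NOCACHE TB built above runs exactly once: it is invalidated and
 * freed as soon as cpu_tb_exec() returns, so later lookups can never
 * reuse it.
 */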

static TranslationBlock *tb_find_slow(CPUState *cpu,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

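/* TB lookup is two-level: tb_find_fast() first probes the per-CPU
 * tb_jmp_cache, indexed by the current virtual PC, and only falls back
 * to tb_find_slow()'s walk of the physically-indexed hash chain (and,
 * failing that, to retranslation) on a miss.
 */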
static inline TranslationBlock *tb_find_fast(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(cpu, pc, cs_base, flags);
    }
    return tb;
}

static void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

/* main execution loop */

volatile sig_atomic_t exit_request;
CPUState *tcg_current_cpu;
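
/* The commit subject refers to how these two variables are kept in sync:
 * cpu_exec() publishes tcg_current_cpu with atomic_mb_set() before it
 * reads exit_request, while a requester is expected to set exit_request
 * first and only then look at tcg_current_cpu to decide which VCPU to
 * kick.  With both sides separated by full barriers, either the requester
 * observes the running CPU and kicks it, or cpu_exec() observes
 * exit_request, so a request cannot be lost.
 */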

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUArchState *env = &x86_cpu->env;
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;
    atomic_mb_set(&tcg_current_cpu, cpu);
    rcu_read_lock();

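    /* The barrier in atomic_mb_set() above orders the store to
     * tcg_current_cpu before this read of exit_request; see the comment
     * at the definition of the two variables.
     */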
    if (unlikely(atomic_mb_read(&exit_request))) {
        cpu->exit_request = 1;
    }

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(cpu);
                    }
                    cpu->exception_index = -1;
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    cpu->exception_index = -1;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
                    /* The target hook has 3 exit conditions:
                       false when the interrupt isn't processed,
                       true when it is, and we should restart on a new TB,
                       and by longjmp through cpu_loop_exit().  */
                    if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                        next_tb = 0;
                    }
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(cpu);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
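                /* The low bits of next_tb (TB_EXIT_IDX0/IDX1) name the
                 * jump slot of the previous TB through which we arrived
                 * here, so tb_add_jump() knows which of its two exits to
                 * patch to point at the new TB.
                 */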
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    cpu->current_tb = tb;
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    cpu->current_tb = NULL;
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.  But we need to
                         * ensure the tcg_exit_req read in generated code
                         * comes before the next read of cpu->exit_request
                         * or cpu->interrupt_request.
                         */
                        smp_rmb();
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
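                        /* The low 16 bits of icount_decr hold the budget
                         * that generated code counts down, so at most
                         * 0xffff instructions fit per refill; the rest of
                         * the current slice is parked in icount_extra.
                         * Move budget back from icount_extra when
                         * possible, otherwise run the few instructions
                         * left in a one-shot nocache TB and exit the
                         * loop.
                         */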
                        int insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            insns_left = MIN(0xffff, cpu->icount_extra);
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                                cpu_exec_nocache(cpu, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            cc = CPU_GET_CLASS(cpu);
            cpu->can_do_io = 1;
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
            env = &x86_cpu->env;
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    /* Fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;

    /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
    atomic_set(&tcg_current_cpu, NULL);
    return ret;
}