/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor/monitor.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "qapi-event.h"
#include "hw/nmi.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;
int64_t max_delay;
int64_t max_advance;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

static bool icount_sleep = true;
static int64_t vm_clock_warp_start = -1;
/* Conversion factor from emulated instructions to virtual clock ticks.  */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
#define MAX_ICOUNT_SHIFT 10

static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;

typedef struct TimersState {
    /* Protected by BQL.  */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;

    /* Compensate for varying guest execution speed.  */
    int64_t qemu_icount_bias;
    /* Only written by TCG thread */
    int64_t qemu_icount;
} TimersState;

static TimersState timers_state;

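/* With icount enabled, virtual time is derived from the instruction count:
 *
 *     virtual_ns = (qemu_icount << icount_time_shift) + qemu_icount_bias
 *
 * qemu_icount counts executed instructions, icount_time_shift scales them
 * into nanoseconds, and qemu_icount_bias absorbs warps and shift changes
 * so virtual time stays continuous (see cpu_get_icount_locked() and
 * icount_adjust() below).
 */
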
int64_t cpu_get_icount_raw(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = timers_state.qemu_icount;
    if (cpu) {
        if (!cpu->can_do_io) {
            fprintf(stderr, "Bad icount read\n");
            exit(1);
        }
        icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
    }
    return icount;
}

/* Return the virtual CPU time, based on the instruction counter.  */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw();
    return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
}

int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}

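/* The read loop above is the usual seqlock pattern: sample the sequence
 * counter, read the protected fields, and retry if a writer raced with us.
 * Readers never block; writers serialize on the BQL, which acts as the
 * mutex for vm_clock_seqlock (see cpu_enable_ticks() below).
 */
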
int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << icount_time_shift;
}

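/* Example: with icount_time_shift == 3 each instruction accounts for
 * 2^3 = 8 ns of virtual time, i.e. a nominal guest speed of 125 MIPS
 * (the initial guess used by configure_icount() below).
 */
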
/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_real_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non increasing ticks may happen if the host uses
           software suspend.  */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t ticks;

    ticks = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += get_clock();
    }

    return ticks;
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the only thing really protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the only thing really protected by the seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

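/* The offset arithmetic above makes the guest clocks freeze while the VM
 * is stopped: on enable, "offset -= now" so that later reads return the
 * time elapsed since enabling plus whatever had accumulated before; on
 * disable, the current reading is folded back into the offset, so the next
 * enable resumes exactly where the clock stopped.
 */
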
/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    timers_state.qemu_icount_bias = cur_icount
                              - (timers_state.qemu_icount << icount_time_shift);
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}

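/* Note how the bias is recomputed whenever the shift changes: solving
 *
 *     cur_icount = (qemu_icount << new_shift) + new_bias
 *
 * for new_bias keeps virtual time continuous across the rate change, so
 * guest-visible time never jumps even though its speed just did.
 */
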
static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

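/* This is ceiling division by 2^icount_time_shift: e.g. with a shift of 3,
 * a 100 ns deadline becomes (100 + 7) >> 3 = 13 instructions, so the
 * instruction budget always covers at least the full deadline.
 */
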
static void icount_warp_rt(void *opaque)
{
    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    if (atomic_read(&vm_clock_warp_start) == -1) {
        return;
    }

    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = cpu_get_clock_locked();
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        timers_state.qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_unlock(&timers_state.vm_clock_seqlock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    AioContext *aio_context;
    assert(qtest_enabled());
    aio_context = qemu_get_aio_context();
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);

        seqlock_write_lock(&timers_state.vm_clock_seqlock);
        timers_state.qemu_icount_bias += warp;
        seqlock_write_unlock(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

void qemu_clock_warp(QEMUClockType type)
{
    int64_t clock;
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
        return;
    }

    if (icount_sleep) {
        /*
         * If the CPUs have been sleeping, advance QEMU_CLOCK_VIRTUAL timer now.
         * This ensures that the deadline for the timer is computed correctly
         * below.
         * This also makes sure that the insn counter is synchronized before
         * the CPU starts running, in case the CPU is woken by an event other
         * than the earliest QEMU_CLOCK_VIRTUAL timer.
         */
        icount_warp_rt(NULL);
        timer_del(icount_warp_timer);
    }
    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        static bool notified;
        if (!icount_sleep && !notified) {
            error_report("WARNING: icount sleep disabled and no active timers");
            notified = true;
        }
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         */
        if (!icount_sleep) {
            /*
             * We never let VCPUs sleep in no sleep icount mode.
             * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
             * to the next QEMU_CLOCK_VIRTUAL event and notify it.
             * It is useful when we want a deterministic execution time,
             * isolated from host latencies.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock);
            timers_state.qemu_icount_bias += deadline;
            seqlock_write_unlock(&timers_state.vm_clock_seqlock);
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        } else {
            /*
             * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
             * "real" time, (related to the time left until the next event) has
             * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
             * This avoids that the warps are visible externally; for example,
             * you will not be sending network packets continuously instead of
             * every 100ms.
             */
            seqlock_write_lock(&timers_state.vm_clock_seqlock);
            if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
                vm_clock_warp_start = clock;
            }
            seqlock_write_unlock(&timers_state.vm_clock_seqlock);
            timer_mod_anticipate(icount_warp_timer, clock + deadline);
        }
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

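/* Summary of the two warp strategies above: with sleep=off the bias is
 * bumped straight to the next deadline, so execution is deterministic and
 * never waits on the host; with sleep=on a QEMU_CLOCK_VIRTUAL_RT timer is
 * armed and icount_warp_rt() later adds only the real time that actually
 * elapsed, keeping externally visible timing (e.g. network packets) smooth.
 */
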
static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_timers,
        NULL
    }
};

void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock, NULL);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
}

void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }

    icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
    if (icount_sleep) {
        icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                         icount_warp_rt, NULL);
    }

    icount_align_option = qemu_opt_get_bool(opts, "align", false);

    if (icount_align_option && !icount_sleep) {
        error_setg(errp, "align=on and sleep=no are incompatible");
    }
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    } else if (!icount_sleep) {
        error_setg(errp, "shift=auto and sleep=no are incompatible");
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() / 10);
}

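/* The options parsed above correspond to the -icount command line group;
 * for instance (an illustrative invocation, not taken from this file):
 *
 *     qemu-system-x86_64 -icount shift=auto,align=on,sleep=on ...
 *
 * "shift" picks a fixed 2^N ns-per-insn rate or "auto" for adaptive mode
 * (use_icount == 2), "sleep" controls icount_sleep, and "align" enables
 * the host/guest drift report printed by dump_drift_info() at the end of
 * this file.
 */
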
/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

void cpu_clean_all_dirty(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_clean_state(cpu);
    }
}

static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        qapi_event_send_stop(&error_abort);
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    CPUState *cpu;
    /* Ensure whatever caused the exit has reached the CPU threads before
     * writing exit_request.
     */
    atomic_mb_set(&exit_request, 1);
    cpu = atomic_mb_read(&tcg_current_cpu);
    if (cpu) {
        cpu_exit(cpu);
    }
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static unsigned iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(cpu);
    while (!wi.done) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}

void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;

    qemu_cpu_kick(cpu);
}

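/* run_on_cpu() and async_run_on_cpu() differ only in ownership of the work
 * item: the synchronous variant stack-allocates it and blocks on
 * qemu_work_cond until the target vCPU thread has run the function, while
 * the asynchronous variant heap-allocates it with free = true, so that
 * flush_queued_work() releases it, and returns immediately.
 */
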
static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    while ((wi = cpu->queued_work_first)) {
        cpu->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
        if (wi->free) {
            g_free(wi);
        }
    }
    cpu->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
        cpu->can_do_io = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    /* process any pending work */
    atomic_mb_set(&exit_request, 1);

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    if (!tcg_enabled()) {
        if (cpu->thread_kicked) {
            return;
        }
        cpu->thread_kicked = true;
    }
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        CONTEXT tcgContext;

        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }

        /* On multi-core systems, we are not sure that the thread is actually
         * suspended until we can get the context.
         */
        tcgContext.ContextFlags = CONTEXT_CONTROL;
        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
            continue;
        }

        cpu_signal(0);

        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
                    GetLastError());
            exit(1);
        }
    }
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    qemu_cpu_kick_thread(cpu);
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(current_cpu);
    qemu_cpu_kick_thread(current_cpu);
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

static __thread bool iothread_locked = false;

bool qemu_mutex_iothread_locked(void)
{
    return iothread_locked;
}

void qemu_mutex_lock_iothread(void)
{
    atomic_inc(&iothread_requesting_mutex);
    /* In the simple case there is no need to bump the VCPU thread out of
     * TCG code execution.
     */
    if (!tcg_enabled() || qemu_in_vcpu_thread() ||
        !first_cpu || !first_cpu->created) {
        qemu_mutex_lock(&qemu_global_mutex);
        atomic_dec(&iothread_requesting_mutex);
    } else {
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        atomic_dec(&iothread_requesting_mutex);
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
    iothread_locked = true;
}

void qemu_mutex_unlock_iothread(void)
{
    iothread_locked = false;
    qemu_mutex_unlock(&qemu_global_mutex);
}

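/* The contended path above kicks first_cpu out of the TCG execution loop
 * before sleeping on the mutex; otherwise a long-running translation block
 * could delay the I/O thread indefinitely. iothread_requesting_mutex is
 * what qemu_tcg_wait_io_event() polls so that vCPU threads yield the BQL
 * to a waiting requester.
 */
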
static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

/* For temporary buffers for forming a name */
#define VCPU_THREAD_NAME_SIZE 16

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    tcg_cpu_address_space_init(cpu, cpu->as);

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                 cpu->cpu_index);
        qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return bdrv_flush_all();
    }
}

static int tcg_cpu_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int64_t deadline;
        int decr;
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                    + cpu->icount_extra);
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        count = qemu_icount_round(deadline);
        timers_state.qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        cpu->icount_decr.u16.low = decr;
        cpu->icount_extra = count;
    }
    ret = cpu_exec(cpu);
#ifdef CONFIG_PROFILER
    tcg_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                        + cpu->icount_extra);
        cpu->icount_decr.u32 = 0;
        cpu->icount_extra = 0;
    }
    return ret;
}

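/* The instruction budget handed to cpu_exec() above is split in two:
 * icount_decr.u16.low holds at most 0xffff instructions (it is decremented
 * by generated code), and any remainder waits in icount_extra. E.g. a
 * budget of 100000 instructions runs as 65535 in u16.low plus 34465 in
 * icount_extra, refilled as each chunk expires.
 */
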
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_clock_warp(QEMU_CLOCK_VIRTUAL);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }

    /* Pairs with smp_wmb in qemu_cpu_kick.  */
    atomic_mb_set(&exit_request, 0);
}

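/* This is the single-threaded TCG scheduler: one host thread round-robins
 * over all vCPUs, resuming from next_cpu rather than first_cpu on each
 * call so that an exit_request (e.g. from the I/O thread) does not starve
 * the later CPUs in the list.
 */
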
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_TRICORE)
        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
        CPUTriCoreState *env = &tricore_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
        info->value->has_PC = true;
        info->value->PC = env->PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];
    int64_t orig_addr = addr, orig_size = size;

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                   "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (f == NULL) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
                             " specified", orig_addr, orig_size);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (f == NULL) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
#else
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
#endif
}

void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
{
    if (!use_icount) {
        return;
    }

    cpu_fprintf(f, "Host - Guest clock  %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
    if (icount_align_option) {
        cpu_fprintf(f, "Max guest delay     %"PRIi64" ms\n", -max_delay/SCALE_MS);
        cpu_fprintf(f, "Max guest advance   %"PRIi64" ms\n", max_advance/SCALE_MS);
    } else {
        cpu_fprintf(f, "Max guest delay     NA\n");
        cpu_fprintf(f, "Max guest advance   NA\n");
    }
}