4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 /* Needed early for CONFIG_BSD etc. */
26 #include "qemu/osdep.h"
28 #include "monitor/monitor.h"
29 #include "qapi/qmp/qerror.h"
30 #include "qemu/error-report.h"
31 #include "sysemu/sysemu.h"
32 #include "sysemu/block-backend.h"
33 #include "exec/gdbstub.h"
34 #include "sysemu/dma.h"
35 #include "sysemu/kvm.h"
36 #include "sysemu/hax.h"
37 #include "qmp-commands.h"
39 #include "qemu/thread.h"
40 #include "sysemu/cpus.h"
41 #include "sysemu/qtest.h"
42 #include "qemu/main-loop.h"
43 #include "qemu/bitmap.h"
44 #include "qemu/seqlock.h"
45 #include "qapi-event.h"
47 #include "sysemu/replay.h"
50 #include "qemu/compatfd.h"
55 #include <sys/prctl.h>
58 #define PR_MCE_KILL 33
61 #ifndef PR_MCE_KILL_SET
62 #define PR_MCE_KILL_SET 1
65 #ifndef PR_MCE_KILL_EARLY
66 #define PR_MCE_KILL_EARLY 1
69 #endif /* CONFIG_LINUX */
/* Scheduling cursor for the round-robin single-threaded TCG execution loop. */
71 static CPUState *next_cpu;
75 /* vcpu throttling controls */
/* Timer that periodically schedules the throttle sleep work on each vCPU. */
76 static QEMUTimer *throttle_timer;
/* Current throttle percentage; read/written with atomic_* accessors. */
77 static unsigned int throttle_percentage;
/* Throttle percentage is clamped to [1, 99]; the timeslice is 10 ms. */
79 #define CPU_THROTTLE_PCT_MIN 1
80 #define CPU_THROTTLE_PCT_MAX 99
81 #define CPU_THROTTLE_TIMESLICE_NS 10000000
/* Return true if @cpu is stopped, either individually or because the whole
 * VM run state is not "running".
 */
83 bool cpu_is_stopped(CPUState *cpu)
85 return cpu->stopped || !runstate_is_running();
/* Return true if @cpu's thread has nothing to do and may sleep: no pending
 * stop request or queued work, and the CPU is halted with no work (unless
 * KVM handles HLT in the kernel, in which case user space never idles here).
 */
88 static bool cpu_thread_is_idle(CPUState *cpu)
90 if (cpu->stop || cpu->queued_work_first) {
93 if (cpu_is_stopped(cpu)) {
96 if (!cpu->halted || cpu_has_work(cpu) ||
97 kvm_halt_in_kernel()) {
/* Return true only when every CPU is idle per cpu_thread_is_idle(). */
103 static bool all_cpu_threads_idle(void)
108 if (!cpu_thread_is_idle(cpu)) {
115 /***********************************************************/
116 /* guest cycle counter */
118 /* Protected by TimersState seqlock */
/* Whether the warp timer may let vCPUs sleep (icount "sleep" option). */
120 static bool icount_sleep = true;
/* Start of a clock-warp period; -1 means no warp is in progress. */
121 static int64_t vm_clock_warp_start = -1;
122 /* Conversion factor from emulated instructions to virtual clock ticks. */
123 static int icount_time_shift;
124 /* Arbitrarily pick 1MIPS as the minimum allowable speed. */
125 #define MAX_ICOUNT_SHIFT 10
/* Timers used by adaptive icount (realtime + virtual) and clock warping. */
127 static QEMUTimer *icount_rt_timer;
128 static QEMUTimer *icount_vm_timer;
129 static QEMUTimer *icount_warp_timer;
/* Bookkeeping for the guest tick/clock/icount offsets. */
131 typedef struct TimersState {
132 /* Protected by BQL. */
133 int64_t cpu_ticks_prev;
134 int64_t cpu_ticks_offset;
136 /* cpu_clock_offset can be read out of BQL, so protect it with
139 QemuSeqLock vm_clock_seqlock;
140 int64_t cpu_clock_offset;
141 int32_t cpu_ticks_enabled;
144 /* Compensate for varying guest execution speed. */
145 int64_t qemu_icount_bias;
146 /* Only written by TCG thread */
150 static TimersState timers_state;
/* Return the raw executed-instruction count, correcting for instructions
 * not yet executed in the current translation block of the running CPU.
 */
152 int64_t cpu_get_icount_raw(void)
155 CPUState *cpu = current_cpu;
157 icount = timers_state.qemu_icount;
159 if (!cpu->can_do_io) {
/* Reading icount outside an I/O window is invalid; report it. */
160 fprintf(stderr, "Bad icount read\n");
163 icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
168 /* Return the virtual CPU time, based on the instruction counter. */
/* Caller must hold the seqlock (or the BQL) — hence "_locked". */
169 static int64_t cpu_get_icount_locked(void)
171 int64_t icount = cpu_get_icount_raw();
172 return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
/* Lock-free reader wrapper: retry around the vm_clock seqlock. */
175 int64_t cpu_get_icount(void)
181 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
182 icount = cpu_get_icount_locked();
183 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
/* Convert an instruction count to virtual-clock nanoseconds (shift scale). */
188 int64_t cpu_icount_to_ns(int64_t icount)
190 return icount << icount_time_shift;
193 /* return the host CPU cycle counter and handle stop/restart */
194 /* Caller must hold the BQL */
195 int64_t cpu_get_ticks(void)
/* With icount enabled, virtual time is derived from instructions instead. */
200 return cpu_get_icount();
203 ticks = timers_state.cpu_ticks_offset;
204 if (timers_state.cpu_ticks_enabled) {
205 ticks += cpu_get_host_ticks();
208 if (timers_state.cpu_ticks_prev > ticks) {
209 /* Note: non increasing ticks may happen if the host uses
/* Fold the regression into the offset so the returned value is monotonic. */
211 timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
212 ticks = timers_state.cpu_ticks_prev;
215 timers_state.cpu_ticks_prev = ticks;
/* Clock variant of the above; caller must hold the seqlock or BQL. */
219 static int64_t cpu_get_clock_locked(void)
223 ticks = timers_state.cpu_clock_offset;
224 if (timers_state.cpu_ticks_enabled) {
225 ticks += get_clock();
231 /* return the host CPU monotonic timer and handle stop/restart */
/* Lock-free reader wrapper around cpu_get_clock_locked(). */
232 int64_t cpu_get_clock(void)
238 start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
239 ti = cpu_get_clock_locked();
240 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
245 /* enable cpu_get_ticks()
246 * Caller must hold BQL, which serves as mutex for vm_clock_seqlock.
248 void cpu_enable_ticks(void)
250 /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
251 seqlock_write_lock(&timers_state.vm_clock_seqlock);
252 if (!timers_state.cpu_ticks_enabled) {
/* Rebase offsets so that ticks/clock continue from where they stopped. */
253 timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
254 timers_state.cpu_clock_offset -= get_clock();
255 timers_state.cpu_ticks_enabled = 1;
257 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
260 /* disable cpu_get_ticks() : the clock is stopped. You must not call
261 * cpu_get_ticks() after that.
262 * Caller must hold BQL, which serves as mutex for vm_clock_seqlock.
264 void cpu_disable_ticks(void)
266 /* Here, the real thing protected by the seqlock is cpu_clock_offset. */
267 seqlock_write_lock(&timers_state.vm_clock_seqlock);
268 if (timers_state.cpu_ticks_enabled) {
/* Freeze the current values into the offsets before disabling. */
269 timers_state.cpu_ticks_offset += cpu_get_host_ticks();
270 timers_state.cpu_clock_offset = cpu_get_clock_locked();
271 timers_state.cpu_ticks_enabled = 0;
273 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
276 /* Correlation between real and virtual time is always going to be
277 fairly approximate, so ignore small variation.
278 When the guest is idle real and virtual time will be aligned in
280 #define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)
/* Periodically re-tune icount_time_shift so virtual time tracks real time. */
282 static void icount_adjust(void)
288 /* Protected by TimersState mutex. */
289 static int64_t last_delta;
291 /* If the VM is not running, then do nothing. */
292 if (!runstate_is_running()) {
296 seqlock_write_lock(&timers_state.vm_clock_seqlock);
297 cur_time = cpu_get_clock_locked();
298 cur_icount = cpu_get_icount_locked();
300 delta = cur_icount - cur_time;
301 /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
303 && last_delta + ICOUNT_WOBBLE < delta * 2
304 && icount_time_shift > 0) {
305 /* The guest is getting too far ahead. Slow time down. */
309 && last_delta - ICOUNT_WOBBLE > delta * 2
310 && icount_time_shift < MAX_ICOUNT_SHIFT) {
311 /* The guest is getting too far behind. Speed time up. */
/* Recompute the bias so the visible clock does not jump when the shift
 * changes.
 */
315 timers_state.qemu_icount_bias = cur_icount
316 - (timers_state.qemu_icount << icount_time_shift);
317 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
/* Realtime adjustment tick: re-arm 1000 ms ahead on VIRTUAL_RT. */
320 static void icount_adjust_rt(void *opaque)
322 timer_mod(icount_rt_timer,
323 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
/* Virtual-time adjustment tick: re-arm 100 ms (in ns) ahead on VIRTUAL. */
327 static void icount_adjust_vm(void *opaque)
329 timer_mod(icount_vm_timer,
330 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
331 NANOSECONDS_PER_SECOND / 10);
/* Convert nanoseconds to an instruction count, rounding up. */
335 static int64_t qemu_icount_round(int64_t count)
337 return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
/* Apply the accumulated clock warp: advance the icount bias by the real
 * time elapsed since vm_clock_warp_start, then clear the warp state.
 */
340 static void icount_warp_rt(void)
345 /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
346 * changes from -1 to another value, so the race here is okay.
349 seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
350 warp_start = vm_clock_warp_start;
351 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
/* -1 means no warp in progress; nothing to account. */
353 if (warp_start == -1) {
357 seqlock_write_lock(&timers_state.vm_clock_seqlock);
358 if (runstate_is_running()) {
359 int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
360 cpu_get_clock_locked());
363 warp_delta = clock - vm_clock_warp_start;
364 if (use_icount == 2) {
366 * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
367 * far ahead of real time.
369 int64_t cur_icount = cpu_get_icount_locked();
370 int64_t delta = clock - cur_icount;
371 warp_delta = MIN(warp_delta, delta);
373 timers_state.qemu_icount_bias += warp_delta;
375 vm_clock_warp_start = -1;
376 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
/* Fire any virtual timers that became due because of the warp. */
378 if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
379 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
/* icount_warp_timer callback. */
383 static void icount_timer_cb(void *opaque)
385 /* No need for a checkpoint because the timer already synchronizes
386 * with CHECKPOINT_CLOCK_VIRTUAL_RT.
/* Advance QEMU_CLOCK_VIRTUAL to @dest under qtest control, running any
 * timers that fall due along the way (main timer list and the main
 * AioContext's virtual timer list).
 */
391 void qtest_clock_warp(int64_t dest)
393 int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
394 AioContext *aio_context;
395 assert(qtest_enabled());
396 aio_context = qemu_get_aio_context();
397 while (clock < dest) {
398 int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
/* Jump to the next timer deadline, but never past @dest. */
399 int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
401 seqlock_write_lock(&timers_state.vm_clock_seqlock);
402 timers_state.qemu_icount_bias += warp;
403 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
405 qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
406 timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
407 clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
409 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
/* When all vCPUs are idle under icount, arrange for QEMU_CLOCK_VIRTUAL to
 * keep advancing: either jump straight to the next deadline (no-sleep
 * mode) or arm icount_warp_timer to catch up after some real time passes.
 */
412 void qemu_start_warp_timer(void)
421 /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
422 * do not fire, so computing the deadline does not make sense.
424 if (!runstate_is_running()) {
428 /* warp clock deterministically in record/replay mode */
429 if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
/* Only warp while every vCPU thread is idle. */
433 if (!all_cpu_threads_idle()) {
437 if (qtest_enabled()) {
438 /* When testing, qtest commands advance icount. */
442 /* We want to use the earliest deadline from ALL vm_clocks */
443 clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
444 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
/* Warn once if there is nothing to wake us up and sleep is disabled. */
446 static bool notified;
447 if (!icount_sleep && !notified) {
448 error_report("WARNING: icount sleep disabled and no active timers");
456 * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
457 * sleep. Otherwise, the CPU might be waiting for a future timer
458 * interrupt to wake it up, but the interrupt never comes because
459 * the vCPU isn't running any insns and thus doesn't advance the
460 * QEMU_CLOCK_VIRTUAL.
464 * We never let VCPUs sleep in no sleep icount mode.
465 * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
466 * to the next QEMU_CLOCK_VIRTUAL event and notify it.
467 * It is useful when we want a deterministic execution time,
468 * isolated from host latencies.
470 seqlock_write_lock(&timers_state.vm_clock_seqlock);
471 timers_state.qemu_icount_bias += deadline;
472 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
473 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
476 * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
477 * "real" time, (related to the time left until the next event) has
478 * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
479 * This avoids that the warps are visible externally; for example,
480 * you will not be sending network packets continuously instead of
483 seqlock_write_lock(&timers_state.vm_clock_seqlock);
484 if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
485 vm_clock_warp_start = clock;
487 seqlock_write_unlock(&timers_state.vm_clock_seqlock);
488 timer_mod_anticipate(icount_warp_timer, clock + deadline);
490 } else if (deadline == 0) {
491 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
/* Account any outstanding warp and cancel the warp timer; called when a
 * vCPU is about to run again.
 */
495 static void qemu_account_warp_timer(void)
497 if (!use_icount || !icount_sleep) {
501 /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
502 * do not fire, so computing the deadline does not make sense.
504 if (!runstate_is_running()) {
508 /* warp clock deterministically in record/replay mode */
509 if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
513 timer_del(icount_warp_timer);
/* Migration: only send the icount subsection when icount is in use. */
517 static bool icount_state_needed(void *opaque)
523 * This is a subsection for icount migration.
525 static const VMStateDescription icount_vmstate_timers = {
526 .name = "timer/icount",
528 .minimum_version_id = 1,
529 .needed = icount_state_needed,
530 .fields = (VMStateField[]) {
531 VMSTATE_INT64(qemu_icount_bias, TimersState),
532 VMSTATE_INT64(qemu_icount, TimersState),
533 VMSTATE_END_OF_LIST()
/* Migration description for TimersState; "dummy" preserves wire format
 * compatibility with older versions.
 */
537 static const VMStateDescription vmstate_timers = {
540 .minimum_version_id = 1,
541 .fields = (VMStateField[]) {
542 VMSTATE_INT64(cpu_ticks_offset, TimersState),
543 VMSTATE_INT64(dummy, TimersState),
544 VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
545 VMSTATE_END_OF_LIST()
547 .subsections = (const VMStateDescription*[]) {
548 &icount_vmstate_timers,
/* Work item run on a vCPU: sleep (without the iothread lock) for the
 * fraction of the timeslice implied by the throttle percentage.
 */
553 static void cpu_throttle_thread(void *opaque)
555 CPUState *cpu = opaque;
557 double throttle_ratio;
/* Throttling may have been disabled since the work was queued. */
560 if (!cpu_throttle_get_percentage()) {
564 pct = (double)cpu_throttle_get_percentage()/100;
565 throttle_ratio = pct / (1 - pct);
566 sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);
568 qemu_mutex_unlock_iothread();
569 atomic_set(&cpu->throttle_thread_scheduled, 0);
570 g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
571 qemu_mutex_lock_iothread();
/* Timer tick: queue throttle work on every CPU (at most one outstanding
 * item per CPU, guarded by throttle_thread_scheduled), then re-arm.
 */
574 static void cpu_throttle_timer_tick(void *opaque)
579 /* Stop the timer if needed */
580 if (!cpu_throttle_get_percentage()) {
584 if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
585 async_run_on_cpu(cpu, cpu_throttle_thread, cpu);
589 pct = (double)cpu_throttle_get_percentage()/100;
590 timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
591 CPU_THROTTLE_TIMESLICE_NS / (1-pct));
/* Set the throttle percentage (clamped to [1, 99]) and start the timer. */
594 void cpu_throttle_set(int new_throttle_pct)
596 /* Ensure throttle percentage is within valid range */
597 new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
598 new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);
600 atomic_set(&throttle_percentage, new_throttle_pct);
602 timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
603 CPU_THROTTLE_TIMESLICE_NS);
/* Disable throttling; the timer stops itself on the next tick. */
606 void cpu_throttle_stop(void)
608 atomic_set(&throttle_percentage, 0);
/* True while a non-zero throttle percentage is set. */
611 bool cpu_throttle_active(void)
613 return (cpu_throttle_get_percentage() != 0);
/* Atomic read of the current throttle percentage. */
616 int cpu_throttle_get_percentage(void)
618 return atomic_read(&throttle_percentage);
/* One-time init: seqlock, timers vmstate registration, throttle timer. */
621 void cpu_ticks_init(void)
623 seqlock_init(&timers_state.vm_clock_seqlock, NULL);
624 vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
625 throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
626 cpu_throttle_timer_tick, NULL);
/* Parse the -icount options ("shift", "align", "sleep"), validate their
 * combinations, and set up the warp timer plus (for shift=auto) the
 * adaptive rt/vm adjustment timers. Errors are reported through @errp.
 */
629 void configure_icount(QemuOpts *opts, Error **errp)
632 char *rem_str = NULL;
634 option = qemu_opt_get(opts, "shift");
/* "align" requires an explicit "shift" option. */
636 if (qemu_opt_get(opts, "align") != NULL) {
637 error_setg(errp, "Please specify shift option when using align");
642 icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
644 icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
645 icount_timer_cb, NULL);
648 icount_align_option = qemu_opt_get_bool(opts, "align", false);
650 if (icount_align_option && !icount_sleep) {
651 error_setg(errp, "align=on and sleep=off are incompatible");
653 if (strcmp(option, "auto") != 0) {
/* Fixed shift: parse the numeric value with full error checking. */
655 icount_time_shift = strtol(option, &rem_str, 0);
656 if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
657 error_setg(errp, "icount: Invalid shift value");
661 } else if (icount_align_option) {
662 error_setg(errp, "shift=auto and align=on are incompatible");
663 } else if (!icount_sleep) {
664 error_setg(errp, "shift=auto and sleep=off are incompatible");
669 /* 125MIPS seems a reasonable initial guess at the guest speed.
670 It will be corrected fairly quickly anyway. */
671 icount_time_shift = 3;
673 /* Have both realtime and virtual time triggers for speed adjustment.
674 The realtime trigger catches emulated time passing too slowly,
675 the virtual time trigger catches emulated time passing too fast.
676 Realtime triggers occur even when idle, so use them less frequently
678 icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
679 icount_adjust_rt, NULL);
680 timer_mod(icount_rt_timer,
681 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
682 icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
683 icount_adjust_vm, NULL);
684 timer_mod(icount_vm_timer,
685 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
686 NANOSECONDS_PER_SECOND / 10);
689 /***********************************************************/
/* Report a fatal hardware error to stderr, dump CPU state, and abort. */
690 void hw_error(const char *fmt, ...)
696 fprintf(stderr, "qemu: hardware error: ");
697 vfprintf(stderr, fmt, ap);
698 fprintf(stderr, "\n");
700 fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
701 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
/* Synchronize accelerator CPU state into QEMU for every CPU. */
707 void cpu_synchronize_all_states(void)
712 cpu_synchronize_state(cpu);
/* Push QEMU CPU state back to the accelerator after a system reset;
 * HAX (in UG mode) needs its own post-reset hook as well.
 */
716 void cpu_synchronize_all_post_reset(void)
721 cpu_synchronize_post_reset(cpu);
723 if (hax_enabled() && hax_ug_platform())
724 hax_cpu_synchronize_post_reset(cpu);
/* Same as above, but after initial machine creation. */
729 void cpu_synchronize_all_post_init(void)
734 cpu_synchronize_post_init(cpu);
736 if (hax_enabled() && hax_ug_platform())
737 hax_cpu_synchronize_post_init(cpu);
/* Stop the VM: pause vCPUs, notify listeners, emit the QMP STOP event,
 * and flush all block devices. Returns the flush result.
 */
742 static int do_vm_stop(RunState state)
746 if (runstate_is_running()) {
750 vm_state_notify(0, state);
751 qapi_event_send_stop(&error_abort);
755 ret = blk_flush_all();
/* A CPU may run only if it has no stop request and is not stopped. */
760 static bool cpu_can_run(CPUState *cpu)
765 if (cpu_is_stopped(cpu)) {
/* Hand control to the gdbstub when a guest debug event occurs. */
771 static void cpu_handle_guest_debug(CPUState *cpu)
773 gdb_set_stop_cpu(cpu);
774 qemu_system_debug_request();
/* Restore the default SIGBUS disposition and re-raise it so the process
 * dies with the original signal (used when KVM cannot handle the fault).
 */
779 static void sigbus_reraise(void)
782 struct sigaction action;
784 memset(&action, 0, sizeof(action));
785 action.sa_handler = SIG_DFL;
786 if (!sigaction(SIGBUS, &action, NULL)) {
789 sigaddset(&set, SIGBUS);
790 sigprocmask(SIG_UNBLOCK, &set, NULL);
792 perror("Failed to re-raise SIGBUS!\n");
/* signalfd-style SIGBUS handler: forward MCE info to KVM, or re-raise. */
796 static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
799 if (kvm_on_sigbus(siginfo->ssi_code,
800 (void *)(intptr_t)siginfo->ssi_addr)) {
/* Install the SIGBUS handler and opt in to early machine-check delivery. */
805 static void qemu_init_sigbus(void)
807 struct sigaction action;
809 memset(&action, 0, sizeof(action));
810 action.sa_flags = SA_SIGINFO;
811 action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
812 sigaction(SIGBUS, &action, NULL);
814 prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
/* Drain pending SIG_IPI/SIGBUS for this thread, routing SIGBUS to KVM;
 * loops until sigpending() shows neither signal remains queued.
 */
817 static void qemu_kvm_eat_signals(CPUState *cpu)
819 struct timespec ts = { 0, 0 };
825 sigemptyset(&waitset);
826 sigaddset(&waitset, SIG_IPI);
827 sigaddset(&waitset, SIGBUS);
830 r = sigtimedwait(&waitset, &siginfo, &ts);
831 if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
832 perror("sigtimedwait");
838 if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
846 r = sigpending(&chkset);
848 perror("sigpending");
851 } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
854 #else /* !CONFIG_LINUX */
/* Non-Linux stubs: no SIGBUS/MCE handling is available. */
856 static void qemu_init_sigbus(void)
860 static void qemu_kvm_eat_signals(CPUState *cpu)
863 #endif /* !CONFIG_LINUX */
/* No-op handler: SIG_IPI only needs to interrupt blocking syscalls. */
866 static void dummy_signal(int sig)
/* Route SIG_IPI to a dummy handler and tell KVM which signals should be
 * unblocked inside KVM_RUN (everything except SIG_IPI and SIGBUS).
 */
870 static void qemu_kvm_init_cpu_signals(CPUState *cpu)
874 struct sigaction sigact;
876 memset(&sigact, 0, sizeof(sigact));
877 sigact.sa_handler = dummy_signal;
878 sigaction(SIG_IPI, &sigact, NULL);
880 pthread_sigmask(SIG_BLOCK, NULL, &set);
881 sigdelset(&set, SIG_IPI);
882 sigdelset(&set, SIGBUS);
883 r = kvm_set_signal_mask(cpu, &set);
885 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
/* Stub for platforms without the POSIX signal machinery above. */
891 static void qemu_kvm_init_cpu_signals(CPUState *cpu)
/* Global synchronization state shared by all vCPU threads and the
 * iothread: the "big QEMU lock" plus condition variables.
 */
897 static QemuMutex qemu_global_mutex;
898 static QemuCond qemu_io_proceeded_cond;
899 static unsigned iothread_requesting_mutex;
901 static QemuThread io_thread;
904 static QemuCond qemu_cpu_cond;
906 static QemuCond qemu_pause_cond;
907 static QemuCond qemu_work_cond;
/* Initialize the global mutex/condvars and record the iothread identity. */
909 void qemu_init_cpu_loop(void)
912 qemu_cond_init(&qemu_cpu_cond);
913 qemu_cond_init(&qemu_pause_cond);
914 qemu_cond_init(&qemu_work_cond);
915 qemu_cond_init(&qemu_io_proceeded_cond);
916 qemu_mutex_init(&qemu_global_mutex);
918 qemu_thread_get_self(&io_thread);
/* Run @func(@data) on @cpu's thread and wait for completion. Executes
 * inline when already on the target CPU's thread; otherwise queues a
 * stack-allocated work item and waits on qemu_work_cond.
 */
921 void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
923 struct qemu_work_item wi;
925 if (qemu_cpu_is_self(cpu)) {
934 qemu_mutex_lock(&cpu->work_mutex);
935 if (cpu->queued_work_first == NULL) {
936 cpu->queued_work_first = &wi;
938 cpu->queued_work_last->next = &wi;
940 cpu->queued_work_last = &wi;
943 qemu_mutex_unlock(&cpu->work_mutex);
/* Wait for completion; current_cpu is restored because the cond wait
 * releases the BQL and another context may have changed it.
 */
946 while (!atomic_mb_read(&wi.done)) {
947 CPUState *self_cpu = current_cpu;
949 qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
950 current_cpu = self_cpu;
/* Asynchronous variant of run_on_cpu(): heap-allocated work item, no
 * wait. Runs inline when already on the target CPU's thread.
 */
954 void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
956 struct qemu_work_item *wi;
958 if (qemu_cpu_is_self(cpu)) {
963 wi = g_malloc0(sizeof(struct qemu_work_item));
968 qemu_mutex_lock(&cpu->work_mutex);
969 if (cpu->queued_work_first == NULL) {
970 cpu->queued_work_first = wi;
972 cpu->queued_work_last->next = wi;
974 cpu->queued_work_last = wi;
977 qemu_mutex_unlock(&cpu->work_mutex);
/* Drain @cpu's work queue, running each item with work_mutex dropped,
 * then wake any run_on_cpu() waiters via qemu_work_cond.
 */
982 static void flush_queued_work(CPUState *cpu)
984 struct qemu_work_item *wi;
986 if (cpu->queued_work_first == NULL) {
990 qemu_mutex_lock(&cpu->work_mutex);
991 while (cpu->queued_work_first != NULL) {
992 wi = cpu->queued_work_first;
993 cpu->queued_work_first = wi->next;
994 if (!cpu->queued_work_first) {
995 cpu->queued_work_last = NULL;
997 qemu_mutex_unlock(&cpu->work_mutex);
999 qemu_mutex_lock(&cpu->work_mutex);
1003 atomic_mb_set(&wi->done, true);
1006 qemu_mutex_unlock(&cpu->work_mutex);
1007 qemu_cond_broadcast(&qemu_work_cond);
/* Common idle-time bookkeeping for every vCPU loop: honor stop requests
 * (announce via qemu_pause_cond), run queued work, clear the kick flag.
 */
1010 static void qemu_wait_io_event_common(CPUState *cpu)
1014 cpu->stopped = true;
1015 qemu_cond_broadcast(&qemu_pause_cond);
1017 flush_queued_work(cpu);
1018 cpu->thread_kicked = false;
/* TCG idle wait: sleep while all CPUs are idle, then yield to the
 * iothread if it is requesting the global mutex.
 */
1021 static void qemu_tcg_wait_io_event(CPUState *cpu)
1023 while (all_cpu_threads_idle()) {
1024 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1027 while (iothread_requesting_mutex) {
1028 qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
1032 qemu_wait_io_event_common(cpu);
/* HAX idle wait: sleep on halt_cond while this CPU is idle. */
1037 static void qemu_hax_wait_io_event(CPUState *cpu)
1039 while (cpu_thread_is_idle(cpu)) {
1040 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1043 qemu_wait_io_event_common(cpu);
/* KVM idle wait: like HAX, plus draining pending SIG_IPI/SIGBUS. */
1047 static void qemu_kvm_wait_io_event(CPUState *cpu)
1049 while (cpu_thread_is_idle(cpu)) {
1050 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1053 qemu_kvm_eat_signals(cpu);
1054 qemu_wait_io_event_common(cpu);
/* Per-vCPU thread body for KVM: init the vcpu and its signal mask,
 * announce creation, then loop running kvm_cpu_exec() / waiting for
 * I/O events, dropping to the gdbstub on EXCP_DEBUG.
 */
1057 static void *qemu_kvm_cpu_thread_fn(void *arg)
1059 CPUState *cpu = arg;
1062 rcu_register_thread();
1064 qemu_mutex_lock_iothread();
1065 qemu_thread_get_self(cpu->thread);
1066 cpu->thread_id = qemu_get_thread_id();
1070 r = kvm_init_vcpu(cpu);
1072 fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
1076 qemu_kvm_init_cpu_signals(cpu);
1078 /* signal CPU creation */
1079 cpu->created = true;
1080 qemu_cond_signal(&qemu_cpu_cond);
1083 if (cpu_can_run(cpu)) {
1084 r = kvm_cpu_exec(cpu);
1085 if (r == EXCP_DEBUG) {
1086 cpu_handle_guest_debug(cpu);
1089 qemu_kvm_wait_io_event(cpu);
/* Thread body when no accelerator executes guest code (qtest): block in
 * sigwait() for SIG_IPI, then process I/O events. Not supported on
 * Windows (no sigwait).
 */
1095 static void *qemu_dummy_cpu_thread_fn(void *arg)
1098 fprintf(stderr, "qtest is not supported under Windows\n");
1101 CPUState *cpu = arg;
1105 rcu_register_thread();
1107 qemu_mutex_lock_iothread();
1108 qemu_thread_get_self(cpu->thread);
1109 cpu->thread_id = qemu_get_thread_id();
1112 sigemptyset(&waitset);
1113 sigaddset(&waitset, SIG_IPI);
1115 /* signal CPU creation */
1116 cpu->created = true;
1117 qemu_cond_signal(&qemu_cpu_cond);
1122 qemu_mutex_unlock_iothread();
1125 r = sigwait(&waitset, &sig);
1126 } while (r == -1 && (errno == EAGAIN || errno == EINTR));
1131 qemu_mutex_lock_iothread();
1133 qemu_wait_io_event_common(cpu);
1140 static void tcg_exec_all(void);
/* Single shared thread body for all TCG vCPUs: wait for the initial
 * kick-off, then loop over tcg_exec_all() and the TCG idle wait.
 */
1142 static void *qemu_tcg_cpu_thread_fn(void *arg)
1144 CPUState *cpu = arg;
1146 rcu_register_thread();
1148 qemu_mutex_lock_iothread();
1149 qemu_thread_get_self(cpu->thread);
1152 cpu->thread_id = qemu_get_thread_id();
1153 cpu->created = true;
1156 qemu_cond_signal(&qemu_cpu_cond);
1158 /* wait for initial kick-off after machine start */
1159 while (first_cpu->stopped) {
1160 qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
1162 /* process any pending work */
1164 qemu_wait_io_event_common(cpu);
1168 /* process any pending work */
1169 atomic_mb_set(&exit_request, 1);
1175 int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1177 if (deadline == 0) {
1178 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
1181 qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
/* Per-vCPU thread body for the HAX accelerator (mirrors the KVM loop). */
1188 static void *qemu_hax_cpu_thread_fn(void *arg)
1190 CPUState *cpu = arg;
1192 qemu_thread_get_self(cpu->thread);
1193 qemu_mutex_lock(&qemu_global_mutex);
1195 cpu->thread_id = qemu_get_thread_id();
1196 cpu->created = true;
1201 qemu_cond_signal(&qemu_cpu_cond);
1204 if (cpu_can_run(cpu)) {
1205 r = hax_smp_cpu_exec(cpu);
1206 if (r == EXCP_DEBUG) {
1207 cpu_handle_guest_debug(cpu);
1210 qemu_hax_wait_io_event(cpu);
/* Force @cpu's thread out of guest execution. POSIX: send SIG_IPI (with
 * a Darwin/HAX double-check of exit_request). Windows: suspend the
 * thread, fetch its context to be sure it stopped, set exit_request for
 * HAX UG mode, and resume it.
 */
1216 static void qemu_cpu_kick_thread(CPUState *cpu)
1221 if (cpu->thread_kicked) {
1224 cpu->thread_kicked = true;
1225 err = pthread_kill(cpu->thread->thread, SIG_IPI);
1227 fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
1231 # ifdef CONFIG_DARWIN
1232 /* The cpu thread cannot catch it reliably when shutdown the guest on Mac.
1233 * We can double check it and resend it
1235 if (!exit_request) {
1236 // FIXME: check it soon
1240 if (hax_enabled() && hax_ug_platform()) {
1241 cpu->exit_request = 1;
1248 // FIXME: check it soon
1250 if (!qemu_cpu_is_self(cpu)) {
1253 if (SuspendThread(cpu->hThread) == (DWORD)-1) {
1254 fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
1259 /* On multi-core systems, we are not sure that the thread is actually
1260 * suspended until we can get the context.
1262 tcgContext.ContextFlags = CONTEXT_CONTROL;
1263 while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
1269 if(hax_enabled() && hax_ug_platform()) {
1270 cpu->exit_request = 1;
1274 if (ResumeThread(cpu->hThread) == (DWORD)-1) {
1275 fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
1281 if (!qemu_cpu_is_self(cpu)) {
1282 if(hax_enabled() && hax_ug_platform()) {
1283 cpu->exit_request = 1;
/* Ask the current TCG CPU to exit its execution loop (no halt wakeup). */
1290 static void qemu_cpu_kick_no_halt(void)
1293 /* Ensure whatever caused the exit has reached the CPU threads before
1294 * writing exit_request.
1296 atomic_mb_set(&exit_request, 1);
1297 cpu = atomic_mb_read(&tcg_current_cpu);
/* Wake @cpu: broadcast its halt condition, then kick out of guest code
 * (TCG uses the exit_request path, other accelerators a thread kick).
 */
1303 void qemu_cpu_kick(CPUState *cpu)
1305 qemu_cond_broadcast(cpu->halt_cond);
1306 if (tcg_enabled()) {
1307 qemu_cpu_kick_no_halt();
1309 qemu_cpu_kick_thread(cpu);
/* Kick the vCPU whose thread is currently executing. */
1313 void qemu_cpu_kick_self(void)
1315 assert(current_cpu);
1316 qemu_cpu_kick_thread(current_cpu);
/* True if the calling thread is @cpu's vCPU thread. */
1319 bool qemu_cpu_is_self(CPUState *cpu)
1321 return qemu_thread_is_self(cpu->thread);
/* True if the calling thread is any vCPU thread. */
1324 bool qemu_in_vcpu_thread(void)
1326 return current_cpu && qemu_cpu_is_self(current_cpu);
/* Per-thread flag tracking whether this thread holds the BQL. */
1329 static __thread bool iothread_locked = false;
/* Report whether the calling thread currently holds the iothread lock. */
1331 bool qemu_mutex_iothread_locked(void)
1333 return iothread_locked;
/* Acquire the big QEMU lock. If a TCG vCPU may be holding it while
 * executing guest code, kick it out first so acquisition cannot stall.
 */
1336 void qemu_mutex_lock_iothread(void)
1338 atomic_inc(&iothread_requesting_mutex)
1339 /* In the simple case there is no need to bump the VCPU thread out of
1340 * TCG code execution.
1342 if (!tcg_enabled() || qemu_in_vcpu_thread() ||
1343 !first_cpu || !first_cpu->created) {
1344 qemu_mutex_lock(&qemu_global_mutex);
1345 atomic_dec(&iothread_requesting_mutex);
1347 if (qemu_mutex_trylock(&qemu_global_mutex)) {
1348 qemu_cpu_kick_no_halt();
1349 qemu_mutex_lock(&qemu_global_mutex);
1351 atomic_dec(&iothread_requesting_mutex);
1352 qemu_cond_broadcast(&qemu_io_proceeded_cond);
1354 iothread_locked = true;
/* Release the big QEMU lock and clear the per-thread flag. */
1357 void qemu_mutex_unlock_iothread(void)
1359 iothread_locked = false;
1360 qemu_mutex_unlock(&qemu_global_mutex);
/* True when every CPU has acknowledged a pause (cpu->stopped set). */
1363 static int all_vcpus_paused(void)
1368 if (!cpu->stopped) {
/* Request all vCPUs to stop and wait until each reports stopped. */
1376 void pause_all_vcpus(void)
1380 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
1386 if (qemu_in_vcpu_thread()) {
1388 if (!kvm_enabled()) {
1391 cpu->stopped = true;
1397 while (!all_vcpus_paused()) {
1398 qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
/* Clear the stop state of one CPU and wake it. */
1405 void cpu_resume(CPUState *cpu)
1408 cpu->stopped = false;
/* Re-enable the virtual clock and resume every CPU. */
1412 void resume_all_vcpus(void)
1416 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
1422 /* For temporary buffers for forming a name */
1423 #define VCPU_THREAD_NAME_SIZE 16
/* Set up the TCG execution thread. All TCG CPUs share one thread and one
 * halt condition; only the first CPU creates them, later CPUs reuse them.
 */
1425 static void qemu_tcg_init_vcpu(CPUState *cpu)
1428 if (hax_enabled()) {
1432 char thread_name[VCPU_THREAD_NAME_SIZE];
1433 static QemuCond *tcg_halt_cond;
1434 static QemuThread *tcg_cpu_thread;
1436 /* share a single thread for all cpus with TCG */
1437 if (!tcg_cpu_thread) {
1438 cpu->thread = g_malloc0(sizeof(QemuThread));
1439 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1440 qemu_cond_init(cpu->halt_cond);
1441 tcg_halt_cond = cpu->halt_cond;
1442 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
1444 qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
1445 cpu, QEMU_THREAD_JOINABLE);
1447 cpu->hThread = qemu_thread_get_handle(cpu->thread);
1449 while (!cpu->created) {
1450 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1452 tcg_cpu_thread = cpu->thread;
1454 cpu->thread = tcg_cpu_thread;
1455 cpu->halt_cond = tcg_halt_cond;
/* Spawn a dedicated HAX thread for @cpu and wait for it to start. */
1460 static void qemu_hax_start_vcpu(CPUState *cpu)
1462 char thread_name[VCPU_THREAD_NAME_SIZE];
1464 cpu->thread = g_malloc0(sizeof(QemuThread));
1465 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1466 qemu_cond_init(cpu->halt_cond);
1468 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
1471 qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
1472 cpu, QEMU_THREAD_JOINABLE);
1474 cpu->hThread = qemu_thread_get_handle(cpu->thread);
1476 while (!cpu->created) {
1477 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
/* Spawn a dedicated KVM thread for @cpu and wait for it to start. */
1482 static void qemu_kvm_start_vcpu(CPUState *cpu)
1484 char thread_name[VCPU_THREAD_NAME_SIZE];
1486 cpu->thread = g_malloc0(sizeof(QemuThread));
1487 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1488 qemu_cond_init(cpu->halt_cond);
1489 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
1491 qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
1492 cpu, QEMU_THREAD_JOINABLE);
1493 while (!cpu->created) {
1494 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
/* Spawn the no-accelerator (qtest) thread for @cpu and wait for start. */
1498 static void qemu_dummy_start_vcpu(CPUState *cpu)
1500 char thread_name[VCPU_THREAD_NAME_SIZE];
1502 cpu->thread = g_malloc0(sizeof(QemuThread));
1503 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1504 qemu_cond_init(cpu->halt_cond);
1505 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
1507 qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
1508 QEMU_THREAD_JOINABLE);
1509 while (!cpu->created) {
1510 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
/* Common vCPU initialization: topology fields, default address space,
 * then dispatch to the accelerator-specific thread starter.
 */
1514 void qemu_init_vcpu(CPUState *cpu)
1516 cpu->nr_cores = smp_cores;
1517 cpu->nr_threads = smp_threads;
1518 cpu->stopped = true;
1521 /* If the target cpu hasn't set up any address spaces itself,
1522 * give it the default one.
1524 AddressSpace *as = address_space_init_shareable(cpu->memory,
1527 cpu_address_space_init(cpu, as, 0);
1530 if (kvm_enabled()) {
1531 qemu_kvm_start_vcpu(cpu);
1533 } else if (hax_enabled() && hax_ug_platform()) {
1534 qemu_hax_start_vcpu(cpu);
1536 } else if (tcg_enabled()) {
1537 qemu_tcg_init_vcpu(cpu);
1539 qemu_dummy_start_vcpu(cpu);
/* Stop the currently-executing vCPU and announce it on qemu_pause_cond. */
1543 void cpu_stop_current(void)
1546 current_cpu->stop = false;
1547 current_cpu->stopped = true;
1548 cpu_exit(current_cpu);
1549 qemu_cond_broadcast(&qemu_pause_cond);
/* Stop the VM. From a vCPU thread this only files a vmstop request (the
 * main loop performs the stop); otherwise it stops synchronously.
 */
1553 int vm_stop(RunState state)
1555 if (qemu_in_vcpu_thread()) {
1556 qemu_system_vmstop_request_prepare();
1557 qemu_system_vmstop_request(state);
1559 * FIXME: should not return to device code in case
1560 * vm_stop() has been requested.
1566 return do_vm_stop(state);
1569 /* does a state transition even if the VM is already stopped,
1570 current state is forgotten forever */
1571 int vm_stop_force_state(RunState state)
1573 if (runstate_is_running()) {
1574 return vm_stop(state);
1576 runstate_set(state);
1579 /* Make sure to return an error if the flush in a previous vm_stop()
1581 return blk_flush_all();
1585 static int64_t tcg_get_icount_limit(void)
1589 if (replay_mode != REPLAY_MODE_PLAY) {
1590 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1592 /* Maintain prior (possibly buggy) behaviour where if no deadline
1593 * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
1594 * INT32_MAX nanoseconds ahead, we still use INT32_MAX
1597 if ((deadline < 0) || (deadline > INT32_MAX)) {
1598 deadline = INT32_MAX;
1601 return qemu_icount_round(deadline);
1603 return replay_get_instructions();
1607 static int tcg_cpu_exec(CPUState *cpu)
1610 #ifdef CONFIG_PROFILER
1614 #ifdef CONFIG_PROFILER
1615 ti = profile_getclock();
1620 timers_state.qemu_icount -= (cpu->icount_decr.u16.low
1621 + cpu->icount_extra);
1622 cpu->icount_decr.u16.low = 0;
1623 cpu->icount_extra = 0;
1624 count = tcg_get_icount_limit();
1625 timers_state.qemu_icount += count;
1626 decr = (count > 0xffff) ? 0xffff : count;
1628 cpu->icount_decr.u16.low = decr;
1629 cpu->icount_extra = count;
1631 ret = cpu_exec(cpu);
1632 #ifdef CONFIG_PROFILER
1633 tcg_time += profile_getclock() - ti;
1636 /* Fold pending instructions back into the
1637 instruction counter, and clear the interrupt flag. */
1638 timers_state.qemu_icount -= (cpu->icount_decr.u16.low
1639 + cpu->icount_extra);
1640 cpu->icount_decr.u32 = 0;
1641 cpu->icount_extra = 0;
1642 replay_account_executed_instructions();
1647 static void tcg_exec_all(void)
1651 /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
1652 qemu_account_warp_timer();
1654 if (next_cpu == NULL) {
1655 next_cpu = first_cpu;
1657 for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
1658 CPUState *cpu = next_cpu;
1660 qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
1661 (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
1663 if (cpu_can_run(cpu)) {
1664 r = tcg_cpu_exec(cpu);
1665 if (r == EXCP_DEBUG) {
1666 cpu_handle_guest_debug(cpu);
1669 } else if (cpu->stop || cpu->stopped) {
1674 /* Pairs with smp_wmb in qemu_cpu_kick. */
1675 atomic_mb_set(&exit_request, 0);
1678 void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
1680 /* XXX: implement xxx_cpu_list for targets that still miss it */
1681 #if defined(cpu_list)
1682 cpu_list(f, cpu_fprintf);
1686 CpuInfoList *qmp_query_cpus(Error **errp)
1688 CpuInfoList *head = NULL, *cur_item = NULL;
1693 #if defined(TARGET_I386)
1694 X86CPU *x86_cpu = X86_CPU(cpu);
1695 CPUX86State *env = &x86_cpu->env;
1696 #elif defined(TARGET_PPC)
1697 PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
1698 CPUPPCState *env = &ppc_cpu->env;
1699 #elif defined(TARGET_SPARC)
1700 SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
1701 CPUSPARCState *env = &sparc_cpu->env;
1702 #elif defined(TARGET_MIPS)
1703 MIPSCPU *mips_cpu = MIPS_CPU(cpu);
1704 CPUMIPSState *env = &mips_cpu->env;
1705 #elif defined(TARGET_TRICORE)
1706 TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
1707 CPUTriCoreState *env = &tricore_cpu->env;
1710 cpu_synchronize_state(cpu);
1712 info = g_malloc0(sizeof(*info));
1713 info->value = g_malloc0(sizeof(*info->value));
1714 info->value->CPU = cpu->cpu_index;
1715 info->value->current = (cpu == first_cpu);
1716 info->value->halted = cpu->halted;
1717 info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
1718 info->value->thread_id = cpu->thread_id;
1719 #if defined(TARGET_I386)
1720 info->value->arch = CPU_INFO_ARCH_X86;
1721 info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
1722 #elif defined(TARGET_PPC)
1723 info->value->arch = CPU_INFO_ARCH_PPC;
1724 info->value->u.ppc.nip = env->nip;
1725 #elif defined(TARGET_SPARC)
1726 info->value->arch = CPU_INFO_ARCH_SPARC;
1727 info->value->u.q_sparc.pc = env->pc;
1728 info->value->u.q_sparc.npc = env->npc;
1729 #elif defined(TARGET_MIPS)
1730 info->value->arch = CPU_INFO_ARCH_MIPS;
1731 info->value->u.q_mips.PC = env->active_tc.PC;
1732 #elif defined(TARGET_TRICORE)
1733 info->value->arch = CPU_INFO_ARCH_TRICORE;
1734 info->value->u.tricore.PC = env->PC;
1736 info->value->arch = CPU_INFO_ARCH_OTHER;
1739 /* XXX: waiting for the qapi to support GSList */
1741 head = cur_item = info;
1743 cur_item->next = info;
1751 void qmp_memsave(int64_t addr, int64_t size, const char *filename,
1752 bool has_cpu, int64_t cpu_index, Error **errp)
1758 int64_t orig_addr = addr, orig_size = size;
1764 cpu = qemu_get_cpu(cpu_index);
1766 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
1771 f = fopen(filename, "wb");
1773 error_setg_file_open(errp, errno, filename);
1781 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
1782 error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
1783 " specified", orig_addr, orig_size);
1786 if (fwrite(buf, 1, l, f) != l) {
1787 error_setg(errp, QERR_IO_ERROR);
1798 void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
1805 f = fopen(filename, "wb");
1807 error_setg_file_open(errp, errno, filename);
1815 cpu_physical_memory_read(addr, buf, l);
1816 if (fwrite(buf, 1, l, f) != l) {
1817 error_setg(errp, QERR_IO_ERROR);
1828 void qmp_inject_nmi(Error **errp)
1830 #if defined(TARGET_I386)
1834 X86CPU *cpu = X86_CPU(cs);
1836 if (!cpu->apic_state) {
1837 cpu_interrupt(cs, CPU_INTERRUPT_NMI);
1839 apic_deliver_nmi(cpu->apic_state);
1843 nmi_monitor_handle(monitor_get_cpu_index(), errp);
1847 void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
1853 cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
1854 (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
1855 if (icount_align_option) {
1856 cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
1857 cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n", max_advance/SCALE_MS);
1859 cpu_fprintf(f, "Max guest delay NA\n");
1860 cpu_fprintf(f, "Max guest advance NA\n");