4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 /* Needed early for CONFIG_BSD etc. */
26 #include "qemu/osdep.h"
27 #include "qemu-common.h"
29 #include "monitor/monitor.h"
30 #include "qapi/qmp/qerror.h"
31 #include "qemu/error-report.h"
32 #include "sysemu/sysemu.h"
33 #include "sysemu/block-backend.h"
34 #include "exec/gdbstub.h"
35 #include "sysemu/dma.h"
36 #include "sysemu/kvm.h"
37 #include "sysemu/hax.h"
38 #include "qmp-commands.h"
39 #include "exec/exec-all.h"
41 #include "qemu/thread.h"
42 #include "sysemu/cpus.h"
43 #include "sysemu/qtest.h"
44 #include "qemu/main-loop.h"
45 #include "qemu/bitmap.h"
46 #include "qemu/seqlock.h"
47 #include "qapi-event.h"
49 #include "sysemu/replay.h"
52 #include "qemu/compatfd.h"
57 #include <sys/prctl.h>
60 #define PR_MCE_KILL 33
63 #ifndef PR_MCE_KILL_SET
64 #define PR_MCE_KILL_SET 1
67 #ifndef PR_MCE_KILL_EARLY
68 #define PR_MCE_KILL_EARLY 1
71 #endif /* CONFIG_LINUX */
73 static CPUState *next_cpu;
77 /* vcpu throttling controls */
78 static QEMUTimer *throttle_timer;
79 static unsigned int throttle_percentage;
81 #define CPU_THROTTLE_PCT_MIN 1
82 #define CPU_THROTTLE_PCT_MAX 99
83 #define CPU_THROTTLE_TIMESLICE_NS 10000000
/* Report whether this vCPU should be treated as stopped: either the CPU
 * itself is flagged stopped, or the whole VM is not in the running
 * runstate.
 * NOTE(review): this extract is line-sampled; the function's braces are
 * not visible here.
 */
85 bool cpu_is_stopped(CPUState *cpu)
87     return cpu->stopped || !runstate_is_running();
90 static bool cpu_thread_is_idle(CPUState *cpu)
92 if (cpu->stop || cpu->queued_work_first) {
95 if (cpu_is_stopped(cpu)) {
98 if (!cpu->halted || cpu_has_work(cpu) ||
99 kvm_halt_in_kernel()) {
105 static bool all_cpu_threads_idle(void)
110 if (!cpu_thread_is_idle(cpu)) {
117 /***********************************************************/
118 /* guest cycle counter */
120 /* Protected by TimersState seqlock */
122 static bool icount_sleep = true;
123 static int64_t vm_clock_warp_start = -1;
124 /* Conversion factor from emulated instructions to virtual clock ticks. */
125 static int icount_time_shift;
126 /* Arbitrarily pick 1MIPS as the minimum allowable speed. */
127 #define MAX_ICOUNT_SHIFT 10
129 static QEMUTimer *icount_rt_timer;
130 static QEMUTimer *icount_vm_timer;
131 static QEMUTimer *icount_warp_timer;
133 typedef struct TimersState {
134 /* Protected by BQL. */
135 int64_t cpu_ticks_prev;
136 int64_t cpu_ticks_offset;
138 /* cpu_clock_offset can be read out of BQL, so protect it with
141 QemuSeqLock vm_clock_seqlock;
142 int64_t cpu_clock_offset;
143 int32_t cpu_ticks_enabled;
146 /* Compensate for varying guest execution speed. */
147 int64_t qemu_icount_bias;
148 /* Only written by TCG thread */
152 static TimersState timers_state;
/* Return the raw executed-instruction count, without the ns conversion
 * or bias applied by cpu_get_icount_locked().  Starts from the global
 * timers_state.qemu_icount and corrects for instructions the currently
 * running vCPU has budgeted but not yet executed.
 */
154 int64_t cpu_get_icount_raw(void)
157     CPUState *cpu = current_cpu;
159     icount = timers_state.qemu_icount;
/* Reading icount while the vCPU cannot do I/O is an invalid use;
 * report it.  (The original aborts after this message — the line is
 * not visible in this extract; confirm against upstream.) */
161     if (!cpu->can_do_io) {
162         fprintf(stderr, "Bad icount read\n");
/* Subtract the instructions still pending in the current vCPU's
 * decrementer budget so the count reflects what actually ran. */
165     icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
170 /* Return the virtual CPU time, based on the instruction counter. */
/* Virtual CPU time in ns: raw icount converted to ns plus the
 * accumulated bias.  Caller must hold the vm_clock_seqlock (write
 * side) or be inside a seqlock read/retry section.
 */
171 static int64_t cpu_get_icount_locked(void)
173     int64_t icount = cpu_get_icount_raw();
174     return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
/* Lock-free reader for the icount-based virtual clock: retry the
 * seqlock read section until a torn-free snapshot is obtained.
 */
177 int64_t cpu_get_icount(void)
183     start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
184     icount = cpu_get_icount_locked();
185     } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
/* Convert an instruction count to virtual-clock nanoseconds.  The
 * conversion factor is a power of two (icount_time_shift), adjusted
 * dynamically by icount_adjust() in adaptive mode.
 */
190 int64_t cpu_icount_to_ns(int64_t icount)
192     return icount << icount_time_shift;
195 /* return the host CPU cycle counter and handle stop/restart */
196 /* Caller must hold the BQL */
/* Host CPU cycle counter with stop/restart compensation.  Caller must
 * hold the BQL.  Under icount this delegates to the virtual clock
 * (the use_icount guard is not visible in this extract).
 */
197 int64_t cpu_get_ticks(void)
202     return cpu_get_icount();
/* Base offset accumulated across enable/disable cycles; add the live
 * host counter only while ticks are enabled. */
205     ticks = timers_state.cpu_ticks_offset;
206     if (timers_state.cpu_ticks_enabled) {
207         ticks += cpu_get_host_ticks();
/* Clamp to monotonicity: fold any backwards jump of the host counter
 * into the offset so callers never observe time going backwards. */
210     if (timers_state.cpu_ticks_prev > ticks) {
211         /* Note: non increasing ticks may happen if the host uses
213         timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
214         ticks = timers_state.cpu_ticks_prev;
217     timers_state.cpu_ticks_prev = ticks;
/* Monotonic host clock (ns) with stop/restart compensation.  Caller
 * must hold the vm_clock_seqlock or be in a read/retry section.
 */
221 static int64_t cpu_get_clock_locked(void)
225     ticks = timers_state.cpu_clock_offset;
/* Only add live host time while the clock is running; when stopped,
 * the frozen value lives entirely in cpu_clock_offset. */
226     if (timers_state.cpu_ticks_enabled) {
227         ticks += get_clock();
233 /* return the host CPU monotonic timer and handle stop/restart */
/* Lock-free reader for the compensated host monotonic clock: retry
 * the seqlock read section until a consistent value is read.  Safe to
 * call without the BQL.
 */
234 int64_t cpu_get_clock(void)
240     start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
241     ti = cpu_get_clock_locked();
242     } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
247 /* enable cpu_get_ticks()
248 * Caller must hold BQL which server as mutex for vm_clock_seqlock.
/* Resume the tick and clock counters after a stop.  Caller must hold
 * the BQL, which serves as the writer-side mutex for vm_clock_seqlock.
 * Subtracting the current host values from the offsets makes the
 * counters continue from where they were frozen.
 */
250 void cpu_enable_ticks(void)
252     /* Here, the really thing protected by seqlock is cpu_clock_offset. */
253     seqlock_write_begin(&timers_state.vm_clock_seqlock);
254     if (!timers_state.cpu_ticks_enabled) {
255         timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
256         timers_state.cpu_clock_offset -= get_clock();
257         timers_state.cpu_ticks_enabled = 1;
259     seqlock_write_end(&timers_state.vm_clock_seqlock);
262 /* disable cpu_get_ticks() : the clock is stopped. You must not call
263 * cpu_get_ticks() after that.
264 * Caller must hold BQL which server as mutex for vm_clock_seqlock.
/* Freeze the tick and clock counters.  cpu_get_ticks() must not be
 * called afterwards until re-enabled.  Caller must hold the BQL
 * (writer-side mutex for vm_clock_seqlock).  The current values are
 * captured into the offsets so a later enable resumes seamlessly.
 */
266 void cpu_disable_ticks(void)
268     /* Here, the really thing protected by seqlock is cpu_clock_offset. */
269     seqlock_write_begin(&timers_state.vm_clock_seqlock);
270     if (timers_state.cpu_ticks_enabled) {
271         timers_state.cpu_ticks_offset += cpu_get_host_ticks();
272         timers_state.cpu_clock_offset = cpu_get_clock_locked();
273         timers_state.cpu_ticks_enabled = 0;
275     seqlock_write_end(&timers_state.vm_clock_seqlock);
278 /* Correlation between real and virtual time is always going to be
279 fairly approximate, so ignore small variation.
280 When the guest is idle real and virtual time will be aligned in
282 #define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)
284 static void icount_adjust(void)
290 /* Protected by TimersState mutex. */
291 static int64_t last_delta;
293 /* If the VM is not running, then do nothing. */
294 if (!runstate_is_running()) {
298 seqlock_write_begin(&timers_state.vm_clock_seqlock);
299 cur_time = cpu_get_clock_locked();
300 cur_icount = cpu_get_icount_locked();
302 delta = cur_icount - cur_time;
303 /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
305 && last_delta + ICOUNT_WOBBLE < delta * 2
306 && icount_time_shift > 0) {
307 /* The guest is getting too far ahead. Slow time down. */
311 && last_delta - ICOUNT_WOBBLE > delta * 2
312 && icount_time_shift < MAX_ICOUNT_SHIFT) {
313 /* The guest is getting too far behind. Speed time up. */
317 timers_state.qemu_icount_bias = cur_icount
318 - (timers_state.qemu_icount << icount_time_shift);
319 seqlock_write_end(&timers_state.vm_clock_seqlock);
322 static void icount_adjust_rt(void *opaque)
324 timer_mod(icount_rt_timer,
325 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
329 static void icount_adjust_vm(void *opaque)
331 timer_mod(icount_vm_timer,
332 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
333 NANOSECONDS_PER_SECOND / 10);
/* Round a nanosecond count up to a whole number of instructions at the
 * current icount_time_shift (ceiling division by 2^shift).
 */
337 static int64_t qemu_icount_round(int64_t count)
339     return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
342 static void icount_warp_rt(void)
347 /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
348 * changes from -1 to another value, so the race here is okay.
351 seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
352 warp_start = vm_clock_warp_start;
353 } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
355 if (warp_start == -1) {
359 seqlock_write_begin(&timers_state.vm_clock_seqlock);
360 if (runstate_is_running()) {
361 int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
362 cpu_get_clock_locked());
365 warp_delta = clock - vm_clock_warp_start;
366 if (use_icount == 2) {
368 * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
369 * far ahead of real time.
371 int64_t cur_icount = cpu_get_icount_locked();
372 int64_t delta = clock - cur_icount;
373 warp_delta = MIN(warp_delta, delta);
375 timers_state.qemu_icount_bias += warp_delta;
377 vm_clock_warp_start = -1;
378 seqlock_write_end(&timers_state.vm_clock_seqlock);
380 if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
381 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
385 static void icount_timer_cb(void *opaque)
387 /* No need for a checkpoint because the timer already synchronizes
388 * with CHECKPOINT_CLOCK_VIRTUAL_RT.
393 void qtest_clock_warp(int64_t dest)
395 int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
396 AioContext *aio_context;
397 assert(qtest_enabled());
398 aio_context = qemu_get_aio_context();
399 while (clock < dest) {
400 int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
401 int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
403 seqlock_write_begin(&timers_state.vm_clock_seqlock);
404 timers_state.qemu_icount_bias += warp;
405 seqlock_write_end(&timers_state.vm_clock_seqlock);
407 qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
408 timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
409 clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
411 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
414 void qemu_start_warp_timer(void)
423 /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
424 * do not fire, so computing the deadline does not make sense.
426 if (!runstate_is_running()) {
430 /* warp clock deterministically in record/replay mode */
431 if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
435 if (!all_cpu_threads_idle()) {
439 if (qtest_enabled()) {
440 /* When testing, qtest commands advance icount. */
444 /* We want to use the earliest deadline from ALL vm_clocks */
445 clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
446 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
448 static bool notified;
449 if (!icount_sleep && !notified) {
450 error_report("WARNING: icount sleep disabled and no active timers");
458 * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
459 * sleep. Otherwise, the CPU might be waiting for a future timer
460 * interrupt to wake it up, but the interrupt never comes because
461 * the vCPU isn't running any insns and thus doesn't advance the
462 * QEMU_CLOCK_VIRTUAL.
466 * We never let VCPUs sleep in no sleep icount mode.
467 * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
468 * to the next QEMU_CLOCK_VIRTUAL event and notify it.
469 * It is useful when we want a deterministic execution time,
470 * isolated from host latencies.
472 seqlock_write_begin(&timers_state.vm_clock_seqlock);
473 timers_state.qemu_icount_bias += deadline;
474 seqlock_write_end(&timers_state.vm_clock_seqlock);
475 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
478 * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
479 * "real" time, (related to the time left until the next event) has
480 * passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
481 * This avoids that the warps are visible externally; for example,
482 * you will not be sending network packets continuously instead of
485 seqlock_write_begin(&timers_state.vm_clock_seqlock);
486 if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
487 vm_clock_warp_start = clock;
489 seqlock_write_end(&timers_state.vm_clock_seqlock);
490 timer_mod_anticipate(icount_warp_timer, clock + deadline);
492 } else if (deadline == 0) {
493 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
497 static void qemu_account_warp_timer(void)
499 if (!use_icount || !icount_sleep) {
503 /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
504 * do not fire, so computing the deadline does not make sense.
506 if (!runstate_is_running()) {
510 /* warp clock deterministically in record/replay mode */
511 if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
515 timer_del(icount_warp_timer);
519 static bool icount_state_needed(void *opaque)
525 * This is a subsection for icount migration.
527 static const VMStateDescription icount_vmstate_timers = {
528 .name = "timer/icount",
530 .minimum_version_id = 1,
531 .needed = icount_state_needed,
532 .fields = (VMStateField[]) {
533 VMSTATE_INT64(qemu_icount_bias, TimersState),
534 VMSTATE_INT64(qemu_icount, TimersState),
535 VMSTATE_END_OF_LIST()
539 static const VMStateDescription vmstate_timers = {
542 .minimum_version_id = 1,
543 .fields = (VMStateField[]) {
544 VMSTATE_INT64(cpu_ticks_offset, TimersState),
545 VMSTATE_INT64(dummy, TimersState),
546 VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
547 VMSTATE_END_OF_LIST()
549 .subsections = (const VMStateDescription*[]) {
550 &icount_vmstate_timers,
/* Work item run on a vCPU (via async_run_on_cpu from
 * cpu_throttle_timer_tick) that sleeps the vCPU thread for a fraction
 * of each CPU_THROTTLE_TIMESLICE_NS slice proportional to the
 * configured throttle percentage.
 */
555 static void cpu_throttle_thread(void *opaque)
557     CPUState *cpu = opaque;
559     double throttle_ratio;
/* Throttling may have been disabled since this work was queued. */
562     if (!cpu_throttle_get_percentage()) {
/* pct in (0,1); ratio = sleep-time / run-time for the timeslice. */
566     pct = (double)cpu_throttle_get_percentage()/100;
567     throttle_ratio = pct / (1 - pct);
568     sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);
/* Drop the BQL while sleeping so the iothread can make progress;
 * clear the scheduled flag first so the next tick can re-queue us. */
570     qemu_mutex_unlock_iothread();
571     atomic_set(&cpu->throttle_thread_scheduled, 0);
572     g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
573     qemu_mutex_lock_iothread();
/* Periodic VIRTUAL_RT timer callback: queue cpu_throttle_thread on
 * each vCPU that is not already scheduled, then re-arm the timer so
 * that the run+sleep pattern repeats every timeslice.
 */
576 static void cpu_throttle_timer_tick(void *opaque)
581     /* Stop the timer if needed */
582     if (!cpu_throttle_get_percentage()) {
/* atomic_xchg ensures at most one throttle work item per vCPU is
 * in flight at a time. */
586         if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
587             async_run_on_cpu(cpu, cpu_throttle_thread, cpu);
/* Re-arm after the *running* portion of the slice: slice / (1-pct). */
591     pct = (double)cpu_throttle_get_percentage()/100;
592     timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
593                                    CPU_THROTTLE_TIMESLICE_NS / (1-pct));
/* Set the vCPU throttle percentage, clamped to
 * [CPU_THROTTLE_PCT_MIN, CPU_THROTTLE_PCT_MAX], and (re)start the
 * throttle timer one timeslice from now.
 */
596 void cpu_throttle_set(int new_throttle_pct)
598     /* Ensure throttle percentage is within valid range */
599     new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
600     new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);
602     atomic_set(&throttle_percentage, new_throttle_pct);
604     timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
605                                        CPU_THROTTLE_TIMESLICE_NS);
/* Disable throttling.  A zero percentage makes the next timer tick
 * and any queued throttle work items bail out without re-arming.
 */
608 void cpu_throttle_stop(void)
610     atomic_set(&throttle_percentage, 0);
/* True if a non-zero throttle percentage is currently configured. */
613 bool cpu_throttle_active(void)
615     return (cpu_throttle_get_percentage() != 0);
/* Current throttle percentage (0 = disabled); atomic read so it is
 * safe from any thread.
 */
618 int cpu_throttle_get_percentage(void)
620     return atomic_read(&throttle_percentage);
/* One-time init of the timekeeping state: seqlock, migration vmstate
 * registration for TimersState, and the throttle timer (created on
 * the VIRTUAL_RT clock so it keeps firing while vCPUs sleep).
 */
623 void cpu_ticks_init(void)
625     seqlock_init(&timers_state.vm_clock_seqlock);
626     vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
627     throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
628                                   cpu_throttle_timer_tick, NULL);
631 void configure_icount(QemuOpts *opts, Error **errp)
634 char *rem_str = NULL;
636 option = qemu_opt_get(opts, "shift");
638 if (qemu_opt_get(opts, "align") != NULL) {
639 error_setg(errp, "Please specify shift option when using align");
644 icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
646 icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
647 icount_timer_cb, NULL);
650 icount_align_option = qemu_opt_get_bool(opts, "align", false);
652 if (icount_align_option && !icount_sleep) {
653 error_setg(errp, "align=on and sleep=off are incompatible");
655 if (strcmp(option, "auto") != 0) {
657 icount_time_shift = strtol(option, &rem_str, 0);
658 if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
659 error_setg(errp, "icount: Invalid shift value");
663 } else if (icount_align_option) {
664 error_setg(errp, "shift=auto and align=on are incompatible");
665 } else if (!icount_sleep) {
666 error_setg(errp, "shift=auto and sleep=off are incompatible");
671 /* 125MIPS seems a reasonable initial guess at the guest speed.
672 It will be corrected fairly quickly anyway. */
673 icount_time_shift = 3;
675 /* Have both realtime and virtual time triggers for speed adjustment.
676 The realtime trigger catches emulated time passing too slowly,
677 the virtual time trigger catches emulated time passing too fast.
678 Realtime triggers occur even when idle, so use them less frequently
680 icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
681 icount_adjust_rt, NULL);
682 timer_mod(icount_rt_timer,
683 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
684 icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
685 icount_adjust_vm, NULL);
686 timer_mod(icount_vm_timer,
687 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
688 NANOSECONDS_PER_SECOND / 10);
691 /***********************************************************/
692 void hw_error(const char *fmt, ...)
698 fprintf(stderr, "qemu: hardware error: ");
699 vfprintf(stderr, fmt, ap);
700 fprintf(stderr, "\n");
702 fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
703 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
709 void cpu_synchronize_all_states(void)
714 cpu_synchronize_state(cpu);
718 void cpu_synchronize_all_post_reset(void)
723 cpu_synchronize_post_reset(cpu);
725 if (hax_enabled() && hax_ug_platform())
726 hax_cpu_synchronize_post_reset(cpu);
731 void cpu_synchronize_all_post_init(void)
736 cpu_synchronize_post_init(cpu);
738 if (hax_enabled() && hax_ug_platform())
739 hax_cpu_synchronize_post_init(cpu);
744 static int do_vm_stop(RunState state)
748 if (runstate_is_running()) {
752 vm_state_notify(0, state);
753 qapi_event_send_stop(&error_abort);
757 ret = blk_flush_all();
762 static bool cpu_can_run(CPUState *cpu)
767 if (cpu_is_stopped(cpu)) {
773 static void cpu_handle_guest_debug(CPUState *cpu)
775 gdb_set_stop_cpu(cpu);
776 qemu_system_debug_request();
781 static void sigbus_reraise(void)
784 struct sigaction action;
786 memset(&action, 0, sizeof(action));
787 action.sa_handler = SIG_DFL;
788 if (!sigaction(SIGBUS, &action, NULL)) {
791 sigaddset(&set, SIGBUS);
792 pthread_sigmask(SIG_UNBLOCK, &set, NULL);
794 perror("Failed to re-raise SIGBUS!\n");
798 static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
801 if (kvm_on_sigbus(siginfo->ssi_code,
802 (void *)(intptr_t)siginfo->ssi_addr)) {
807 static void qemu_init_sigbus(void)
809 struct sigaction action;
811 memset(&action, 0, sizeof(action));
812 action.sa_flags = SA_SIGINFO;
813 action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
814 sigaction(SIGBUS, &action, NULL);
816 prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
819 static void qemu_kvm_eat_signals(CPUState *cpu)
821 struct timespec ts = { 0, 0 };
827 sigemptyset(&waitset);
828 sigaddset(&waitset, SIG_IPI);
829 sigaddset(&waitset, SIGBUS);
832 r = sigtimedwait(&waitset, &siginfo, &ts);
833 if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
834 perror("sigtimedwait");
840 if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
848 r = sigpending(&chkset);
850 perror("sigpending");
853 } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
856 #else /* !CONFIG_LINUX */
858 static void qemu_init_sigbus(void)
862 static void qemu_kvm_eat_signals(CPUState *cpu)
865 #endif /* !CONFIG_LINUX */
868 static void dummy_signal(int sig)
872 static void qemu_kvm_init_cpu_signals(CPUState *cpu)
876 struct sigaction sigact;
878 memset(&sigact, 0, sizeof(sigact));
879 sigact.sa_handler = dummy_signal;
880 sigaction(SIG_IPI, &sigact, NULL);
882 pthread_sigmask(SIG_BLOCK, NULL, &set);
883 sigdelset(&set, SIG_IPI);
884 sigdelset(&set, SIGBUS);
885 r = kvm_set_signal_mask(cpu, &set);
887 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
893 static void qemu_kvm_init_cpu_signals(CPUState *cpu)
899 static QemuMutex qemu_global_mutex;
900 static QemuCond qemu_io_proceeded_cond;
901 static unsigned iothread_requesting_mutex;
903 static QemuThread io_thread;
906 static QemuCond qemu_cpu_cond;
908 static QemuCond qemu_pause_cond;
909 static QemuCond qemu_work_cond;
911 void qemu_init_cpu_loop(void)
914 qemu_cond_init(&qemu_cpu_cond);
915 qemu_cond_init(&qemu_pause_cond);
916 qemu_cond_init(&qemu_work_cond);
917 qemu_cond_init(&qemu_io_proceeded_cond);
918 qemu_mutex_init(&qemu_global_mutex);
920 qemu_thread_get_self(&io_thread);
923 void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
925 struct qemu_work_item wi;
927 if (qemu_cpu_is_self(cpu)) {
936 qemu_mutex_lock(&cpu->work_mutex);
937 if (cpu->queued_work_first == NULL) {
938 cpu->queued_work_first = &wi;
940 cpu->queued_work_last->next = &wi;
942 cpu->queued_work_last = &wi;
945 qemu_mutex_unlock(&cpu->work_mutex);
948 while (!atomic_mb_read(&wi.done)) {
949 CPUState *self_cpu = current_cpu;
951 qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
952 current_cpu = self_cpu;
956 void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
958 struct qemu_work_item *wi;
960 if (qemu_cpu_is_self(cpu)) {
965 wi = g_malloc0(sizeof(struct qemu_work_item));
970 qemu_mutex_lock(&cpu->work_mutex);
971 if (cpu->queued_work_first == NULL) {
972 cpu->queued_work_first = wi;
974 cpu->queued_work_last->next = wi;
976 cpu->queued_work_last = wi;
979 qemu_mutex_unlock(&cpu->work_mutex);
984 static void qemu_kvm_destroy_vcpu(CPUState *cpu)
986 if (kvm_destroy_vcpu(cpu) < 0) {
987 error_report("kvm_destroy_vcpu failed");
992 static void qemu_tcg_destroy_vcpu(CPUState *cpu)
996 static void flush_queued_work(CPUState *cpu)
998 struct qemu_work_item *wi;
1000 if (cpu->queued_work_first == NULL) {
1004 qemu_mutex_lock(&cpu->work_mutex);
1005 while (cpu->queued_work_first != NULL) {
1006 wi = cpu->queued_work_first;
1007 cpu->queued_work_first = wi->next;
1008 if (!cpu->queued_work_first) {
1009 cpu->queued_work_last = NULL;
1011 qemu_mutex_unlock(&cpu->work_mutex);
1013 qemu_mutex_lock(&cpu->work_mutex);
1017 atomic_mb_set(&wi->done, true);
1020 qemu_mutex_unlock(&cpu->work_mutex);
1021 qemu_cond_broadcast(&qemu_work_cond);
1024 static void qemu_wait_io_event_common(CPUState *cpu)
1028 cpu->stopped = true;
1029 qemu_cond_broadcast(&qemu_pause_cond);
1031 flush_queued_work(cpu);
1032 cpu->thread_kicked = false;
1035 static void qemu_tcg_wait_io_event(CPUState *cpu)
1037 while (all_cpu_threads_idle()) {
1038 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1041 while (iothread_requesting_mutex) {
1042 qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
1046 qemu_wait_io_event_common(cpu);
1051 static void qemu_hax_wait_io_event(CPUState *cpu)
1053 while (cpu_thread_is_idle(cpu)) {
1054 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1057 qemu_wait_io_event_common(cpu);
1061 static void qemu_kvm_wait_io_event(CPUState *cpu)
1063 while (cpu_thread_is_idle(cpu)) {
1064 qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
1067 qemu_kvm_eat_signals(cpu);
1068 qemu_wait_io_event_common(cpu);
1071 static void *qemu_kvm_cpu_thread_fn(void *arg)
1073 CPUState *cpu = arg;
1076 rcu_register_thread();
1078 qemu_mutex_lock_iothread();
1079 qemu_thread_get_self(cpu->thread);
1080 cpu->thread_id = qemu_get_thread_id();
1084 r = kvm_init_vcpu(cpu);
1086 fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
1090 qemu_kvm_init_cpu_signals(cpu);
1092 /* signal CPU creation */
1093 cpu->created = true;
1094 qemu_cond_signal(&qemu_cpu_cond);
1097 if (cpu_can_run(cpu)) {
1098 r = kvm_cpu_exec(cpu);
1099 if (r == EXCP_DEBUG) {
1100 cpu_handle_guest_debug(cpu);
1103 qemu_kvm_wait_io_event(cpu);
1104 } while (!cpu->unplug || cpu_can_run(cpu));
1106 qemu_kvm_destroy_vcpu(cpu);
1107 cpu->created = false;
1108 qemu_cond_signal(&qemu_cpu_cond);
1109 qemu_mutex_unlock_iothread();
1113 static void *qemu_dummy_cpu_thread_fn(void *arg)
1116 fprintf(stderr, "qtest is not supported under Windows\n");
1119 CPUState *cpu = arg;
1123 rcu_register_thread();
1125 qemu_mutex_lock_iothread();
1126 qemu_thread_get_self(cpu->thread);
1127 cpu->thread_id = qemu_get_thread_id();
1130 sigemptyset(&waitset);
1131 sigaddset(&waitset, SIG_IPI);
1133 /* signal CPU creation */
1134 cpu->created = true;
1135 qemu_cond_signal(&qemu_cpu_cond);
1140 qemu_mutex_unlock_iothread();
1143 r = sigwait(&waitset, &sig);
1144 } while (r == -1 && (errno == EAGAIN || errno == EINTR));
1149 qemu_mutex_lock_iothread();
1151 qemu_wait_io_event_common(cpu);
1158 static void tcg_exec_all(void);
1160 static void *qemu_tcg_cpu_thread_fn(void *arg)
1162 CPUState *cpu = arg;
1163 CPUState *remove_cpu = NULL;
1165 rcu_register_thread();
1167 qemu_mutex_lock_iothread();
1168 qemu_thread_get_self(cpu->thread);
1171 cpu->thread_id = qemu_get_thread_id();
1172 cpu->created = true;
1175 qemu_cond_signal(&qemu_cpu_cond);
1177 /* wait for initial kick-off after machine start */
1178 while (first_cpu->stopped) {
1179 qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
1181 /* process any pending work */
1183 qemu_wait_io_event_common(cpu);
1187 /* process any pending work */
1188 atomic_mb_set(&exit_request, 1);
1194 int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1196 if (deadline == 0) {
1197 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
1200 qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
1202 if (cpu->unplug && !cpu_can_run(cpu)) {
1208 qemu_tcg_destroy_vcpu(remove_cpu);
1209 cpu->created = false;
1210 qemu_cond_signal(&qemu_cpu_cond);
1219 static void *qemu_hax_cpu_thread_fn(void *arg)
1221 CPUState *cpu = arg;
1223 qemu_thread_get_self(cpu->thread);
1224 qemu_mutex_lock(&qemu_global_mutex);
1226 cpu->thread_id = qemu_get_thread_id();
1227 cpu->created = true;
1232 qemu_cond_signal(&qemu_cpu_cond);
1235 if (cpu_can_run(cpu)) {
1236 r = hax_smp_cpu_exec(cpu);
1237 if (r == EXCP_DEBUG) {
1238 cpu_handle_guest_debug(cpu);
1241 qemu_hax_wait_io_event(cpu);
1247 static void qemu_cpu_kick_thread(CPUState *cpu)
1252 if (cpu->thread_kicked) {
1255 cpu->thread_kicked = true;
1256 err = pthread_kill(cpu->thread->thread, SIG_IPI);
1258 fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
1262 # ifdef CONFIG_DARWIN
1263 /* The cpu thread cannot catch it reliably when shutdown the guest on Mac.
1264 * We can double check it and resend it
1266 if (!exit_request) {
1267 // FIXME: check it soon
1271 if (hax_enabled() && hax_ug_platform()) {
1272 cpu->exit_request = 1;
1279 // FIXME: check it soon
1281 if (!qemu_cpu_is_self(cpu)) {
1284 if (SuspendThread(cpu->hThread) == (DWORD)-1) {
1285 fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
1290 /* On multi-core systems, we are not sure that the thread is actually
1291 * suspended until we can get the context.
1293 tcgContext.ContextFlags = CONTEXT_CONTROL;
1294 while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
1300 if(hax_enabled() && hax_ug_platform()) {
1301 cpu->exit_request = 1;
1305 if (ResumeThread(cpu->hThread) == (DWORD)-1) {
1306 fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
1312 if (!qemu_cpu_is_self(cpu)) {
1313 if(hax_enabled() && hax_ug_platform()) {
1314 cpu->exit_request = 1;
1321 static void qemu_cpu_kick_no_halt(void)
1324 /* Ensure whatever caused the exit has reached the CPU threads before
1325 * writing exit_request.
1327 atomic_mb_set(&exit_request, 1);
1328 cpu = atomic_mb_read(&tcg_current_cpu);
1334 void qemu_cpu_kick(CPUState *cpu)
1336 qemu_cond_broadcast(cpu->halt_cond);
1337 if (tcg_enabled()) {
1338 qemu_cpu_kick_no_halt();
1340 qemu_cpu_kick_thread(cpu);
1344 void qemu_cpu_kick_self(void)
1346 assert(current_cpu);
1347 qemu_cpu_kick_thread(current_cpu);
1350 bool qemu_cpu_is_self(CPUState *cpu)
1352 return qemu_thread_is_self(cpu->thread);
1355 bool qemu_in_vcpu_thread(void)
1357 return current_cpu && qemu_cpu_is_self(current_cpu);
1360 static __thread bool iothread_locked = false;
1362 bool qemu_mutex_iothread_locked(void)
1364 return iothread_locked;
1367 void qemu_mutex_lock_iothread(void)
1369 atomic_inc(&iothread_requesting_mutex);
1370 /* In the simple case there is no need to bump the VCPU thread out of
1371 * TCG code execution.
1373 if (!tcg_enabled() || qemu_in_vcpu_thread() ||
1374 !first_cpu || !first_cpu->created) {
1375 qemu_mutex_lock(&qemu_global_mutex);
1376 atomic_dec(&iothread_requesting_mutex);
1378 if (qemu_mutex_trylock(&qemu_global_mutex)) {
1379 qemu_cpu_kick_no_halt();
1380 qemu_mutex_lock(&qemu_global_mutex);
1382 atomic_dec(&iothread_requesting_mutex);
1383 qemu_cond_broadcast(&qemu_io_proceeded_cond);
1385 iothread_locked = true;
/* Release the BQL.  The thread-local flag is cleared before the
 * unlock so qemu_mutex_iothread_locked() never reports true while the
 * mutex is already released.
 */
1388 void qemu_mutex_unlock_iothread(void)
1390     iothread_locked = false;
1391     qemu_mutex_unlock(&qemu_global_mutex);
1394 static int all_vcpus_paused(void)
1399 if (!cpu->stopped) {
1407 void pause_all_vcpus(void)
1411 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
1417 if (qemu_in_vcpu_thread()) {
1419 if (!kvm_enabled()) {
1422 cpu->stopped = true;
1428 while (!all_vcpus_paused()) {
1429 qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
1436 void cpu_resume(CPUState *cpu)
1439 cpu->stopped = false;
1443 void resume_all_vcpus(void)
1447 qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
1453 void cpu_remove(CPUState *cpu)
1460 void cpu_remove_sync(CPUState *cpu)
1463 while (cpu->created) {
1464 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1468 /* For temporary buffers for forming a name */
1469 #define VCPU_THREAD_NAME_SIZE 16
1471 static void qemu_tcg_init_vcpu(CPUState *cpu)
1474 if (hax_enabled()) {
1478 char thread_name[VCPU_THREAD_NAME_SIZE];
1479 static QemuCond *tcg_halt_cond;
1480 static QemuThread *tcg_cpu_thread;
1482 /* share a single thread for all cpus with TCG */
1483 if (!tcg_cpu_thread) {
1484 cpu->thread = g_malloc0(sizeof(QemuThread));
1485 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1486 qemu_cond_init(cpu->halt_cond);
1487 tcg_halt_cond = cpu->halt_cond;
1488 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
1490 qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
1491 cpu, QEMU_THREAD_JOINABLE);
1493 cpu->hThread = qemu_thread_get_handle(cpu->thread);
1495 while (!cpu->created) {
1496 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1498 tcg_cpu_thread = cpu->thread;
1500 cpu->thread = tcg_cpu_thread;
1501 cpu->halt_cond = tcg_halt_cond;
1506 static void qemu_hax_start_vcpu(CPUState *cpu)
1508 char thread_name[VCPU_THREAD_NAME_SIZE];
1510 cpu->thread = g_malloc0(sizeof(QemuThread));
1511 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1512 qemu_cond_init(cpu->halt_cond);
1514 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
1517 qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
1518 cpu, QEMU_THREAD_JOINABLE);
1520 cpu->hThread = qemu_thread_get_handle(cpu->thread);
1522 while (!cpu->created) {
1523 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1528 static void qemu_kvm_start_vcpu(CPUState *cpu)
1530 char thread_name[VCPU_THREAD_NAME_SIZE];
1532 cpu->thread = g_malloc0(sizeof(QemuThread));
1533 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1534 qemu_cond_init(cpu->halt_cond);
1535 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
1537 qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
1538 cpu, QEMU_THREAD_JOINABLE);
1539 while (!cpu->created) {
1540 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1544 static void qemu_dummy_start_vcpu(CPUState *cpu)
1546 char thread_name[VCPU_THREAD_NAME_SIZE];
1548 cpu->thread = g_malloc0(sizeof(QemuThread));
1549 cpu->halt_cond = g_malloc0(sizeof(QemuCond));
1550 qemu_cond_init(cpu->halt_cond);
1551 snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
1553 qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
1554 QEMU_THREAD_JOINABLE);
1555 while (!cpu->created) {
1556 qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
1560 void qemu_init_vcpu(CPUState *cpu)
1562 cpu->nr_cores = smp_cores;
1563 cpu->nr_threads = smp_threads;
1564 cpu->stopped = true;
1567 /* If the target cpu hasn't set up any address spaces itself,
1568 * give it the default one.
1570 AddressSpace *as = address_space_init_shareable(cpu->memory,
1573 cpu_address_space_init(cpu, as, 0);
1576 if (kvm_enabled()) {
1577 qemu_kvm_start_vcpu(cpu);
1579 } else if (hax_enabled() && hax_ug_platform()) {
1580 qemu_hax_start_vcpu(cpu);
1582 } else if (tcg_enabled()) {
1583 qemu_tcg_init_vcpu(cpu);
1585 qemu_dummy_start_vcpu(cpu);
1589 void cpu_stop_current(void)
1592 current_cpu->stop = false;
1593 current_cpu->stopped = true;
1594 cpu_exit(current_cpu);
1595 qemu_cond_broadcast(&qemu_pause_cond);
1599 int vm_stop(RunState state)
1601 if (qemu_in_vcpu_thread()) {
1602 qemu_system_vmstop_request_prepare();
1603 qemu_system_vmstop_request(state);
1605 * FIXME: should not return to device code in case
1606 * vm_stop() has been requested.
1612 return do_vm_stop(state);
1615 /* does a state transition even if the VM is already stopped,
1616 current state is forgotten forever */
1617 int vm_stop_force_state(RunState state)
1619 if (runstate_is_running()) {
1620 return vm_stop(state);
1622 runstate_set(state);
1625 /* Make sure to return an error if the flush in a previous vm_stop()
1627 return blk_flush_all();
1631 static int64_t tcg_get_icount_limit(void)
1635 if (replay_mode != REPLAY_MODE_PLAY) {
1636 deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
1638 /* Maintain prior (possibly buggy) behaviour where if no deadline
1639 * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
1640 * INT32_MAX nanoseconds ahead, we still use INT32_MAX
1643 if ((deadline < 0) || (deadline > INT32_MAX)) {
1644 deadline = INT32_MAX;
1647 return qemu_icount_round(deadline);
1649 return replay_get_instructions();
1653 static int tcg_cpu_exec(CPUState *cpu)
1656 #ifdef CONFIG_PROFILER
1660 #ifdef CONFIG_PROFILER
1661 ti = profile_getclock();
1666 timers_state.qemu_icount -= (cpu->icount_decr.u16.low
1667 + cpu->icount_extra);
1668 cpu->icount_decr.u16.low = 0;
1669 cpu->icount_extra = 0;
1670 count = tcg_get_icount_limit();
1671 timers_state.qemu_icount += count;
1672 decr = (count > 0xffff) ? 0xffff : count;
1674 cpu->icount_decr.u16.low = decr;
1675 cpu->icount_extra = count;
1677 ret = cpu_exec(cpu);
1678 #ifdef CONFIG_PROFILER
1679 tcg_time += profile_getclock() - ti;
1682 /* Fold pending instructions back into the
1683 instruction counter, and clear the interrupt flag. */
1684 timers_state.qemu_icount -= (cpu->icount_decr.u16.low
1685 + cpu->icount_extra);
1686 cpu->icount_decr.u32 = 0;
1687 cpu->icount_extra = 0;
1688 replay_account_executed_instructions();
1693 static void tcg_exec_all(void)
1697 /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
1698 qemu_account_warp_timer();
1700 if (next_cpu == NULL) {
1701 next_cpu = first_cpu;
1703 for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
1704 CPUState *cpu = next_cpu;
1706 qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
1707 (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
1709 if (cpu_can_run(cpu)) {
1710 r = tcg_cpu_exec(cpu);
1711 if (r == EXCP_DEBUG) {
1712 cpu_handle_guest_debug(cpu);
1715 } else if (cpu->stop || cpu->stopped) {
1717 next_cpu = CPU_NEXT(cpu);
1723 /* Pairs with smp_wmb in qemu_cpu_kick. */
1724 atomic_mb_set(&exit_request, 0);
1727 void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
1729 /* XXX: implement xxx_cpu_list for targets that still miss it */
1730 #if defined(cpu_list)
1731 cpu_list(f, cpu_fprintf);
1735 CpuInfoList *qmp_query_cpus(Error **errp)
1737 CpuInfoList *head = NULL, *cur_item = NULL;
1742 #if defined(TARGET_I386)
1743 X86CPU *x86_cpu = X86_CPU(cpu);
1744 CPUX86State *env = &x86_cpu->env;
1745 #elif defined(TARGET_PPC)
1746 PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
1747 CPUPPCState *env = &ppc_cpu->env;
1748 #elif defined(TARGET_SPARC)
1749 SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
1750 CPUSPARCState *env = &sparc_cpu->env;
1751 #elif defined(TARGET_MIPS)
1752 MIPSCPU *mips_cpu = MIPS_CPU(cpu);
1753 CPUMIPSState *env = &mips_cpu->env;
1754 #elif defined(TARGET_TRICORE)
1755 TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
1756 CPUTriCoreState *env = &tricore_cpu->env;
1759 cpu_synchronize_state(cpu);
1761 info = g_malloc0(sizeof(*info));
1762 info->value = g_malloc0(sizeof(*info->value));
1763 info->value->CPU = cpu->cpu_index;
1764 info->value->current = (cpu == first_cpu);
1765 info->value->halted = cpu->halted;
1766 info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
1767 info->value->thread_id = cpu->thread_id;
1768 #if defined(TARGET_I386)
1769 info->value->arch = CPU_INFO_ARCH_X86;
1770 info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
1771 #elif defined(TARGET_PPC)
1772 info->value->arch = CPU_INFO_ARCH_PPC;
1773 info->value->u.ppc.nip = env->nip;
1774 #elif defined(TARGET_SPARC)
1775 info->value->arch = CPU_INFO_ARCH_SPARC;
1776 info->value->u.q_sparc.pc = env->pc;
1777 info->value->u.q_sparc.npc = env->npc;
1778 #elif defined(TARGET_MIPS)
1779 info->value->arch = CPU_INFO_ARCH_MIPS;
1780 info->value->u.q_mips.PC = env->active_tc.PC;
1781 #elif defined(TARGET_TRICORE)
1782 info->value->arch = CPU_INFO_ARCH_TRICORE;
1783 info->value->u.tricore.PC = env->PC;
1785 info->value->arch = CPU_INFO_ARCH_OTHER;
1788 /* XXX: waiting for the qapi to support GSList */
1790 head = cur_item = info;
1792 cur_item->next = info;
1800 void qmp_memsave(int64_t addr, int64_t size, const char *filename,
1801 bool has_cpu, int64_t cpu_index, Error **errp)
1807 int64_t orig_addr = addr, orig_size = size;
1813 cpu = qemu_get_cpu(cpu_index);
1815 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
1820 f = fopen(filename, "wb");
1822 error_setg_file_open(errp, errno, filename);
1830 if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
1831 error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
1832 " specified", orig_addr, orig_size);
1835 if (fwrite(buf, 1, l, f) != l) {
1836 error_setg(errp, QERR_IO_ERROR);
1847 void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
1854 f = fopen(filename, "wb");
1856 error_setg_file_open(errp, errno, filename);
1864 cpu_physical_memory_read(addr, buf, l);
1865 if (fwrite(buf, 1, l, f) != l) {
1866 error_setg(errp, QERR_IO_ERROR);
1877 void qmp_inject_nmi(Error **errp)
1879 nmi_monitor_handle(monitor_get_cpu_index(), errp);
1882 void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
1888 cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
1889 (cpu_get_clock() - cpu_get_icount())/SCALE_MS);
1890 if (icount_align_option) {
1891 cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n", -max_delay/SCALE_MS);
1892 cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n", max_advance/SCALE_MS);
1894 cpu_fprintf(f, "Max guest delay NA\n");
1895 cpu_fprintf(f, "Max guest advance NA\n");