4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu-common.h"
21 #ifdef CONFIG_USER_ONLY
32 #include "monitor/monitor.h"
33 #include "sysemu/char.h"
34 #include "sysemu/sysemu.h"
35 #include "exec/gdbstub.h"
38 #define MAX_PACKET_LENGTH 4096
41 #include "qemu/sockets.h"
42 #include "sysemu/kvm.h"
43 #include "qemu/bitops.h"
/* Debugger read/write of guest memory.  Uses the CPU-specific
 * memory_rw_debug hook when the CPUClass provides one, otherwise falls
 * back to the generic cpu_memory_rw_debug().  Returns non-zero on
 * failure (callers reply "E14" to gdb on error). */
45 static inline int target_memory_rw_debug(CPUState *cpu, target_ulong addr,
46 uint8_t *buf, int len, bool is_write)
48 CPUClass *cc = CPU_GET_CLASS(cpu);
50 if (cc->memory_rw_debug) {
51 return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
53 return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
65 GDB_SIGNAL_UNKNOWN = 143
68 #ifdef CONFIG_USER_ONLY
70 /* Map target signal numbers to GDB protocol signal numbers and vice
71 * versa. For user emulation's currently supported systems, we can
72 * assume most signals are defined.
75 static int gdb_signal_table[] = {
235 /* In system mode we only need SIGINT and SIGTRAP; other signals
236 are not yet supported. */
243 static int gdb_signal_table[] = {
253 #ifdef CONFIG_USER_ONLY
/* Map a target (guest) signal number to the GDB protocol signal number
 * by linear search of gdb_signal_table (indexed by gdb signal).
 * Returns GDB_SIGNAL_UNKNOWN when the signal has no mapping. */
254 static int target_signal_to_gdb (int sig)
257     for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
258         if (gdb_signal_table[i] == sig)
260     return GDB_SIGNAL_UNKNOWN;
/* Inverse of target_signal_to_gdb: GDB protocol signal -> target signal
 * by direct table index.  Callers check for -1 on failure (see the 'C'
 * packet handler), so out-of-range input presumably yields -1 — the
 * failing branch is outside this view. */
264 static int gdb_signal_to_target (int sig)
266     if (sig < ARRAY_SIZE (gdb_signal_table))
267         return gdb_signal_table[sig];
/* One dynamically registered block of coprocessor/extra registers
 * (see gdb_register_coprocessor()); kept as a singly linked list. */
274 typedef struct GDBRegisterState {
280     struct GDBRegisterState *next;
/* Per-connection state of the gdb stub. */
290 typedef struct GDBState {
291     CPUState *c_cpu; /* current CPU for step/continue ops */
292     CPUState *g_cpu; /* current CPU for other ops */
293     CPUState *query_cpu; /* for q{f|s}ThreadInfo */
294     enum RSState state; /* parsing state */
295     char line_buf[MAX_PACKET_LENGTH]; /* incoming packet payload being parsed */
298     uint8_t last_packet[MAX_PACKET_LENGTH + 4]; /* last framed reply; +4 covers '$'...'#'+2 csum chars */
301 #ifdef CONFIG_USER_ONLY
305     CharDriverState *chr; /* gdb connection (system-emulation path) */
306     CharDriverState *mon_chr; /* presumably the monitor muxed over the gdb port — TODO confirm */
308     char syscall_buf[256]; /* buffer for an in-flight gdb ('F' packet) syscall — TODO confirm */
309     gdb_syscall_complete_cb current_syscall_cb; /* completion callback for that syscall */
312 /* By default use no IRQs and no timers while single stepping so as to
313 * make single stepping like an ICE HW step.
315 static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;
317 static GDBState *gdbserver_state;
319 /* This is an ugly hack to cope with both new and old gdb.
320 If gdb sends qXfer:features:read then assume we're talking to a newish
321 gdb that understands target descriptions. */
322 static int gdb_has_xml;
324 #ifdef CONFIG_USER_ONLY
325 /* XXX: This is not thread safe. Do we care? */
326 static int gdbserver_fd = -1;
/* Read one byte from the gdb client socket (user-mode path).
 * ECONNRESET and a zero-length read both mean the peer disconnected;
 * EINTR/EAGAIN are presumably retried — TODO confirm, the retry path is
 * outside this view. */
328 static int get_char(GDBState *s)
334         ret = qemu_recv(s->fd, &ch, 1, 0);
336             if (errno == ECONNRESET)
338             if (errno != EINTR && errno != EAGAIN)
340         } else if (ret == 0) {
358 /* If gdb is connected when the first semihosting syscall occurs then use
359 remote gdb syscalls. Otherwise use native file IO. */
/* Decide (and latch, on first call) how semihosting syscalls are routed:
 * via the gdb remote syscall interface if a gdb server existed at the
 * time of the first syscall, otherwise native file I/O. */
360 int use_gdb_syscalls(void)
362     if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
363         gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
366     return gdb_syscall_mode == GDB_SYS_ENABLED;
369 /* Resume execution. */
/* Resume guest execution after a debugger stop.  User mode just flags
 * the main loop; system mode moves a panicked guest to RUN_STATE_DEBUG
 * and only resumes when no reset is pending. */
370 static inline void gdb_continue(GDBState *s)
372 #ifdef CONFIG_USER_ONLY
373     s->running_state = 1;
375     if (runstate_check(RUN_STATE_GUEST_PANICKED)) {
376         runstate_set(RUN_STATE_DEBUG);
378     if (!runstate_needs_reset()) {
/* Transmit len raw bytes to the debugger: a send() loop on the socket in
 * user mode (EINTR/EAGAIN presumably retried — TODO confirm), or the
 * chardev backend in system mode. */
384 static void put_buffer(GDBState *s, const uint8_t *buf, int len)
386 #ifdef CONFIG_USER_ONLY
390         ret = send(s->fd, buf, len, 0);
392             if (errno != EINTR && errno != EAGAIN)
400     qemu_chr_fe_write(s->chr, buf, len);
/* Convert one ASCII hex digit to its numeric value (0..15).
 * Accepts both cases; returns 0 for any non-hex character so that
 * malformed packet bytes degrade gracefully instead of misbehaving. */
static inline int fromhex(int v)
{
    if (v >= '0' && v <= '9')
        return v - '0';
    else if (v >= 'A' && v <= 'F')
        return v - 'A' + 10;
    else if (v >= 'a' && v <= 'f')
        return v - 'a' + 10;
    else
        return 0;
}
/* Convert a value in 0..15 to its ASCII hex digit (lowercase for a-f),
 * the inverse of fromhex() for valid digits. */
static inline int tohex(int v)
{
    if (v < 10)
        return v + '0';
    else
        return v - 10 + 'a';
}
/* Encode len raw bytes from mem as 2*len hex characters in buf,
 * high nibble first, using tohex(). */
424 static void memtohex(char *buf, const uint8_t *mem, int len)
429     for(i = 0; i < len; i++) {
431         *q++ = tohex(c >> 4);
432         *q++ = tohex(c & 0xf);
/* Decode len bytes into mem from a hex string: each output byte is built
 * from one high/low digit pair via fromhex().  buf presumably advances
 * two characters per byte — the advance is outside this view. */
437 static void hextomem(uint8_t *mem, const char *buf, int len)
441     for(i = 0; i < len; i++) {
442         mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
447 /* return -1 if error, 0 if OK */
/* Frame buf as a remote-protocol packet ('$' + data + '#' + two-digit
 * checksum) into s->last_packet and transmit it.  The framed copy and
 * its length are retained in the state so the reply can be resent. */
448 static int put_packet_binary(GDBState *s, const char *buf, int len)
459     for(i = 0; i < len; i++) {
463     *(p++) = tohex((csum >> 4) & 0xf);
464     *(p++) = tohex((csum) & 0xf);
466     s->last_packet_len = p - s->last_packet;
467     put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
469 #ifdef CONFIG_USER_ONLY
482 /* return -1 if error, 0 if OK */
483 static int put_packet(GDBState *s, const char *buf)
486 printf("reply='%s'\n", buf);
489 return put_packet_binary(s, buf, strlen(buf));
492 /* The GDB remote protocol transfers values in target byte order. This means
493 we can use the raw memory access routines to access the value buffer.
494 Conveniently, these also handle the case where the buffer is mis-aligned.
496 #define GET_REG8(val) do { \
497 stb_p(mem_buf, val); \
500 #define GET_REG16(val) do { \
501 stw_p(mem_buf, val); \
504 #define GET_REG32(val) do { \
505 stl_p(mem_buf, val); \
508 #define GET_REG64(val) do { \
509 stq_p(mem_buf, val); \
513 #if TARGET_LONG_BITS == 64
514 #define GET_REGL(val) GET_REG64(val)
515 #define ldtul_p(addr) ldq_p(addr)
517 #define GET_REGL(val) GET_REG32(val)
518 #define ldtul_p(addr) ldl_p(addr)
521 #if defined(TARGET_I386)
524 static const int gpr_map[16] = {
525 R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
526 8, 9, 10, 11, 12, 13, 14, 15
529 #define gpr_map gpr_map32
531 static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
533 #define IDX_IP_REG CPU_NB_REGS
534 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
535 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
536 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
537 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
538 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
/* x86: read register n into mem_buf in gdb wire (target-endian) format.
 * Layout follows the IDX_* constants above: GPRs, PC, flags, six segment
 * selectors, FP stack + FP control words, XMM regs, MXCSR.  64-bit
 * widths are used only when executing 64-bit code (HF_CS64_MASK).
 * The GET_REG* macros store the value and presumably return its size in
 * bytes — the macro tails are outside this view. */
540 static int cpu_gdb_read_register(CPUX86State *env, uint8_t *mem_buf, int n)
542     if (n < CPU_NB_REGS) {
543         if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
544             GET_REG64(env->regs[gpr_map[n]]);
545         } else if (n < CPU_NB_REGS32) {
546             GET_REG32(env->regs[gpr_map32[n]]);
548     } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
549 #ifdef USE_X86LDOUBLE
550         /* FIXME: byteswap float values - after fixing fpregs layout. */
551         memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
553         memset(mem_buf, 0, 10);
556     } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
558         if (n < CPU_NB_REGS32 ||
559             (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
560             stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
561             stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
567             if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
573             GET_REG32(env->eflags);
576             GET_REG32(env->segs[R_CS].selector);
577         case IDX_SEG_REGS + 1:
578             GET_REG32(env->segs[R_SS].selector);
579         case IDX_SEG_REGS + 2:
580             GET_REG32(env->segs[R_DS].selector);
581         case IDX_SEG_REGS + 3:
582             GET_REG32(env->segs[R_ES].selector);
583         case IDX_SEG_REGS + 4:
584             GET_REG32(env->segs[R_FS].selector);
585         case IDX_SEG_REGS + 5:
586             GET_REG32(env->segs[R_GS].selector);
588         case IDX_FP_REGS + 8:
589             GET_REG32(env->fpuc);
590         case IDX_FP_REGS + 9:
            /* Synthesize the FPU status word: TOP (fpstt) lives in bits 11-13. */
591             GET_REG32((env->fpus & ~0x3800) |
592                       (env->fpstt & 0x7) << 11);
593         case IDX_FP_REGS + 10:
594             GET_REG32(0); /* ftag */
595         case IDX_FP_REGS + 11:
596             GET_REG32(0); /* fiseg */
597         case IDX_FP_REGS + 12:
598             GET_REG32(0); /* fioff */
599         case IDX_FP_REGS + 13:
600             GET_REG32(0); /* foseg */
601         case IDX_FP_REGS + 14:
602             GET_REG32(0); /* fooff */
603         case IDX_FP_REGS + 15:
604             GET_REG32(0); /* fop */
607             GET_REG32(env->mxcsr);
/* Load a segment selector written by gdb.  In user mode the normal
 * loader is used; in system mode the cached base/limit/flags are
 * rebuilt: real/VM86 mode derives base = selector << 4, protected mode
 * looks up the descriptor via cpu_x86_get_descr_debug(). */
613 static int cpu_x86_gdb_load_seg(CPUX86State *env, int sreg, uint8_t *mem_buf)
615     uint16_t selector = ldl_p(mem_buf);
617     if (selector != env->segs[sreg].selector) {
618 #if defined(CONFIG_USER_ONLY)
619         cpu_x86_load_seg(env, sreg, selector);
621         unsigned int limit, flags;
624         if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
625             base = selector << 4;
629             if (!cpu_x86_get_descr_debug(env, selector, &base, &limit,
634         cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
/* x86: write register n from the gdb wire value in mem_buf; mirror of
 * cpu_gdb_read_register above.  Returns the number of bytes consumed;
 * writes to synthesized/unimplemented registers are accepted and
 * ignored so gdb does not error out. */
640 static int cpu_gdb_write_register(CPUX86State *env, uint8_t *mem_buf, int n)
644     if (n < CPU_NB_REGS) {
645         if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
646             env->regs[gpr_map[n]] = ldtul_p(mem_buf);
647             return sizeof(target_ulong);
648         } else if (n < CPU_NB_REGS32) {
            /* 32-bit write: clear then merge the low half only. */
650             env->regs[n] &= ~0xffffffffUL;
651             env->regs[n] |= (uint32_t)ldl_p(mem_buf);
654     } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
655 #ifdef USE_X86LDOUBLE
656         /* FIXME: byteswap float values - after fixing fpregs layout. */
657         memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
660     } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
662         if (n < CPU_NB_REGS32 ||
663             (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
664             env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
665             env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
671             if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
672                 env->eip = ldq_p(mem_buf);
675                 env->eip &= ~0xffffffffUL;
676                 env->eip |= (uint32_t)ldl_p(mem_buf);
680             env->eflags = ldl_p(mem_buf);
684             return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
685         case IDX_SEG_REGS + 1:
686             return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
687         case IDX_SEG_REGS + 2:
688             return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
689         case IDX_SEG_REGS + 3:
690             return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
691         case IDX_SEG_REGS + 4:
692             return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
693         case IDX_SEG_REGS + 5:
694             return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);
696         case IDX_FP_REGS + 8:
697             env->fpuc = ldl_p(mem_buf);
699         case IDX_FP_REGS + 9:
            /* Split the FPU status word back into fpus + TOP (fpstt). */
700             tmp = ldl_p(mem_buf);
701             env->fpstt = (tmp >> 11) & 7;
702             env->fpus = tmp & ~0x3800;
704         case IDX_FP_REGS + 10: /* ftag */
706         case IDX_FP_REGS + 11: /* fiseg */
708         case IDX_FP_REGS + 12: /* fioff */
710         case IDX_FP_REGS + 13: /* foseg */
712         case IDX_FP_REGS + 14: /* fooff */
714         case IDX_FP_REGS + 15: /* fop */
718             env->mxcsr = ldl_p(mem_buf);
722     /* Unrecognised register. */
726 #elif defined (TARGET_PPC)
728 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
729 expects whatever the target description contains. Due to a
730 historical mishap the FP registers appear in between core integer
731 regs and PC, MSR, CR, and so forth. We hack round this by giving the
732 FP regs zero size when talking to a newer gdb. */
733 #if defined (TARGET_PPC64)
734 #define GDB_CORE_XML "power64-core.xml"
736 #define GDB_CORE_XML "power-core.xml"
/* PPC: read register n — GPRs first, FPRs at 32+, then special regs.
 * The 32-bit CR image is assembled from the eight 4-bit crf[] fields. */
739 static int cpu_gdb_read_register(CPUPPCState *env, uint8_t *mem_buf, int n)
743         GET_REGL(env->gpr[n]);
749             stfq_p(mem_buf, env->fpr[n-32]);
761             for (i = 0; i < 8; i++) {
762                 cr |= env->crf[i] << (32 - ((i + 1) * 4));
777             GET_REG32(env->fpscr);
/* PPC: write register n; mirror of the read side.  MSR goes through
 * ppc_store_msr() and FPSCR through store_fpscr() so side effects are
 * applied; CR is scattered back into the eight crf[] nibbles. */
784 static int cpu_gdb_write_register(CPUPPCState *env, uint8_t *mem_buf, int n)
788         env->gpr[n] = ldtul_p(mem_buf);
789         return sizeof(target_ulong);
795             env->fpr[n-32] = ldfq_p(mem_buf);
800             env->nip = ldtul_p(mem_buf);
801             return sizeof(target_ulong);
803             ppc_store_msr(env, ldtul_p(mem_buf));
804             return sizeof(target_ulong);
807             uint32_t cr = ldl_p(mem_buf);
809             for (i = 0; i < 8; i++) {
810                 env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
815             env->lr = ldtul_p(mem_buf);
816             return sizeof(target_ulong);
818             env->ctr = ldtul_p(mem_buf);
819             return sizeof(target_ulong);
821             env->xer = ldtul_p(mem_buf);
822             return sizeof(target_ulong);
828             store_fpscr(env, ldtul_p(mem_buf), 0xffffffff);
829             return sizeof(target_ulong);
835 #elif defined (TARGET_SPARC)
838 #define GET_REGA(val) GET_REG32(val)
840 #define GET_REGA(val) GET_REGL(val)
/* SPARC: read register n — globals, current register window, FP regs
 * (as 32-bit halves of fpr[], or 64-bit even-numbered on sparc64), then
 * Y/PSR/... status registers.  On sparc64 the combined state register
 * packs CCR/ASI/PSTATE/CWP into one 64-bit value. */
843 static int cpu_gdb_read_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
847         GET_REGA(env->gregs[n]);
850         /* register window */
851         GET_REGA(env->regwptr[n - 8]);
853 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
857             GET_REG32(env->fpr[(n - 32) / 2].l.lower);
859             GET_REG32(env->fpr[(n - 32) / 2].l.upper);
862     /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
867         GET_REGA(cpu_get_psr(env));
879         GET_REGA(0); /* csr */
887             GET_REG32(env->fpr[(n - 32) / 2].l.lower);
889             GET_REG32(env->fpr[(n - 32) / 2].l.upper);
893         /* f32-f62 (double width, even numbers only) */
894         GET_REG64(env->fpr[(n - 32) / 2].ll);
902         GET_REGL((cpu_get_ccr(env) << 32) |
903                  ((env->asi & 0xff) << 24) |
904                  ((env->pstate & 0xfff) << 8) |
/* SPARC: write register n; mirror of the read side.  PSR, CCR and CWP
 * go through their cpu_put_* helpers so dependent state stays in sync;
 * the sparc64 combined state register is unpacked field by field. */
917 static int cpu_gdb_write_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
919 #if defined(TARGET_ABI32)
922     tmp = ldl_p(mem_buf);
926     tmp = ldtul_p(mem_buf);
933         /* register window */
934         env->regwptr[n - 8] = tmp;
936 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
941             env->fpr[(n - 32) / 2].l.lower = tmp;
943             env->fpr[(n - 32) / 2].l.upper = tmp;
946         /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
952             cpu_put_psr(env, tmp);
977             tmp = ldl_p(mem_buf);
979                 env->fpr[(n - 32) / 2].l.lower = tmp;
981                 env->fpr[(n - 32) / 2].l.upper = tmp;
985         /* f32-f62 (double width, even numbers only) */
986         env->fpr[(n - 32) / 2].ll = tmp;
996             cpu_put_ccr(env, tmp >> 32);
997             env->asi = (tmp >> 24) & 0xff;
998             env->pstate = (tmp >> 8) & 0xfff;
999             cpu_put_cwp64(env, tmp & 0xff);
1017 #elif defined (TARGET_ARM)
1019 /* Old gdb always expect FPA registers. Newer (xml-aware) gdb only expect
1020 whatever the target description contains. Due to a historical mishap
1021 the FPA registers appear in between core integer regs and the CPSR.
1022 We hack round this by giving the FPA regs zero size when talking to a
1024 #define GDB_CORE_XML "arm-core.xml"
/* ARM: read register n — r0-r15, then the legacy 12-byte FPA registers
 * (reported as zeros, for old gdb layout compatibility), FPA status,
 * and the CPSR via cpsr_read(). */
1026 static int cpu_gdb_read_register(CPUARMState *env, uint8_t *mem_buf, int n)
1029         /* Core integer register.  */
1030         GET_REG32(env->regs[n]);
1033         /* FPA registers.  */
1037         memset(mem_buf, 0, 12);
1042         /* FPA status register.  */
1049         GET_REG32(cpsr_read(env));
1051     /* Unknown register.  */
1051 /* Unknown register. */
/* ARM: write register n; FPA registers/status are accepted but ignored,
 * CPSR goes through cpsr_write() so mode changes take effect. */
1055 static int cpu_gdb_write_register(CPUARMState *env, uint8_t *mem_buf, int n)
1059     tmp = ldl_p(mem_buf);
1061     /* Mask out low bit of PC to workaround gdb bugs.  This will probably
1062        cause problems if we ever implement the Jazelle DBX extensions.  */
1068         /* Core integer register.  */
1072     if (n < 24) { /* 16-23 */
1073         /* FPA registers (ignored).  */
1081         /* FPA status register (ignored).  */
1088         cpsr_write(env, tmp, 0xffffffff);
1091     /* Unknown register.  */
1095 #elif defined (TARGET_M68K)
1097 #define GDB_CORE_XML "cf-core.xml"
/* M68K: read register n — D0-D7 then A0-A7; FP registers are deferred
 * to the XML/coprocessor mechanism (ColdFire vs m68k differ). */
1099 static int cpu_gdb_read_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1103         GET_REG32(env->dregs[n]);
1104     } else if (n < 16) {
1106         GET_REG32(env->aregs[n - 8]);
1115     /* FP registers not included here because they vary between
1116        ColdFire and m68k.  Use XML bits for these.  */
/* M68K: write register n; mirror of the read side (D regs then A regs). */
1120 static int cpu_gdb_write_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1124     tmp = ldl_p(mem_buf);
1128         env->dregs[n] = tmp;
1129     } else if (n < 16) {
1131         env->aregs[n - 8] = tmp;
1146 #elif defined (TARGET_MIPS)
/* MIPS: read register n — GPRs, FPU regs 38-69 (64-bit when CP0 Status
 * FR is set, else one 32-bit half), FCSR/FIR, then CP0/HI/LO specials.
 * The PC read ORs in bit 0 to report MIPS16 mode to gdb. */
1148 static int cpu_gdb_read_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1151         GET_REGL(env->active_tc.gpr[n]);
1153     if (env->CP0_Config1 & (1 << CP0C1_FP)) {
1154         if (n >= 38 && n < 70) {
1155             if (env->CP0_Status & (1 << CP0St_FR)) {
1156                 GET_REGL(env->active_fpu.fpr[n - 38].d);
1158                 GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
1163             GET_REGL((int32_t)env->active_fpu.fcr31);
1165             GET_REGL((int32_t)env->active_fpu.fcr0);
1170         GET_REGL((int32_t)env->CP0_Status);
1172         GET_REGL(env->active_tc.LO[0]);
1174         GET_REGL(env->active_tc.HI[0]);
1176         GET_REGL(env->CP0_BadVAddr);
1178         GET_REGL((int32_t)env->CP0_Cause);
1180         GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
1182         GET_REGL(0); /* fp */
1184         GET_REGL((int32_t)env->CP0_PRid);
1186     if (n >= 73 && n <= 88) {
1187         /* 16 embedded regs.  */
1194 /* convert MIPS rounding mode in FCR31 to IEEE library */
1195 static unsigned int ieee_rm[] = {
1196 float_round_nearest_even,
1197 float_round_to_zero,
1201 #define RESTORE_ROUNDING_MODE \
1202 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], \
1203 &env->active_fpu.fp_status)
/* MIPS: write register n; mirror of the read side.  Writing FCR31 also
 * resyncs the softfloat rounding mode; writing PC uses bit 0 to
 * set/clear MIPS16 mode; read-only registers ignore the write. */
1205 static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1209     tmp = ldtul_p(mem_buf);
1212         env->active_tc.gpr[n] = tmp;
1213         return sizeof(target_ulong);
1215     if (env->CP0_Config1 & (1 << CP0C1_FP)
1216             && n >= 38 && n < 73) {
1218             if (env->CP0_Status & (1 << CP0St_FR)) {
1219                 env->active_fpu.fpr[n - 38].d = tmp;
1221                 env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
1226             env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
1227             /* set rounding mode */
1228             RESTORE_ROUNDING_MODE;
1231             env->active_fpu.fcr0 = tmp;
1234         return sizeof(target_ulong);
1238         env->CP0_Status = tmp;
1241         env->active_tc.LO[0] = tmp;
1244         env->active_tc.HI[0] = tmp;
1247         env->CP0_BadVAddr = tmp;
1250         env->CP0_Cause = tmp;
1253         env->active_tc.PC = tmp & ~(target_ulong)1;
1255             env->hflags |= MIPS_HFLAG_M16;
1257             env->hflags &= ~(MIPS_HFLAG_M16);
1260     case 72: /* fp, ignored */
1266         /* Other registers are readonly.  Ignore writes.  */
1270     return sizeof(target_ulong);
1272 #elif defined(TARGET_OPENRISC)
/* OpenRISC: read register n — GPRs, then PPC/NPC and further specials. */
1274 static int cpu_gdb_read_register(CPUOpenRISCState *env, uint8_t *mem_buf, int n)
1277         GET_REG32(env->gpr[n]);
1281             GET_REG32(env->ppc);
1284             GET_REG32(env->npc);
/* OpenRISC: write register n, bounds-checked against the CPUClass's
 * advertised core register count. */
1296 static int cpu_gdb_write_register(CPUOpenRISCState *env,
1297                                   uint8_t *mem_buf, int n)
1299     OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
1300     CPUClass *cc = CPU_GET_CLASS(cpu);
1303     if (n > cc->gdb_num_core_regs) {
1307     tmp = ldl_p(mem_buf);
1331 #elif defined (TARGET_SH4)
1333 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1334 /* FIXME: We should use XML for this. */
/* SH4: read register n.  r0-r7 come from the shadow bank (gregs[16..])
 * when both SR.MD and SR.RB are set; FP registers switch banks on
 * FPSCR.FR; high indices map back into gregs[] for the other bank. */
1336 static int cpu_gdb_read_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1340         if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1341             GET_REGL(env->gregs[n + 16]);
1343             GET_REGL(env->gregs[n]);
1346         GET_REGL(env->gregs[n]);
1356         GET_REGL(env->mach);
1358         GET_REGL(env->macl);
1362         GET_REGL(env->fpul);
1364         GET_REGL(env->fpscr);
1366         if (env->fpscr & FPSCR_FR) {
1367             stfl_p(mem_buf, env->fregs[n - 9]);
1369             stfl_p(mem_buf, env->fregs[n - 25]);
1377         GET_REGL(env->gregs[n - 43]);
1379         GET_REGL(env->gregs[n - (51 - 16)]);
/* SH4: write register n; mirror of the read side, including the
 * MD/RB shadow-bank and FPSCR.FR FP-bank selection. */
1385 static int cpu_gdb_write_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1389         if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1390             env->gregs[n + 16] = ldl_p(mem_buf);
1392             env->gregs[n] = ldl_p(mem_buf);
1396         env->gregs[n] = ldl_p(mem_buf);
1399         env->pc = ldl_p(mem_buf);
1402         env->pr = ldl_p(mem_buf);
1405         env->gbr = ldl_p(mem_buf);
1408         env->vbr = ldl_p(mem_buf);
1411         env->mach = ldl_p(mem_buf);
1414         env->macl = ldl_p(mem_buf);
1417         env->sr = ldl_p(mem_buf);
1420         env->fpul = ldl_p(mem_buf);
1423         env->fpscr = ldl_p(mem_buf);
1426         if (env->fpscr & FPSCR_FR) {
1427             env->fregs[n - 9] = ldfl_p(mem_buf);
1429             env->fregs[n - 25] = ldfl_p(mem_buf);
1433         env->ssr = ldl_p(mem_buf);
1436         env->spc = ldl_p(mem_buf);
1439         env->gregs[n - 43] = ldl_p(mem_buf);
1442         env->gregs[n - (51 - 16)] = ldl_p(mem_buf);
1450 #elif defined (TARGET_MICROBLAZE)
/* MicroBlaze: read register n — r0-r31 then the special regs (sregs). */
1452 static int cpu_gdb_read_register(CPUMBState *env, uint8_t *mem_buf, int n)
1455         GET_REG32(env->regs[n]);
1457         GET_REG32(env->sregs[n - 32]);
/* MicroBlaze: write register n, bounds-checked against the CPUClass's
 * advertised core register count. */
1462 static int cpu_gdb_write_register(CPUMBState *env, uint8_t *mem_buf, int n)
1464     MicroBlazeCPU *cpu = mb_env_get_cpu(env);
1465     CPUClass *cc = CPU_GET_CLASS(cpu);
1468     if (n > cc->gdb_num_core_regs) {
1472     tmp = ldl_p(mem_buf);
1477         env->sregs[n - 32] = tmp;
1481 #elif defined (TARGET_CRIS)
/* CRIS v10 (pre-v32) register read: GPRs, then pregs[] at varying
 * widths (8/16/32-bit depending on the register index). */
1484 read_register_crisv10(CPUCRISState *env, uint8_t *mem_buf, int n)
1487         GET_REG32(env->regs[n]);
1497         GET_REG8(env->pregs[n - 16]);
1499         GET_REG8(env->pregs[n - 16]);
1502         GET_REG16(env->pregs[n - 16]);
1505         GET_REG32(env->pregs[n - 16]);
/* CRIS: read register n.  Cores with PR_VR < 32 use the v10 layout
 * above; v32 exposes GPRs, pregs, and the support-function register
 * bank selected by PR_SRS. */
1513 static int cpu_gdb_read_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1517     if (env->pregs[PR_VR] < 32) {
1518         return read_register_crisv10(env, mem_buf, n);
1521     srs = env->pregs[PR_SRS];
1523         GET_REG32(env->regs[n]);
1526     if (n >= 21 && n < 32) {
1527         GET_REG32(env->pregs[n - 16]);
1529     if (n >= 33 && n < 49) {
1530         GET_REG32(env->sregs[srs][n - 33]);
1534         GET_REG8(env->pregs[0]);
1536         GET_REG8(env->pregs[1]);
1538         GET_REG32(env->pregs[2]);
1542         GET_REG16(env->pregs[4]);
/* CRIS: write register n; support-function registers are left
 * read-only (see the FIXME). */
1550 static int cpu_gdb_write_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1558     tmp = ldl_p(mem_buf);
1564     if (n >= 21 && n < 32) {
1565         env->pregs[n - 16] = tmp;
1568     /* FIXME: Should support function regs be writable?  */
1575         env->pregs[PR_PID] = tmp;
1588 #elif defined (TARGET_ALPHA)
/* Alpha: read register n — integer regs, FP regs (fir[]) at 32+, and
 * the FPCR via cpu_alpha_load_fpcr().  Register 31 is hardwired zero;
 * 65 is a gdb-protocol placeholder that must still occupy 8 bytes. */
1590 static int cpu_gdb_read_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1600         d.d = env->fir[n - 32];
1604         val = cpu_alpha_load_fpcr(env);
1614         /* 31 really is the zero register; 65 is unassigned in the
1615            gdb protocol, but is still required to occupy 8 bytes. */
/* Alpha: write register n; mirror of the read side, with the FPCR
 * going through cpu_alpha_store_fpcr(). */
1624 static int cpu_gdb_write_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1626     target_ulong tmp = ldtul_p(mem_buf);
1635         env->fir[n - 32] = d.d;
1638         cpu_alpha_store_fpcr(env, tmp);
1648         /* 31 really is the zero register; 65 is unassigned in the
1649            gdb protocol, but is still required to occupy 8 bytes. */
1656 #elif defined (TARGET_S390X)
/* S390X: read register n.  The PSW mask is reported with the live
 * condition code recomputed by calc_cc() and deposited into bits 44-45;
 * then GPRs, access regs, FPC and FP regs. */
1658 static int cpu_gdb_read_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1664     case S390_PSWM_REGNUM:
1665         cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);
1666         val = deposit64(env->psw.mask, 44, 2, cc_op);
1668     case S390_PSWA_REGNUM:
1669         GET_REGL(env->psw.addr);
1670     case S390_R0_REGNUM ... S390_R15_REGNUM:
1671         GET_REGL(env->regs[n-S390_R0_REGNUM]);
1672     case S390_A0_REGNUM ... S390_A15_REGNUM:
1673         GET_REG32(env->aregs[n-S390_A0_REGNUM]);
1674     case S390_FPC_REGNUM:
1675         GET_REG32(env->fpc);
1676     case S390_F0_REGNUM ... S390_F15_REGNUM:
1677         GET_REG64(env->fregs[n-S390_F0_REGNUM].ll);
/* S390X: write register n; mirror of the read side.  Writing the PSW
 * mask extracts bits 44-45 back into cc_op. */
1683 static int cpu_gdb_write_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1688     tmpl = ldtul_p(mem_buf);
1689     tmp32 = ldl_p(mem_buf);
1692     case S390_PSWM_REGNUM:
1693         env->psw.mask = tmpl;
1694         env->cc_op = extract64(tmpl, 44, 2);
1696     case S390_PSWA_REGNUM:
1697         env->psw.addr = tmpl;
1699     case S390_R0_REGNUM ... S390_R15_REGNUM:
1700         env->regs[n-S390_R0_REGNUM] = tmpl;
1702     case S390_A0_REGNUM ... S390_A15_REGNUM:
1703         env->aregs[n-S390_A0_REGNUM] = tmp32;
1706     case S390_FPC_REGNUM:
1710     case S390_F0_REGNUM ... S390_F15_REGNUM:
1711         env->fregs[n-S390_F0_REGNUM].ll = tmpl;
1718 #elif defined (TARGET_LM32)
1720 #include "hw/lm32/lm32_pic.h"
/* LM32: read register n — GPRs, EBA/DEBA, and the interrupt mask and
 * pending bits fetched live from the PIC model. */
1722 static int cpu_gdb_read_register(CPULM32State *env, uint8_t *mem_buf, int n)
1725         GET_REG32(env->regs[n]);
1730             /* FIXME: put in right exception ID */
1734             GET_REG32(env->eba);
1736             GET_REG32(env->deba);
1740             GET_REG32(lm32_pic_get_im(env->pic_state));
1742             GET_REG32(lm32_pic_get_ip(env->pic_state));
/* LM32: write register n; IM/IP writes are forwarded to the PIC model. */
1748 static int cpu_gdb_write_register(CPULM32State *env, uint8_t *mem_buf, int n)
1750     LM32CPU *cpu = lm32_env_get_cpu(env);
1751     CPUClass *cc = CPU_GET_CLASS(cpu);
1754     if (n > cc->gdb_num_core_regs) {
1758     tmp = ldl_p(mem_buf);
1777             lm32_pic_set_im(env->pic_state, tmp);
1780             lm32_pic_set_ip(env->pic_state, tmp);
1786 #elif defined(TARGET_XTENSA)
/* Xtensa: read register n via the per-core gdb_regmap table, which maps
 * gdb register numbers to typed targets (windowed AR regs — synced from
 * the window first — special regs, user regs, FP, current window). */
1788 static int cpu_gdb_read_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1790     const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1792     if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1796     switch (reg->type) {
1801         xtensa_sync_phys_from_window(env);
1802         GET_REG32(env->phys_regs[(reg->targno & 0xff) % env->config->nareg]);
1805         GET_REG32(env->sregs[reg->targno & 0xff]);
1808         GET_REG32(env->uregs[reg->targno & 0xff]);
1811         GET_REG32(float32_val(env->fregs[reg->targno & 0x0f]));
1814         GET_REG32(env->regs[reg->targno & 0x0f]);
1817         qemu_log("%s from reg %d of unsupported type %d\n",
1818                  __func__, n, reg->type);
/* Xtensa: write register n; mirror of the read side, re-syncing the
 * register window after a physical AR register write. */
1823 static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1826     const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1828     if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1832     tmp = ldl_p(mem_buf);
1834     switch (reg->type) {
1840         env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
1841         xtensa_sync_window_from_phys(env);
1845         env->sregs[reg->targno & 0xff] = tmp;
1849         env->uregs[reg->targno & 0xff] = tmp;
1853         env->fregs[reg->targno & 0x0f] = make_float32(tmp);
1857         env->regs[reg->targno & 0x0f] = tmp;
1861         qemu_log("%s to reg %d of unsupported type %d\n",
1862                  __func__, n, reg->type);
1870 static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
1875 static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
1883 /* Encode data using the encoding for 'x' packets. */
/* Encode data using the gdb 'x' (binary) packet escaping: the listed
 * special bytes must be escaped; returns the encoded length. */
1884 static int memtox(char *buf, const char *mem, int len)
1892         case '#': case '$': case '*': case '}':
/* Resolve a qXfer:features:read annex name (p, up to ':', end written to
 * *newp).  "target.xml" is generated once into a static buffer from the
 * core XML plus every registered coprocessor's XML include; any other
 * name is looked up in the built-in xml_builtin table.  NULL if unknown. */
1904 static const char *get_feature_xml(const char *p, const char **newp)
1909     static char target_xml[1024];
1912     while (p[len] && p[len] != ':')
1917     if (strncmp(p, "target.xml", len) == 0) {
1918         /* Generate the XML description for this CPU.  */
1919         if (!target_xml[0]) {
1920             GDBRegisterState *r;
1921             CPUState *cpu = first_cpu;
1923             snprintf(target_xml, sizeof(target_xml),
1924                      "<?xml version=\"1.0\"?>"
1925                      "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1927                      "<xi:include href=\"%s\"/>",
1930             for (r = cpu->gdb_regs; r; r = r->next) {
1931                 pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
1932                 pstrcat(target_xml, sizeof(target_xml), r->xml);
1933                 pstrcat(target_xml, sizeof(target_xml), "\"/>");
1935             pstrcat(target_xml, sizeof(target_xml), "</target>");
1939     for (i = 0; ; i++) {
1940         name = xml_builtin[i][0];
1941         if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
1944     return name ? xml_builtin[i][1] : NULL;
/* Dispatch a register read: core registers go to the per-target
 * cpu_gdb_read_register(), higher numbers are looked up in the CPU's
 * coprocessor register list.  Returns the size written (0 if unknown —
 * the fall-through return is outside this view). */
1948 static int gdb_read_register(CPUState *cpu, uint8_t *mem_buf, int reg)
1950     CPUClass *cc = CPU_GET_CLASS(cpu);
1951     CPUArchState *env = cpu->env_ptr;
1952     GDBRegisterState *r;
1954     if (reg < cc->gdb_num_core_regs) {
1955         return cpu_gdb_read_register(env, mem_buf, reg);
1958     for (r = cpu->gdb_regs; r; r = r->next) {
1959         if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1960             return r->get_reg(env, mem_buf, reg - r->base_reg);
/* Dispatch a register write; exact mirror of gdb_read_register() but
 * through each coprocessor block's set_reg callback. */
1966 static int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg)
1968     CPUClass *cc = CPU_GET_CLASS(cpu);
1969     CPUArchState *env = cpu->env_ptr;
1970     GDBRegisterState *r;
1972     if (reg < cc->gdb_num_core_regs) {
1973         return cpu_gdb_write_register(env, mem_buf, reg);
1976     for (r = cpu->gdb_regs; r; r = r->next) {
1977         if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1978             return r->set_reg(env, mem_buf, reg - r->base_reg);
1984 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
1985 specifies the first register number and these registers are included in
1986 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
1987 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
/* Append a coprocessor register block to the CPU's list (duplicates,
 * identified by XML name, are skipped), numbering it after the regs
 * already registered.  A nonzero g_pos asserts where the block must land
 * in the 'g' packet; a mismatch is reported, not fixed. */
1990 void gdb_register_coprocessor(CPUState *cpu,
1991                               gdb_reg_cb get_reg, gdb_reg_cb set_reg,
1992                               int num_regs, const char *xml, int g_pos)
1994     GDBRegisterState *s;
1995     GDBRegisterState **p;
1999         /* Check for duplicates.  */
2000         if (strcmp((*p)->xml, xml) == 0)
2005     s = g_new0(GDBRegisterState, 1);
2006     s->base_reg = cpu->gdb_num_regs;
2007     s->num_regs = num_regs;
2008     s->get_reg = get_reg;
2009     s->set_reg = set_reg;
2012     /* Add to end of list.  */
2013     cpu->gdb_num_regs += num_regs;
2016         if (g_pos != s->base_reg) {
2017             fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
2018                     "Expected %d got %d\n", xml, g_pos, s->base_reg);
2023 #ifndef CONFIG_USER_ONLY
2024 static const int xlat_gdb_type[] = {
2025 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
2026 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
2027 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
/* Insert a breakpoint/watchpoint of the given GDB type on EVERY CPU
 * (gdb expects breakpoints to apply globally).  Under KVM the request
 * is delegated wholesale to the KVM debug API.  Watchpoint types are
 * translated to BP_MEM_* flags via xlat_gdb_type. */
2031 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
2037     if (kvm_enabled()) {
2038         return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
2042     case GDB_BREAKPOINT_SW:
2043     case GDB_BREAKPOINT_HW:
2044         for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2046             err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
2051 #ifndef CONFIG_USER_ONLY
2052     case GDB_WATCHPOINT_WRITE:
2053     case GDB_WATCHPOINT_READ:
2054     case GDB_WATCHPOINT_ACCESS:
2055         for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2057             err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
/* Remove a breakpoint/watchpoint; exact mirror of gdb_breakpoint_insert
 * (all CPUs, KVM delegation, xlat_gdb_type for watchpoint flags). */
2069 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
2075     if (kvm_enabled()) {
2076         return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
2080     case GDB_BREAKPOINT_SW:
2081     case GDB_BREAKPOINT_HW:
2082         for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2084             err = cpu_breakpoint_remove(env, addr, BP_GDB);
2089 #ifndef CONFIG_USER_ONLY
2090     case GDB_WATCHPOINT_WRITE:
2091     case GDB_WATCHPOINT_READ:
2092     case GDB_WATCHPOINT_ACCESS:
2093         for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2095             err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
/* Drop every gdb-owned breakpoint and watchpoint on all CPUs — used
 * when gdb (re)attaches or detaches so stale state cannot linger. */
2106 static void gdb_breakpoint_remove_all(void)
2111     if (kvm_enabled()) {
2112         kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
2116     for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2118         cpu_breakpoint_remove_all(env, BP_GDB);
2119 #ifndef CONFIG_USER_ONLY
2120         cpu_watchpoint_remove_all(env, BP_GDB);
/* Set the current (step/continue) CPU's program counter via the
 * CPUClass hook, after synchronizing state from the accelerator. */
2125 static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
2127     CPUState *cpu = s->c_cpu;
2128     CPUClass *cc = CPU_GET_CLASS(cpu);
2130     cpu_synchronize_state(cpu);
2132         cc->set_pc(cpu, pc);
/* Look up a CPU by the thread id gdb uses (cpu_index); NULL-terminated
 * scan of the global CPU list. */
2136 static CPUState *find_cpu(uint32_t thread_id)
2140     for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2141         if (cpu_index(cpu) == thread_id) {
2149 static int gdb_handle_packet(GDBState *s, const char *line_buf)
2154 int ch, reg_size, type, res;
2155 char buf[MAX_PACKET_LENGTH];
2156 uint8_t mem_buf[MAX_PACKET_LENGTH];
2158 target_ulong addr, len;
2161 printf("command='%s'\n", line_buf);
2167 /* TODO: Make this return the correct value for user-mode. */
2168 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
2169 cpu_index(s->c_cpu));
2171 /* Remove all the breakpoints when this query is issued,
2172      * because gdb is doing an initial connect and the state
2173 * should be cleaned up.
2175 gdb_breakpoint_remove_all();
2179 addr = strtoull(p, (char **)&p, 16);
2180 gdb_set_cpu_pc(s, addr);
2186 s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
2187 if (s->signal == -1)
2192 if (strncmp(p, "Cont", 4) == 0) {
2193 int res_signal, res_thread;
2197 put_packet(s, "vCont;c;C;s;S");
2212 if (action == 'C' || action == 'S') {
2213 signal = strtoul(p, (char **)&p, 16);
2214 } else if (action != 'c' && action != 's') {
2220 thread = strtoull(p+1, (char **)&p, 16);
2222 action = tolower(action);
2223 if (res == 0 || (res == 'c' && action == 's')) {
2225 res_signal = signal;
2226 res_thread = thread;
2230 if (res_thread != -1 && res_thread != 0) {
2231 cpu = find_cpu(res_thread);
2233 put_packet(s, "E22");
2239 cpu_single_step(s->c_cpu, sstep_flags);
2241 s->signal = res_signal;
2247 goto unknown_command;
2250 #ifdef CONFIG_USER_ONLY
2251 /* Kill the target */
2252 fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
2257 gdb_breakpoint_remove_all();
2258 gdb_syscall_mode = GDB_SYS_DISABLED;
2260 put_packet(s, "OK");
2264 addr = strtoull(p, (char **)&p, 16);
2265 gdb_set_cpu_pc(s, addr);
2267 cpu_single_step(s->c_cpu, sstep_flags);
2275 ret = strtoull(p, (char **)&p, 16);
2278 err = strtoull(p, (char **)&p, 16);
2285 if (s->current_syscall_cb) {
2286 s->current_syscall_cb(s->c_cpu, ret, err);
2287 s->current_syscall_cb = NULL;
2290 put_packet(s, "T02");
2297 cpu_synchronize_state(s->g_cpu);
2299 for (addr = 0; addr < s->g_cpu->gdb_num_regs; addr++) {
2300 reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
2303 memtohex(buf, mem_buf, len);
2307 cpu_synchronize_state(s->g_cpu);
2308 registers = mem_buf;
2309 len = strlen(p) / 2;
2310 hextomem((uint8_t *)registers, p, len);
2311 for (addr = 0; addr < s->g_cpu->gdb_num_regs && len > 0; addr++) {
2312 reg_size = gdb_write_register(s->g_cpu, registers, addr);
2314 registers += reg_size;
2316 put_packet(s, "OK");
2319 addr = strtoull(p, (char **)&p, 16);
2322 len = strtoull(p, NULL, 16);
2323 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, false) != 0) {
2324 put_packet (s, "E14");
2326 memtohex(buf, mem_buf, len);
2331 addr = strtoull(p, (char **)&p, 16);
2334 len = strtoull(p, (char **)&p, 16);
2337 hextomem(mem_buf, p, len);
2338 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len,
2340 put_packet(s, "E14");
2342 put_packet(s, "OK");
2346 /* Older gdb are really dumb, and don't use 'g' if 'p' is avaialable.
2347 This works, but can be very slow. Anything new enough to
2348 understand XML also knows how to use this properly. */
2350 goto unknown_command;
2351 addr = strtoull(p, (char **)&p, 16);
2352 reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
2354 memtohex(buf, mem_buf, reg_size);
2357 put_packet(s, "E14");
2362 goto unknown_command;
2363 addr = strtoull(p, (char **)&p, 16);
2366 reg_size = strlen(p) / 2;
2367 hextomem(mem_buf, p, reg_size);
2368 gdb_write_register(s->g_cpu, mem_buf, addr);
2369 put_packet(s, "OK");
2373 type = strtoul(p, (char **)&p, 16);
2376 addr = strtoull(p, (char **)&p, 16);
2379 len = strtoull(p, (char **)&p, 16);
2381 res = gdb_breakpoint_insert(addr, len, type);
2383 res = gdb_breakpoint_remove(addr, len, type);
2385 put_packet(s, "OK");
2386 else if (res == -ENOSYS)
2389 put_packet(s, "E22");
2393 thread = strtoull(p, (char **)&p, 16);
2394 if (thread == -1 || thread == 0) {
2395 put_packet(s, "OK");
2398 cpu = find_cpu(thread);
2400 put_packet(s, "E22");
2406 put_packet(s, "OK");
2410 put_packet(s, "OK");
2413 put_packet(s, "E22");
2418 thread = strtoull(p, (char **)&p, 16);
2419 cpu = find_cpu(thread);
2422 put_packet(s, "OK");
2424 put_packet(s, "E22");
2429 /* parse any 'q' packets here */
2430 if (!strcmp(p,"qemu.sstepbits")) {
2431 /* Query Breakpoint bit definitions */
2432 snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2438 } else if (strncmp(p,"qemu.sstep",10) == 0) {
2439 /* Display or change the sstep_flags */
2442 /* Display current setting */
2443 snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
2448 type = strtoul(p, (char **)&p, 16);
2450 put_packet(s, "OK");
2452 } else if (strcmp(p,"C") == 0) {
2453 /* "Current thread" remains vague in the spec, so always return
2454 * the first CPU (gdb returns the first thread). */
2455 put_packet(s, "QC1");
2457 } else if (strcmp(p,"fThreadInfo") == 0) {
2458 s->query_cpu = first_cpu;
2459 goto report_cpuinfo;
2460 } else if (strcmp(p,"sThreadInfo") == 0) {
2463 snprintf(buf, sizeof(buf), "m%x", cpu_index(s->query_cpu));
2465 s->query_cpu = s->query_cpu->next_cpu;
2469 } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
2470 thread = strtoull(p+16, (char **)&p, 16);
2471 cpu = find_cpu(thread);
2473 cpu_synchronize_state(cpu);
2474 len = snprintf((char *)mem_buf, sizeof(mem_buf),
2475 "CPU#%d [%s]", cpu->cpu_index,
2476 cpu->halted ? "halted " : "running");
2477 memtohex(buf, mem_buf, len);
2482 #ifdef CONFIG_USER_ONLY
2483 else if (strncmp(p, "Offsets", 7) == 0) {
2484 CPUArchState *env = s->c_cpu->env_ptr;
2485 TaskState *ts = env->opaque;
2487 snprintf(buf, sizeof(buf),
2488 "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
2489 ";Bss=" TARGET_ABI_FMT_lx,
2490 ts->info->code_offset,
2491 ts->info->data_offset,
2492 ts->info->data_offset);
2496 #else /* !CONFIG_USER_ONLY */
2497 else if (strncmp(p, "Rcmd,", 5) == 0) {
2498 int len = strlen(p + 5);
2500 if ((len % 2) != 0) {
2501 put_packet(s, "E01");
2504 hextomem(mem_buf, p + 5, len);
2507 qemu_chr_be_write(s->mon_chr, mem_buf, len);
2508 put_packet(s, "OK");
2511 #endif /* !CONFIG_USER_ONLY */
2512 if (strncmp(p, "Supported", 9) == 0) {
2513 snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
2515 pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
2521 if (strncmp(p, "Xfer:features:read:", 19) == 0) {
2523 target_ulong total_len;
2527 xml = get_feature_xml(p, &p);
2529 snprintf(buf, sizeof(buf), "E00");
2536 addr = strtoul(p, (char **)&p, 16);
2539 len = strtoul(p, (char **)&p, 16);
2541 total_len = strlen(xml);
2542 if (addr > total_len) {
2543 snprintf(buf, sizeof(buf), "E00");
2547 if (len > (MAX_PACKET_LENGTH - 5) / 2)
2548 len = (MAX_PACKET_LENGTH - 5) / 2;
2549 if (len < total_len - addr) {
2551 len = memtox(buf + 1, xml + addr, len);
2554 len = memtox(buf + 1, xml + addr, total_len - addr);
2556 put_packet_binary(s, buf, len + 1);
2560 /* Unrecognised 'q' command. */
2561 goto unknown_command;
2565 /* put empty packet */
/* Make @cpu the current CPU for both continue ('c') and general ('g')
 * operations, so subsequent stop replies and register/memory accesses
 * refer to the CPU that triggered the stop. */
void gdb_set_stop_cpu(CPUState *cpu)
    gdbserver_state->c_cpu = cpu;
    gdbserver_state->g_cpu = cpu;
2579 #ifndef CONFIG_USER_ONLY
/* VM run-state change hook (system mode only): when the VM stops,
 * translate the RunState into a GDB stop-reply packet ("T<signal>...")
 * and send it to the debugger.
 * NOTE(review): excerpt elided -- the switch framing, 'break's and at
 * least one line between the SAVE/RESTORE_VM and FINISH_MIGRATE cases
 * are missing; verify fallthrough behavior against the full source. */
static void gdb_vm_state_change(void *opaque, int running, RunState state)
    GDBState *s = gdbserver_state;
    CPUArchState *env = s->c_cpu->env_ptr;
    CPUState *cpu = s->c_cpu;
    /* Nothing to report while running or before a connection is active. */
    if (running || s->state == RS_INACTIVE) {
    /* Is there a GDB syscall waiting to be sent? */
    if (s->current_syscall_cb) {
        put_packet(s, s->syscall_buf);
    case RUN_STATE_DEBUG:
        /* Watchpoint hit: report a watch/rwatch/awatch stop reply. */
        if (env->watchpoint_hit) {
            switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
            snprintf(buf, sizeof(buf),
                     "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
                     GDB_SIGNAL_TRAP, cpu_index(cpu), type,
                     env->watchpoint_hit->vaddr);
            env->watchpoint_hit = NULL;  /* consume the hit */
        ret = GDB_SIGNAL_TRAP;
    case RUN_STATE_PAUSED:
        ret = GDB_SIGNAL_INT;
    case RUN_STATE_SHUTDOWN:
        ret = GDB_SIGNAL_QUIT;
    case RUN_STATE_IO_ERROR:
        ret = GDB_SIGNAL_IO;
    case RUN_STATE_WATCHDOG:
        ret = GDB_SIGNAL_ALRM;
    case RUN_STATE_INTERNAL_ERROR:
        ret = GDB_SIGNAL_ABRT;
    case RUN_STATE_SAVE_VM:
    case RUN_STATE_RESTORE_VM:
        /* NOTE(review): line elided here -- likely an early return. */
    case RUN_STATE_FINISH_MIGRATE:
        ret = GDB_SIGNAL_XCPU;
        ret = GDB_SIGNAL_UNKNOWN;   /* default: unrecognised run state */
    snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, cpu_index(cpu));
    /* disable single step if it was enabled */
    cpu_single_step(cpu, 0);
/* Send a gdb syscall (file-I/O) request on behalf of the guest.
   This accepts limited printf-style format specifiers, specifically:
    %x  - target_ulong argument printed in hex.
    %lx - 64-bit argument printed in hex.
    %s  - string pointer (target_ulong) and length (int) pair.
   The completion callback @cb is invoked later from the 'F' reply
   handler with the syscall's return value and errno.
   NOTE(review): excerpt elided (va_start/va_end, format loop framing,
   early-return guard); verify against full source. */
void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
    s = gdbserver_state;
    s->current_syscall_cb = cb;
#ifndef CONFIG_USER_ONLY
    /* System mode: stop the VM before conversing with gdb. */
    vm_stop(RUN_STATE_DEBUG);
    p_end = &s->syscall_buf[sizeof(s->syscall_buf)];  /* bound for snprintf */
    /* '%x': one target_ulong, printed in hex. */
    addr = va_arg(va, target_ulong);
    p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
    /* '%l' must be followed by 'x': 64-bit value in hex. */
    if (*(fmt++) != 'x')
    i64 = va_arg(va, uint64_t);
    p += snprintf(p, p_end - p, "%" PRIx64, i64);
    /* '%s': guest pointer plus length, encoded as "addr/len". */
    addr = va_arg(va, target_ulong);
    p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
                  addr, va_arg(va, int));
    fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
#ifdef CONFIG_USER_ONLY
    /* User mode: send immediately and wait for gdb in gdb_handlesig(). */
    put_packet(s, s->syscall_buf);
    gdb_handlesig(s->c_cpu, 0);
    /* In this case wait to send the syscall packet until notification that
       the CPU has stopped.  This must be done because if the packet is sent
       now the reply from the syscall request could be received while the CPU
       is still in the running state, which can cause packets to be dropped
       and state transition 'T' packets to be sent while the syscall is still
       being processed. */
/* Feed one byte from the gdb connection into the packet state machine
 * (RS_IDLE -> RS_GETLINE -> RS_CHKSUM1 -> RS_CHKSUM2), ACK/NACK-ing and
 * dispatching complete packets to gdb_handle_packet().
 * NOTE(review): excerpt elided -- switch framing and several state
 * transitions are missing; verify against full source. */
static void gdb_read_byte(GDBState *s, int ch)
#ifndef CONFIG_USER_ONLY
    if (s->last_packet_len) {
        /* Waiting for a response to the last packet.  If we see the start
           of a new command then abandon the previous response. */
        printf("Got NACK, retransmitting\n");
        put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
        printf("Got ACK\n");
        printf("Got '%c' when expecting ACK/NACK\n", ch);
        /* '+' (ACK) or '$' (new packet) both end the retransmit wait. */
        if (ch == '+' || ch == '$')
            s->last_packet_len = 0;
    if (runstate_is_running()) {
        /* when the CPU is running, we cannot do anything except stop
           it when receiving a char */
        vm_stop(RUN_STATE_PAUSED);
    /* '$' starts a new packet body. */
    s->line_buf_index = 0;
    s->state = RS_GETLINE;
    /* '#' ends the body; two checksum hex digits follow. */
    s->state = RS_CHKSUM1;
    } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
    s->line_buf[s->line_buf_index++] = ch;
    s->line_buf[s->line_buf_index] = '\0';   /* NUL-terminate the body */
    s->line_csum = fromhex(ch) << 4;   /* high nibble of the checksum */
    s->state = RS_CHKSUM2;
    s->line_csum |= fromhex(ch);       /* low nibble of the checksum */
    /* Verify the mod-256 sum of the packet body. */
    for(i = 0; i < s->line_buf_index; i++) {
        csum += s->line_buf[i];
    if (s->line_csum != (csum & 0xff)) {
        put_buffer(s, &reply, 1);   /* checksum mismatch: send NACK */
        put_buffer(s, &reply, 1);   /* checksum ok: send ACK, then handle */
    s->state = gdb_handle_packet(s, s->line_buf);
/* Tell the remote gdb that the process has exited. */
void gdb_exit(CPUArchState *env, int code)
    s = gdbserver_state;
#ifdef CONFIG_USER_ONLY
    /* No-op when the stub was never started or never connected. */
    if (gdbserver_fd < 0 || s->fd < 0) {
    /* "W<code>": process exited with the given status byte. */
    snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
#ifndef CONFIG_USER_ONLY
    /* System mode: tear down the gdb character device. */
    qemu_chr_delete(s->chr);
2828 #ifdef CONFIG_USER_ONLY
2834 s = gdbserver_state;
2836 if (gdbserver_fd < 0 || s->fd < 0)
/* User-mode: report signal @sig to the attached gdb and block, pumping
 * bytes from the gdb socket through gdb_read_byte(), until gdb resumes
 * the target (running_state set by the packet handler).
 * NOTE(review): return type line and return paths are elided; verify. */
gdb_handlesig(CPUState *cpu, int sig)
    CPUArchState *env = cpu->env_ptr;
    s = gdbserver_state;
    /* Nothing to do when the stub is not connected. */
    if (gdbserver_fd < 0 || s->fd < 0) {
    /* disable single step if it was enabled */
    cpu_single_step(cpu, 0);
    /* "S<signal>" stop reply, using the GDB signal numbering. */
    snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb(sig));
    /* put_packet() might have detected that the peer terminated the
       connection. */
    /* Block here until the packet handler flips running_state. */
    s->running_state = 0;
    while (s->running_state == 0) {
        n = read(s->fd, buf, 256);
        for (i = 0; i < n; i++) {
            gdb_read_byte(s, buf[i]);
        } else if (n == 0 || errno != EAGAIN) {
            /* XXX: Connection closed.  Should probably wait for another
               connection before continuing. */
/* Tell the remote gdb that the process has exited due to SIG. */
void gdb_signalled(CPUArchState *env, int sig)
    s = gdbserver_state;
    /* Nothing to do when the stub is not connected. */
    if (gdbserver_fd < 0 || s->fd < 0) {
    /* "X<signal>": process terminated by the given signal. */
    snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb(sig));
/* Wait for a gdb client to connect on the listening socket, then create
 * and install a fresh GDBState for the connection (user-mode TCP path).
 * NOTE(review): the accept retry loop framing is elided; verify. */
static void gdb_accept(void)
    struct sockaddr_in sockaddr;
    len = sizeof(sockaddr);
    fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
    if (fd < 0 && errno != EINTR) {   /* EINTR: retry the accept */
    } else if (fd >= 0) {
        /* Don't leak the connection fd across exec(). */
        fcntl(fd, F_SETFD, FD_CLOEXEC);
    /* set short latency */
    socket_set_nodelay(fd);
    s = g_malloc0(sizeof(GDBState));
    s->c_cpu = first_cpu;   /* default CPU for continue/step operations */
    s->g_cpu = first_cpu;   /* default CPU for register/memory operations */
    gdbserver_state = s;
    /* Non-blocking I/O on the connection. */
    fcntl(fd, F_SETFL, O_NONBLOCK);
/* Create a TCP listening socket bound to @port on all interfaces and
 * return its fd.  NOTE(review): error-handling paths (perror/close/
 * return -1) are elided in this excerpt; verify against full source. */
static int gdbserver_open(int port)
    struct sockaddr_in sockaddr;
    fd = socket(PF_INET, SOCK_STREAM, 0);
    /* Don't leak the listening socket across exec(). */
    fcntl(fd, F_SETFD, FD_CLOEXEC);
    /* allow fast reuse */
    qemu_setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
    sockaddr.sin_family = AF_INET;
    sockaddr.sin_port = htons(port);
    sockaddr.sin_addr.s_addr = 0;   /* 0 == INADDR_ANY: all interfaces */
    ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
    ret = listen(fd, 0);
/* User-mode entry point: open the gdb listening socket on @port and
 * wait for the first debugger connection. */
int gdbserver_start(int port)
    gdbserver_fd = gdbserver_open(port);
    if (gdbserver_fd < 0)
    /* accept connections */
/* Disable gdb stub for child processes. */
void gdbserver_fork(CPUArchState *env)
    GDBState *s = gdbserver_state;
    if (gdbserver_fd < 0 || s->fd < 0)
    /* Drop every gdb-owned breakpoint/watchpoint in the child. */
    cpu_breakpoint_remove_all(env, BP_GDB);
    cpu_watchpoint_remove_all(env, BP_GDB);
/* Chardev callback: report how many bytes the stub is willing to take. */
static int gdb_chr_can_receive(void *opaque)
    /* We can handle an arbitrarily large amount of data.
       Pick the maximum packet size, which is as good as anything. */
    return MAX_PACKET_LENGTH;
/* Chardev callback: feed each received byte into the packet parser. */
static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
    for (i = 0; i < size; i++) {
        gdb_read_byte(gdbserver_state, buf[i]);
/* Chardev event callback: on a new gdb connection, pause the VM so the
 * debugger attaches to a stopped target. */
static void gdb_chr_event(void *opaque, int event)
    case CHR_EVENT_OPENED:
        vm_stop(RUN_STATE_PAUSED);
/* Forward @len bytes of monitor console output to gdb.  The payload is
 * hex-encoded (2 chars per byte), so at most half a packet of text fits;
 * NOTE(review): the 'O' prefix write and put_packet are elided here. */
static void gdb_monitor_output(GDBState *s, const char *msg, int len)
    char buf[MAX_PACKET_LENGTH];
    /* Clamp so the hex-encoded payload fits after the packet prefix. */
    if (len > (MAX_PACKET_LENGTH/2) - 1)
        len = (MAX_PACKET_LENGTH/2) - 1;
    memtohex(buf + 1, (uint8_t *)msg, len);
/* Chardev write hook for the gdb monitor console: split @buf into
 * chunks small enough to hex-encode into one packet each and forward
 * them via gdb_monitor_output().  NOTE(review): the loop framing and
 * return value are elided; verify against full source. */
static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
    const char *p = (const char *)buf;
    /* Max payload bytes per packet after hex encoding (2 chars/byte). */
    max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
    if (len <= max_sz) {
        gdb_monitor_output(gdbserver_state, p, len);
    gdb_monitor_output(gdbserver_state, p, max_sz);
/* SIGINT handler installed for "-gdb stdio": pause the VM (so gdb gets
 * control) instead of letting the signal kill QEMU. */
static void gdb_sigterm_handler(int signal)
    if (runstate_is_running()) {
        vm_stop(RUN_STATE_PAUSED);
3065 int gdbserver_start(const char *device)
3068 char gdbstub_device_name[128];
3069 CharDriverState *chr = NULL;
3070 CharDriverState *mon_chr;
3074 if (strcmp(device, "none") != 0) {
3075 if (strstart(device, "tcp:", NULL)) {
3076 /* enforce required TCP attributes */
3077 snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
3078 "%s,nowait,nodelay,server", device);
3079 device = gdbstub_device_name;
3082 else if (strcmp(device, "stdio") == 0) {
3083 struct sigaction act;
3085 memset(&act, 0, sizeof(act));
3086 act.sa_handler = gdb_sigterm_handler;
3087 sigaction(SIGINT, &act, NULL);
3090 chr = qemu_chr_new("gdb", device, NULL);
3094 qemu_chr_fe_claim_no_fail(chr);
3095 qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
3096 gdb_chr_event, NULL);
3099 s = gdbserver_state;
3101 s = g_malloc0(sizeof(GDBState));
3102 gdbserver_state = s;
3104 qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
3106 /* Initialize a monitor terminal for gdb */
3107 mon_chr = g_malloc0(sizeof(*mon_chr));
3108 mon_chr->chr_write = gdb_monitor_write;
3109 monitor_init(mon_chr, 0);
3112 qemu_chr_delete(s->chr);
3113 mon_chr = s->mon_chr;
3114 memset(s, 0, sizeof(GDBState));
3116 s->c_cpu = first_cpu;
3117 s->g_cpu = first_cpu;
3119 s->state = chr ? RS_IDLE : RS_INACTIVE;
3120 s->mon_chr = mon_chr;
3121 s->current_syscall_cb = NULL;