4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu-common.h"
21 #ifdef CONFIG_USER_ONLY
32 #include "monitor/monitor.h"
33 #include "sysemu/char.h"
34 #include "sysemu/sysemu.h"
35 #include "exec/gdbstub.h"
38 #define MAX_PACKET_LENGTH 4096
41 #include "qemu/sockets.h"
42 #include "sysemu/kvm.h"
43 #include "qemu/bitops.h"
/* Read (is_write == false) or write (is_write == true) guest memory on
 * behalf of the debugger.  Prefers a CPU-class-specific memory_rw_debug
 * hook when the CPUClass provides one (for CPUs with debug-specific
 * address translation); otherwise falls back to the generic
 * cpu_memory_rw_debug().  Returns the underlying helper's status
 * (0 on success by QEMU convention).
 */
45 static inline int target_memory_rw_debug(CPUState *cpu, target_ulong addr,
46 uint8_t *buf, int len, bool is_write)
48 CPUClass *cc = CPU_GET_CLASS(cpu);
50 if (cc->memory_rw_debug) {
51 return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
53 return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
65 GDB_SIGNAL_UNKNOWN = 143
68 #ifdef CONFIG_USER_ONLY
70 /* Map target signal numbers to GDB protocol signal numbers and vice
71 * versa. For user emulation's currently supported systems, we can
72 * assume most signals are defined.
75 static int gdb_signal_table[] = {
235 /* In system mode we only need SIGINT and SIGTRAP; other signals
236 are not yet supported. */
243 static int gdb_signal_table[] = {
253 #ifdef CONFIG_USER_ONLY
/* Map a target (guest) signal number to a GDB protocol signal number.
 * Linear scan of gdb_signal_table; returns GDB_SIGNAL_UNKNOWN when the
 * signal has no GDB equivalent.  User-mode emulation only.
 */
254 static int target_signal_to_gdb (int sig)
257 for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
258 if (gdb_signal_table[i] == sig)
260 return GDB_SIGNAL_UNKNOWN;
/* Map a GDB protocol signal number back to a target signal number by
 * direct table lookup.  A negative 'sig' promotes to a huge unsigned
 * value in the comparison against ARRAY_SIZE (size_t) and so fails the
 * bounds test, which is the safe outcome; the out-of-range return value
 * is in the elided tail of the function (presumably -1 — TODO confirm).
 */
264 static int gdb_signal_to_target (int sig)
266 if (sig < ARRAY_SIZE (gdb_signal_table))
267 return gdb_signal_table[sig];
274 typedef struct GDBRegisterState {
280 struct GDBRegisterState *next;
290 typedef struct GDBState {
291 CPUState *c_cpu; /* current CPU for step/continue ops */
292 CPUState *g_cpu; /* current CPU for other ops */
293 CPUState *query_cpu; /* for q{f|s}ThreadInfo */
294 enum RSState state; /* parsing state */
295 char line_buf[MAX_PACKET_LENGTH];
298 uint8_t last_packet[MAX_PACKET_LENGTH + 4];
301 #ifdef CONFIG_USER_ONLY
305 CharDriverState *chr;
306 CharDriverState *mon_chr;
308 char syscall_buf[256];
309 gdb_syscall_complete_cb current_syscall_cb;
312 /* By default use no IRQs and no timers while single stepping so as to
313 * make single stepping like an ICE HW step.
315 static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;
317 static GDBState *gdbserver_state;
319 /* This is an ugly hack to cope with both new and old gdb.
320 If gdb sends qXfer:features:read then assume we're talking to a newish
321 gdb that understands target descriptions. */
322 static int gdb_has_xml;
324 #ifdef CONFIG_USER_ONLY
325 /* XXX: This is not thread safe. Do we care? */
326 static int gdbserver_fd = -1;
/* Read a single byte from the gdb client socket (user-mode only).
 * Transient errno values (EINTR/EAGAIN) are tolerated; ECONNRESET and
 * a zero-length read (peer closed the connection) are treated as the
 * debugger going away.  Return value semantics (byte vs. -1) are in the
 * elided lines — presumably -1 on error, the byte otherwise.
 */
328 static int get_char(GDBState *s)
334 ret = qemu_recv(s->fd, &ch, 1, 0);
336 if (errno == ECONNRESET)
338 if (errno != EINTR && errno != EAGAIN)
340 } else if (ret == 0) {
358 /* If gdb is connected when the first semihosting syscall occurs then use
359 remote gdb syscalls. Otherwise use native file IO. */
/* The decision is latched on the first call: once gdb_syscall_mode
 * leaves GDB_SYS_UNKNOWN it is never re-evaluated, so attaching gdb
 * later does not switch an already-running guest to remote syscalls. */
360 int use_gdb_syscalls(void)
362 if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
363 gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
366 return gdb_syscall_mode == GDB_SYS_ENABLED;
369 /* Resume execution. */
/* In user mode this just flags the main loop to keep running; in system
 * mode it resumes the VM via the runstate machinery.  A guest that
 * panicked is parked in RUN_STATE_DEBUG instead of being resumed, and a
 * runstate that needs reset is not resumed either (the vm_start() call
 * is in the elided lines — TODO confirm). */
370 static inline void gdb_continue(GDBState *s)
372 #ifdef CONFIG_USER_ONLY
373 s->running_state = 1;
375 if (runstate_check(RUN_STATE_GUEST_PANICKED)) {
376 runstate_set(RUN_STATE_DEBUG);
378 if (!runstate_needs_reset()) {
/* Transmit 'len' raw bytes to the gdb client.  User-mode writes
 * directly to the socket, retrying on EINTR/EAGAIN; system mode goes
 * through the chardev backend, which buffers/retries internally.
 */
384 static void put_buffer(GDBState *s, const uint8_t *buf, int len)
386 #ifdef CONFIG_USER_ONLY
390 ret = send(s->fd, buf, len, 0);
392 if (errno != EINTR && errno != EAGAIN)
400 qemu_chr_fe_write(s->chr, buf, len);
/* Convert one ASCII hex digit ('0'-'9', 'A'-'F', 'a'-'f') to its
 * numeric value 0-15.  Behavior for non-hex input is in the elided
 * tail (presumably returns 0 — TODO confirm). */
404 static inline int fromhex(int v)
406 if (v >= '0' && v <= '9')
408 else if (v >= 'A' && v <= 'F')
410 else if (v >= 'a' && v <= 'f')
/* Convert a value 0-15 to its lowercase ASCII hex digit (inverse of
 * fromhex); body elided in this view. */
416 static inline int tohex(int v)
/* Encode 'len' bytes of 'mem' as 2*len ASCII hex characters in 'buf',
 * high nibble first.  The caller must provide at least 2*len+1 bytes;
 * the NUL terminator is appended in the elided tail. */
424 static void memtohex(char *buf, const uint8_t *mem, int len)
429 for(i = 0; i < len; i++) {
431 *q++ = tohex(c >> 4);
432 *q++ = tohex(c & 0xf);
/* Decode 2*len ASCII hex characters from 'buf' into 'len' bytes of
 * 'mem'.  buf[0]/buf[1] are the current digit pair; buf is presumably
 * advanced by 2 per iteration in an elided statement — TODO confirm. */
437 static void hextomem(uint8_t *mem, const char *buf, int len)
441 for(i = 0; i < len; i++) {
442 mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
447 /* return -1 if error, 0 if OK */
/* Frame 'buf' as a GDB remote-protocol packet ($<data>#<2-hex-digit
 * checksum>, per the remote serial protocol) into s->last_packet, send
 * it, and — in user mode — wait for the '+' acknowledgement (ack
 * handling is in the elided lines after the #ifdef).  The packet is
 * kept in s->last_packet so it can be retransmitted on a '-' nak. */
448 static int put_packet_binary(GDBState *s, const char *buf, int len)
459 for(i = 0; i < len; i++) {
463 *(p++) = tohex((csum >> 4) & 0xf);
464 *(p++) = tohex((csum) & 0xf);
466 s->last_packet_len = p - s->last_packet;
467 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
469 #ifdef CONFIG_USER_ONLY
482 /* return -1 if error, 0 if OK */
/* Convenience wrapper: send a NUL-terminated string as a packet.  The
 * printf is debug tracing (compiled in only under a DEBUG_GDB-style
 * guard in the elided lines — TODO confirm). */
483 static int put_packet(GDBState *s, const char *buf)
486 printf("reply='%s'\n", buf);
489 return put_packet_binary(s, buf, strlen(buf));
492 /* The GDB remote protocol transfers values in target byte order. This means
493 we can use the raw memory access routines to access the value buffer.
494 Conveniently, these also handle the case where the buffer is mis-aligned.
496 #define GET_REG8(val) do { \
497 stb_p(mem_buf, val); \
500 #define GET_REG16(val) do { \
501 stw_p(mem_buf, val); \
504 #define GET_REG32(val) do { \
505 stl_p(mem_buf, val); \
508 #define GET_REG64(val) do { \
509 stq_p(mem_buf, val); \
513 #if TARGET_LONG_BITS == 64
514 #define GET_REGL(val) GET_REG64(val)
515 #define ldtul_p(addr) ldq_p(addr)
517 #define GET_REGL(val) GET_REG32(val)
518 #define ldtul_p(addr) ldl_p(addr)
521 #if defined(TARGET_I386)
524 static const int gpr_map[16] = {
525 R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
526 8, 9, 10, 11, 12, 13, 14, 15
529 #define gpr_map gpr_map32
531 static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
533 #define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
535 #define IDX_IP_REG CPU_NB_REGS
536 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
537 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
538 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
539 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
540 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
542 static int cpu_gdb_read_register(CPUX86State *env, uint8_t *mem_buf, int n)
544 if (n < CPU_NB_REGS) {
545 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
546 GET_REG64(env->regs[gpr_map[n]]);
547 } else if (n < CPU_NB_REGS32) {
548 GET_REG32(env->regs[gpr_map32[n]]);
550 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
551 #ifdef USE_X86LDOUBLE
552 /* FIXME: byteswap float values - after fixing fpregs layout. */
553 memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
555 memset(mem_buf, 0, 10);
558 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
560 if (n < CPU_NB_REGS32 ||
561 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
562 stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
563 stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
569 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
575 GET_REG32(env->eflags);
578 GET_REG32(env->segs[R_CS].selector);
579 case IDX_SEG_REGS + 1:
580 GET_REG32(env->segs[R_SS].selector);
581 case IDX_SEG_REGS + 2:
582 GET_REG32(env->segs[R_DS].selector);
583 case IDX_SEG_REGS + 3:
584 GET_REG32(env->segs[R_ES].selector);
585 case IDX_SEG_REGS + 4:
586 GET_REG32(env->segs[R_FS].selector);
587 case IDX_SEG_REGS + 5:
588 GET_REG32(env->segs[R_GS].selector);
590 case IDX_FP_REGS + 8:
591 GET_REG32(env->fpuc);
592 case IDX_FP_REGS + 9:
593 GET_REG32((env->fpus & ~0x3800) |
594 (env->fpstt & 0x7) << 11);
595 case IDX_FP_REGS + 10:
596 GET_REG32(0); /* ftag */
597 case IDX_FP_REGS + 11:
598 GET_REG32(0); /* fiseg */
599 case IDX_FP_REGS + 12:
600 GET_REG32(0); /* fioff */
601 case IDX_FP_REGS + 13:
602 GET_REG32(0); /* foseg */
603 case IDX_FP_REGS + 14:
604 GET_REG32(0); /* fooff */
605 case IDX_FP_REGS + 15:
606 GET_REG32(0); /* fop */
609 GET_REG32(env->mxcsr);
/* Load an x86 segment register from a gdb-supplied value.  gdb sends
 * 32 bits; only the low 16 are a selector, so the narrowing assignment
 * into uint16_t is intentional.  In user mode the normal segment-load
 * path is used; in system mode the descriptor cache is filled directly:
 * in real/VM86 mode base = selector << 4, otherwise the descriptor is
 * looked up via cpu_x86_get_descr_debug().  No-op when the selector is
 * unchanged.  Return value is in the elided tail (presumably 4, the
 * number of bytes consumed — TODO confirm). */
615 static int cpu_x86_gdb_load_seg(CPUX86State *env, int sreg, uint8_t *mem_buf)
617 uint16_t selector = ldl_p(mem_buf);
619 if (selector != env->segs[sreg].selector) {
620 #if defined(CONFIG_USER_ONLY)
621 cpu_x86_load_seg(env, sreg, selector);
623 unsigned int limit, flags;
626 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
627 base = selector << 4;
631 if (!cpu_x86_get_descr_debug(env, selector, &base, &limit,
636 cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
642 static int cpu_gdb_write_register(CPUX86State *env, uint8_t *mem_buf, int n)
646 if (n < CPU_NB_REGS) {
647 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
648 env->regs[gpr_map[n]] = ldtul_p(mem_buf);
649 return sizeof(target_ulong);
650 } else if (n < CPU_NB_REGS32) {
652 env->regs[n] &= ~0xffffffffUL;
653 env->regs[n] |= (uint32_t)ldl_p(mem_buf);
656 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
657 #ifdef USE_X86LDOUBLE
658 /* FIXME: byteswap float values - after fixing fpregs layout. */
659 memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
662 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
664 if (n < CPU_NB_REGS32 ||
665 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
666 env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
667 env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
673 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
674 env->eip = ldq_p(mem_buf);
677 env->eip &= ~0xffffffffUL;
678 env->eip |= (uint32_t)ldl_p(mem_buf);
682 env->eflags = ldl_p(mem_buf);
686 return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
687 case IDX_SEG_REGS + 1:
688 return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
689 case IDX_SEG_REGS + 2:
690 return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
691 case IDX_SEG_REGS + 3:
692 return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
693 case IDX_SEG_REGS + 4:
694 return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
695 case IDX_SEG_REGS + 5:
696 return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);
698 case IDX_FP_REGS + 8:
699 env->fpuc = ldl_p(mem_buf);
701 case IDX_FP_REGS + 9:
702 tmp = ldl_p(mem_buf);
703 env->fpstt = (tmp >> 11) & 7;
704 env->fpus = tmp & ~0x3800;
706 case IDX_FP_REGS + 10: /* ftag */
708 case IDX_FP_REGS + 11: /* fiseg */
710 case IDX_FP_REGS + 12: /* fioff */
712 case IDX_FP_REGS + 13: /* foseg */
714 case IDX_FP_REGS + 14: /* fooff */
716 case IDX_FP_REGS + 15: /* fop */
720 env->mxcsr = ldl_p(mem_buf);
724 /* Unrecognised register. */
728 #elif defined (TARGET_PPC)
730 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
731 expects whatever the target description contains. Due to a
732 historical mishap the FP registers appear in between core integer
733 regs and PC, MSR, CR, and so forth. We hack round this by giving the
734 FP regs zero size when talking to a newer gdb. */
735 #define NUM_CORE_REGS 71
736 #if defined (TARGET_PPC64)
737 #define GDB_CORE_XML "power64-core.xml"
739 #define GDB_CORE_XML "power-core.xml"
742 static int cpu_gdb_read_register(CPUPPCState *env, uint8_t *mem_buf, int n)
746 GET_REGL(env->gpr[n]);
752 stfq_p(mem_buf, env->fpr[n-32]);
764 for (i = 0; i < 8; i++) {
765 cr |= env->crf[i] << (32 - ((i + 1) * 4));
780 GET_REG32(env->fpscr);
787 static int cpu_gdb_write_register(CPUPPCState *env, uint8_t *mem_buf, int n)
791 env->gpr[n] = ldtul_p(mem_buf);
792 return sizeof(target_ulong);
798 env->fpr[n-32] = ldfq_p(mem_buf);
803 env->nip = ldtul_p(mem_buf);
804 return sizeof(target_ulong);
806 ppc_store_msr(env, ldtul_p(mem_buf));
807 return sizeof(target_ulong);
810 uint32_t cr = ldl_p(mem_buf);
812 for (i = 0; i < 8; i++) {
813 env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
818 env->lr = ldtul_p(mem_buf);
819 return sizeof(target_ulong);
821 env->ctr = ldtul_p(mem_buf);
822 return sizeof(target_ulong);
824 env->xer = ldtul_p(mem_buf);
825 return sizeof(target_ulong);
831 store_fpscr(env, ldtul_p(mem_buf), 0xffffffff);
832 return sizeof(target_ulong);
838 #elif defined (TARGET_SPARC)
840 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
841 #define NUM_CORE_REGS 86
843 #define NUM_CORE_REGS 72
847 #define GET_REGA(val) GET_REG32(val)
849 #define GET_REGA(val) GET_REGL(val)
852 static int cpu_gdb_read_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
856 GET_REGA(env->gregs[n]);
859 /* register window */
860 GET_REGA(env->regwptr[n - 8]);
862 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
866 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
868 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
871 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
876 GET_REGA(cpu_get_psr(env));
888 GET_REGA(0); /* csr */
896 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
898 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
902 /* f32-f62 (double width, even numbers only) */
903 GET_REG64(env->fpr[(n - 32) / 2].ll);
911 GET_REGL((cpu_get_ccr(env) << 32) |
912 ((env->asi & 0xff) << 24) |
913 ((env->pstate & 0xfff) << 8) |
926 static int cpu_gdb_write_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
928 #if defined(TARGET_ABI32)
931 tmp = ldl_p(mem_buf);
935 tmp = ldtul_p(mem_buf);
942 /* register window */
943 env->regwptr[n - 8] = tmp;
945 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
950 env->fpr[(n - 32) / 2].l.lower = tmp;
952 env->fpr[(n - 32) / 2].l.upper = tmp;
955 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
961 cpu_put_psr(env, tmp);
986 tmp = ldl_p(mem_buf);
988 env->fpr[(n - 32) / 2].l.lower = tmp;
990 env->fpr[(n - 32) / 2].l.upper = tmp;
994 /* f32-f62 (double width, even numbers only) */
995 env->fpr[(n - 32) / 2].ll = tmp;
1005 cpu_put_ccr(env, tmp >> 32);
1006 env->asi = (tmp >> 24) & 0xff;
1007 env->pstate = (tmp >> 8) & 0xfff;
1008 cpu_put_cwp64(env, tmp & 0xff);
1026 #elif defined (TARGET_ARM)
1028 /* Old gdb always expect FPA registers. Newer (xml-aware) gdb only expect
1029 whatever the target description contains. Due to a historical mishap
1030 the FPA registers appear in between core integer regs and the CPSR.
1031 We hack round this by giving the FPA regs zero size when talking to a
1033 #define NUM_CORE_REGS 26
1034 #define GDB_CORE_XML "arm-core.xml"
/* Read one ARM register for gdb.  Layout (see NUM_CORE_REGS == 26 and
 * the historical-mishap comment above): r0-r15, then the legacy FPA
 * registers (reported as 12 zero bytes each since the FPA is not
 * emulated), FPA status, then CPSR. */
1036 static int cpu_gdb_read_register(CPUARMState *env, uint8_t *mem_buf, int n)
1039 /* Core integer register. */
1040 GET_REG32(env->regs[n]);
1043 /* FPA registers. */
1047 memset(mem_buf, 0, 12);
1052 /* FPA status register. */
1059 GET_REG32(cpsr_read(env));
1061 /* Unknown register. */
/* Write one ARM register from gdb.  Mirrors the read layout: r0-r15,
 * FPA regs (silently ignored), FPA status (ignored), CPSR (written with
 * a full 0xffffffff mask). */
1065 static int cpu_gdb_write_register(CPUARMState *env, uint8_t *mem_buf, int n)
1069 tmp = ldl_p(mem_buf);
1071 /* Mask out low bit of PC to workaround gdb bugs. This will probably
1072 cause problems if we ever implement the Jazelle DBX extensions. */
1078 /* Core integer register. */
1082 if (n < 24) { /* 16-23 */
1083 /* FPA registers (ignored). */
1091 /* FPA status register (ignored). */
1098 cpsr_write(env, tmp, 0xffffffff);
1101 /* Unknown register. */
1105 #elif defined (TARGET_M68K)
1107 #define NUM_CORE_REGS 18
1109 #define GDB_CORE_XML "cf-core.xml"
1111 static int cpu_gdb_read_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1115 GET_REG32(env->dregs[n]);
1116 } else if (n < 16) {
1118 GET_REG32(env->aregs[n - 8]);
1127 /* FP registers not included here because they vary between
1128 ColdFire and m68k. Use XML bits for these. */
1132 static int cpu_gdb_write_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1136 tmp = ldl_p(mem_buf);
1140 env->dregs[n] = tmp;
1141 } else if (n < 16) {
1143 env->aregs[n - 8] = tmp;
1158 #elif defined (TARGET_MIPS)
1160 #define NUM_CORE_REGS 73
1162 static int cpu_gdb_read_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1165 GET_REGL(env->active_tc.gpr[n]);
1167 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
1168 if (n >= 38 && n < 70) {
1169 if (env->CP0_Status & (1 << CP0St_FR)) {
1170 GET_REGL(env->active_fpu.fpr[n - 38].d);
1172 GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
1177 GET_REGL((int32_t)env->active_fpu.fcr31);
1179 GET_REGL((int32_t)env->active_fpu.fcr0);
1184 GET_REGL((int32_t)env->CP0_Status);
1186 GET_REGL(env->active_tc.LO[0]);
1188 GET_REGL(env->active_tc.HI[0]);
1190 GET_REGL(env->CP0_BadVAddr);
1192 GET_REGL((int32_t)env->CP0_Cause);
1194 GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
1196 GET_REGL(0); /* fp */
1198 GET_REGL((int32_t)env->CP0_PRid);
1200 if (n >= 73 && n <= 88) {
1201 /* 16 embedded regs. */
1208 /* convert MIPS rounding mode in FCR31 to IEEE library */
1209 static unsigned int ieee_rm[] = {
1210 float_round_nearest_even,
1211 float_round_to_zero,
1215 #define RESTORE_ROUNDING_MODE \
1216 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], \
1217 &env->active_fpu.fp_status)
1219 static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1223 tmp = ldtul_p(mem_buf);
1226 env->active_tc.gpr[n] = tmp;
1227 return sizeof(target_ulong);
1229 if (env->CP0_Config1 & (1 << CP0C1_FP)
1230 && n >= 38 && n < 73) {
1232 if (env->CP0_Status & (1 << CP0St_FR)) {
1233 env->active_fpu.fpr[n - 38].d = tmp;
1235 env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
1240 env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
1241 /* set rounding mode */
1242 RESTORE_ROUNDING_MODE;
1245 env->active_fpu.fcr0 = tmp;
1248 return sizeof(target_ulong);
1252 env->CP0_Status = tmp;
1255 env->active_tc.LO[0] = tmp;
1258 env->active_tc.HI[0] = tmp;
1261 env->CP0_BadVAddr = tmp;
1264 env->CP0_Cause = tmp;
1267 env->active_tc.PC = tmp & ~(target_ulong)1;
1269 env->hflags |= MIPS_HFLAG_M16;
1271 env->hflags &= ~(MIPS_HFLAG_M16);
1274 case 72: /* fp, ignored */
1280 /* Other registers are readonly. Ignore writes. */
1284 return sizeof(target_ulong);
1286 #elif defined(TARGET_OPENRISC)
1288 #define NUM_CORE_REGS (32 + 3)
1290 static int cpu_gdb_read_register(CPUOpenRISCState *env, uint8_t *mem_buf, int n)
1293 GET_REG32(env->gpr[n]);
1297 GET_REG32(env->ppc);
1301 GET_REG32(env->npc);
/* Write one OpenRISC register from gdb (32 GPRs + PPC/NPC/SR).
 * NOTE(review): the bounds check 'n > NUM_CORE_REGS' looks off by one —
 * valid indices are 0..NUM_CORE_REGS-1, so this admits n == NUM_CORE_REGS.
 * Harmless in practice only because the gdb_write_register() dispatcher
 * never passes n >= NUM_CORE_REGS here; should be '>=' — confirm upstream. */
1315 static int cpu_gdb_write_register(CPUOpenRISCState *env,
1316 uint8_t *mem_buf, int n)
1320 if (n > NUM_CORE_REGS) {
1324 tmp = ldl_p(mem_buf);
1348 #elif defined (TARGET_SH4)
1350 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1351 /* FIXME: We should use XML for this. */
1353 #define NUM_CORE_REGS 59
1355 static int cpu_gdb_read_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1359 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1360 GET_REGL(env->gregs[n + 16]);
1362 GET_REGL(env->gregs[n]);
1365 GET_REGL(env->gregs[n]);
1375 GET_REGL(env->mach);
1377 GET_REGL(env->macl);
1381 GET_REGL(env->fpul);
1383 GET_REGL(env->fpscr);
1385 if (env->fpscr & FPSCR_FR) {
1386 stfl_p(mem_buf, env->fregs[n - 9]);
1388 stfl_p(mem_buf, env->fregs[n - 25]);
1396 GET_REGL(env->gregs[n - 43]);
1398 GET_REGL(env->gregs[n - (51 - 16)]);
1404 static int cpu_gdb_write_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1408 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1409 env->gregs[n + 16] = ldl_p(mem_buf);
1411 env->gregs[n] = ldl_p(mem_buf);
1415 env->gregs[n] = ldl_p(mem_buf);
1418 env->pc = ldl_p(mem_buf);
1421 env->pr = ldl_p(mem_buf);
1424 env->gbr = ldl_p(mem_buf);
1427 env->vbr = ldl_p(mem_buf);
1430 env->mach = ldl_p(mem_buf);
1433 env->macl = ldl_p(mem_buf);
1436 env->sr = ldl_p(mem_buf);
1439 env->fpul = ldl_p(mem_buf);
1442 env->fpscr = ldl_p(mem_buf);
1445 if (env->fpscr & FPSCR_FR) {
1446 env->fregs[n - 9] = ldfl_p(mem_buf);
1448 env->fregs[n - 25] = ldfl_p(mem_buf);
1452 env->ssr = ldl_p(mem_buf);
1455 env->spc = ldl_p(mem_buf);
1458 env->gregs[n - 43] = ldl_p(mem_buf);
1461 env->gregs[n - (51 - 16)] = ldl_p(mem_buf);
1469 #elif defined (TARGET_MICROBLAZE)
1471 #define NUM_CORE_REGS (32 + 5)
1473 static int cpu_gdb_read_register(CPUMBState *env, uint8_t *mem_buf, int n)
1476 GET_REG32(env->regs[n]);
1478 GET_REG32(env->sregs[n - 32]);
/* Write one MicroBlaze register from gdb (r0-r31, then 5 sregs).
 * NOTE(review): 'n > NUM_CORE_REGS' is an off-by-one bounds check
 * (admits n == NUM_CORE_REGS); only safe because the dispatcher never
 * passes such an index.  Should be '>=' — confirm upstream. */
1483 static int cpu_gdb_write_register(CPUMBState *env, uint8_t *mem_buf, int n)
1487 if (n > NUM_CORE_REGS) {
1491 tmp = ldl_p(mem_buf);
1496 env->sregs[n - 32] = tmp;
1500 #elif defined (TARGET_CRIS)
1502 #define NUM_CORE_REGS 49
1505 read_register_crisv10(CPUCRISState *env, uint8_t *mem_buf, int n)
1508 GET_REG32(env->regs[n]);
1518 GET_REG8(env->pregs[n - 16]);
1521 GET_REG8(env->pregs[n - 16]);
1525 GET_REG16(env->pregs[n - 16]);
1529 GET_REG32(env->pregs[n - 16]);
1537 static int cpu_gdb_read_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1541 if (env->pregs[PR_VR] < 32) {
1542 return read_register_crisv10(env, mem_buf, n);
1545 srs = env->pregs[PR_SRS];
1547 GET_REG32(env->regs[n]);
1550 if (n >= 21 && n < 32) {
1551 GET_REG32(env->pregs[n - 16]);
1553 if (n >= 33 && n < 49) {
1554 GET_REG32(env->sregs[srs][n - 33]);
1558 GET_REG8(env->pregs[0]);
1560 GET_REG8(env->pregs[1]);
1562 GET_REG32(env->pregs[2]);
1566 GET_REG16(env->pregs[4]);
1574 static int cpu_gdb_write_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1582 tmp = ldl_p(mem_buf);
1588 if (n >= 21 && n < 32) {
1589 env->pregs[n - 16] = tmp;
1592 /* FIXME: Should support function regs be writable? */
1599 env->pregs[PR_PID] = tmp;
1612 #elif defined (TARGET_ALPHA)
1614 #define NUM_CORE_REGS 67
1616 static int cpu_gdb_read_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1626 d.d = env->fir[n - 32];
1630 val = cpu_alpha_load_fpcr(env);
1640 /* 31 really is the zero register; 65 is unassigned in the
1641 gdb protocol, but is still required to occupy 8 bytes. */
1650 static int cpu_gdb_write_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1652 target_ulong tmp = ldtul_p(mem_buf);
1661 env->fir[n - 32] = d.d;
1664 cpu_alpha_store_fpcr(env, tmp);
1674 /* 31 really is the zero register; 65 is unassigned in the
1675 gdb protocol, but is still required to occupy 8 bytes. */
1682 #elif defined (TARGET_S390X)
1684 #define NUM_CORE_REGS S390_NUM_REGS
1686 static int cpu_gdb_read_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1692 case S390_PSWM_REGNUM:
1693 cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);
1694 val = deposit64(env->psw.mask, 44, 2, cc_op);
1697 case S390_PSWA_REGNUM:
1698 GET_REGL(env->psw.addr);
1700 case S390_R0_REGNUM ... S390_R15_REGNUM:
1701 GET_REGL(env->regs[n-S390_R0_REGNUM]);
1703 case S390_A0_REGNUM ... S390_A15_REGNUM:
1704 GET_REG32(env->aregs[n-S390_A0_REGNUM]);
1706 case S390_FPC_REGNUM:
1707 GET_REG32(env->fpc);
1709 case S390_F0_REGNUM ... S390_F15_REGNUM:
1710 GET_REG64(env->fregs[n-S390_F0_REGNUM].ll);
1717 static int cpu_gdb_write_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1722 tmpl = ldtul_p(mem_buf);
1723 tmp32 = ldl_p(mem_buf);
1726 case S390_PSWM_REGNUM:
1727 env->psw.mask = tmpl;
1728 env->cc_op = extract64(tmpl, 44, 2);
1730 case S390_PSWA_REGNUM:
1731 env->psw.addr = tmpl;
1733 case S390_R0_REGNUM ... S390_R15_REGNUM:
1734 env->regs[n-S390_R0_REGNUM] = tmpl;
1736 case S390_A0_REGNUM ... S390_A15_REGNUM:
1737 env->aregs[n-S390_A0_REGNUM] = tmp32;
1740 case S390_FPC_REGNUM:
1744 case S390_F0_REGNUM ... S390_F15_REGNUM:
1745 env->fregs[n-S390_F0_REGNUM].ll = tmpl;
1752 #elif defined (TARGET_LM32)
1754 #include "hw/lm32/lm32_pic.h"
1755 #define NUM_CORE_REGS (32 + 7)
1757 static int cpu_gdb_read_register(CPULM32State *env, uint8_t *mem_buf, int n)
1760 GET_REG32(env->regs[n]);
1766 /* FIXME: put in right exception ID */
1771 GET_REG32(env->eba);
1774 GET_REG32(env->deba);
1780 GET_REG32(lm32_pic_get_im(env->pic_state));
1783 GET_REG32(lm32_pic_get_ip(env->pic_state));
/* Write one LM32 register from gdb (r0-r31 plus PC/EBA/DEBA/IE/IM/IP;
 * the interrupt mask/pending regs go through the PIC helpers).
 * NOTE(review): 'n > NUM_CORE_REGS' is the same off-by-one bounds check
 * as the OpenRISC/MicroBlaze variants; should be '>=' — confirm upstream. */
1790 static int cpu_gdb_write_register(CPULM32State *env, uint8_t *mem_buf, int n)
1794 if (n > NUM_CORE_REGS) {
1798 tmp = ldl_p(mem_buf);
1817 lm32_pic_set_im(env->pic_state, tmp);
1820 lm32_pic_set_ip(env->pic_state, tmp);
1826 #elif defined(TARGET_XTENSA)
1828 /* Use num_core_regs to see only non-privileged registers in an unmodified gdb.
1829 * Use num_regs to see all registers. gdb modification is required for that:
1830 * reset bit 0 in the 'flags' field of the registers definitions in the
1831 * gdb/xtensa-config.c inside gdb source tree or inside gdb overlay.
1833 #define NUM_CORE_REGS (env->config->gdb_regmap.num_regs)
1834 #define num_g_regs NUM_CORE_REGS
1836 static int cpu_gdb_read_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1838 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1840 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1844 switch (reg->type) {
1850 xtensa_sync_phys_from_window(env);
1851 GET_REG32(env->phys_regs[(reg->targno & 0xff) % env->config->nareg]);
1855 GET_REG32(env->sregs[reg->targno & 0xff]);
1859 GET_REG32(env->uregs[reg->targno & 0xff]);
1863 GET_REG32(float32_val(env->fregs[reg->targno & 0x0f]));
1867 GET_REG32(env->regs[reg->targno & 0x0f]);
1871 qemu_log("%s from reg %d of unsupported type %d\n",
1872 __func__, n, reg->type);
1877 static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1880 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1882 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1886 tmp = ldl_p(mem_buf);
1888 switch (reg->type) {
1894 env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
1895 xtensa_sync_window_from_phys(env);
1899 env->sregs[reg->targno & 0xff] = tmp;
1903 env->uregs[reg->targno & 0xff] = tmp;
1907 env->fregs[reg->targno & 0x0f] = make_float32(tmp);
1911 env->regs[reg->targno & 0x0f] = tmp;
1915 qemu_log("%s to reg %d of unsupported type %d\n",
1916 __func__, n, reg->type);
1924 #define NUM_CORE_REGS 0
1926 static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
1931 static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
1938 #if !defined(TARGET_XTENSA)
1939 static int num_g_regs = NUM_CORE_REGS;
1943 /* Encode data using the encoding for 'x' packets. */
/* The listed characters are special in the remote protocol and must be
 * escaped as '}' followed by the character XOR 0x20; all other bytes
 * pass through.  Returns the encoded length (elided tail). */
1944 static int memtox(char *buf, const char *mem, int len)
1952 case '#': case '$': case '*': case '}':
1964 static const char *get_feature_xml(const char *p, const char **newp)
1969 static char target_xml[1024];
1972 while (p[len] && p[len] != ':')
1977 if (strncmp(p, "target.xml", len) == 0) {
1978 /* Generate the XML description for this CPU. */
1979 if (!target_xml[0]) {
1980 GDBRegisterState *r;
1981 CPUState *cpu = first_cpu;
1983 snprintf(target_xml, sizeof(target_xml),
1984 "<?xml version=\"1.0\"?>"
1985 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1987 "<xi:include href=\"%s\"/>",
1990 for (r = cpu->gdb_regs; r; r = r->next) {
1991 pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
1992 pstrcat(target_xml, sizeof(target_xml), r->xml);
1993 pstrcat(target_xml, sizeof(target_xml), "\"/>");
1995 pstrcat(target_xml, sizeof(target_xml), "</target>");
1999 for (i = 0; ; i++) {
2000 name = xml_builtin[i][0];
2001 if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
2004 return name ? xml_builtin[i][1] : NULL;
/* Read register 'reg' of 'cpu' into mem_buf.  Core registers
 * (reg < NUM_CORE_REGS) go to the per-target cpu_gdb_read_register();
 * higher numbers are looked up in the coprocessor/XML register list
 * registered via gdb_register_coprocessor().  Returns the register
 * size in bytes (0 for an unknown register, in the elided tail). */
2008 static int gdb_read_register(CPUState *cpu, uint8_t *mem_buf, int reg)
2010 CPUArchState *env = cpu->env_ptr;
2011 GDBRegisterState *r;
2013 if (reg < NUM_CORE_REGS)
2014 return cpu_gdb_read_register(env, mem_buf, reg);
2016 for (r = cpu->gdb_regs; r; r = r->next) {
2017 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
2018 return r->get_reg(env, mem_buf, reg - r->base_reg);
/* Write register 'reg' of 'cpu' from mem_buf; exact mirror of
 * gdb_read_register() dispatch (core regs → per-target handler,
 * extended regs → registered set_reg callback).  Returns the number of
 * bytes consumed from mem_buf. */
2024 static int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg)
2026 CPUArchState *env = cpu->env_ptr;
2027 GDBRegisterState *r;
2029 if (reg < NUM_CORE_REGS)
2030 return cpu_gdb_write_register(env, mem_buf, reg);
2032 for (r = cpu->gdb_regs; r; r = r->next) {
2033 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
2034 return r->set_reg(env, mem_buf, reg - r->base_reg);
2040 #if !defined(TARGET_XTENSA)
2041 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
2042 specifies the first register number and these registers are included in
2043 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
2044 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
/* Register a supplemental (coprocessor/XML-described) register set for
 * 'cpu'.  Duplicate registrations (same xml name) are ignored.  New
 * entries are appended to the per-CPU list with register numbers
 * starting at 'last_reg'; when g_pos is nonzero the caller asserts
 * where its registers must start inside the 'g' packet, and a mismatch
 * is reported (but not fatal).
 * NOTE(review): 'last_reg' is function-static, i.e. a single counter
 * shared across ALL CPUs — this only numbers registers correctly when
 * every CPU registers identical coprocessor sets in the same order.
 * Confirm against the multi-CPU heterogeneous case. */
2047 void gdb_register_coprocessor(CPUState *cpu,
2048 gdb_reg_cb get_reg, gdb_reg_cb set_reg,
2049 int num_regs, const char *xml, int g_pos)
2051 GDBRegisterState *s;
2052 GDBRegisterState **p;
2053 static int last_reg = NUM_CORE_REGS;
2057 /* Check for duplicates. */
2058 if (strcmp((*p)->xml, xml) == 0)
2063 s = g_new0(GDBRegisterState, 1);
2064 s->base_reg = last_reg;
2065 s->num_regs = num_regs;
2066 s->get_reg = get_reg;
2067 s->set_reg = set_reg;
2070 /* Add to end of list. */
2071 last_reg += num_regs;
2074 if (g_pos != s->base_reg) {
2075 fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
2076 "Expected %d got %d\n", xml, g_pos, s->base_reg);
2078 num_g_regs = last_reg;
2084 #ifndef CONFIG_USER_ONLY
2085 static const int xlat_gdb_type[] = {
2086 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
2087 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
2088 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
/* Insert a breakpoint or watchpoint of the given GDB 'type' at 'addr'.
 * Under KVM the request is delegated wholesale to the kernel.  For TCG,
 * SW and HW breakpoints are treated identically (both BP_GDB) and are
 * inserted on EVERY CPU so any core hitting the address stops;
 * watchpoints likewise, with the GDB type translated to BP_MEM_* flags
 * via xlat_gdb_type[] (system mode only).  Returns 0 on success or a
 * negative errno from the first failing CPU (elided error paths). */
2092 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
2098 if (kvm_enabled()) {
2099 return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
2103 case GDB_BREAKPOINT_SW:
2104 case GDB_BREAKPOINT_HW:
2105 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2107 err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
2112 #ifndef CONFIG_USER_ONLY
2113 case GDB_WATCHPOINT_WRITE:
2114 case GDB_WATCHPOINT_READ:
2115 case GDB_WATCHPOINT_ACCESS:
2116 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2118 err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
/* Remove a breakpoint or watchpoint previously inserted by
 * gdb_breakpoint_insert(); same KVM delegation and all-CPUs iteration
 * as the insert path. */
2130 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
2136 if (kvm_enabled()) {
2137 return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
2141 case GDB_BREAKPOINT_SW:
2142 case GDB_BREAKPOINT_HW:
2143 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2145 err = cpu_breakpoint_remove(env, addr, BP_GDB);
2150 #ifndef CONFIG_USER_ONLY
2151 case GDB_WATCHPOINT_WRITE:
2152 case GDB_WATCHPOINT_READ:
2153 case GDB_WATCHPOINT_ACCESS:
2154 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2156 err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
/* Drop every gdb-owned (BP_GDB) breakpoint and watchpoint on all CPUs;
 * called on fresh gdb connections ('?' packet) and on detach so stale
 * state from a previous session cannot stop the guest. */
2167 static void gdb_breakpoint_remove_all(void)
2172 if (kvm_enabled()) {
2173 kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
2177 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2179 cpu_breakpoint_remove_all(env, BP_GDB);
2180 #ifndef CONFIG_USER_ONLY
2181 cpu_watchpoint_remove_all(env, BP_GDB);
/* Set the program counter of the current step/continue CPU.  The state
 * is synchronized from the accelerator (KVM) first so the subsequent
 * set_pc() and later register writes are not clobbered. */
2186 static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
2188 CPUState *cpu = s->c_cpu;
2189 CPUClass *cc = CPU_GET_CLASS(cpu);
2191 cpu_synchronize_state(cpu);
2193 cc->set_pc(cpu, pc);
/* Translate a GDB thread id into the matching CPUState by scanning the
 * global CPU list; returns NULL when no CPU has that index (elided
 * tail). */
2197 static CPUState *find_cpu(uint32_t thread_id)
2201 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2202 if (cpu_index(cpu) == thread_id) {
/*
 * Central dispatcher for one complete GDB Remote Serial Protocol
 * packet.  @line_buf holds the packet payload (after '$', before the
 * '#' checksum); its first character selects the command.  Replies go
 * out through put_packet()/put_packet_binary().  Returns the next
 * parser state for gdb_read_byte().
 * NOTE(review): this excerpt is heavily elided -- the switch framing,
 * case labels and most break statements are not visible here; the
 * section comments below mark which command each cluster serves.
 */
2210 static int gdb_handle_packet(GDBState *s, const char *line_buf)
2212 #ifdef TARGET_XTENSA
2218     int ch, reg_size, type, res;
/* Scratch buffers sized to the protocol's maximum packet length. */
2219     char buf[MAX_PACKET_LENGTH];
2220     uint8_t mem_buf[MAX_PACKET_LENGTH];
2222     target_ulong addr, len;
/* Debug trace of every incoming command (compiled in conditionally). */
2225     printf("command='%s'\n", line_buf);
/* '?' -- report the reason the target stopped ("T<sig>thread:<id>;"). */
2231         /* TODO: Make this return the correct value for user-mode.  */
2232         snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
2233                  cpu_index(s->c_cpu))
2235         /* Remove all the breakpoints when this query is issued,
2236          * because gdb is doing and initial connect and the state
2237          * should be cleaned up.
2239         gdb_breakpoint_remove_all();
/* 'c' -- continue, with an optional resume address. */
2243             addr = strtoull(p, (char **)&p, 16);
2244             gdb_set_cpu_pc(s, addr);
/* 'C' -- continue with a signal; translate GDB signal -> target signal. */
2250         s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
2251         if (s->signal == -1)
/* 'v' packets: only vCont (and its '?' probe) is supported. */
2256         if (strncmp(p, "Cont", 4) == 0) {
2257             int res_signal, res_thread;
/* vCont? probe: advertise continue/step, with and without signal. */
2261                 put_packet(s, "vCont;c;C;s;S");
/* Parse each ";action[:thread]" clause of the vCont packet. */
2276                 if (action == 'C' || action == 'S') {
2277                     signal = strtoul(p, (char **)&p, 16);
2278                 } else if (action != 'c' && action != 's') {
2284                     thread = strtoull(p+1, (char **)&p, 16);
2286                 action = tolower(action);
/* Keep the strongest action seen: a step ('s') overrides a continue. */
2287                 if (res == 0 || (res == 'c' && action == 's')) {
2289                     res_signal = signal;
2290                     res_thread = thread;
/* Thread ids -1 (all) and 0 (any) need no lookup; others must exist. */
2294                 if (res_thread != -1 && res_thread != 0) {
2295                     cpu = find_cpu(res_thread);
2297                         put_packet(s, "E22");
2303                     cpu_single_step(s->c_cpu, sstep_flags);
2305                 s->signal = res_signal;
2311         goto unknown_command;
/* 'k' -- kill: in user mode terminate QEMU outright. */
2314 #ifdef CONFIG_USER_ONLY
2315         /* Kill the target */
2316         fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
/* 'D' -- detach: drop all breakpoints, disable syscall forwarding. */
2321         gdb_breakpoint_remove_all();
2322         gdb_syscall_mode = GDB_SYS_DISABLED;
2324         put_packet(s, "OK");
/* 's' -- single step, with an optional resume address. */
2328             addr = strtoull(p, (char **)&p, 16);
2329             gdb_set_cpu_pc(s, addr);
2331         cpu_single_step(s->c_cpu, sstep_flags);
/* 'F' -- reply to a gdb file-I/O (syscall) request: retval[,errno]. */
2339             ret = strtoull(p, (char **)&p, 16);
2342                 err = strtoull(p, (char **)&p, 16);
/* Hand the result to whoever issued gdb_do_syscall(), then clear it. */
2349             if (s->current_syscall_cb) {
2350                 s->current_syscall_cb(s->c_cpu, ret, err);
2351                 s->current_syscall_cb = NULL;
/* Ctrl-C during the syscall: report SIGINT instead of resuming. */
2354                 put_packet(s, "T02");
/* 'g' -- read all registers of the "other operations" CPU (g_cpu). */
2361         cpu_synchronize_state(s->g_cpu);
2362 #ifdef TARGET_XTENSA
2363         env = s->g_cpu->env_ptr;
2366         for (addr = 0; addr < num_g_regs; addr++) {
2367             reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
/* Registers are sent as one long hex string. */
2370         memtohex(buf, mem_buf, len);
/* 'G' -- write all registers from a hex string. */
2374         cpu_synchronize_state(s->g_cpu);
2375 #ifdef TARGET_XTENSA
2376         env = s->g_cpu->env_ptr;
2378         registers = mem_buf;
2379         len = strlen(p) / 2;
2380         hextomem((uint8_t *)registers, p, len);
2381         for (addr = 0; addr < num_g_regs && len > 0; addr++) {
2382             reg_size = gdb_write_register(s->g_cpu, registers, addr);
2384             registers += reg_size;
2386         put_packet(s, "OK");
/* 'm' -- read target memory: "m<addr>,<len>". */
2389         addr = strtoull(p, (char **)&p, 16);
2392         len = strtoull(p, NULL, 16);
2393         if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, false) != 0) {
2394             put_packet (s, "E14");
2396             memtohex(buf, mem_buf, len);
/* 'M' -- write target memory: "M<addr>,<len>:<hex-bytes>". */
2401         addr = strtoull(p, (char **)&p, 16);
2404         len = strtoull(p, (char **)&p, 16);
2407         hextomem(mem_buf, p, len);
2408         if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len,
2410             put_packet(s, "E14");
2412             put_packet(s, "OK");
/* 'p' -- read one register by number (only when XML descriptions
 * advertise it, otherwise fall through to unknown_command). */
2416         /* Older gdb are really dumb, and don't use 'g' if 'p' is avaialable.
2417            This works, but can be very slow.  Anything new enough to
2418            understand XML also knows how to use this properly. */
2420             goto unknown_command;
2421         addr = strtoull(p, (char **)&p, 16);
2422         reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
2424             memtohex(buf, mem_buf, reg_size);
2427             put_packet(s, "E14");
/* 'P' -- write one register: "P<reg>=<hex-value>". */
2432             goto unknown_command;
2433         addr = strtoull(p, (char **)&p, 16);
2436         reg_size = strlen(p) / 2;
2437         hextomem(mem_buf, p, reg_size);
2438         gdb_write_register(s->g_cpu, mem_buf, addr);
2439         put_packet(s, "OK");
/* 'Z'/'z' -- insert/remove break- or watchpoint: "<type>,<addr>,<len>". */
2443         type = strtoul(p, (char **)&p, 16);
2446         addr = strtoull(p, (char **)&p, 16);
2449         len = strtoull(p, (char **)&p, 16);
2451             res = gdb_breakpoint_insert(addr, len, type);
2453             res = gdb_breakpoint_remove(addr, len, type);
2455             put_packet(s, "OK");
/* ENOSYS means "type unsupported": reply with the empty packet. */
2456         else if (res == -ENOSYS)
2459             put_packet(s, "E22");
/* 'H' -- set the CPU used by subsequent 'c'/'g' class operations. */
2463         thread = strtoull(p, (char **)&p, 16);
2464         if (thread == -1 || thread == 0) {
2465             put_packet(s, "OK");
2468         cpu = find_cpu(thread);
2470             put_packet(s, "E22");
2476             put_packet(s, "OK");
2480             put_packet(s, "OK");
2483             put_packet(s, "E22");
/* 'T' -- "is thread alive?": alive iff the CPU index exists. */
2488         thread = strtoull(p, (char **)&p, 16);
2489         cpu = find_cpu(thread);
2492             put_packet(s, "OK");
2494             put_packet(s, "E22");
/* 'q'/'Q' -- general queries and sets. */
2499         /* parse any 'q' packets here */
2500         if (!strcmp(p,"qemu.sstepbits")) {
2501             /* Query Breakpoint bit definitions */
2502             snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
/* qemu.sstep -- QEMU extension to read/write the single-step flags. */
2508         } else if (strncmp(p,"qemu.sstep",10) == 0) {
2509             /* Display or change the sstep_flags */
2512                 /* Display current setting */
2513                 snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
2518             type = strtoul(p, (char **)&p, 16);
2520             put_packet(s, "OK");
2522         } else if (strcmp(p,"C") == 0) {
2523             /* "Current thread" remains vague in the spec, so always return
2524              *  the first CPU (gdb returns the first thread). */
2525             put_packet(s, "QC1");
/* qfThreadInfo/qsThreadInfo -- enumerate CPUs one per reply packet. */
2527         } else if (strcmp(p,"fThreadInfo") == 0) {
2528             s->query_cpu = first_cpu;
2529             goto report_cpuinfo;
2530         } else if (strcmp(p,"sThreadInfo") == 0) {
2533                 snprintf(buf, sizeof(buf), "m%x", cpu_index(s->query_cpu));
2535                 s->query_cpu = s->query_cpu->next_cpu;
/* qThreadExtraInfo -- human-readable per-CPU status string. */
2539         } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
2540             thread = strtoull(p+16, (char **)&p, 16);
2541             cpu = find_cpu(thread);
2543                 cpu_synchronize_state(cpu);
2544                 len = snprintf((char *)mem_buf, sizeof(mem_buf),
2545                                "CPU#%d [%s]", cpu->cpu_index,
2546                                cpu->halted ? "halted " : "running");
2547                 memtohex(buf, mem_buf, len);
/* qOffsets -- user mode only: report guest image load offsets. */
2552 #ifdef CONFIG_USER_ONLY
2553     else if (strncmp(p, "Offsets", 7) == 0) {
2554         CPUArchState *env = s->c_cpu->env_ptr;
2555         TaskState *ts = env->opaque;
2557         snprintf(buf, sizeof(buf),
2558                  "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
2559                  ";Bss=" TARGET_ABI_FMT_lx,
2560                  ts->info->code_offset,
2561                  ts->info->data_offset,
/* NOTE(review): data_offset is reused for the Bss field here --
 * presumably intentional (bss follows data); confirm upstream. */
2562                  ts->info->data_offset);
2566 #else /* !CONFIG_USER_ONLY */
/* qRcmd -- system mode only: hex-encoded monitor command passthrough. */
2567     else if (strncmp(p, "Rcmd,", 5) == 0) {
2568         int len = strlen(p + 5);
/* A hex string must have an even number of digits. */
2570         if ((len % 2) != 0) {
2571             put_packet(s, "E01");
2574         hextomem(mem_buf, p + 5, len);
2577         qemu_chr_be_write(s->mon_chr, mem_buf, len);
2578         put_packet(s, "OK");
2581 #endif /* !CONFIG_USER_ONLY */
/* qSupported -- advertise packet size and XML feature support. */
2582     if (strncmp(p, "Supported", 9) == 0) {
2583         snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
2585         pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
/* qXfer:features:read -- stream the target-description XML in chunks. */
2591     if (strncmp(p, "Xfer:features:read:", 19) == 0) {
2593         target_ulong total_len;
2597         xml = get_feature_xml(p, &p);
2599             snprintf(buf, sizeof(buf), "E00");
2606         addr = strtoul(p, (char **)&p, 16);
2609         len = strtoul(p, (char **)&p, 16);
2611         total_len = strlen(xml);
2612         if (addr > total_len) {
2613             snprintf(buf, sizeof(buf), "E00");
/* Clamp to what fits in one packet after escaping overhead. */
2617         if (len > (MAX_PACKET_LENGTH - 5) / 2)
2618             len = (MAX_PACKET_LENGTH - 5) / 2;
/* 'm' prefix = more data follows; 'l' = last chunk. */
2619         if (len < total_len - addr) {
2621             len = memtox(buf + 1, xml + addr, len);
2624             len = memtox(buf + 1, xml + addr, total_len - addr);
2626         put_packet_binary(s, buf, len + 1);
2630     /* Unrecognised 'q' command.  */
2631     goto unknown_command;
/* Unknown commands get the empty packet, per the RSP spec. */
2635         /* put empty packet */
/*
 * Record which CPU caused the stop so that subsequent GDB operations
 * (both the 'c' continue class and the 'g' register class) target it.
 */
2643 void gdb_set_stop_cpu(CPUState *cpu)
2645     gdbserver_state->c_cpu = cpu;
2646     gdbserver_state->g_cpu = cpu;
2649 #ifndef CONFIG_USER_ONLY
/*
 * VM run-state change hook (system mode only): when the VM stops,
 * translate the QEMU RunState into a GDB stop-reply ('T' packet with a
 * signal number) and send it to the debugger.  Also flushes a pending
 * gdb syscall request that was deferred until the CPU stopped.
 * NOTE(review): excerpt is elided -- the put_packet() calls, break
 * statements and the watchpoint-type selection are not all visible.
 */
2650 static void gdb_vm_state_change(void *opaque, int running, RunState state)
2652     GDBState *s = gdbserver_state;
2653     CPUArchState *env = s->c_cpu->env_ptr;
2654     CPUState *cpu = s->c_cpu;
/* Nothing to report while running or before a debugger attached. */
2659     if (running || s->state == RS_INACTIVE) {
2662     /* Is there a GDB syscall waiting to be sent?  */
2663     if (s->current_syscall_cb) {
2664         put_packet(s, s->syscall_buf);
2668     case RUN_STATE_DEBUG:
/* A watchpoint hit carries the access type and faulting address. */
2669         if (env->watchpoint_hit) {
2670             switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
2681             snprintf(buf, sizeof(buf),
2682                      "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
2683                      GDB_SIGNAL_TRAP, cpu_index(cpu), type,
2684                      env->watchpoint_hit->vaddr);
2685             env->watchpoint_hit = NULL;
2689         ret = GDB_SIGNAL_TRAP;
/* Map the remaining run states onto conventional POSIX signals. */
2691     case RUN_STATE_PAUSED:
2692         ret = GDB_SIGNAL_INT;
2694     case RUN_STATE_SHUTDOWN:
2695         ret = GDB_SIGNAL_QUIT;
2697     case RUN_STATE_IO_ERROR:
2698         ret = GDB_SIGNAL_IO;
2700     case RUN_STATE_WATCHDOG:
2701         ret = GDB_SIGNAL_ALRM;
2703     case RUN_STATE_INTERNAL_ERROR:
2704         ret = GDB_SIGNAL_ABRT;
2706     case RUN_STATE_SAVE_VM:
2707     case RUN_STATE_RESTORE_VM:
2709     case RUN_STATE_FINISH_MIGRATE:
2710         ret = GDB_SIGNAL_XCPU;
2713         ret = GDB_SIGNAL_UNKNOWN;
2716     snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, cpu_index(cpu));
2721     /* disable single step if it was enabled */
2722     cpu_single_step(cpu, 0);
2726 /* Send a gdb syscall request.
2727    This accepts limited printf-style format specifiers, specifically:
2728     %x  - target_ulong argument printed in hex.
2729     %lx - 64-bit argument printed in hex.
2730     %s  - string pointer (target_ulong) and length (int) pair.  */
/*
 * Build an 'F' (file-I/O) request packet into s->syscall_buf from @fmt
 * and the varargs, record @cb as the completion callback, and deliver
 * the packet.  The callback is invoked later from the 'F'-reply
 * handler in gdb_handle_packet() with the syscall's return value.
 * NOTE(review): excerpt elided -- the format-string scanning loop and
 * the va_start/va_end framing are not fully visible.
 */
2731 void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
2740     s = gdbserver_state;
2743     s->current_syscall_cb = cb;
2744 #ifndef CONFIG_USER_ONLY
/* System mode: stop the VM first; the packet is sent once it halts. */
2745     vm_stop(RUN_STATE_DEBUG);
/* p_end bounds every snprintf below to the syscall buffer. */
2749     p_end = &s->syscall_buf[sizeof(s->syscall_buf)];
/* %x: one target_ulong, hex. */
2756                 addr = va_arg(va, target_ulong);
2757                 p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
/* %lx: must literally be "lx"; consumes a 64-bit value. */
2760                 if (*(fmt++) != 'x')
2762                 i64 = va_arg(va, uint64_t);
2763                 p += snprintf(p, p_end - p, "%" PRIx64, i64);
/* %s: guest pointer plus explicit length, encoded "addr/len". */
2766                 addr = va_arg(va, target_ulong);
2767                 p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
2768                               addr, va_arg(va, int));
2772                 fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
2782 #ifdef CONFIG_USER_ONLY
/* User mode: send immediately and block in gdb_handlesig() until the
 * debugger replies. */
2783     put_packet(s, s->syscall_buf);
2784     gdb_handlesig(s->c_cpu, 0);
2786     /* In this case wait to send the syscall packet until notification that
2787        the CPU has stopped.  This must be done because if the packet is sent
2788        now the reply from the syscall request could be received while the CPU
2789        is still in the running state, which can cause packets to be dropped
2790        and state transition 'T' packets to be sent while the syscall is still
/*
 * Feed one byte of the incoming GDB stream through the packet state
 * machine (RS_IDLE -> RS_GETLINE -> RS_CHKSUM1 -> RS_CHKSUM2).  On a
 * verified checksum the payload is handed to gdb_handle_packet().
 * NOTE(review): excerpt elided -- several state labels and the '$'
 * start-of-packet handling are not visible here.
 */
2796 static void gdb_read_byte(GDBState *s, int ch)
2801 #ifndef CONFIG_USER_ONLY
2802     if (s->last_packet_len) {
2803         /* Waiting for a response to the last packet.  If we see the start
2804            of a new command then abandon the previous response.  */
/* '-' = NACK: retransmit the buffered last packet verbatim. */
2807             printf("Got NACK, retransmitting\n");
2809             put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
2813             printf("Got ACK\n");
2815             printf("Got '%c' when expecting ACK/NACK\n", ch);
/* ACK or a new '$' both release the retransmission buffer. */
2817         if (ch == '+' || ch == '$')
2818             s->last_packet_len = 0;
/* Any byte arriving while the guest runs acts as an interrupt. */
2822     if (runstate_is_running()) {
2823         /* when the CPU is running, we cannot do anything except stop
2824            it when receiving a char */
2825         vm_stop(RUN_STATE_PAUSED);
/* Start collecting a new packet payload. */
2832             s->line_buf_index = 0;
2833             s->state = RS_GETLINE;
/* '#' terminates the payload; next two bytes are the hex checksum. */
2838             s->state = RS_CHKSUM1;
2839         } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
2842             s->line_buf[s->line_buf_index++] = ch;
2846         s->line_buf[s->line_buf_index] = '\0';
2847         s->line_csum = fromhex(ch) << 4;
2848         s->state = RS_CHKSUM2;
2851         s->line_csum |= fromhex(ch);
/* Verify: checksum is the modulo-256 sum of the payload bytes. */
2853         for(i = 0; i < s->line_buf_index; i++) {
2854             csum += s->line_buf[i];
2856         if (s->line_csum != (csum & 0xff)) {
/* Bad checksum -> NACK ('-'); good -> ACK ('+') then dispatch. */
2858             put_buffer(s, &reply, 1);
2862             put_buffer(s, &reply, 1);
2863             s->state = gdb_handle_packet(s, s->line_buf);
2872 /* Tell the remote gdb that the process has exited. */
/*
 * Send the 'W<code>' exit-status packet to the debugger (if one is
 * attached) and, in system mode, tear down the character device.
 */
2873 void gdb_exit(CPUArchState *env, int code)
2878   s = gdbserver_state;
2882 #ifdef CONFIG_USER_ONLY
/* User mode: nothing to do if no debugger connection exists. */
2883   if (gdbserver_fd < 0 || s->fd < 0) {
2888   snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
2891 #ifndef CONFIG_USER_ONLY
2893       qemu_chr_delete(s->chr);
2898 #ifdef CONFIG_USER_ONLY
/*
 * NOTE(review): interior of a user-mode-only helper whose signature is
 * elided from this excerpt (presumably gdb_queuesig(), which reports
 * whether a debugger connection exists -- confirm against the full
 * file).  The visible lines are the standard "is gdb attached?" guard.
 */
2904   s = gdbserver_state;
2906   if (gdbserver_fd < 0 || s->fd < 0)
/*
 * User-mode signal hook: report @sig to the attached debugger as an
 * 'S<sig>' stop packet, then block, pumping bytes from the gdb socket
 * through gdb_read_byte(), until the debugger resumes the guest
 * (running_state becomes non-zero).  Returns the (possibly replaced)
 * signal to actually deliver.
 * NOTE(review): excerpt elided -- the return statements are not
 * visible here.
 */
2913 gdb_handlesig(CPUState *cpu, int sig)
2915   CPUArchState *env = cpu->env_ptr;
2920   s = gdbserver_state;
/* No debugger attached: deliver the signal unchanged. */
2921   if (gdbserver_fd < 0 || s->fd < 0) {
2925   /* disable single step if it was enabled */
2926   cpu_single_step(cpu, 0);
2930       snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb(sig));
2933   /* put_packet() might have detected that the peer terminated the
/* Blocking read loop: feed every received byte to the packet parser
 * until a 'c'/'s' command flips running_state. */
2941   s->running_state = 0;
2942   while (s->running_state == 0) {
2943       n = read(s->fd, buf, 256);
2947           for (i = 0; i < n; i++) {
2948               gdb_read_byte(s, buf[i]);
2950       } else if (n == 0 || errno != EAGAIN) {
2951           /* XXX: Connection closed.  Should probably wait for another
2952              connection before continuing.  */
2961 /* Tell the remote gdb that the process has exited due to SIG. */
/*
 * Send the 'X<sig>' terminated-by-signal packet, translating the
 * target signal number to GDB's numbering first.  No-op when no
 * debugger connection exists.
 */
2962 void gdb_signalled(CPUArchState *env, int sig)
2967   s = gdbserver_state;
2968   if (gdbserver_fd < 0 || s->fd < 0) {
2972   snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb(sig));
/*
 * Block until a debugger connects to the listening socket, then set up
 * a fresh GDBState for the connection: low-latency TCP, close-on-exec,
 * non-blocking I/O, and both operation CPUs pointing at first_cpu.
 * NOTE(review): excerpt elided -- the retry loop around accept() and
 * the assignments of s->fd/s->state are not all visible.
 */
2976 static void gdb_accept(void)
2979     struct sockaddr_in sockaddr;
2984         len = sizeof(sockaddr);
2985         fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
/* EINTR is the only tolerated failure: retry; other errors bail out. */
2986         if (fd < 0 && errno != EINTR) {
2989         } else if (fd >= 0) {
2991             fcntl(fd, F_SETFD, FD_CLOEXEC);
2997     /* set short latency */
2998     socket_set_nodelay(fd);
3000     s = g_malloc0(sizeof(GDBState));
3001     s->c_cpu = first_cpu;
3002     s->g_cpu = first_cpu;
3006     gdbserver_state = s;
3008     fcntl(fd, F_SETFL, O_NONBLOCK);
/*
 * Create the listening TCP socket for the gdb stub: bind to
 * INADDR_ANY:@port with SO_REUSEADDR and start listening.
 * Returns the listening fd (error paths are elided in this excerpt).
 */
3011 static int gdbserver_open(int port)
3013     struct sockaddr_in sockaddr;
3016     fd = socket(PF_INET, SOCK_STREAM, 0);
3022     fcntl(fd, F_SETFD, FD_CLOEXEC);
3025     /* allow fast reuse */
/* SO_REUSEADDR avoids "address in use" on quick restarts. */
3027     qemu_setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
3029     sockaddr.sin_family = AF_INET;
3030     sockaddr.sin_port = htons(port);
/* s_addr = 0 is INADDR_ANY: accept connections on all interfaces. */
3031     sockaddr.sin_addr.s_addr = 0;
3032     ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
3038     ret = listen(fd, 0);
/*
 * User-mode entry point: open the listening socket on @port and block
 * in gdb_accept() until a debugger connects.  Returns negative on
 * failure to open the socket.
 */
3047 int gdbserver_start(int port)
3049     gdbserver_fd = gdbserver_open(port);
3050     if (gdbserver_fd < 0)
3052     /* accept connections */
3057 /* Disable gdb stub for child processes.  */
/*
 * Called in the child after fork(): the parent keeps the debugger
 * connection, so the child drops its GDB breakpoints/watchpoints
 * (and, in elided lines, closes its copy of the socket fd).
 */
3058 void gdbserver_fork(CPUArchState *env)
3060     GDBState *s = gdbserver_state;
3061     if (gdbserver_fd < 0 || s->fd < 0)
3065     cpu_breakpoint_remove_all(env, BP_GDB);
3066     cpu_watchpoint_remove_all(env, BP_GDB);
/*
 * Chardev callback: how many bytes the stub is willing to accept.
 */
3069 static int gdb_chr_can_receive(void *opaque)
3071   /* We can handle an arbitrarily large amount of data.
3072    Pick the maximum packet size, which is as good as anything.  */
3073   return MAX_PACKET_LENGTH;
/*
 * Chardev callback: push each received byte through the packet
 * state machine.
 */
3076 static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
3080   for (i = 0; i < size; i++) {
3081       gdb_read_byte(gdbserver_state, buf[i]);
/*
 * Chardev event callback: when a debugger connects, pause the VM so
 * gdb attaches to a stopped target (further cases are elided here).
 */
3085 static void gdb_chr_event(void *opaque, int event)
3088     case CHR_EVENT_OPENED:
3089         vm_stop(RUN_STATE_PAUSED);
/*
 * Forward monitor output to gdb as an 'O' (console output) packet:
 * the message is hex-encoded after the 'O' marker.  Length is clamped
 * so the doubled hex encoding plus the marker fits in one packet.
 */
3097 static void gdb_monitor_output(GDBState *s, const char *msg, int len)
3099     char buf[MAX_PACKET_LENGTH];
/* Each payload byte becomes two hex chars; reserve one for 'O'. */
3102     if (len > (MAX_PACKET_LENGTH/2) - 1)
3103         len = (MAX_PACKET_LENGTH/2) - 1;
3104     memtohex(buf + 1, (uint8_t *)msg, len);
/*
 * chr_write hook for the embedded monitor chardev: split monitor
 * output into chunks that survive the hex doubling in put_packet()
 * (last_packet must hold '$' + 2*len + trailer) and emit each chunk
 * via gdb_monitor_output().  The looping over remaining chunks is
 * elided from this excerpt.
 */
3108 static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
3110     const char *p = (const char *)buf;
3113     max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
3115         if (len <= max_sz) {
3116             gdb_monitor_output(gdbserver_state, p, len);
3119             gdb_monitor_output(gdbserver_state, p, max_sz);
/*
 * SIGINT handler installed for the "stdio" gdb device: pause the VM
 * (rather than killing QEMU) so gdb regains control on Ctrl-C.
 */
3127 static void gdb_sigterm_handler(int signal)
3129     if (runstate_is_running()) {
3130         vm_stop(RUN_STATE_PAUSED);
/*
 * System-mode entry point: attach the gdb stub to @device (a chardev
 * spec such as "tcp::1234", "stdio", or "none").  Creates or reuses
 * the global GDBState, wires up the chardev handlers, and initializes
 * the embedded monitor channel used for qRcmd passthrough.
 * NOTE(review): the function continues past the end of this excerpt.
 */
3135 int gdbserver_start(const char *device)
3138     char gdbstub_device_name[128];
3139     CharDriverState *chr = NULL;
3140     CharDriverState *mon_chr;
3144     if (strcmp(device, "none") != 0) {
3145         if (strstart(device, "tcp:", NULL)) {
3146             /* enforce required TCP attributes */
3147             snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
3148                      "%s,nowait,nodelay,server", device);
3149             device = gdbstub_device_name;
/* "stdio": route Ctrl-C to the stub instead of terminating QEMU. */
3152         else if (strcmp(device, "stdio") == 0) {
3153             struct sigaction act;
3155             memset(&act, 0, sizeof(act));
3156             act.sa_handler = gdb_sigterm_handler;
3157             sigaction(SIGINT, &act, NULL);
3160         chr = qemu_chr_new("gdb", device, NULL);
3164         qemu_chr_fe_claim_no_fail(chr);
3165         qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
3166                               gdb_chr_event, NULL);
3169     s = gdbserver_state;
/* First call: allocate state and hook VM run-state changes. */
3171         s = g_malloc0(sizeof(GDBState));
3172         gdbserver_state = s;
3174         qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
3176         /* Initialize a monitor terminal for gdb */
3177         mon_chr = g_malloc0(sizeof(*mon_chr));
3178         mon_chr->chr_write = gdb_monitor_write;
3179         monitor_init(mon_chr, 0);
/* Restart: drop the old chardev but keep the monitor channel. */
3182             qemu_chr_delete(s->chr);
3183         mon_chr = s->mon_chr;
3184         memset(s, 0, sizeof(GDBState));
3186     s->c_cpu = first_cpu;
3187     s->g_cpu = first_cpu;
/* RS_INACTIVE (device "none") means: no parsing until attach. */
3189     s->state = chr ? RS_IDLE : RS_INACTIVE;
3190     s->mon_chr = mon_chr;
3191     s->current_syscall_cb = NULL;