4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu-common.h"
21 #ifdef CONFIG_USER_ONLY
33 #include "qemu-char.h"
38 #define MAX_PACKET_LENGTH 4096
41 #include "qemu_socket.h"
54 GDB_SIGNAL_UNKNOWN = 143
57 #ifdef CONFIG_USER_ONLY
59 /* Map target signal numbers to GDB protocol signal numbers and vice
60 * versa. For user emulation's currently supported systems, we can
61 * assume most signals are defined.
64 static int gdb_signal_table[] = {
224 /* In system mode we only need SIGINT and SIGTRAP; other signals
225 are not yet supported. */
232 static int gdb_signal_table[] = {
242 #ifdef CONFIG_USER_ONLY
243 static int target_signal_to_gdb (int sig)
246 for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
247 if (gdb_signal_table[i] == sig)
249 return GDB_SIGNAL_UNKNOWN;
253 static int gdb_signal_to_target (int sig)
255 if (sig < ARRAY_SIZE (gdb_signal_table))
256 return gdb_signal_table[sig];
263 typedef struct GDBRegisterState {
269 struct GDBRegisterState *next;
280 typedef struct GDBState {
281 CPUState *c_cpu; /* current CPU for step/continue ops */
282 CPUState *g_cpu; /* current CPU for other ops */
283 CPUState *query_cpu; /* for q{f|s}ThreadInfo */
284 enum RSState state; /* parsing state */
285 char line_buf[MAX_PACKET_LENGTH];
288 uint8_t last_packet[MAX_PACKET_LENGTH + 4];
291 #ifdef CONFIG_USER_ONLY
295 CharDriverState *chr;
296 CharDriverState *mon_chr;
300 /* By default use no IRQs and no timers while single stepping so as to
301 * make single stepping like an ICE HW step.
303 static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;
305 static GDBState *gdbserver_state;
307 /* This is an ugly hack to cope with both new and old gdb.
308 If gdb sends qXfer:features:read then assume we're talking to a newish
309 gdb that understands target descriptions. */
310 static int gdb_has_xml;
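/* In practice a modern gdb probes with a packet such as
   "qXfer:features:read:target.xml:0,ffb" (offset/length values here are
   illustrative); the stub answers with the generated target description
   (see get_feature_xml() below), chunked behind the protocol's 'm'/'l'
   prefixes.  Older gdbs never send this query, so gdb_has_xml stays clear
   and the legacy fixed register layout is used instead. */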
312 #ifdef CONFIG_USER_ONLY
313 /* XXX: This is not thread safe. Do we care? */
314 static int gdbserver_fd = -1;
316 static int get_char(GDBState *s)
322 ret = qemu_recv(s->fd, &ch, 1, 0);
324 if (errno == ECONNRESET)
326 if (errno != EINTR && errno != EAGAIN)
328 } else if (ret == 0) {
340 static gdb_syscall_complete_cb gdb_current_syscall_cb;
348 /* If gdb is connected when the first semihosting syscall occurs then use
349 remote gdb syscalls. Otherwise use native file IO. */
350 int use_gdb_syscalls(void)
352 if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
353 gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
356 return gdb_syscall_mode == GDB_SYS_ENABLED;
359 /* Resume execution. */
360 static inline void gdb_continue(GDBState *s)
362 #ifdef CONFIG_USER_ONLY
363 s->running_state = 1;
369 static void put_buffer(GDBState *s, const uint8_t *buf, int len)
371 #ifdef CONFIG_USER_ONLY
375 ret = send(s->fd, buf, len, 0);
377 if (errno != EINTR && errno != EAGAIN)
385 qemu_chr_fe_write(s->chr, buf, len);
389 static inline int fromhex(int v)
391 if (v >= '0' && v <= '9')
393 else if (v >= 'A' && v <= 'F')
395 else if (v >= 'a' && v <= 'f')
401 static inline int tohex(int v)
409 static void memtohex(char *buf, const uint8_t *mem, int len)
414 for(i = 0; i < len; i++) {
416 *q++ = tohex(c >> 4);
417 *q++ = tohex(c & 0xf);
422 static void hextomem(uint8_t *mem, const char *buf, int len)
426 for(i = 0; i < len; i++) {
427 mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
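/* Example: memtohex() turns the two bytes { 0xde, 0xad } into the string
   "dead", and hextomem() performs the inverse conversion.  Most register
   and memory payloads in the remote protocol pass through these two
   helpers. */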
432 /* return -1 if error, 0 if OK */
433 static int put_packet_binary(GDBState *s, const char *buf, int len)
444 for(i = 0; i < len; i++) {
448 *(p++) = tohex((csum >> 4) & 0xf);
449 *(p++) = tohex((csum) & 0xf);
451 s->last_packet_len = p - s->last_packet;
452 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
454 #ifdef CONFIG_USER_ONLY
467 /* return -1 if error, 0 if OK */
468 static int put_packet(GDBState *s, const char *buf)
471 printf("reply='%s'\n", buf);
474 return put_packet_binary(s, buf, strlen(buf));
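/* Wire format sketch: put_packet()/put_packet_binary() frame the payload as
   '$' <data> '#' <two hex digits>, where the trailing digits are the
   modulo-256 sum of the payload bytes.  The framed bytes are kept in
   last_packet so the packet can be retransmitted if the peer answers with
   a NACK ('-') rather than an ACK ('+') -- see gdb_read_byte() below. */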
477 /* The GDB remote protocol transfers values in target byte order. This means
478 we can use the raw memory access routines to access the value buffer.
479 Conveniently, these also handle the case where the buffer is mis-aligned.
481 #define GET_REG8(val) do { \
482 stb_p(mem_buf, val); \
485 #define GET_REG16(val) do { \
486 stw_p(mem_buf, val); \
489 #define GET_REG32(val) do { \
490 stl_p(mem_buf, val); \
493 #define GET_REG64(val) do { \
494 stq_p(mem_buf, val); \
498 #if TARGET_LONG_BITS == 64
499 #define GET_REGL(val) GET_REG64(val)
500 #define ldtul_p(addr) ldq_p(addr)
502 #define GET_REGL(val) GET_REG32(val)
503 #define ldtul_p(addr) ldl_p(addr)
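/* Usage sketch: inside cpu_gdb_read_register() a statement like
   GET_REG32(env->eflags) stores the value into mem_buf in target byte
   order and (in the elided tail of the macro) makes the enclosing function
   return the register size in bytes, so the caller knows how far to
   advance through the buffer. */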
506 #if defined(TARGET_I386)
509 static const int gpr_map[16] = {
510 R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
511 8, 9, 10, 11, 12, 13, 14, 15
514 #define gpr_map gpr_map32
516 static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
518 #define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
520 #define IDX_IP_REG CPU_NB_REGS
521 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
522 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
523 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
524 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
525 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
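/* Worked example of the resulting numbering, assuming CPU_NB_REGS == 16
   (x86-64): 0-15 general purpose regs, 16 = IP, 17 = EFLAGS, 18-23 the six
   segment selectors, 24-31 the x87 ST registers, 32-39 the x87
   status/control slots, 40-55 the XMM registers, 56 = MXCSR, giving
   NUM_CORE_REGS == 16 * 2 + 25 == 57. */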
527 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
529 if (n < CPU_NB_REGS) {
530 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
531 GET_REG64(env->regs[gpr_map[n]]);
532 } else if (n < CPU_NB_REGS32) {
533 GET_REG32(env->regs[gpr_map32[n]]);
535 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
536 #ifdef USE_X86LDOUBLE
537 /* FIXME: byteswap float values - after fixing fpregs layout. */
538 memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
540 memset(mem_buf, 0, 10);
543 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
545 if (n < CPU_NB_REGS32 ||
546 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
547 stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
548 stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
554 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
559 case IDX_FLAGS_REG: GET_REG32(env->eflags);
561 case IDX_SEG_REGS: GET_REG32(env->segs[R_CS].selector);
562 case IDX_SEG_REGS + 1: GET_REG32(env->segs[R_SS].selector);
563 case IDX_SEG_REGS + 2: GET_REG32(env->segs[R_DS].selector);
564 case IDX_SEG_REGS + 3: GET_REG32(env->segs[R_ES].selector);
565 case IDX_SEG_REGS + 4: GET_REG32(env->segs[R_FS].selector);
566 case IDX_SEG_REGS + 5: GET_REG32(env->segs[R_GS].selector);
568 case IDX_FP_REGS + 8: GET_REG32(env->fpuc);
569 case IDX_FP_REGS + 9: GET_REG32((env->fpus & ~0x3800) |
570 (env->fpstt & 0x7) << 11);
571 case IDX_FP_REGS + 10: GET_REG32(0); /* ftag */
572 case IDX_FP_REGS + 11: GET_REG32(0); /* fiseg */
573 case IDX_FP_REGS + 12: GET_REG32(0); /* fioff */
574 case IDX_FP_REGS + 13: GET_REG32(0); /* foseg */
575 case IDX_FP_REGS + 14: GET_REG32(0); /* fooff */
576 case IDX_FP_REGS + 15: GET_REG32(0); /* fop */
578 case IDX_MXCSR_REG: GET_REG32(env->mxcsr);
584 static int cpu_x86_gdb_load_seg(CPUState *env, int sreg, uint8_t *mem_buf)
586 uint16_t selector = ldl_p(mem_buf);
588 if (selector != env->segs[sreg].selector) {
589 #if defined(CONFIG_USER_ONLY)
590 cpu_x86_load_seg(env, sreg, selector);
592 unsigned int limit, flags;
595 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
596 base = selector << 4;
600 if (!cpu_x86_get_descr_debug(env, selector, &base, &limit, &flags))
603 cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
609 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
613 if (n < CPU_NB_REGS) {
614 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
615 env->regs[gpr_map[n]] = ldtul_p(mem_buf);
616 return sizeof(target_ulong);
617 } else if (n < CPU_NB_REGS32) {
619 env->regs[n] &= ~0xffffffffUL;
620 env->regs[n] |= (uint32_t)ldl_p(mem_buf);
623 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
624 #ifdef USE_X86LDOUBLE
625 /* FIXME: byteswap float values - after fixing fpregs layout. */
626 memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
629 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
631 if (n < CPU_NB_REGS32 ||
632 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
633 env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
634 env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
640 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
641 env->eip = ldq_p(mem_buf);
644 env->eip &= ~0xffffffffUL;
645 env->eip |= (uint32_t)ldl_p(mem_buf);
649 env->eflags = ldl_p(mem_buf);
652 case IDX_SEG_REGS: return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
653 case IDX_SEG_REGS + 1: return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
654 case IDX_SEG_REGS + 2: return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
655 case IDX_SEG_REGS + 3: return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
656 case IDX_SEG_REGS + 4: return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
657 case IDX_SEG_REGS + 5: return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);
659 case IDX_FP_REGS + 8:
660 env->fpuc = ldl_p(mem_buf);
662 case IDX_FP_REGS + 9:
663 tmp = ldl_p(mem_buf);
664 env->fpstt = (tmp >> 11) & 7;
665 env->fpus = tmp & ~0x3800;
667 case IDX_FP_REGS + 10: /* ftag */ return 4;
668 case IDX_FP_REGS + 11: /* fiseg */ return 4;
669 case IDX_FP_REGS + 12: /* fioff */ return 4;
670 case IDX_FP_REGS + 13: /* foseg */ return 4;
671 case IDX_FP_REGS + 14: /* fooff */ return 4;
672 case IDX_FP_REGS + 15: /* fop */ return 4;
675 env->mxcsr = ldl_p(mem_buf);
679 /* Unrecognised register. */
683 #elif defined (TARGET_PPC)
685 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
686 expects whatever the target description contains. Due to a
687 historical mishap the FP registers appear in between core integer
688 regs and PC, MSR, CR, and so forth. We hack round this by giving the
689 FP regs zero size when talking to a newer gdb. */
690 #define NUM_CORE_REGS 71
691 #if defined (TARGET_PPC64)
692 #define GDB_CORE_XML "power64-core.xml"
694 #define GDB_CORE_XML "power-core.xml"
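/* The fixed numbering handled below: 0-31 GPRs, 32-63 FPRs, 64 = NIP,
   65 = MSR, 66 = CR, 67 = LR, 68 = CTR, 69 = XER, 70 = FPSCR, for a total
   of NUM_CORE_REGS == 71. */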
697 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
701 GET_REGL(env->gpr[n]);
706 stfq_p(mem_buf, env->fpr[n-32]);
710 case 64: GET_REGL(env->nip);
711 case 65: GET_REGL(env->msr);
716 for (i = 0; i < 8; i++)
717 cr |= env->crf[i] << (32 - ((i + 1) * 4));
720 case 67: GET_REGL(env->lr);
721 case 68: GET_REGL(env->ctr);
722 case 69: GET_REGL(env->xer);
727 GET_REG32(0); /* fpscr */
734 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
738 env->gpr[n] = ldtul_p(mem_buf);
739 return sizeof(target_ulong);
744 env->fpr[n-32] = ldfq_p(mem_buf);
749 env->nip = ldtul_p(mem_buf);
750 return sizeof(target_ulong);
752 ppc_store_msr(env, ldtul_p(mem_buf));
753 return sizeof(target_ulong);
756 uint32_t cr = ldl_p(mem_buf);
758 for (i = 0; i < 8; i++)
759 env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
763 env->lr = ldtul_p(mem_buf);
764 return sizeof(target_ulong);
766 env->ctr = ldtul_p(mem_buf);
767 return sizeof(target_ulong);
769 env->xer = ldtul_p(mem_buf);
770 return sizeof(target_ulong);
781 #elif defined (TARGET_SPARC)
783 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
784 #define NUM_CORE_REGS 86
786 #define NUM_CORE_REGS 72
790 #define GET_REGA(val) GET_REG32(val)
792 #define GET_REGA(val) GET_REGL(val)
795 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
799 GET_REGA(env->gregs[n]);
802 /* register window */
803 GET_REGA(env->regwptr[n - 8]);
805 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
808 GET_REG32(*((uint32_t *)&env->fpr[n - 32]));
810 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
812 case 64: GET_REGA(env->y);
813 case 65: GET_REGA(cpu_get_psr(env));
814 case 66: GET_REGA(env->wim);
815 case 67: GET_REGA(env->tbr);
816 case 68: GET_REGA(env->pc);
817 case 69: GET_REGA(env->npc);
818 case 70: GET_REGA(env->fsr);
819 case 71: GET_REGA(0); /* csr */
820 default: GET_REGA(0);
825 GET_REG32(*((uint32_t *)&env->fpr[n - 32]));
828 /* f32-f62 (double width, even numbers only) */
831 val = (uint64_t)*((uint32_t *)&env->fpr[(n - 64) * 2 + 32]) << 32;
832 val |= *((uint32_t *)&env->fpr[(n - 64) * 2 + 33]);
836 case 80: GET_REGL(env->pc);
837 case 81: GET_REGL(env->npc);
838 case 82: GET_REGL((cpu_get_ccr(env) << 32) |
839 ((env->asi & 0xff) << 24) |
840 ((env->pstate & 0xfff) << 8) |
842 case 83: GET_REGL(env->fsr);
843 case 84: GET_REGL(env->fprs);
844 case 85: GET_REGL(env->y);
850 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
852 #if defined(TARGET_ABI32)
855 tmp = ldl_p(mem_buf);
859 tmp = ldtul_p(mem_buf);
866 /* register window */
867 env->regwptr[n - 8] = tmp;
869 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
872 *((uint32_t *)&env->fpr[n - 32]) = tmp;
874 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
876 case 64: env->y = tmp; break;
877 case 65: cpu_put_psr(env, tmp); break;
878 case 66: env->wim = tmp; break;
879 case 67: env->tbr = tmp; break;
880 case 68: env->pc = tmp; break;
881 case 69: env->npc = tmp; break;
882 case 70: env->fsr = tmp; break;
890 env->fpr[n] = ldfl_p(mem_buf);
893 /* f32-f62 (double width, even numbers only) */
894 *((uint32_t *)&env->fpr[(n - 64) * 2 + 32]) = tmp >> 32;
895 *((uint32_t *)&env->fpr[(n - 64) * 2 + 33]) = tmp;
898 case 80: env->pc = tmp; break;
899 case 81: env->npc = tmp; break;
901 cpu_put_ccr(env, tmp >> 32);
902 env->asi = (tmp >> 24) & 0xff;
903 env->pstate = (tmp >> 8) & 0xfff;
904 cpu_put_cwp64(env, tmp & 0xff);
906 case 83: env->fsr = tmp; break;
907 case 84: env->fprs = tmp; break;
908 case 85: env->y = tmp; break;
915 #elif defined (TARGET_ARM)
917 /* Old gdb always expects FPA registers. Newer (xml-aware) gdb only expects
918 whatever the target description contains. Due to a historical mishap
919 the FPA registers appear in between core integer regs and the CPSR.
920 We hack round this by giving the FPA regs zero size when talking to a newer gdb. */
922 #define NUM_CORE_REGS 26
923 #define GDB_CORE_XML "arm-core.xml"
925 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
928 /* Core integer register. */
929 GET_REG32(env->regs[n]);
935 memset(mem_buf, 0, 12);
940 /* FPA status register. */
946 GET_REG32(cpsr_read(env));
948 /* Unknown register. */
952 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
956 tmp = ldl_p(mem_buf);
958 /* Mask out low bit of PC to work around gdb bugs. This will probably
959 cause problems if we ever implement the Jazelle DBX extensions. */
964 /* Core integer register. */
968 if (n < 24) { /* 16-23 */
969 /* FPA registers (ignored). */
976 /* FPA status register (ignored). */
982 cpsr_write (env, tmp, 0xffffffff);
985 /* Unknown register. */
989 #elif defined (TARGET_M68K)
991 #define NUM_CORE_REGS 18
993 #define GDB_CORE_XML "cf-core.xml"
995 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
999 GET_REG32(env->dregs[n]);
1000 } else if (n < 16) {
1002 GET_REG32(env->aregs[n - 8]);
1005 case 16: GET_REG32(env->sr);
1006 case 17: GET_REG32(env->pc);
1009 /* FP registers not included here because they vary between
1010 ColdFire and m68k. Use XML bits for these. */
1014 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1018 tmp = ldl_p(mem_buf);
1022 env->dregs[n] = tmp;
1023 } else if (n < 16) {
1025 env->aregs[n - 8] = tmp;
1028 case 16: env->sr = tmp; break;
1029 case 17: env->pc = tmp; break;
1035 #elif defined (TARGET_MIPS)
1037 #define NUM_CORE_REGS 73
1039 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
1042 GET_REGL(env->active_tc.gpr[n]);
1044 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
1045 if (n >= 38 && n < 70) {
1046 if (env->CP0_Status & (1 << CP0St_FR))
1047 GET_REGL(env->active_fpu.fpr[n - 38].d);
1049 GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
1052 case 70: GET_REGL((int32_t)env->active_fpu.fcr31);
1053 case 71: GET_REGL((int32_t)env->active_fpu.fcr0);
1057 case 32: GET_REGL((int32_t)env->CP0_Status);
1058 case 33: GET_REGL(env->active_tc.LO[0]);
1059 case 34: GET_REGL(env->active_tc.HI[0]);
1060 case 35: GET_REGL(env->CP0_BadVAddr);
1061 case 36: GET_REGL((int32_t)env->CP0_Cause);
1062 case 37: GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
1063 case 72: GET_REGL(0); /* fp */
1064 case 89: GET_REGL((int32_t)env->CP0_PRid);
1066 if (n >= 73 && n <= 88) {
1067 /* 16 embedded regs. */
1074 /* Convert the MIPS rounding mode in FCR31 to the softfloat (IEEE) equivalent. */
1075 static unsigned int ieee_rm[] =
1077 float_round_nearest_even,
1078 float_round_to_zero,
1082 #define RESTORE_ROUNDING_MODE \
1083 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
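/* The FCR31 RM field encodes 0 = round to nearest, 1 = toward zero,
   2 = toward +inf, 3 = toward -inf; the two table entries elided above are
   presumably float_round_up and float_round_down.  RESTORE_ROUNDING_MODE
   re-applies whatever mode gdb wrote into FCR31 to the softfloat status. */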
1085 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1089 tmp = ldtul_p(mem_buf);
1092 env->active_tc.gpr[n] = tmp;
1093 return sizeof(target_ulong);
1095 if (env->CP0_Config1 & (1 << CP0C1_FP)
1096 && n >= 38 && n < 73) {
1098 if (env->CP0_Status & (1 << CP0St_FR))
1099 env->active_fpu.fpr[n - 38].d = tmp;
1101 env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
1105 env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
1106 /* set rounding mode */
1107 RESTORE_ROUNDING_MODE;
1109 case 71: env->active_fpu.fcr0 = tmp; break;
1111 return sizeof(target_ulong);
1114 case 32: env->CP0_Status = tmp; break;
1115 case 33: env->active_tc.LO[0] = tmp; break;
1116 case 34: env->active_tc.HI[0] = tmp; break;
1117 case 35: env->CP0_BadVAddr = tmp; break;
1118 case 36: env->CP0_Cause = tmp; break;
1120 env->active_tc.PC = tmp & ~(target_ulong)1;
1122 env->hflags |= MIPS_HFLAG_M16;
1124 env->hflags &= ~(MIPS_HFLAG_M16);
1127 case 72: /* fp, ignored */ break;
1131 /* Other registers are readonly. Ignore writes. */
1135 return sizeof(target_ulong);
1137 #elif defined (TARGET_SH4)
1139 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1140 /* FIXME: We should use XML for this. */
1142 #define NUM_CORE_REGS 59
1144 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
1147 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1148 GET_REGL(env->gregs[n + 16]);
1150 GET_REGL(env->gregs[n]);
1152 } else if (n < 16) {
1153 GET_REGL(env->gregs[n]);
1154 } else if (n >= 25 && n < 41) {
1155 GET_REGL(env->fregs[(n - 25) + ((env->fpscr & FPSCR_FR) ? 16 : 0)]);
1156 } else if (n >= 43 && n < 51) {
1157 GET_REGL(env->gregs[n - 43]);
1158 } else if (n >= 51 && n < 59) {
1159 GET_REGL(env->gregs[n - (51 - 16)]);
1162 case 16: GET_REGL(env->pc);
1163 case 17: GET_REGL(env->pr);
1164 case 18: GET_REGL(env->gbr);
1165 case 19: GET_REGL(env->vbr);
1166 case 20: GET_REGL(env->mach);
1167 case 21: GET_REGL(env->macl);
1168 case 22: GET_REGL(env->sr);
1169 case 23: GET_REGL(env->fpul);
1170 case 24: GET_REGL(env->fpscr);
1171 case 41: GET_REGL(env->ssr);
1172 case 42: GET_REGL(env->spc);
1178 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1182 tmp = ldl_p(mem_buf);
1185 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1186 env->gregs[n + 16] = tmp;
1188 env->gregs[n] = tmp;
1191 } else if (n < 16) {
1192 env->gregs[n] = tmp;
1194 } else if (n >= 25 && n < 41) {
1195 env->fregs[(n - 25) + ((env->fpscr & FPSCR_FR) ? 16 : 0)] = tmp;
1197 } else if (n >= 43 && n < 51) {
1198 env->gregs[n - 43] = tmp;
1200 } else if (n >= 51 && n < 59) {
1201 env->gregs[n - (51 - 16)] = tmp;
1205 case 16: env->pc = tmp; break;
1206 case 17: env->pr = tmp; break;
1207 case 18: env->gbr = tmp; break;
1208 case 19: env->vbr = tmp; break;
1209 case 20: env->mach = tmp; break;
1210 case 21: env->macl = tmp; break;
1211 case 22: env->sr = tmp; break;
1212 case 23: env->fpul = tmp; break;
1213 case 24: env->fpscr = tmp; break;
1214 case 41: env->ssr = tmp; break;
1215 case 42: env->spc = tmp; break;
1221 #elif defined (TARGET_MICROBLAZE)
1223 #define NUM_CORE_REGS (32 + 5)
1225 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
1228 GET_REG32(env->regs[n]);
1230 GET_REG32(env->sregs[n - 32]);
1235 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1239 if (n > NUM_CORE_REGS)
1242 tmp = ldl_p(mem_buf);
1247 env->sregs[n - 32] = tmp;
1251 #elif defined (TARGET_CRIS)
1253 #define NUM_CORE_REGS 49
1256 read_register_crisv10(CPUState *env, uint8_t *mem_buf, int n)
1259 GET_REG32(env->regs[n]);
1269 GET_REG8(env->pregs[n - 16]);
1272 GET_REG8(env->pregs[n - 16]);
1276 GET_REG16(env->pregs[n - 16]);
1280 GET_REG32(env->pregs[n - 16]);
1288 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
1292 if (env->pregs[PR_VR] < 32)
1293 return read_register_crisv10(env, mem_buf, n);
1295 srs = env->pregs[PR_SRS];
1297 GET_REG32(env->regs[n]);
1300 if (n >= 21 && n < 32) {
1301 GET_REG32(env->pregs[n - 16]);
1303 if (n >= 33 && n < 49) {
1304 GET_REG32(env->sregs[srs][n - 33]);
1307 case 16: GET_REG8(env->pregs[0]);
1308 case 17: GET_REG8(env->pregs[1]);
1309 case 18: GET_REG32(env->pregs[2]);
1310 case 19: GET_REG8(srs);
1311 case 20: GET_REG16(env->pregs[4]);
1312 case 32: GET_REG32(env->pc);
1318 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1325 tmp = ldl_p(mem_buf);
1331 if (n >= 21 && n < 32) {
1332 env->pregs[n - 16] = tmp;
1335 /* FIXME: Should the support-function regs be writable? */
1339 case 18: env->pregs[PR_PID] = tmp; break;
1342 case 32: env->pc = tmp; break;
1347 #elif defined (TARGET_ALPHA)
1349 #define NUM_CORE_REGS 67
1351 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
1361 d.d = env->fir[n - 32];
1365 val = cpu_alpha_load_fpcr(env);
1375 /* 31 really is the zero register; 65 is unassigned in the
1376 gdb protocol, but is still required to occupy 8 bytes. */
1385 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1387 target_ulong tmp = ldtul_p(mem_buf);
1396 env->fir[n - 32] = d.d;
1399 cpu_alpha_store_fpcr(env, tmp);
1409 /* 31 really is the zero register; 65 is unassigned in the
1410 gdb protocol, but is still required to occupy 8 bytes. */
1417 #elif defined (TARGET_S390X)
1419 #define NUM_CORE_REGS S390_NUM_TOTAL_REGS
1421 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
1424 case S390_PSWM_REGNUM: GET_REGL(env->psw.mask); break;
1425 case S390_PSWA_REGNUM: GET_REGL(env->psw.addr); break;
1426 case S390_R0_REGNUM ... S390_R15_REGNUM:
1427 GET_REGL(env->regs[n-S390_R0_REGNUM]); break;
1428 case S390_A0_REGNUM ... S390_A15_REGNUM:
1429 GET_REG32(env->aregs[n-S390_A0_REGNUM]); break;
1430 case S390_FPC_REGNUM: GET_REG32(env->fpc); break;
1431 case S390_F0_REGNUM ... S390_F15_REGNUM:
1434 case S390_PC_REGNUM: GET_REGL(env->psw.addr); break;
1435 case S390_CC_REGNUM:
1436 env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
1438 GET_REG32(env->cc_op);
1445 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1450 tmpl = ldtul_p(mem_buf);
1451 tmp32 = ldl_p(mem_buf);
1454 case S390_PSWM_REGNUM: env->psw.mask = tmpl; break;
1455 case S390_PSWA_REGNUM: env->psw.addr = tmpl; break;
1456 case S390_R0_REGNUM ... S390_R15_REGNUM:
1457 env->regs[n-S390_R0_REGNUM] = tmpl; break;
1458 case S390_A0_REGNUM ... S390_A15_REGNUM:
1459 env->aregs[n-S390_A0_REGNUM] = tmp32; r=4; break;
1460 case S390_FPC_REGNUM: env->fpc = tmp32; r=4; break;
1461 case S390_F0_REGNUM ... S390_F15_REGNUM:
1464 case S390_PC_REGNUM: env->psw.addr = tmpl; break;
1465 case S390_CC_REGNUM: env->cc_op = tmp32; r=4; break;
1470 #elif defined (TARGET_LM32)
1472 #include "hw/lm32_pic.h"
1473 #define NUM_CORE_REGS (32 + 7)
1475 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
1478 GET_REG32(env->regs[n]);
1484 /* FIXME: put in right exception ID */
1489 GET_REG32(env->eba);
1492 GET_REG32(env->deba);
1498 GET_REG32(lm32_pic_get_im(env->pic_state));
1501 GET_REG32(lm32_pic_get_ip(env->pic_state));
1508 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1512 if (n > NUM_CORE_REGS) {
1516 tmp = ldl_p(mem_buf);
1535 lm32_pic_set_im(env->pic_state, tmp);
1538 lm32_pic_set_ip(env->pic_state, tmp);
1546 #define NUM_CORE_REGS 0
1548 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
1553 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1560 static int num_g_regs = NUM_CORE_REGS;
1563 /* Encode data using the encoding for 'x' packets. */
1564 static int memtox(char *buf, const char *mem, int len)
1572 case '#': case '$': case '*': case '}':
1584 static const char *get_feature_xml(const char *p, const char **newp)
1589 static char target_xml[1024];
1592 while (p[len] && p[len] != ':')
1597 if (strncmp(p, "target.xml", len) == 0) {
1598 /* Generate the XML description for this CPU. */
1599 if (!target_xml[0]) {
1600 GDBRegisterState *r;
1602 snprintf(target_xml, sizeof(target_xml),
1603 "<?xml version=\"1.0\"?>"
1604 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1606 "<xi:include href=\"%s\"/>",
1609 for (r = first_cpu->gdb_regs; r; r = r->next) {
1610 pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
1611 pstrcat(target_xml, sizeof(target_xml), r->xml);
1612 pstrcat(target_xml, sizeof(target_xml), "\"/>");
1614 pstrcat(target_xml, sizeof(target_xml), "</target>");
1618 for (i = 0; ; i++) {
1619 name = xml_builtin[i][0];
1620 if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
1623 return name ? xml_builtin[i][1] : NULL;
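/* For an ARM target the generated description ends up looking roughly like
   (coprocessor include names illustrative):
     <?xml version="1.0"?>
     <!DOCTYPE target SYSTEM "gdb-target.dtd">
     <target>
       <xi:include href="arm-core.xml"/>
       <xi:include href="arm-vfp.xml"/>
     </target>
   Any annex other than "target.xml" is looked up in the built-in
   xml_builtin table instead. */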
1627 static int gdb_read_register(CPUState *env, uint8_t *mem_buf, int reg)
1629 GDBRegisterState *r;
1631 if (reg < NUM_CORE_REGS)
1632 return cpu_gdb_read_register(env, mem_buf, reg);
1634 for (r = env->gdb_regs; r; r = r->next) {
1635 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1636 return r->get_reg(env, mem_buf, reg - r->base_reg);
1642 static int gdb_write_register(CPUState *env, uint8_t *mem_buf, int reg)
1644 GDBRegisterState *r;
1646 if (reg < NUM_CORE_REGS)
1647 return cpu_gdb_write_register(env, mem_buf, reg);
1649 for (r = env->gdb_regs; r; r = r->next) {
1650 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1651 return r->set_reg(env, mem_buf, reg - r->base_reg);
1657 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
1658 specifies the first register number and these registers are included in
1659 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
1660 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
1663 void gdb_register_coprocessor(CPUState * env,
1664 gdb_reg_cb get_reg, gdb_reg_cb set_reg,
1665 int num_regs, const char *xml, int g_pos)
1667 GDBRegisterState *s;
1668 GDBRegisterState **p;
1669 static int last_reg = NUM_CORE_REGS;
1671 s = (GDBRegisterState *)g_malloc0(sizeof(GDBRegisterState));
1672 s->base_reg = last_reg;
1673 s->num_regs = num_regs;
1674 s->get_reg = get_reg;
1675 s->set_reg = set_reg;
1679 /* Check for duplicates. */
1680 if (strcmp((*p)->xml, xml) == 0)
1684 /* Add to end of list. */
1685 last_reg += num_regs;
1688 if (g_pos != s->base_reg) {
1689 fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
1690 "Expected %d got %d\n", xml, g_pos, s->base_reg);
1692 num_g_regs = last_reg;
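/* Hypothetical caller sketch (names invented for illustration): a target
   with 32 extra FPU registers described by "target-fpu.xml" would call
     gdb_register_coprocessor(env, fpu_gdb_read, fpu_gdb_write,
                              32, "target-fpu.xml", 0);
   Passing g_pos == 0 keeps the block out of the 'g' packet, so gdb only
   reaches it through individual 'p'/'P' accesses. */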
1697 #ifndef CONFIG_USER_ONLY
1698 static const int xlat_gdb_type[] = {
1699 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
1700 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
1701 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
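/* These indices correspond to the remote protocol's 'Z2'/'Z3'/'Z4'
   watchpoint types; 'Z0' (software breakpoint) and 'Z1' (hardware
   breakpoint) are handled directly below without translation. */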
1705 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
1711 return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1714 case GDB_BREAKPOINT_SW:
1715 case GDB_BREAKPOINT_HW:
1716 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1717 err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
1722 #ifndef CONFIG_USER_ONLY
1723 case GDB_WATCHPOINT_WRITE:
1724 case GDB_WATCHPOINT_READ:
1725 case GDB_WATCHPOINT_ACCESS:
1726 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1727 err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
1739 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
1745 return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1748 case GDB_BREAKPOINT_SW:
1749 case GDB_BREAKPOINT_HW:
1750 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1751 err = cpu_breakpoint_remove(env, addr, BP_GDB);
1756 #ifndef CONFIG_USER_ONLY
1757 case GDB_WATCHPOINT_WRITE:
1758 case GDB_WATCHPOINT_READ:
1759 case GDB_WATCHPOINT_ACCESS:
1760 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1761 err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
1772 static void gdb_breakpoint_remove_all(void)
1776 if (kvm_enabled()) {
1777 kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
1781 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1782 cpu_breakpoint_remove_all(env, BP_GDB);
1783 #ifndef CONFIG_USER_ONLY
1784 cpu_watchpoint_remove_all(env, BP_GDB);
1789 static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
1791 #if defined(TARGET_I386)
1792 cpu_synchronize_state(s->c_cpu);
1794 #elif defined (TARGET_PPC)
1796 #elif defined (TARGET_SPARC)
1798 s->c_cpu->npc = pc + 4;
1799 #elif defined (TARGET_ARM)
1800 s->c_cpu->regs[15] = pc;
1801 #elif defined (TARGET_SH4)
1803 #elif defined (TARGET_MIPS)
1804 s->c_cpu->active_tc.PC = pc & ~(target_ulong)1;
1806 s->c_cpu->hflags |= MIPS_HFLAG_M16;
1808 s->c_cpu->hflags &= ~(MIPS_HFLAG_M16);
1810 #elif defined (TARGET_MICROBLAZE)
1811 s->c_cpu->sregs[SR_PC] = pc;
1812 #elif defined (TARGET_CRIS)
1814 #elif defined (TARGET_ALPHA)
1816 #elif defined (TARGET_S390X)
1817 cpu_synchronize_state(s->c_cpu);
1818 s->c_cpu->psw.addr = pc;
1819 #elif defined (TARGET_LM32)
1824 static inline int gdb_id(CPUState *env)
1826 #if defined(CONFIG_USER_ONLY) && defined(CONFIG_USE_NPTL)
1827 return env->host_tid;
1829 return env->cpu_index + 1;
1833 static CPUState *find_cpu(uint32_t thread_id)
1837 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1838 if (gdb_id(env) == thread_id) {
1846 static int gdb_handle_packet(GDBState *s, const char *line_buf)
1851 int ch, reg_size, type, res;
1852 char buf[MAX_PACKET_LENGTH];
1853 uint8_t mem_buf[MAX_PACKET_LENGTH];
1855 target_ulong addr, len;
1858 printf("command='%s'\n", line_buf);
1864 /* TODO: Make this return the correct value for user-mode. */
1865 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
1868 /* Remove all the breakpoints when this query is issued,
1869 * because gdb is doing an initial connect and the state
1870 * should be cleaned up.
1872 gdb_breakpoint_remove_all();
1876 addr = strtoull(p, (char **)&p, 16);
1877 gdb_set_cpu_pc(s, addr);
1883 s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
1884 if (s->signal == -1)
1889 if (strncmp(p, "Cont", 4) == 0) {
1890 int res_signal, res_thread;
1894 put_packet(s, "vCont;c;C;s;S");
1909 if (action == 'C' || action == 'S') {
1910 signal = strtoul(p, (char **)&p, 16);
1911 } else if (action != 'c' && action != 's') {
1917 thread = strtoull(p+1, (char **)&p, 16);
1919 action = tolower(action);
1920 if (res == 0 || (res == 'c' && action == 's')) {
1922 res_signal = signal;
1923 res_thread = thread;
1927 if (res_thread != -1 && res_thread != 0) {
1928 env = find_cpu(res_thread);
1930 put_packet(s, "E22");
1936 cpu_single_step(s->c_cpu, sstep_flags);
1938 s->signal = res_signal;
1944 goto unknown_command;
1947 /* Kill the target */
1948 fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
1952 gdb_breakpoint_remove_all();
1953 gdb_syscall_mode = GDB_SYS_DISABLED;
1955 put_packet(s, "OK");
1959 addr = strtoull(p, (char **)&p, 16);
1960 gdb_set_cpu_pc(s, addr);
1962 cpu_single_step(s->c_cpu, sstep_flags);
1970 ret = strtoull(p, (char **)&p, 16);
1973 err = strtoull(p, (char **)&p, 16);
1980 if (gdb_current_syscall_cb)
1981 gdb_current_syscall_cb(s->c_cpu, ret, err);
1983 put_packet(s, "T02");
1990 cpu_synchronize_state(s->g_cpu);
1992 for (addr = 0; addr < num_g_regs; addr++) {
1993 reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
1996 memtohex(buf, mem_buf, len);
2000 cpu_synchronize_state(s->g_cpu);
2001 registers = mem_buf;
2002 len = strlen(p) / 2;
2003 hextomem((uint8_t *)registers, p, len);
2004 for (addr = 0; addr < num_g_regs && len > 0; addr++) {
2005 reg_size = gdb_write_register(s->g_cpu, registers, addr);
2007 registers += reg_size;
2009 put_packet(s, "OK");
2012 addr = strtoull(p, (char **)&p, 16);
2015 len = strtoull(p, NULL, 16);
2016 if (cpu_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 0) != 0) {
2017 put_packet (s, "E14");
2019 memtohex(buf, mem_buf, len);
2024 addr = strtoull(p, (char **)&p, 16);
2027 len = strtoull(p, (char **)&p, 16);
2030 hextomem(mem_buf, p, len);
2031 if (cpu_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 1) != 0)
2032 put_packet(s, "E14");
2034 put_packet(s, "OK");
2037 /* Older gdbs are really dumb, and don't use 'g' if 'p' is available.
2038 This works, but can be very slow. Anything new enough to
2039 understand XML also knows how to use this properly. */
2041 goto unknown_command;
2042 addr = strtoull(p, (char **)&p, 16);
2043 reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
2045 memtohex(buf, mem_buf, reg_size);
2048 put_packet(s, "E14");
2053 goto unknown_command;
2054 addr = strtoull(p, (char **)&p, 16);
2057 reg_size = strlen(p) / 2;
2058 hextomem(mem_buf, p, reg_size);
2059 gdb_write_register(s->g_cpu, mem_buf, addr);
2060 put_packet(s, "OK");
2064 type = strtoul(p, (char **)&p, 16);
2067 addr = strtoull(p, (char **)&p, 16);
2070 len = strtoull(p, (char **)&p, 16);
2072 res = gdb_breakpoint_insert(addr, len, type);
2074 res = gdb_breakpoint_remove(addr, len, type);
2076 put_packet(s, "OK");
2077 else if (res == -ENOSYS)
2080 put_packet(s, "E22");
2084 thread = strtoull(p, (char **)&p, 16);
2085 if (thread == -1 || thread == 0) {
2086 put_packet(s, "OK");
2089 env = find_cpu(thread);
2091 put_packet(s, "E22");
2097 put_packet(s, "OK");
2101 put_packet(s, "OK");
2104 put_packet(s, "E22");
2109 thread = strtoull(p, (char **)&p, 16);
2110 env = find_cpu(thread);
2113 put_packet(s, "OK");
2115 put_packet(s, "E22");
2120 /* parse any 'q' packets here */
2121 if (!strcmp(p,"qemu.sstepbits")) {
2122 /* Query Breakpoint bit definitions */
2123 snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2129 } else if (strncmp(p,"qemu.sstep",10) == 0) {
2130 /* Display or change the sstep_flags */
2133 /* Display current setting */
2134 snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
2139 type = strtoul(p, (char **)&p, 16);
2141 put_packet(s, "OK");
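/* From a connected gdb these QEMU-specific queries can be driven by hand,
   e.g. "maintenance packet qqemu.sstepbits" to list the bits and
   "maintenance packet qqemu.sstep=0x5" to change the mask (the leading
   'q' is part of the raw packet text). */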
2143 } else if (strcmp(p,"C") == 0) {
2144 /* "Current thread" remains vague in the spec, so always return
2145 * the first CPU (gdb returns the first thread). */
2146 put_packet(s, "QC1");
2148 } else if (strcmp(p,"fThreadInfo") == 0) {
2149 s->query_cpu = first_cpu;
2150 goto report_cpuinfo;
2151 } else if (strcmp(p,"sThreadInfo") == 0) {
2154 snprintf(buf, sizeof(buf), "m%x", gdb_id(s->query_cpu));
2156 s->query_cpu = s->query_cpu->next_cpu;
2160 } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
2161 thread = strtoull(p+16, (char **)&p, 16);
2162 env = find_cpu(thread);
2164 cpu_synchronize_state(env);
2165 len = snprintf((char *)mem_buf, sizeof(mem_buf),
2166 "CPU#%d [%s]", env->cpu_index,
2167 env->halted ? "halted " : "running");
2168 memtohex(buf, mem_buf, len);
2173 #ifdef CONFIG_USER_ONLY
2174 else if (strncmp(p, "Offsets", 7) == 0) {
2175 TaskState *ts = s->c_cpu->opaque;
2177 snprintf(buf, sizeof(buf),
2178 "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
2179 ";Bss=" TARGET_ABI_FMT_lx,
2180 ts->info->code_offset,
2181 ts->info->data_offset,
2182 ts->info->data_offset);
2186 #else /* !CONFIG_USER_ONLY */
2187 else if (strncmp(p, "Rcmd,", 5) == 0) {
2188 int len = strlen(p + 5);
2190 if ((len % 2) != 0) {
2191 put_packet(s, "E01");
2194 hextomem(mem_buf, p + 5, len);
2197 qemu_chr_read(s->mon_chr, mem_buf, len);
2198 put_packet(s, "OK");
2201 #endif /* !CONFIG_USER_ONLY */
2202 if (strncmp(p, "Supported", 9) == 0) {
2203 snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
2205 pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
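/* With MAX_PACKET_LENGTH == 4096 the reply is "PacketSize=1000" plus, when
   a target XML description is available, ";qXfer:features:read+", which is
   what prompts gdb to issue the qXfer request handled just below. */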
2211 if (strncmp(p, "Xfer:features:read:", 19) == 0) {
2213 target_ulong total_len;
2217 xml = get_feature_xml(p, &p);
2219 snprintf(buf, sizeof(buf), "E00");
2226 addr = strtoul(p, (char **)&p, 16);
2229 len = strtoul(p, (char **)&p, 16);
2231 total_len = strlen(xml);
2232 if (addr > total_len) {
2233 snprintf(buf, sizeof(buf), "E00");
2237 if (len > (MAX_PACKET_LENGTH - 5) / 2)
2238 len = (MAX_PACKET_LENGTH - 5) / 2;
2239 if (len < total_len - addr) {
2241 len = memtox(buf + 1, xml + addr, len);
2244 len = memtox(buf + 1, xml + addr, total_len - addr);
2246 put_packet_binary(s, buf, len + 1);
2250 /* Unrecognised 'q' command. */
2251 goto unknown_command;
2255 /* put empty packet */
2263 void gdb_set_stop_cpu(CPUState *env)
2265 gdbserver_state->c_cpu = env;
2266 gdbserver_state->g_cpu = env;
2269 #ifndef CONFIG_USER_ONLY
2270 static void gdb_vm_state_change(void *opaque, int running, int reason)
2272 GDBState *s = gdbserver_state;
2273 CPUState *env = s->c_cpu;
2278 if (running || s->state == RS_INACTIVE || s->state == RS_SYSCALL) {
2283 if (env->watchpoint_hit) {
2284 switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
2295 snprintf(buf, sizeof(buf),
2296 "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
2297 GDB_SIGNAL_TRAP, gdb_id(env), type,
2298 env->watchpoint_hit->vaddr);
2299 env->watchpoint_hit = NULL;
2303 ret = GDB_SIGNAL_TRAP;
2306 ret = GDB_SIGNAL_INT;
2308 case VMSTOP_SHUTDOWN:
2309 ret = GDB_SIGNAL_QUIT;
2311 case VMSTOP_DISKFULL:
2312 ret = GDB_SIGNAL_IO;
2314 case VMSTOP_WATCHDOG:
2315 ret = GDB_SIGNAL_ALRM;
2318 ret = GDB_SIGNAL_ABRT;
2323 case VMSTOP_MIGRATE:
2324 ret = GDB_SIGNAL_XCPU;
2327 ret = GDB_SIGNAL_UNKNOWN;
2330 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, gdb_id(env));
2335 /* disable single step if it was enabled */
2336 cpu_single_step(env, 0);
2340 /* Send a gdb syscall request.
2341 This accepts limited printf-style format specifiers, specifically:
2342 %x - target_ulong argument printed in hex.
2343 %lx - 64-bit argument printed in hex.
2344 %s - string pointer (target_ulong) and length (int) pair. */
2345 void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
2354 s = gdbserver_state;
2357 gdb_current_syscall_cb = cb;
2358 s->state = RS_SYSCALL;
2359 #ifndef CONFIG_USER_ONLY
2360 vm_stop(VMSTOP_DEBUG);
2371 addr = va_arg(va, target_ulong);
2372 p += snprintf(p, &buf[sizeof(buf)] - p, TARGET_FMT_lx, addr);
2375 if (*(fmt++) != 'x')
2377 i64 = va_arg(va, uint64_t);
2378 p += snprintf(p, &buf[sizeof(buf)] - p, "%" PRIx64, i64);
2381 addr = va_arg(va, target_ulong);
2382 p += snprintf(p, &buf[sizeof(buf)] - p, TARGET_FMT_lx "/%x",
2383 addr, va_arg(va, int));
2387 fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
2398 #ifdef CONFIG_USER_ONLY
2399 gdb_handlesig(s->c_cpu, 0);
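/* Illustrative (hypothetical) request: a semihosted write could be
   forwarded with
     gdb_do_syscall(cb, "write,%x,%x,%x", fd, buf_addr, count);
   which, per the format handling above, expands each %x to a hex
   target_ulong and ships the request to gdb; the 'F' reply carrying the
   return value and errno is parsed back in gdb_handle_packet() above and
   delivered through gdb_current_syscall_cb. */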
2405 static void gdb_read_byte(GDBState *s, int ch)
2410 #ifndef CONFIG_USER_ONLY
2411 if (s->last_packet_len) {
2412 /* Waiting for a response to the last packet. If we see the start
2413 of a new command then abandon the previous response. */
2416 printf("Got NACK, retransmitting\n");
2418 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
2422 printf("Got ACK\n");
2424 printf("Got '%c' when expecting ACK/NACK\n", ch);
2426 if (ch == '+' || ch == '$')
2427 s->last_packet_len = 0;
2432 /* when the CPU is running, we cannot do anything except stop
2433 it when receiving a char */
2434 vm_stop(VMSTOP_USER);
2441 s->line_buf_index = 0;
2442 s->state = RS_GETLINE;
2447 s->state = RS_CHKSUM1;
2448 } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
2451 s->line_buf[s->line_buf_index++] = ch;
2455 s->line_buf[s->line_buf_index] = '\0';
2456 s->line_csum = fromhex(ch) << 4;
2457 s->state = RS_CHKSUM2;
2460 s->line_csum |= fromhex(ch);
2462 for(i = 0; i < s->line_buf_index; i++) {
2463 csum += s->line_buf[i];
2465 if (s->line_csum != (csum & 0xff)) {
2467 put_buffer(s, &reply, 1);
2471 put_buffer(s, &reply, 1);
2472 s->state = gdb_handle_packet(s, s->line_buf);
2481 /* Tell the remote gdb that the process has exited. */
2482 void gdb_exit(CPUState *env, int code)
2487 s = gdbserver_state;
2491 #ifdef CONFIG_USER_ONLY
2492 if (gdbserver_fd < 0 || s->fd < 0) {
2497 snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
2500 #ifndef CONFIG_USER_ONLY
2502 qemu_chr_close(s->chr);
2507 #ifdef CONFIG_USER_ONLY
2513 s = gdbserver_state;
2515 if (gdbserver_fd < 0 || s->fd < 0)
2522 gdb_handlesig (CPUState *env, int sig)
2528 s = gdbserver_state;
2529 if (gdbserver_fd < 0 || s->fd < 0)
2532 /* disable single step if it was enabled */
2533 cpu_single_step(env, 0);
2538 snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb (sig));
2541 /* put_packet() might have detected that the peer terminated the
2548 s->running_state = 0;
2549 while (s->running_state == 0) {
2550 n = read (s->fd, buf, 256);
2555 for (i = 0; i < n; i++)
2556 gdb_read_byte (s, buf[i]);
2558 else if (n == 0 || errno != EAGAIN)
2560 /* XXX: Connection closed. Should probably wait for another
2561 connection before continuing. */
2570 /* Tell the remote gdb that the process has exited due to SIG. */
2571 void gdb_signalled(CPUState *env, int sig)
2576 s = gdbserver_state;
2577 if (gdbserver_fd < 0 || s->fd < 0)
2580 snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb (sig));
2584 static void gdb_accept(void)
2587 struct sockaddr_in sockaddr;
2592 len = sizeof(sockaddr);
2593 fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
2594 if (fd < 0 && errno != EINTR) {
2597 } else if (fd >= 0) {
2599 fcntl(fd, F_SETFD, FD_CLOEXEC);
2605 /* set short latency */
2607 setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *)&val, sizeof(val));
2609 s = g_malloc0(sizeof(GDBState));
2610 s->c_cpu = first_cpu;
2611 s->g_cpu = first_cpu;
2615 gdbserver_state = s;
2617 fcntl(fd, F_SETFL, O_NONBLOCK);
2620 static int gdbserver_open(int port)
2622 struct sockaddr_in sockaddr;
2625 fd = socket(PF_INET, SOCK_STREAM, 0);
2631 fcntl(fd, F_SETFD, FD_CLOEXEC);
2634 /* allow fast reuse */
2636 setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *)&val, sizeof(val));
2638 sockaddr.sin_family = AF_INET;
2639 sockaddr.sin_port = htons(port);
2640 sockaddr.sin_addr.s_addr = 0;
2641 ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
2646 ret = listen(fd, 0);
2654 int gdbserver_start(int port)
2656 gdbserver_fd = gdbserver_open(port);
2657 if (gdbserver_fd < 0)
2659 /* accept connections */
2664 /* Disable gdb stub for child processes. */
2665 void gdbserver_fork(CPUState *env)
2667 GDBState *s = gdbserver_state;
2668 if (gdbserver_fd < 0 || s->fd < 0)
2672 cpu_breakpoint_remove_all(env, BP_GDB);
2673 cpu_watchpoint_remove_all(env, BP_GDB);
2676 static int gdb_chr_can_receive(void *opaque)
2678 /* We can handle an arbitrarily large amount of data.
2679 Pick the maximum packet size, which is as good as anything. */
2680 return MAX_PACKET_LENGTH;
2683 static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
2687 for (i = 0; i < size; i++) {
2688 gdb_read_byte(gdbserver_state, buf[i]);
2692 static void gdb_chr_event(void *opaque, int event)
2695 case CHR_EVENT_OPENED:
2696 vm_stop(VMSTOP_USER);
2704 static void gdb_monitor_output(GDBState *s, const char *msg, int len)
2706 char buf[MAX_PACKET_LENGTH];
2709 if (len > (MAX_PACKET_LENGTH/2) - 1)
2710 len = (MAX_PACKET_LENGTH/2) - 1;
2711 memtohex(buf + 1, (uint8_t *)msg, len);
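/* Monitor output travels to gdb as console-output packets: the (elided)
   first byte of buf is presumably 'O', followed by the hex-encoded text,
   which gdb prints on the user's terminal. */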
2715 static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
2717 const char *p = (const char *)buf;
2720 max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
2722 if (len <= max_sz) {
2723 gdb_monitor_output(gdbserver_state, p, len);
2726 gdb_monitor_output(gdbserver_state, p, max_sz);
2734 static void gdb_sigterm_handler(int signal)
2737 vm_stop(VMSTOP_USER);
2742 int gdbserver_start(const char *device)
2745 char gdbstub_device_name[128];
2746 CharDriverState *chr = NULL;
2747 CharDriverState *mon_chr;
2751 if (strcmp(device, "none") != 0) {
2752 if (strstart(device, "tcp:", NULL)) {
2753 /* enforce required TCP attributes */
2754 snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
2755 "%s,nowait,nodelay,server", device);
2756 device = gdbstub_device_name;
2759 else if (strcmp(device, "stdio") == 0) {
2760 struct sigaction act;
2762 memset(&act, 0, sizeof(act));
2763 act.sa_handler = gdb_sigterm_handler;
2764 sigaction(SIGINT, &act, NULL);
2767 chr = qemu_chr_open("gdb", device, NULL);
2771 qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
2772 gdb_chr_event, NULL);
2775 s = gdbserver_state;
2777 s = g_malloc0(sizeof(GDBState));
2778 gdbserver_state = s;
2780 qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
2782 /* Initialize a monitor terminal for gdb */
2783 mon_chr = g_malloc0(sizeof(*mon_chr));
2784 mon_chr->chr_write = gdb_monitor_write;
2785 monitor_init(mon_chr, 0);
2788 qemu_chr_close(s->chr);
2789 mon_chr = s->mon_chr;
2790 memset(s, 0, sizeof(GDBState));
2792 s->c_cpu = first_cpu;
2793 s->g_cpu = first_cpu;
2795 s->state = chr ? RS_IDLE : RS_INACTIVE;
2796 s->mon_chr = mon_chr;