4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu-common.h"
21 #ifdef CONFIG_USER_ONLY
32 #include "monitor/monitor.h"
33 #include "sysemu/char.h"
34 #include "sysemu/sysemu.h"
35 #include "exec/gdbstub.h"
38 #define MAX_PACKET_LENGTH 4096
41 #include "qemu/sockets.h"
42 #include "sysemu/kvm.h"
43 #include "qemu/bitops.h"
45 static inline int target_memory_rw_debug(CPUState *cpu, target_ulong addr,
46 uint8_t *buf, int len, bool is_write)
48 CPUClass *cc = CPU_GET_CLASS(cpu);
50 if (cc->memory_rw_debug) {
51 return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
53 return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
65 GDB_SIGNAL_UNKNOWN = 143
68 #ifdef CONFIG_USER_ONLY
70 /* Map target signal numbers to GDB protocol signal numbers and vice
71 * versa. For user emulation's currently supported systems, we can
72 * assume most signals are defined.
75 static int gdb_signal_table[] = {
235 /* In system mode we only need SIGINT and SIGTRAP; other signals
236 are not yet supported. */
243 static int gdb_signal_table[] = {
253 #ifdef CONFIG_USER_ONLY
254 static int target_signal_to_gdb (int sig)
257 for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
258 if (gdb_signal_table[i] == sig)
260 return GDB_SIGNAL_UNKNOWN;
264 static int gdb_signal_to_target (int sig)
266 if (sig < ARRAY_SIZE (gdb_signal_table))
267 return gdb_signal_table[sig];
/* One supplemental (coprocessor) register set registered for a CPU via
 * gdb_register_coprocessor(); kept in a singly linked list per CPU.
 * Only the list link is visible in this excerpt. */
274 typedef struct GDBRegisterState {
280 struct GDBRegisterState *next;
/* Per-connection state of the gdb stub: the CPUs the debugger is
 * currently operating on, the packet parser state, and I/O buffers. */
290 typedef struct GDBState {
291 CPUState *c_cpu; /* current CPU for step/continue ops */
292 CPUState *g_cpu; /* current CPU for other ops */
293 CPUState *query_cpu; /* for q{f|s}ThreadInfo */
294 enum RSState state; /* parsing state */
295 char line_buf[MAX_PACKET_LENGTH];
/* +4 leaves room for the '$' prefix and '#xx' checksum framing. */
298 uint8_t last_packet[MAX_PACKET_LENGTH + 4];
301 #ifdef CONFIG_USER_ONLY
/* System-mode connection uses a chardev plus a monitor passthrough
 * channel for the qRcmd packet. */
305 CharDriverState *chr;
306 CharDriverState *mon_chr;
/* Pending semihosting syscall request ('F...' packet) and its
 * completion callback. */
308 char syscall_buf[256];
309 gdb_syscall_complete_cb current_syscall_cb;
312 /* By default use no IRQs and no timers while single stepping so as to
313 * make single stepping like an ICE HW step.
315 static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;
/* Single global connection; the stub supports one debugger at a time. */
317 static GDBState *gdbserver_state;
319 /* This is an ugly hack to cope with both new and old gdb.
320 If gdb sends qXfer:features:read then assume we're talking to a newish
321 gdb that understands target descriptions. */
322 static int gdb_has_xml;
324 #ifdef CONFIG_USER_ONLY
325 /* XXX: This is not thread safe. Do we care? */
/* Listening socket fd for user-mode emulation; -1 until bound. */
326 static int gdbserver_fd = -1;
/* Read a single byte from the gdb socket (user-mode only).
 * Retries on EINTR/EAGAIN; ECONNRESET and EOF are treated as the
 * debugger going away.  NOTE(review): return values for the error
 * paths are elided in this excerpt — presumably -1 on close/error. */
328 static int get_char(GDBState *s)
334 ret = qemu_recv(s->fd, &ch, 1, 0);
336 if (errno == ECONNRESET)
338 if (errno != EINTR && errno != EAGAIN)
340 } else if (ret == 0) {
358 /* If gdb is connected when the first semihosting syscall occurs then use
359 remote gdb syscalls. Otherwise use native file IO. */
360 int use_gdb_syscalls(void)
/* The decision is latched on first use: once GDB_SYS_UNKNOWN has been
 * resolved, later attach/detach does not change the mode. */
362 if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
363 gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
366 return gdb_syscall_mode == GDB_SYS_ENABLED;
369 /* Resume execution. */
370 static inline void gdb_continue(GDBState *s)
372 #ifdef CONFIG_USER_ONLY
/* User-mode: flag the main loop to leave the gdb wait state. */
373 s->running_state = 1;
/* System-mode: a panicked guest is put back into DEBUG state rather
 * than resumed; otherwise resume only if no reset is pending. */
375 if (runstate_check(RUN_STATE_GUEST_PANICKED)) {
376 runstate_set(RUN_STATE_DEBUG);
378 if (!runstate_needs_reset()) {
/* Write len raw bytes to the debugger: directly to the socket in
 * user mode (retrying on EINTR/EAGAIN), via the chardev otherwise. */
384 static void put_buffer(GDBState *s, const uint8_t *buf, int len)
386 #ifdef CONFIG_USER_ONLY
390 ret = send(s->fd, buf, len, 0);
392 if (errno != EINTR && errno != EAGAIN)
/* qemu_chr_fe_write is used blindly; short writes are not handled here. */
400 qemu_chr_fe_write(s->chr, buf, len);
/* Convert one ASCII hex digit ('0'-'9', 'A'-'F', 'a'-'f') to its
 * numeric value 0..15.  Any other character yields 0, matching the
 * lenient parsing the gdb remote protocol code relies on. */
static inline int fromhex(int v)
{
    if (v >= '0' && v <= '9')
        return v - '0';
    else if (v >= 'A' && v <= 'F')
        return v - 'A' + 10;
    else if (v >= 'a' && v <= 'f')
        return v - 'a' + 10;
    else
        return 0;
}
416 static inline int tohex(int v)
424 static void memtohex(char *buf, const uint8_t *mem, int len)
429 for(i = 0; i < len; i++) {
431 *q++ = tohex(c >> 4);
432 *q++ = tohex(c & 0xf);
437 static void hextomem(uint8_t *mem, const char *buf, int len)
441 for(i = 0; i < len; i++) {
442 mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
447 /* return -1 if error, 0 if OK */
/* Frame and send one remote-protocol packet: payload is wrapped as
 * "$<data>#<2-digit-checksum>" and a copy is kept in last_packet so a
 * NACK ('-') can trigger retransmission. */
448 static int put_packet_binary(GDBState *s, const char *buf, int len)
459 for(i = 0; i < len; i++) {
463 *(p++) = tohex((csum >> 4) & 0xf);
464 *(p++) = tohex((csum) & 0xf);
466 s->last_packet_len = p - s->last_packet;
467 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
469 #ifdef CONFIG_USER_ONLY
482 /* return -1 if error, 0 if OK */
/* Convenience wrapper: send a NUL-terminated string as a packet. */
483 static int put_packet(GDBState *s, const char *buf)
486 printf("reply='%s'\n", buf);
489 return put_packet_binary(s, buf, strlen(buf));
492 /* The GDB remote protocol transfers values in target byte order. This means
493 we can use the raw memory access routines to access the value buffer.
494 Conveniently, these also handle the case where the buffer is mis-aligned.
496 #define GET_REG8(val) do { \
497 stb_p(mem_buf, val); \
500 #define GET_REG16(val) do { \
501 stw_p(mem_buf, val); \
504 #define GET_REG32(val) do { \
505 stl_p(mem_buf, val); \
508 #define GET_REG64(val) do { \
509 stq_p(mem_buf, val); \
513 #if TARGET_LONG_BITS == 64
514 #define GET_REGL(val) GET_REG64(val)
515 #define ldtul_p(addr) ldq_p(addr)
517 #define GET_REGL(val) GET_REG32(val)
518 #define ldtul_p(addr) ldl_p(addr)
521 #if defined(TARGET_I386)
523 #include "target-i386/gdbstub.c"
525 #elif defined (TARGET_PPC)
527 #if defined (TARGET_PPC64)
528 #define GDB_CORE_XML "power64-core.xml"
530 #define GDB_CORE_XML "power-core.xml"
533 #include "target-ppc/gdbstub.c"
535 #elif defined (TARGET_SPARC)
538 #define GET_REGA(val) GET_REG32(val)
540 #define GET_REGA(val) GET_REGL(val)
/* SPARC: read core register n into mem_buf for gdb.
 * Layout: g0-g7, window regs (regwptr), float regs, then control
 * registers (Y/PSR/WIM/TBR/PC/NPC/FPSR/CPSR on 32-bit, CCR/ASI/
 * PSTATE/CWP packed into one value on 64-bit). */
543 static int cpu_gdb_read_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
547 GET_REGA(env->gregs[n]);
550 /* register window */
551 GET_REGA(env->regwptr[n - 8]);
553 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
/* 32-bit float regs are stored pairwise in 64-bit fpr slots. */
557 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
559 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
562 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
567 GET_REGA(cpu_get_psr(env));
579 GET_REGA(0); /* csr */
587 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
589 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
593 /* f32-f62 (double width, even numbers only) */
594 GET_REG64(env->fpr[(n - 32) / 2].ll);
/* SPARC64 state register: CCR, ASI, PSTATE and CWP packed into one
 * 64-bit value, matching gdb's expectations. */
602 GET_REGL((cpu_get_ccr(env) << 32) |
603 ((env->asi & 0xff) << 24) |
604 ((env->pstate & 0xfff) << 8) |
/* SPARC: write gdb-supplied value in mem_buf to core register n.
 * Mirrors the read side's register layout. */
617 static int cpu_gdb_write_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
619 #if defined(TARGET_ABI32)
622 tmp = ldl_p(mem_buf);
626 tmp = ldtul_p(mem_buf);
633 /* register window */
634 env->regwptr[n - 8] = tmp;
636 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
641 env->fpr[(n - 32) / 2].l.lower = tmp;
643 env->fpr[(n - 32) / 2].l.upper = tmp;
646 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
652 cpu_put_psr(env, tmp);
677 tmp = ldl_p(mem_buf);
679 env->fpr[(n - 32) / 2].l.lower = tmp;
681 env->fpr[(n - 32) / 2].l.upper = tmp;
685 /* f32-f62 (double width, even numbers only) */
686 env->fpr[(n - 32) / 2].ll = tmp;
/* Unpack the combined SPARC64 state register back into its parts. */
696 cpu_put_ccr(env, tmp >> 32);
697 env->asi = (tmp >> 24) & 0xff;
698 env->pstate = (tmp >> 8) & 0xfff;
699 cpu_put_cwp64(env, tmp & 0xff);
717 #elif defined (TARGET_ARM)
719 /* Old gdb always expect FPA registers. Newer (xml-aware) gdb only expect
720 whatever the target description contains. Due to a historical mishap
721 the FPA registers appear in between core integer regs and the CPSR.
722 We hack round this by giving the FPA regs zero size when talking to a
724 #define GDB_CORE_XML "arm-core.xml"
/* ARM: read core register n for gdb.  Legacy (non-XML) gdb expects
 * FPA registers between the core regs and CPSR; these are faked as
 * zeros (12-byte FPA regs) since QEMU does not model the FPA. */
726 static int cpu_gdb_read_register(CPUARMState *env, uint8_t *mem_buf, int n)
729 /* Core integer register. */
730 GET_REG32(env->regs[n])_
751 /* Unknown register. */
/* ARM: write gdb-supplied value to core register n.  FPA registers
 * and FPA status are accepted but ignored (QEMU does not model FPA). */
755 static int cpu_gdb_write_register(CPUARMState *env, uint8_t *mem_buf, int n)
759 tmp = ldl_p(mem_buf);
761 /* Mask out low bit of PC to workaround gdb bugs. This will probably
762 cause problems if we ever implement the Jazelle DBX extensions. */
768 /* Core integer register. */
772 if (n < 24) { /* 16-23 */
773 /* FPA registers (ignored). */
781 /* FPA status register (ignored). */
788 cpsr_write(env, tmp, 0xffffffff);
791 /* Unknown register. */
795 #elif defined (TARGET_M68K)
797 #define GDB_CORE_XML "cf-core.xml"
/* M68K/ColdFire: read data regs d0-d7 (0-7), address regs a0-a7
 * (8-15), then control registers for gdb. */
799 static int cpu_gdb_read_register(CPUM68KState *env, uint8_t *mem_buf, int n)
803 GET_REG32(env->dregs[n]);
806 GET_REG32(env->aregs[n - 8]);
815 /* FP registers not included here because they vary between
816 ColdFire and m68k. Use XML bits for these. */
/* M68K/ColdFire: write gdb-supplied value to core register n
 * (same numbering as the read side). */
820 static int cpu_gdb_write_register(CPUM68KState *env, uint8_t *mem_buf, int n)
824 tmp = ldl_p(mem_buf);
831 env->aregs[n - 8] = tmp;
846 #elif defined (TARGET_MIPS)
/* MIPS: read register n for gdb.  0-31 GPRs; 38-69 FPRs (64-bit when
 * CP0_Status.FR is set, else 32-bit words) plus FCSR/FIR; then CP0 and
 * special registers (Status, LO, HI, BadVAddr, Cause, PC, PRid). */
848 static int cpu_gdb_read_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
851 GET_REGL(env->active_tc.gpr[n]);
/* FPU registers only exist if CP0_Config1 reports an FPU. */
853 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
854 if (n >= 38 && n < 70) {
855 if (env->CP0_Status & (1 << CP0St_FR)) {
856 GET_REGL(env->active_fpu.fpr[n - 38].d);
858 GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
863 GET_REGL((int32_t)env->active_fpu.fcr31);
865 GET_REGL((int32_t)env->active_fpu.fcr0);
870 GET_REGL((int32_t)env->CP0_Status);
872 GET_REGL(env->active_tc.LO[0]);
874 GET_REGL(env->active_tc.HI[0]);
876 GET_REGL(env->CP0_BadVAddr);
878 GET_REGL((int32_t)env->CP0_Cause);
/* Low PC bit reports the microMIPS/MIPS16 ISA mode to gdb. */
880 GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
882 GET_REGL(0); /* fp */
884 GET_REGL((int32_t)env->CP0_PRid);
886 if (n >= 73 && n <= 88) {
887 /* 16 embedded regs. */
894 /* convert MIPS rounding mode in FCR31 to IEEE library */
895 static unsigned int ieee_rm[] = {
896 float_round_nearest_even,
901 #define RESTORE_ROUNDING_MODE \
902 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], \
903 &env->active_fpu.fp_status)
/* MIPS: write gdb-supplied value to register n (numbering mirrors the
 * read side).  Writing FCR31 also refreshes the softfloat rounding
 * mode via RESTORE_ROUNDING_MODE. */
905 static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
909 tmp = ldtul_p(mem_buf);
912 env->active_tc.gpr[n] = tmp;
913 return sizeof(target_ulong);
915 if (env->CP0_Config1 & (1 << CP0C1_FP)
916 && n >= 38 && n < 73) {
918 if (env->CP0_Status & (1 << CP0St_FR)) {
919 env->active_fpu.fpr[n - 38].d = tmp;
921 env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
/* FCR31 has read-only bits masked off before being stored. */
926 env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
927 /* set rounding mode */
928 RESTORE_ROUNDING_MODE;
931 env->active_fpu.fcr0 = tmp;
934 return sizeof(target_ulong);
938 env->CP0_Status = tmp;
941 env->active_tc.LO[0] = tmp;
944 env->active_tc.HI[0] = tmp;
947 env->CP0_BadVAddr = tmp;
950 env->CP0_Cause = tmp;
/* Low PC bit selects the compressed (MIPS16/microMIPS) ISA mode. */
953 env->active_tc.PC = tmp & ~(target_ulong)1;
955 env->hflags |= MIPS_HFLAG_M16;
957 env->hflags &= ~(MIPS_HFLAG_M16);
960 case 72: /* fp, ignored */
966 /* Other registers are readonly. Ignore writes. */
970 return sizeof(target_ulong);
972 #elif defined(TARGET_OPENRISC)
/* OpenRISC: read general-purpose register n for gdb. */
974 static int cpu_gdb_read_register(CPUOpenRISCState *env, uint8_t *mem_buf, int n)
977 GET_REG32(env->gpr[n]);
/* OpenRISC: write gdb-supplied 32-bit value to register n, after a
 * range check against the advertised core register count. */
996 static int cpu_gdb_write_register(CPUOpenRISCState *env,
997 uint8_t *mem_buf, int n)
999 OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
1000 CPUClass *cc = CPU_GET_CLASS(cpu);
/* NOTE(review): '>' looks like an off-by-one (valid indices are
 * 0..gdb_num_core_regs-1); should presumably be '>=' — confirm
 * against the callers before changing. */
1003 if (n > cc->gdb_num_core_regs) {
1007 tmp = ldl_p(mem_buf);
1031 #elif defined (TARGET_SH4)
1033 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1034 /* FIXME: We should use XML for this. */
/* SH4: read register n for gdb.  r0-r7 come from the active register
 * bank (selected by SR.MD/SR.RB); floats come from the bank chosen by
 * FPSCR.FR; high indices map back into gregs for the shadow bank. */
1036 static int cpu_gdb_read_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1040 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1041 GET_REGL(env->gregs[n + 16]);
1043 GET_REGL(env->gregs[n]);
1046 GET_REGL(env->gregs[n]);
1056 GET_REGL(env->mach);
1058 GET_REGL(env->macl);
1062 GET_REGL(env->fpul);
1064 GET_REGL(env->fpscr);
1066 if (env->fpscr & FPSCR_FR) {
1067 stfl_p(mem_buf, env->fregs[n - 9]);
1069 stfl_p(mem_buf, env->fregs[n - 25]);
1077 GET_REGL(env->gregs[n - 43]);
1079 GET_REGL(env->gregs[n - (51 - 16)]);
/* SH4: write gdb-supplied value to register n; banking rules mirror
 * the read side (SR.MD/SR.RB for r0-r7, FPSCR.FR for floats). */
1085 static int cpu_gdb_write_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1089 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1090 env->gregs[n + 16] = ldl_p(mem_buf);
1092 env->gregs[n] = ldl_p(mem_buf);
1096 env->gregs[n] = ldl_p(mem_buf);
1099 env->pc = ldl_p(mem_buf);
1102 env->pr = ldl_p(mem_buf);
1105 env->gbr = ldl_p(mem_buf);
1108 env->vbr = ldl_p(mem_buf);
1111 env->mach = ldl_p(mem_buf);
1114 env->macl = ldl_p(mem_buf);
1117 env->sr = ldl_p(mem_buf);
1120 env->fpul = ldl_p(mem_buf);
1123 env->fpscr = ldl_p(mem_buf);
1126 if (env->fpscr & FPSCR_FR) {
1127 env->fregs[n - 9] = ldfl_p(mem_buf);
1129 env->fregs[n - 25] = ldfl_p(mem_buf);
1133 env->ssr = ldl_p(mem_buf);
1136 env->spc = ldl_p(mem_buf);
1139 env->gregs[n - 43] = ldl_p(mem_buf);
1142 env->gregs[n - (51 - 16)] = ldl_p(mem_buf);
1150 #elif defined (TARGET_MICROBLAZE)
/* MicroBlaze: read GPR n (0-31) or special register (32+) for gdb. */
1152 static int cpu_gdb_read_register(CPUMBState *env, uint8_t *mem_buf, int n)
1155 GET_REG32(env->regs[n]);
1157 GET_REG32(env->sregs[n - 32]);
/* MicroBlaze: write gdb-supplied 32-bit value to register n after a
 * range check against the advertised core register count. */
1162 static int cpu_gdb_write_register(CPUMBState *env, uint8_t *mem_buf, int n)
1164 MicroBlazeCPU *cpu = mb_env_get_cpu(env);
1165 CPUClass *cc = CPU_GET_CLASS(cpu);
/* NOTE(review): '>' looks like an off-by-one (valid indices are
 * 0..gdb_num_core_regs-1); presumably should be '>=' — confirm. */
1168 if (n > cc->gdb_num_core_regs) {
1172 tmp = ldl_p(mem_buf);
1177 env->sregs[n - 32] = tmp;
1181 #elif defined (TARGET_CRIS)
/* CRISv10 (pre-v32): read register n for gdb.  GPRs first, then
 * special registers of varying widths (8/16/32-bit pregs). */
1184 read_register_crisv10(CPUCRISState *env, uint8_t *mem_buf, int n)
1187 GET_REG32(env->regs[n]);
1197 GET_REG8(env->pregs[n - 16]);
1199 GET_REG8(env->pregs[n - 16]);
1202 GET_REG16(env->pregs[n - 16]);
1205 GET_REG32(env->pregs[n - 16]);
/* CRIS: read register n for gdb.  Pre-v32 cores (PR_VR < 32) are
 * delegated to read_register_crisv10(); v32 reads GPRs, pregs and the
 * support-function registers of the currently selected bank (PR_SRS). */
1213 static int cpu_gdb_read_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1217 if (env->pregs[PR_VR] < 32) {
1218 return read_register_crisv10(env, mem_buf, n);
1221 srs = env->pregs[PR_SRS];
1223 GET_REG32(env->regs[n]);
1226 if (n >= 21 && n < 32) {
1227 GET_REG32(env->pregs[n - 16]);
1229 if (n >= 33 && n < 49) {
1230 GET_REG32(env->sregs[srs][n - 33]);
1234 GET_REG8(env->pregs[0]);
1236 GET_REG8(env->pregs[1]);
1238 GET_REG32(env->pregs[2]);
1242 GET_REG16(env->pregs[4]);
/* CRIS: write gdb-supplied 32-bit value to register n (GPRs and a
 * subset of the pregs; support-function regs are left read-only). */
1250 static int cpu_gdb_write_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1258 tmp = ldl_p(mem_buf);
1264 if (n >= 21 && n < 32) {
1265 env->pregs[n - 16] = tmp;
1268 /* FIXME: Should support function regs be writable? */
1275 env->pregs[PR_PID] = tmp;
1288 #elif defined (TARGET_ALPHA)
/* Alpha: read register n for gdb (integer regs, FP regs via fir[],
 * and the FPCR through cpu_alpha_load_fpcr). */
1290 static int cpu_gdb_read_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1300 d.d = env->fir[n - 32];
1304 val = cpu_alpha_load_fpcr(env);
1314 /* 31 really is the zero register; 65 is unassigned in the
1315 gdb protocol, but is still required to occupy 8 bytes. */
/* Alpha: write gdb-supplied value to register n; FPCR writes go
 * through cpu_alpha_store_fpcr so derived state stays consistent. */
1324 static int cpu_gdb_write_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1326 target_ulong tmp = ldtul_p(mem_buf);
1335 env->fir[n - 32] = d.d;
1338 cpu_alpha_store_fpcr(env, tmp);
1348 /* 31 really is the zero register; 65 is unassigned in the
1349 gdb protocol, but is still required to occupy 8 bytes. */
1356 #elif defined (TARGET_S390X)
/* S390X: read register n for gdb.  The PSW mask has the current
 * condition code folded in (bits 44-45) before being reported. */
1358 static int cpu_gdb_read_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1364 case S390_PSWM_REGNUM:
1365 cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);
1366 val = deposit64(env->psw.mask, 44, 2, cc_op);
1368 case S390_PSWA_REGNUM:
1369 GET_REGL(env->psw.addr);
1370 case S390_R0_REGNUM ... S390_R15_REGNUM:
1371 GET_REGL(env->regs[n-S390_R0_REGNUM]);
1372 case S390_A0_REGNUM ... S390_A15_REGNUM:
1373 GET_REG32(env->aregs[n-S390_A0_REGNUM]);
1374 case S390_FPC_REGNUM:
1375 GET_REG32(env->fpc);
1376 case S390_F0_REGNUM ... S390_F15_REGNUM:
1377 GET_REG64(env->fregs[n-S390_F0_REGNUM].ll);
/* S390X: write gdb-supplied value to register n.  Writing the PSW
 * mask also re-extracts the condition code into env->cc_op. */
1383 static int cpu_gdb_write_register(CPUS390XState *env, uint8_t *mem_buf, int n)
/* Both widths are decoded up front; each case uses the one it needs. */
1388 tmpl = ldtul_p(mem_buf);
1389 tmp32 = ldl_p(mem_buf);
1392 case S390_PSWM_REGNUM:
1393 env->psw.mask = tmpl;
1394 env->cc_op = extract64(tmpl, 44, 2);
1396 case S390_PSWA_REGNUM:
1397 env->psw.addr = tmpl;
1399 case S390_R0_REGNUM ... S390_R15_REGNUM:
1400 env->regs[n-S390_R0_REGNUM] = tmpl;
1402 case S390_A0_REGNUM ... S390_A15_REGNUM:
1403 env->aregs[n-S390_A0_REGNUM] = tmp32;
1406 case S390_FPC_REGNUM:
1410 case S390_F0_REGNUM ... S390_F15_REGNUM:
1411 env->fregs[n-S390_F0_REGNUM].ll = tmpl;
1418 #elif defined (TARGET_LM32)
1420 #include "hw/lm32/lm32_pic.h"
/* LM32: read register n for gdb; interrupt mask/pending come from the
 * attached PIC model rather than CPU state. */
1422 static int cpu_gdb_read_register(CPULM32State *env, uint8_t *mem_buf, int n)
1425 GET_REG32(env->regs[n]);
1430 /* FIXME: put in right exception ID */
1434 GET_REG32(env->eba);
1436 GET_REG32(env->deba);
1440 GET_REG32(lm32_pic_get_im(env->pic_state));
1442 GET_REG32(lm32_pic_get_ip(env->pic_state));
/* LM32: write gdb-supplied 32-bit value to register n; PIC-backed
 * registers are forwarded to the PIC model. */
1448 static int cpu_gdb_write_register(CPULM32State *env, uint8_t *mem_buf, int n)
1450 LM32CPU *cpu = lm32_env_get_cpu(env);
1451 CPUClass *cc = CPU_GET_CLASS(cpu);
/* NOTE(review): '>' looks like an off-by-one (valid indices are
 * 0..gdb_num_core_regs-1); presumably should be '>=' — confirm. */
1454 if (n > cc->gdb_num_core_regs) {
1458 tmp = ldl_p(mem_buf);
1477 lm32_pic_set_im(env->pic_state, tmp);
1480 lm32_pic_set_ip(env->pic_state, tmp);
1486 #elif defined(TARGET_XTENSA)
/* Xtensa: read register n for gdb.  The per-core gdb_regmap table
 * maps gdb register numbers onto AR/SR/UR/FR/A register files via the
 * encoded targno; windowed regs are synced to phys_regs first. */
1488 static int cpu_gdb_read_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1490 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1492 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1496 switch (reg->type) {
1501 xtensa_sync_phys_from_window(env);
1502 GET_REG32(env->phys_regs[(reg->targno & 0xff) % env->config->nareg]);
1505 GET_REG32(env->sregs[reg->targno & 0xff]);
1508 GET_REG32(env->uregs[reg->targno & 0xff]);
1511 GET_REG32(float32_val(env->fregs[reg->targno & 0x0f]));
1514 GET_REG32(env->regs[reg->targno & 0x0f]);
1517 qemu_log("%s from reg %d of unsupported type %d\n",
1518 __func__, n, reg->type);
/* Xtensa: write gdb-supplied 32-bit value to register n, mapped
 * through gdb_regmap; windowed regs are synced back after the write. */
1523 static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1526 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1528 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1532 tmp = ldl_p(mem_buf);
1534 switch (reg->type) {
1540 env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
1541 xtensa_sync_window_from_phys(env);
1545 env->sregs[reg->targno & 0xff] = tmp;
1549 env->uregs[reg->targno & 0xff] = tmp;
1553 env->fregs[reg->targno & 0x0f] = make_float32(tmp);
1557 env->regs[reg->targno & 0x0f] = tmp;
1561 qemu_log("%s to reg %d of unsupported type %d\n",
1562 __func__, n, reg->type);
/* Fallback stubs for targets with no gdb core register support;
 * presumably both return 0 (no bytes) — bodies elided in this excerpt. */
1570 static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
1575 static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
1583 /* Encode data using the encoding for 'x' packets. */
/* Binary payload escaping: '#', '$', '*' and '}' must be escaped with
 * '}' per the gdb remote protocol; returns the encoded length. */
1584 static int memtox(char *buf, const char *mem, int len)
1592 case '#': case '$': case '*': case '}':
/* Resolve the annex name of a qXfer:features:read request to XML
 * content.  "target.xml" is generated once (cached in a static
 * buffer) from the core XML plus every registered coprocessor's XML;
 * other names are looked up in the built-in xml_builtin table.
 * *newp is advanced past the parsed annex name. */
1604 static const char *get_feature_xml(const char *p, const char **newp)
1609 static char target_xml[1024];
1612 while (p[len] && p[len] != ':')
1617 if (strncmp(p, "target.xml", len) == 0) {
1618 /* Generate the XML description for this CPU. */
1619 if (!target_xml[0]) {
1620 GDBRegisterState *r;
1621 CPUState *cpu = first_cpu;
1623 snprintf(target_xml, sizeof(target_xml),
1624 "<?xml version=\"1.0\"?>"
1625 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1627 "<xi:include href=\"%s\"/>",
1630 for (r = cpu->gdb_regs; r; r = r->next) {
1631 pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
1632 pstrcat(target_xml, sizeof(target_xml), r->xml);
1633 pstrcat(target_xml, sizeof(target_xml), "\"/>");
1635 pstrcat(target_xml, sizeof(target_xml), "</target>");
1639 for (i = 0; ; i++) {
1640 name = xml_builtin[i][0];
1641 if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
1644 return name ? xml_builtin[i][1] : NULL;
/* Read register 'reg' for gdb: core registers go through the
 * per-target cpu_gdb_read_register(); higher numbers are dispatched
 * to the registered coprocessor register set covering that range.
 * Returns the number of bytes written to mem_buf. */
1648 static int gdb_read_register(CPUState *cpu, uint8_t *mem_buf, int reg)
1650 CPUClass *cc = CPU_GET_CLASS(cpu);
1651 CPUArchState *env = cpu->env_ptr;
1652 GDBRegisterState *r;
1654 if (reg < cc->gdb_num_core_regs) {
1655 return cpu_gdb_read_register(env, mem_buf, reg);
1658 for (r = cpu->gdb_regs; r; r = r->next) {
1659 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1660 return r->get_reg(env, mem_buf, reg - r->base_reg);
/* Write register 'reg' from gdb-supplied bytes; dispatch mirrors
 * gdb_read_register().  Returns the number of bytes consumed. */
1666 static int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg)
1668 CPUClass *cc = CPU_GET_CLASS(cpu);
1669 CPUArchState *env = cpu->env_ptr;
1670 GDBRegisterState *r;
1672 if (reg < cc->gdb_num_core_regs) {
1673 return cpu_gdb_write_register(env, mem_buf, reg);
1676 for (r = cpu->gdb_regs; r; r = r->next) {
1677 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1678 return r->set_reg(env, mem_buf, reg - r->base_reg);
1684 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
1685 specifies the first register number and these registers are included in
1686 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
1687 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
1690 void gdb_register_coprocessor(CPUState *cpu,
1691 gdb_reg_cb get_reg, gdb_reg_cb set_reg,
1692 int num_regs, const char *xml, int g_pos)
1694 GDBRegisterState *s;
1695 GDBRegisterState **p;
1699 /* Check for duplicates. */
1700 if (strcmp((*p)->xml, xml) == 0)
/* New set gets the next free register number range on this CPU. */
1705 s = g_new0(GDBRegisterState, 1);
1706 s->base_reg = cpu->gdb_num_regs;
1707 s->num_regs = num_regs;
1708 s->get_reg = get_reg;
1709 s->set_reg = set_reg;
1712 /* Add to end of list. */
1713 cpu->gdb_num_regs += num_regs;
/* A nonzero g_pos is a caller assertion about the expected base
 * register number; mismatch indicates a registration-order bug. */
1716 if (g_pos != s->base_reg) {
1717 fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
1718 "Expected %d got %d\n", xml, g_pos, s->base_reg);
1723 #ifndef CONFIG_USER_ONLY
/* Map gdb watchpoint type constants to QEMU BP_* watchpoint flags. */
1724 static const int xlat_gdb_type[] = {
1725 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
1726 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
1727 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
/* Insert a breakpoint or watchpoint of the given gdb type on every
 * CPU (or delegate entirely to KVM when accelerated).  Returns 0 on
 * success or a negative errno. */
1731 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
1737 if (kvm_enabled()) {
1738 return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
/* SW and HW breakpoints are treated identically under TCG. */
1742 case GDB_BREAKPOINT_SW:
1743 case GDB_BREAKPOINT_HW:
1744 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
1746 err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
1751 #ifndef CONFIG_USER_ONLY
1752 case GDB_WATCHPOINT_WRITE:
1753 case GDB_WATCHPOINT_READ:
1754 case GDB_WATCHPOINT_ACCESS:
1755 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
1757 err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
/* Remove a breakpoint or watchpoint previously inserted with
 * gdb_breakpoint_insert(); same per-CPU / KVM dispatch. */
1769 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
1775 if (kvm_enabled()) {
1776 return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1780 case GDB_BREAKPOINT_SW:
1781 case GDB_BREAKPOINT_HW:
1782 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
1784 err = cpu_breakpoint_remove(env, addr, BP_GDB);
1789 #ifndef CONFIG_USER_ONLY
1790 case GDB_WATCHPOINT_WRITE:
1791 case GDB_WATCHPOINT_READ:
1792 case GDB_WATCHPOINT_ACCESS:
1793 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
1795 err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
/* Drop every gdb-owned breakpoint and watchpoint on all CPUs; used on
 * fresh connect ('?' packet) and on detach. */
1806 static void gdb_breakpoint_remove_all(void)
1811 if (kvm_enabled()) {
1812 kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
1816 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
1818 cpu_breakpoint_remove_all(env, BP_GDB);
1819 #ifndef CONFIG_USER_ONLY
1820 cpu_watchpoint_remove_all(env, BP_GDB);
/* Set the program counter of the current step/continue CPU via the
 * CPUClass set_pc hook, syncing state from the accelerator first. */
1825 static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
1827 CPUState *cpu = s->c_cpu;
1828 CPUClass *cc = CPU_GET_CLASS(cpu);
1830 cpu_synchronize_state(cpu);
1832 cc->set_pc(cpu, pc);
/* Look up a CPU by the thread id gdb uses (cpu_index); presumably
 * returns NULL when no CPU matches — tail elided in this excerpt. */
1836 static CPUState *find_cpu(uint32_t thread_id)
1840 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
1841 if (cpu_index(cpu) == thread_id) {
/* Central dispatcher for one complete gdb remote-protocol command.
 * The first character of line_buf selects the command ('?' stop
 * reason, 'c'/'s' resume/step, 'g'/'G' all registers, 'p'/'P' single
 * register, 'm'/'M' memory, 'Z'/'z' breakpoints, 'H'/'T' threads,
 * 'q'/'Q' queries, ...); replies go out via put_packet().
 * NOTE(review): many interior lines are elided in this excerpt, so
 * the visible statements are not contiguous. */
1849 static int gdb_handle_packet(GDBState *s, const char *line_buf)
1854 int ch, reg_size, type, res;
1855 char buf[MAX_PACKET_LENGTH];
1856 uint8_t mem_buf[MAX_PACKET_LENGTH];
1858 target_ulong addr, len;
1861 printf("command='%s'\n", line_buf);
/* '?' — report why the target stopped. */
1867 /* TODO: Make this return the correct value for user-mode. */
1868 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
1869 cpu_index(s->c_cpu));
1871 /* Remove all the breakpoints when this query is issued,
1872 * because gdb is doing and initial connect and the state
1873 * should be cleaned up.
1875 gdb_breakpoint_remove_all();
/* 'c' — continue, optionally from a new PC. */
1879 addr = strtoull(p, (char **)&p, 16);
1880 gdb_set_cpu_pc(s, addr);
/* 'C' — continue with a signal. */
1886 s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
1887 if (s->signal == -1)
/* 'v' packets: only vCont[;action[:thread]]... is handled. */
1892 if (strncmp(p, "Cont", 4) == 0) {
1893 int res_signal, res_thread;
1897 put_packet(s, "vCont;c;C;s;S");
1912 if (action == 'C' || action == 'S') {
1913 signal = strtoul(p, (char **)&p, 16);
1914 } else if (action != 'c' && action != 's') {
1920 thread = strtoull(p+1, (char **)&p, 16);
/* A step action takes precedence over a continue action. */
1922 action = tolower(action);
1923 if (res == 0 || (res == 'c' && action == 's')) {
1925 res_signal = signal;
1926 res_thread = thread;
1930 if (res_thread != -1 && res_thread != 0) {
1931 cpu = find_cpu(res_thread);
1933 put_packet(s, "E22");
1939 cpu_single_step(s->c_cpu, sstep_flags);
1941 s->signal = res_signal;
1947 goto unknown_command;
/* 'k' — kill request (user-mode terminates QEMU). */
1950 #ifdef CONFIG_USER_ONLY
1951 /* Kill the target */
1952 fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
/* 'D' — detach: drop breakpoints and let the guest run. */
1957 gdb_breakpoint_remove_all();
1958 gdb_syscall_mode = GDB_SYS_DISABLED;
1960 put_packet(s, "OK");
/* 's' — single step, optionally from a new PC. */
1964 addr = strtoull(p, (char **)&p, 16);
1965 gdb_set_cpu_pc(s, addr);
1967 cpu_single_step(s->c_cpu, sstep_flags);
/* 'F' — reply to a file-I/O (semihosting syscall) request. */
1975 ret = strtoull(p, (char **)&p, 16);
1978 err = strtoull(p, (char **)&p, 16);
1985 if (s->current_syscall_cb) {
1986 s->current_syscall_cb(s->c_cpu, ret, err);
1987 s->current_syscall_cb = NULL;
1990 put_packet(s, "T02");
/* 'g' — read all core registers as one hex blob. */
1997 cpu_synchronize_state(s->g_cpu);
1999 for (addr = 0; addr < s->g_cpu->gdb_num_regs; addr++) {
2000 reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
2003 memtohex(buf, mem_buf, len);
/* 'G' — write all core registers from one hex blob. */
2007 cpu_synchronize_state(s->g_cpu);
2008 registers = mem_buf;
2009 len = strlen(p) / 2;
2010 hextomem((uint8_t *)registers, p, len);
2011 for (addr = 0; addr < s->g_cpu->gdb_num_regs && len > 0; addr++) {
2012 reg_size = gdb_write_register(s->g_cpu, registers, addr);
2014 registers += reg_size;
2016 put_packet(s, "OK");
/* 'm' — read guest memory. */
2019 addr = strtoull(p, (char **)&p, 16);
2022 len = strtoull(p, NULL, 16);
2023 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, false) != 0) {
2024 put_packet (s, "E14");
2026 memtohex(buf, mem_buf, len);
/* 'M' — write guest memory. */
2031 addr = strtoull(p, (char **)&p, 16);
2034 len = strtoull(p, (char **)&p, 16);
2037 hextomem(mem_buf, p, len);
2038 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len,
2040 put_packet(s, "E14");
2042 put_packet(s, "OK");
/* 'p' — read a single register (only for XML-aware gdb). */
2046 /* Older gdb are really dumb, and don't use 'g' if 'p' is avaialable.
2047 This works, but can be very slow. Anything new enough to
2048 understand XML also knows how to use this properly. */
2050 goto unknown_command;
2051 addr = strtoull(p, (char **)&p, 16);
2052 reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
2054 memtohex(buf, mem_buf, reg_size);
2057 put_packet(s, "E14");
/* 'P' — write a single register (only for XML-aware gdb). */
2062 goto unknown_command;
2063 addr = strtoull(p, (char **)&p, 16);
2066 reg_size = strlen(p) / 2;
2067 hextomem(mem_buf, p, reg_size);
2068 gdb_write_register(s->g_cpu, mem_buf, addr);
2069 put_packet(s, "OK");
/* 'Z'/'z' — insert/remove breakpoint or watchpoint. */
2073 type = strtoul(p, (char **)&p, 16);
2076 addr = strtoull(p, (char **)&p, 16);
2079 len = strtoull(p, (char **)&p, 16);
2081 res = gdb_breakpoint_insert(addr, len, type);
2083 res = gdb_breakpoint_remove(addr, len, type);
2085 put_packet(s, "OK");
/* -ENOSYS (unsupported type) is answered with an empty packet. */
2086 else if (res == -ENOSYS)
2089 put_packet(s, "E22");
/* 'H' — select the CPU for subsequent 'g'- or 'c'-class commands. */
2093 thread = strtoull(p, (char **)&p, 16);
2094 if (thread == -1 || thread == 0) {
2095 put_packet(s, "OK");
2098 cpu = find_cpu(thread);
2100 put_packet(s, "E22");
2106 put_packet(s, "OK");
2110 put_packet(s, "OK");
2113 put_packet(s, "E22");
/* 'T' — query whether a thread (CPU) is alive. */
2118 thread = strtoull(p, (char **)&p, 16);
2119 cpu = find_cpu(thread);
2122 put_packet(s, "OK");
2124 put_packet(s, "E22");
2129 /* parse any 'q' packets here */
2130 if (!strcmp(p,"qemu.sstepbits")) {
2131 /* Query Breakpoint bit definitions */
2132 snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2138 } else if (strncmp(p,"qemu.sstep",10) == 0) {
2139 /* Display or change the sstep_flags */
2142 /* Display current setting */
2143 snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
2148 type = strtoul(p, (char **)&p, 16);
2150 put_packet(s, "OK");
2152 } else if (strcmp(p,"C") == 0) {
2153 /* "Current thread" remains vague in the spec, so always return
2154 * the first CPU (gdb returns the first thread). */
2155 put_packet(s, "QC1");
2157 } else if (strcmp(p,"fThreadInfo") == 0) {
2158 s->query_cpu = first_cpu;
2159 goto report_cpuinfo;
2160 } else if (strcmp(p,"sThreadInfo") == 0) {
2163 snprintf(buf, sizeof(buf), "m%x", cpu_index(s->query_cpu));
2165 s->query_cpu = s->query_cpu->next_cpu;
2169 } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
2170 thread = strtoull(p+16, (char **)&p, 16);
2171 cpu = find_cpu(thread);
2173 cpu_synchronize_state(cpu);
2174 len = snprintf((char *)mem_buf, sizeof(mem_buf),
2175 "CPU#%d [%s]", cpu->cpu_index,
2176 cpu->halted ? "halted " : "running");
2177 memtohex(buf, mem_buf, len);
2182 #ifdef CONFIG_USER_ONLY
2183 else if (strncmp(p, "Offsets", 7) == 0) {
2184 CPUArchState *env = s->c_cpu->env_ptr;
2185 TaskState *ts = env->opaque;
2187 snprintf(buf, sizeof(buf),
2188 "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
2189 ";Bss=" TARGET_ABI_FMT_lx,
2190 ts->info->code_offset,
2191 ts->info->data_offset,
/* NOTE(review): data_offset is reported for Bss as well — appears
 * deliberate, but worth confirming against the loader's info. */
2192 ts->info->data_offset);
2196 #else /* !CONFIG_USER_ONLY */
/* qRcmd — hex-encoded monitor command forwarded to the monitor. */
2197 else if (strncmp(p, "Rcmd,", 5) == 0) {
2198 int len = strlen(p + 5);
2200 if ((len % 2) != 0) {
2201 put_packet(s, "E01");
2204 hextomem(mem_buf, p + 5, len);
2207 qemu_chr_be_write(s->mon_chr, mem_buf, len);
2208 put_packet(s, "OK");
2211 #endif /* !CONFIG_USER_ONLY */
/* qSupported — advertise packet size and XML target descriptions. */
2212 if (strncmp(p, "Supported", 9) == 0) {
2213 snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
2215 pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
/* qXfer:features:read — serve a slice of the target XML. */
2221 if (strncmp(p, "Xfer:features:read:", 19) == 0) {
2223 target_ulong total_len;
2227 xml = get_feature_xml(p, &p);
2229 snprintf(buf, sizeof(buf), "E00");
2236 addr = strtoul(p, (char **)&p, 16);
2239 len = strtoul(p, (char **)&p, 16);
2241 total_len = strlen(xml);
2242 if (addr > total_len) {
2243 snprintf(buf, sizeof(buf), "E00");
/* Clamp to what fits in one framed packet after escaping. */
2247 if (len > (MAX_PACKET_LENGTH - 5) / 2)
2248 len = (MAX_PACKET_LENGTH - 5) / 2;
2249 if (len < total_len - addr) {
2251 len = memtox(buf + 1, xml + addr, len);
2254 len = memtox(buf + 1, xml + addr, total_len - addr);
2256 put_packet_binary(s, buf, len + 1);
2260 /* Unrecognised 'q' command. */
2261 goto unknown_command;
2265 /* put empty packet */
/* Record which CPU caused the stop so subsequent register/memory and
 * step/continue commands apply to it. */
2273 void gdb_set_stop_cpu(CPUState *cpu)
2275 gdbserver_state->c_cpu = cpu;
2276 gdbserver_state->g_cpu = cpu;
2279 #ifndef CONFIG_USER_ONLY
/* VM run-state change hook (system mode): when the VM stops while a
 * debugger is attached, translate the stop reason into a GDB 'T'
 * stop-reply packet (watchpoint details for DEBUG stops, otherwise a
 * signal number derived from the RunState). */
2280 static void gdb_vm_state_change(void *opaque, int running, RunState state)
2282 GDBState *s = gdbserver_state;
2283 CPUArchState *env = s->c_cpu->env_ptr;
2284 CPUState *cpu = s->c_cpu;
/* Nothing to report while running or before a client attached. */
2289 if (running || s->state == RS_INACTIVE) {
2292 /* Is there a GDB syscall waiting to be sent? */
2293 if (s->current_syscall_cb) {
2294 put_packet(s, s->syscall_buf);
2298 case RUN_STATE_DEBUG:
2299 if (env->watchpoint_hit) {
2300 switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
2311 snprintf(buf, sizeof(buf),
2312 "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
2313 GDB_SIGNAL_TRAP, cpu_index(cpu), type,
2314 env->watchpoint_hit->vaddr);
2315 env->watchpoint_hit = NULL;
2319 ret = GDB_SIGNAL_TRAP;
2321 case RUN_STATE_PAUSED:
2322 ret = GDB_SIGNAL_INT;
2324 case RUN_STATE_SHUTDOWN:
2325 ret = GDB_SIGNAL_QUIT;
2327 case RUN_STATE_IO_ERROR:
2328 ret = GDB_SIGNAL_IO;
2330 case RUN_STATE_WATCHDOG:
2331 ret = GDB_SIGNAL_ALRM;
2333 case RUN_STATE_INTERNAL_ERROR:
2334 ret = GDB_SIGNAL_ABRT;
2336 case RUN_STATE_SAVE_VM:
2337 case RUN_STATE_RESTORE_VM:
2339 case RUN_STATE_FINISH_MIGRATE:
2340 ret = GDB_SIGNAL_XCPU;
2343 ret = GDB_SIGNAL_UNKNOWN;
2346 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, cpu_index(cpu));
2351 /* disable single step if it was enabled */
2352 cpu_single_step(cpu, 0);
2356 /* Send a gdb syscall request.
2357 This accepts limited printf-style format specifiers, specifically:
2358 %x - target_ulong argument printed in hex.
2359 %lx - 64-bit argument printed in hex.
2360 %s - string pointer (target_ulong) and length (int) pair. */
2361 void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
2370 s = gdbserver_state;
/* Remember the completion callback; it runs when gdb's 'F' reply to
 * this syscall request arrives. */
2373 s->current_syscall_cb = cb;
2374 #ifndef CONFIG_USER_ONLY
/* System mode: stop the VM first; the packet itself is sent later from
 * the run-state change handler (see the comment at the bottom). */
2375 vm_stop(RUN_STATE_DEBUG);
2379 p_end = &s->syscall_buf[sizeof(s->syscall_buf)];
/* Expand the format string into the syscall request packet buffer. */
2386 addr = va_arg(va, target_ulong);
2387 p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
/* 'l' must be followed by 'x' (the %lx specifier). */
2390 if (*(fmt++) != 'x')
2392 i64 = va_arg(va, uint64_t);
2393 p += snprintf(p, p_end - p, "%" PRIx64, i64);
/* %s: target address of the string plus its length, as "addr/len". */
2396 addr = va_arg(va, target_ulong);
2397 p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
2398 addr, va_arg(va, int));
2402 fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
2412 #ifdef CONFIG_USER_ONLY
/* User mode: send immediately and wait for gdb to process it. */
2413 put_packet(s, s->syscall_buf);
2414 gdb_handlesig(s->c_cpu, 0);
2416 /* In this case wait to send the syscall packet until notification that
2417 the CPU has stopped. This must be done because if the packet is sent
2418 now the reply from the syscall request could be received while the CPU
2419 is still in the running state, which can cause packets to be dropped
2420 and state transition 'T' packets to be sent while the syscall is still
/* Consume one byte ch of input from the gdb channel and advance the
 * remote-protocol state machine: handle '+'/'-' acks for the last sent
 * packet, accumulate "$<payload>#<csum>" packet data, verify the
 * checksum, ack/nack, and hand complete packets to gdb_handle_packet(). */
2426 static void gdb_read_byte(GDBState *s, int ch)
2431 #ifndef CONFIG_USER_ONLY
2432 if (s->last_packet_len) {
2433 /* Waiting for a response to the last packet. If we see the start
2434 of a new command then abandon the previous response. */
2437 printf("Got NACK, retransmitting\n");
/* NACK ('-'): resend the previously transmitted packet verbatim. */
2439 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
2443 printf("Got ACK\n");
2445 printf("Got '%c' when expecting ACK/NACK\n", ch);
/* Either an ACK or a new packet start closes the retransmit window. */
2447 if (ch == '+' || ch == '$')
2448 s->last_packet_len = 0;
2452 if (runstate_is_running()) {
2453 /* when the CPU is running, we cannot do anything except stop
2454 it when receiving a char */
2455 vm_stop(RUN_STATE_PAUSED);
/* '$' seen: start collecting the packet payload. */
2462 s->line_buf_index = 0;
2463 s->state = RS_GETLINE;
/* '#' seen: payload done, next two chars are the hex checksum. */
2468 s->state = RS_CHKSUM1;
2469 } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
2472 s->line_buf[s->line_buf_index++] = ch;
2476 s->line_buf[s->line_buf_index] = '\0';
2477 s->line_csum = fromhex(ch) << 4;
2478 s->state = RS_CHKSUM2;
2481 s->line_csum |= fromhex(ch);
/* Verify the simple mod-256 sum of the payload bytes. */
2483 for(i = 0; i < s->line_buf_index; i++) {
2484 csum += s->line_buf[i];
2486 if (s->line_csum != (csum & 0xff)) {
/* Bad checksum: reply so the peer retransmits the packet. */
2488 put_buffer(s, &reply, 1);
/* Good checksum: acknowledge and dispatch the command. */
2492 put_buffer(s, &reply, 1);
2493 s->state = gdb_handle_packet(s, s->line_buf);
2502 /* Tell the remote gdb that the process has exited. */
2503 void gdb_exit(CPUArchState *env, int code)
2508 s = gdbserver_state;
/* Do nothing if no gdb client was ever attached. */
2512 #ifdef CONFIG_USER_ONLY
2513 if (gdbserver_fd < 0 || s->fd < 0) {
/* 'W' packet: process exited with status code (low 8 bits only). */
2518 snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
2521 #ifndef CONFIG_USER_ONLY
/* System mode: also tear down the chardev backing the connection. */
2523 qemu_chr_delete(s->chr);
2528 #ifdef CONFIG_USER_ONLY
/* NOTE(review): the enclosing function's signature lies outside this
 * view; the visible lines only check that a gdb client is attached
 * (both the listening fd and the connection fd are valid) before
 * proceeding — confirm the surrounding context in the full source. */
2534 s = gdbserver_state;
2536 if (gdbserver_fd < 0 || s->fd < 0)
/* User-mode only: report signal sig for cpu to the attached gdb (as an
 * 'S' stop packet) and then block, reading and processing gdb commands
 * until the debugger resumes execution (running_state becomes non-zero).
 * NOTE(review): the return type/value lines are outside this view —
 * presumably the signal to actually deliver; confirm in full source. */
2543 gdb_handlesig(CPUState *cpu, int sig)
2545 CPUArchState *env = cpu->env_ptr;
/* If no gdb client is attached, there is nothing to report. */
2550 s = gdbserver_state;
2551 if (gdbserver_fd < 0 || s->fd < 0) {
2555 /* disable single step if it was enabled */
2556 cpu_single_step(cpu, 0);
/* 'S' packet: stopped with signal, translated to GDB numbering. */
2560 snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb(sig));
2563 /* put_packet() might have detected that the peer terminated the
/* Pump the protocol state machine until gdb tells the CPU to run. */
2571 s->running_state = 0;
2572 while (s->running_state == 0) {
2573 n = read(s->fd, buf, 256);
2577 for (i = 0; i < n; i++) {
2578 gdb_read_byte(s, buf[i]);
2580 } else if (n == 0 || errno != EAGAIN) {
2581 /* XXX: Connection closed. Should probably wait for another
2582 connection before continuing. */
2591 /* Tell the remote gdb that the process has exited due to SIG. */
2592 void gdb_signalled(CPUArchState *env, int sig)
/* Do nothing if no gdb client is attached. */
2597 s = gdbserver_state;
2598 if (gdbserver_fd < 0 || s->fd < 0) {
/* 'X' packet: process terminated by a signal (GDB numbering). */
2602 snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb(sig));
/* User-mode: wait for a gdb client to connect on the listening socket
 * (gdbserver_fd), then create and install a fresh GDBState for the
 * connection, targeting the first CPU. */
2606 static void gdb_accept(void)
2609 struct sockaddr_in sockaddr;
2614 len = sizeof(sockaddr);
/* accept() is retried only for EINTR; other errors abort. */
2615 fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
2616 if (fd < 0 && errno != EINTR) {
2619 } else if (fd >= 0) {
/* Keep the debug connection out of exec'd children. */
2621 fcntl(fd, F_SETFD, FD_CLOEXEC);
2627 /* set short latency */
2628 socket_set_nodelay(fd);
2630 s = g_malloc0(sizeof(GDBState));
2631 s->c_cpu = first_cpu;
2632 s->g_cpu = first_cpu;
2636 gdbserver_state = s;
/* Non-blocking reads so input can be polled during execution. */
2638 fcntl(fd, F_SETFL, O_NONBLOCK);
/* Create an IPv4 TCP listening socket bound to port on all local
 * addresses. Returns the listening fd, or a negative value on error. */
2641 static int gdbserver_open(int port)
2643 struct sockaddr_in sockaddr;
2646 fd = socket(PF_INET, SOCK_STREAM, 0);
2652 fcntl(fd, F_SETFD, FD_CLOEXEC);
2655 /* allow fast reuse */
2657 qemu_setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
2659 sockaddr.sin_family = AF_INET;
2660 sockaddr.sin_port = htons(port);
/* 0 == INADDR_ANY: listen on every local interface. */
2661 sockaddr.sin_addr.s_addr = 0;
2662 ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
2668 ret = listen(fd, 0);
/* User-mode entry point: open the listening socket on port, then wait
 * for the first gdb connection. Propagates a negative value if the
 * socket cannot be opened (see gdbserver_open). */
2677 int gdbserver_start(int port)
2679 gdbserver_fd = gdbserver_open(port);
2680 if (gdbserver_fd < 0)
2682 /* accept connections */
2687 /* Disable gdb stub for child processes. */
2688 void gdbserver_fork(CPUArchState *env)
2690 GDBState *s = gdbserver_state;
/* Only relevant when a gdb client was actually attached. */
2691 if (gdbserver_fd < 0 || s->fd < 0)
/* Remove every gdb-inserted breakpoint and watchpoint in the child. */
2695 cpu_breakpoint_remove_all(env, BP_GDB);
2696 cpu_watchpoint_remove_all(env, BP_GDB);
/* Chardev callback: report how many bytes we are willing to accept. */
2699 static int gdb_chr_can_receive(void *opaque)
2701 /* We can handle an arbitrarily large amount of data.
2702 Pick the maximum packet size, which is as good as anything. */
2703 return MAX_PACKET_LENGTH;
/* Chardev callback: feed each received byte into the protocol state
 * machine of the global gdbserver state. */
2706 static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
2710 for (i = 0; i < size; i++) {
2711 gdb_read_byte(gdbserver_state, buf[i]);
/* Chardev event callback: when a gdb client connects, pause the VM so
 * the debugger starts with the guest stopped. */
2715 static void gdb_chr_event(void *opaque, int event)
2718 case CHR_EVENT_OPENED:
2719 vm_stop(RUN_STATE_PAUSED);
/* Send len bytes of monitor output msg to the gdb client, hex-encoded
 * into a single packet.
 * NOTE(review): buf[0] is set on a line outside this view — presumably
 * the 'O' (console output) packet marker; confirm in full source. */
2727 static void gdb_monitor_output(GDBState *s, const char *msg, int len)
2729 char buf[MAX_PACKET_LENGTH];
/* Hex encoding doubles the size; clamp so payload + marker fit. */
2732 if (len > (MAX_PACKET_LENGTH/2) - 1)
2733 len = (MAX_PACKET_LENGTH/2) - 1;
2734 memtohex(buf + 1, (uint8_t *)msg, len);
/* Chardev write hook for the gdb monitor terminal: forward monitor
 * output to the gdb client, splitting it into chunks small enough that
 * each hex-encoded piece fits in the last_packet buffer (minus the two
 * framing bytes). */
2738 static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
2740 const char *p = (const char *)buf;
/* Maximum raw bytes per packet: 2 hex chars per byte plus framing. */
2743 max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
2745 if (len <= max_sz) {
2746 gdb_monitor_output(gdbserver_state, p, len);
2749 gdb_monitor_output(gdbserver_state, p, max_sz);
/* SIGINT handler for the "stdio" gdb transport: stop the VM instead of
 * letting Ctrl-C terminate QEMU, so the debugger regains control. */
2757 static void gdb_sigterm_handler(int signal)
2759 if (runstate_is_running()) {
2760 vm_stop(RUN_STATE_PAUSED);
2765 int gdbserver_start(const char *device)
2768 char gdbstub_device_name[128];
2769 CharDriverState *chr = NULL;
2770 CharDriverState *mon_chr;
2774 if (strcmp(device, "none") != 0) {
2775 if (strstart(device, "tcp:", NULL)) {
2776 /* enforce required TCP attributes */
2777 snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
2778 "%s,nowait,nodelay,server", device);
2779 device = gdbstub_device_name;
2782 else if (strcmp(device, "stdio") == 0) {
2783 struct sigaction act;
2785 memset(&act, 0, sizeof(act));
2786 act.sa_handler = gdb_sigterm_handler;
2787 sigaction(SIGINT, &act, NULL);
2790 chr = qemu_chr_new("gdb", device, NULL);
2794 qemu_chr_fe_claim_no_fail(chr);
2795 qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
2796 gdb_chr_event, NULL);
2799 s = gdbserver_state;
2801 s = g_malloc0(sizeof(GDBState));
2802 gdbserver_state = s;
2804 qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
2806 /* Initialize a monitor terminal for gdb */
2807 mon_chr = g_malloc0(sizeof(*mon_chr));
2808 mon_chr->chr_write = gdb_monitor_write;
2809 monitor_init(mon_chr, 0);
2812 qemu_chr_delete(s->chr);
2813 mon_chr = s->mon_chr;
2814 memset(s, 0, sizeof(GDBState));
2816 s->c_cpu = first_cpu;
2817 s->g_cpu = first_cpu;
2819 s->state = chr ? RS_IDLE : RS_INACTIVE;
2820 s->mon_chr = mon_chr;
2821 s->current_syscall_cb = NULL;