/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "tcg-be-ldst.h"

static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#if TCG_TARGET_REG_BITS == 64
    "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
#else
    "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",

static const int tcg_target_reg_alloc_order[] = {
#if TCG_TARGET_REG_BITS == 64

static const int tcg_target_call_iarg_regs[] = {
#if TCG_TARGET_REG_BITS == 64
/* 32-bit mode uses a stack-based calling convention (GCC default). */

static const int tcg_target_call_oarg_regs[] = {
#if TCG_TARGET_REG_BITS == 32

/* Registers used with L constraint, which are the first argument
   registers on x86_64, and two random call clobbered registers on
   i386. */
#if TCG_TARGET_REG_BITS == 64
# define TCG_REG_L0 tcg_target_call_iarg_regs[0]
# define TCG_REG_L1 tcg_target_call_iarg_regs[1]
#else
# define TCG_REG_L0 TCG_REG_EAX
# define TCG_REG_L1 TCG_REG_EDX

/* For 32-bit, we are going to attempt to determine at runtime whether cmov
   is available.  However, the host compiler must supply <cpuid.h>, as we're
   not going to go so far as our own inline assembly.  */
#if TCG_TARGET_REG_BITS == 64
#elif defined(CONFIG_CPUID_H)

#define bit_CMOV (1 << 15)

static bool have_cmov;
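/* A sketch of the detection, mirroring tcg_target_init below: CPUID
   leaf 1 reports CMOV support in EDX bit 15, hence bit_CMOV above.

       unsigned a, b, c, d;
       if (__get_cpuid(1, &a, &b, &c, &d)) {
           have_cmov = (d & bit_CMOV) != 0;
       }

   __get_cpuid comes from <cpuid.h> and returns 0 when the requested
   leaf is not supported, leaving have_cmov false.  */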
static uint8_t *tb_ret_addr;

static void patch_reloc(uint8_t *code_ptr, int type,
                        intptr_t value, intptr_t addend)
        value -= (uintptr_t)code_ptr;
        if (value != (int32_t)value) {
        *(uint32_t *)code_ptr = value;
        value -= (uintptr_t)code_ptr;
        if (value != (int8_t)value) {
        *(uint8_t *)code_ptr = value;
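/* Worked example (hypothetical addresses): a forward jcc emitted by
   tcg_out_jxx below leaves a 32-bit displacement of 0 and records a
   R_386_PC32 reloc with addend -4.  If that displacement field lives
   at code_ptr = 0x1000 and the label resolves to value = 0x1234, the
   patched field ends up as value + addend - code_ptr = 0x230, i.e.
   relative to the end of the 4-byte field, as the CPU expects.  */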
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
        ct->ct |= TCG_CT_REG;
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xf);
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xf);
        ct->ct |= TCG_CT_REG;
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xff);

        /* qemu_ld/st address constraint */
        ct->ct |= TCG_CT_REG;
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_L0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_L1);

        ct->ct |= TCG_CT_CONST_S32;
        ct->ct |= TCG_CT_CONST_U32;

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
    if (ct & TCG_CT_CONST) {
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
    if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {

#if TCG_TARGET_REG_BITS == 64
# define LOWREGMASK(x)  ((x) & 7)
#else
# define LOWREGMASK(x)  (x)

#define P_EXT           0x100   /* 0x0f opcode prefix */
#define P_DATA16        0x200   /* 0x66 opcode prefix */
#if TCG_TARGET_REG_BITS == 64
# define P_ADDR32       0x400   /* 0x67 opcode prefix */
# define P_REXW         0x800   /* Set REX.W = 1 */
# define P_REXB_R       0x1000  /* REG field as byte register */
# define P_REXB_RM      0x2000  /* R/M field as byte register */
# define P_GS           0x4000  /* gs segment override */
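/* For example, OPC_MOVZWL below is 0xb7 | P_EXT, so it is emitted as
   0x0f 0xb7; adding P_DATA16 would prepend the 0x66 operand-size
   prefix, and P_REXW requests REX.W in the REX byte computed in
   tcg_out_opc.  The P_* flag bits never reach the instruction stream
   themselves; they only steer prefix generation.  */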
#define OPC_ARITH_EvIz  (0x81)
#define OPC_ARITH_EvIb  (0x83)
#define OPC_ARITH_GvEv  (0x03)          /* ... plus (ARITH_FOO << 3) */
#define OPC_ADD_GvEv    (OPC_ARITH_GvEv | (ARITH_ADD << 3))
#define OPC_BSWAP       (0xc8 | P_EXT)
#define OPC_CALL_Jz     (0xe8)
#define OPC_CMOVCC      (0x40 | P_EXT)  /* ... plus condition code */
#define OPC_CMP_GvEv    (OPC_ARITH_GvEv | (ARITH_CMP << 3))
#define OPC_DEC_r32     (0x48)
#define OPC_IMUL_GvEv   (0xaf | P_EXT)
#define OPC_IMUL_GvEvIb (0x6b)
#define OPC_IMUL_GvEvIz (0x69)
#define OPC_INC_r32     (0x40)
#define OPC_JCC_long    (0x80 | P_EXT)  /* ... plus condition code */
#define OPC_JCC_short   (0x70)          /* ... plus condition code */
#define OPC_JMP_long    (0xe9)
#define OPC_JMP_short   (0xeb)
#define OPC_LEA         (0x8d)
#define OPC_MOVB_EvGv   (0x88)          /* stores, more or less */
#define OPC_MOVL_EvGv   (0x89)          /* stores, more or less */
#define OPC_MOVL_GvEv   (0x8b)          /* loads, more or less */
#define OPC_MOVB_EvIz   (0xc6)
#define OPC_MOVL_EvIz   (0xc7)
#define OPC_MOVL_Iv     (0xb8)
#define OPC_MOVSBL      (0xbe | P_EXT)
#define OPC_MOVSWL      (0xbf | P_EXT)
#define OPC_MOVSLQ      (0x63 | P_REXW)
#define OPC_MOVZBL      (0xb6 | P_EXT)
#define OPC_MOVZWL      (0xb7 | P_EXT)
#define OPC_POP_r32     (0x58)
#define OPC_PUSH_r32    (0x50)
#define OPC_PUSH_Iv     (0x68)
#define OPC_PUSH_Ib     (0x6a)
#define OPC_RET         (0xc3)
#define OPC_SETCC       (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */
#define OPC_SHIFT_1     (0xd1)
#define OPC_SHIFT_Ib    (0xc1)
#define OPC_SHIFT_cl    (0xd3)
#define OPC_TESTL       (0x85)
#define OPC_XCHG_ax_r32 (0x90)

#define OPC_GRP3_Ev     (0xf7)
#define OPC_GRP5        (0xff)

/* Group 1 opcode extensions for 0x80-0x83.
   These are also used as modifiers for OPC_ARITH.  */

/* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3.  */

/* Group 3 opcode extensions for 0xf6, 0xf7.  To be used with OPC_GRP3.  */

/* Group 5 opcode extensions for 0xff.  To be used with OPC_GRP5.  */
#define EXT5_INC_Ev     0
#define EXT5_DEC_Ev     1
#define EXT5_CALLN_Ev   2
#define EXT5_JMPN_Ev    4

/* Condition codes to be added to OPC_JCC_{long,short}.  */

static const uint8_t tcg_cond_to_jcc[] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
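/* For instance, a brcond with TCG_COND_LTU selects JCC_JB, which
   becomes the 2-byte short form 0x72 rel8 (OPC_JCC_short + cc) or the
   6-byte near form 0x0f 0x82 rel32 (OPC_JCC_long + cc) in tcg_out_jxx
   below.  */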
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
    if (opc & P_DATA16) {
        /* We should never be asking for both 16-bit and 64-bit operation.  */
        assert((opc & P_REXW) == 0);
    if (opc & P_ADDR32) {

    rex |= (opc & P_REXW) >> 8;         /* REX.W */
    rex |= (r & 8) >> 1;                /* REX.R */
    rex |= (x & 8) >> 2;                /* REX.X */
    rex |= (rm & 8) >> 3;               /* REX.B */

    /* P_REXB_{R,RM} indicates that the given register is the low byte.
       For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do,
       as otherwise the encoding indicates %[abcd]h.  Note that the values
       that are ORed in merely indicate that the REX byte must be present;
       those bits get discarded in output.  */
    rex |= opc & (r >= 4 ? P_REXB_R : 0);
    rex |= opc & (rm >= 4 ? P_REXB_RM : 0);

        tcg_out8(s, (uint8_t)(rex | 0x40));

static void tcg_out_opc(TCGContext *s, int opc)
    if (opc & P_DATA16) {

/* Discard the register arguments to tcg_out_opc early, so as not to penalize
   the 32-bit compilation paths.  This method works with all versions of gcc,
   whereas relying on optimization may not be able to exclude them.  */
#define tcg_out_opc(s, opc, r, rm, x)  (tcg_out_opc)(s, opc)
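/* Worked example: tcg_out_modrm(s, OPC_ADD_GvEv + P_REXW, TCG_REG_RAX,
   TCG_REG_R8) encodes "addq %r8, %rax".  Here r = 0 and rm = 8, so the
   REX byte is 0x40 | W(0x08) | B(0x01) = 0x49, followed by the opcode
   0x03 and the ModRM byte 0xc0 | (0 << 3) | 0 = 0xc0: 49 03 c0.  */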
static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
    tcg_out_opc(s, opc, r, rm, 0);
    tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));

/* Output an opcode with a full "rm + (index<<shift) + offset" address mode.
   We handle either RM or INDEX missing with a negative value.  In 64-bit
   mode for absolute addresses, ~RM is the size of the immediate operand
   that will follow the instruction.  */

static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm,
                                     int index, int shift, intptr_t offset)
    if (index < 0 && rm < 0) {
        if (TCG_TARGET_REG_BITS == 64) {
            /* Try for a rip-relative addressing mode.  This has replaced
               the 32-bit-mode absolute addressing encoding.  */
            intptr_t pc = (intptr_t)s->code_ptr + 5 + ~rm;
            intptr_t disp = offset - pc;
            if (disp == (int32_t)disp) {
                tcg_out_opc(s, opc, r, 0, 0);
                tcg_out8(s, (LOWREGMASK(r) << 3) | 5);

            /* Try for an absolute address encoding.  This requires the
               use of the MODRM+SIB encoding and is therefore larger than
               rip-relative addressing.  */
            if (offset == (int32_t)offset) {
                tcg_out_opc(s, opc, r, 0, 0);
                tcg_out8(s, (LOWREGMASK(r) << 3) | 4);
                tcg_out8(s, (4 << 3) | 5);
                tcg_out32(s, offset);

            /* ??? The memory isn't directly addressable.  */

        /* Absolute address.  */
        tcg_out_opc(s, opc, r, 0, 0);
        tcg_out8(s, (r << 3) | 5);
        tcg_out32(s, offset);

    /* Find the length of the immediate addend.  Note that the encoding
       that would be used for (%ebp) indicates absolute addressing.  */
        mod = 0, len = 4, rm = 5;
    } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) {
    } else if (offset == (int8_t)offset) {

    /* Use a single byte MODRM format if possible.  Note that the encoding
       that would be used for %esp is the escape to the two byte form.  */
    if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) {
        /* Single byte MODRM format.  */
        tcg_out_opc(s, opc, r, rm, 0);
        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm));
        /* Two byte MODRM+SIB format.  */

        /* Note that the encoding that would place %esp into the index
           field indicates no index register.  In 64-bit mode, the REX.X
           bit counts, so %r12 can be used as the index.  */
            assert(index != TCG_REG_ESP);

        tcg_out_opc(s, opc, r, rm, index);
        tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4);
        tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm));

    } else if (len == 4) {
        tcg_out32(s, offset);

/* A simplification of the above with no index or shift.  */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r,
                                        int rm, intptr_t offset)
    tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset);
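/* Worked example: tcg_out_modrm_offset(s, OPC_MOVL_GvEv + P_REXW,
   TCG_REG_RAX, TCG_REG_EBP, 8) encodes "movq 8(%rbp), %rax".  The
   offset fits in int8_t, so mod = 0x40 with a one-byte displacement:
   REX 0x48, opcode 0x8b, ModRM 0x40 | (0 << 3) | 5 = 0x45, disp8 0x08,
   i.e. 48 8b 45 08.  */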
/* Generate dest op= src.  Uses the same ARITH_* codes as tgen_arithi.  */
static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src)
    /* Propagate an opcode prefix, such as P_REXW.  */
    int ext = subop & ~0x7;

    tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src);

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
        int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
        tcg_out_modrm(s, opc, ret, arg);

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
    tcg_target_long diff;

        tgen_arithr(s, ARITH_XOR, ret, ret);
    if (arg == (uint32_t)arg || type == TCG_TYPE_I32) {
        tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0);
    if (arg == (int32_t)arg) {
        tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret);

    /* Try a 7 byte pc-relative lea before the 10 byte movq.  */
    diff = arg - ((uintptr_t)s->code_ptr + 7);
    if (diff == (int32_t)diff) {
        tcg_out_opc(s, OPC_LEA | P_REXW, ret, 0, 0);
        tcg_out8(s, (LOWREGMASK(ret) << 3) | 5);

    tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0);
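/* The strategies above, cheapest first: xor reg,reg for zero (2-3
   bytes), the 5-byte b8+rd imm32 move when the value zero-extends, the
   7-byte sign-extended c7 /0 imm32 move, the 7-byte rip-relative lea
   when the constant is close to the code buffer, and finally the
   10-byte movabs (b8+rd imm64 with REX.W) as the catch-all.  */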
static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val)
    if (val == (int8_t)val) {
        tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0);
    } else if (val == (int32_t)val) {
        tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0);

static inline void tcg_out_push(TCGContext *s, int reg)
    tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0);

static inline void tcg_out_pop(TCGContext *s, int reg)
    tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0);

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
    int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0);
    tcg_out_modrm_offset(s, opc, ret, arg1, arg2);

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
    int opc = OPC_MOVL_EvGv + (type == TCG_TYPE_I64 ? P_REXW : 0);
    tcg_out_modrm_offset(s, opc, arg, arg1, arg2);

static inline void tcg_out_sti(TCGContext *s, TCGType type, TCGReg base,
                               tcg_target_long ofs, tcg_target_long val)
    int opc = OPC_MOVL_EvIz + (type == TCG_TYPE_I64 ? P_REXW : 0);
    tcg_out_modrm_offset(s, opc, 0, base, ofs);

static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
    /* Propagate an opcode prefix, such as P_DATA16.  */
    int ext = subopc & ~0x7;
        tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg);
        tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg);

static inline void tcg_out_bswap32(TCGContext *s, int reg)
    tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0);

static inline void tcg_out_rolw_8(TCGContext *s, int reg)
    tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8);

static inline void tcg_out_ext8u(TCGContext *s, int dest, int src)
    assert(src < 4 || TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);

static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw)
    assert(src < 4 || TCG_TARGET_REG_BITS == 64);
    tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);

static inline void tcg_out_ext16u(TCGContext *s, int dest, int src)
    tcg_out_modrm(s, OPC_MOVZWL, dest, src);

static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw)
    tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src);

static inline void tcg_out_ext32u(TCGContext *s, int dest, int src)
    /* 32-bit mov zero extends.  */
    tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src);

static inline void tcg_out_ext32s(TCGContext *s, int dest, int src)
    tcg_out_modrm(s, OPC_MOVSLQ, dest, src);

static inline void tcg_out_bswap64(TCGContext *s, int reg)
    tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0);

static void tgen_arithi(TCGContext *s, int c, int r0,
                        tcg_target_long val, int cf)
    if (TCG_TARGET_REG_BITS == 64) {

    /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce
       partial flags update stalls on Pentium4 and are not recommended
       by current Intel optimization manuals.  */
    if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) {
        int is_inc = (c == ARITH_ADD) ^ (val < 0);
        if (TCG_TARGET_REG_BITS == 64) {
            /* The single-byte increment encodings are re-tasked as the
               REX prefixes.  Use the MODRM encoding.  */
            tcg_out_modrm(s, OPC_GRP5 + rexw,
                          (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0);
        } else {
            tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0);

    if (c == ARITH_AND) {
        if (TCG_TARGET_REG_BITS == 64) {
            if (val == 0xffffffffu) {
                tcg_out_ext32u(s, r0, r0);
            if (val == (uint32_t)val) {
                /* AND with no high bits set can use a 32-bit operation.  */
        if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) {
            tcg_out_ext8u(s, r0, r0);
        if (val == 0xffffu) {
            tcg_out_ext16u(s, r0, r0);

    if (val == (int8_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0);
    if (rexw == 0 || val == (int32_t)val) {
        tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0);
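/* Example: tgen_arithi(s, ARITH_ADD + P_REXW, TCG_REG_RAX, 1, 0) takes
   the increment path above and emits the ModRM form "incq %rax" as
   48 ff c0; the same call with a value that fits int8_t but is not
   1/-1 uses the short-immediate form 0x83 /0 ib rather than the
   4-byte-immediate form 0x81 /0 id.  */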
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
        tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0);

/* Use SMALL != 0 to force a short forward branch.  */
static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small)
    TCGLabel *l = &s->labels[label_index];

        val = l->u.value - (intptr_t)s->code_ptr;
        if ((int8_t)val1 == val1) {
                tcg_out8(s, OPC_JMP_short);
                tcg_out8(s, OPC_JCC_short + opc);

                tcg_out8(s, OPC_JMP_long);
                tcg_out32(s, val - 5);
                tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
                tcg_out32(s, val - 6);

            tcg_out8(s, OPC_JMP_short);
            tcg_out8(s, OPC_JCC_short + opc);
        tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1);

            tcg_out8(s, OPC_JMP_long);
            tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0);
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
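/* The -5 and -6 above account for instruction length: a near jmp is 5
   bytes (e9 rel32) and a near jcc is 6 bytes (0f 8x rel32), and rel32
   is measured from the end of the instruction while val was computed
   from its start.  The reloc addends -1 and -4 serve the same purpose
   for the not-yet-resolved case, measured from the displacement field.  */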
static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int rexw)
            tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
            tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
        tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);

static void tcg_out_brcond32(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             int label_index, int small)
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond64(TCGContext *s, TCGCond cond,
                             TCGArg arg1, TCGArg arg2, int const_arg2,
                             int label_index, int small)
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small);

/* XXX: we implement it at the target level to avoid having to
   handle temporaries that live across basic blocks.  */
static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args, int small)
    label_next = gen_new_label();

        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
        tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3],

        tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2],
        tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3],

        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],

        tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3],
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],

        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],

        tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3],
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],

        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2],

        tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3],
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2],

        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2],

        tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3],
        tcg_out_jxx(s, JCC_JNE, label_next, 1);
        tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2],

    tcg_out_label(s, label_next, s->code_ptr);

static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg arg1, TCGArg arg2, int const_arg2)
    tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg arg1, TCGArg arg2, int const_arg2)
    tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
    tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
    tcg_out_ext8u(s, dest, dest);

static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
    int label_true, label_over;

    memcpy(new_args, args+1, 5*sizeof(TCGArg));

    if (args[0] == args[1] || args[0] == args[2]
        || (!const_args[3] && args[0] == args[3])
        || (!const_args[4] && args[0] == args[4])) {
        /* When the destination overlaps with one of the argument
           registers, don't do anything tricky.  */
        label_true = gen_new_label();
        label_over = gen_new_label();

        new_args[5] = label_true;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);
        tcg_out_jxx(s, JCC_JMP, label_over, 1);
        tcg_out_label(s, label_true, s->code_ptr);

        tcg_out_movi(s, TCG_TYPE_I32, args[0], 1);
        tcg_out_label(s, label_over, s->code_ptr);
        /* When the destination does not overlap one of the arguments,
           clear the destination first, jump if cond false, and emit an
           increment in the true case.  This results in smaller code.  */
        tcg_out_movi(s, TCG_TYPE_I32, args[0], 0);

        label_over = gen_new_label();
        new_args[4] = tcg_invert_cond(new_args[4]);
        new_args[5] = label_over;
        tcg_out_brcond2(s, new_args, const_args+1, 1);

        tgen_arithi(s, ARITH_ADD, args[0], 1, 0);
        tcg_out_label(s, label_over, s->code_ptr);

static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg c1, TCGArg c2, int const_c2,
    tcg_out_cmp(s, c1, c2, const_c2, 0);
        tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond], dest, v1);
        int over = gen_new_label();
        tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
        tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
        tcg_out_label(s, over, s->code_ptr);
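/* With have_cmov, the true case is a single instruction, e.g.
   "cmovne %ebx, %eax" = 0f 45 c3 (OPC_CMOVCC | JCC_JNE: two-byte
   0f 4x opcode plus register-direct ModRM); without it, the fallback
   above branches around a plain mov using the inverted condition,
   which is why have_cmov only matters on 32-bit hosts.  */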
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGArg dest,
                              TCGArg c1, TCGArg c2, int const_c2,
    tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
    tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | P_REXW, dest, v1);

static void tcg_out_branch(TCGContext *s, int call, uintptr_t dest)
    intptr_t disp = dest - (intptr_t)s->code_ptr - 5;

    if (disp == (int32_t)disp) {
        tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0);
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, dest);
        tcg_out_modrm(s, OPC_GRP5,
                      call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev, TCG_REG_R10);

static inline void tcg_out_calli(TCGContext *s, uintptr_t dest)
    tcg_out_branch(s, 1, dest);

static void tcg_out_jmp(TCGContext *s, uintptr_t dest)
    tcg_out_branch(s, 0, dest);

#if defined(CONFIG_SOFTMMU)
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static const void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEQ]  = helper_be_ldq_mmu,

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static const void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,

/* Perform the TLB load and compare.

   ADDRLO and ADDRHI contain the low and high part of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses)
   positions of the displacements of forward jumps to the TLB miss case.

   Second argument register is loaded with the low part of the address.
   In the TLB hit case, it has been adjusted as indicated by the TLB
   and so is a host address.  In the TLB miss case, it continues to
   hold a guest address.

   First argument register is clobbered.  */
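/* In C terms, the fast path below computes (a sketch):

       ofs  = (addr >> (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS))
              & ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
       tlb  = (CPUTLBEntry *)((char *)env->tlb_table[mem_index] + ofs);
       cmp  = addr & (TARGET_PAGE_MASK | ((1 << s_bits) - 1));
       if (cmp == *(target_ulong *)((char *)tlb + which)) {
           host_addr = addr + tlb->addend;      /* TLB hit */
       }

   Shifting by TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS and masking with
   the entry size pre-multiplied in yields a byte offset directly,
   saving a scaled-index step; keeping the low s_bits bits in the
   comparator also forces unaligned accesses onto the slow path.  */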
static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                                    int mem_index, TCGMemOp s_bits,
                                    uint8_t **label_ptr, int which)
    const TCGReg r0 = TCG_REG_L0;
    const TCGReg r1 = TCG_REG_L1;
    TCGType ttype = TCG_TYPE_I32;
    TCGType htype = TCG_TYPE_I32;
    int trexw = 0, hrexw = 0;

    if (TCG_TARGET_REG_BITS == 64) {
        if (TARGET_LONG_BITS == 64) {
            ttype = TCG_TYPE_I64;
        if (TCG_TYPE_PTR == TCG_TYPE_I64) {
            htype = TCG_TYPE_I64;

    tcg_out_mov(s, htype, r0, addrlo);
    tcg_out_mov(s, ttype, r1, addrlo);

    tcg_out_shifti(s, SHIFT_SHR + hrexw, r0,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tgen_arithi(s, ARITH_AND + trexw, r1,
                TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0);
    tgen_arithi(s, ARITH_AND + hrexw, r0,
                (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);

    tcg_out_modrm_sib_offset(s, OPC_LEA + hrexw, r0, TCG_AREG0, r0, 0,
                             offsetof(CPUArchState, tlb_table[mem_index][0])

    tcg_out_modrm_offset(s, OPC_CMP_GvEv + trexw, r1, r0, 0);

    /* Prepare for both the fast path add of the tlb addend, and the slow
       path function argument setup.  There are two cases worth note:
       For 32-bit guest and x86_64 host, MOVL zero-extends the guest address
       before the fastpath ADDQ below.  For 64-bit guest and x32 host, MOVQ
       copies the entire guest address for the slow path, while truncation
       for the 32-bit host happens with the fastpath ADDL below.  */
    tcg_out_mov(s, ttype, r1, addrlo);

    tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
    label_ptr[0] = s->code_ptr;

    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        /* cmp 4(r0), addrhi */
        tcg_out_modrm_offset(s, OPC_CMP_GvEv, addrhi, r0, 4);

        tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
        label_ptr[1] = s->code_ptr;

    /* add addend(r0), r1 */
    tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r1, r0,
                         offsetof(CPUTLBEntry, addend) - which);

/*
 * Record the context of a call to the out-of-line helper code for the slow
 * path for a load or store, so that we can later generate the correct
 * helper code.
 */
static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                int mem_index, uint8_t *raddr,
                                uint8_t **label_ptr)
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->mem_index = mem_index;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr[0];
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        label->label_ptr[1] = label_ptr[1];

/*
 * Generate code for the slow path for a load at the end of block
 */
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
    TCGMemOp opc = l->opc;
    uint8_t **label_ptr = &l->label_ptr[0];

    /* resolve label address */
    *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);

        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);

        if (TARGET_LONG_BITS == 64) {
            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);

        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);

        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
        /* The second argument is already loaded with addrlo.  */
        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
        tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
                     (uintptr_t)l->raddr);

    tcg_out_calli(s, (uintptr_t)qemu_ld_helpers[opc & ~MO_SIGN]);

    data_reg = l->datalo_reg;
    switch (opc & MO_SSIZE) {
        tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
        tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW);
#if TCG_TARGET_REG_BITS == 64
        tcg_out_ext32s(s, data_reg, TCG_REG_EAX);

        /* Note that the helpers have zero-extended to tcg_target_long.  */
        tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
        } else if (data_reg == TCG_REG_EDX) {
            /* xchg %edx, %eax */
            tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
            tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);

    /* Jump back to the TB code following the original load.  */
    tcg_out_jmp(s, (uintptr_t)l->raddr);

/*
 * Generate code for the slow path for a store at the end of block
 */
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
    TCGMemOp opc = l->opc;
    TCGMemOp s_bits = opc & MO_SIZE;
    uint8_t **label_ptr = &l->label_ptr[0];

    /* resolve label address */
    *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);

    if (TCG_TARGET_REG_BITS == 32) {
        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);

        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);

        if (TARGET_LONG_BITS == 64) {
            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);

        tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);

        if (s_bits == MO_64) {
            tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);

        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);

        retaddr = TCG_REG_EAX;
        tcg_out_movi(s, TCG_TYPE_I32, retaddr, (uintptr_t)l->raddr);
        tcg_out_st(s, TCG_TYPE_I32, retaddr, TCG_REG_ESP, ofs);
        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
        /* The second argument is already loaded with addrlo.  */
        tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                    tcg_target_call_iarg_regs[2], l->datalo_reg);
        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],

        if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
            retaddr = tcg_target_call_iarg_regs[4];
            tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
            retaddr = TCG_REG_RAX;
            tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
            tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, 0);

    /* "Tail call" to the helper, with the return address back inline.  */
    tcg_out_push(s, retaddr);
    tcg_out_jmp(s, (uintptr_t)qemu_st_helpers[opc]);
#elif defined(__x86_64__) && defined(__linux__)
# include <asm/prctl.h>
# include <sys/prctl.h>

int arch_prctl(int code, unsigned long addr);

static int guest_base_flags;
static inline void setup_guest_base_seg(void)
    if (arch_prctl(ARCH_SET_GS, GUEST_BASE) == 0) {
        guest_base_flags = P_GS;
# define guest_base_flags 0
static inline void setup_guest_base_seg(void) { }
#endif /* SOFTMMU */
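/* For user-only emulation on x86_64/Linux, the guest base is folded
   into the %gs segment base via arch_prctl(ARCH_SET_GS, GUEST_BASE);
   P_GS (defined above) then makes each qemu_ld/st emit a 0x65
   gs-override prefix, so "gs:(addr)" addresses guest_base + addr with
   no extra add instruction or scratch register.  */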
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, intptr_t ofs, int seg,
    const TCGMemOp bswap = memop & MO_BSWAP;

    switch (memop & MO_SSIZE) {
        tcg_out_modrm_offset(s, OPC_MOVZBL + seg, datalo, base, ofs);
        tcg_out_modrm_offset(s, OPC_MOVSBL + P_REXW + seg, datalo, base, ofs);
        tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
            tcg_out_rolw_8(s, datalo);
            tcg_out_modrm_offset(s, OPC_MOVZWL + seg, datalo, base, ofs);
            tcg_out_rolw_8(s, datalo);
            tcg_out_modrm(s, OPC_MOVSWL + P_REXW, datalo, datalo);
            tcg_out_modrm_offset(s, OPC_MOVSWL + P_REXW + seg,
        tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs);
            tcg_out_bswap32(s, datalo);
#if TCG_TARGET_REG_BITS == 64
            tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg, datalo, base, ofs);
            tcg_out_bswap32(s, datalo);
            tcg_out_ext32s(s, datalo, datalo);
            tcg_out_modrm_offset(s, OPC_MOVSLQ + seg, datalo, base, ofs);
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_modrm_offset(s, OPC_MOVL_GvEv + P_REXW + seg,
                tcg_out_bswap64(s, datalo);
            if (base != datalo) {
                tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
                tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
                                     datahi, base, ofs + 4);
                tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
                                     datahi, base, ofs + 4);
                tcg_out_modrm_offset(s, OPC_MOVL_GvEv + seg,
                tcg_out_bswap32(s, datalo);
                tcg_out_bswap32(s, datahi);

/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX.  It will be useful once fixed registers globals are less
   common.  */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
    TCGReg datalo, datahi, addrlo;
    TCGReg addrhi __attribute__((unused));
#if defined(CONFIG_SOFTMMU)
    uint8_t *label_ptr[2];

    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);

#if defined(CONFIG_SOFTMMU)
    mem_index = *args++;
    s_bits = opc & MO_SIZE;

    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
                     label_ptr, offsetof(CPUTLBEntry, addr_read));

    tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);

    /* Record the current context of a load into ldst label */
    add_qemu_ldst_label(s, 1, opc, datalo, datahi, addrlo, addrhi,
                        mem_index, s->code_ptr, label_ptr);
    int32_t offset = GUEST_BASE;
    TCGReg base = addrlo;

    /* ??? We assume all operations have left us with register contents
       that are zero extended.  So far this appears to be true.  If we
       want to enforce this, we can either do an explicit zero-extension
       here, or (if GUEST_BASE == 0, or a segment register is in use)
       use the ADDR32 prefix.  For now, do nothing.  */
    if (GUEST_BASE && guest_base_flags) {
        seg = guest_base_flags;
    } else if (TCG_TARGET_REG_BITS == 64 && offset != GUEST_BASE) {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
        tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);

    tcg_out_qemu_ld_direct(s, datalo, datahi, base, offset, seg, opc);

static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
                                   TCGReg base, intptr_t ofs, int seg,
    const TCGMemOp bswap = memop & MO_BSWAP;

    /* ??? Ideally we wouldn't need a scratch register.  For user-only,
       we could perform the bswap twice to restore the original value
       instead of moving to the scratch.  But as it is, the L constraint
       means that TCG_REG_L0 is definitely free here.  */
    const TCGReg scratch = TCG_REG_L0;

    switch (memop & MO_SIZE) {
        /* In 32-bit mode, 8-byte stores can only happen from [abcd]x.
           Use the scratch register if necessary.  */
        if (TCG_TARGET_REG_BITS == 32 && datalo >= 4) {
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
        tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R + seg,
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_rolw_8(s, scratch);
        tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_DATA16 + seg,
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_bswap32(s, scratch);
        tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datalo, base, ofs);
        if (TCG_TARGET_REG_BITS == 64) {
                tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo);
                tcg_out_bswap64(s, scratch);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_REXW + seg,
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi);
            tcg_out_bswap32(s, scratch);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs);
            tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo);
            tcg_out_bswap32(s, scratch);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, scratch, base, ofs+4);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datalo, base, ofs);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv + seg, datahi, base, ofs+4);

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
    TCGReg datalo, datahi, addrlo;
    TCGReg addrhi __attribute__((unused));
#if defined(CONFIG_SOFTMMU)
    uint8_t *label_ptr[2];

    datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
    addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);

#if defined(CONFIG_SOFTMMU)
    mem_index = *args++;
    s_bits = opc & MO_SIZE;

    tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
                     label_ptr, offsetof(CPUTLBEntry, addr_write));

    tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);

    /* Record the current context of a store into ldst label */
    add_qemu_ldst_label(s, 0, opc, datalo, datahi, addrlo, addrhi,
                        mem_index, s->code_ptr, label_ptr);
    int32_t offset = GUEST_BASE;
    TCGReg base = addrlo;

    /* ??? We assume all operations have left us with register contents
       that are zero extended.  So far this appears to be true.  If we
       want to enforce this, we can either do an explicit zero-extension
       here, or (if GUEST_BASE == 0, or a segment register is in use)
       use the ADDR32 prefix.  For now, do nothing.  */
    if (GUEST_BASE && guest_base_flags) {
        seg = guest_base_flags;
    } else if (TCG_TARGET_REG_BITS == 64 && offset != GUEST_BASE) {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE);
        tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base);

    tcg_out_qemu_st_direct(s, datalo, datahi, base, offset, seg, opc);

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg *args, const int *const_args)
#if TCG_TARGET_REG_BITS == 64
# define OP_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
            rexw = P_REXW; /* FALLTHRU */ \
        case glue(glue(INDEX_op_, x), _i32)
#else
# define OP_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32)

    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]);
        tcg_out_jmp(s, (uintptr_t)tb_ret_addr);
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            tcg_out8(s, OPC_JMP_long); /* jmp im */
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            /* indirect jump method */
            tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
                                 (intptr_t)(s->tb_next + args[0]));
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        if (const_args[0]) {
            tcg_out_calli(s, args[0]);
            tcg_out_modrm(s, OPC_GRP5, EXT5_CALLN_Ev, args[0]);

        tcg_out_jxx(s, JCC_JMP, args[0], 0);
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
        /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
        tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
        tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, args[0], args[1], args[2]);
        /* Note that we can ignore REXW for the zero-extend to 64-bit.  */
        tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
        tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, args[0], args[1], args[2]);
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);

        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVB_EvIz,
                                 0, args[1], args[2]);
            tcg_out8(s, args[0]);
            tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R,
                                 args[0], args[1], args[2]);
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16,
                                 0, args[1], args[2]);
            tcg_out16(s, args[0]);
            tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16,
                                 args[0], args[1], args[2]);
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, args[1], args[2]);
            tcg_out32(s, args[0]);
            tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);

        /* For 3-operand addition, use LEA.  */
        if (args[0] != args[1]) {
            TCGArg a0 = args[0], a1 = args[1], a2 = args[2], c3 = 0;

            if (const_args[2]) {
            } else if (a0 == a2) {
                /* Watch out for dest = src + dest, since we've removed
                   the matching constraint on the add.  */
                tgen_arithr(s, ARITH_ADD + rexw, a0, a1);

            tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3);
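/* Example: for add_i32 dest, src, $imm with dest != src, the LEA path
   encodes the whole operation in a single instruction, e.g.
   "leal 8(%ebx), %eax" = 8d 43 08, leaving the flags untouched and
   avoiding a separate mov.  */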
        if (const_args[2]) {
            tgen_arithi(s, c + rexw, args[0], args[2], 0);
            tgen_arithr(s, c + rexw, args[0], args[2]);

        if (const_args[2]) {
            if (val == (int8_t)val) {
                tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, args[0], args[0]);
                tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, args[0], args[0]);
            tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, args[0], args[2]);

        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]);
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]);

        if (const_args[2]) {
            tcg_out_shifti(s, c + rexw, args[0], args[2]);
            tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, args[0]);

    case INDEX_op_brcond_i32:
        tcg_out_brcond32(s, args[2], args[0], args[1], const_args[1],
    case INDEX_op_setcond_i32:
        tcg_out_setcond32(s, args[3], args[0], args[1],
                          args[2], const_args[2]);
    case INDEX_op_movcond_i32:
        tcg_out_movcond32(s, args[5], args[0], args[1],
                          args[2], const_args[2], args[3]);

        tcg_out_rolw_8(s, args[0]);
        tcg_out_bswap32(s, args[0]);

        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, args[0]);
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, args[0]);

        tcg_out_ext8s(s, args[0], args[1], rexw);
        tcg_out_ext16s(s, args[0], args[1], rexw);
        tcg_out_ext8u(s, args[0], args[1]);
        tcg_out_ext16u(s, args[0], args[1]);

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, 0);
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, 1);
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, 0);
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, 1);

        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
        tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IMUL, args[3]);

        if (const_args[4]) {
            tgen_arithi(s, ARITH_ADD + rexw, args[0], args[4], 1);
            tgen_arithr(s, ARITH_ADD + rexw, args[0], args[4]);
        if (const_args[5]) {
            tgen_arithi(s, ARITH_ADC + rexw, args[1], args[5], 1);
            tgen_arithr(s, ARITH_ADC + rexw, args[1], args[5]);

        if (const_args[4]) {
            tgen_arithi(s, ARITH_SUB + rexw, args[0], args[4], 1);
            tgen_arithr(s, ARITH_SUB + rexw, args[0], args[4]);
        if (const_args[5]) {
            tgen_arithi(s, ARITH_SBB + rexw, args[1], args[5], 1);
            tgen_arithr(s, ARITH_SBB + rexw, args[1], args[5]);

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args, 0);
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
#else /* TCG_TARGET_REG_BITS == 64 */
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
    case INDEX_op_ld32s_i64:
        tcg_out_modrm_offset(s, OPC_MOVSLQ, args[0], args[1], args[2]);
    case INDEX_op_ld_i64:
        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
    case INDEX_op_st_i64:
        if (const_args[0]) {
            tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW,
                                 0, args[1], args[2]);
            tcg_out32(s, args[0]);
            tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);

    case INDEX_op_brcond_i64:
        tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1],
    case INDEX_op_setcond_i64:
        tcg_out_setcond64(s, args[3], args[0], args[1],
                          args[2], const_args[2]);
    case INDEX_op_movcond_i64:
        tcg_out_movcond64(s, args[5], args[0], args[1],
                          args[2], const_args[2], args[3]);

    case INDEX_op_bswap64_i64:
        tcg_out_bswap64(s, args[0]);
    case INDEX_op_ext32u_i64:
        tcg_out_ext32u(s, args[0], args[1]);
    case INDEX_op_ext32s_i64:
        tcg_out_ext32s(s, args[0], args[1]);

        if (args[3] == 0 && args[4] == 8) {
            /* load bits 0..7 */
            tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM,
        } else if (args[3] == 8 && args[4] == 8) {
            /* load bits 8..15 */
            tcg_out_modrm(s, OPC_MOVB_EvGv, args[2], args[0] + 4);
        } else if (args[3] == 0 && args[4] == 16) {
            /* load bits 0..15 */
            tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, args[2], args[0]);

static const TCGTargetOpDef x86_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "qi", "r" } },
    { INDEX_op_st16_i32, { "ri", "r" } },
    { INDEX_op_st_i32, { "ri", "r" } },

    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "ri" } },
    { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "ri" } },
    { INDEX_op_xor_i32, { "r", "0", "ri" } },

    { INDEX_op_shl_i32, { "r", "0", "ci" } },
    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    { INDEX_op_sar_i32, { "r", "0", "ci" } },
    { INDEX_op_rotl_i32, { "r", "0", "ci" } },
    { INDEX_op_rotr_i32, { "r", "0", "ci" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "0" } },
    { INDEX_op_bswap32_i32, { "r", "0" } },

    { INDEX_op_neg_i32, { "r", "0" } },

    { INDEX_op_not_i32, { "r", "0" } },

    { INDEX_op_ext8s_i32, { "r", "q" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext8u_i32, { "r", "q" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_setcond_i32, { "q", "r", "ri" } },

    { INDEX_op_deposit_i32, { "Q", "0", "Q" } },
#if TCG_TARGET_HAS_movcond_i32
    { INDEX_op_movcond_i32, { "r", "r", "ri", "r", "0" } },

    { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
    { INDEX_op_muls2_i32, { "a", "d", "a", "r" } },
    { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },

#if TCG_TARGET_REG_BITS == 32
    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "ri", "r" } },
    { INDEX_op_st16_i64, { "ri", "r" } },
    { INDEX_op_st32_i64, { "ri", "r" } },
    { INDEX_op_st_i64, { "re", "r" } },

    { INDEX_op_add_i64, { "r", "r", "re" } },
    { INDEX_op_mul_i64, { "r", "0", "re" } },
    { INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } },
    { INDEX_op_sub_i64, { "r", "0", "re" } },
    { INDEX_op_and_i64, { "r", "0", "reZ" } },
    { INDEX_op_or_i64, { "r", "0", "re" } },
    { INDEX_op_xor_i64, { "r", "0", "re" } },

    { INDEX_op_shl_i64, { "r", "0", "ci" } },
    { INDEX_op_shr_i64, { "r", "0", "ci" } },
    { INDEX_op_sar_i64, { "r", "0", "ci" } },
    { INDEX_op_rotl_i64, { "r", "0", "ci" } },
    { INDEX_op_rotr_i64, { "r", "0", "ci" } },

    { INDEX_op_brcond_i64, { "r", "re" } },
    { INDEX_op_setcond_i64, { "r", "r", "re" } },

    { INDEX_op_bswap16_i64, { "r", "0" } },
    { INDEX_op_bswap32_i64, { "r", "0" } },
    { INDEX_op_bswap64_i64, { "r", "0" } },
    { INDEX_op_neg_i64, { "r", "0" } },
    { INDEX_op_not_i64, { "r", "0" } },

    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },
    { INDEX_op_ext8u_i64, { "r", "r" } },
    { INDEX_op_ext16u_i64, { "r", "r" } },
    { INDEX_op_ext32u_i64, { "r", "r" } },

    { INDEX_op_deposit_i64, { "Q", "0", "Q" } },
    { INDEX_op_movcond_i64, { "r", "r", "re", "r", "0" } },

    { INDEX_op_mulu2_i64, { "a", "d", "a", "r" } },
    { INDEX_op_muls2_i64, { "a", "d", "a", "r" } },
    { INDEX_op_add2_i64, { "r", "r", "0", "1", "re", "re" } },
    { INDEX_op_sub2_i64, { "r", "r", "0", "1", "re", "re" } },

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld_i32, { "r", "L" } },
    { INDEX_op_qemu_st_i32, { "L", "L" } },
    { INDEX_op_qemu_ld_i64, { "r", "L" } },
    { INDEX_op_qemu_st_i64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    { INDEX_op_qemu_ld_i32, { "r", "L" } },
    { INDEX_op_qemu_st_i32, { "L", "L" } },
    { INDEX_op_qemu_ld_i64, { "r", "r", "L" } },
    { INDEX_op_qemu_st_i64, { "L", "L", "L" } },
    { INDEX_op_qemu_ld_i32, { "r", "L", "L" } },
    { INDEX_op_qemu_st_i32, { "L", "L", "L" } },
    { INDEX_op_qemu_ld_i64, { "r", "r", "L", "L" } },
    { INDEX_op_qemu_st_i64, { "L", "L", "L", "L" } },

static int tcg_target_callee_save_regs[] = {
#if TCG_TARGET_REG_BITS == 64
    TCG_REG_R14, /* Currently used for the global env. */
    TCG_REG_EBP, /* Currently used for the global env. */

/* Compute frame size via macros, to share between tcg_target_qemu_prologue
   and tcg_register_jit.  */

#define PUSH_SIZE \
    ((1 + ARRAY_SIZE(tcg_target_callee_save_regs)) \
     * (TCG_TARGET_REG_BITS / 8))

#define FRAME_SIZE \
    ((PUSH_SIZE \
      + TCG_STATIC_CALL_ARGS_SIZE \
      + CPU_TEMP_BUF_NLONGS * sizeof(long) \
      + TCG_TARGET_STACK_ALIGN - 1) \
     & ~(TCG_TARGET_STACK_ALIGN - 1))
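/* Worked example (assuming the usual x86_64 values: 6 callee-saved
   registers pushed plus the return address, TCG_STATIC_CALL_ARGS_SIZE
   = 128, CPU_TEMP_BUF_NLONGS = 128, 16-byte stack alignment):
   PUSH_SIZE = 7 * 8 = 56, so FRAME_SIZE = align(56 + 128 + 1024, 16)
   = 1216, and the prologue below adjusts the stack pointer by
   FRAME_SIZE - PUSH_SIZE to reach the aligned frame.  */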
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
    int i, stack_addend;

    /* Reserve some stack space, also for TCG temps.  */
    stack_addend = FRAME_SIZE - PUSH_SIZE;
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* Save all callee saved registers.  */
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_push(s, tcg_target_callee_save_regs[i]);

#if TCG_TARGET_REG_BITS == 32
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP,
               (ARRAY_SIZE(tcg_target_callee_save_regs) + 1) * 4);
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
    tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, TCG_REG_ESP,
                         (ARRAY_SIZE(tcg_target_callee_save_regs) + 2) * 4
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
    tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[1]);

    tb_ret_addr = s->code_ptr;

    tcg_out_addi(s, TCG_REG_CALL_STACK, stack_addend);

    for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
        tcg_out_pop(s, tcg_target_callee_save_regs[i]);
    tcg_out_opc(s, OPC_RET, 0, 0, 0);

#if !defined(CONFIG_SOFTMMU)
    /* Try to set up a segment register to point to GUEST_BASE.  */
        setup_guest_base_seg();

static void tcg_target_init(TCGContext *s)
    /* For 32-bit, 99% certainty that we're running on hardware that supports
       cmov, but we still need to check.  In case cmov is not available, we'll
       use a small forward branch.  */
        unsigned a, b, c, d;
        have_cmov = (__get_cpuid(1, &a, &b, &c, &d) && (d & bit_CMOV));

    if (TCG_TARGET_REG_BITS == 64) {
        tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
        tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
        tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX);
    if (TCG_TARGET_REG_BITS == 64) {
#if !defined(_WIN64)
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
        tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    tcg_add_target_add_op_defs(x86_op_defs);

    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[14];

/* We're expecting a 2 byte uleb128 encoded value.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#if !defined(__ELF__)
    /* Host machine without ELF.  */
#elif TCG_TARGET_REG_BITS == 64
#define ELF_HOST_MACHINE EM_X86_64
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.code_align = 1,
    .cie.data_align = 0x78,             /* sleb128 -8 */
    .cie.return_column = 16,

    /* Total FDE size does not include the "len" member.  */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

        12, 7,                          /* DW_CFA_def_cfa %rsp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */

        0x90, 1,                        /* DW_CFA_offset, %rip, -8 */
        /* The following ordering must match tcg_target_callee_save_regs.  */
        0x86, 2,                        /* DW_CFA_offset, %rbp, -16 */
        0x83, 3,                        /* DW_CFA_offset, %rbx, -24 */
        0x8c, 4,                        /* DW_CFA_offset, %r12, -32 */
        0x8d, 5,                        /* DW_CFA_offset, %r13, -40 */
        0x8e, 6,                        /* DW_CFA_offset, %r14, -48 */
        0x8f, 7,                        /* DW_CFA_offset, %r15, -56 */

#define ELF_HOST_MACHINE EM_386
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.code_align = 1,
    .cie.data_align = 0x7c,             /* sleb128 -4 */
    .cie.return_column = 8,

    /* Total FDE size does not include the "len" member.  */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

        12, 4,                          /* DW_CFA_def_cfa %esp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */

        0x88, 1,                        /* DW_CFA_offset, %eip, -4 */
        /* The following ordering must match tcg_target_callee_save_regs.  */
        0x85, 2,                        /* DW_CFA_offset, %ebp, -8 */
        0x83, 3,                        /* DW_CFA_offset, %ebx, -12 */
        0x86, 4,                        /* DW_CFA_offset, %esi, -16 */
        0x87, 5,                        /* DW_CFA_offset, %edi, -20 */

#if defined(ELF_HOST_MACHINE)
void tcg_register_jit(void *buf, size_t buf_size)
    debug_frame.fde.func_start = (uintptr_t)buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));