2 * Tiny Code Generator for QEMU
4 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
5 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
6 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27 /* ??? The translation blocks produced by TCG are generally small enough to
28 be entirely reachable with a 16-bit displacement. Leaving the option for
29 a 32-bit displacement here Just In Case. */
30 #define USE_LONG_BRANCHES 0
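/* For scale: the RI-format relative branch (BRC) takes a 16-bit signed
   count of halfwords, so it can reach about +-64KiB from the branch
   instruction, while the RIL-format BRCL takes a 32-bit halfword count.
   E.g. a target 0x9000 bytes away is only 0x4800 halfwords away and
   still fits easily in 16 bits.  */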
32 #define TCG_CT_CONST_32 0x0100
33 #define TCG_CT_CONST_NEG 0x0200
34 #define TCG_CT_CONST_ADDI 0x0400
35 #define TCG_CT_CONST_MULI 0x0800
36 #define TCG_CT_CONST_ANDI 0x1000
37 #define TCG_CT_CONST_ORI 0x2000
38 #define TCG_CT_CONST_XORI 0x4000
39 #define TCG_CT_CONST_CMPI 0x8000
41 /* In several places within the instruction set, 0 means "no register"
42 rather than TCG_REG_R0. */
43 #define TCG_REG_NONE 0
45 /* A scratch register that may be used throughout the backend. */
46 #define TCG_TMP0 TCG_REG_R14
48 #ifdef CONFIG_USE_GUEST_BASE
49 #define TCG_GUEST_BASE_REG TCG_REG_R13
51 #define TCG_GUEST_BASE_REG TCG_REG_R0
59 /* All of the following instructions are prefixed with their instruction
60 format, and are defined as 8- or 16-bit quantities, even when the two
61 halves of the 16-bit quantity may appear 32 bits apart in the insn.
62 This makes it easy to copy the values from the tables in Appendix B. */
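/* For instance, SLLG appears in Appendix B as opcode EB...0D; assuming the
   enumerator below is RSY_SLLG = 0xeb0d, tcg_out_insn_RSY (further down)
   emits the 0xeb byte in the first halfword and the 0x0d byte as the last
   byte of the 6-byte instruction, so the two halves of the enum value
   really do end up 32 bits apart.  */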
63 typedef enum S390Opcode {
207 #define LD_SIGNED 0x04
208 #define LD_UINT8 0x00
209 #define LD_INT8 (LD_UINT8 | LD_SIGNED)
210 #define LD_UINT16 0x01
211 #define LD_INT16 (LD_UINT16 | LD_SIGNED)
212 #define LD_UINT32 0x02
213 #define LD_INT32 (LD_UINT32 | LD_SIGNED)
214 #define LD_UINT64 0x03
215 #define LD_INT64 (LD_UINT64 | LD_SIGNED)
218 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
219 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
220 "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15"
224 /* Since R6 is a potential argument register, choose it last of the
225 call-saved registers. Likewise prefer the call-clobbered registers
226 in reverse order to maximize the chance of avoiding the arguments. */
227 static const int tcg_target_reg_alloc_order[] = {
245 static const int tcg_target_call_iarg_regs[] = {
253 static const int tcg_target_call_oarg_regs[] = {
255 #if TCG_TARGET_REG_BITS == 32
264 #define S390_CC_NE (S390_CC_LT | S390_CC_GT)
265 #define S390_CC_LE (S390_CC_LT | S390_CC_EQ)
266 #define S390_CC_GE (S390_CC_GT | S390_CC_EQ)
267 #define S390_CC_NEVER 0
268 #define S390_CC_ALWAYS 15
270 /* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
271 static const uint8_t tcg_cond_to_s390_cond[] = {
272 [TCG_COND_EQ] = S390_CC_EQ,
273 [TCG_COND_NE] = S390_CC_NE,
274 [TCG_COND_LT] = S390_CC_LT,
275 [TCG_COND_LE] = S390_CC_LE,
276 [TCG_COND_GT] = S390_CC_GT,
277 [TCG_COND_GE] = S390_CC_GE,
278 [TCG_COND_LTU] = S390_CC_LT,
279 [TCG_COND_LEU] = S390_CC_LE,
280 [TCG_COND_GTU] = S390_CC_GT,
281 [TCG_COND_GEU] = S390_CC_GE,
284 /* Condition codes that result from a LOAD AND TEST. Here, we have no
285 unsigned instruction variant; however, since the test is against zero we
286 can re-map the outcomes appropriately. */
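/* Concretely: after LOAD AND TEST of r1 against zero, "r1 < 0 unsigned"
   can never be true and "r1 >= 0 unsigned" is always true, while
   "r1 <= 0 unsigned" and "r1 > 0 unsigned" collapse to equality and
   inequality with zero -- hence the NEVER/EQ/NE/ALWAYS entries below.  */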
287 static const uint8_t tcg_cond_to_ltr_cond[] = {
288 [TCG_COND_EQ] = S390_CC_EQ,
289 [TCG_COND_NE] = S390_CC_NE,
290 [TCG_COND_LT] = S390_CC_LT,
291 [TCG_COND_LE] = S390_CC_LE,
292 [TCG_COND_GT] = S390_CC_GT,
293 [TCG_COND_GE] = S390_CC_GE,
294 [TCG_COND_LTU] = S390_CC_NEVER,
295 [TCG_COND_LEU] = S390_CC_EQ,
296 [TCG_COND_GTU] = S390_CC_NE,
297 [TCG_COND_GEU] = S390_CC_ALWAYS,
300 #ifdef CONFIG_SOFTMMU
302 #include "exec/softmmu_defs.h"
304 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
305 int mmu_idx) */
306 static const void * const qemu_ld_helpers[4] = {
313 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
314 uintxx_t val, int mmu_idx) */
315 static const void * const qemu_st_helpers[4] = {
323 static uint8_t *tb_ret_addr;
325 /* A list of relevant facilities used by this translator. Some of these
326 are required for proper operation, and these are checked at startup. */
328 #define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2))
329 #define FACILITY_LONG_DISP (1ULL << (63 - 18))
330 #define FACILITY_EXT_IMM (1ULL << (63 - 21))
331 #define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
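/* STFLE numbers facility bits from the most significant bit of each stored
   doubleword, so facility bit N of the first doubleword corresponds to
   (1ULL << (63 - N)) here; e.g. the long-displacement facility is bit 18,
   hence the 1ULL << 45 mask above.  */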
333 static uint64_t facilities;
335 static void patch_reloc(uint8_t *code_ptr, int type,
336 tcg_target_long value, tcg_target_long addend)
338 tcg_target_long code_ptr_tl = (tcg_target_long)code_ptr;
339 tcg_target_long pcrel2;
341 /* ??? Not the usual definition of "addend". */
342 pcrel2 = (value - (code_ptr_tl + addend)) >> 1;
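/* Worked example: tgen_branch below emits the BRC opcode halfword and then
   records a reloc at s->code_ptr -- the address of the 16-bit displacement
   field -- with addend -2.  code_ptr + addend is thus the start of the
   branch insn, so pcrel2 becomes (target - insn_start) / 2, the halfword
   offset the hardware expects, and it is written into the displacement
   field below.  */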
346 assert(pcrel2 == (int16_t)pcrel2);
347 *(int16_t *)code_ptr = pcrel2;
350 assert(pcrel2 == (int32_t)pcrel2);
351 *(int32_t *)code_ptr = pcrel2;
359 /* parse target specific constraints */
360 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
362 const char *ct_str = *pct_str;
365 case 'r': /* all registers */
366 ct->ct |= TCG_CT_REG;
367 tcg_regset_set32(ct->u.regs, 0, 0xffff);
369 case 'R': /* not R0 */
370 ct->ct |= TCG_CT_REG;
371 tcg_regset_set32(ct->u.regs, 0, 0xffff);
372 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
374 case 'L': /* qemu_ld/st constraint */
375 ct->ct |= TCG_CT_REG;
376 tcg_regset_set32(ct->u.regs, 0, 0xffff);
377 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R2);
378 tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
380 case 'a': /* force R2 for division */
381 ct->ct |= TCG_CT_REG;
382 tcg_regset_clear(ct->u.regs);
383 tcg_regset_set_reg(ct->u.regs, TCG_REG_R2);
385 case 'b': /* force R3 for division */
386 ct->ct |= TCG_CT_REG;
387 tcg_regset_clear(ct->u.regs);
388 tcg_regset_set_reg(ct->u.regs, TCG_REG_R3);
390 case 'N': /* force immediate negate */
391 ct->ct |= TCG_CT_CONST_NEG;
393 case 'W': /* force 32-bit ("word") immediate */
394 ct->ct |= TCG_CT_CONST_32;
397 ct->ct |= TCG_CT_CONST_ADDI;
400 ct->ct |= TCG_CT_CONST_MULI;
403 ct->ct |= TCG_CT_CONST_ANDI;
406 ct->ct |= TCG_CT_CONST_ORI;
409 ct->ct |= TCG_CT_CONST_XORI;
412 ct->ct |= TCG_CT_CONST_CMPI;
423 /* Immediates to be used with logical AND. This is an optimization only,
424 since a full 64-bit immediate AND can always be performed with 4 sequential
425 NI[LH][LH] instructions. What we're looking for is immediates that we
426 can load efficiently, and the immediate load plus the reg-reg AND is
427 smaller than the sequential NI's. */
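/* Two sketches of the trade-off, assuming a 64-bit AND with the
   extended-immediate facility: 0xfffffffffffff0ff only clears bits inside
   one 16-bit chunk, so a single NILL 0xf0ff does the job and the constant
   is accepted as an immediate.  0x00000000ffff0000 is itself a 16-bit
   value shifted into place; loading it with LLILH and using NGR is
   cheaper, so it is rejected and forced into a register.  */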
429 static int tcg_match_andi(int ct, tcg_target_ulong val)
433 if (facilities & FACILITY_EXT_IMM) {
434 if (ct & TCG_CT_CONST_32) {
435 /* All 32-bit ANDs can be performed with 1 48-bit insn. */
439 /* Zero-extensions. */
440 if (val == 0xff || val == 0xffff || val == 0xffffffff) {
444 if (ct & TCG_CT_CONST_32) {
446 } else if (val == 0xffffffff) {
451 /* Try all 32-bit insns that can perform it in one go. */
452 for (i = 0; i < 4; i++) {
453 tcg_target_ulong mask = ~(0xffffull << i*16);
454 if ((val & mask) == mask) {
459 /* Look for 16-bit values performing the mask. These are better
460 to load with LLI[LH][LH]. */
461 for (i = 0; i < 4; i++) {
462 tcg_target_ulong mask = 0xffffull << i*16;
463 if ((val & mask) == val) {
468 /* Look for 32-bit values performing the 64-bit mask. These
469 are better to load with LLI[LH]F, or if extended immediates
470 not available, with a pair of LLI insns. */
471 if ((ct & TCG_CT_CONST_32) == 0) {
472 if (val <= 0xffffffff || (val & 0xffffffff) == 0) {
480 /* Immediates to be used with logical OR. This is an optimization only,
481 since a full 64-bit immediate OR can always be performed with 4 sequential
482 OI[LH][LH] instructions. What we're looking for is immediates that we
483 can load efficiently, and the immediate load plus the reg-reg OR is
484 smaller than the sequential OI's. */
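/* For example, -2 fits in a signed 16-bit immediate: LGHI plus a reg-reg
   OR costs 8 bytes, versus 16 bytes for the four OI[LH][LH] insns needed
   to set all the other bits, so such values are rejected here and loaded
   into a register instead.  */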
486 static int tcg_match_ori(int ct, tcg_target_long val)
488 if (facilities & FACILITY_EXT_IMM) {
489 if (ct & TCG_CT_CONST_32) {
490 /* All 32-bit ORs can be performed with 1 48-bit insn. */
495 /* Look for negative values. These are best to load with LGHI. */
497 if (val == (int16_t)val) {
500 if (facilities & FACILITY_EXT_IMM) {
501 if (val == (int32_t)val) {
510 /* Immediates to be used with logical XOR. This is almost, but not quite,
511 only an optimization. XOR with immediate is only supported with the
512 extended-immediate facility. That said, there are a few patterns for
513 which it is better to load the value into a register first. */
515 static int tcg_match_xori(int ct, tcg_target_long val)
517 if ((facilities & FACILITY_EXT_IMM) == 0) {
521 if (ct & TCG_CT_CONST_32) {
522 /* All 32-bit XORs can be performed with 1 48-bit insn. */
526 /* Look for negative values. These are best to load with LGHI. */
527 if (val < 0 && val == (int32_t)val) {
534 /* Immediates to be used with comparisons. */
536 static int tcg_match_cmpi(int ct, tcg_target_long val)
538 if (facilities & FACILITY_EXT_IMM) {
539 /* The COMPARE IMMEDIATE instruction is available. */
540 if (ct & TCG_CT_CONST_32) {
541 /* We have a 32-bit immediate and can compare against anything. */
544 /* ??? We have no insight here into whether the comparison is
545 signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit
546 signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses
547 a 32-bit unsigned immediate. If we were to use the (semi)
548 obvious "val == (int32_t)val" we would be enabling unsigned
549 comparisons vs very large numbers. The only solution is to
550 take the intersection of the ranges. */
551 /* ??? Another possible solution is to simply lie and allow all
552 constants here and force the out-of-range values into a temp
553 register in tgen_cmp when we have knowledge of the actual
554 comparison code in use. */
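/* E.g. 0x7fffffff is accepted because both COMPARE IMMEDIATE and COMPARE
   LOGICAL IMMEDIATE can encode it, while 0x80000000 is rejected: the
   logical form could encode it but the signed form could not, and we do
   not yet know which of the two will be needed.  */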
555 return val >= 0 && val <= 0x7fffffff;
558 /* Only the LOAD AND TEST instruction is available. */
563 /* Test if a constant matches the constraint. */
564 static int tcg_target_const_match(tcg_target_long val,
565 const TCGArgConstraint *arg_ct)
569 if (ct & TCG_CT_CONST) {
573 /* Handle the modifiers. */
574 if (ct & TCG_CT_CONST_NEG) {
577 if (ct & TCG_CT_CONST_32) {
581 /* The following are mutually exclusive. */
582 if (ct & TCG_CT_CONST_ADDI) {
583 /* Immediates that may be used with add. If we have the
584 extended-immediates facility then we have ADD IMMEDIATE
585 with signed and unsigned 32-bit, otherwise we have only
586 ADD HALFWORD IMMEDIATE with a signed 16-bit. */
587 if (facilities & FACILITY_EXT_IMM) {
588 return val == (int32_t)val || val == (uint32_t)val;
590 return val == (int16_t)val;
592 } else if (ct & TCG_CT_CONST_MULI) {
593 /* Immediates that may be used with multiply. If we have the
594 general-instruction-extensions, then we have MULTIPLY SINGLE
595 IMMEDIATE with a signed 32-bit, otherwise we have only
596 MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
597 if (facilities & FACILITY_GEN_INST_EXT) {
598 return val == (int32_t)val;
600 return val == (int16_t)val;
602 } else if (ct & TCG_CT_CONST_ANDI) {
603 return tcg_match_andi(ct, val);
604 } else if (ct & TCG_CT_CONST_ORI) {
605 return tcg_match_ori(ct, val);
606 } else if (ct & TCG_CT_CONST_XORI) {
607 return tcg_match_xori(ct, val);
608 } else if (ct & TCG_CT_CONST_CMPI) {
609 return tcg_match_cmpi(ct, val);
615 /* Emit instructions according to the given instruction format. */
617 static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
619 tcg_out16(s, (op << 8) | (r1 << 4) | r2);
622 static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
623 TCGReg r1, TCGReg r2)
625 tcg_out32(s, (op << 16) | (r1 << 4) | r2);
628 static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
630 tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
633 static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
635 tcg_out16(s, op | (r1 << 4));
639 static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
640 TCGReg b2, TCGReg r3, int disp)
642 tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
646 static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
647 TCGReg b2, TCGReg r3, int disp)
649 tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
650 tcg_out32(s, (op & 0xff) | (b2 << 28)
651 | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
654 #define tcg_out_insn_RX tcg_out_insn_RS
655 #define tcg_out_insn_RXY tcg_out_insn_RSY
657 /* Emit an opcode with "type-checking" of the format. */
658 #define tcg_out_insn(S, FMT, OP, ...) \
659 glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)
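/* For example, tcg_out_insn(s, RRE, LGR, dst, src) expands to
   tcg_out_insn_RRE(s, RRE_LGR, dst, src); naming an opcode of the wrong
   format, say tcg_out_insn(s, RR, LGR, ...), would reference a
   nonexistent RR_LGR enumerator and fail to compile -- that is the
   "type-checking".  */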
662 /* emit 64-bit shifts */
663 static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
664 TCGReg src, TCGReg sh_reg, int sh_imm)
666 tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
669 /* emit 32-bit shifts */
670 static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
671 TCGReg sh_reg, int sh_imm)
673 tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
676 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
679 if (type == TCG_TYPE_I32) {
680 tcg_out_insn(s, RR, LR, dst, src);
682 tcg_out_insn(s, RRE, LGR, dst, src);
687 /* load a register with an immediate value */
688 static void tcg_out_movi(TCGContext *s, TCGType type,
689 TCGReg ret, tcg_target_long sval)
691 static const S390Opcode lli_insns[4] = {
692 RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
695 tcg_target_ulong uval = sval;
698 if (type == TCG_TYPE_I32) {
699 uval = (uint32_t)sval;
700 sval = (int32_t)sval;
703 /* Try all 32-bit insns that can load it in one go. */
704 if (sval >= -0x8000 && sval < 0x8000) {
705 tcg_out_insn(s, RI, LGHI, ret, sval);
709 for (i = 0; i < 4; i++) {
710 tcg_target_long mask = 0xffffull << i*16;
711 if ((uval & mask) == uval) {
712 tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
717 /* Try all 48-bit insns that can load it in one go. */
718 if (facilities & FACILITY_EXT_IMM) {
719 if (sval == (int32_t)sval) {
720 tcg_out_insn(s, RIL, LGFI, ret, sval);
723 if (uval <= 0xffffffff) {
724 tcg_out_insn(s, RIL, LLILF, ret, uval);
727 if ((uval & 0xffffffff) == 0) {
728 tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
733 /* Try for PC-relative address load. */
734 if ((sval & 1) == 0) {
735 intptr_t off = (sval - (intptr_t)s->code_ptr) >> 1;
736 if (off == (int32_t)off) {
737 tcg_out_insn(s, RIL, LARL, ret, off);
742 /* If extended immediates are not present, then we may have to issue
743 several instructions to load the low 32 bits. */
744 if (!(facilities & FACILITY_EXT_IMM)) {
745 /* A 32-bit unsigned value can be loaded in 2 insns. And given
746 that the lli_insns loop above did not succeed, we know that
747 both insns are required. */
748 if (uval <= 0xffffffff) {
749 tcg_out_insn(s, RI, LLILL, ret, uval);
750 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
754 /* If all high bits are set, the value can be loaded in 2 or 3 insns.
755 We first want to make sure that all the high bits get set. With
756 luck the low 16-bits can be considered negative to perform that for
757 free, otherwise we load an explicit -1. */
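/* Worked example for this path (no extended immediates, high 32 bits all
   ones), assuming the elided test checks bit 15 of the low halfword:
   0xffffffff2345a678 has that bit set, so LGHI's sign extension sets all
   the upper bits for free and one IILH 0x2345 finishes the job (2 insns);
   0xffffffff23451678 does not, so it takes LGHI -1, IILL 0x1678,
   IILH 0x2345 (3 insns).  */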
758 if (sval >> 31 >> 1 == -1) {
760 tcg_out_insn(s, RI, LGHI, ret, uval);
762 tcg_out_insn(s, RI, LGHI, ret, -1);
763 tcg_out_insn(s, RI, IILL, ret, uval);
765 tcg_out_insn(s, RI, IILH, ret, uval >> 16);
770 /* If we get here, both the high and low parts have non-zero bits. */
772 /* Recurse to load the lower 32-bits. */
773 tcg_out_movi(s, TCG_TYPE_I32, ret, sval);
775 /* Insert data into the high 32-bits. */
776 uval = uval >> 31 >> 1;
777 if (facilities & FACILITY_EXT_IMM) {
778 if (uval < 0x10000) {
779 tcg_out_insn(s, RI, IIHL, ret, uval);
780 } else if ((uval & 0xffff) == 0) {
781 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
783 tcg_out_insn(s, RIL, IIHF, ret, uval);
787 tcg_out_insn(s, RI, IIHL, ret, uval);
789 if (uval & 0xffff0000) {
790 tcg_out_insn(s, RI, IIHH, ret, uval >> 16);
796 /* Emit a load/store type instruction. Inputs are:
797 DATA: The register to be loaded or stored.
798 BASE+OFS: The effective address.
799 OPC_RX: The RX format opcode for the operation (e.g. STC) if it has one, otherwise 0.
800 OPC_RXY: The RXY format opcode for the operation (e.g. STCY). */
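/* The RX form is 4 bytes with a 12-bit unsigned displacement (0..0xfff);
   the RXY form is 6 bytes with a 20-bit signed displacement and needs the
   long-displacement facility, which this backend requires anyway.  Offsets
   outside even that range are split below: the high bits go through an
   immediate load into TCG_TMP0, which then serves as the index register.  */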
802 static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
803 TCGReg data, TCGReg base, TCGReg index,
806 if (ofs < -0x80000 || ofs >= 0x80000) {
807 /* Combine the low 16 bits of the offset with the actual load insn;
808 the high 48 bits must come from an immediate load. */
809 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs & ~0xffff);
812 /* If we were already given an index register, add it in. */
813 if (index != TCG_REG_NONE) {
814 tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
819 if (opc_rx && ofs >= 0 && ofs < 0x1000) {
820 tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
822 tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
827 /* load data without address translation or endianness conversion */
828 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
829 TCGReg base, tcg_target_long ofs)
831 if (type == TCG_TYPE_I32) {
832 tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
834 tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
838 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
839 TCGReg base, tcg_target_long ofs)
841 if (type == TCG_TYPE_I32) {
842 tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
844 tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
848 /* load data from an absolute host address */
849 static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
851 tcg_target_long addr = (tcg_target_long)abs;
853 if (facilities & FACILITY_GEN_INST_EXT) {
854 tcg_target_long disp = (addr - (tcg_target_long)s->code_ptr) >> 1;
855 if (disp == (int32_t)disp) {
856 if (type == TCG_TYPE_I32) {
857 tcg_out_insn(s, RIL, LRL, dest, disp);
859 tcg_out_insn(s, RIL, LGRL, dest, disp);
865 tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
866 tcg_out_ld(s, type, dest, dest, addr & 0xffff);
869 static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
871 if (facilities & FACILITY_EXT_IMM) {
872 tcg_out_insn(s, RRE, LGBR, dest, src);
876 if (type == TCG_TYPE_I32) {
878 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
880 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
882 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
884 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
885 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
889 static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
891 if (facilities & FACILITY_EXT_IMM) {
892 tcg_out_insn(s, RRE, LLGCR, dest, src);
897 tcg_out_movi(s, type, TCG_TMP0, 0xff);
900 tcg_out_movi(s, type, dest, 0xff);
902 if (type == TCG_TYPE_I32) {
903 tcg_out_insn(s, RR, NR, dest, src);
905 tcg_out_insn(s, RRE, NGR, dest, src);
909 static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
911 if (facilities & FACILITY_EXT_IMM) {
912 tcg_out_insn(s, RRE, LGHR, dest, src);
916 if (type == TCG_TYPE_I32) {
918 tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
920 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
922 tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
924 tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
925 tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
929 static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
931 if (facilities & FACILITY_EXT_IMM) {
932 tcg_out_insn(s, RRE, LLGHR, dest, src);
937 tcg_out_movi(s, type, TCG_TMP0, 0xffff);
940 tcg_out_movi(s, type, dest, 0xffff);
942 if (type == TCG_TYPE_I32) {
943 tcg_out_insn(s, RR, NR, dest, src);
945 tcg_out_insn(s, RRE, NGR, dest, src);
949 static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
951 tcg_out_insn(s, RRE, LGFR, dest, src);
954 static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
956 tcg_out_insn(s, RRE, LLGFR, dest, src);
959 static inline void tgen32_addi(TCGContext *s, TCGReg dest, int32_t val)
961 if (val == (int16_t)val) {
962 tcg_out_insn(s, RI, AHI, dest, val);
964 tcg_out_insn(s, RIL, AFI, dest, val);
968 static inline void tgen64_addi(TCGContext *s, TCGReg dest, int64_t val)
970 if (val == (int16_t)val) {
971 tcg_out_insn(s, RI, AGHI, dest, val);
972 } else if (val == (int32_t)val) {
973 tcg_out_insn(s, RIL, AGFI, dest, val);
974 } else if (val == (uint32_t)val) {
975 tcg_out_insn(s, RIL, ALGFI, dest, val);
982 static void tgen64_andi(TCGContext *s, TCGReg dest, tcg_target_ulong val)
984 static const S390Opcode ni_insns[4] = {
985 RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
987 static const S390Opcode nif_insns[2] = {
993 /* Look for no-op. */
998 /* Look for the zero-extensions. */
999 if (val == 0xffffffff) {
1000 tgen_ext32u(s, dest, dest);
1004 if (facilities & FACILITY_EXT_IMM) {
1006 tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
1009 if (val == 0xffff) {
1010 tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
1014 /* Try all 32-bit insns that can perform it in one go. */
1015 for (i = 0; i < 4; i++) {
1016 tcg_target_ulong mask = ~(0xffffull << i*16);
1017 if ((val & mask) == mask) {
1018 tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
1023 /* Try all 48-bit insns that can perform it in one go. */
1024 if (facilities & FACILITY_EXT_IMM) {
1025 for (i = 0; i < 2; i++) {
1026 tcg_target_ulong mask = ~(0xffffffffull << i*32);
1027 if ((val & mask) == mask) {
1028 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
1034 /* Perform the AND via sequential modifications to the high and low
1035 parts. Do this via recursion to handle 16-bit vs 32-bit masks in each half. */
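/* E.g. an AND with 0x0000ffff0000ffff cannot be done in one insn, but the
   two recursive calls reduce it to NILH 0x0000 (clearing the second
   16-bit chunk from the bottom) followed by NIHH 0x0000 (clearing the
   topmost chunk).  */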
1037 tgen64_andi(s, dest, val | 0xffffffff00000000ull);
1038 tgen64_andi(s, dest, val | 0x00000000ffffffffull);
1040 /* With no extended-immediate facility, just emit the sequence. */
1041 for (i = 0; i < 4; i++) {
1042 tcg_target_ulong mask = 0xffffull << i*16;
1043 if ((val & mask) != mask) {
1044 tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
1050 static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1052 static const S390Opcode oi_insns[4] = {
1053 RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
1055 static const S390Opcode nif_insns[2] = {
1061 /* Look for no-op. */
1066 if (facilities & FACILITY_EXT_IMM) {
1067 /* Try all 32-bit insns that can perform it in one go. */
1068 for (i = 0; i < 4; i++) {
1069 tcg_target_ulong mask = (0xffffull << i*16);
1070 if ((val & mask) != 0 && (val & ~mask) == 0) {
1071 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1076 /* Try all 48-bit insns that can perform it in one go. */
1077 for (i = 0; i < 2; i++) {
1078 tcg_target_ulong mask = (0xffffffffull << i*32);
1079 if ((val & mask) != 0 && (val & ~mask) == 0) {
1080 tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
1085 /* Perform the OR via sequential modifications to the high and
1086 low parts. Do this via recursion to handle 16-bit vs 32-bit
1087 masks in each half. */
1088 tgen64_ori(s, dest, val & 0x00000000ffffffffull);
1089 tgen64_ori(s, dest, val & 0xffffffff00000000ull);
1091 /* With no extended-immediate facility, we don't need to be so
1092 clever. Just iterate over the insns and mask in the constant. */
1093 for (i = 0; i < 4; i++) {
1094 tcg_target_ulong mask = (0xffffull << i*16);
1095 if ((val & mask) != 0) {
1096 tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
1102 static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1104 /* Perform the xor by parts. */
1105 if (val & 0xffffffff) {
1106 tcg_out_insn(s, RIL, XILF, dest, val);
1108 if (val > 0xffffffff) {
1109 tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
1113 static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
1114 TCGArg c2, int c2const)
1116 bool is_unsigned = is_unsigned_cond(c);
1119 if (type == TCG_TYPE_I32) {
1120 tcg_out_insn(s, RR, LTR, r1, r1);
1122 tcg_out_insn(s, RRE, LTGR, r1, r1);
1124 return tcg_cond_to_ltr_cond[c];
1127 if (type == TCG_TYPE_I32) {
1128 tcg_out_insn(s, RIL, CLFI, r1, c2);
1130 tcg_out_insn(s, RIL, CLGFI, r1, c2);
1133 if (type == TCG_TYPE_I32) {
1134 tcg_out_insn(s, RIL, CFI, r1, c2);
1136 tcg_out_insn(s, RIL, CGFI, r1, c2);
1142 if (type == TCG_TYPE_I32) {
1143 tcg_out_insn(s, RR, CLR, r1, c2);
1145 tcg_out_insn(s, RRE, CLGR, r1, c2);
1148 if (type == TCG_TYPE_I32) {
1149 tcg_out_insn(s, RR, CR, r1, c2);
1151 tcg_out_insn(s, RRE, CGR, r1, c2);
1155 return tcg_cond_to_s390_cond[c];
1158 static void tgen_setcond(TCGContext *s, TCGType type, TCGCond c,
1159 TCGReg dest, TCGReg r1, TCGArg c2, int c2const)
1161 int cc = tgen_cmp(s, type, c, r1, c2, c2const);
1163 /* Emit: r1 = 1; if (cc) goto over; r1 = 0; over: */
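/* The BRC and the movi of 0 are 4 bytes each, so the taken branch must
   land 8 bytes = 4 halfwords past the BRC's own address, hence the
   (4 + 4) >> 1 operand.  */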
1164 tcg_out_movi(s, type, dest, 1);
1165 tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
1166 tcg_out_movi(s, type, dest, 0);
1169 static void tgen_gotoi(TCGContext *s, int cc, tcg_target_long dest)
1171 tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
1172 if (off > -0x8000 && off < 0x7fff) {
1173 tcg_out_insn(s, RI, BRC, cc, off);
1174 } else if (off == (int32_t)off) {
1175 tcg_out_insn(s, RIL, BRCL, cc, off);
1177 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
1178 tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
1182 static void tgen_branch(TCGContext *s, int cc, int labelno)
1184 TCGLabel* l = &s->labels[labelno];
1186 tgen_gotoi(s, cc, l->u.value);
1187 } else if (USE_LONG_BRANCHES) {
1188 tcg_out16(s, RIL_BRCL | (cc << 4));
1189 tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, labelno, -2);
1192 tcg_out16(s, RI_BRC | (cc << 4));
1193 tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, labelno, -2);
1198 static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
1199 TCGReg r1, TCGReg r2, int labelno)
1201 TCGLabel* l = &s->labels[labelno];
1202 tcg_target_long off;
1205 off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
1207 /* We need to keep the offset unchanged for retranslation. */
1208 off = ((int16_t *)s->code_ptr)[1];
1209 tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
1212 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
1214 tcg_out16(s, cc << 12 | (opc & 0xff));
1217 static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
1218 TCGReg r1, int i2, int labelno)
1220 TCGLabel* l = &s->labels[labelno];
1221 tcg_target_long off;
1224 off = (l->u.value - (tcg_target_long)s->code_ptr) >> 1;
1226 /* We need to keep the offset unchanged for retranslation. */
1227 off = ((int16_t *)s->code_ptr)[1];
1228 tcg_out_reloc(s, s->code_ptr + 2, R_390_PC16DBL, labelno, -2);
1231 tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
1233 tcg_out16(s, (i2 << 8) | (opc & 0xff));
1236 static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
1237 TCGReg r1, TCGArg c2, int c2const, int labelno)
1241 if (facilities & FACILITY_GEN_INST_EXT) {
1242 bool is_unsigned = (c > TCG_COND_GT);
1246 cc = tcg_cond_to_s390_cond[c];
1249 opc = (type == TCG_TYPE_I32
1250 ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
1251 : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
1252 tgen_compare_branch(s, opc, cc, r1, c2, labelno);
1256 /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
1257 If the immediate we've been given does not fit that range, we'll
1258 fall back to separate compare and branch instructions using the
1259 larger comparison range afforded by COMPARE IMMEDIATE. */
1260 if (type == TCG_TYPE_I32) {
1263 in_range = (uint32_t)c2 == (uint8_t)c2;
1266 in_range = (int32_t)c2 == (int8_t)c2;
1271 in_range = (uint64_t)c2 == (uint8_t)c2;
1274 in_range = (int64_t)c2 == (int8_t)c2;
1278 tgen_compare_imm_branch(s, opc, cc, r1, c2, labelno);
1283 cc = tgen_cmp(s, type, c, r1, c2, c2const);
1284 tgen_branch(s, cc, labelno);
1287 static void tgen_calli(TCGContext *s, tcg_target_long dest)
1289 tcg_target_long off = (dest - (tcg_target_long)s->code_ptr) >> 1;
1290 if (off == (int32_t)off) {
1291 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
1293 tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
1294 tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
1298 static void tcg_out_qemu_ld_direct(TCGContext *s, int opc, TCGReg data,
1299 TCGReg base, TCGReg index, int disp)
1301 #ifdef TARGET_WORDS_BIGENDIAN
1302 const int bswap = 0;
1304 const int bswap = 1;
1308 tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
1311 tcg_out_insn(s, RXY, LGB, data, base, index, disp);
1315 /* swapped unsigned halfword load with upper bits zeroed */
1316 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1317 tgen_ext16u(s, TCG_TYPE_I64, data, data);
1319 tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
1324 /* swapped sign-extended halfword load */
1325 tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
1326 tgen_ext16s(s, TCG_TYPE_I64, data, data);
1328 tcg_out_insn(s, RXY, LGH, data, base, index, disp);
1333 /* swapped unsigned int load with upper bits zeroed */
1334 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1335 tgen_ext32u(s, data, data);
1337 tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
1342 /* swapped sign-extended int load */
1343 tcg_out_insn(s, RXY, LRV, data, base, index, disp);
1344 tgen_ext32s(s, data, data);
1346 tcg_out_insn(s, RXY, LGF, data, base, index, disp);
1351 tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
1353 tcg_out_insn(s, RXY, LG, data, base, index, disp);
1361 static void tcg_out_qemu_st_direct(TCGContext *s, int opc, TCGReg data,
1362 TCGReg base, TCGReg index, int disp)
1364 #ifdef TARGET_WORDS_BIGENDIAN
1365 const int bswap = 0;
1367 const int bswap = 1;
1371 if (disp >= 0 && disp < 0x1000) {
1372 tcg_out_insn(s, RX, STC, data, base, index, disp);
1374 tcg_out_insn(s, RXY, STCY, data, base, index, disp);
1379 tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
1380 } else if (disp >= 0 && disp < 0x1000) {
1381 tcg_out_insn(s, RX, STH, data, base, index, disp);
1383 tcg_out_insn(s, RXY, STHY, data, base, index, disp);
1388 tcg_out_insn(s, RXY, STRV, data, base, index, disp);
1389 } else if (disp >= 0 && disp < 0x1000) {
1390 tcg_out_insn(s, RX, ST, data, base, index, disp);
1392 tcg_out_insn(s, RXY, STY, data, base, index, disp);
1397 tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
1399 tcg_out_insn(s, RXY, STG, data, base, index, disp);
1407 #if defined(CONFIG_SOFTMMU)
1408 static void tgen64_andi_tmp(TCGContext *s, TCGReg dest, tcg_target_ulong val)
1410 if (tcg_match_andi(0, val)) {
1411 tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, val);
1412 tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
1414 tgen64_andi(s, dest, val);
1418 static void tcg_prepare_qemu_ldst(TCGContext* s, TCGReg data_reg,
1419 TCGReg addr_reg, int mem_index, int opc,
1420 uint16_t **label2_ptr_p, int is_store)
1422 const TCGReg arg0 = TCG_REG_R2;
1423 const TCGReg arg1 = TCG_REG_R3;
1424 int s_bits = opc & 3;
1425 uint16_t *label1_ptr;
1426 tcg_target_long ofs;
1428 if (TARGET_LONG_BITS == 32) {
1429 tgen_ext32u(s, arg0, addr_reg);
1431 tcg_out_mov(s, TCG_TYPE_I64, arg0, addr_reg);
1434 tcg_out_sh64(s, RSY_SRLG, arg1, addr_reg, TCG_REG_NONE,
1435 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
1437 tgen64_andi_tmp(s, arg0, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
1438 tgen64_andi_tmp(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
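/* At this point arg1 holds the byte offset of this page's TLB entry: the
   shift above divides by the page size while pre-multiplying by the entry
   size, and the mask keeps only the CPU_TLB_SIZE index bits.  arg0 holds
   the address with the intra-page offset cleared, except that the low
   bits of the access size are kept, so an unaligned access fails the
   comparison and takes the slow path.  */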
1441 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
1443 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
1445 assert(ofs < 0x80000);
1447 if (TARGET_LONG_BITS == 32) {
1448 tcg_out_mem(s, RX_C, RXY_CY, arg0, arg1, TCG_AREG0, ofs);
1450 tcg_out_mem(s, 0, RXY_CG, arg0, arg1, TCG_AREG0, ofs);
1453 if (TARGET_LONG_BITS == 32) {
1454 tgen_ext32u(s, arg0, addr_reg);
1456 tcg_out_mov(s, TCG_TYPE_I64, arg0, addr_reg);
1459 label1_ptr = (uint16_t*)s->code_ptr;
1461 /* je label1 (offset will be patched in later) */
1462 tcg_out_insn(s, RI, BRC, S390_CC_EQ, 0);
1464 /* call load/store helper */
1466 /* Make sure to zero-extend the value to the full register
1467 for the calling convention. */
1470 tgen_ext8u(s, TCG_TYPE_I64, arg1, data_reg);
1473 tgen_ext16u(s, TCG_TYPE_I64, arg1, data_reg);
1476 tgen_ext32u(s, arg1, data_reg);
1479 tcg_out_mov(s, TCG_TYPE_I64, arg1, data_reg);
1484 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, mem_index);
1485 /* XXX/FIXME: suboptimal */
1486 tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[3],
1487 tcg_target_call_iarg_regs[2]);
1488 tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
1489 tcg_target_call_iarg_regs[1]);
1490 tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[1],
1491 tcg_target_call_iarg_regs[0]);
1492 tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0],
1494 tgen_calli(s, (tcg_target_ulong)qemu_st_helpers[s_bits]);
1496 tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
1497 /* XXX/FIXME: suboptimal */
1498 tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
1499 tcg_target_call_iarg_regs[1]);
1500 tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[1],
1501 tcg_target_call_iarg_regs[0]);
1502 tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0],
1504 tgen_calli(s, (tcg_target_ulong)qemu_ld_helpers[s_bits]);
1506 /* sign extension */
1509 tgen_ext8s(s, TCG_TYPE_I64, data_reg, arg0);
1512 tgen_ext16s(s, TCG_TYPE_I64, data_reg, arg0);
1515 tgen_ext32s(s, data_reg, arg0);
1518 /* unsigned -> just copy */
1519 tcg_out_mov(s, TCG_TYPE_I64, data_reg, arg0);
1524 /* jump to label2 (end) */
1525 *label2_ptr_p = (uint16_t*)s->code_ptr;
1527 tcg_out_insn(s, RI, BRC, S390_CC_ALWAYS, 0);
1529 /* this is label1, patch branch */
1530 *(label1_ptr + 1) = ((unsigned long)s->code_ptr -
1531 (unsigned long)label1_ptr) >> 1;
1533 ofs = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
1534 assert(ofs < 0x80000);
1536 tcg_out_mem(s, 0, RXY_AG, arg0, arg1, TCG_AREG0, ofs);
1539 static void tcg_finish_qemu_ldst(TCGContext* s, uint16_t *label2_ptr)
1542 *(label2_ptr + 1) = ((unsigned long)s->code_ptr -
1543 (unsigned long)label2_ptr) >> 1;
1546 static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
1547 TCGReg *index_reg, tcg_target_long *disp)
1549 if (TARGET_LONG_BITS == 32) {
1550 tgen_ext32u(s, TCG_TMP0, *addr_reg);
1551 *addr_reg = TCG_TMP0;
1553 if (GUEST_BASE < 0x80000) {
1554 *index_reg = TCG_REG_NONE;
1557 *index_reg = TCG_GUEST_BASE_REG;
1561 #endif /* CONFIG_SOFTMMU */
1563 /* load data with address translation (if applicable)
1564 and endianness conversion */
1565 static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* args, int opc)
1567 TCGReg addr_reg, data_reg;
1568 #if defined(CONFIG_SOFTMMU)
1570 uint16_t *label2_ptr;
1573 tcg_target_long disp;
1579 #if defined(CONFIG_SOFTMMU)
1582 tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
1583 opc, &label2_ptr, 0);
1585 tcg_out_qemu_ld_direct(s, opc, data_reg, TCG_REG_R2, TCG_REG_NONE, 0);
1587 tcg_finish_qemu_ldst(s, label2_ptr);
1589 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1590 tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1594 static void tcg_out_qemu_st(TCGContext* s, const TCGArg* args, int opc)
1596 TCGReg addr_reg, data_reg;
1597 #if defined(CONFIG_SOFTMMU)
1599 uint16_t *label2_ptr;
1602 tcg_target_long disp;
1608 #if defined(CONFIG_SOFTMMU)
1611 tcg_prepare_qemu_ldst(s, data_reg, addr_reg, mem_index,
1612 opc, &label2_ptr, 1);
1614 tcg_out_qemu_st_direct(s, opc, data_reg, TCG_REG_R2, TCG_REG_NONE, 0);
1616 tcg_finish_qemu_ldst(s, label2_ptr);
1618 tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
1619 tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
1623 #if TCG_TARGET_REG_BITS == 64
1624 # define OP_32_64(x) \
1625 case glue(glue(INDEX_op_,x),_i32): \
1626 case glue(glue(INDEX_op_,x),_i64)
1628 # define OP_32_64(x) \
1629 case glue(glue(INDEX_op_,x),_i32)
1632 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1633 const TCGArg *args, const int *const_args)
1638 case INDEX_op_exit_tb:
1640 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, args[0]);
1641 tgen_gotoi(s, S390_CC_ALWAYS, (unsigned long)tb_ret_addr);
1644 case INDEX_op_goto_tb:
1645 if (s->tb_jmp_offset) {
1648 /* load address stored at s->tb_next + args[0] */
1649 tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
1651 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
1653 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1657 if (const_args[0]) {
1658 tgen_calli(s, args[0]);
1660 tcg_out_insn(s, RR, BASR, TCG_REG_R14, args[0]);
1664 case INDEX_op_mov_i32:
1665 tcg_out_mov(s, TCG_TYPE_I32, args[0], args[1]);
1667 case INDEX_op_movi_i32:
1668 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
1672 /* ??? LLC (RXY format) is only present with the extended-immediate
1673 facility, whereas LLGC is always present. */
1674 tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
1678 /* ??? LB is no smaller than LGB, so there is no point in using it. */
1679 tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
1683 /* ??? LLH (RXY format) is only present with the extended-immediate
1684 facility, whereas LLGH is always present. */
1685 tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
1688 case INDEX_op_ld16s_i32:
1689 tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
1692 case INDEX_op_ld_i32:
1693 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1697 tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
1698 TCG_REG_NONE, args[2]);
1702 tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
1703 TCG_REG_NONE, args[2]);
1706 case INDEX_op_st_i32:
1707 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1710 case INDEX_op_add_i32:
1711 if (const_args[2]) {
1712 tgen32_addi(s, args[0], args[2]);
1714 tcg_out_insn(s, RR, AR, args[0], args[2]);
1717 case INDEX_op_sub_i32:
1718 if (const_args[2]) {
1719 tgen32_addi(s, args[0], -args[2]);
1721 tcg_out_insn(s, RR, SR, args[0], args[2]);
1725 case INDEX_op_and_i32:
1726 if (const_args[2]) {
1727 tgen64_andi(s, args[0], args[2] | 0xffffffff00000000ull);
1729 tcg_out_insn(s, RR, NR, args[0], args[2]);
1732 case INDEX_op_or_i32:
1733 if (const_args[2]) {
1734 tgen64_ori(s, args[0], args[2] & 0xffffffff);
1736 tcg_out_insn(s, RR, OR, args[0], args[2]);
1739 case INDEX_op_xor_i32:
1740 if (const_args[2]) {
1741 tgen64_xori(s, args[0], args[2] & 0xffffffff);
1743 tcg_out_insn(s, RR, XR, args[0], args[2]);
1747 case INDEX_op_neg_i32:
1748 tcg_out_insn(s, RR, LCR, args[0], args[1]);
1751 case INDEX_op_mul_i32:
1752 if (const_args[2]) {
1753 if ((int32_t)args[2] == (int16_t)args[2]) {
1754 tcg_out_insn(s, RI, MHI, args[0], args[2]);
1756 tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
1759 tcg_out_insn(s, RRE, MSR, args[0], args[2]);
1763 case INDEX_op_div2_i32:
1764 tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
1766 case INDEX_op_divu2_i32:
1767 tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
1770 case INDEX_op_shl_i32:
1773 if (const_args[2]) {
1774 tcg_out_sh32(s, op, args[0], TCG_REG_NONE, args[2]);
1776 tcg_out_sh32(s, op, args[0], args[2], 0);
1779 case INDEX_op_shr_i32:
1782 case INDEX_op_sar_i32:
1786 case INDEX_op_rotl_i32:
1787 /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
1788 if (const_args[2]) {
1789 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
1791 tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
1794 case INDEX_op_rotr_i32:
1795 if (const_args[2]) {
1796 tcg_out_sh64(s, RSY_RLL, args[0], args[1],
1797 TCG_REG_NONE, (32 - args[2]) & 31);
1799 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
1800 tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
1804 case INDEX_op_ext8s_i32:
1805 tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
1807 case INDEX_op_ext16s_i32:
1808 tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
1810 case INDEX_op_ext8u_i32:
1811 tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
1813 case INDEX_op_ext16u_i32:
1814 tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
1818 /* The TCG bswap definition requires bits 0-47 already be zero.
1819 Thus we don't need the G-type insns to implement bswap16_i64. */
1820 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1821 tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
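/* E.g. with the required zero upper bits, 0x0000abcd becomes 0xcdab0000
   after LRVR (a full 32-bit byte reversal) and 0x0000cdab after the
   16-bit right shift -- the halfword-swapped result.  */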
1824 tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
1828 tgen_branch(s, S390_CC_ALWAYS, args[0]);
1831 case INDEX_op_brcond_i32:
1832 tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
1833 args[1], const_args[1], args[3]);
1835 case INDEX_op_setcond_i32:
1836 tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
1837 args[2], const_args[2]);
1840 case INDEX_op_qemu_ld8u:
1841 tcg_out_qemu_ld(s, args, LD_UINT8);
1843 case INDEX_op_qemu_ld8s:
1844 tcg_out_qemu_ld(s, args, LD_INT8);
1846 case INDEX_op_qemu_ld16u:
1847 tcg_out_qemu_ld(s, args, LD_UINT16);
1849 case INDEX_op_qemu_ld16s:
1850 tcg_out_qemu_ld(s, args, LD_INT16);
1852 case INDEX_op_qemu_ld32:
1853 /* ??? Technically we can use a non-extending instruction. */
1854 tcg_out_qemu_ld(s, args, LD_UINT32);
1856 case INDEX_op_qemu_ld64:
1857 tcg_out_qemu_ld(s, args, LD_UINT64);
1860 case INDEX_op_qemu_st8:
1861 tcg_out_qemu_st(s, args, LD_UINT8);
1863 case INDEX_op_qemu_st16:
1864 tcg_out_qemu_st(s, args, LD_UINT16);
1866 case INDEX_op_qemu_st32:
1867 tcg_out_qemu_st(s, args, LD_UINT32);
1869 case INDEX_op_qemu_st64:
1870 tcg_out_qemu_st(s, args, LD_UINT64);
1873 #if TCG_TARGET_REG_BITS == 64
1874 case INDEX_op_mov_i64:
1875 tcg_out_mov(s, TCG_TYPE_I64, args[0], args[1]);
1877 case INDEX_op_movi_i64:
1878 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
1881 case INDEX_op_ld16s_i64:
1882 tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
1884 case INDEX_op_ld32u_i64:
1885 tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
1887 case INDEX_op_ld32s_i64:
1888 tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
1890 case INDEX_op_ld_i64:
1891 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1894 case INDEX_op_st32_i64:
1895 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
1897 case INDEX_op_st_i64:
1898 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
1901 case INDEX_op_add_i64:
1902 if (const_args[2]) {
1903 tgen64_addi(s, args[0], args[2]);
1905 tcg_out_insn(s, RRE, AGR, args[0], args[2]);
1908 case INDEX_op_sub_i64:
1909 if (const_args[2]) {
1910 tgen64_addi(s, args[0], -args[2]);
1912 tcg_out_insn(s, RRE, SGR, args[0], args[2]);
1916 case INDEX_op_and_i64:
1917 if (const_args[2]) {
1918 tgen64_andi(s, args[0], args[2]);
1920 tcg_out_insn(s, RRE, NGR, args[0], args[2]);
1923 case INDEX_op_or_i64:
1924 if (const_args[2]) {
1925 tgen64_ori(s, args[0], args[2]);
1927 tcg_out_insn(s, RRE, OGR, args[0], args[2]);
1930 case INDEX_op_xor_i64:
1931 if (const_args[2]) {
1932 tgen64_xori(s, args[0], args[2]);
1934 tcg_out_insn(s, RRE, XGR, args[0], args[2]);
1938 case INDEX_op_neg_i64:
1939 tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
1941 case INDEX_op_bswap64_i64:
1942 tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
1945 case INDEX_op_mul_i64:
1946 if (const_args[2]) {
1947 if (args[2] == (int16_t)args[2]) {
1948 tcg_out_insn(s, RI, MGHI, args[0], args[2]);
1950 tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
1953 tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
1957 case INDEX_op_div2_i64:
1958 /* ??? We get an unnecessary sign-extension of the dividend
1959 into R3 with this definition, but since we do in fact always
1960 produce both quotient and remainder, using INDEX_op_div_i64
1961 instead would require jumping through even more hoops. */
1962 tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
1964 case INDEX_op_divu2_i64:
1965 tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
1968 case INDEX_op_shl_i64:
1971 if (const_args[2]) {
1972 tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
1974 tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
1977 case INDEX_op_shr_i64:
1980 case INDEX_op_sar_i64:
1984 case INDEX_op_rotl_i64:
1985 if (const_args[2]) {
1986 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
1987 TCG_REG_NONE, args[2]);
1989 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
1992 case INDEX_op_rotr_i64:
1993 if (const_args[2]) {
1994 tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
1995 TCG_REG_NONE, (64 - args[2]) & 63);
1997 /* We can use the smaller 32-bit negate because only the
1998 low 6 bits are examined for the rotate. */
1999 tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
2000 tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
2004 case INDEX_op_ext8s_i64:
2005 tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
2007 case INDEX_op_ext16s_i64:
2008 tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
2010 case INDEX_op_ext32s_i64:
2011 tgen_ext32s(s, args[0], args[1]);
2013 case INDEX_op_ext8u_i64:
2014 tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
2016 case INDEX_op_ext16u_i64:
2017 tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
2019 case INDEX_op_ext32u_i64:
2020 tgen_ext32u(s, args[0], args[1]);
2023 case INDEX_op_brcond_i64:
2024 tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
2025 args[1], const_args[1], args[3]);
2027 case INDEX_op_setcond_i64:
2028 tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
2029 args[2], const_args[2]);
2032 case INDEX_op_qemu_ld32u:
2033 tcg_out_qemu_ld(s, args, LD_UINT32);
2035 case INDEX_op_qemu_ld32s:
2036 tcg_out_qemu_ld(s, args, LD_INT32);
2038 #endif /* TCG_TARGET_REG_BITS == 64 */
2041 fprintf(stderr,"unimplemented opc 0x%x\n",opc);
2046 static const TCGTargetOpDef s390_op_defs[] = {
2047 { INDEX_op_exit_tb, { } },
2048 { INDEX_op_goto_tb, { } },
2049 { INDEX_op_call, { "ri" } },
2050 { INDEX_op_br, { } },
2052 { INDEX_op_mov_i32, { "r", "r" } },
2053 { INDEX_op_movi_i32, { "r" } },
2055 { INDEX_op_ld8u_i32, { "r", "r" } },
2056 { INDEX_op_ld8s_i32, { "r", "r" } },
2057 { INDEX_op_ld16u_i32, { "r", "r" } },
2058 { INDEX_op_ld16s_i32, { "r", "r" } },
2059 { INDEX_op_ld_i32, { "r", "r" } },
2060 { INDEX_op_st8_i32, { "r", "r" } },
2061 { INDEX_op_st16_i32, { "r", "r" } },
2062 { INDEX_op_st_i32, { "r", "r" } },
2064 { INDEX_op_add_i32, { "r", "0", "rWI" } },
2065 { INDEX_op_sub_i32, { "r", "0", "rWNI" } },
2066 { INDEX_op_mul_i32, { "r", "0", "rK" } },
2068 { INDEX_op_div2_i32, { "b", "a", "0", "1", "r" } },
2069 { INDEX_op_divu2_i32, { "b", "a", "0", "1", "r" } },
2071 { INDEX_op_and_i32, { "r", "0", "rWA" } },
2072 { INDEX_op_or_i32, { "r", "0", "rWO" } },
2073 { INDEX_op_xor_i32, { "r", "0", "rWX" } },
2075 { INDEX_op_neg_i32, { "r", "r" } },
2077 { INDEX_op_shl_i32, { "r", "0", "Ri" } },
2078 { INDEX_op_shr_i32, { "r", "0", "Ri" } },
2079 { INDEX_op_sar_i32, { "r", "0", "Ri" } },
2081 { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
2082 { INDEX_op_rotr_i32, { "r", "r", "Ri" } },
2084 { INDEX_op_ext8s_i32, { "r", "r" } },
2085 { INDEX_op_ext8u_i32, { "r", "r" } },
2086 { INDEX_op_ext16s_i32, { "r", "r" } },
2087 { INDEX_op_ext16u_i32, { "r", "r" } },
2089 { INDEX_op_bswap16_i32, { "r", "r" } },
2090 { INDEX_op_bswap32_i32, { "r", "r" } },
2092 { INDEX_op_brcond_i32, { "r", "rWC" } },
2093 { INDEX_op_setcond_i32, { "r", "r", "rWC" } },
2095 { INDEX_op_qemu_ld8u, { "r", "L" } },
2096 { INDEX_op_qemu_ld8s, { "r", "L" } },
2097 { INDEX_op_qemu_ld16u, { "r", "L" } },
2098 { INDEX_op_qemu_ld16s, { "r", "L" } },
2099 { INDEX_op_qemu_ld32, { "r", "L" } },
2100 { INDEX_op_qemu_ld64, { "r", "L" } },
2102 { INDEX_op_qemu_st8, { "L", "L" } },
2103 { INDEX_op_qemu_st16, { "L", "L" } },
2104 { INDEX_op_qemu_st32, { "L", "L" } },
2105 { INDEX_op_qemu_st64, { "L", "L" } },
2107 #if defined(__s390x__)
2108 { INDEX_op_mov_i64, { "r", "r" } },
2109 { INDEX_op_movi_i64, { "r" } },
2111 { INDEX_op_ld8u_i64, { "r", "r" } },
2112 { INDEX_op_ld8s_i64, { "r", "r" } },
2113 { INDEX_op_ld16u_i64, { "r", "r" } },
2114 { INDEX_op_ld16s_i64, { "r", "r" } },
2115 { INDEX_op_ld32u_i64, { "r", "r" } },
2116 { INDEX_op_ld32s_i64, { "r", "r" } },
2117 { INDEX_op_ld_i64, { "r", "r" } },
2119 { INDEX_op_st8_i64, { "r", "r" } },
2120 { INDEX_op_st16_i64, { "r", "r" } },
2121 { INDEX_op_st32_i64, { "r", "r" } },
2122 { INDEX_op_st_i64, { "r", "r" } },
2124 { INDEX_op_add_i64, { "r", "0", "rI" } },
2125 { INDEX_op_sub_i64, { "r", "0", "rNI" } },
2126 { INDEX_op_mul_i64, { "r", "0", "rK" } },
2128 { INDEX_op_div2_i64, { "b", "a", "0", "1", "r" } },
2129 { INDEX_op_divu2_i64, { "b", "a", "0", "1", "r" } },
2131 { INDEX_op_and_i64, { "r", "0", "rA" } },
2132 { INDEX_op_or_i64, { "r", "0", "rO" } },
2133 { INDEX_op_xor_i64, { "r", "0", "rX" } },
2135 { INDEX_op_neg_i64, { "r", "r" } },
2137 { INDEX_op_shl_i64, { "r", "r", "Ri" } },
2138 { INDEX_op_shr_i64, { "r", "r", "Ri" } },
2139 { INDEX_op_sar_i64, { "r", "r", "Ri" } },
2141 { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
2142 { INDEX_op_rotr_i64, { "r", "r", "Ri" } },
2144 { INDEX_op_ext8s_i64, { "r", "r" } },
2145 { INDEX_op_ext8u_i64, { "r", "r" } },
2146 { INDEX_op_ext16s_i64, { "r", "r" } },
2147 { INDEX_op_ext16u_i64, { "r", "r" } },
2148 { INDEX_op_ext32s_i64, { "r", "r" } },
2149 { INDEX_op_ext32u_i64, { "r", "r" } },
2151 { INDEX_op_bswap16_i64, { "r", "r" } },
2152 { INDEX_op_bswap32_i64, { "r", "r" } },
2153 { INDEX_op_bswap64_i64, { "r", "r" } },
2155 { INDEX_op_brcond_i64, { "r", "rC" } },
2156 { INDEX_op_setcond_i64, { "r", "r", "rC" } },
2158 { INDEX_op_qemu_ld32u, { "r", "L" } },
2159 { INDEX_op_qemu_ld32s, { "r", "L" } },
2165 /* ??? Linux kernels provide an AUXV entry AT_HWCAP that provides most of
2166 this information. However, getting at that entry is not easy this far
2167 away from main. Our options are: start searching from environ (but
2168 that fails as soon as someone does a setenv in between); read the data
2169 from /proc/self/auxv; or do the probing ourselves. The only thing
2170 extra that AT_HWCAP gives us is HWCAP_S390_HIGH_GPRS, which indicates
2171 that the kernel saves all 64 bits of the registers around traps while
2172 in 31-bit mode. But this is true of all "recent" kernels (ought to dig
2173 back and see from when this might not be true). */
2177 static volatile sig_atomic_t got_sigill;
2179 static void sigill_handler(int sig)
2184 static void query_facilities(void)
2186 struct sigaction sa_old, sa_new;
2187 register int r0 __asm__("0");
2188 register void *r1 __asm__("1");
2191 memset(&sa_new, 0, sizeof(sa_new));
2192 sa_new.sa_handler = sigill_handler;
2193 sigaction(SIGILL, &sa_new, &sa_old);
2195 /* First, try STORE FACILITY LIST EXTENDED. If this is present, then
2196 we need not do any more probing. Unfortunately, this itself is an
2197 extension and the original STORE FACILITY LIST instruction is
2198 kernel-only, storing its results at absolute address 200. */
2201 asm volatile(".word 0xb2b0,0x1000"
2202 : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
2205 /* STORE FACILITY LIST EXTENDED is not available. Probe for one of each
2206 kind of instruction that we're interested in. */
2207 /* ??? Possibly some of these are in practice never present unless
2208 the store-facility-extended facility is also present. But since
2209 that isn't documented it's just better to probe for each. */
2211 /* Test for z/Architecture. Required even in 31-bit mode. */
2214 asm volatile(".word 0xb908,0x0000" : "=r"(r0) : : "cc");
2216 facilities |= FACILITY_ZARCH_ACTIVE;
2219 /* Test for long displacement. */
2223 asm volatile(".word 0xe300,0x1000,0x0058"
2224 : "=r"(r0) : "r"(r1) : "cc");
2226 facilities |= FACILITY_LONG_DISP;
2229 /* Test for extended immediates. */
2232 asm volatile(".word 0xc209,0x0000,0x0000" : : : "cc");
2234 facilities |= FACILITY_EXT_IMM;
2237 /* Test for general-instructions-extension. */
2240 asm volatile(".word 0xc201,0x0000,0x0001");
2242 facilities |= FACILITY_GEN_INST_EXT;
2246 sigaction(SIGILL, &sa_old, NULL);
2248 /* The translator currently uses these extensions unconditionally.
2249 Pruning this back to the base ESA/390 architecture doesn't seem
2250 worthwhile, since even the KVM target requires z/Arch. */
2252 if ((facilities & FACILITY_ZARCH_ACTIVE) == 0) {
2253 fprintf(stderr, "TCG: z/Arch facility is required.\n");
2254 fprintf(stderr, "TCG: Boot with a 64-bit enabled kernel.\n");
2257 if ((facilities & FACILITY_LONG_DISP) == 0) {
2258 fprintf(stderr, "TCG: long-displacement facility is required.\n");
2262 /* So far there's just enough support for 31-bit mode to let the
2263 compile succeed. This is good enough to run QEMU with KVM. */
2264 if (sizeof(void *) != 8) {
2265 fprintf(stderr, "TCG: 31-bit mode is not supported.\n");
2274 static void tcg_target_init(TCGContext *s)
2276 #if !defined(CONFIG_USER_ONLY)
2278 if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry)) {
2285 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
2286 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
2288 tcg_regset_clear(tcg_target_call_clobber_regs);
2289 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2290 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2291 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2292 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2293 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
2294 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
2295 /* The return register can be considered call-clobbered. */
2296 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2298 tcg_regset_clear(s->reserved_regs);
2299 tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
2300 /* XXX many insns can't be used with R0, so we better avoid it for now */
2301 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
2302 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2304 tcg_add_target_add_op_defs(s390_op_defs);
2305 tcg_set_frame(s, TCG_AREG0, offsetof(CPUArchState, temp_buf),
2306 CPU_TEMP_BUF_NLONGS * sizeof(long));
2309 static void tcg_target_qemu_prologue(TCGContext *s)
2311 /* stmg %r6,%r15,48(%r15) (save registers) */
2312 tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);
2314 /* aghi %r15,-160 (stack frame) */
2315 tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -160);
2317 if (GUEST_BASE >= 0x80000) {
2318 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
2319 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2322 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2323 /* br %r3 (go to TB) */
2324 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);
2326 tb_ret_addr = s->code_ptr;
2328 /* lmg %r6,%r15,208(%r15) (restore registers) */
2329 tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 208);
2331 /* br %r14 (return) */
2332 tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);