4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
52 struct TranslationBlock *tb;
53 const DisasInsn *insn;
57 bool singlestep_enabled;
60 /* Information carried about a condition to be evaluated. */
67 struct { TCGv_i64 a, b; } s64;
68 struct { TCGv_i32 a, b; } s32;
74 #ifdef DEBUG_INLINE_BRANCHES
75 static uint64_t inline_branch_hit[CC_OP_MAX];
76 static uint64_t inline_branch_miss[CC_OP_MAX];
79 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
81 if (!(s->tb->flags & FLAG_MASK_64)) {
82 if (s->tb->flags & FLAG_MASK_32) {
83 return pc | 0x80000000;
89 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
95 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
96 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
98 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
99 env->psw.mask, env->psw.addr, env->cc_op);
102 for (i = 0; i < 16; i++) {
103 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
105 cpu_fprintf(f, "\n");
111 for (i = 0; i < 16; i++) {
112 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
114 cpu_fprintf(f, "\n");
120 #ifndef CONFIG_USER_ONLY
121 for (i = 0; i < 16; i++) {
122 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
124 cpu_fprintf(f, "\n");
131 #ifdef DEBUG_INLINE_BRANCHES
132 for (i = 0; i < CC_OP_MAX; i++) {
133 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
134 inline_branch_miss[i], inline_branch_hit[i]);
138 cpu_fprintf(f, "\n");
141 static TCGv_i64 psw_addr;
142 static TCGv_i64 psw_mask;
144 static TCGv_i32 cc_op;
145 static TCGv_i64 cc_src;
146 static TCGv_i64 cc_dst;
147 static TCGv_i64 cc_vr;
149 static char cpu_reg_names[32][4];
150 static TCGv_i64 regs[16];
151 static TCGv_i64 fregs[16];
153 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
155 void s390x_translate_init(void)
159 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
160 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
161 offsetof(CPUS390XState, psw.addr),
163 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
164 offsetof(CPUS390XState, psw.mask),
167 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
169 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
171 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
173 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
176 for (i = 0; i < 16; i++) {
177 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
178 regs[i] = tcg_global_mem_new(TCG_AREG0,
179 offsetof(CPUS390XState, regs[i]),
183 for (i = 0; i < 16; i++) {
184 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
185 fregs[i] = tcg_global_mem_new(TCG_AREG0,
186 offsetof(CPUS390XState, fregs[i].d),
187 cpu_reg_names[i + 16]);
190 /* register helpers */
/* Return a fresh i64 temp holding the current value of GPR 'reg'.
   The caller owns the temp and must free it. */
195 static TCGv_i64 load_reg(int reg)
197 TCGv_i64 r = tcg_temp_new_i64();
198 tcg_gen_mov_i64(r, regs[reg]);
/* Return a fresh i64 temp with the 32-bit value of FPR 'reg' -- kept in the
   upper half of the 64-bit register -- shifted down into the low 32 bits. */
202 static TCGv_i64 load_freg32_i64(int reg)
204 TCGv_i64 r = tcg_temp_new_i64();
205 tcg_gen_shri_i64(r, fregs[reg], 32);
/* Store all 64 bits of 'v' into GPR 'reg'. */
209 static void store_reg(int reg, TCGv_i64 v)
211 tcg_gen_mov_i64(regs[reg], v);
/* Store all 64 bits of 'v' into FPR 'reg'. */
214 static void store_freg(int reg, TCGv_i64 v)
216 tcg_gen_mov_i64(fregs[reg], v);
/* Store the low 32 bits of 'v' into bits 0..31 of GPR 'reg'. */
219 static void store_reg32_i64(int reg, TCGv_i64 v)
221 /* 32 bit register writes keep the upper half */
222 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
/* Store the low 32 bits of 'v' into the high half (bits 32..63) of GPR
   'reg'; the low half of the register is preserved. */
225 static void store_reg32h_i64(int reg, TCGv_i64 v)
227 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
/* Store a 32-bit float value into the upper half of FPR 'reg', where
   short-format FP values live; the low half is preserved. */
230 static void store_freg32_i64(int reg, TCGv_i64 v)
232 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
/* Fetch the low 64 bits of a 128-bit helper result, which helpers stash
   in env->retxl, into 'dest'. */
235 static void return_low128(TCGv_i64 dest)
237 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
/* Set psw.addr to the address of the instruction being translated. */
240 static void update_psw_addr(DisasContext *s)
243 tcg_gen_movi_i64(psw_addr, s->pc);
/* Flush the locally tracked cc operation into the cc_op global.  For
   CC_OP_DYNAMIC and CC_OP_STATIC no store is needed: the global already
   holds the current value. */
246 static void update_cc_op(DisasContext *s)
248 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
249 tcg_gen_movi_i32(cc_op, s->cc_op);
253 static void potential_page_fault(DisasContext *s)
/* Fetch a 2-byte instruction halfword at 'pc', zero-extended to 64 bits. */
259 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
261 return (uint64_t)cpu_lduw_code(env, pc);
/* Fetch a 4-byte instruction word at 'pc', zero-extended to 64 bits.
   The uint32_t cast avoids sign-extension of cpu_ldl_code's result. */
264 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
266 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
/* Fetch a 6-byte instruction at 'pc': the first halfword goes in bits
   32..47, the remaining 4 bytes in the low 32 bits. */
269 static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
271 return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
274 static int get_mem_index(DisasContext *s)
276 switch (s->tb->flags & FLAG_MASK_ASC) {
277 case PSW_ASC_PRIMARY >> 32:
279 case PSW_ASC_SECONDARY >> 32:
281 case PSW_ASC_HOME >> 32:
/* Emit a call to the exception helper with exception number 'excp'. */
289 static void gen_exception(int excp)
291 TCGv_i32 tmp = tcg_const_i32(excp);
292 gen_helper_exception(cpu_env, tmp);
293 tcg_temp_free_i32(tmp);
/* Raise a program exception: record the program interruption code and the
   instruction length in env, then deliver EXCP_PGM. */
296 static void gen_program_exception(DisasContext *s, int code)
300 /* Remember what pgm exception this was. */
301 tmp = tcg_const_i32(code);
302 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
303 tcg_temp_free_i32(tmp);
305 tmp = tcg_const_i32(s->next_pc - s->pc);
306 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
307 tcg_temp_free_i32(tmp);
309 /* Advance past instruction. */
316 /* Trigger exception. */
317 gen_exception(EXCP_PGM);
/* Report an illegal opcode via a PGM_SPECIFICATION program exception. */
320 static inline void gen_illegal_opcode(DisasContext *s)
322 gen_program_exception(s, PGM_SPECIFICATION);
/* Raise a privileged-operation exception if the TB was translated in
   problem state (PSW PSTATE bit set in the tb flags). */
325 static inline void check_privileged(DisasContext *s)
327 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
328 gen_program_exception(s, PGM_PRIVILEGED);
332 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
336 /* 31-bitify the immediate part; register contents are dealt with below */
337 if (!(s->tb->flags & FLAG_MASK_64)) {
343 tmp = tcg_const_i64(d2);
344 tcg_gen_add_i64(tmp, tmp, regs[x2]);
349 tcg_gen_add_i64(tmp, tmp, regs[b2]);
353 tmp = tcg_const_i64(d2);
354 tcg_gen_add_i64(tmp, tmp, regs[b2]);
359 tmp = tcg_const_i64(d2);
362 /* 31-bit mode mask if there are values loaded from registers */
363 if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
364 tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
370 static inline bool live_cc_data(DisasContext *s)
372 return (s->cc_op != CC_OP_DYNAMIC
373 && s->cc_op != CC_OP_STATIC
377 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
379 if (live_cc_data(s)) {
380 tcg_gen_discard_i64(cc_src);
381 tcg_gen_discard_i64(cc_dst);
382 tcg_gen_discard_i64(cc_vr);
384 s->cc_op = CC_OP_CONST0 + val;
387 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
389 if (live_cc_data(s)) {
390 tcg_gen_discard_i64(cc_src);
391 tcg_gen_discard_i64(cc_vr);
393 tcg_gen_mov_i64(cc_dst, dst);
397 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
400 if (live_cc_data(s)) {
401 tcg_gen_discard_i64(cc_vr);
403 tcg_gen_mov_i64(cc_src, src);
404 tcg_gen_mov_i64(cc_dst, dst);
408 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
409 TCGv_i64 dst, TCGv_i64 vr)
411 tcg_gen_mov_i64(cc_src, src);
412 tcg_gen_mov_i64(cc_dst, dst);
413 tcg_gen_mov_i64(cc_vr, vr);
417 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
419 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
422 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
424 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
427 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
429 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
432 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
434 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
437 /* CC value is in env->cc_op */
438 static void set_cc_static(DisasContext *s)
440 if (live_cc_data(s)) {
441 tcg_gen_discard_i64(cc_src);
442 tcg_gen_discard_i64(cc_dst);
443 tcg_gen_discard_i64(cc_vr);
445 s->cc_op = CC_OP_STATIC;
448 /* calculates cc into cc_op */
449 static void gen_op_calc_cc(DisasContext *s)
451 TCGv_i32 local_cc_op;
454 TCGV_UNUSED_I32(local_cc_op);
455 TCGV_UNUSED_I64(dummy);
458 dummy = tcg_const_i64(0);
472 local_cc_op = tcg_const_i32(s->cc_op);
488 /* s->cc_op is the cc value */
489 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
492 /* env->cc_op already is the cc value */
507 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
512 case CC_OP_LTUGTU_32:
513 case CC_OP_LTUGTU_64:
520 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
535 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
538 /* unknown operation - assume 3 arguments and cc_op in env */
539 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
545 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
546 tcg_temp_free_i32(local_cc_op);
548 if (!TCGV_IS_UNUSED_I64(dummy)) {
549 tcg_temp_free_i64(dummy);
552 /* We now have cc in cc_op as constant */
/* Return nonzero if a direct goto_tb to 'dest' is allowed: dest must lie
   on the same page as either the TB start or the current instruction, we
   must not be single-stepping, and the TB must not end with an I/O insn. */
556 static int use_goto_tb(DisasContext *s, uint64_t dest)
558 /* NOTE: we handle the case where the TB spans two pages here */
559 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
560 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
561 && !s->singlestep_enabled
562 && !(s->tb->cflags & CF_LAST_IO));
/* Statistics: count a branch whose condition could not be inlined
   (debug builds only; no-op otherwise). */
565 static void account_noninline_branch(DisasContext *s, int cc_op)
567 #ifdef DEBUG_INLINE_BRANCHES
568 inline_branch_miss[cc_op]++;
/* Statistics: count a branch whose condition was successfully inlined
   (debug builds only; no-op otherwise). */
572 static void account_inline_branch(DisasContext *s, int cc_op)
574 #ifdef DEBUG_INLINE_BRANCHES
575 inline_branch_hit[cc_op]++;
579 /* Table of mask values to comparison codes, given a comparison as input.
580 For such, CC=3 should not be possible. */
581 static const TCGCond ltgt_cond[16] = {
582 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
583 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
584 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
585 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
586 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
587 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
588 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
589 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
592 /* Table of mask values to comparison codes, given a logic op as input.
593 For such, only CC=0 and CC=1 should be possible. */
594 static const TCGCond nz_cond[16] = {
595 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
596 TCG_COND_NEVER, TCG_COND_NEVER,
597 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
598 TCG_COND_NE, TCG_COND_NE,
599 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
600 TCG_COND_EQ, TCG_COND_EQ,
601 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
602 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
605 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
606 details required to generate a TCG comparison. */
607 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
610 enum cc_op old_cc_op = s->cc_op;
612 if (mask == 15 || mask == 0) {
613 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
616 c->g1 = c->g2 = true;
621 /* Find the TCG condition for the mask + cc op. */
627 cond = ltgt_cond[mask];
628 if (cond == TCG_COND_NEVER) {
631 account_inline_branch(s, old_cc_op);
634 case CC_OP_LTUGTU_32:
635 case CC_OP_LTUGTU_64:
636 cond = tcg_unsigned_cond(ltgt_cond[mask]);
637 if (cond == TCG_COND_NEVER) {
640 account_inline_branch(s, old_cc_op);
644 cond = nz_cond[mask];
645 if (cond == TCG_COND_NEVER) {
648 account_inline_branch(s, old_cc_op);
663 account_inline_branch(s, old_cc_op);
678 account_inline_branch(s, old_cc_op);
682 switch (mask & 0xa) {
683 case 8: /* src == 0 -> no one bit found */
686 case 2: /* src != 0 -> one bit found */
692 account_inline_branch(s, old_cc_op);
697 /* Calculate cc value. */
702 /* Jump based on CC. We'll load up the real cond below;
703 the assignment here merely avoids a compiler warning. */
704 account_noninline_branch(s, old_cc_op);
705 old_cc_op = CC_OP_STATIC;
706 cond = TCG_COND_NEVER;
710 /* Load up the arguments of the comparison. */
712 c->g1 = c->g2 = false;
716 c->u.s32.a = tcg_temp_new_i32();
717 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
718 c->u.s32.b = tcg_const_i32(0);
721 case CC_OP_LTUGTU_32:
723 c->u.s32.a = tcg_temp_new_i32();
724 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
725 c->u.s32.b = tcg_temp_new_i32();
726 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
733 c->u.s64.b = tcg_const_i64(0);
737 case CC_OP_LTUGTU_64:
740 c->g1 = c->g2 = true;
746 c->u.s64.a = tcg_temp_new_i64();
747 c->u.s64.b = tcg_const_i64(0);
748 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
756 case 0x8 | 0x4 | 0x2: /* cc != 3 */
758 c->u.s32.b = tcg_const_i32(3);
760 case 0x8 | 0x4 | 0x1: /* cc != 2 */
762 c->u.s32.b = tcg_const_i32(2);
764 case 0x8 | 0x2 | 0x1: /* cc != 1 */
766 c->u.s32.b = tcg_const_i32(1);
768 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
771 c->u.s32.a = tcg_temp_new_i32();
772 c->u.s32.b = tcg_const_i32(0);
773 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
775 case 0x8 | 0x4: /* cc < 2 */
777 c->u.s32.b = tcg_const_i32(2);
779 case 0x8: /* cc == 0 */
781 c->u.s32.b = tcg_const_i32(0);
783 case 0x4 | 0x2 | 0x1: /* cc != 0 */
785 c->u.s32.b = tcg_const_i32(0);
787 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
790 c->u.s32.a = tcg_temp_new_i32();
791 c->u.s32.b = tcg_const_i32(0);
792 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
794 case 0x4: /* cc == 1 */
796 c->u.s32.b = tcg_const_i32(1);
798 case 0x2 | 0x1: /* cc > 1 */
800 c->u.s32.b = tcg_const_i32(1);
802 case 0x2: /* cc == 2 */
804 c->u.s32.b = tcg_const_i32(2);
806 case 0x1: /* cc == 3 */
808 c->u.s32.b = tcg_const_i32(3);
811 /* CC is masked by something else: (8 >> cc) & mask. */
814 c->u.s32.a = tcg_const_i32(8);
815 c->u.s32.b = tcg_const_i32(0);
816 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
817 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
828 static void free_compare(DisasCompare *c)
832 tcg_temp_free_i64(c->u.s64.a);
834 tcg_temp_free_i32(c->u.s32.a);
839 tcg_temp_free_i64(c->u.s64.b);
841 tcg_temp_free_i32(c->u.s32.b);
846 /* ====================================================================== */
847 /* Define the insn format enumeration. */
848 #define F0(N) FMT_##N,
849 #define F1(N, X1) F0(N)
850 #define F2(N, X1, X2) F0(N)
851 #define F3(N, X1, X2, X3) F0(N)
852 #define F4(N, X1, X2, X3, X4) F0(N)
853 #define F5(N, X1, X2, X3, X4, X5) F0(N)
856 #include "insn-format.def"
866 /* Define a structure to hold the decoded fields. We'll store each inside
867 an array indexed by an enum. In order to conserve memory, we'll arrange
868 for fields that do not exist at the same time to overlap, thus the "C"
869 for compact. For checking purposes there is an "O" for original index
870 as well that will be applied to availability bitmaps. */
872 enum DisasFieldIndexO {
895 enum DisasFieldIndexC {
929 unsigned presentC:16;
930 unsigned int presentO;
934 /* This is the way fields are to be accessed out of DisasFields. */
935 #define have_field(S, F) have_field1((S), FLD_O_##F)
936 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
938 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
940 return (f->presentO >> c) & 1;
943 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
944 enum DisasFieldIndexC c)
946 assert(have_field1(f, o));
950 /* Describe the layout of each field in each format. */
951 typedef struct DisasField {
955 unsigned int indexC:6;
956 enum DisasFieldIndexO indexO:8;
959 typedef struct DisasFormatInfo {
960 DisasField op[NUM_C_FIELD];
963 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
964 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
965 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
966 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
967 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
968 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
969 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
970 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
971 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
972 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
973 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
974 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
975 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
976 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
978 #define F0(N) { { } },
979 #define F1(N, X1) { { X1 } },
980 #define F2(N, X1, X2) { { X1, X2 } },
981 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
982 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
983 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
985 static const DisasFormatInfo format_info[] = {
986 #include "insn-format.def"
1004 /* Generally, we'll extract operands into this structures, operate upon
1005 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1006 of routines below for more details. */
1008 bool g_out, g_out2, g_in1, g_in2;
1009 TCGv_i64 out, out2, in1, in2;
1013 /* Instructions can place constraints on their operands, raising specification
1014 exceptions if they are violated. To make this easy to automate, each "in1",
1015 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1016 of the following, or 0. To make this easy to document, we'll put the
1017 SPEC_<name> defines next to <name>. */
1019 #define SPEC_r1_even 1
1020 #define SPEC_r2_even 2
1021 #define SPEC_r1_f128 4
1022 #define SPEC_r2_f128 8
1024 /* Return values from translate_one, indicating the state of the TB. */
1026 /* Continue the TB. */
1028 /* We have emitted one or more goto_tb. No fixup required. */
1030 /* We are not using a goto_tb (for whatever reason), but have updated
1031 the PC (for whatever reason), so there's no need to do it again on
1034 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1035 updated the PC for the next instruction to be executed. */
1037 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1038 No following code will be executed. */
1042 typedef enum DisasFacility {
1043 FAC_Z, /* zarch (default) */
1044 FAC_CASS, /* compare and swap and store */
1045 FAC_CASS2, /* compare and swap and store 2 */
1046 FAC_DFP, /* decimal floating point */
1047 FAC_DFPR, /* decimal floating point rounding */
1048 FAC_DO, /* distinct operands */
1049 FAC_EE, /* execute extensions */
1050 FAC_EI, /* extended immediate */
1051 FAC_FPE, /* floating point extension */
1052 FAC_FPSSH, /* floating point support sign handling */
1053 FAC_FPRGR, /* FPR-GR transfer */
1054 FAC_GIE, /* general instructions extension */
1055 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1056 FAC_HW, /* high-word */
1057 FAC_IEEEE_SIM, /* IEEE exception simulation */
1058 FAC_LOC, /* load/store on condition */
1059 FAC_LD, /* long displacement */
1060 FAC_PC, /* population count */
1061 FAC_SCF, /* store clock fast */
1062 FAC_SFLE, /* store facility list extended */
1068 DisasFacility fac:6;
1073 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1074 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1075 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1076 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1077 void (*help_cout)(DisasContext *, DisasOps *);
1078 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1083 /* ====================================================================== */
1084 /* Miscellaneous helpers, used by several operations. */
1086 static void help_l2_shift(DisasContext *s, DisasFields *f,
1087 DisasOps *o, int mask)
1089 int b2 = get_field(f, b2);
1090 int d2 = get_field(f, d2);
1093 o->in2 = tcg_const_i64(d2 & mask);
1095 o->in2 = get_address(s, 0, b2, d2);
1096 tcg_gen_andi_i64(o->in2, o->in2, mask);
1100 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1102 if (dest == s->next_pc) {
1105 if (use_goto_tb(s, dest)) {
1108 tcg_gen_movi_i64(psw_addr, dest);
1109 tcg_gen_exit_tb((tcg_target_long)s->tb);
1110 return EXIT_GOTO_TB;
1112 tcg_gen_movi_i64(psw_addr, dest);
1113 return EXIT_PC_UPDATED;
1117 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1118 bool is_imm, int imm, TCGv_i64 cdest)
1121 uint64_t dest = s->pc + 2 * imm;
1124 /* Take care of the special cases first. */
1125 if (c->cond == TCG_COND_NEVER) {
1130 if (dest == s->next_pc) {
1131 /* Branch to next. */
1135 if (c->cond == TCG_COND_ALWAYS) {
1136 ret = help_goto_direct(s, dest);
1140 if (TCGV_IS_UNUSED_I64(cdest)) {
1141 /* E.g. bcr %r0 -> no branch. */
1145 if (c->cond == TCG_COND_ALWAYS) {
1146 tcg_gen_mov_i64(psw_addr, cdest);
1147 ret = EXIT_PC_UPDATED;
1152 if (use_goto_tb(s, s->next_pc)) {
1153 if (is_imm && use_goto_tb(s, dest)) {
1154 /* Both exits can use goto_tb. */
1157 lab = gen_new_label();
1159 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1161 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1164 /* Branch not taken. */
1166 tcg_gen_movi_i64(psw_addr, s->next_pc);
1167 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
1172 tcg_gen_movi_i64(psw_addr, dest);
1173 tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
1177 /* Fallthru can use goto_tb, but taken branch cannot. */
1178 /* Store taken branch destination before the brcond. This
1179 avoids having to allocate a new local temp to hold it.
1180 We'll overwrite this in the not taken case anyway. */
1182 tcg_gen_mov_i64(psw_addr, cdest);
1185 lab = gen_new_label();
1187 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1189 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1192 /* Branch not taken. */
1195 tcg_gen_movi_i64(psw_addr, s->next_pc);
1196 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
1200 tcg_gen_movi_i64(psw_addr, dest);
1202 ret = EXIT_PC_UPDATED;
1205 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1206 Most commonly we're single-stepping or some other condition that
1207 disables all use of goto_tb. Just update the PC and exit. */
1209 TCGv_i64 next = tcg_const_i64(s->next_pc);
1211 cdest = tcg_const_i64(dest);
1215 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1218 TCGv_i32 t0 = tcg_temp_new_i32();
1219 TCGv_i64 t1 = tcg_temp_new_i64();
1220 TCGv_i64 z = tcg_const_i64(0);
1221 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1222 tcg_gen_extu_i32_i64(t1, t0);
1223 tcg_temp_free_i32(t0);
1224 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1225 tcg_temp_free_i64(t1);
1226 tcg_temp_free_i64(z);
1230 tcg_temp_free_i64(cdest);
1232 tcg_temp_free_i64(next);
1234 ret = EXIT_PC_UPDATED;
1242 /* ====================================================================== */
1243 /* The operations. These perform the bulk of the work for any insn,
1244 usually after the operands have been loaded and output initialized. */
1246 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1248 gen_helper_abs_i64(o->out, o->in2);
1252 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1254 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1258 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1260 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1264 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1266 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1267 tcg_gen_mov_i64(o->out2, o->in2);
1271 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1273 tcg_gen_add_i64(o->out, o->in1, o->in2);
1277 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1281 tcg_gen_add_i64(o->out, o->in1, o->in2);
1283 /* XXX possible optimization point */
1285 cc = tcg_temp_new_i64();
1286 tcg_gen_extu_i32_i64(cc, cc_op);
1287 tcg_gen_shri_i64(cc, cc, 1);
1289 tcg_gen_add_i64(o->out, o->out, cc);
1290 tcg_temp_free_i64(cc);
1294 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1296 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1300 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1302 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1306 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1308 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1309 return_low128(o->out2);
1313 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1315 tcg_gen_and_i64(o->out, o->in1, o->in2);
1319 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1321 int shift = s->insn->data & 0xff;
1322 int size = s->insn->data >> 8;
1323 uint64_t mask = ((1ull << size) - 1) << shift;
1326 tcg_gen_shli_i64(o->in2, o->in2, shift);
1327 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1328 tcg_gen_and_i64(o->out, o->in1, o->in2);
1330 /* Produce the CC from only the bits manipulated. */
1331 tcg_gen_andi_i64(cc_dst, o->out, mask);
1332 set_cc_nz_u64(s, cc_dst);
1336 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1338 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1339 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1340 tcg_gen_mov_i64(psw_addr, o->in2);
1341 return EXIT_PC_UPDATED;
1347 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1349 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1350 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1353 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1355 int m1 = get_field(s->fields, m1);
1356 bool is_imm = have_field(s->fields, i2);
1357 int imm = is_imm ? get_field(s->fields, i2) : 0;
1360 disas_jcc(s, &c, m1);
1361 return help_branch(s, &c, is_imm, imm, o->in2);
1364 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1366 int r1 = get_field(s->fields, r1);
1367 bool is_imm = have_field(s->fields, i2);
1368 int imm = is_imm ? get_field(s->fields, i2) : 0;
1372 c.cond = TCG_COND_NE;
1377 t = tcg_temp_new_i64();
1378 tcg_gen_subi_i64(t, regs[r1], 1);
1379 store_reg32_i64(r1, t);
1380 c.u.s32.a = tcg_temp_new_i32();
1381 c.u.s32.b = tcg_const_i32(0);
1382 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1383 tcg_temp_free_i64(t);
1385 return help_branch(s, &c, is_imm, imm, o->in2);
1388 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1390 int r1 = get_field(s->fields, r1);
1391 bool is_imm = have_field(s->fields, i2);
1392 int imm = is_imm ? get_field(s->fields, i2) : 0;
1395 c.cond = TCG_COND_NE;
1400 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1401 c.u.s64.a = regs[r1];
1402 c.u.s64.b = tcg_const_i64(0);
1404 return help_branch(s, &c, is_imm, imm, o->in2);
1407 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1409 int r1 = get_field(s->fields, r1);
1410 int r3 = get_field(s->fields, r3);
1411 bool is_imm = have_field(s->fields, i2);
1412 int imm = is_imm ? get_field(s->fields, i2) : 0;
1416 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1421 t = tcg_temp_new_i64();
1422 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1423 c.u.s32.a = tcg_temp_new_i32();
1424 c.u.s32.b = tcg_temp_new_i32();
1425 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1426 tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
1427 store_reg32_i64(r1, t);
1428 tcg_temp_free_i64(t);
1430 return help_branch(s, &c, is_imm, imm, o->in2);
1433 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1435 int r1 = get_field(s->fields, r1);
1436 int r3 = get_field(s->fields, r3);
1437 bool is_imm = have_field(s->fields, i2);
1438 int imm = is_imm ? get_field(s->fields, i2) : 0;
1441 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1444 if (r1 == (r3 | 1)) {
1445 c.u.s64.b = load_reg(r3 | 1);
1448 c.u.s64.b = regs[r3 | 1];
1452 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1453 c.u.s64.a = regs[r1];
1456 return help_branch(s, &c, is_imm, imm, o->in2);
1459 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1461 int imm, m3 = get_field(s->fields, m3);
1465 c.cond = ltgt_cond[m3];
1466 if (s->insn->data) {
1467 c.cond = tcg_unsigned_cond(c.cond);
1469 c.is_64 = c.g1 = c.g2 = true;
1473 is_imm = have_field(s->fields, i4);
1475 imm = get_field(s->fields, i4);
1478 o->out = get_address(s, 0, get_field(s->fields, b4),
1479 get_field(s->fields, d4));
1482 return help_branch(s, &c, is_imm, imm, o->out);
1485 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1487 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1492 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1494 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1499 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1501 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1506 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1508 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1509 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1510 tcg_temp_free_i32(m3);
1511 gen_set_cc_nz_f32(s, o->in2);
1515 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1517 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1518 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1519 tcg_temp_free_i32(m3);
1520 gen_set_cc_nz_f64(s, o->in2);
1524 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1526 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1527 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1528 tcg_temp_free_i32(m3);
1529 gen_set_cc_nz_f128(s, o->in1, o->in2);
1533 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1535 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1536 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1537 tcg_temp_free_i32(m3);
1538 gen_set_cc_nz_f32(s, o->in2);
/* NOTE(review): this dump appears truncated -- opening/closing braces and
   return statements are missing throughout; comments document intent only. */
/* CONVERT TO FIXED: BFP source (long/extended) -> 64-bit signed result.
   m3 is the rounding-mode field; CC is derived from the source operand. */
1542 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1544 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1545 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1546 tcg_temp_free_i32(m3);
1547 gen_set_cc_nz_f64(s, o->in2);
1551 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1553 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1554 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1555 tcg_temp_free_i32(m3);
1556 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* CONVERT TO LOGICAL, 32-bit unsigned result, from short/long/extended BFP. */
1560 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1562 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1563 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1564 tcg_temp_free_i32(m3);
1565 gen_set_cc_nz_f32(s, o->in2);
1569 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1571 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1572 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1573 tcg_temp_free_i32(m3);
1574 gen_set_cc_nz_f64(s, o->in2);
1578 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1580 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1581 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1582 tcg_temp_free_i32(m3);
1583 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* CONVERT TO LOGICAL, 64-bit unsigned result. */
1587 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1589 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1590 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1591 tcg_temp_free_i32(m3);
1592 gen_set_cc_nz_f32(s, o->in2);
1596 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1598 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1599 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1600 tcg_temp_free_i32(m3);
1601 gen_set_cc_nz_f64(s, o->in2);
1605 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1607 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1608 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1609 tcg_temp_free_i32(m3);
1610 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* CONVERT FROM FIXED: 64-bit signed int -> short/long/extended BFP.
   No CC change here; only the conversion helper is invoked. */
1614 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1616 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1617 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1618 tcg_temp_free_i32(m3);
1622 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1624 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1625 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1626 tcg_temp_free_i32(m3);
1630 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1632 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1633 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1634 tcg_temp_free_i32(m3);
/* Extended result comes back as a 128-bit pair; out2 via return_low128. */
1635 return_low128(o->out2);
/* CONVERT FROM LOGICAL: 64-bit unsigned int -> short/long/extended BFP. */
1639 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1641 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1642 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1643 tcg_temp_free_i32(m3);
1647 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1649 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1650 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1651 tcg_temp_free_i32(m3);
1655 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1657 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1658 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1659 tcg_temp_free_i32(m3);
1660 return_low128(o->out2);
/* CKSM: checksum.  regs[r2]/regs[r2+1] are the operand address/length pair;
   the helper returns the number of bytes consumed in LEN, which is then used
   to advance the address and decrement the remaining length. */
1664 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1666 int r2 = get_field(s->fields, r2);
1667 TCGv_i64 len = tcg_temp_new_i64();
1669 potential_page_fault(s);
1670 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1672 return_low128(o->out);
1674 tcg_gen_add_i64(regs[r2], regs[r2], len);
1675 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1676 tcg_temp_free_i64(len);
/* CLC: compare logical character (memory vs memory).  For operand lengths of
   1/2/4/8 bytes the two operands are loaded inline and CC is computed with the
   unsigned-compare cc op; otherwise the generic helper does the byte loop.
   NOTE(review): the switch/case lines selecting among these sizes appear to
   be missing from this dump. */
1681 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1683 int l = get_field(s->fields, l1);
1688 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1689 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1692 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1693 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1696 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1697 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1700 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1701 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
/* Fallback: arbitrary length via helper, which sets cc_op directly. */
1704 potential_page_fault(s);
1705 vl = tcg_const_i32(l);
1706 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1707 tcg_temp_free_i32(vl);
/* Inline path: CC from unsigned compare of the two loaded values. */
1711 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
/* CLCLE: compare logical long extended -- helper sets cc_op. */
1715 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1717 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1718 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1719 potential_page_fault(s);
1720 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1721 tcg_temp_free_i32(r1);
1722 tcg_temp_free_i32(r3);
/* CLM: compare logical under mask; only low 32 bits of in1 participate. */
1727 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1729 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1730 TCGv_i32 t1 = tcg_temp_new_i32();
1731 tcg_gen_trunc_i64_i32(t1, o->in1);
1732 potential_page_fault(s);
1733 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1735 tcg_temp_free_i32(t1);
1736 tcg_temp_free_i32(m3);
/* CLST: compare logical string; regs[0] holds the terminating character.
   The helper returns updated addresses through in1 and the low128 slot. */
1740 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1742 potential_page_fault(s);
1743 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1745 return_low128(o->in2);
1749 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1751 TCGv_i64 t = tcg_temp_new_i64();
1752 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1753 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1754 tcg_gen_or_i64(o->out, o->out, t);
1755 tcg_temp_free_i64(t);
/* CS: 32-bit compare-and-swap; regs[r3] is the replacement value. */
1759 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1761 int r3 = get_field(s->fields, r3);
1762 potential_page_fault(s);
1763 gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
/* CSG: 64-bit compare-and-swap. */
1768 static ExitStatus op_csg(DisasContext *s, DisasOps *o)
1770 int r3 = get_field(s->fields, r3);
1771 potential_page_fault(s);
1772 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
/* Privileged: CSP (compare and swap and purge).
   NOTE(review): the matching #endif for this #ifndef is missing in this dump. */
1777 #ifndef CONFIG_USER_ONLY
1778 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
1780 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1781 check_privileged(s);
1782 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
1783 tcg_temp_free_i32(r1);
/* CDS: 64-bit double compare-and-swap of an even/odd 32-bit register pair;
   the pair is glued into one 64-bit value and reuses the CSG helper. */
1789 static ExitStatus op_cds(DisasContext *s, DisasOps *o)
1791 int r3 = get_field(s->fields, r3);
1792 TCGv_i64 in3 = tcg_temp_new_i64();
1793 tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
1794 potential_page_fault(s);
1795 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
1796 tcg_temp_free_i64(in3);
/* CDSG: 128-bit compare-and-swap via helper; sets cc_op. */
1801 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1803 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1804 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1805 potential_page_fault(s);
1806 /* XXX rewrite in tcg */
1807 gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
1812 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
1814 TCGv_i64 t1 = tcg_temp_new_i64();
1815 TCGv_i32 t2 = tcg_temp_new_i32();
1816 tcg_gen_trunc_i64_i32(t2, o->in1);
1817 gen_helper_cvd(t1, t2);
1818 tcg_temp_free_i32(t2);
1819 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
1820 tcg_temp_free_i64(t1);
/* CT: compare and trap.  Branch past the trap when the (inverted) condition
   holds; otherwise set DXC 0xff in the FPC and raise a data exception. */
1824 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
1826 int m3 = get_field(s->fields, m3);
1827 int lab = gen_new_label();
/* Invert the condition so the branch skips the trap path. */
1831 c = tcg_invert_cond(ltgt_cond[m3]);
/* insn->data distinguishes the logical (unsigned) variants. */
1832 if (s->insn->data) {
1833 c = tcg_unsigned_cond(c);
1835 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
1837 /* Set DXC to 0xff. */
1838 t = tcg_temp_new_i32();
1839 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
1840 tcg_gen_ori_i32(t, t, 0xff00);
1841 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
1842 tcg_temp_free_i32(t);
1845 gen_program_exception(s, PGM_DATA);
/* Privileged: DIAGNOSE (only the KVM/hypercall subset is modeled).
   NOTE(review): the matching #endif is missing in this dump. */
1851 #ifndef CONFIG_USER_ONLY
1852 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
1856 check_privileged(s);
1857 potential_page_fault(s);
1859 /* We pretend the format is RX_a so that D2 is the field we want. */
1860 tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
1861 gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
1862 tcg_temp_free_i32(tmp);
/* Integer divides: the helpers produce quotient and remainder; one comes
   back directly, the other through the low128 return slot. */
1867 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
1869 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
1870 return_low128(o->out);
1874 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
1876 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
1877 return_low128(o->out);
1881 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
1883 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
1884 return_low128(o->out);
/* divu64 takes the 128-bit dividend as the out/out2 pair. */
1888 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
1890 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
1891 return_low128(o->out);
/* BFP divides: short, long, and extended (extended uses register pairs). */
1895 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
1897 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
1901 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
1903 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
1907 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
1909 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1910 return_low128(o->out2);
/* EAR: extract access register r2 into the low 32 bits of out. */
1914 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
1916 int r2 = get_field(s->fields, r2);
1917 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
/* EFPC: extract the floating-point control register. */
1921 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
1923 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
/* EX: execute the instruction at in2, modified by in1, via helper. */
1927 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
1929 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
1930 tb->flags, (ab)use the tb->cs_base field as the address of
1931 the template in memory, and grab 8 bits of tb->flags/cflags for
1932 the contents of the register. We would then recognize all this
1933 in gen_intermediate_code_internal, generating code for exactly
1934 one instruction. This new TB then gets executed normally.
1936 On the other hand, this seems to be mostly used for modifying
1937 MVC inside of memcpy, which needs a helper call anyway. So
1938 perhaps this doesn't bear thinking about any further. */
1945 tmp = tcg_const_i64(s->next_pc);
1946 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
1947 tcg_temp_free_i64(tmp);
/* FLOGR: find leftmost one.  R1 gets the bit position (or 64 for zero
   input), R1+1 gets the input with that bit cleared. */
1953 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
1955 /* We'll use the original input for cc computation, since we get to
1956 compare that against 0, which ought to be better than comparing
1957 the real output against 64. It also lets cc_dst be a convenient
1958 temporary during our computation. */
1959 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
1961 /* R1 = IN ? CLZ(IN) : 64. */
1962 gen_helper_clz(o->out, o->in2);
1964 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
1965 value by 64, which is undefined. But since the shift is 64 iff the
1966 input is zero, we still get the correct result after and'ing. */
1967 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
1968 tcg_gen_shr_i64(o->out2, o->out2, o->out);
1969 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
/* ICM: insert characters under mask.  Contiguous masks become one sized
   load + deposit; sparse masks fall back to byte-by-byte loads.
   NOTE(review): the switch/case lines selecting among mask shapes appear
   to be missing from this dump. */
1973 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
1975 int m3 = get_field(s->fields, m3);
1976 int pos, len, base = s->insn->data;
1977 TCGv_i64 tmp = tcg_temp_new_i64();
1982 /* Effectively a 32-bit load. */
1983 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
1990 /* Effectively a 16-bit load. */
1991 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
1999 /* Effectively an 8-bit load. */
2000 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
/* Contiguous case: one deposit at the mask's position. */
2005 pos = base + ctz32(m3) * 8;
2006 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2007 ccm = ((1ull << len) - 1) << pos;
2011 /* This is going to be a sequence of loads and inserts. */
2012 pos = base + 32 - 8;
2016 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2017 tcg_gen_addi_i64(o->in2, o->in2, 1);
2018 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2021 m3 = (m3 << 1) & 0xf;
/* CC from the inserted bits only. */
2027 tcg_gen_movi_i64(tmp, ccm);
2028 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2029 tcg_temp_free_i64(tmp);
/* INSI: generic insert-immediate via deposit; shift/size packed in
   insn->data (low byte = shift, high bits = field width). */
2033 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2035 int shift = s->insn->data & 0xff;
2036 int size = s->insn->data >> 8;
2037 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
/* IPM: insert program mask -- build bits 24-31 of out from the PSW
   program mask and the current CC. */
2041 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2046 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
/* Extract the 4-bit program mask from psw_mask into bits 24-27. */
2048 t1 = tcg_temp_new_i64();
2049 tcg_gen_shli_i64(t1, psw_mask, 20);
2050 tcg_gen_shri_i64(t1, t1, 36);
2051 tcg_gen_or_i64(o->out, o->out, t1);
/* Place the condition code in bits 28-29. */
2053 tcg_gen_extu_i32_i64(t1, cc_op);
2054 tcg_gen_shli_i64(t1, t1, 28);
2055 tcg_gen_or_i64(o->out, o->out, t1);
2056 tcg_temp_free_i64(t1);
/* Privileged: IPTE / ISKE.
   NOTE(review): the matching #endif is missing in this dump. */
2060 #ifndef CONFIG_USER_ONLY
2061 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2063 check_privileged(s);
2064 gen_helper_ipte(cpu_env, o->in1, o->in2);
2068 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2070 check_privileged(s);
2071 gen_helper_iske(o->out, cpu_env, o->in2);
/* BFP load-lengthen / load-rounded conversions between short, long and
   extended formats (extended uses the out/out2 pair via return_low128). */
2076 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2078 gen_helper_ldeb(o->out, cpu_env, o->in2);
2082 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2084 gen_helper_ledb(o->out, cpu_env, o->in2);
2088 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2090 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2094 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2096 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2100 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2102 gen_helper_lxdb(o->out, cpu_env, o->in2);
2103 return_low128(o->out2);
2107 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2109 gen_helper_lxeb(o->out, cpu_env, o->in2);
2110 return_low128(o->out2);
/* LLGT: load 31-bit (clear bit 32 and above). */
2114 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2116 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
/* Generic memory loads of each width, signed and unsigned. */
2120 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2122 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2126 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2128 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2132 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2134 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2138 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2140 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2144 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2146 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2150 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2152 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2156 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2158 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
/* LOC: load on condition.  64-bit comparisons use movcond directly;
   32-bit conditions are materialized with setcond, widened, and then
   selected against zero. */
2162 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2166 disas_jcc(s, &c, get_field(s->fields, m3));
2169 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
/* 32-bit path: compute the condition as a 0/1 value first. */
2173 TCGv_i32 t32 = tcg_temp_new_i32();
2176 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2179 t = tcg_temp_new_i64();
2180 tcg_gen_extu_i32_i64(t, t32);
2181 tcg_temp_free_i32(t32);
2183 z = tcg_const_i64(0);
2184 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2185 tcg_temp_free_i64(t);
2186 tcg_temp_free_i64(z);
/* Privileged system-state loads: control registers, real address, PSW.
   NOTE(review): the matching #endif is missing in this dump. */
2192 #ifndef CONFIG_USER_ONLY
2193 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2195 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2196 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2197 check_privileged(s);
2198 potential_page_fault(s);
2199 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2200 tcg_temp_free_i32(r1);
2201 tcg_temp_free_i32(r3);
2205 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2207 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2208 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2209 check_privileged(s);
2210 potential_page_fault(s);
2211 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2212 tcg_temp_free_i32(r1);
2213 tcg_temp_free_i32(r3);
/* LRA: load real address (translate virtual -> real; helper sets CC). */
2216 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2218 check_privileged(s);
2219 potential_page_fault(s);
2220 gen_helper_lra(o->out, cpu_env, o->in2);
/* LPSW: load a short (ESA) PSW -- two 32-bit words; mask is widened. */
2225 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2229 check_privileged(s);
2231 t1 = tcg_temp_new_i64();
2232 t2 = tcg_temp_new_i64();
2233 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2234 tcg_gen_addi_i64(o->in2, o->in2, 4);
2235 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2236 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2237 tcg_gen_shli_i64(t1, t1, 32);
2238 gen_helper_load_psw(cpu_env, t1, t2);
2239 tcg_temp_free_i64(t1);
2240 tcg_temp_free_i64(t2);
2241 return EXIT_NORETURN;
/* LPSWE: load a full 16-byte z/Architecture PSW. */
2244 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2248 check_privileged(s);
2250 t1 = tcg_temp_new_i64();
2251 t2 = tcg_temp_new_i64();
2252 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2253 tcg_gen_addi_i64(o->in2, o->in2, 8);
2254 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2255 gen_helper_load_psw(cpu_env, t1, t2);
2256 tcg_temp_free_i64(t1);
2257 tcg_temp_free_i64(t2);
2258 return EXIT_NORETURN;
/* LAM: load access multiple via helper. */
2262 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2264 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2265 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2266 potential_page_fault(s);
2267 gen_helper_lam(cpu_env, r1, o->in2, r3);
2268 tcg_temp_free_i32(r1);
2269 tcg_temp_free_i32(r3);
/* LM (32-bit): load registers r1..r3 from ascending 4-byte slots.
   NOTE(review): the loop construct around the per-register load appears
   to be missing from this dump. */
2273 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2275 int r1 = get_field(s->fields, r1);
2276 int r3 = get_field(s->fields, r3);
2277 TCGv_i64 t = tcg_temp_new_i64();
2278 TCGv_i64 t4 = tcg_const_i64(4);
2281 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2282 store_reg32_i64(r1, t);
2286 tcg_gen_add_i64(o->in2, o->in2, t4);
2290 tcg_temp_free_i64(t);
2291 tcg_temp_free_i64(t4);
/* LMH: as LM but targeting the high halves of the registers. */
2295 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2297 int r1 = get_field(s->fields, r1);
2298 int r3 = get_field(s->fields, r3);
2299 TCGv_i64 t = tcg_temp_new_i64();
2300 TCGv_i64 t4 = tcg_const_i64(4);
2303 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2304 store_reg32h_i64(r1, t);
2308 tcg_gen_add_i64(o->in2, o->in2, t4);
2312 tcg_temp_free_i64(t);
2313 tcg_temp_free_i64(t4);
/* LMG: 64-bit load multiple, 8-byte stride. */
2317 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2319 int r1 = get_field(s->fields, r1);
2320 int r3 = get_field(s->fields, r3);
2321 TCGv_i64 t8 = tcg_const_i64(8);
2324 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2328 tcg_gen_add_i64(o->in2, o->in2, t8);
2332 tcg_temp_free_i64(t8);
/* MOV2: steal in2 as the output, propagating the "global TCG temp" flag
   so the framework won't free a global register temp. */
2336 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2339 o->g_out = o->g_in2;
2340 TCGV_UNUSED_I64(o->in2);
/* MOVX: steal the in1/in2 pair as the 128-bit output pair. */
2345 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2349 o->g_out = o->g_in1;
2350 o->g_out2 = o->g_in2;
2351 TCGV_UNUSED_I64(o->in1);
2352 TCGV_UNUSED_I64(o->in2);
2353 o->g_in1 = o->g_in2 = false;
/* MVC: move character, length from the L1 field, via helper. */
2357 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2359 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2360 potential_page_fault(s);
2361 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2362 tcg_temp_free_i32(l);
/* MVCL: move long -- helper updates the register pairs and sets cc_op. */
2366 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2368 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2369 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2370 potential_page_fault(s);
2371 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2372 tcg_temp_free_i32(r1);
2373 tcg_temp_free_i32(r2);
/* MVCLE: move long extended. */
2378 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2380 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2381 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2382 potential_page_fault(s);
2383 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2384 tcg_temp_free_i32(r1);
2385 tcg_temp_free_i32(r3);
/* Privileged: MVCP/MVCS move to/from secondary address space; the length
   and key come from the register named by the L1 field.
   NOTE(review): the matching #endif is missing in this dump. */
2390 #ifndef CONFIG_USER_ONLY
2391 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2393 int r1 = get_field(s->fields, l1);
2394 check_privileged(s);
2395 potential_page_fault(s);
2396 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2401 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2403 int r1 = get_field(s->fields, l1);
2404 check_privileged(s);
2405 potential_page_fault(s);
2406 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
/* MVPG: move page; regs[0] supplies the key/flag operand. */
2412 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2414 potential_page_fault(s);
2415 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
/* MVST: move string terminated by the character in regs[0]. */
2420 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2422 potential_page_fault(s);
2423 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2425 return_low128(o->in2);
/* Integer multiply (64x64 -> 64). */
2429 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2431 tcg_gen_mul_i64(o->out, o->in1, o->in2);
/* MLGR-style 64x64 -> 128 multiply via helper. */
2435 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2437 gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
2438 return_low128(o->out2);
/* BFP multiplies: short, long, long*short widening, extended. */
2442 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2444 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2448 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2450 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2454 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2456 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2460 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2462 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2463 return_low128(o->out2);
2467 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2469 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2470 return_low128(o->out2);
/* Fused multiply-add / multiply-subtract; r3 names the third operand. */
2474 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2476 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2477 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2478 tcg_temp_free_i64(r3);
2482 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2484 int r3 = get_field(s->fields, r3);
2485 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2489 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2491 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2492 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2493 tcg_temp_free_i64(r3);
2497 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2499 int r3 = get_field(s->fields, r3);
2500 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
/* Load negative: integer helper, and FP variants done by setting the
   sign bit directly on the register image. */
2504 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2506 gen_helper_nabs_i64(o->out, o->in2);
2510 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2512 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2516 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2518 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2522 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2524 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2525 tcg_gen_mov_i64(o->out2, o->in2);
/* NC: AND two storage operands via helper; sets cc_op. */
2529 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2531 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2532 potential_page_fault(s);
2533 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2534 tcg_temp_free_i32(l);
/* Load complement: integer negate, FP via flipping the sign bit. */
2539 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2541 tcg_gen_neg_i64(o->out, o->in2);
2545 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2547 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2551 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2553 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
2557 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2559 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2560 tcg_gen_mov_i64(o->out2, o->in2);
/* OC: OR two storage operands via helper; sets cc_op. */
2564 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2566 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2567 potential_page_fault(s);
2568 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2569 tcg_temp_free_i32(l);
/* Register OR. */
2574 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2576 tcg_gen_or_i64(o->out, o->in1, o->in2);
/* ORI: OR immediate into a sub-field; shift/size packed in insn->data. */
2580 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2582 int shift = s->insn->data & 0xff;
2583 int size = s->insn->data >> 8;
2584 uint64_t mask = ((1ull << size) - 1) << shift;
2587 tcg_gen_shli_i64(o->in2, o->in2, shift);
2588 tcg_gen_or_i64(o->out, o->in1, o->in2);
2590 /* Produce the CC from only the bits manipulated. */
2591 tcg_gen_andi_i64(cc_dst, o->out, mask);
2592 set_cc_nz_u64(s, cc_dst);
/* POPCNT via helper. */
2596 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
2598 gen_helper_popcnt(o->out, o->in2);
/* Privileged: PTLB purges the TLB.
   NOTE(review): the matching #endif is missing in this dump. */
2602 #ifndef CONFIG_USER_ONLY
2603 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
2605 check_privileged(s);
2606 gen_helper_ptlb(cpu_env);
/* RISBG/RISBHG/RISBLG: rotate then insert selected bits.
   NOTE(review): several case bodies and the deposit-length computation
   appear to be missing from this dump; comments follow the visible code. */
2611 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
2613 int i3 = get_field(s->fields, i3);
2614 int i4 = get_field(s->fields, i4);
2615 int i5 = get_field(s->fields, i5);
2616 int do_zero = i4 & 0x80;
2617 uint64_t mask, imask, pmask;
2620 /* Adjust the arguments for the specific insn. */
2621 switch (s->fields->op2) {
2622 case 0x55: /* risbg */
2627 case 0x5d: /* risbhg */
2630 pmask = 0xffffffff00000000ull;
2632 case 0x51: /* risblg */
2635 pmask = 0x00000000ffffffffull;
2641 /* MASK is the set of bits to be inserted from R2.
2642 Take care for I3/I4 wraparound. */
2645 mask ^= pmask >> i4 >> 1;
2647 mask |= ~(pmask >> i4 >> 1);
2651 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
2652 insns, we need to keep the other half of the register. */
2653 imask = ~mask | ~pmask;
2655 if (s->fields->op2 == 0x55) {
2662 /* In some cases we can implement this with deposit, which can be more
2663 efficient on some hosts. */
2664 if (~mask == imask && i3 <= i4) {
2665 if (s->fields->op2 == 0x5d) {
2668 /* Note that we rotate the bits to be inserted to the lsb, not to
2669 the position as described in the PoO. */
2672 rot = (i5 - pos) & 63;
2678 /* Rotate the input as necessary. */
2679 tcg_gen_rotli_i64(o->in2, o->in2, rot);
2681 /* Insert the selected bits into the output. */
2683 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
2684 } else if (imask == 0) {
2685 tcg_gen_andi_i64(o->out, o->in2, mask);
/* General case: mask both halves and merge. */
2687 tcg_gen_andi_i64(o->in2, o->in2, mask);
2688 tcg_gen_andi_i64(o->out, o->out, imask);
2689 tcg_gen_or_i64(o->out, o->out, o->in2);
/* RNSBG/ROSBG/RXSBG: rotate then AND/OR/XOR selected bits; CC from the
   bits operated on. */
2694 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
2696 int i3 = get_field(s->fields, i3);
2697 int i4 = get_field(s->fields, i4);
2698 int i5 = get_field(s->fields, i5);
2701 /* If this is a test-only form, arrange to discard the result. */
2703 o->out = tcg_temp_new_i64();
2711 /* MASK is the set of bits to be operated on from R2.
2712 Take care for I3/I4 wraparound. */
2715 mask ^= ~0ull >> i4 >> 1;
2717 mask |= ~(~0ull >> i4 >> 1);
2720 /* Rotate the input as necessary. */
2721 tcg_gen_rotli_i64(o->in2, o->in2, i5);
/* Dispatch on op2: AND, OR, or XOR the masked bits into the result. */
2724 switch (s->fields->op2) {
2725 case 0x55: /* AND */
2726 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
2727 tcg_gen_and_i64(o->out, o->out, o->in2);
2730 tcg_gen_andi_i64(o->in2, o->in2, mask);
2731 tcg_gen_or_i64(o->out, o->out, o->in2);
2733 case 0x57: /* XOR */
2734 tcg_gen_andi_i64(o->in2, o->in2, mask);
2735 tcg_gen_xor_i64(o->out, o->out, o->in2);
/* CC from only the selected bits. */
2742 tcg_gen_andi_i64(cc_dst, o->out, mask);
2743 set_cc_nz_u64(s, cc_dst);
/* Byte-reversal (LRVR family) at 16/32/64-bit widths. */
2747 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2749 tcg_gen_bswap16_i64(o->out, o->in2);
2753 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2755 tcg_gen_bswap32_i64(o->out, o->in2);
2759 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2761 tcg_gen_bswap64_i64(o->out, o->in2);
/* RLL: 32-bit rotate left, done in 32-bit temps then zero-extended. */
2765 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2767 TCGv_i32 t1 = tcg_temp_new_i32();
2768 TCGv_i32 t2 = tcg_temp_new_i32();
2769 TCGv_i32 to = tcg_temp_new_i32();
2770 tcg_gen_trunc_i64_i32(t1, o->in1);
2771 tcg_gen_trunc_i64_i32(t2, o->in2);
2772 tcg_gen_rotl_i32(to, t1, t2);
2773 tcg_gen_extu_i32_i64(o->out, to);
2774 tcg_temp_free_i32(t1);
2775 tcg_temp_free_i32(t2);
2776 tcg_temp_free_i32(to);
/* RLLG: 64-bit rotate left. */
2780 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2782 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
/* Privileged: RRBE (reset reference bit extended) and SACF (set address
   space control fast).  NOTE(review): the matching #endif is missing. */
2786 #ifndef CONFIG_USER_ONLY
2787 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
2789 check_privileged(s);
2790 gen_helper_rrbe(cc_op, cpu_env, o->in2);
2795 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
2797 check_privileged(s);
2798 gen_helper_sacf(cpu_env, o->in2);
2799 /* Addressing mode has changed, so end the block. */
2800 return EXIT_PC_STALE;
/* SAR: set access register r1 from the low 32 bits of in2. */
2804 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
2806 int r1 = get_field(s->fields, r1);
2807 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
/* BFP subtracts: short, long, extended (extended uses register pairs). */
2811 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
2813 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
2817 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
2819 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
2823 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
2825 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2826 return_low128(o->out2);
/* BFP square roots. */
2830 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
2832 gen_helper_sqeb(o->out, cpu_env, o->in2);
2836 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
2838 gen_helper_sqdb(o->out, cpu_env, o->in2);
2842 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
2844 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
2845 return_low128(o->out2);
/* Privileged: SERVC (service call / SCLP) and SIGP.
   NOTE(review): the matching #endif is missing in this dump. */
2849 #ifndef CONFIG_USER_ONLY
2850 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
2852 check_privileged(s);
2853 potential_page_fault(s);
2854 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
2859 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
2861 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2862 check_privileged(s);
2863 potential_page_fault(s);
2864 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
2865 tcg_temp_free_i32(r1);
/* STOC: store on condition -- branch over the store when the condition
   fails; insn->data selects 64- vs 32-bit store width. */
2870 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
2876 disas_jcc(s, &c, get_field(s->fields, m3));
2878 lab = gen_new_label();
2880 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
2882 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
2886 r1 = get_field(s->fields, r1);
2887 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
2888 if (s->insn->data) {
2889 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
2891 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
2893 tcg_temp_free_i64(a);
/* SLA: shift left single (arithmetic).  insn->data gives the sign-bit
   position (31 for 32-bit form, 63 for 64-bit). */
2899 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
2901 uint64_t sign = 1ull << s->insn->data;
2902 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
2903 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
2904 tcg_gen_shl_i64(o->out, o->in1, o->in2);
2905 /* The arithmetic left shift is curious in that it does not affect
2906 the sign bit. Copy that over from the source unchanged. */
2907 tcg_gen_andi_i64(o->out, o->out, ~sign);
2908 tcg_gen_andi_i64(o->in1, o->in1, sign);
2909 tcg_gen_or_i64(o->out, o->out, o->in1);
2913 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
2915 tcg_gen_shl_i64(o->out, o->in1, o->in2);
/* SRA/SRAG: arithmetic shift right. */
2919 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
2921 tcg_gen_sar_i64(o->out, o->in1, o->in2);
/* SRL/SRLG: logical shift right. */
2925 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
2927 tcg_gen_shr_i64(o->out, o->in1, o->in2);
/* SFPC: set floating-point control register from in2. */
2931 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
2933 gen_helper_sfpc(cpu_env, o->in2);
/* SFASR: set FPC and signal (may raise simulated FP exceptions). */
2937 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
2939 gen_helper_sfas(cpu_env, o->in2);
/* SRNM/SRNMB/SRNMT: set the rounding-mode field of the FPC.  The field
   position/width depends on op2; the value comes from B2+D2.
   NOTE(review): the per-case pos/len assignments appear to be missing. */
2943 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
2945 int b2 = get_field(s->fields, b2);
2946 int d2 = get_field(s->fields, d2);
2947 TCGv_i64 t1 = tcg_temp_new_i64();
2948 TCGv_i64 t2 = tcg_temp_new_i64();
2951 switch (s->fields->op2) {
2952 case 0x99: /* SRNM */
2955 case 0xb8: /* SRNMB */
2958 case 0xb9: /* SRNMT */
2963 mask = (1 << len) - 1;
2965 /* Insert the value into the appropriate field of the FPC. */
2967 tcg_gen_movi_i64(t1, d2 & mask);
2969 tcg_gen_addi_i64(t1, regs[b2], d2);
2970 tcg_gen_andi_i64(t1, t1, mask);
2972 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
2973 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
2974 tcg_temp_free_i64(t1);
2976 /* Then install the new FPC to set the rounding mode in fpu_status. */
2977 gen_helper_sfpc(cpu_env, t2);
2978 tcg_temp_free_i64(t2);
/* Privileged system-state setters/stores.
   NOTE(review): the matching #endif is missing in this dump. */
2982 #ifndef CONFIG_USER_ONLY
/* SPKA: set PSW key from bits 4-7 of the second-operand address. */
2983 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
2985 check_privileged(s);
2986 tcg_gen_shri_i64(o->in2, o->in2, 4);
2987 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
/* SSKE: set storage key extended. */
2991 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
2993 check_privileged(s);
2994 gen_helper_sske(cpu_env, o->in1, o->in2);
/* SSM: set system mask (PSW bits 0-7). */
2998 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3000 check_privileged(s);
3001 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
/* STAP: store CPU address. */
3005 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3007 check_privileged(s);
3008 /* ??? Surely cpu address != cpu number. In any case the previous
3009 version of this stored more than the required half-word, so it
3010 is unlikely this has ever been tested. */
3011 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
/* STCK: store clock. */
3015 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3017 gen_helper_stck(o->out, cpu_env);
3018 /* ??? We don't implement clock states. */
3019 gen_op_movi_cc(s, 0);
/* STCKE: store clock extended -- 16-byte result built from the 64-bit
   clock value, stored as two 8-byte words. */
3023 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3025 TCGv_i64 c1 = tcg_temp_new_i64();
3026 TCGv_i64 c2 = tcg_temp_new_i64();
3027 gen_helper_stck(c1, cpu_env);
3028 /* Shift the 64-bit value into its place as a zero-extended
3029 104-bit value. Note that "bit positions 64-103 are always
3030 non-zero so that they compare differently to STCK"; we set
3031 the least significant bit to 1. */
3032 tcg_gen_shli_i64(c2, c1, 56);
3033 tcg_gen_shri_i64(c1, c1, 8);
3034 tcg_gen_ori_i64(c2, c2, 0x10000);
3035 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3036 tcg_gen_addi_i64(o->in2, o->in2, 8);
3037 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3038 tcg_temp_free_i64(c1);
3039 tcg_temp_free_i64(c2);
3040 /* ??? We don't implement clock states. */
3041 gen_op_movi_cc(s, 0);
/* SCKC: set clock comparator. */
3045 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3047 check_privileged(s);
3048 gen_helper_sckc(cpu_env, o->in2);
/* STCKC: store clock comparator. */
3052 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3054 check_privileged(s);
3055 gen_helper_stckc(o->out, cpu_env);
/* STCTG/STCTL: store control registers r1..r3 via helpers. */
3059 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3061 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3062 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3063 check_privileged(s);
3064 potential_page_fault(s);
3065 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3066 tcg_temp_free_i32(r1);
3067 tcg_temp_free_i32(r3);
3071 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3073 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3074 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3075 check_privileged(s);
3076 potential_page_fault(s);
3077 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3078 tcg_temp_free_i32(r1);
3079 tcg_temp_free_i32(r3);
/* STIDP: store CPU ID (modeled as cpu_num here). */
3083 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3085 check_privileged(s);
3086 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
/* SPT: set CPU timer. */
3090 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3092 check_privileged(s);
3093 gen_helper_spt(cpu_env, o->in2);
/* STFL: STORE FACILITY LIST — privileged; stores a hard-coded facility
   word (0xc0000000) at absolute address 200, per the visible constants. */
3097 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3100 /* We really ought to have more complete indication of facilities
3101 that we implement. Address this when STFLE is implemented. */
3102 check_privileged(s);
3103 f = tcg_const_i64(0xc0000000);
3104 a = tcg_const_i64(200);
3105 tcg_gen_qemu_st32(f, a, get_mem_index(s));
3106 tcg_temp_free_i64(f);
3107 tcg_temp_free_i64(a);

/* STPT: STORE CPU TIMER — privileged; helper produces the value. */
3111 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3113 check_privileged(s);
3114 gen_helper_stpt(o->out, cpu_env);

/* STSI: STORE SYSTEM INFORMATION — privileged; helper takes the address
   plus function/selector codes from r0/r1 and sets the CC. */
3118 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3120 check_privileged(s);
3121 potential_page_fault(s);
3122 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);

/* SPX: SET PREFIX — privileged; helper consumes the new prefix. */
3127 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3129 check_privileged(s);
3130 gen_helper_spx(cpu_env, o->in2);

/* Stub for I/O subchannel instructions: always report CC 3. */
3134 static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
3136 check_privileged(s);
3137 /* Not operational. */
3138 gen_op_movi_cc(s, 3);

/* STPX: STORE PREFIX — privileged; reads env->psa masked to the
   architected prefix bits (0x7fffe000). */
3142 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3144 check_privileged(s);
3145 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3146 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
/* STNSM/STOSM (shared op, distinguished by opcode 0xac vs not): STORE THEN
   AND/OR SYSTEM MASK — privileged. Stores the current system-mask byte
   first, then ANDs (0xac) or ORs the immediate into PSW mask bits 56..63. */
3150 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3152 uint64_t i2 = get_field(s->fields, i2);
3155 check_privileged(s);
3157 /* It is important to do what the instruction name says: STORE THEN.
3158 If we let the output hook perform the store then if we fault and
3159 restart, we'll have the wrong SYSTEM MASK in place. */
3160 t = tcg_temp_new_i64();
3161 tcg_gen_shri_i64(t, psw_mask, 56);
3162 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3163 tcg_temp_free_i64(t);
3165 if (s->fields->op == 0xac) {
3166 tcg_gen_andi_i64(psw_mask, psw_mask,
3167 (i2 << 56) | 0x00ffffffffffffffull);
3169 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);

/* STURA: STORE USING REAL ADDRESS — privileged; helper performs the
   real-mode store (may fault). */
3174 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3176 check_privileged(s);
3177 potential_page_fault(s);
3178 gen_helper_stura(cpu_env, o->in2, o->in1);
/* Plain stores of o->in1 to the address in o->in2, by width. */
3183 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3185 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));

3189 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3191 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));

3195 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3197 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));

3201 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3203 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));

/* STAM: STORE ACCESS MULTIPLE — access registers r1..r3 stored by helper
   (may fault). */
3207 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3209 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3210 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3211 potential_page_fault(s);
3212 gen_helper_stam(cpu_env, r1, o->in2, r3);
3213 tcg_temp_free_i32(r1);
3214 tcg_temp_free_i32(r3);
/* STCM: STORE CHARACTERS UNDER MASK — stores the bytes of o->in1 selected
   by mask m3, starting at bit position `base` (from insn->data, which
   distinguishes the STCM/STCMH variants).
   NOTE(review): the extraction dropped the switch/case labels here; the
   three "effectively a N-bit store" arms presumably correspond to the
   contiguous masks (0xf, 0xc/0x3, single bit) and the final loop handles
   arbitrary masks byte by byte — confirm against the full source. */
3218 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3220 int m3 = get_field(s->fields, m3);
3221 int pos, base = s->insn->data;
3222 TCGv_i64 tmp = tcg_temp_new_i64();
3224 pos = base + ctz32(m3) * 8;
3227 /* Effectively a 32-bit store. */
3228 tcg_gen_shri_i64(tmp, o->in1, pos);
3229 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3235 /* Effectively a 16-bit store. */
3236 tcg_gen_shri_i64(tmp, o->in1, pos);
3237 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3244 /* Effectively an 8-bit store. */
3245 tcg_gen_shri_i64(tmp, o->in1, pos);
3246 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3250 /* This is going to be a sequence of shifts and stores. */
3251 pos = base + 32 - 8;
3254 tcg_gen_shri_i64(tmp, o->in1, pos);
3255 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3256 tcg_gen_addi_i64(o->in2, o->in2, 1);
3258 m3 = (m3 << 1) & 0xf;
3263 tcg_temp_free_i64(tmp);
/* STM/STMG: STORE MULTIPLE — stores regs r1..r3 at consecutive addresses;
   insn->data supplies the element size (4 or 8 per the two store widths). */
3267 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3269 int r1 = get_field(s->fields, r1);
3270 int r3 = get_field(s->fields, r3);
3271 int size = s->insn->data;
3272 TCGv_i64 tsize = tcg_const_i64(size);
3276 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3278 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3283 tcg_gen_add_i64(o->in2, o->in2, tsize);
3287 tcg_temp_free_i64(tsize);

/* STMH: STORE MULTIPLE HIGH — stores the high 32 bits of regs r1..r3
   (shift left by 32 so the qemu_st32 picks up the high half). */
3291 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3293 int r1 = get_field(s->fields, r1);
3294 int r3 = get_field(s->fields, r3);
3295 TCGv_i64 t = tcg_temp_new_i64();
3296 TCGv_i64 t4 = tcg_const_i64(4);
3297 TCGv_i64 t32 = tcg_const_i64(32);
3300 tcg_gen_shl_i64(t, regs[r1], t32);
3301 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3305 tcg_gen_add_i64(o->in2, o->in2, t4);
3309 tcg_temp_free_i64(t);
3310 tcg_temp_free_i64(t4);
3311 tcg_temp_free_i64(t32);
/* SRST: SEARCH STRING — helper does the work; updated second address is
   returned in the low-128 temp and written back to o->in2. */
3315 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3317 potential_page_fault(s);
3318 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3320 return_low128(o->in2);

/* Plain 64-bit subtraction: out = in1 - in2. */
3324 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3326 tcg_gen_sub_i64(o->out, o->in1, o->in2);

/* Subtract with borrow: out = in1 + ~in2 + borrow, where the borrow is
   recovered from the CC value (cc >> 1 extracts the relevant CC bit —
   NOTE(review): relies on the SUBU CC encoding; confirm against cc_op). */
3330 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3335 tcg_gen_not_i64(o->in2, o->in2);
3336 tcg_gen_add_i64(o->out, o->in1, o->in2);
3338 /* XXX possible optimization point */
3340 cc = tcg_temp_new_i64();
3341 tcg_gen_extu_i32_i64(cc, cc_op);
3342 tcg_gen_shri_i64(cc, cc, 1);
3343 tcg_gen_add_i64(o->out, o->out, cc);
3344 tcg_temp_free_i64(cc);
/* SVC: SUPERVISOR CALL — records the call number (low byte of i1) and the
   instruction length in env, then raises the SVC exception; translation
   stops (EXIT_NORETURN). */
3348 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3355 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3356 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3357 tcg_temp_free_i32(t);
3359 t = tcg_const_i32(s->next_pc - s->pc);
3360 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3361 tcg_temp_free_i32(t);
3363 gen_exception(EXCP_SVC);
3364 return EXIT_NORETURN;
/* TCEB/TCDB/TCXB: TEST DATA CLASS (short/long/extended BFP) — the helper
   computes the CC directly from the value and the class mask in o->in2. */
3367 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3369 gen_helper_tceb(cc_op, o->in1, o->in2);

3374 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3376 gen_helper_tcdb(cc_op, o->in1, o->in2);

3381 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3383 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);

3388 #ifndef CONFIG_USER_ONLY
/* TPROT: TEST PROTECTION — system-mode only; helper sets the CC. */
3389 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3391 potential_page_fault(s);
3392 gen_helper_tprot(cc_op, o->addr1, o->in2);

/* TR: TRANSLATE — table-driven byte translation via helper (may fault). */
3398 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3400 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3401 potential_page_fault(s);
3402 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3403 tcg_temp_free_i32(l);

/* UNPK: UNPACK — decimal unpack via helper (may fault). */
3408 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3410 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3411 potential_page_fault(s);
3412 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3413 tcg_temp_free_i32(l);
/* XC: EXCLUSIVE OR (character) — two paths:
   1. If both operands name the same storage (same base and displacement),
      x XOR x == 0, so emit an inline memset-to-zero for short lengths
      (l + 1 <= 32 bytes) using the widest stores available; CC is 0.
   2. Otherwise defer to the helper, which also computes the CC.
   NOTE(review): extraction dropped the loop/branch lines between the
   different store widths; comments describe only the visible statements. */
3417 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3419 int d1 = get_field(s->fields, d1);
3420 int d2 = get_field(s->fields, d2);
3421 int b1 = get_field(s->fields, b1);
3422 int b2 = get_field(s->fields, b2);
3423 int l = get_field(s->fields, l1);
3426 o->addr1 = get_address(s, 0, b1, d1);
3428 /* If the addresses are identical, this is a store/memset of zero. */
3429 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
3430 o->in2 = tcg_const_i64(0);
3434 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
3437 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
3441 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
3444 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
3448 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
3451 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
3455 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
3457 gen_op_movi_cc(s, 0);
3461 /* But in general we'll defer to a helper. */
3462 o->in2 = get_address(s, 0, b2, d2);
3463 t32 = tcg_const_i32(l);
3464 potential_page_fault(s);
3465 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
3466 tcg_temp_free_i32(t32);
/* Plain 64-bit XOR: out = in1 ^ in2. */
3471 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3473 tcg_gen_xor_i64(o->out, o->in1, o->in2);

/* Immediate XOR on a sub-field of the register: insn->data packs the
   field's bit shift (low byte) and size; the immediate is positioned with
   a shift, XORed in, and the CC computed from only the touched bits. */
3477 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3479 int shift = s->insn->data & 0xff;
3480 int size = s->insn->data >> 8;
3481 uint64_t mask = ((1ull << size) - 1) << shift;
3484 tcg_gen_shli_i64(o->in2, o->in2, shift);
3485 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3487 /* Produce the CC from only the bits manipulated. */
3488 tcg_gen_andi_i64(cc_dst, o->out, mask);
3489 set_cc_nz_u64(s, cc_dst);

/* Produce a constant-zero output (single and paired variants). */
3493 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
3495 o->out = tcg_const_i64(0);

3499 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
3501 o->out = tcg_const_i64(0);
3507 /* ====================================================================== */
3508 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3509 the original inputs), update the various cc data structures in order to
3510 be able to compute the new condition code. */

/* Each cout_* helper records a CC_OP_* opcode plus 1-3 operand values via
   gen_op_update{1,2,3}_cc_i64; the CC itself is computed lazily later. */

3512 static void cout_abs32(DisasContext *s, DisasOps *o)
3514 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);

3517 static void cout_abs64(DisasContext *s, DisasOps *o)
3519 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);

3522 static void cout_adds32(DisasContext *s, DisasOps *o)
3524 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);

3527 static void cout_adds64(DisasContext *s, DisasOps *o)
3529 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);

3532 static void cout_addu32(DisasContext *s, DisasOps *o)
3534 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);

3537 static void cout_addu64(DisasContext *s, DisasOps *o)
3539 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);

3542 static void cout_addc32(DisasContext *s, DisasOps *o)
3544 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);

3547 static void cout_addc64(DisasContext *s, DisasOps *o)
3549 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);

3552 static void cout_cmps32(DisasContext *s, DisasOps *o)
3554 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);

3557 static void cout_cmps64(DisasContext *s, DisasOps *o)
3559 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);

3562 static void cout_cmpu32(DisasContext *s, DisasOps *o)
3564 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);

3567 static void cout_cmpu64(DisasContext *s, DisasOps *o)
3569 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);

3572 static void cout_f32(DisasContext *s, DisasOps *o)
3574 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);

3577 static void cout_f64(DisasContext *s, DisasOps *o)
3579 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);

3582 static void cout_f128(DisasContext *s, DisasOps *o)
3584 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);

3587 static void cout_nabs32(DisasContext *s, DisasOps *o)
3589 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);

3592 static void cout_nabs64(DisasContext *s, DisasOps *o)
3594 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);

3597 static void cout_neg32(DisasContext *s, DisasOps *o)
3599 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);

3602 static void cout_neg64(DisasContext *s, DisasOps *o)
3604 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);

/* 32-bit non-zero test: truncate to 32 bits first so garbage in the high
   half of o->out doesn't affect the CC. */
3607 static void cout_nz32(DisasContext *s, DisasOps *o)
3609 tcg_gen_ext32u_i64(cc_dst, o->out);
3610 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);

3613 static void cout_nz64(DisasContext *s, DisasOps *o)
3615 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);

3618 static void cout_s32(DisasContext *s, DisasOps *o)
3620 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);

3623 static void cout_s64(DisasContext *s, DisasOps *o)
3625 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);

3628 static void cout_subs32(DisasContext *s, DisasOps *o)
3630 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);

3633 static void cout_subs64(DisasContext *s, DisasOps *o)
3635 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);

3638 static void cout_subu32(DisasContext *s, DisasOps *o)
3640 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);

3643 static void cout_subu64(DisasContext *s, DisasOps *o)
3645 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);

3648 static void cout_subb32(DisasContext *s, DisasOps *o)
3650 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);

3653 static void cout_subb64(DisasContext *s, DisasOps *o)
3655 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);

3658 static void cout_tm32(DisasContext *s, DisasOps *o)
3660 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);

3663 static void cout_tm64(DisasContext *s, DisasOps *o)
3665 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3668 /* ====================================================================== */
3669 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3670 with the TCG register to which we will write. Used in combination with
3671 the "wout" generators, in some cases we need a new temporary, and in
3672 some cases we can write to a TCG global. */

/* Each prep_* is paired with a SPEC_prep_* mask of specification-exception
   checks (0 = none) that translate_one applies before emitting the op. */

/* Fresh temporary as the single output. */
3674 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3676 o->out = tcg_temp_new_i64();
3678 #define SPEC_prep_new 0

/* Fresh temporary pair (128-bit results). */
3680 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3682 o->out = tcg_temp_new_i64();
3683 o->out2 = tcg_temp_new_i64();
3685 #define SPEC_prep_new_P 0

/* Write directly into the r1 GPR global. */
3687 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3689 o->out = regs[get_field(f, r1)];
3692 #define SPEC_prep_r1 0

/* Even/odd GPR pair r1/r1+1 as outputs; r1 must be even (SPEC check). */
3694 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3696 int r1 = get_field(f, r1);
3698 o->out2 = regs[r1 + 1];
3699 o->g_out = o->g_out2 = true;
3701 #define SPEC_prep_r1_P SPEC_r1_even

/* Write directly into the f1 FPR global. */
3703 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3705 o->out = fregs[get_field(f, r1)];
3708 #define SPEC_prep_f1 0

/* 128-bit FP register pair f1/f1+2; r1 must name a valid f128 pair. */
3710 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3712 int r1 = get_field(f, r1);
3714 o->out2 = fregs[r1 + 2];
3715 o->g_out = o->g_out2 = true;
3717 #define SPEC_prep_x1 SPEC_r1_f128
3719 /* ====================================================================== */
3720 /* The "Write OUTput" generators. These generally perform some non-trivial
3721 copy of data to TCG globals, or to main memory. The trivial cases are
3722 generally handled by having a "prep" generator install the TCG global
3723 as the destination of the operation. */

/* Full 64-bit writeback to r1. */
3725 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3727 store_reg(get_field(f, r1), o->out);
3729 #define SPEC_wout_r1 0

/* Insert only the low 8 bits of the result into r1. */
3731 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3733 int r1 = get_field(f, r1);
3734 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
3736 #define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the result into r1. */
3738 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3740 int r1 = get_field(f, r1);
3741 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
3743 #define SPEC_wout_r1_16 0

/* 32-bit writeback (low half of r1). */
3745 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3747 store_reg32_i64(get_field(f, r1), o->out);
3749 #define SPEC_wout_r1_32 0

/* 32-bit writeback to the even/odd pair r1, r1+1. */
3751 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3753 int r1 = get_field(f, r1);
3754 store_reg32_i64(r1, o->out);
3755 store_reg32_i64(r1 + 1, o->out2);
3757 #define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit result across the pair: low 32 bits to r1+1, high to r1.
   Note this clobbers o->out with the shifted value. */
3759 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3761 int r1 = get_field(f, r1);
3762 store_reg32_i64(r1 + 1, o->out);
3763 tcg_gen_shri_i64(o->out, o->out, 32);
3764 store_reg32_i64(r1, o->out);
3766 #define SPEC_wout_r1_D32 SPEC_r1_even

/* Short (32-bit) FP writeback. */
3768 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3770 store_freg32_i64(get_field(f, r1), o->out);
3772 #define SPEC_wout_e1 0

/* Long (64-bit) FP writeback. */
3774 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3776 store_freg(get_field(f, r1), o->out);
3778 #define SPEC_wout_f1 0

/* Extended (128-bit) FP writeback to the f1/f1+2 pair. */
3780 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3782 int f1 = get_field(s->fields, r1);
3783 store_freg(f1, o->out);
3784 store_freg(f1 + 2, o->out2);
3786 #define SPEC_wout_x1 SPEC_r1_f128

/* Conditional writebacks: skip the store when r1 == r2 (used by insns
   where the move would be a no-op). */
3788 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3790 if (get_field(f, r1) != get_field(f, r2)) {
3791 store_reg32_i64(get_field(f, r1), o->out);
3794 #define SPEC_wout_cond_r1r2_32 0

3796 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
3798 if (get_field(f, r1) != get_field(f, r2)) {
3799 store_freg32_i64(get_field(f, r1), o->out);
3802 #define SPEC_wout_cond_e1e2 0

/* Memory writebacks of the result through addr1 (or in2), by width. */
3804 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3806 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
3808 #define SPEC_wout_m1_8 0

3810 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3812 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
3814 #define SPEC_wout_m1_16 0

3816 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3818 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
3820 #define SPEC_wout_m1_32 0

3822 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3824 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
3826 #define SPEC_wout_m1_64 0

3828 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3830 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
3832 #define SPEC_wout_m2_32 0
3834 /* ====================================================================== */
3835 /* The "INput 1" generators. These load the first operand to an insn. */

/* The *_o variants alias the TCG global directly (no copy); the plain
   variants load into a fresh temporary, optionally sign/zero-extended. */

3837 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3839 o->in1 = load_reg(get_field(f, r1));
3841 #define SPEC_in1_r1 0

3843 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3845 o->in1 = regs[get_field(f, r1)];
3848 #define SPEC_in1_r1_o 0

3850 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3852 o->in1 = tcg_temp_new_i64();
3853 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
3855 #define SPEC_in1_r1_32s 0

3857 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3859 o->in1 = tcg_temp_new_i64();
3860 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
3862 #define SPEC_in1_r1_32u 0

/* High 32 bits of r1, shifted down. */
3864 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
3866 o->in1 = tcg_temp_new_i64();
3867 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
3869 #define SPEC_in1_r1_sr32 0

/* Odd half of an even/odd pair (r1 must be even per SPEC). */
3871 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
3873 o->in1 = load_reg(get_field(f, r1) + 1);
3875 #define SPEC_in1_r1p1 SPEC_r1_even

3877 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3879 o->in1 = tcg_temp_new_i64();
3880 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
3882 #define SPEC_in1_r1p1_32s SPEC_r1_even

3884 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3886 o->in1 = tcg_temp_new_i64();
3887 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
3889 #define SPEC_in1_r1p1_32u SPEC_r1_even

/* Concatenate the low halves of the pair: r1 high, r1+1 low. */
3891 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3893 int r1 = get_field(f, r1);
3894 o->in1 = tcg_temp_new_i64();
3895 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3897 #define SPEC_in1_r1_D32 SPEC_r1_even

3899 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
3901 o->in1 = load_reg(get_field(f, r2));
3903 #define SPEC_in1_r2 0

3905 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
3907 o->in1 = load_reg(get_field(f, r3));
3909 #define SPEC_in1_r3 0

3911 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
3913 o->in1 = regs[get_field(f, r3)];
3916 #define SPEC_in1_r3_o 0

3918 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3920 o->in1 = tcg_temp_new_i64();
3921 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
3923 #define SPEC_in1_r3_32s 0

3925 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3927 o->in1 = tcg_temp_new_i64();
3928 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
3930 #define SPEC_in1_r3_32u 0

/* FP register sources: short (e), long (f), extended pair (x). */
3932 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3934 o->in1 = load_freg32_i64(get_field(f, r1));
3936 #define SPEC_in1_e1 0

3938 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3940 o->in1 = fregs[get_field(f, r1)];
3943 #define SPEC_in1_f1_o 0

3945 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3947 int r1 = get_field(f, r1);
3949 o->out2 = fregs[r1 + 2];
3950 o->g_out = o->g_out2 = true;
3952 #define SPEC_in1_x1_o SPEC_r1_f128

3954 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
3956 o->in1 = fregs[get_field(f, r3)];
3959 #define SPEC_in1_f3_o 0

/* Effective-address sources: base+displacement (la1), with index (la2). */
3961 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
3963 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
3965 #define SPEC_in1_la1 0

3967 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
3969 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
3970 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
3972 #define SPEC_in1_la2 0

/* Memory sources through addr1, by width and extension. */
3974 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3977 o->in1 = tcg_temp_new_i64();
3978 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
3980 #define SPEC_in1_m1_8u 0

3982 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3985 o->in1 = tcg_temp_new_i64();
3986 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
3988 #define SPEC_in1_m1_16s 0

3990 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3993 o->in1 = tcg_temp_new_i64();
3994 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
3996 #define SPEC_in1_m1_16u 0

3998 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4001 o->in1 = tcg_temp_new_i64();
4002 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4004 #define SPEC_in1_m1_32s 0

4006 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4009 o->in1 = tcg_temp_new_i64();
4010 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4012 #define SPEC_in1_m1_32u 0

4014 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4017 o->in1 = tcg_temp_new_i64();
4018 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4020 #define SPEC_in1_m1_64 0
4022 /* ====================================================================== */
4023 /* The "INput 2" generators. These load the second operand to an insn. */

/* Same conventions as in1_*: *_o variants alias globals, the others load
   into fresh temporaries with the named extension. */

4025 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4027 o->in2 = regs[get_field(f, r1)];
4030 #define SPEC_in2_r1_o 0

4032 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4034 o->in2 = tcg_temp_new_i64();
4035 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4037 #define SPEC_in2_r1_16u 0

4039 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4041 o->in2 = tcg_temp_new_i64();
4042 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4044 #define SPEC_in2_r1_32u 0

4046 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4048 o->in2 = load_reg(get_field(f, r2));
4050 #define SPEC_in2_r2 0

4052 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4054 o->in2 = regs[get_field(f, r2)];
4057 #define SPEC_in2_r2_o 0

/* Load r2 only when non-zero (r2 == 0 means "no register" for some insns;
   extraction dropped the surrounding conditional — confirm in full source). */
4059 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4061 int r2 = get_field(f, r2);
4063 o->in2 = load_reg(r2);
4066 #define SPEC_in2_r2_nz 0

4068 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4070 o->in2 = tcg_temp_new_i64();
4071 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4073 #define SPEC_in2_r2_8s 0

4075 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4077 o->in2 = tcg_temp_new_i64();
4078 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4080 #define SPEC_in2_r2_8u 0

4082 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4084 o->in2 = tcg_temp_new_i64();
4085 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4087 #define SPEC_in2_r2_16s 0

4089 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4091 o->in2 = tcg_temp_new_i64();
4092 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4094 #define SPEC_in2_r2_16u 0

4096 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4098 o->in2 = load_reg(get_field(f, r3));
4100 #define SPEC_in2_r3 0

4102 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4104 o->in2 = tcg_temp_new_i64();
4105 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4107 #define SPEC_in2_r2_32s 0

4109 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4111 o->in2 = tcg_temp_new_i64();
4112 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4114 #define SPEC_in2_r2_32u 0

/* FP register sources. */
4116 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4118 o->in2 = load_freg32_i64(get_field(f, r2));
4120 #define SPEC_in2_e2 0

4122 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4124 o->in2 = fregs[get_field(f, r2)];
4127 #define SPEC_in2_f2_o 0

4129 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4131 int r2 = get_field(f, r2);
4133 o->in2 = fregs[r2 + 2];
4134 o->g_in1 = o->g_in2 = true;
4136 #define SPEC_in2_x2_o SPEC_r2_f128

/* Address-style sources: register-as-address, base+index+disp,
   and PC-relative (i2 counts halfwords, hence * 2). */
4138 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4140 o->in2 = get_address(s, 0, get_field(f, r2), 0);
4142 #define SPEC_in2_ra2 0

4144 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4146 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4147 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4149 #define SPEC_in2_a2 0

4151 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4153 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4155 #define SPEC_in2_ri2 0

/* Shift amounts, masked to 31 or 63 bits by help_l2_shift. */
4157 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4159 help_l2_shift(s, f, o, 31);
4161 #define SPEC_in2_sh32 0

4163 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4165 help_l2_shift(s, f, o, 63);
4167 #define SPEC_in2_sh64 0

/* Memory sources: o->in2 holds the address on entry and is overwritten
   with the loaded value, by width/extension. */
4169 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4172 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4174 #define SPEC_in2_m2_8u 0

4176 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4179 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4181 #define SPEC_in2_m2_16s 0

4183 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4186 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4188 #define SPEC_in2_m2_16u 0

4190 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4193 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4195 #define SPEC_in2_m2_32s 0

4197 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4200 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4202 #define SPEC_in2_m2_32u 0

4204 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4207 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4209 #define SPEC_in2_m2_64 0

/* PC-relative memory sources (mri2_*): same load pattern. */
4211 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4214 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4216 #define SPEC_in2_mri2_16u 0

4218 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4221 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4223 #define SPEC_in2_mri2_32s 0

4225 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4228 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4230 #define SPEC_in2_mri2_32u 0

4232 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4235 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4237 #define SPEC_in2_mri2_64 0

/* Immediate sources, with the named truncation; the _shl forms also shift
   the immediate left by insn->data bits. */
4239 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4241 o->in2 = tcg_const_i64(get_field(f, i2));
4243 #define SPEC_in2_i2 0

4245 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4247 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4249 #define SPEC_in2_i2_8u 0

4251 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4253 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4255 #define SPEC_in2_i2_16u 0

4257 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4259 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4261 #define SPEC_in2_i2_32u 0

4263 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4265 uint64_t i2 = (uint16_t)get_field(f, i2);
4266 o->in2 = tcg_const_i64(i2 << s->insn->data);
4268 #define SPEC_in2_i2_16u_shl 0

4270 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4272 uint64_t i2 = (uint32_t)get_field(f, i2);
4273 o->in2 = tcg_const_i64(i2 << s->insn->data);
4275 #define SPEC_in2_i2_32u_shl 0
4277 /* ====================================================================== */
4279 /* Find opc within the table of insns. This is formulated as a switch
4280 statement so that (1) we get compile-time notice of cut-paste errors
4281 for duplicated opcodes, and (2) the compiler generates the binary
4282 search tree, rather than us having to post-process the table. */

/* The C/D macros expand insn-data.def three times below: once to build the
   enum of insn indices, once to build the insn_info[] array (wiring the
   help_* callbacks from the I1/I2/P/W/OP/CC column names), and once to
   build the opcode switch in lookup_opc.
   NOTE(review): several #undef/struct-field lines were dropped by the
   extraction; consult the full source for the complete macro dance. */
4284 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4285 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

4287 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

4289 enum DisasInsnEnum {
4290 #include "insn-data.def"

4294 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4298 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4300 .help_in1 = in1_##I1, \
4301 .help_in2 = in2_##I2, \
4302 .help_prep = prep_##P, \
4303 .help_wout = wout_##W, \
4304 .help_cout = cout_##CC, \
4305 .help_op = op_##OP, \

4309 /* Allow 0 to be used for NULL in the table below. */
4317 #define SPEC_in1_0 0
4318 #define SPEC_in2_0 0
4319 #define SPEC_prep_0 0
4320 #define SPEC_wout_0 0

4322 static const DisasInsn insn_info[] = {
4323 #include "insn-data.def"

/* Binary-search switch over the combined (major << 8 | secondary) opcode. */
4327 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4328 case OPC: return &insn_info[insn_ ## NM];

4330 static const DisasInsn *lookup_opc(uint16_t opc)
4333 #include "insn-data.def"
4342 /* Extract a field from the insn. The INSN should be left-aligned in
4343 the uint64_t so that we can more easily utilize the big-bit-endian
4344 definitions we extract from the Principals of Operation. */

/* Decodes one operand field described by *f out of the left-aligned insn
   word and records it in o->c[], tracking which compressed slots are
   populated. NOTE(review): the switch/assignment lines around the sign
   cases were partly dropped by the extraction. */
4346 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4354 /* Zero extract the field from the insn. */
4355 r = (insn << f->beg) >> (64 - f->size);
4357 /* Sign-extend, or un-swap the field as necessary. */
4359 case 0: /* unsigned */
4361 case 1: /* signed */
4362 assert(f->size <= 32);
4363 m = 1u << (f->size - 1);
4366 case 2: /* dl+dh split, signed 20 bit. */
/* Reassemble the 20-bit displacement: dh (signed high 8 bits) above
   dl (12 bits); the raw field arrives with dl ahead of dh. */
4367 r = ((int8_t)r << 12) | (r >> 8);

4373 /* Validate that the "compressed" encoding we selected above is valid.
4374 I.e. we havn't make two different original fields overlap. */
4375 assert(((o->presentC >> f->indexC) & 1) == 0);
4376 o->presentC |= 1 << f->indexC;
4377 o->presentO |= 1 << f->indexO;
4379 o->c[f->indexC] = r;
4382 /* Lookup the insn at the current PC, extracting the operands into O and
4383 returning the info struct for the insn. Returns NULL for invalid insn. */

/* Reads 2/4/6 bytes based on the ilen derived from the major opcode,
   locates the secondary opcode (position varies by format), looks the
   combined opcode up, and extracts each operand field per the format
   table. NOTE(review): switch/case scaffolding lines were dropped by
   the extraction; comments describe only the visible statements. */
4385 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
4388 uint64_t insn, pc = s->pc;
4390 const DisasInsn *info;

/* First halfword determines the major opcode and thus the total length. */
4392 insn = ld_code2(env, pc);
4393 op = (insn >> 8) & 0xff;
4394 ilen = get_ilen(op);
4395 s->next_pc = s->pc + ilen;

/* Left-align the insn in the 64-bit word for big-bit-endian extraction. */
4402 insn = ld_code4(env, pc) << 32;
4405 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);

4411 /* We can't actually determine the insn format until we've looked up
4412 the full insn opcode. Which we can't do without locating the
4413 secondary opcode. Assume by default that OP2 is at bit 40; for
4414 those smaller insns that don't actually have a secondary opcode
4415 this will correctly result in OP2 = 0. */
4421 case 0xb2: /* S, RRF, RRE */
4422 case 0xb3: /* RRE, RRD, RRF */
4423 case 0xb9: /* RRE, RRF */
4424 case 0xe5: /* SSE, SIL */
/* Secondary opcode in the second byte. */
4425 op2 = (insn << 8) >> 56;
4429 case 0xc0: /* RIL */
4430 case 0xc2: /* RIL */
4431 case 0xc4: /* RIL */
4432 case 0xc6: /* RIL */
4433 case 0xc8: /* SSF */
4434 case 0xcc: /* RIL */
/* Secondary opcode is the low nibble of the second byte. */
4435 op2 = (insn << 12) >> 60;
4437 case 0xd0 ... 0xdf: /* SS */
4443 case 0xee ... 0xf3: /* SS */
4444 case 0xf8 ... 0xfd: /* SS */
/* Default: secondary opcode at bit 40 (byte 5 of a 6-byte insn). */
4448 op2 = (insn << 40) >> 56;

4452 memset(f, 0, sizeof(*f));

4456 /* Lookup the instruction. */
4457 info = lookup_opc(op << 8 | op2);

4459 /* If we found it, extract the operands. */
4461 DisasFormat fmt = info->fmt;
4464 for (i = 0; i < NUM_C_FIELD; ++i) {
4465 extract_field(f, &format_info[fmt].op[i], insn);
/* Translate a single instruction at the current PC: decode it, check
   specification exceptions, then run the decode-table helper pipeline
   (in1 -> in2 -> prep -> op -> wout -> cout) and release temporaries.
   Returns the ExitStatus produced by the operation helper. */
4471 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
4473 const DisasInsn *insn;
4474 ExitStatus ret = NO_EXIT;
4478 /* Search for the insn in the table. */
4479 insn = extract_insn(env, s, &f);
4481 /* Not found means unimplemented/illegal opcode. */
4483 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
4485 gen_illegal_opcode(s);
4486 return EXIT_NORETURN;
4489 /* Check for insn specification exceptions. */
4491 int spec = insn->spec, excp = 0, r;
/* Each SPEC_* flag encodes a register-number constraint from the
   instruction's specification; a violation raises PGM_SPECIFICATION. */
4493 if (spec & SPEC_r1_even) {
4494 r = get_field(&f, r1);
4496 excp = PGM_SPECIFICATION;
4499 if (spec & SPEC_r2_even) {
4500 r = get_field(&f, r2);
4502 excp = PGM_SPECIFICATION;
4505 if (spec & SPEC_r1_f128) {
4506 r = get_field(&f, r1);
4508 excp = PGM_SPECIFICATION;
4511 if (spec & SPEC_r2_f128) {
4512 r = get_field(&f, r2);
4514 excp = PGM_SPECIFICATION;
4518 gen_program_exception(s, excp);
4519 return EXIT_NORETURN;
4523 /* Set up the structures we use to communicate with the helpers. */
/* g_* flags mark values owned by global TCG registers, which must not
   be freed below. */
4526 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
4527 TCGV_UNUSED_I64(o.out);
4528 TCGV_UNUSED_I64(o.out2);
4529 TCGV_UNUSED_I64(o.in1);
4530 TCGV_UNUSED_I64(o.in2);
4531 TCGV_UNUSED_I64(o.addr1);
4533 /* Implement the instruction. */
4534 if (insn->help_in1) {
4535 insn->help_in1(s, &f, &o);
4537 if (insn->help_in2) {
4538 insn->help_in2(s, &f, &o);
4540 if (insn->help_prep) {
4541 insn->help_prep(s, &f, &o);
4543 if (insn->help_op) {
4544 ret = insn->help_op(s, &o);
4546 if (insn->help_wout) {
4547 insn->help_wout(s, &f, &o);
4549 if (insn->help_cout) {
4550 insn->help_cout(s, &o);
4553 /* Free any temporaries created by the helpers. */
4554 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
4555 tcg_temp_free_i64(o.out);
4557 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
4558 tcg_temp_free_i64(o.out2);
4560 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
4561 tcg_temp_free_i64(o.in1);
4563 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
4564 tcg_temp_free_i64(o.in2);
4566 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
4567 tcg_temp_free_i64(o.addr1);
4570 /* Advance to the next instruction. */
/* Core translation loop: translate insns one at a time via
   translate_one() until an exit condition is reached (page boundary,
   op buffer exhausted, insn budget spent, single-stepping, breakpoint,
   or a helper-requested exit). */
4575 static inline void gen_intermediate_code_internal(CPUS390XState *env,
4576 TranslationBlock *tb,
4580 target_ulong pc_start;
4581 uint64_t next_page_start;
4582 uint16_t *gen_opc_end;
4584 int num_insns, max_insns;
/* Without 64-bit addressing the PSW address is confined to 31 bits. */
4592 if (!(tb->flags & FLAG_MASK_64)) {
4593 pc_start &= 0x7fffffff;
4598 dc.cc_op = CC_OP_DYNAMIC;
4599 do_debug = dc.singlestep_enabled = env->singlestep_enabled;
4601 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4603 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
/* A zero insn budget in cflags means "no limit". */
4606 max_insns = tb->cflags & CF_COUNT_MASK;
4607 if (max_insns == 0) {
4608 max_insns = CF_COUNT_MASK;
/* Record per-op metadata (pc, cc_op, icount) used later by
   restore_state_to_opc; gaps are padded with zeroed start markers. */
4615 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4619 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4622 tcg_ctx.gen_opc_pc[lj] = dc.pc;
4623 gen_opc_cc_op[lj] = dc.cc_op;
4624 tcg_ctx.gen_opc_instr_start[lj] = 1;
4625 tcg_ctx.gen_opc_icount[lj] = num_insns;
4627 if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
4631 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4632 tcg_gen_debug_insn_start(dc.pc);
/* Stop before translating an insn that has a breakpoint on it. */
4636 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
4637 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
4638 if (bp->pc == dc.pc) {
4639 status = EXIT_PC_STALE;
4645 if (status == NO_EXIT) {
4646 status = translate_one(env, &dc);
4649 /* If we reach a page boundary, are single stepping,
4650 or exhaust instruction count, stop generation. */
4651 if (status == NO_EXIT
4652 && (dc.pc >= next_page_start
4653 || tcg_ctx.gen_opc_ptr >= gen_opc_end
4654 || num_insns >= max_insns
4656 || env->singlestep_enabled)) {
4657 status = EXIT_PC_STALE;
4659 } while (status == NO_EXIT);
4661 if (tb->cflags & CF_LAST_IO) {
4670 update_psw_addr(&dc);
4672 case EXIT_PC_UPDATED:
4673 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
4674 cc op type is in env */
4676 /* Exit the TB, either by raising a debug exception or by return. */
4678 gen_exception(EXCP_DEBUG);
4687 gen_icount_end(tb, num_insns);
4688 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
/* Backfill the tail of the metadata arrays with non-start markers. */
4690 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4693 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4696 tb->size = dc.pc - pc_start;
4697 tb->icount = num_insns;
4700 #if defined(S390X_DEBUG_DISAS)
4701 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
4702 qemu_log("IN: %s\n", lookup_symbol(pc_start));
4703 log_target_disas(env, pc_start, dc.pc - pc_start, 1);
/* Public entry point for normal translation; the final argument
   presumably selects search-PC bookkeeping (0 = off) -- compare with
   gen_intermediate_code_pc below. */
4709 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4711 gen_intermediate_code_internal(env, tb, 0);
/* Public entry point used when per-op PC metadata is needed (e.g. for
   restore_state_to_opc); passes 1 where gen_intermediate_code passes 0. */
4714 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4716 gen_intermediate_code_internal(env, tb, 1);
4719 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4722 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4723 cc_op = gen_opc_cc_op[pc_pos];
4724 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {