4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
/* NOTE(review): this listing is a sampled excerpt — struct bodies below are
   partially elided; fields shown are the translation block pointer, the
   decoded-insn descriptor, and the single-step flag. */
52 struct TranslationBlock *tb;
53 const DisasInsn *insn;
57 bool singlestep_enabled;
60 /* Information carried about a condition to be evaluated. */
/* Operand pair for a pending comparison, in 64-bit or 32-bit form
   (which one is live presumably depends on an is_64-style flag that is
   elided here — TODO confirm against the full source). */
67 struct { TCGv_i64 a, b; } s64;
68 struct { TCGv_i32 a, b; } s32;
/* Debug-only counters, indexed by cc_op, for branches that could / could
   not be inlined as TCG comparisons (see account_*_branch below). */
74 #ifdef DEBUG_INLINE_BRANCHES
75 static uint64_t inline_branch_hit[CC_OP_MAX];
76 static uint64_t inline_branch_miss[CC_OP_MAX];
/* Compute the link-register value for a branch-and-save at PC.
   When not in 64-bit mode but in 32-bit (31-bit addressing) mode, the
   high bit is set in the saved address; the 64-bit and 24-bit cases are
   elided from this excerpt. */
79 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
81 if (!(s->tb->flags & FLAG_MASK_64)) {
82 if (s->tb->flags & FLAG_MASK_32) {
83 return pc | 0x80000000;
/* Dump CPU state for debugging: PSW (mask/addr/cc), the 16 GPRs, the 16
   FPRs, the control registers (system emulation only), and — when branch
   inlining debug is enabled — the per-cc_op hit/miss counters. */
89 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
/* Symbolic cc name vs. raw cc value: the two formats below are presumably
   selected by an elided #ifdef — TODO confirm. */
95 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
96 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
98 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
99 env->psw.mask, env->psw.addr, env->cc_op);
/* General registers R0..R15. */
102 for (i = 0; i < 16; i++) {
103 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
105 cpu_fprintf(f, "\n");
/* Floating point registers F0..F15 (64-bit image of each). */
111 for (i = 0; i < 16; i++) {
112 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
114 cpu_fprintf(f, "\n");
/* Control registers exist only outside user-only emulation. */
120 #ifndef CONFIG_USER_ONLY
121 for (i = 0; i < 16; i++) {
122 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
124 cpu_fprintf(f, "\n");
131 #ifdef DEBUG_INLINE_BRANCHES
132 for (i = 0; i < CC_OP_MAX; i++) {
/* NOTE(review): %ld vs uint64_t elements — on 32-bit hosts this format
   mismatch is technically UB; PRId64 would be correct. Debug-only path. */
133 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
134 inline_branch_miss[i], inline_branch_hit[i]);
138 cpu_fprintf(f, "\n");
/* TCG global variables mirroring CPUS390XState fields; created once in
   s390x_translate_init() below. */
141 static TCGv_i64 psw_addr;
142 static TCGv_i64 psw_mask;
/* Condition-code machinery: the op selector plus up to three operands
   (src, dst, result) from which the cc is lazily computed. */
144 static TCGv_i32 cc_op;
145 static TCGv_i64 cc_src;
146 static TCGv_i64 cc_dst;
147 static TCGv_i64 cc_vr;
/* Backing storage for the "r0".."r15" / "f0".."f15" register names
   (32 names, up to 3 chars + NUL each). */
149 static char cpu_reg_names[32][4];
150 static TCGv_i64 regs[16];
151 static TCGv_i64 fregs[16];
/* Per-opcode cc_op recording, used to restore state on exceptions. */
153 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
/* One-time creation of the TCG globals declared above, each bound to its
   offset inside CPUS390XState. */
155 void s390x_translate_init(void)
159 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
160 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
161 offsetof(CPUS390XState, psw.addr),
163 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
164 offsetof(CPUS390XState, psw.mask),
167 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
169 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
171 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
173 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
/* General registers r0..r15, names stored in cpu_reg_names[0..15]. */
176 for (i = 0; i < 16; i++) {
177 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
178 regs[i] = tcg_global_mem_new(TCG_AREG0,
179 offsetof(CPUS390XState, regs[i]),
/* FP registers f0..f15, names stored in cpu_reg_names[16..31]; bound to
   the 64-bit double image of each freg. */
183 for (i = 0; i < 16; i++) {
184 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
185 fregs[i] = tcg_global_mem_new(TCG_AREG0,
186 offsetof(CPUS390XState, fregs[i].d),
187 cpu_reg_names[i + 16]);
190 /* register helpers */
/* Return a fresh temp holding a copy of GPR 'reg'. Caller frees. */
195 static TCGv_i64 load_reg(int reg)
197 TCGv_i64 r = tcg_temp_new_i64();
198 tcg_gen_mov_i64(r, regs[reg]);
/* Return a fresh temp holding the short-float (high 32 bits) of FPR 'reg',
   right-justified. Caller frees. */
202 static TCGv_i64 load_freg32_i64(int reg)
204 TCGv_i64 r = tcg_temp_new_i64();
205 tcg_gen_shri_i64(r, fregs[reg], 32);
/* Store all 64 bits of v into GPR 'reg'. */
209 static void store_reg(int reg, TCGv_i64 v)
211 tcg_gen_mov_i64(regs[reg], v);
/* Store all 64 bits of v into FPR 'reg'. */
214 static void store_freg(int reg, TCGv_i64 v)
216 tcg_gen_mov_i64(fregs[reg], v);
219 static void store_reg32_i64(int reg, TCGv_i64 v)
221 /* 32 bit register writes keep the upper half */
222 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
/* Store v into bits 32..63 (the high half) of GPR 'reg', low half kept. */
225 static void store_reg32h_i64(int reg, TCGv_i64 v)
227 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
/* Store a short float into the high half of FPR 'reg'.
   NOTE(review): deposit at offset 32 here targets the short-float slot;
   compare with load_freg32_i64 which shifts right by 32. */
230 static void store_freg32_i64(int reg, TCGv_i64 v)
232 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
/* Fetch the low 64 bits of a 128-bit helper result from env->retxl. */
235 static void return_low128(TCGv_i64 dest)
237 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
/* Write the current translation PC into the architectural psw.addr. */
240 static void update_psw_addr(DisasContext *s)
243 tcg_gen_movi_i64(psw_addr, s->pc);
/* Flush the compile-time cc_op into the runtime cc_op global, unless it
   is already dynamic/static (i.e. already in sync at runtime). */
246 static void update_cc_op(DisasContext *s)
248 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
249 tcg_gen_movi_i32(cc_op, s->cc_op);
/* Body elided in this excerpt; presumably records state needed to unwind
   a memory access fault — TODO confirm against full source. */
253 static void potential_page_fault(DisasContext *s)
/* Big-endian code fetch helpers: 2, 4 and 6 bytes at PC, zero-extended
   into a uint64_t. ld_code6 concatenates a halfword and a word. */
259 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
261 return (uint64_t)cpu_lduw_code(env, pc);
264 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
266 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
269 static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
271 return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
/* Map the PSW address-space-control bits (kept in tb->flags, shifted down
   by 32) to a softmmu memory index; return values elided in this excerpt. */
274 static int get_mem_index(DisasContext *s)
276 switch (s->tb->flags & FLAG_MASK_ASC) {
277 case PSW_ASC_PRIMARY >> 32:
279 case PSW_ASC_SECONDARY >> 32:
281 case PSW_ASC_HOME >> 32:
/* Raise exception 'excp' via helper; does not return to generated code. */
289 static void gen_exception(int excp)
291 TCGv_i32 tmp = tcg_const_i32(excp);
292 gen_helper_exception(cpu_env, tmp);
293 tcg_temp_free_i32(tmp);
/* Raise a program exception with the given code: store the code and the
   instruction length into env, then (after elided PSW bookkeeping)
   trigger EXCP_PGM. */
296 static void gen_program_exception(DisasContext *s, int code)
300 /* Remember what pgm exception this was. */
301 tmp = tcg_const_i32(code);
302 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
303 tcg_temp_free_i32(tmp);
305 tmp = tcg_const_i32(s->next_pc - s->pc);
306 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
307 tcg_temp_free_i32(tmp);
309 /* Advance past instruction. */
316 /* Trigger exception. */
317 gen_exception(EXCP_PGM);
/* Convenience wrappers over gen_program_exception. */
320 static inline void gen_illegal_opcode(DisasContext *s)
322 gen_program_exception(s, PGM_SPECIFICATION);
/* Privileged-operation check: in problem state (PSTATE bit set in the
   shifted tb->flags), raise a privileged-operation exception. */
325 static inline void check_privileged(DisasContext *s)
327 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
328 gen_program_exception(s, PGM_PRIVILEGED);
/* Compute an effective address from base register b2, index register x2
   and displacement d2, honoring 31-bit mode masking. Register-number 0
   means "no register" (the zero-register special cases are elided here). */
332 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
336 /* 31-bitify the immediate part; register contents are dealt with below */
337 if (!(s->tb->flags & FLAG_MASK_64)) {
/* d2 + x2 (+ b2, in the elided branch structure). */
343 tmp = tcg_const_i64(d2);
344 tcg_gen_add_i64(tmp, tmp, regs[x2]);
349 tcg_gen_add_i64(tmp, tmp, regs[b2]);
/* d2 + b2 only. */
353 tmp = tcg_const_i64(d2);
354 tcg_gen_add_i64(tmp, tmp, regs[b2]);
/* Displacement only. */
359 tmp = tcg_const_i64(d2);
362 /* 31-bit mode mask if there are values loaded from registers */
363 if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
364 tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
/* True when cc_src/cc_dst/cc_vr currently hold live data that a new cc
   state would clobber (dynamic/static cc keeps no operand data). */
370 static inline bool live_cc_data(DisasContext *s)
372 return (s->cc_op != CC_OP_DYNAMIC
373 && s->cc_op != CC_OP_STATIC
/* Set the cc to the constant 'val': discard any live operand data and
   record CC_OP_CONST0+val as the compile-time cc state. */
377 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
379 if (live_cc_data(s)) {
380 tcg_gen_discard_i64(cc_src);
381 tcg_gen_discard_i64(cc_dst);
382 tcg_gen_discard_i64(cc_vr);
384 s->cc_op = CC_OP_CONST0 + val;
/* Record a one-operand cc computation: only cc_dst is live. */
387 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
389 if (live_cc_data(s)) {
390 tcg_gen_discard_i64(cc_src);
391 tcg_gen_discard_i64(cc_vr)
393 tcg_gen_mov_i64(cc_dst, dst);
/* Record a two-operand cc computation: cc_src and cc_dst are live. */
397 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
400 if (live_cc_data(s)) {
401 tcg_gen_discard_i64(cc_vr);
403 tcg_gen_mov_i64(cc_src, src);
404 tcg_gen_mov_i64(cc_dst, dst);
/* Record a three-operand cc computation: src, dst and result all live. */
408 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
409 TCGv_i64 dst, TCGv_i64 vr)
411 tcg_gen_mov_i64(cc_src, src);
412 tcg_gen_mov_i64(cc_dst, dst);
413 tcg_gen_mov_i64(cc_vr, vr);
/* Convenience wrappers for common nonzero-test cc updates. */
417 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
419 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
422 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
424 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
427 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
429 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
432 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
434 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
437 /* CC value is in env->cc_op */
438 static void set_cc_static(DisasContext *s)
440 if (live_cc_data(s)) {
441 tcg_gen_discard_i64(cc_src);
442 tcg_gen_discard_i64(cc_dst);
443 tcg_gen_discard_i64(cc_vr);
445 s->cc_op = CC_OP_STATIC;
448 /* calculates cc into cc_op */
/* Materialize the lazily-tracked condition code into the cc_op global,
   dispatching on s->cc_op to pick how many operands the calc_cc helper
   needs (0, 1, 2 or 3). Most switch cases are elided in this excerpt. */
449 static void gen_op_calc_cc(DisasContext *s)
451 TCGv_i32 local_cc_op;
454 TCGV_UNUSED_I32(local_cc_op);
455 TCGV_UNUSED_I64(dummy);
/* dummy stands in for unused helper operands. */
458 dummy = tcg_const_i64(0);
472 local_cc_op = tcg_const_i32(s->cc_op);
488 /* s->cc_op is the cc value */
489 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
492 /* env->cc_op already is the cc value */
/* One-operand case: only cc_dst is meaningful. */
507 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
512 case CC_OP_LTUGTU_32:
513 case CC_OP_LTUGTU_64:
/* Two-operand case: cc_src and cc_dst. */
520 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
/* Three-operand case. */
535 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
538 /* unknown operation - assume 3 arguments and cc_op in env */
539 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
/* Free temporaries that were actually allocated above. */
545 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
546 tcg_temp_free_i32(local_cc_op);
548 if (!TCGV_IS_UNUSED_I64(dummy)) {
549 tcg_temp_free_i64(dummy);
552 /* We now have cc in cc_op as constant */
/* Decide whether a direct goto_tb chaining to 'dest' is permitted:
   dest must lie in the same guest page as the TB start (or the page of
   the current insn), single-stepping must be off, and the TB must not
   end in an I/O instruction. */
556 static int use_goto_tb(DisasContext *s, uint64_t dest)
558 /* NOTE: we handle the case where the TB spans two pages here */
559 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
560 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
561 && !s->singlestep_enabled
562 && !(s->tb->cflags & CF_LAST_IO));
/* Debug accounting: count branches that could not / could be inlined
   as a TCG comparison for the given cc_op. No-ops unless
   DEBUG_INLINE_BRANCHES is defined. */
565 static void account_noninline_branch(DisasContext *s, int cc_op)
567 #ifdef DEBUG_INLINE_BRANCHES
568 inline_branch_miss[cc_op]++;
572 static void account_inline_branch(DisasContext *s, int cc_op)
574 #ifdef DEBUG_INLINE_BRANCHES
575 inline_branch_hit[cc_op]++;
579 /* Table of mask values to comparison codes, given a comparison as input.
580 For such, CC=3 should not be possible. */
/* Index is the 4-bit branch mask (CC0..CC3 left to right); entries come in
   pairs because the low mask bit (CC=3) is a don't-care for compares. */
581 static const TCGCond ltgt_cond[16] = {
582 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
583 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
584 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
585 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
586 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
587 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
588 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
589 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
592 /* Table of mask values to comparison codes, given a logic op as input.
593 For such, only CC=0 and CC=1 should be possible. */
594 static const TCGCond nz_cond[16] = {
595 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
596 TCG_COND_NEVER, TCG_COND_NEVER,
597 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
598 TCG_COND_NE, TCG_COND_NE,
599 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
600 TCG_COND_EQ, TCG_COND_EQ,
601 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
602 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
605 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
606 details required to generate a TCG comparison. */
/* Two phases: (1) map mask+cc_op to a TCG condition, inlining the compare
   when possible, else falling back to computing cc_op; (2) load the
   comparison operands into c->u.{s32,s64}. Many cases elided here. */
607 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
610 enum cc_op old_cc_op = s->cc_op;
/* mask 0/15 is branch-never/always: no operands needed, use globals. */
612 if (mask == 15 || mask == 0) {
613 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
616 c->g1 = c->g2 = true;
621 /* Find the TCG condition for the mask + cc op. */
/* Signed compares use ltgt_cond directly... */
627 cond = ltgt_cond[mask];
628 if (cond == TCG_COND_NEVER) {
631 account_inline_branch(s, old_cc_op);
634 case CC_OP_LTUGTU_32:
635 case CC_OP_LTUGTU_64:
/* ...unsigned compares use the unsigned flavor of the same entry. */
636 cond = tcg_unsigned_cond(ltgt_cond[mask]);
637 if (cond == TCG_COND_NEVER) {
640 account_inline_branch(s, old_cc_op);
/* Logic ops: only zero/nonzero outcomes exist. */
644 cond = nz_cond[mask];
645 if (cond == TCG_COND_NEVER) {
648 account_inline_branch(s, old_cc_op);
663 account_inline_branch(s, old_cc_op);
678 account_inline_branch(s, old_cc_op);
/* Test-under-mask style: only CC0 (no bit) / CC2-ish (bit found). */
682 switch (mask & 0xa) {
683 case 8: /* src == 0 -> no one bit found */
686 case 2: /* src != 0 -> one bit found */
692 account_inline_branch(s, old_cc_op);
/* Add-logical style cc: zero / nonzero / carry out. */
698 case 8 | 2: /* vr == 0 */
701 case 4 | 1: /* vr != 0 */
704 case 8 | 4: /* no carry -> vr >= src */
707 case 2 | 1: /* carry -> vr < src */
713 account_inline_branch(s, old_cc_op);
718 /* Note that CC=0 is impossible; treat it as dont-care. */
720 case 2: /* zero -> op1 == op2 */
723 case 4 | 1: /* !zero -> op1 != op2 */
726 case 4: /* borrow (!carry) -> op1 < op2 */
729 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
735 account_inline_branch(s, old_cc_op);
740 /* Calculate cc value. */
745 /* Jump based on CC. We'll load up the real cond below;
746 the assignment here merely avoids a compiler warning. */
747 account_noninline_branch(s, old_cc_op);
748 old_cc_op = CC_OP_STATIC;
749 cond = TCG_COND_NEVER;
753 /* Load up the arguments of the comparison. */
755 c->g1 = c->g2 = false;
/* 32-bit nonzero test: truncate cc_dst, compare against 0. */
759 c->u.s32.a = tcg_temp_new_i32();
760 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
761 c->u.s32.b = tcg_const_i32(0);
764 case CC_OP_LTUGTU_32:
/* 32-bit compare: truncate both cc_src and cc_dst. */
767 c->u.s32.a = tcg_temp_new_i32();
768 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
769 c->u.s32.b = tcg_temp_new_i32();
770 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
777 c->u.s64.b = tcg_const_i64(0);
781 case CC_OP_LTUGTU_64:
/* 64-bit compare uses the cc globals directly (g1/g2 true = don't free). */
785 c->g1 = c->g2 = true;
/* TM-style: AND src with dst, compare against 0. */
791 c->u.s64.a = tcg_temp_new_i64();
792 c->u.s64.b = tcg_const_i64(0);
793 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
/* Add-logical 32-bit: result in cc_vr; second operand is 0 for eq/ne,
   else cc_src for the carry comparison. */
798 c->u.s32.a = tcg_temp_new_i32();
799 c->u.s32.b = tcg_temp_new_i32();
800 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
801 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
802 tcg_gen_movi_i32(c->u.s32.b, 0);
804 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
811 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
812 c->u.s64.b = tcg_const_i64(0);
/* Static cc: compare the cc value itself against a constant, chosen per
   mask pattern. The c->cond assignments for each case are elided. */
824 case 0x8 | 0x4 | 0x2: /* cc != 3 */
826 c->u.s32.b = tcg_const_i32(3);
828 case 0x8 | 0x4 | 0x1: /* cc != 2 */
830 c->u.s32.b = tcg_const_i32(2);
832 case 0x8 | 0x2 | 0x1: /* cc != 1 */
834 c->u.s32.b = tcg_const_i32(1);
836 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
839 c->u.s32.a = tcg_temp_new_i32();
840 c->u.s32.b = tcg_const_i32(0);
841 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
843 case 0x8 | 0x4: /* cc < 2 */
845 c->u.s32.b = tcg_const_i32(2);
847 case 0x8: /* cc == 0 */
849 c->u.s32.b = tcg_const_i32(0);
851 case 0x4 | 0x2 | 0x1: /* cc != 0 */
853 c->u.s32.b = tcg_const_i32(0);
855 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
858 c->u.s32.a = tcg_temp_new_i32();
859 c->u.s32.b = tcg_const_i32(0);
860 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
862 case 0x4: /* cc == 1 */
864 c->u.s32.b = tcg_const_i32(1);
866 case 0x2 | 0x1: /* cc > 1 */
868 c->u.s32.b = tcg_const_i32(1);
870 case 0x2: /* cc == 2 */
872 c->u.s32.b = tcg_const_i32(2);
874 case 0x1: /* cc == 3 */
876 c->u.s32.b = tcg_const_i32(3);
879 /* CC is masked by something else: (8 >> cc) & mask. */
882 c->u.s32.a = tcg_const_i32(8);
883 c->u.s32.b = tcg_const_i32(0);
884 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
885 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
/* Free the operand temps of a DisasCompare; the elided conditionals
   presumably skip globals (g1/g2) and pick s64 vs s32 per is_64. */
896 static void free_compare(DisasCompare *c)
900 tcg_temp_free_i64(c->u.s64.a);
902 tcg_temp_free_i32(c->u.s32.a);
907 tcg_temp_free_i64(c->u.s64.b);
909 tcg_temp_free_i32(c->u.s32.b);
914 /* ====================================================================== */
915 /* Define the insn format enumeration. */
/* First expansion of insn-format.def: each FMT_<N> enumerator; the F1..F5
   variants discard the field descriptors. */
916 #define F0(N) FMT_##N,
917 #define F1(N, X1) F0(N)
918 #define F2(N, X1, X2) F0(N)
919 #define F3(N, X1, X2, X3) F0(N)
920 #define F4(N, X1, X2, X3, X4) F0(N)
921 #define F5(N, X1, X2, X3, X4, X5) F0(N)
924 #include "insn-format.def"
934 /* Define a structure to hold the decoded fields. We'll store each inside
935 an array indexed by an enum. In order to conserve memory, we'll arrange
936 for fields that do not exist at the same time to overlap, thus the "C"
937 for compact. For checking purposes there is an "O" for original index
938 as well that will be applied to availability bitmaps. */
940 enum DisasFieldIndexO {
963 enum DisasFieldIndexC {
/* presentO is a bitmap over the original (O) indexes; presentC mirrors it
   for the compact (C) indexes. */
997 unsigned presentC:16;
998 unsigned int presentO;
1002 /* This is the way fields are to be accessed out of DisasFields. */
1003 #define have_field(S, F) have_field1((S), FLD_O_##F)
1004 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
/* Test the presence bitmap for original-index field c. */
1006 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1008 return (f->presentO >> c) & 1;
/* Fetch a field value; asserts the field was actually decoded. */
1011 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1012 enum DisasFieldIndexC c)
1014 assert(have_field1(f, o));
1018 /* Describe the layout of each field in each format. */
1019 typedef struct DisasField {
1021 unsigned int size:8;
1022 unsigned int type:2;
1023 unsigned int indexC:6;
1024 enum DisasFieldIndexO indexO:8;
1027 typedef struct DisasFormatInfo {
1028 DisasField op[NUM_C_FIELD];
/* Field-layout macros: { bit offset, bit size, type, compact idx, orig idx }.
   type 0 = raw, 1 = signed immediate, 2 = long displacement (per the I/BDL
   usages below — TODO confirm type encoding against full source). */
1031 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1032 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1033 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1034 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1035 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1036 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1037 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1038 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1039 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1040 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1041 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1042 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1043 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1044 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
/* Second expansion of insn-format.def: per-format field tables. */
1046 #define F0(N) { { } },
1047 #define F1(N, X1) { { X1 } },
1048 #define F2(N, X1, X2) { { X1, X2 } },
1049 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1050 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1051 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1053 static const DisasFormatInfo format_info[] = {
1054 #include "insn-format.def"
1072 /* Generally, we'll extract operands into this structures, operate upon
1073 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1074 of routines below for more details. */
/* g_* flags mark operands that alias TCG globals and must not be freed. */
1076 bool g_out, g_out2, g_in1, g_in2;
1077 TCGv_i64 out, out2, in1, in2;
1081 /* Instructions can place constraints on their operands, raising specification
1082 exceptions if they are violated. To make this easy to automate, each "in1",
1083 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1084 of the following, or 0. To make this easy to document, we'll put the
1085 SPEC_<name> defines next to <name>. */
1087 #define SPEC_r1_even 1
1088 #define SPEC_r2_even 2
1089 #define SPEC_r1_f128 4
1090 #define SPEC_r2_f128 8
1092 /* Return values from translate_one, indicating the state of the TB. */
1094 /* Continue the TB. */
1096 /* We have emitted one or more goto_tb. No fixup required. */
1098 /* We are not using a goto_tb (for whatever reason), but have updated
1099 the PC (for whatever reason), so there's no need to do it again on
1102 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1103 updated the PC for the next instruction to be executed. */
1105 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1106 No following code will be executed. */
/* Facility bits: which architectural facility an insn requires. */
1110 typedef enum DisasFacility {
1111 FAC_Z, /* zarch (default) */
1112 FAC_CASS, /* compare and swap and store */
1113 FAC_CASS2, /* compare and swap and store 2*/
1114 FAC_DFP, /* decimal floating point */
1115 FAC_DFPR, /* decimal floating point rounding */
1116 FAC_DO, /* distinct operands */
1117 FAC_EE, /* execute extensions */
1118 FAC_EI, /* extended immediate */
1119 FAC_FPE, /* floating point extension */
1120 FAC_FPSSH, /* floating point support sign handling */
1121 FAC_FPRGR, /* FPR-GR transfer */
1122 FAC_GIE, /* general instructions extension */
1123 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1124 FAC_HW, /* high-word */
1125 FAC_IEEEE_SIM, /* IEEE exception simulation */
1126 FAC_LOC, /* load/store on condition */
1127 FAC_LD, /* long displacement */
1128 FAC_PC, /* population count */
1129 FAC_SCF, /* store clock fast */
1130 FAC_SFLE, /* store facility list extended */
/* DisasInsn: the decode-table entry — facility requirement plus the
   pipeline of helper callbacks that translate_one drives. */
1136 DisasFacility fac:6;
1141 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1142 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1143 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1144 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1145 void (*help_cout)(DisasContext *, DisasOps *);
1146 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1151 /* ====================================================================== */
1152 /* Miscellaneous helpers, used by several operations. */
/* Compute a shift count into o->in2 from base+displacement, masked to
   'mask' bits; a constant when b2 is 0, else an address computation. */
1154 static void help_l2_shift(DisasContext *s, DisasFields *f,
1155 DisasOps *o, int mask)
1157 int b2 = get_field(f, b2);
1158 int d2 = get_field(f, d2);
1161 o->in2 = tcg_const_i64(d2 & mask);
1163 o->in2 = get_address(s, 0, b2, d2);
1164 tcg_gen_andi_i64(o->in2, o->in2, mask);
/* Emit a direct branch to 'dest': fall through when dest is the next
   insn, chain via goto_tb when allowed, else just set psw_addr and exit. */
1168 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1170 if (dest == s->next_pc) {
1173 if (use_goto_tb(s, dest)) {
1176 tcg_gen_movi_i64(psw_addr, dest);
1177 tcg_gen_exit_tb((tcg_target_long)s->tb);
1178 return EXIT_GOTO_TB;
1180 tcg_gen_movi_i64(psw_addr, dest);
1181 return EXIT_PC_UPDATED;
/* Emit a conditional branch described by C. The target is either the
   relative immediate IMM (dest = pc + 2*imm) or the register value CDEST.
   Handles never/always/branch-to-next specially, then picks among:
   both-exits-goto_tb, fallthru-only-goto_tb, and movcond fallback. */
1185 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1186 bool is_imm, int imm, TCGv_i64 cdest)
1189 uint64_t dest = s->pc + 2 * imm;
1192 /* Take care of the special cases first. */
1193 if (c->cond == TCG_COND_NEVER) {
1198 if (dest == s->next_pc) {
1199 /* Branch to next. */
1203 if (c->cond == TCG_COND_ALWAYS) {
1204 ret = help_goto_direct(s, dest);
/* Register-indirect target. */
1208 if (TCGV_IS_UNUSED_I64(cdest)) {
1209 /* E.g. bcr %r0 -> no branch. */
1213 if (c->cond == TCG_COND_ALWAYS) {
1214 tcg_gen_mov_i64(psw_addr, cdest);
1215 ret = EXIT_PC_UPDATED;
/* Case 1: both the taken and not-taken exits can chain via goto_tb. */
1220 if (use_goto_tb(s, s->next_pc)) {
1221 if (is_imm && use_goto_tb(s, dest)) {
1222 /* Both exits can use goto_tb. */
1225 lab = gen_new_label();
1227 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1229 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1232 /* Branch not taken. */
1234 tcg_gen_movi_i64(psw_addr, s->next_pc);
1235 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
/* Branch taken (label target elided in this excerpt). */
1240 tcg_gen_movi_i64(psw_addr, dest);
1241 tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
1245 /* Fallthru can use goto_tb, but taken branch cannot. */
1246 /* Store taken branch destination before the brcond. This
1247 avoids having to allocate a new local temp to hold it.
1248 We'll overwrite this in the not taken case anyway. */
1250 tcg_gen_mov_i64(psw_addr, cdest);
1253 lab = gen_new_label();
1255 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1257 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1260 /* Branch not taken. */
1263 tcg_gen_movi_i64(psw_addr, s->next_pc);
1264 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
1268 tcg_gen_movi_i64(psw_addr, dest);
1270 ret = EXIT_PC_UPDATED;
1273 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1274 Most commonly we're single-stepping or some other condition that
1275 disables all use of goto_tb. Just update the PC and exit. */
1277 TCGv_i64 next = tcg_const_i64(s->next_pc);
1279 cdest = tcg_const_i64(dest);
/* 64-bit compare can feed movcond directly... */
1283 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
/* ...32-bit compare is widened through setcond + extu first. */
1286 TCGv_i32 t0 = tcg_temp_new_i32();
1287 TCGv_i64 t1 = tcg_temp_new_i64();
1288 TCGv_i64 z = tcg_const_i64(0);
1289 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1290 tcg_gen_extu_i32_i64(t1, t0);
1291 tcg_temp_free_i32(t0);
1292 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1293 tcg_temp_free_i64(t1);
1294 tcg_temp_free_i64(z);
1298 tcg_temp_free_i64(cdest);
1300 tcg_temp_free_i64(next);
1302 ret = EXIT_PC_UPDATED;
1310 /* ====================================================================== */
1311 /* The operations. These perform the bulk of the work for any insn,
1312 usually after the operands have been loaded and output initialized. */
/* Absolute value, 64-bit integer (helper call). */
1314 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1316 gen_helper_abs_i64(o->out, o->in2);
/* FP absolute value = clear the sign bit: short, long, and extended
   (128-bit, two halves) formats. */
1320 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1322 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1326 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1328 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1332 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1334 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1335 tcg_gen_mov_i64(o->out2, o->in2);
/* Integer add. */
1339 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1341 tcg_gen_add_i64(o->out, o->in1, o->in2);
/* Add with carry: carry is bit 1 of the (pre-computed) cc value. */
1345 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1349 tcg_gen_add_i64(o->out, o->in1, o->in2);
1351 /* XXX possible optimization point */
1353 cc = tcg_temp_new_i64();
1354 tcg_gen_extu_i32_i64(cc, cc_op);
1355 tcg_gen_shri_i64(cc, cc, 1);
1357 tcg_gen_add_i64(o->out, o->out, cc);
1358 tcg_temp_free_i64(cc);
/* BFP adds: short (aeb), long (adb), extended (axb, 128-bit result via
   retxl). */
1362 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1364 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1368 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1370 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1374 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1376 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1377 return_low128(o->out2);
/* Bitwise AND. */
1381 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1383 tcg_gen_and_i64(o->out, o->in1, o->in2);
/* AND-immediate on a sub-field of the register: insn->data packs the
   shift (low byte) and field size; bits outside the field are preserved
   via the OR with ~mask, and the cc reflects only the touched bits. */
1387 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1389 int shift = s->insn->data & 0xff;
1390 int size = s->insn->data >> 8;
1391 uint64_t mask = ((1ull << size) - 1) << shift;
1394 tcg_gen_shli_i64(o->in2, o->in2, shift);
1395 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1396 tcg_gen_and_i64(o->out, o->in1, o->in2);
1398 /* Produce the CC from only the bits manipulated. */
1399 tcg_gen_andi_i64(cc_dst, o->out, mask);
1400 set_cc_nz_u64(s, cc_dst);
/* Branch and save: write the link info, then branch to in2 if it is a
   real register target (bcr %r0 stays put). */
1404 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1406 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1407 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1408 tcg_gen_mov_i64(psw_addr, o->in2);
1409 return EXIT_PC_UPDATED;
/* Branch relative and save: link, then direct branch to pc + 2*i2. */
1415 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1417 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1418 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
/* Branch on condition: decode mask m1 into a comparison, then branch to
   either the immediate or the register target. */
1421 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1423 int m1 = get_field(s->fields, m1);
1424 bool is_imm = have_field(s->fields, i2);
1425 int imm = is_imm ? get_field(s->fields, i2) : 0;
1428 disas_jcc(s, &c, m1);
1429 return help_branch(s, &c, is_imm, imm, o->in2);
/* Branch on count, 32-bit: decrement the low half of r1 and branch while
   the 32-bit result is nonzero. */
1432 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1434 int r1 = get_field(s->fields, r1);
1435 bool is_imm = have_field(s->fields, i2);
1436 int imm = is_imm ? get_field(s->fields, i2) : 0;
1440 c.cond = TCG_COND_NE;
1445 t = tcg_temp_new_i64();
1446 tcg_gen_subi_i64(t, regs[r1], 1);
1447 store_reg32_i64(r1, t);
1448 c.u.s32.a = tcg_temp_new_i32();
1449 c.u.s32.b = tcg_const_i32(0);
1450 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1451 tcg_temp_free_i64(t);
1453 return help_branch(s, &c, is_imm, imm, o->in2);
/* Branch on count, 64-bit: decrement r1 in place and branch while
   nonzero. c.u.s64.a aliases the register global (not freed). */
1456 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1458 int r1 = get_field(s->fields, r1);
1459 bool is_imm = have_field(s->fields, i2);
1460 int imm = is_imm ? get_field(s->fields, i2) : 0;
1463 c.cond = TCG_COND_NE;
1468 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1469 c.u.s64.a = regs[r1];
1470 c.u.s64.b = tcg_const_i64(0);
1472 return help_branch(s, &c, is_imm, imm, o->in2);
/* Branch on index (BXH/BXLE), 32-bit: r1 += r3, compare the 32-bit sum
   against the odd register of the r3 pair; insn->data selects LE vs GT. */
1475 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1477 int r1 = get_field(s->fields, r1);
1478 int r3 = get_field(s->fields, r3);
1479 bool is_imm = have_field(s->fields, i2);
1480 int imm = is_imm ? get_field(s->fields, i2) : 0;
1484 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1489 t = tcg_temp_new_i64();
1490 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1491 c.u.s32.a = tcg_temp_new_i32();
1492 c.u.s32.b = tcg_temp_new_i32();
1493 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1494 tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
1495 store_reg32_i64(r1, t);
1496 tcg_temp_free_i64(t);
1498 return help_branch(s, &c, is_imm, imm, o->in2);
/* Branch on index, 64-bit. When r1 == r3|1 the comparand must be copied
   first, since the add below would clobber it. */
1501 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1503 int r1 = get_field(s->fields, r1);
1504 int r3 = get_field(s->fields, r3);
1505 bool is_imm = have_field(s->fields, i2);
1506 int imm = is_imm ? get_field(s->fields, i2) : 0;
1509 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1512 if (r1 == (r3 | 1)) {
1513 c.u.s64.b = load_reg(r3 | 1);
1516 c.u.s64.b = regs[r3 | 1];
1520 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1521 c.u.s64.a = regs[r1];
1524 return help_branch(s, &c, is_imm, imm, o->in2);
/* Compare and branch (CRJ/CGRJ/CLRJ/...): condition from m3 via
   ltgt_cond, made unsigned when insn->data is set; target is either the
   relative immediate i4 or a b4/d4 address. Operands alias globals. */
1527 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1529 int imm, m3 = get_field(s->fields, m3);
1533 c.cond = ltgt_cond[m3];
1534 if (s->insn->data) {
1535 c.cond = tcg_unsigned_cond(c.cond);
1537 c.is_64 = c.g1 = c.g2 = true;
1541 is_imm = have_field(s->fields, i4);
1543 imm = get_field(s->fields, i4);
1546 o->out = get_address(s, 0, get_field(s->fields, b4),
1547 get_field(s->fields, d4));
1550 return help_branch(s, &c, is_imm, imm, o->out);
1553 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1555 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1560 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1562 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1567 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1569 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1574 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1576 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1577 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1578 tcg_temp_free_i32(m3);
1579 gen_set_cc_nz_f32(s, o->in2);
1583 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1585 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1586 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1587 tcg_temp_free_i32(m3);
1588 gen_set_cc_nz_f64(s, o->in2);
1592 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1594 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1595 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1596 tcg_temp_free_i32(m3);
1597 gen_set_cc_nz_f128(s, o->in1, o->in2);
1601 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1603 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1604 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1605 tcg_temp_free_i32(m3);
1606 gen_set_cc_nz_f32(s, o->in2);
1610 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1612 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1613 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1614 tcg_temp_free_i32(m3);
1615 gen_set_cc_nz_f64(s, o->in2);
1619 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1621 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1622 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1623 tcg_temp_free_i32(m3);
1624 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* CONVERT TO LOGICAL (unsigned-integer results).  Same shape as the
   CONVERT TO FIXED group above: M3 is the rounding-mode field, the
   helper does the conversion, CC comes from an nz test of the source. */

/* 32-bit unsigned result from short BFP. */
1628 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1630 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1631 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1632 tcg_temp_free_i32(m3);
1633 gen_set_cc_nz_f32(s, o->in2);

/* 32-bit unsigned result from long BFP. */
1637 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1639 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1640 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1641 tcg_temp_free_i32(m3);
1642 gen_set_cc_nz_f64(s, o->in2);

/* 32-bit unsigned result from extended BFP (in1:in2 pair). */
1646 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1648 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1649 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1650 tcg_temp_free_i32(m3);
1651 gen_set_cc_nz_f128(s, o->in1, o->in2);

/* 64-bit unsigned result from short BFP. */
1655 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1657 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1658 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1659 tcg_temp_free_i32(m3);
1660 gen_set_cc_nz_f32(s, o->in2);

/* 64-bit unsigned result from long BFP. */
1664 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1666 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1667 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1668 tcg_temp_free_i32(m3);
1669 gen_set_cc_nz_f64(s, o->in2);

/* 64-bit unsigned result from extended BFP. */
1673 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1675 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1676 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1677 tcg_temp_free_i32(m3);
1678 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* CONVERT FROM FIXED / CONVERT FROM LOGICAL: integer (in2) to BFP.
   M3 again carries the rounding mode; no CC is computed here (none
   is set by these insns).  The extended-format variants return the
   low half of the 128-bit result via return_low128. */

/* signed 64-bit -> short BFP. */
1682 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1684 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1685 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1686 tcg_temp_free_i32(m3);

/* signed 64-bit -> long BFP. */
1690 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1692 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1693 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1694 tcg_temp_free_i32(m3);

/* signed 64-bit -> extended BFP (128-bit result). */
1698 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1700 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1701 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1702 tcg_temp_free_i32(m3);
1703 return_low128(o->out2);

/* unsigned 64-bit -> short BFP. */
1707 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1709 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1710 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1711 tcg_temp_free_i32(m3);

/* unsigned 64-bit -> long BFP. */
1715 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1717 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1718 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1719 tcg_temp_free_i32(m3);

/* unsigned 64-bit -> extended BFP (128-bit result). */
1723 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1725 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1726 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1727 tcg_temp_free_i32(m3);
1728 return_low128(o->out2);
/* CHECKSUM (CKSM).  The helper consumes the operand described by
   R2/R2+1, returns the processed length in 'len', and the 128-bit
   result via return_low128; R2 is then advanced and R2+1 reduced
   by that length, as the architecture requires. */
1732 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1734 int r2 = get_field(s->fields, r2);
1735 TCGv_i64 len = tcg_temp_new_i64();
1737 potential_page_fault(s);
1738 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1740 return_low128(o->out);
1742 tcg_gen_add_i64(regs[r2], regs[r2], len);
1743 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1744 tcg_temp_free_i64(len);

/* COMPARE LOGICAL (CLC).  Lengths of exactly 1/2/4/8 bytes are inlined
   as two zero-extending loads into cc_src/cc_dst, with CC computed via
   CC_OP_LTUGTU_64; any other length is handled by the clc helper,
   which sets cc_op itself. */
1749 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1751 int l = get_field(s->fields, l1);
1756 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1757 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1760 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1761 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1764 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1765 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1768 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1769 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1772 potential_page_fault(s);
1773 vl = tcg_const_i32(l);
1774 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1775 tcg_temp_free_i32(vl);
1779 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
/* COMPARE LOGICAL LONG EXTENDED: fully delegated to the helper,
   which sets cc_op and may fault (hence potential_page_fault). */
1783 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1785 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1786 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1787 potential_page_fault(s);
1788 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1789 tcg_temp_free_i32(r1);
1790 tcg_temp_free_i32(r3);

/* COMPARE LOGICAL CHARACTERS UNDER MASK: the register operand is
   truncated to 32 bits before being passed to the helper. */
1795 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1797 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1798 TCGv_i32 t1 = tcg_temp_new_i32();
1799 tcg_gen_trunc_i64_i32(t1, o->in1);
1800 potential_page_fault(s);
1801 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1803 tcg_temp_free_i32(t1);
1804 tcg_temp_free_i32(m3);

/* COMPARE LOGICAL STRING: terminator character comes from regs[0];
   updated addresses are returned in in1 and (via low128) in2. */
1808 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1810 potential_page_fault(s);
1811 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1813 return_low128(o->in2);

/* COPY SIGN: out = sign bit of in1 combined with magnitude of in2,
   done with plain bit operations on the 64-bit FP image. */
1817 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1819 TCGv_i64 t = tcg_temp_new_i64();
1820 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1821 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1822 tcg_gen_or_i64(o->out, o->out, t);
1823 tcg_temp_free_i64(t);
/* COMPARE AND SWAP (32-bit): helper performs the atomic compare/swap
   against memory at in2 with comparand in1 and new value regs[r3]. */
1827 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1829 int r3 = get_field(s->fields, r3);
1830 potential_page_fault(s);
1831 gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);

/* COMPARE AND SWAP (64-bit). */
1836 static ExitStatus op_csg(DisasContext *s, DisasOps *o)
1838 int r3 = get_field(s->fields, r3);
1839 potential_page_fault(s);
1840 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);

1845 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE: privileged; helper sets cc_op. */
1846 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
1848 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1849 check_privileged(s);
1850 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
1851 tcg_temp_free_i32(r1);

/* COMPARE DOUBLE AND SWAP (32-bit pair): the R3/R3+1 pair is packed
   into one 64-bit value so the operation can reuse the CSG helper. */
1857 static ExitStatus op_cds(DisasContext *s, DisasOps *o)
1859 int r3 = get_field(s->fields, r3);
1860 TCGv_i64 in3 = tcg_temp_new_i64();
1861 tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
1862 potential_page_fault(s);
1863 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
1864 tcg_temp_free_i64(in3);

/* COMPARE DOUBLE AND SWAP (64-bit pair, 128-bit memory operand):
   done entirely in the helper. */
1869 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1871 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1872 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1873 potential_page_fault(s);
1874 /* XXX rewrite in tcg */
1875 gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);

/* CONVERT TO DECIMAL: helper converts the 32-bit value to packed
   decimal; the 64-bit result is stored at the in2 address. */
1880 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
1882 TCGv_i64 t1 = tcg_temp_new_i64();
1883 TCGv_i32 t2 = tcg_temp_new_i32();
1884 tcg_gen_trunc_i64_i32(t2, o->in1);
1885 gen_helper_cvd(t1, t2);
1886 tcg_temp_free_i32(t2);
1887 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
1888 tcg_temp_free_i64(t1);
/* COMPARE AND TRAP.  Branch past the trap when the (inverted)
   comparison condition does NOT call for a trap; s->insn->data
   selects the unsigned (logical) variant.  On the trap path, DXC is
   set to 0xff in the FPC and a DATA program exception is raised. */
1892 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
1894 int m3 = get_field(s->fields, m3);
1895 int lab = gen_new_label();
1899 c = tcg_invert_cond(ltgt_cond[m3]);
1900 if (s->insn->data) {
1901 c = tcg_unsigned_cond(c);
1903 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
1905 /* Set DXC to 0xff. */
1906 t = tcg_temp_new_i32();
1907 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
1908 tcg_gen_ori_i32(t, t, 0xff00);
1909 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
1910 tcg_temp_free_i32(t);
1913 gen_program_exception(s, PGM_DATA);

1919 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE: privileged hypercall-style instruction; the function
   code is taken from the D2 displacement field. */
1920 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
1924 check_privileged(s);
1925 potential_page_fault(s);
1927 /* We pretend the format is RX_a so that D2 is the field we want. */
1928 tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
1929 gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
1930 tcg_temp_free_i32(tmp);
/* Integer divides: helpers return quotient+remainder; the second
   half of the pair comes back via return_low128.  Note op_divu64
   passes out/out2 as the 128-bit dividend, unlike the others. */
1935 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
1937 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
1938 return_low128(o->out);

1942 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
1944 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
1945 return_low128(o->out);

1949 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
1951 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
1952 return_low128(o->out);

1956 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
1958 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
1959 return_low128(o->out);

/* BFP divides (short/long/extended). */
1963 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
1965 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);

1969 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
1971 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);

1975 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
1977 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1978 return_low128(o->out2);

/* EXTRACT ACCESS REGISTER: read aregs[r2] into the output. */
1982 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
1984 int r2 = get_field(s->fields, r2);
1985 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));

/* EXTRACT FPC: read the FP control register. */
1989 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
1991 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));

/* EXECUTE: implemented entirely in the helper (see comment). */
1995 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
1997 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
1998 tb->flags, (ab)use the tb->cs_base field as the address of
1999 the template in memory, and grab 8 bits of tb->flags/cflags for
2000 the contents of the register. We would then recognize all this
2001 in gen_intermediate_code_internal, generating code for exactly
2002 one instruction. This new TB then gets executed normally.
2004 On the other hand, this seems to be mostly used for modifying
2005 MVC inside of memcpy, which needs a helper call anyway. So
2006 perhaps this doesn't bear thinking about any further. */
2013 tmp = tcg_const_i64(s->next_pc);
2014 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2015 tcg_temp_free_i64(tmp);
/* FIND LEFTMOST ONE: out = CLZ(in) (64 if in == 0); out2 = input
   with the found bit cleared.  See inline comments for the clever
   CC and shift-by-64 handling. */
2021 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2023 /* We'll use the original input for cc computation, since we get to
2024 compare that against 0, which ought to be better than comparing
2025 the real output against 64. It also lets cc_dst be a convenient
2026 temporary during our computation. */
2027 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2029 /* R1 = IN ? CLZ(IN) : 64. */
2030 gen_helper_clz(o->out, o->in2);
2032 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2033 value by 64, which is undefined. But since the shift is 64 iff the
2034 input is zero, we still get the correct result after and'ing. */
2035 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2036 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2037 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);

/* INSERT CHARACTERS UNDER MASK.  Contiguous masks (0xf/0xc/0x8 style)
   become a single sized load plus one deposit; arbitrary masks fall
   back to a byte-by-byte load/insert loop.  CC uses CC_OP_ICM with
   the covered-bits mask. */
2041 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2043 int m3 = get_field(s->fields, m3);
2044 int pos, len, base = s->insn->data;
2045 TCGv_i64 tmp = tcg_temp_new_i64();
2050 /* Effectively a 32-bit load. */
2051 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2058 /* Effectively a 16-bit load. */
2059 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2067 /* Effectively an 8-bit load. */
2068 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2073 pos = base + ctz32(m3) * 8;
2074 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2075 ccm = ((1ull << len) - 1) << pos;
2079 /* This is going to be a sequence of loads and inserts. */
2080 pos = base + 32 - 8;
2084 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2085 tcg_gen_addi_i64(o->in2, o->in2, 1);
2086 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2089 m3 = (m3 << 1) & 0xf;
2095 tcg_gen_movi_i64(tmp, ccm);
2096 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2097 tcg_temp_free_i64(tmp);
/* Generic bit-field insert: shift and size are packed into
   s->insn->data (low byte = shift, high bits = size). */
2101 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2103 int shift = s->insn->data & 0xff;
2104 int size = s->insn->data >> 8;
2105 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);

/* INSERT PROGRAM MASK: build bits 24-31 of the result from the
   program mask (extracted from psw_mask) and the condition code. */
2109 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2114 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2116 t1 = tcg_temp_new_i64();
2117 tcg_gen_shli_i64(t1, psw_mask, 20);
2118 tcg_gen_shri_i64(t1, t1, 36);
2119 tcg_gen_or_i64(o->out, o->out, t1);
2121 tcg_gen_extu_i32_i64(t1, cc_op);
2122 tcg_gen_shli_i64(t1, t1, 28);
2123 tcg_gen_or_i64(o->out, o->out, t1);
2124 tcg_temp_free_i64(t1);

2128 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY: privileged, helper only. */
2129 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2131 check_privileged(s);
2132 gen_helper_ipte(cpu_env, o->in1, o->in2);

/* INSERT STORAGE KEY EXTENDED: privileged, helper only. */
2136 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2138 check_privileged(s);
2139 gen_helper_iske(o->out, cpu_env, o->in2);

/* BFP format conversions (LOAD LENGTHENED / LOAD ROUNDED family);
   extended-format results return their low half via return_low128. */
2144 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2146 gen_helper_ldeb(o->out, cpu_env, o->in2);

2150 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2152 gen_helper_ledb(o->out, cpu_env, o->in2);

2156 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2158 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);

2162 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2164 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);

2168 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2170 gen_helper_lxdb(o->out, cpu_env, o->in2);
2171 return_low128(o->out2);

2175 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2177 gen_helper_lxeb(o->out, cpu_env, o->in2);
2178 return_low128(o->out2);
/* LOAD LOGICAL THIRTY ONE BITS: mask to the low 31 bits. */
2182 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2184 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);

/* Plain memory loads of each width, signed and unsigned. */
2188 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2190 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));

2194 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2196 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));

2200 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2202 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));

2206 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2208 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));

2212 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2214 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));

2218 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2220 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));

2224 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2226 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));

/* LOAD ON CONDITION: use movcond directly for 64-bit conditions;
   for 32-bit conditions, materialize the condition as 0/1, widen,
   and select against zero. */
2230 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2234 disas_jcc(s, &c, get_field(s->fields, m3));
2237 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2241 TCGv_i32 t32 = tcg_temp_new_i32();
2244 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2247 t = tcg_temp_new_i64();
2248 tcg_gen_extu_i32_i64(t, t32);
2249 tcg_temp_free_i32(t32);
2251 z = tcg_const_i64(0);
2252 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2253 tcg_temp_free_i64(t);
2254 tcg_temp_free_i64(z);
2260 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged; helper loads CRs r1..r3. */
2261 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2263 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2264 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2265 check_privileged(s);
2266 potential_page_fault(s);
2267 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2268 tcg_temp_free_i32(r1);
2269 tcg_temp_free_i32(r3);

/* LOAD CONTROL (64-bit). */
2273 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2275 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2276 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2277 check_privileged(s);
2278 potential_page_fault(s);
2279 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2280 tcg_temp_free_i32(r1);
2281 tcg_temp_free_i32(r3);

/* LOAD REAL ADDRESS: privileged address translation via helper. */
2284 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2286 check_privileged(s);
2287 potential_page_fault(s);
2288 gen_helper_lra(o->out, cpu_env, o->in2);

/* LOAD PSW (short, 8-byte operand): read 32-bit mask and address,
   widen the mask to the 64-bit PSW format, and install it.  Control
   flow cannot continue in this TB afterwards (EXIT_NORETURN). */
2293 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2297 check_privileged(s);
2299 t1 = tcg_temp_new_i64();
2300 t2 = tcg_temp_new_i64();
2301 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2302 tcg_gen_addi_i64(o->in2, o->in2, 4);
2303 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2304 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2305 tcg_gen_shli_i64(t1, t1, 32);
2306 gen_helper_load_psw(cpu_env, t1, t2);
2307 tcg_temp_free_i64(t1);
2308 tcg_temp_free_i64(t2);
2309 return EXIT_NORETURN;

/* LOAD PSW EXTENDED (16-byte operand): same, with 64-bit halves. */
2312 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2316 check_privileged(s);
2318 t1 = tcg_temp_new_i64();
2319 t2 = tcg_temp_new_i64();
2320 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2321 tcg_gen_addi_i64(o->in2, o->in2, 8);
2322 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2323 gen_helper_load_psw(cpu_env, t1, t2);
2324 tcg_temp_free_i64(t1);
2325 tcg_temp_free_i64(t2);
2326 return EXIT_NORETURN;
/* LOAD ACCESS MULTIPLE: helper loads access registers r1..r3. */
2330 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2332 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2333 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2334 potential_page_fault(s);
2335 gen_helper_lam(cpu_env, r1, o->in2, r3);
2336 tcg_temp_free_i32(r1);
2337 tcg_temp_free_i32(r3);

/* LOAD MULTIPLE (32-bit): loop over r1..r3, one 4-byte load each,
   storing into the low half of each GPR; address advances by 4. */
2341 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2343 int r1 = get_field(s->fields, r1);
2344 int r3 = get_field(s->fields, r3);
2345 TCGv_i64 t = tcg_temp_new_i64();
2346 TCGv_i64 t4 = tcg_const_i64(4);
2349 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2350 store_reg32_i64(r1, t);
2354 tcg_gen_add_i64(o->in2, o->in2, t4);
2358 tcg_temp_free_i64(t);
2359 tcg_temp_free_i64(t4);

/* LOAD MULTIPLE HIGH: same loop, but targets the high halves. */
2363 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2365 int r1 = get_field(s->fields, r1);
2366 int r3 = get_field(s->fields, r3);
2367 TCGv_i64 t = tcg_temp_new_i64();
2368 TCGv_i64 t4 = tcg_const_i64(4);
2371 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2372 store_reg32h_i64(r1, t);
2376 tcg_gen_add_i64(o->in2, o->in2, t4);
2380 tcg_temp_free_i64(t);
2381 tcg_temp_free_i64(t4);

/* LOAD MULTIPLE (64-bit): full-register 8-byte loads. */
2385 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2387 int r1 = get_field(s->fields, r1);
2388 int r3 = get_field(s->fields, r3);
2389 TCGv_i64 t8 = tcg_const_i64(8);
2392 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2396 tcg_gen_add_i64(o->in2, o->in2, t8);
2400 tcg_temp_free_i64(t8);
/* Register moves implemented by aliasing: steal in2 (and in1 for
   movx) as the output, propagating the "global" ownership flags so
   the common epilogue does not free a global TCG value. */
2404 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2407 o->g_out = o->g_in2;
2408 TCGV_UNUSED_I64(o->in2);

2413 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2417 o->g_out = o->g_in1;
2418 o->g_out2 = o->g_in2;
2419 TCGV_UNUSED_I64(o->in1);
2420 TCGV_UNUSED_I64(o->in2);
2421 o->g_in1 = o->g_in2 = false;

/* MOVE (MVC): length-prefixed memory-to-memory copy via helper. */
2425 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2427 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2428 potential_page_fault(s);
2429 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2430 tcg_temp_free_i32(l);

/* MOVE LONG: helper form operating on register pairs r1/r2. */
2434 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2436 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2437 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2438 potential_page_fault(s);
2439 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2440 tcg_temp_free_i32(r1);
2441 tcg_temp_free_i32(r2);

/* MOVE LONG EXTENDED. */
2446 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2448 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2449 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2450 potential_page_fault(s);
2451 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2452 tcg_temp_free_i32(r1);
2453 tcg_temp_free_i32(r3);

2458 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY / MOVE TO SECONDARY: privileged cross-space moves.
   Note the key/length register index comes from the L1 field. */
2459 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2461 int r1 = get_field(s->fields, l1);
2462 check_privileged(s);
2463 potential_page_fault(s);
2464 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);

2469 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2471 int r1 = get_field(s->fields, l1);
2472 check_privileged(s);
2473 potential_page_fault(s);
2474 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);

/* MOVE PAGE: helper; regs[0] carries the access specification. */
2480 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2482 potential_page_fault(s);
2483 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);

/* MOVE STRING: terminator in regs[0]; updated addresses come back
   in in1 and (via low128) in2. */
2488 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2490 potential_page_fault(s);
2491 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2493 return_low128(o->in2);
/* MULTIPLY (64x64 -> low 64). */
2497 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2499 tcg_gen_mul_i64(o->out, o->in1, o->in2);

/* MULTIPLY producing a 128-bit result; low half via return_low128. */
2503 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2505 gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
2506 return_low128(o->out2);

/* BFP multiplies (short/long/extended, plus widening mxdb). */
2510 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2512 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);

2516 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2518 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);

2522 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2524 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);

2528 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2530 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2531 return_low128(o->out2);

2535 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2537 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2538 return_low128(o->out2);

/* Fused multiply-add / multiply-subtract.  The short (32-bit)
   variants load R3 through load_freg32_i64 and must free the temp;
   the long variants pass fregs[r3] directly. */
2542 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2544 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2545 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2546 tcg_temp_free_i64(r3);

2550 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2552 int r3 = get_field(s->fields, r3);
2553 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);

2557 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2559 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2560 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2561 tcg_temp_free_i64(r3);

2565 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2567 int r3 = get_field(s->fields, r3);
2568 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
/* LOAD NEGATIVE (force sign negative).  For FP images this is just
   setting the sign bit; f32 images live in the high word's low half,
   hence the 0x80000000 constant. */
2572 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2574 gen_helper_nabs_i64(o->out, o->in2);

2578 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2580 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);

2584 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2586 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);

2590 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2592 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2593 tcg_gen_mov_i64(o->out2, o->in2);

/* AND (character), storage-to-storage, via helper; sets CC. */
2597 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2599 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2600 potential_page_fault(s);
2601 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2602 tcg_temp_free_i32(l);

/* LOAD COMPLEMENT: integer negate; FP variants flip the sign bit. */
2607 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2609 tcg_gen_neg_i64(o->out, o->in2);

2613 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2615 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);

2619 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2621 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);

2625 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2627 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2628 tcg_gen_mov_i64(o->out2, o->in2);

/* OR (character), storage-to-storage, via helper; sets CC. */
2632 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2634 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2635 potential_page_fault(s);
2636 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2637 tcg_temp_free_i32(l);

/* OR, register form. */
2642 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2644 tcg_gen_or_i64(o->out, o->in1, o->in2);

/* OR IMMEDIATE into a sub-field: shift/size packed in insn->data;
   CC is computed from only the bits the immediate touched. */
2648 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2650 int shift = s->insn->data & 0xff;
2651 int size = s->insn->data >> 8;
2652 uint64_t mask = ((1ull << size) - 1) << shift;
2655 tcg_gen_shli_i64(o->in2, o->in2, shift);
2656 tcg_gen_or_i64(o->out, o->in1, o->in2);
2658 /* Produce the CC from only the bits manipulated. */
2659 tcg_gen_andi_i64(cc_dst, o->out, mask);
2660 set_cc_nz_u64(s, cc_dst);

/* POPULATION COUNT via helper. */
2664 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
2666 gen_helper_popcnt(o->out, o->in2);

2670 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged, helper only. */
2671 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
2673 check_privileged(s);
2674 gen_helper_ptlb(cpu_env);
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG).
   Build MASK (bits inserted from R2 after rotation by I5) and IMASK
   (bits preserved from R1), honoring I3/I4 wraparound and the
   zero-remaining flag in bit 0x80 of I4.  When mask/imask describe a
   contiguous field, emit a single deposit; when imask is zero a
   plain AND suffices; otherwise combine with AND/AND/OR. */
2679 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
2681 int i3 = get_field(s->fields, i3);
2682 int i4 = get_field(s->fields, i4);
2683 int i5 = get_field(s->fields, i5);
2684 int do_zero = i4 & 0x80;
2685 uint64_t mask, imask, pmask;
2688 /* Adjust the arguments for the specific insn. */
2689 switch (s->fields->op2) {
2690 case 0x55: /* risbg */
2695 case 0x5d: /* risbhg */
2698 pmask = 0xffffffff00000000ull;
2700 case 0x51: /* risblg */
2703 pmask = 0x00000000ffffffffull;
2709 /* MASK is the set of bits to be inserted from R2.
2710 Take care for I3/I4 wraparound. */
2713 mask ^= pmask >> i4 >> 1;
2715 mask |= ~(pmask >> i4 >> 1);
2719 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
2720 insns, we need to keep the other half of the register. */
2721 imask = ~mask | ~pmask;
2723 if (s->fields->op2 == 0x55) {
2730 /* In some cases we can implement this with deposit, which can be more
2731 efficient on some hosts. */
2732 if (~mask == imask && i3 <= i4) {
2733 if (s->fields->op2 == 0x5d) {
2736 /* Note that we rotate the bits to be inserted to the lsb, not to
2737 the position as described in the PoO. */
2740 rot = (i5 - pos) & 63;
2746 /* Rotate the input as necessary. */
2747 tcg_gen_rotli_i64(o->in2, o->in2, rot);
2749 /* Insert the selected bits into the output. */
2751 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
2752 } else if (imask == 0) {
2753 tcg_gen_andi_i64(o->out, o->in2, mask);
2755 tcg_gen_andi_i64(o->in2, o->in2, mask);
2756 tcg_gen_andi_i64(o->out, o->out, imask);
2757 tcg_gen_or_i64(o->out, o->out, o->in2);

/* ROTATE THEN {AND,OR,XOR} SELECTED BITS (RNSBG/ROSBG/RXSBG).
   Rotate R2 by I5, mask to the selected bit range, then combine into
   R1 with the boolean op selected by op2.  The test-only form (bit
   in I3) discards the result; CC comes from the selected bits. */
2762 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
2764 int i3 = get_field(s->fields, i3);
2765 int i4 = get_field(s->fields, i4);
2766 int i5 = get_field(s->fields, i5);
2769 /* If this is a test-only form, arrange to discard the result. */
2771 o->out = tcg_temp_new_i64();
2779 /* MASK is the set of bits to be operated on from R2.
2780 Take care for I3/I4 wraparound. */
2783 mask ^= ~0ull >> i4 >> 1;
2785 mask |= ~(~0ull >> i4 >> 1);
2788 /* Rotate the input as necessary. */
2789 tcg_gen_rotli_i64(o->in2, o->in2, i5);
2792 switch (s->fields->op2) {
2793 case 0x55: /* AND */
2794 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
2795 tcg_gen_and_i64(o->out, o->out, o->in2);
2798 tcg_gen_andi_i64(o->in2, o->in2, mask);
2799 tcg_gen_or_i64(o->out, o->out, o->in2);
2801 case 0x57: /* XOR */
2802 tcg_gen_andi_i64(o->in2, o->in2, mask);
2803 tcg_gen_xor_i64(o->out, o->out, o->in2);
2810 tcg_gen_andi_i64(cc_dst, o->out, mask);
2811 set_cc_nz_u64(s, cc_dst);
/* Byte-reversal (LOAD REVERSED family) at 16/32/64-bit widths. */
2815 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2817 tcg_gen_bswap16_i64(o->out, o->in2);

2821 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2823 tcg_gen_bswap32_i64(o->out, o->in2);

2827 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2829 tcg_gen_bswap64_i64(o->out, o->in2);

/* ROTATE LEFT (32-bit): done in 32-bit temps, zero-extended back. */
2833 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2835 TCGv_i32 t1 = tcg_temp_new_i32();
2836 TCGv_i32 t2 = tcg_temp_new_i32();
2837 TCGv_i32 to = tcg_temp_new_i32();
2838 tcg_gen_trunc_i64_i32(t1, o->in1);
2839 tcg_gen_trunc_i64_i32(t2, o->in2);
2840 tcg_gen_rotl_i32(to, t1, t2);
2841 tcg_gen_extu_i32_i64(o->out, to);
2842 tcg_temp_free_i32(t1);
2843 tcg_temp_free_i32(t2);
2844 tcg_temp_free_i32(to);

/* ROTATE LEFT (64-bit). */
2848 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2850 tcg_gen_rotl_i64(o->out, o->in1, o->in2);

2854 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; helper sets cc_op. */
2855 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
2857 check_privileged(s);
2858 gen_helper_rrbe(cc_op, cpu_env, o->in2);

/* SET ADDRESS SPACE CONTROL FAST: privileged; the addressing mode
   changes, so translation of this block must stop. */
2863 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
2865 check_privileged(s);
2866 gen_helper_sacf(cpu_env, o->in2);
2867 /* Addressing mode has changed, so end the block. */
2868 return EXIT_PC_STALE;

/* SET ACCESS REGISTER: store the low 32 bits into aregs[r1]. */
2872 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
2874 int r1 = get_field(s->fields, r1);
2875 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));

/* BFP subtract (short/long/extended). */
2879 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
2881 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);

2885 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
2887 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);

2891 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
2893 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2894 return_low128(o->out2);

/* BFP square root (short/long/extended). */
2898 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
2900 gen_helper_sqeb(o->out, cpu_env, o->in2);

2904 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
2906 gen_helper_sqdb(o->out, cpu_env, o->in2);

2910 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
2912 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
2913 return_low128(o->out2);
2917 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged; helper sets cc_op. */
2918 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
2920 check_privileged(s);
2921 potential_page_fault(s);
2922 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);

/* SIGNAL PROCESSOR: privileged inter-CPU signalling via helper. */
2927 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
2929 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2930 check_privileged(s);
2931 potential_page_fault(s);
2932 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
2933 tcg_temp_free_i32(r1);

/* STORE ON CONDITION: branch around the store when the condition
   fails; s->insn->data selects a 64-bit vs 32-bit store. */
2938 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
2944 disas_jcc(s, &c, get_field(s->fields, m3));
2946 lab = gen_new_label();
2948 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
2950 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
2954 r1 = get_field(s->fields, r1);
2955 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
2956 if (s->insn->data) {
2957 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
2959 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
2961 tcg_temp_free_i64(a);
/* SHIFT LEFT ARITHMETIC: shift, then restore the untouched sign bit
   from the source (SLA does not change the sign).  insn->data holds
   the sign-bit position (31 or 63) and selects the CC op. */
2967 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
2969 uint64_t sign = 1ull << s->insn->data;
2970 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
2971 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
2972 tcg_gen_shl_i64(o->out, o->in1, o->in2);
2973 /* The arithmetic left shift is curious in that it does not affect
2974 the sign bit. Copy that over from the source unchanged. */
2975 tcg_gen_andi_i64(o->out, o->out, ~sign);
2976 tcg_gen_andi_i64(o->in1, o->in1, sign);
2977 tcg_gen_or_i64(o->out, o->out, o->in1);

/* Logical/arithmetic shifts map directly onto TCG ops. */
2981 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
2983 tcg_gen_shl_i64(o->out, o->in1, o->in2);

2987 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
2989 tcg_gen_sar_i64(o->out, o->in1, o->in2);

2993 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
2995 tcg_gen_shr_i64(o->out, o->in1, o->in2);

/* SET FPC / SET FPC AND SIGNAL: helpers update the FP control reg. */
2999 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3001 gen_helper_sfpc(cpu_env, o->in2);

3005 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3007 gen_helper_sfas(cpu_env, o->in2);
/* SET ROUNDING MODE (SRNM/SRNMB/SRNMT): compute the new mode from
   B2+D2 (immediate when B2 is 0), deposit it into the right field of
   the FPC, then reinstall the FPC via the sfpc helper so fpu_status
   picks up the change. */
3011 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3013 int b2 = get_field(s->fields, b2);
3014 int d2 = get_field(s->fields, d2);
3015 TCGv_i64 t1 = tcg_temp_new_i64();
3016 TCGv_i64 t2 = tcg_temp_new_i64();
3019 switch (s->fields->op2) {
3020 case 0x99: /* SRNM */
3023 case 0xb8: /* SRNMB */
3026 case 0xb9: /* SRNMT */
3031 mask = (1 << len) - 1;
3033 /* Insert the value into the appropriate field of the FPC. */
3035 tcg_gen_movi_i64(t1, d2 & mask);
3037 tcg_gen_addi_i64(t1, regs[b2], d2);
3038 tcg_gen_andi_i64(t1, t1, mask);
3040 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3041 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3042 tcg_temp_free_i64(t1);
3044 /* Then install the new FPC to set the rounding mode in fpu_status. */
3045 gen_helper_sfpc(cpu_env, t2);
3046 tcg_temp_free_i64(t2);

3050 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; deposit bits 4-7 of the
   second operand into the PSW key field. */
3051 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3053 check_privileged(s);
3054 tcg_gen_shri_i64(o->in2, o->in2, 4);
3055 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);

/* SET STORAGE KEY EXTENDED: privileged, helper only. */
3059 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3061 check_privileged(s);
3062 gen_helper_sske(cpu_env, o->in1, o->in2);

/* SET SYSTEM MASK: privileged; replace the top byte of psw_mask. */
3066 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3068 check_privileged(s);
3069 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);

/* STORE CPU ADDRESS: privileged. */
3073 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3075 check_privileged(s);
3076 /* ??? Surely cpu address != cpu number. In any case the previous
3077 version of this stored more than the required half-word, so it
3078 is unlikely this has ever been tested. */
3079 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3083 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3085 gen_helper_stck(o->out, cpu_env);
3086 /* ??? We don't implement clock states. */
3087 gen_op_movi_cc(s, 0);
3091 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3093 TCGv_i64 c1 = tcg_temp_new_i64();
3094 TCGv_i64 c2 = tcg_temp_new_i64();
3095 gen_helper_stck(c1, cpu_env);
3096 /* Shift the 64-bit value into its place as a zero-extended
3097 104-bit value. Note that "bit positions 64-103 are always
3098 non-zero so that they compare differently to STCK"; we set
3099 the least significant bit to 1. */
3100 tcg_gen_shli_i64(c2, c1, 56);
3101 tcg_gen_shri_i64(c1, c1, 8);
3102 tcg_gen_ori_i64(c2, c2, 0x10000);
3103 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3104 tcg_gen_addi_i64(o->in2, o->in2, 8);
3105 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3106 tcg_temp_free_i64(c1);
3107 tcg_temp_free_i64(c2);
3108 /* ??? We don't implement clock states. */
3109 gen_op_movi_cc(s, 0);
/* SCKC - SET CLOCK COMPARATOR.  Privileged; helper call only.  */
3113 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3115 check_privileged(s);
3116 gen_helper_sckc(cpu_env, o->in2);
/* STCKC - STORE CLOCK COMPARATOR.  Privileged; helper fills o->out.  */
3120 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3122 check_privileged(s);
3123 gen_helper_stckc(o->out, cpu_env);
/* STCTG - STORE CONTROL (64-bit).  Privileged.  Stores control regs
   r1..r3 at the address in in2; may fault, hence potential_page_fault.  */
3127 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3129 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3130 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3131 check_privileged(s);
3132 potential_page_fault(s);
3133 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3134 tcg_temp_free_i32(r1);
3135 tcg_temp_free_i32(r3);
/* STCTL - STORE CONTROL (32-bit).  Same shape as STCTG.  */
3139 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3141 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3142 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3143 check_privileged(s);
3144 potential_page_fault(s);
3145 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3146 tcg_temp_free_i32(r1);
3147 tcg_temp_free_i32(r3);
/* STIDP - STORE CPU ID.  Privileged.  Uses cpu_num as the CPU id.  */
3151 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3153 check_privileged(s);
3154 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
/* SPT - SET CPU TIMER.  Privileged; helper call only.  */
3158 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3160 check_privileged(s);
3161 gen_helper_spt(cpu_env, o->in2);
/* STFL - STORE FACILITY LIST.  Privileged.  Stores a hard-coded
   facility word at absolute address 200 (the architected location).  */
3165 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3168 /* We really ought to have more complete indication of facilities
3169 that we implement. Address this when STFLE is implemented. */
3170 check_privileged(s);
3171 f = tcg_const_i64(0xc0000000);
3172 a = tcg_const_i64(200);
3173 tcg_gen_qemu_st32(f, a, get_mem_index(s));
3174 tcg_temp_free_i64(f);
3175 tcg_temp_free_i64(a);
/* STPT - STORE CPU TIMER.  Privileged; helper fills o->out.  */
3179 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3181 check_privileged(s);
3182 gen_helper_stpt(o->out, cpu_env);
/* STSI - STORE SYSTEM INFORMATION.  Privileged.  Helper consumes the
   function code/selectors from r0/r1 and sets the CC.  */
3186 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3188 check_privileged(s);
3189 potential_page_fault(s);
3190 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
/* SPX - SET PREFIX.  Privileged; helper call only.  */
3195 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3197 check_privileged(s);
3198 gen_helper_spx(cpu_env, o->in2);
/* Channel-subsystem instructions: report "not operational" via CC 3.  */
3202 static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
3204 check_privileged(s);
3205 /* Not operational. */
3206 gen_op_movi_cc(s, 3);
/* STPX - STORE PREFIX.  Privileged.  Reads psa and masks it to the
   architected prefix bits.  */
3210 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3212 check_privileged(s);
3213 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3214 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
/* STNSM / STOSM - STORE THEN AND/OR SYSTEM MASK.  Privileged.  First
   stores the current system-mask byte, then ANDs (opcode 0xac) or ORs
   the immediate into the top byte of psw_mask.  */
3218 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3220 uint64_t i2 = get_field(s->fields, i2);
3223 check_privileged(s);
3225 /* It is important to do what the instruction name says: STORE THEN.
3226 If we let the output hook perform the store then if we fault and
3227 restart, we'll have the wrong SYSTEM MASK in place. */
3228 t = tcg_temp_new_i64();
3229 tcg_gen_shri_i64(t, psw_mask, 56);
3230 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3231 tcg_temp_free_i64(t);
/* 0xac is STNSM (AND); otherwise STOSM (OR).  */
3233 if (s->fields->op == 0xac) {
3234 tcg_gen_andi_i64(psw_mask, psw_mask,
3235 (i2 << 56) | 0x00ffffffffffffffull);
3237 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
/* STURA - STORE USING REAL ADDRESS.  Privileged; helper performs the
   real-mode store of in1 at the address in in2.  */
3242 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3244 check_privileged(s);
3245 potential_page_fault(s);
3246 gen_helper_stura(cpu_env, o->in2, o->in1);
/* Plain stores of in1 at address in2, in 8/16/32/64-bit widths.  */
3251 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3253 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3257 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3259 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3263 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3265 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3269 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3271 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
/* STAM - STORE ACCESS MULTIPLE.  Helper stores access regs r1..r3.  */
3275 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3277 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3278 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3279 potential_page_fault(s);
3280 gen_helper_stam(cpu_env, r1, o->in2, r3);
3281 tcg_temp_free_i32(r1);
3282 tcg_temp_free_i32(r3);
/* STCM - STORE CHARACTERS UNDER MASK.  s->insn->data gives the base bit
   position within the register.  Contiguous masks are special-cased as a
   single 32/16/8-bit store; the general case (dispatch on m3, elided
   here) emits one byte store per set mask bit, walking the mask with
   m3 = (m3 << 1) & 0xf.  */
3286 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3288 int m3 = get_field(s->fields, m3);
3289 int pos, base = s->insn->data;
3290 TCGv_i64 tmp = tcg_temp_new_i64();
/* Bit position of the least-significant selected byte.  */
3292 pos = base + ctz32(m3) * 8;
3295 /* Effectively a 32-bit store. */
3296 tcg_gen_shri_i64(tmp, o->in1, pos);
3297 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3303 /* Effectively a 16-bit store. */
3304 tcg_gen_shri_i64(tmp, o->in1, pos);
3305 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3312 /* Effectively an 8-bit store. */
3313 tcg_gen_shri_i64(tmp, o->in1, pos);
3314 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3318 /* This is going to be a sequence of shifts and stores. */
3319 pos = base + 32 - 8;
3322 tcg_gen_shri_i64(tmp, o->in1, pos);
3323 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3324 tcg_gen_addi_i64(o->in2, o->in2, 1);
3326 m3 = (m3 << 1) & 0xf;
3331 tcg_temp_free_i64(tmp);
/* STM / STMG - STORE MULTIPLE.  Stores registers r1..r3 at successive
   addresses; s->insn->data selects the element size (4 or 8 bytes).
   The register-iteration loop is elided between these lines.  */
3335 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3337 int r1 = get_field(s->fields, r1);
3338 int r3 = get_field(s->fields, r3);
3339 int size = s->insn->data;
3340 TCGv_i64 tsize = tcg_const_i64(size);
3344 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3346 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
/* Advance the store address by one element per iteration.  */
3351 tcg_gen_add_i64(o->in2, o->in2, tsize);
3355 tcg_temp_free_i64(tsize);
/* STMH - STORE MULTIPLE HIGH.  Stores the high 32 bits of r1..r3 as
   successive 32-bit values (shift left by 32, 32-bit store).  */
3359 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3361 int r1 = get_field(s->fields, r1);
3362 int r3 = get_field(s->fields, r3);
3363 TCGv_i64 t = tcg_temp_new_i64();
3364 TCGv_i64 t4 = tcg_const_i64(4);
3365 TCGv_i64 t32 = tcg_const_i64(32);
3368 tcg_gen_shl_i64(t, regs[r1], t32);
3369 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3373 tcg_gen_add_i64(o->in2, o->in2, t4);
3377 tcg_temp_free_i64(t);
3378 tcg_temp_free_i64(t4);
3379 tcg_temp_free_i64(t32);
/* SRST - SEARCH STRING.  Helper scans for the byte in r0; it returns
   one result directly and the second via the low-128 mechanism.  */
3383 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3385 potential_page_fault(s);
3386 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3388 return_low128(o->in2);
/* Plain 64-bit subtraction: out = in1 - in2.  */
3392 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3394 tcg_gen_sub_i64(o->out, o->in1, o->in2);
/* SUBTRACT WITH BORROW: out = in1 + ~in2 + borrow, where the borrow is
   extracted from bit 1 of the current condition code.  */
3398 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3403 tcg_gen_not_i64(o->in2, o->in2);
3404 tcg_gen_add_i64(o->out, o->in1, o->in2);
3406 /* XXX possible optimization point */
3408 cc = tcg_temp_new_i64();
3409 tcg_gen_extu_i32_i64(cc, cc_op);
3410 tcg_gen_shri_i64(cc, cc, 1);
3411 tcg_gen_add_i64(o->out, o->out, cc);
3412 tcg_temp_free_i64(cc);
/* SVC - SUPERVISOR CALL.  Records the SVC code and instruction length
   in the CPU state, then raises EXCP_SVC; translation ends here.  */
3416 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3423 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3424 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3425 tcg_temp_free_i32(t);
3427 t = tcg_const_i32(s->next_pc - s->pc);
3428 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3429 tcg_temp_free_i32(t);
3431 gen_exception(EXCP_SVC);
3432 return EXIT_NORETURN;
/* TCEB/TCDB/TCXB - TEST DATA CLASS (short/long/extended BFP).  The
   helpers compute the CC directly into cc_op.  */
3435 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3437 gen_helper_tceb(cc_op, o->in1, o->in2);
3442 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3444 gen_helper_tcdb(cc_op, o->in1, o->in2);
3449 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3451 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3456 #ifndef CONFIG_USER_ONLY
/* TPROT - TEST PROTECTION.  Helper sets the CC from the access test.  */
3457 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3459 potential_page_fault(s);
3460 gen_helper_tprot(cc_op, o->addr1, o->in2);
/* TR - TRANSLATE.  Table-driven byte translation of l1+1 bytes.  */
3466 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3468 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3469 potential_page_fault(s);
3470 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3471 tcg_temp_free_i32(l);
/* UNPK - UNPACK; same helper-call shape as TR.  */
3476 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3478 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3479 potential_page_fault(s);
3480 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3481 tcg_temp_free_i32(l);
/* XC - EXCLUSIVE OR (character).  When both operands name the same
   storage (same base and displacement) the result is all zeros, so for
   short lengths we emit inline zero stores of decreasing width and set
   CC 0; otherwise the xc helper does the byte-wise XOR and sets the CC.
   The width-selection branches on the remaining length are elided
   between these lines.  */
3485 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3487 int d1 = get_field(s->fields, d1);
3488 int d2 = get_field(s->fields, d2);
3489 int b1 = get_field(s->fields, b1);
3490 int b2 = get_field(s->fields, b2);
3491 int l = get_field(s->fields, l1);
3494 o->addr1 = get_address(s, 0, b1, d1);
3496 /* If the addresses are identical, this is a store/memset of zero. */
3497 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
3498 o->in2 = tcg_const_i64(0);
3502 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
3505 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
3509 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
3512 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
3516 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
3519 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
3523 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
/* XORing anything with itself yields zero: CC is always 0 here.  */
3525 gen_op_movi_cc(s, 0);
3529 /* But in general we'll defer to a helper. */
3530 o->in2 = get_address(s, 0, b2, d2);
3531 t32 = tcg_const_i32(l);
3532 potential_page_fault(s);
3533 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
3534 tcg_temp_free_i32(t32);
/* Plain 64-bit XOR: out = in1 ^ in2.  */
3539 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3541 tcg_gen_xor_i64(o->out, o->in1, o->in2);
/* XOR IMMEDIATE into one field of the register.  s->insn->data packs
   the field's bit offset (low byte) and width (high bits); only the
   selected field contributes to the CC.  */
3545 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3547 int shift = s->insn->data & 0xff;
3548 int size = s->insn->data >> 8;
3549 uint64_t mask = ((1ull << size) - 1) << shift;
3552 tcg_gen_shli_i64(o->in2, o->in2, shift);
3553 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3555 /* Produce the CC from only the bits manipulated. */
3556 tcg_gen_andi_i64(cc_dst, o->out, mask);
3557 set_cc_nz_u64(s, cc_dst);
/* Produce a constant-zero output (used for one or both result words).  */
3561 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
3563 o->out = tcg_const_i64(0);
3567 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
3569 o->out = tcg_const_i64(0);
3575 /* ====================================================================== */
3576 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3577 the original inputs), update the various cc data structures in order to
3578 be able to compute the new condition code. */
/* CC from an absolute-value result.  */
3580 static void cout_abs32(DisasContext *s, DisasOps *o)
3582 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
3585 static void cout_abs64(DisasContext *s, DisasOps *o)
3587 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
/* CC from addition: signed (ADD), unsigned (ADDU), and with-carry
   (ADDC) variants; both inputs and the output are recorded so the CC
   can be recomputed lazily.  */
3590 static void cout_adds32(DisasContext *s, DisasOps *o)
3592 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
3595 static void cout_adds64(DisasContext *s, DisasOps *o)
3597 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
3600 static void cout_addu32(DisasContext *s, DisasOps *o)
3602 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
3605 static void cout_addu64(DisasContext *s, DisasOps *o)
3607 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
3610 static void cout_addc32(DisasContext *s, DisasOps *o)
3612 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
3615 static void cout_addc64(DisasContext *s, DisasOps *o)
3617 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
/* CC from comparisons: signed (LTGT) and unsigned (LTUGTU).  */
3620 static void cout_cmps32(DisasContext *s, DisasOps *o)
3622 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
3625 static void cout_cmps64(DisasContext *s, DisasOps *o)
3627 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
3630 static void cout_cmpu32(DisasContext *s, DisasOps *o)
3632 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
3635 static void cout_cmpu64(DisasContext *s, DisasOps *o)
3637 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
/* CC from floating-point results (32/64/128-bit).  */
3640 static void cout_f32(DisasContext *s, DisasOps *o)
3642 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
3645 static void cout_f64(DisasContext *s, DisasOps *o)
3647 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
3650 static void cout_f128(DisasContext *s, DisasOps *o)
3652 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
/* CC from negative-absolute-value results.  */
3655 static void cout_nabs32(DisasContext *s, DisasOps *o)
3657 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
3660 static void cout_nabs64(DisasContext *s, DisasOps *o)
3662 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
/* CC from complement (negation) results.  */
3665 static void cout_neg32(DisasContext *s, DisasOps *o)
3667 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
3670 static void cout_neg64(DisasContext *s, DisasOps *o)
3672 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
/* CC = zero / not-zero; the 32-bit form masks to the low 32 bits.  */
3675 static void cout_nz32(DisasContext *s, DisasOps *o)
3677 tcg_gen_ext32u_i64(cc_dst, o->out);
3678 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
3681 static void cout_nz64(DisasContext *s, DisasOps *o)
3683 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
/* CC from a signed comparison of the result against zero.  */
3686 static void cout_s32(DisasContext *s, DisasOps *o)
3688 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3691 static void cout_s64(DisasContext *s, DisasOps *o)
3693 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
/* CC from subtraction: signed, unsigned, and with-borrow variants.  */
3696 static void cout_subs32(DisasContext *s, DisasOps *o)
3698 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3701 static void cout_subs64(DisasContext *s, DisasOps *o)
3703 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3706 static void cout_subu32(DisasContext *s, DisasOps *o)
3708 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3711 static void cout_subu64(DisasContext *s, DisasOps *o)
3713 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3716 static void cout_subb32(DisasContext *s, DisasOps *o)
3718 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3721 static void cout_subb64(DisasContext *s, DisasOps *o)
3723 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
/* CC for TEST UNDER MASK.  */
3726 static void cout_tm32(DisasContext *s, DisasOps *o)
3728 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3731 static void cout_tm64(DisasContext *s, DisasOps *o)
3733 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3736 /* ====================================================================== */
3737 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3738 with the TCG register to which we will write. Used in combination with
3739 the "wout" generators, in some cases we need a new temporary, and in
3740 some cases we can write to a TCG global. */
/* Fresh temporary for a single result.  */
3742 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3744 o->out = tcg_temp_new_i64();
3746 #define SPEC_prep_new 0
/* Fresh temporaries for a result pair.  */
3748 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3750 o->out = tcg_temp_new_i64();
3751 o->out2 = tcg_temp_new_i64();
3753 #define SPEC_prep_new_P 0
/* Write directly to GPR r1 (a TCG global).  */
3755 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3757 o->out = regs[get_field(f, r1)];
3760 #define SPEC_prep_r1 0
/* Write to the even/odd GPR pair r1/r1+1; r1 must be even.  */
3762 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3764 int r1 = get_field(f, r1);
3766 o->out2 = regs[r1 + 1];
3767 o->g_out = o->g_out2 = true;
3769 #define SPEC_prep_r1_P SPEC_r1_even
/* Write directly to FPR r1.  */
3771 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3773 o->out = fregs[get_field(f, r1)];
3776 #define SPEC_prep_f1 0
/* Write to the 128-bit FP pair r1/r1+2; r1 must name a valid pair.  */
3778 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3780 int r1 = get_field(f, r1);
3782 o->out2 = fregs[r1 + 2];
3783 o->g_out = o->g_out2 = true;
3785 #define SPEC_prep_x1 SPEC_r1_f128
3787 /* ====================================================================== */
3788 /* The "Write OUTput" generators. These generally perform some non-trivial
3789 copy of data to TCG globals, or to main memory. The trivial cases are
3790 generally handled by having a "prep" generator install the TCG global
3791 as the destination of the operation. */
/* Full 64-bit write to GPR r1.  */
3793 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3795 store_reg(get_field(f, r1), o->out);
3797 #define SPEC_wout_r1 0
/* Insert only the low 8 bits of the result into r1.  */
3799 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3801 int r1 = get_field(f, r1);
3802 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
3804 #define SPEC_wout_r1_8 0
/* Insert only the low 16 bits of the result into r1.  */
3806 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3808 int r1 = get_field(f, r1);
3809 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
3811 #define SPEC_wout_r1_16 0
/* 32-bit write to r1 (high half preserved by store_reg32_i64).  */
3813 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3815 store_reg32_i64(get_field(f, r1), o->out);
3817 #define SPEC_wout_r1_32 0
/* Pair of 32-bit writes to the even/odd pair r1/r1+1.  */
3819 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3821 int r1 = get_field(f, r1);
3822 store_reg32_i64(r1, o->out);
3823 store_reg32_i64(r1 + 1, o->out2);
3825 #define SPEC_wout_r1_P32 SPEC_r1_even
/* Split one 64-bit result across the pair: low half to r1+1, high half
   to r1.  Note this clobbers o->out by shifting it.  */
3827 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3829 int r1 = get_field(f, r1);
3830 store_reg32_i64(r1 + 1, o->out);
3831 tcg_gen_shri_i64(o->out, o->out, 32);
3832 store_reg32_i64(r1, o->out);
3834 #define SPEC_wout_r1_D32 SPEC_r1_even
/* Short (32-bit) FP write to FPR r1.  */
3836 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3838 store_freg32_i64(get_field(f, r1), o->out);
3840 #define SPEC_wout_e1 0
/* Long (64-bit) FP write to FPR r1.  */
3842 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3844 store_freg(get_field(f, r1), o->out);
3846 #define SPEC_wout_f1 0
/* Extended (128-bit) FP write to the pair r1/r1+2.
   NOTE(review): this reads s->fields where its siblings use the F
   parameter — same value in practice, but inconsistent.  */
3848 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3850 int f1 = get_field(s->fields, r1);
3851 store_freg(f1, o->out);
3852 store_freg(f1 + 2, o->out2);
3854 #define SPEC_wout_x1 SPEC_r1_f128
/* Conditional writes: skip when r1 == r2 (result already in place).  */
3856 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3858 if (get_field(f, r1) != get_field(f, r2)) {
3859 store_reg32_i64(get_field(f, r1), o->out);
3862 #define SPEC_wout_cond_r1r2_32 0
3864 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
3866 if (get_field(f, r1) != get_field(f, r2)) {
3867 store_freg32_i64(get_field(f, r1), o->out);
3870 #define SPEC_wout_cond_e1e2 0
/* Memory writes of the result at addr1, in 8/16/32/64-bit widths.  */
3872 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3874 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
3876 #define SPEC_wout_m1_8 0
3878 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3880 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
3882 #define SPEC_wout_m1_16 0
3884 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3886 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
3888 #define SPEC_wout_m1_32 0
3890 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3892 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
3894 #define SPEC_wout_m1_64 0
/* 32-bit memory write at the in2 address.  */
3896 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3898 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
3900 #define SPEC_wout_m2_32 0
3902 /* ====================================================================== */
3903 /* The "INput 1" generators. These load the first operand to an insn. */
/* Copy of GPR r1 into a fresh temporary.  */
3905 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3907 o->in1 = load_reg(get_field(f, r1));
3909 #define SPEC_in1_r1 0
/* Alias GPR r1 directly (no copy); marked as a global elsewhere.  */
3911 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3913 o->in1 = regs[get_field(f, r1)];
3916 #define SPEC_in1_r1_o 0
/* Sign-/zero-extended low 32 bits of r1.  */
3918 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3920 o->in1 = tcg_temp_new_i64();
3921 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
3923 #define SPEC_in1_r1_32s 0
3925 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3927 o->in1 = tcg_temp_new_i64();
3928 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
3930 #define SPEC_in1_r1_32u 0
/* High 32 bits of r1, shifted down.  */
3932 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
3934 o->in1 = tcg_temp_new_i64();
3935 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
3937 #define SPEC_in1_r1_sr32 0
/* Odd half of the even/odd pair r1/r1+1, in full and 32-bit forms.  */
3939 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
3941 o->in1 = load_reg(get_field(f, r1) + 1);
3943 #define SPEC_in1_r1p1 SPEC_r1_even
3945 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3947 o->in1 = tcg_temp_new_i64();
3948 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
3950 #define SPEC_in1_r1p1_32s SPEC_r1_even
3952 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3954 o->in1 = tcg_temp_new_i64();
3955 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
3957 #define SPEC_in1_r1p1_32u SPEC_r1_even
/* 64-bit value assembled from the 32-bit pair: r1 high, r1+1 low.  */
3959 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3961 int r1 = get_field(f, r1);
3962 o->in1 = tcg_temp_new_i64();
3963 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3965 #define SPEC_in1_r1_D32 SPEC_r1_even
/* First operand taken from r2 or r3 instead of r1.  */
3967 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
3969 o->in1 = load_reg(get_field(f, r2));
3971 #define SPEC_in1_r2 0
3973 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
3975 o->in1 = load_reg(get_field(f, r3));
3977 #define SPEC_in1_r3 0
3979 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
3981 o->in1 = regs[get_field(f, r3)];
3984 #define SPEC_in1_r3_o 0
3986 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3988 o->in1 = tcg_temp_new_i64();
3989 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
3991 #define SPEC_in1_r3_32s 0
3993 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3995 o->in1 = tcg_temp_new_i64();
3996 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
3998 #define SPEC_in1_r3_32u 0
/* Floating-point first operands: short FP, long FP alias, 128-bit pair
   alias, and long FP alias of r3.  */
4000 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4002 o->in1 = load_freg32_i64(get_field(f, r1));
4004 #define SPEC_in1_e1 0
4006 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4008 o->in1 = fregs[get_field(f, r1)];
4011 #define SPEC_in1_f1_o 0
4013 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4015 int r1 = get_field(f, r1);
4017 o->out2 = fregs[r1 + 2];
4018 o->g_out = o->g_out2 = true;
4020 #define SPEC_in1_x1_o SPEC_r1_f128
4022 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4024 o->in1 = fregs[get_field(f, r3)];
4027 #define SPEC_in1_f3_o 0
/* Effective-address forms: addr1 from b1+d1 or from the x2/b2/d2
   triple.  */
4029 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4031 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4033 #define SPEC_in1_la1 0
4035 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4037 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4038 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4040 #define SPEC_in1_la2 0
/* Memory first operands: compute addr1, then load with the indicated
   width and extension.  */
4042 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4045 o->in1 = tcg_temp_new_i64();
4046 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4048 #define SPEC_in1_m1_8u 0
4050 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4053 o->in1 = tcg_temp_new_i64();
4054 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4056 #define SPEC_in1_m1_16s 0
4058 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4061 o->in1 = tcg_temp_new_i64();
4062 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4064 #define SPEC_in1_m1_16u 0
4066 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4069 o->in1 = tcg_temp_new_i64();
4070 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4072 #define SPEC_in1_m1_32s 0
4074 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4077 o->in1 = tcg_temp_new_i64();
4078 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4080 #define SPEC_in1_m1_32u 0
4082 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4085 o->in1 = tcg_temp_new_i64();
4086 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4088 #define SPEC_in1_m1_64 0
4090 /* ====================================================================== */
4091 /* The "INput 2" generators. These load the second operand to an insn. */
/* Second operand aliased from GPR r1 (no copy).  */
4093 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4095 o->in2 = regs[get_field(f, r1)];
4098 #define SPEC_in2_r1_o 0
/* Zero-extended low 16/32 bits of r1.  */
4100 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4102 o->in2 = tcg_temp_new_i64();
4103 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4105 #define SPEC_in2_r1_16u 0
4107 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4109 o->in2 = tcg_temp_new_i64();
4110 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4112 #define SPEC_in2_r1_32u 0
/* Copy of GPR r2, or an alias of it.  */
4114 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4116 o->in2 = load_reg(get_field(f, r2));
4118 #define SPEC_in2_r2 0
4120 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4122 o->in2 = regs[get_field(f, r2)];
4125 #define SPEC_in2_r2_o 0
/* r2 only when non-zero (the r2 == 0 branch is elided here).  */
4127 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4129 int r2 = get_field(f, r2);
4131 o->in2 = load_reg(r2);
4134 #define SPEC_in2_r2_nz 0
/* Sign-/zero-extended low 8 or 16 bits of r2.  */
4136 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4138 o->in2 = tcg_temp_new_i64();
4139 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4141 #define SPEC_in2_r2_8s 0
4143 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4145 o->in2 = tcg_temp_new_i64();
4146 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4148 #define SPEC_in2_r2_8u 0
4150 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4152 o->in2 = tcg_temp_new_i64();
4153 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4155 #define SPEC_in2_r2_16s 0
4157 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4159 o->in2 = tcg_temp_new_i64();
4160 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4162 #define SPEC_in2_r2_16u 0
/* Copy of GPR r3.  */
4164 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4166 o->in2 = load_reg(get_field(f, r3));
4168 #define SPEC_in2_r3 0
/* Sign-/zero-extended low 32 bits of r2.  */
4170 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4172 o->in2 = tcg_temp_new_i64();
4173 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4175 #define SPEC_in2_r2_32s 0
4177 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4179 o->in2 = tcg_temp_new_i64();
4180 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4182 #define SPEC_in2_r2_32u 0
/* Floating-point second operands: short FP, long FP alias, 128-bit
   pair alias.  */
4184 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4186 o->in2 = load_freg32_i64(get_field(f, r2));
4188 #define SPEC_in2_e2 0
4190 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4192 o->in2 = fregs[get_field(f, r2)];
4195 #define SPEC_in2_f2_o 0
4197 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4199 int r2 = get_field(f, r2);
4201 o->in2 = fregs[r2 + 2];
4202 o->g_in1 = o->g_in2 = true;
4204 #define SPEC_in2_x2_o SPEC_r2_f128
/* Address-style second operands: register-as-address, base+index+disp,
   and a PC-relative target (RI-format, halfword-scaled immediate).  */
4206 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4208 o->in2 = get_address(s, 0, get_field(f, r2), 0);
4210 #define SPEC_in2_ra2 0
4212 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4214 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4215 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4217 #define SPEC_in2_a2 0
4219 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4221 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4223 #define SPEC_in2_ri2 0
/* Shift-amount operands, masked to 31 or 63 bits by help_l2_shift.  */
4225 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4227 help_l2_shift(s, f, o, 31);
4229 #define SPEC_in2_sh32 0
4231 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4233 help_l2_shift(s, f, o, 63);
4235 #define SPEC_in2_sh64 0
/* Memory second operands: in2 initially holds the effective address and
   is overwritten in place by the load of the indicated width.  */
4237 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4240 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4242 #define SPEC_in2_m2_8u 0
4244 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4247 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4249 #define SPEC_in2_m2_16s 0
4251 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4254 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4256 #define SPEC_in2_m2_16u 0
4258 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4261 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4263 #define SPEC_in2_m2_32s 0
4265 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4268 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4270 #define SPEC_in2_m2_32u 0
4272 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4275 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4277 #define SPEC_in2_m2_64 0
/* PC-relative memory operands (used by the RIL-format LRL family).  */
4279 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4282 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4284 #define SPEC_in2_mri2_16u 0
4286 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4289 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4291 #define SPEC_in2_mri2_32s 0
4293 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4296 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4298 #define SPEC_in2_mri2_32u 0
4300 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4303 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4305 #define SPEC_in2_mri2_64 0
/* Immediate second operands, with various truncations and an optional
   left shift taken from s->insn->data.  */
4307 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4309 o->in2 = tcg_const_i64(get_field(f, i2));
4311 #define SPEC_in2_i2 0
4313 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4315 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4317 #define SPEC_in2_i2_8u 0
4319 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4321 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4323 #define SPEC_in2_i2_16u 0
4325 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4327 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4329 #define SPEC_in2_i2_32u 0
4331 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4333 uint64_t i2 = (uint16_t)get_field(f, i2);
4334 o->in2 = tcg_const_i64(i2 << s->insn->data);
4336 #define SPEC_in2_i2_16u_shl 0
4338 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4340 uint64_t i2 = (uint32_t)get_field(f, i2);
4341 o->in2 = tcg_const_i64(i2 << s->insn->data);
4343 #define SPEC_in2_i2_32u_shl 0
4345 /* ====================================================================== */
4347 /* Find opc within the table of insns. This is formulated as a switch
4348 statement so that (1) we get compile-time notice of cut-paste errors
4349 for duplicated opcodes, and (2) the compiler generates the binary
4350 search tree, rather than us having to post-process the table. */
/* insn-data.def is included three times with different expansions of
   the C()/D() macros: first to build an enum of insn indices, then to
   build the insn_info[] descriptor table (wiring the in1/in2/prep/
   wout/cout/op generator hooks), and finally to build the opcode
   switch inside lookup_opc().  */
4352 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4353 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4355 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4357 enum DisasInsnEnum {
4358 #include "insn-data.def"
4362 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4366 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4368 .help_in1 = in1_##I1, \
4369 .help_in2 = in2_##I2, \
4370 .help_prep = prep_##P, \
4371 .help_wout = wout_##W, \
4372 .help_cout = cout_##CC, \
4373 .help_op = op_##OP, \
4377 /* Allow 0 to be used for NULL in the table below. */
4385 #define SPEC_in1_0 0
4386 #define SPEC_in2_0 0
4387 #define SPEC_prep_0 0
4388 #define SPEC_wout_0 0
4390 static const DisasInsn insn_info[] = {
4391 #include "insn-data.def"
4395 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4396 case OPC: return &insn_info[insn_ ## NM];
/* Map a 16-bit (major << 8 | secondary) opcode to its descriptor.  */
4398 static const DisasInsn *lookup_opc(uint16_t opc)
4401 #include "insn-data.def"
4410 /* Extract a field from the insn. The INSN should be left-aligned in
4411 the uint64_t so that we can more easily utilize the big-bit-endian
4412 definitions we extract from the Principles of Operation. */
4414 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4422 /* Zero extract the field from the insn. */
4423 r = (insn << f->beg) >> (64 - f->size);
4425 /* Sign-extend, or un-swap the field as necessary. */
4427 case 0: /* unsigned */
4429 case 1: /* signed */
4430 assert(f->size <= 32);
4431 m = 1u << (f->size - 1);
4434 case 2: /* dl+dh split, signed 20 bit. */
/* The 20-bit displacement is encoded dl (12 bits) low, dh (8 bits)
   high; reassemble as dh:dl with sign coming from dh.  */
4435 r = ((int8_t)r << 12) | (r >> 8);
4441 /* Validate that the "compressed" encoding we selected above is valid.
4442 I.e. we haven't made two different original fields overlap. */
4443 assert(((o->presentC >> f->indexC) & 1) == 0);
4444 o->presentC |= 1 << f->indexC;
4445 o->presentO |= 1 << f->indexO;
4447 o->c[f->indexC] = r;
4450 /* Lookup the insn at the current PC, extracting the operands into O and
4451 returning the info struct for the insn.  Returns NULL for invalid insn. */
4453 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
4456 uint64_t insn, pc = s->pc;
4458 const DisasInsn *info;
/* The first halfword carries the major opcode, which determines the
   instruction length (2, 4, or 6 bytes). */
4460 insn = ld_code2(env, pc);
4461 op = (insn >> 8) & 0xff;
4462 ilen = get_ilen(op);
4463 s->next_pc = s->pc + ilen;
/* Re-load the full insn left-aligned in the uint64_t (4-byte case,
   then 6-byte case below). */
4470 insn = ld_code4(env, pc) << 32;
4473 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
4479 /* We can't actually determine the insn format until we've looked up
4480 the full insn opcode.  Which we can't do without locating the
4481 secondary opcode.  Assume by default that OP2 is at bit 40; for
4482 those smaller insns that don't actually have a secondary opcode
4483 this will correctly result in OP2 = 0. */
/* Secondary opcode in the second byte (bits 8..15). */
4489 case 0xb2: /* S, RRF, RRE */
4490 case 0xb3: /* RRE, RRD, RRF */
4491 case 0xb9: /* RRE, RRF */
4492 case 0xe5: /* SSE, SIL */
4493 op2 = (insn << 8) >> 56;
/* Secondary opcode in the low nibble of the second byte (bits 12..15). */
4497 case 0xc0: /* RIL */
4498 case 0xc2: /* RIL */
4499 case 0xc4: /* RIL */
4500 case 0xc6: /* RIL */
4501 case 0xc8: /* SSF */
4502 case 0xcc: /* RIL */
4503 op2 = (insn << 12) >> 60;
/* NOTE(review): the SS-format cases presumably leave op2 = 0; their
   bodies are elided in this excerpt. */
4505 case 0xd0 ... 0xdf: /* SS */
4511 case 0xee ... 0xf3: /* SS */
4512 case 0xf8 ... 0xfd: /* SS */
/* Default: secondary opcode at bits 40..47, per the comment above. */
4516 op2 = (insn << 40) >> 56;
4520 memset(f, 0, sizeof(*f));
4524 /* Lookup the instruction. */
4525 info = lookup_opc(op << 8 | op2);
4527 /* If we found it, extract the operands. */
4529 DisasFormat fmt = info->fmt;
4532 for (i = 0; i < NUM_C_FIELD; ++i) {
4533 extract_field(f, &format_info[fmt].op[i], insn);
/* Translate one instruction at s->pc: decode it, raise a specification
   exception if the insn's SPEC_* constraints are violated, then run the
   in1/in2/prep/op/wout/cout helper pipeline and free any temporaries
   the helpers created.  Returns the resulting ExitStatus. */
4539 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
4541 const DisasInsn *insn;
4542 ExitStatus ret = NO_EXIT;
4546 /* Search for the insn in the table. */
4547 insn = extract_insn(env, s, &f);
4549 /* Not found means unimplemented/illegal opcode. */
4551 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
4553 gen_illegal_opcode(s);
4554 return EXIT_NORETURN;
4557 /* Check for insn specification exceptions. */
4559 int spec = insn->spec, excp = 0, r;
/* r1 / r2 must be even register numbers.  NOTE(review): the actual
   parity tests are elided in this excerpt. */
4561 if (spec & SPEC_r1_even) {
4562 r = get_field(&f, r1);
4564 excp = PGM_SPECIFICATION;
4567 if (spec & SPEC_r2_even) {
4568 r = get_field(&f, r2);
4570 excp = PGM_SPECIFICATION;
/* r1 / r2 must be valid for a 128-bit float operand (SPEC_*_f128);
   the validity conditions are likewise elided here. */
4573 if (spec & SPEC_r1_f128) {
4574 r = get_field(&f, r1);
4576 excp = PGM_SPECIFICATION;
4579 if (spec & SPEC_r2_f128) {
4580 r = get_field(&f, r2);
4582 excp = PGM_SPECIFICATION;
4586 gen_program_exception(s, excp);
4587 return EXIT_NORETURN;
4591 /* Set up the structures we use to communicate with the helpers. */
4594 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
4595 TCGV_UNUSED_I64(o.out);
4596 TCGV_UNUSED_I64(o.out2);
4597 TCGV_UNUSED_I64(o.in1);
4598 TCGV_UNUSED_I64(o.in2);
4599 TCGV_UNUSED_I64(o.addr1);
4601 /* Implement the instruction. */
4602 if (insn->help_in1) {
4603 insn->help_in1(s, &f, &o);
4605 if (insn->help_in2) {
4606 insn->help_in2(s, &f, &o);
4608 if (insn->help_prep) {
4609 insn->help_prep(s, &f, &o);
4611 if (insn->help_op) {
4612 ret = insn->help_op(s, &o);
4614 if (insn->help_wout) {
4615 insn->help_wout(s, &f, &o);
4617 if (insn->help_cout) {
4618 insn->help_cout(s, &o);
4621 /* Free any temporaries created by the helpers. */
/* The g_* flags mark operands aliasing global TCG values, which must
   not be freed here. */
4622 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
4623 tcg_temp_free_i64(o.out);
4625 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
4626 tcg_temp_free_i64(o.out2);
4628 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
4629 tcg_temp_free_i64(o.in1);
4631 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
4632 tcg_temp_free_i64(o.in2);
4634 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
4635 tcg_temp_free_i64(o.addr1);
4638 /* Advance to the next instruction. */
/* Main translation loop: translate insns one at a time via
   translate_one() until an exit condition (page boundary, op buffer
   full, icount limit, singlestep, or an explicit ExitStatus), filling
   the TCG op buffer and, presumably when the PC-search variant is
   requested, the gen_opc_* side tables used by restore_state_to_opc.
   NOTE(review): the search-pc flag parameter and several guard lines
   are elided in this excerpt — confirm against the full source. */
4643 static inline void gen_intermediate_code_internal(CPUS390XState *env,
4644 TranslationBlock *tb,
4648 target_ulong pc_start;
4649 uint64_t next_page_start;
4650 uint16_t *gen_opc_end;
4652 int num_insns, max_insns;
/* In non-64-bit modes, addresses are truncated to 31 bits. */
4660 if (!(tb->flags & FLAG_MASK_64)) {
4661 pc_start &= 0x7fffffff;
4666 dc.cc_op = CC_OP_DYNAMIC;
4667 do_debug = dc.singlestep_enabled = env->singlestep_enabled;
4669 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4671 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
/* CF_COUNT_MASK == 0 means "no limit"; use the mask as the cap. */
4674 max_insns = tb->cflags & CF_COUNT_MASK;
4675 if (max_insns == 0) {
4676 max_insns = CF_COUNT_MASK;
/* Record per-insn metadata (pc, cc_op, icount) in the side tables. */
4683 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4687 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4690 tcg_ctx.gen_opc_pc[lj] = dc.pc;
4691 gen_opc_cc_op[lj] = dc.cc_op;
4692 tcg_ctx.gen_opc_instr_start[lj] = 1;
4693 tcg_ctx.gen_opc_icount[lj] = num_insns;
/* For icount, the last insn of an IO-ending TB needs special setup. */
4695 if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
4699 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4700 tcg_gen_debug_insn_start(dc.pc);
/* Stop at a debugger breakpoint on the current pc. */
4704 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
4705 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
4706 if (bp->pc == dc.pc) {
4707 status = EXIT_PC_STALE;
4713 if (status == NO_EXIT) {
4714 status = translate_one(env, &dc);
4717 /* If we reach a page boundary, are single stepping,
4718 or exhaust instruction count, stop generation. */
4719 if (status == NO_EXIT
4720 && (dc.pc >= next_page_start
4721 || tcg_ctx.gen_opc_ptr >= gen_opc_end
4722 || num_insns >= max_insns
4724 || env->singlestep_enabled)) {
4725 status = EXIT_PC_STALE;
4727 } while (status == NO_EXIT);
4729 if (tb->cflags & CF_LAST_IO) {
4738 update_psw_addr(&dc);
4740 case EXIT_PC_UPDATED:
4741 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
4742 cc op type is in env */
4744 /* Exit the TB, either by raising a debug exception or by return. */
4746 gen_exception(EXCP_DEBUG);
4755 gen_icount_end(tb, num_insns);
4756 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
/* Pad the instr_start side table out to the final op index. */
4758 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4761 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4764 tb->size = dc.pc - pc_start;
4765 tb->icount = num_insns;
/* Optionally log the guest code we just translated. */
4768 #if defined(S390X_DEBUG_DISAS)
4769 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
4770 qemu_log("IN: %s\n", lookup_symbol(pc_start));
4771 log_target_disas(env, pc_start, dc.pc - pc_start, 1);
/* Public entry point for normal TB translation; the final argument (0)
   distinguishes it from the _pc variant below. */
4777 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4779 gen_intermediate_code_internal(env, tb, 0);
/* Same as gen_intermediate_code but with the final argument set to 1 —
   presumably enabling the gen_opc_* PC-search side tables (confirm
   against the flag's use in gen_intermediate_code_internal). */
4782 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4784 gen_intermediate_code_internal(env, tb, 1);
4787 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4790 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4791 cc_op = gen_opc_cc_op[pc_pos];
4792 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {