4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
52 struct TranslationBlock *tb;
53 const DisasInsn *insn;
57 bool singlestep_enabled;
61 /* Information carried about a condition to be evaluated. */
68 struct { TCGv_i64 a, b; } s64;
69 struct { TCGv_i32 a, b; } s32;
75 static void gen_op_calc_cc(DisasContext *s);
77 #ifdef DEBUG_INLINE_BRANCHES
78 static uint64_t inline_branch_hit[CC_OP_MAX];
79 static uint64_t inline_branch_miss[CC_OP_MAX];
82 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
84 if (!(s->tb->flags & FLAG_MASK_64)) {
85 if (s->tb->flags & FLAG_MASK_32) {
86 return pc | 0x80000000;
92 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
98 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
99 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
101 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
102 env->psw.mask, env->psw.addr, env->cc_op);
105 for (i = 0; i < 16; i++) {
106 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
108 cpu_fprintf(f, "\n");
114 for (i = 0; i < 16; i++) {
115 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
117 cpu_fprintf(f, "\n");
123 #ifndef CONFIG_USER_ONLY
124 for (i = 0; i < 16; i++) {
125 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
127 cpu_fprintf(f, "\n");
134 #ifdef DEBUG_INLINE_BRANCHES
135 for (i = 0; i < CC_OP_MAX; i++) {
136 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
137 inline_branch_miss[i], inline_branch_hit[i]);
141 cpu_fprintf(f, "\n");
/* Guest PSW address and mask, exposed as TCG globals.  */
static TCGv_i64 psw_addr;
static TCGv_i64 psw_mask;
/* Lazy condition-code state: the operation selector plus up to three
   operands consumed by the calc_cc helper.  */
static TCGv_i32 cc_op;
static TCGv_i64 cc_src;
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;
/* Register-name storage ("r0".."r15" followed by "f0".."f15") and the
   TCG globals for the 16 general and 16 floating-point registers.  */
static char cpu_reg_names[32][4];
static TCGv_i64 regs[16];
static TCGv_i64 fregs[16];
/* Per-op cc_op values — presumably recorded for restoring cpu state at
   retranslation time (TODO confirm against the restore-state code).  */
static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
158 void s390x_translate_init(void)
162 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
163 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
164 offsetof(CPUS390XState, psw.addr),
166 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
167 offsetof(CPUS390XState, psw.mask),
170 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
172 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
174 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
176 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
179 for (i = 0; i < 16; i++) {
180 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
181 regs[i] = tcg_global_mem_new(TCG_AREG0,
182 offsetof(CPUS390XState, regs[i]),
186 for (i = 0; i < 16; i++) {
187 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
188 fregs[i] = tcg_global_mem_new(TCG_AREG0,
189 offsetof(CPUS390XState, fregs[i].d),
190 cpu_reg_names[i + 16]);
193 /* register helpers */
198 static TCGv_i64 load_reg(int reg)
200 TCGv_i64 r = tcg_temp_new_i64();
201 tcg_gen_mov_i64(r, regs[reg]);
205 static TCGv_i64 load_freg32_i64(int reg)
207 TCGv_i64 r = tcg_temp_new_i64();
208 tcg_gen_shri_i64(r, fregs[reg], 32);
212 static void store_reg(int reg, TCGv_i64 v)
214 tcg_gen_mov_i64(regs[reg], v);
217 static void store_freg(int reg, TCGv_i64 v)
219 tcg_gen_mov_i64(fregs[reg], v);
222 static void store_reg32_i64(int reg, TCGv_i64 v)
224 /* 32 bit register writes keep the upper half */
225 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
228 static void store_reg32h_i64(int reg, TCGv_i64 v)
230 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
233 static void store_freg32_i64(int reg, TCGv_i64 v)
235 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
238 static void return_low128(TCGv_i64 dest)
240 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
243 static void update_psw_addr(DisasContext *s)
246 tcg_gen_movi_i64(psw_addr, s->pc);
/* Called before emitting a memory helper that may fault, so the fault
   path can find consistent CPU state.  NOTE(review): the system-mode
   body inside the #ifndef is elided in this excerpt — presumably it
   flushes psw.addr and the cc (confirm against the full source).  */
static void potential_page_fault(DisasContext *s)
#ifndef CONFIG_USER_ONLY
257 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
259 return (uint64_t)cpu_lduw_code(env, pc);
262 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
264 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
267 static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
269 return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
272 static int get_mem_index(DisasContext *s)
274 switch (s->tb->flags & FLAG_MASK_ASC) {
275 case PSW_ASC_PRIMARY >> 32:
277 case PSW_ASC_SECONDARY >> 32:
279 case PSW_ASC_HOME >> 32:
287 static void gen_exception(int excp)
289 TCGv_i32 tmp = tcg_const_i32(excp);
290 gen_helper_exception(cpu_env, tmp);
291 tcg_temp_free_i32(tmp);
/* Raise program exception CODE for the current instruction: record the
   exception code and instruction length in env, raise EXCP_PGM, and end
   the TB.  NOTE(review): lines declaring tmp and advancing the pc /
   flushing the cc before the exception are elided in this excerpt.  */
static void gen_program_exception(DisasContext *s, int code)
    /* Remember what pgm exception this was. */
    tmp = tcg_const_i32(code);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
    tcg_temp_free_i32(tmp);
    /* Record the instruction length for the interrupt frame.  */
    tmp = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
    tcg_temp_free_i32(tmp);
    /* Advance past instruction. */
    /* Trigger exception. */
    gen_exception(EXCP_PGM);
    /* End the TB: nothing after a program exception may execute.  */
    s->is_jmp = DISAS_EXCP;
321 static inline void gen_illegal_opcode(DisasContext *s)
323 gen_program_exception(s, PGM_SPECIFICATION);
326 static inline void check_privileged(DisasContext *s)
328 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
329 gen_program_exception(s, PGM_PRIVILEGED);
333 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
337 /* 31-bitify the immediate part; register contents are dealt with below */
338 if (!(s->tb->flags & FLAG_MASK_64)) {
344 tmp = tcg_const_i64(d2);
345 tcg_gen_add_i64(tmp, tmp, regs[x2]);
350 tcg_gen_add_i64(tmp, tmp, regs[b2]);
354 tmp = tcg_const_i64(d2);
355 tcg_gen_add_i64(tmp, tmp, regs[b2]);
360 tmp = tcg_const_i64(d2);
363 /* 31-bit mode mask if there are values loaded from registers */
364 if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
365 tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
371 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
373 s->cc_op = CC_OP_CONST0 + val;
376 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
378 tcg_gen_discard_i64(cc_src);
379 tcg_gen_mov_i64(cc_dst, dst);
380 tcg_gen_discard_i64(cc_vr);
384 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
387 tcg_gen_mov_i64(cc_src, src);
388 tcg_gen_mov_i64(cc_dst, dst);
389 tcg_gen_discard_i64(cc_vr);
393 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
394 TCGv_i64 dst, TCGv_i64 vr)
396 tcg_gen_mov_i64(cc_src, src);
397 tcg_gen_mov_i64(cc_dst, dst);
398 tcg_gen_mov_i64(cc_vr, vr);
402 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
404 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
407 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
409 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
412 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
414 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
417 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
419 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
422 /* CC value is in env->cc_op */
423 static void set_cc_static(DisasContext *s)
425 tcg_gen_discard_i64(cc_src);
426 tcg_gen_discard_i64(cc_dst);
427 tcg_gen_discard_i64(cc_vr);
428 s->cc_op = CC_OP_STATIC;
431 static void gen_op_set_cc_op(DisasContext *s)
433 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
434 tcg_gen_movi_i32(cc_op, s->cc_op);
438 static void gen_update_cc_op(DisasContext *s)
/* calculates cc into cc_op */
/* Materialize the lazily-tracked condition code: dispatch on s->cc_op
   and invoke the calc_cc helper with 0, 1, 2 or 3 live operands, using
   DUMMY for the dead slots.  NOTE(review): the switch scaffolding (most
   case labels, break statements, braces) is elided in this excerpt.  */
static void gen_op_calc_cc(DisasContext *s)
    TCGv_i32 local_cc_op = tcg_const_i32(s->cc_op);
    TCGv_i64 dummy = tcg_const_i64(0);
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        /* env->cc_op already is the cc value */
        /* 1 operand: only cc_dst is live.  */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* 2 operands: cc_src and cc_dst are live.  */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        /* 3 operands: cc_src, cc_dst and cc_vr are live.  */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
    tcg_temp_free_i32(local_cc_op);
    tcg_temp_free_i64(dummy);
    /* We now have cc in cc_op as constant */
518 static int use_goto_tb(DisasContext *s, uint64_t dest)
520 /* NOTE: we handle the case where the TB spans two pages here */
521 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
522 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
523 && !s->singlestep_enabled
524 && !(s->tb->cflags & CF_LAST_IO));
527 static void account_noninline_branch(DisasContext *s, int cc_op)
529 #ifdef DEBUG_INLINE_BRANCHES
530 inline_branch_miss[cc_op]++;
534 static void account_inline_branch(DisasContext *s, int cc_op)
536 #ifdef DEBUG_INLINE_BRANCHES
537 inline_branch_hit[cc_op]++;
541 /* Table of mask values to comparison codes, given a comparison as input.
542 For a true comparison CC=3 will never be set, but we treat this
543 conservatively for possible use when CC=3 indicates overflow. */
544 static const TCGCond ltgt_cond[16] = {
545 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
546 TCG_COND_GT, TCG_COND_NEVER, /* | | GT | x */
547 TCG_COND_LT, TCG_COND_NEVER, /* | LT | | x */
548 TCG_COND_NE, TCG_COND_NEVER, /* | LT | GT | x */
549 TCG_COND_EQ, TCG_COND_NEVER, /* EQ | | | x */
550 TCG_COND_GE, TCG_COND_NEVER, /* EQ | | GT | x */
551 TCG_COND_LE, TCG_COND_NEVER, /* EQ | LT | | x */
552 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
555 /* Table of mask values to comparison codes, given a logic op as input.
556 For such, only CC=0 and CC=1 should be possible. */
557 static const TCGCond nz_cond[16] = {
559 TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
561 TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
563 TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
564 /* EQ | NE | x | x */
565 TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
568 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
569 details required to generate a TCG comparison. */
/* Interpret MASK in terms of S->CC_OP and fill in C with everything
   needed to generate the TCG comparison: an inlined test against the
   lazy cc operands when the cc_op/mask pair allows it, otherwise a test
   of the fully computed cc value.  NOTE(review): much of the switch
   scaffolding (case labels, breaks, goto targets, braces) is elided in
   this excerpt; comments annotate only the visible structure.  */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
    enum cc_op old_cc_op = s->cc_op;
    /* Unconditional masks never need the cc at all.  */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        /* Mark both operands as globals so free_compare() skips them.  */
        c->g1 = c->g2 = true;
    /* Find the TCG condition for the mask + cc op. */
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
        account_inline_branch(s, old_cc_op);
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        /* Same table, but with the unsigned flavor of each condition.  */
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
        account_inline_branch(s, old_cc_op);
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
        account_inline_branch(s, old_cc_op);
        account_inline_branch(s, old_cc_op);
        account_inline_branch(s, old_cc_op);
        /* FLOGR-style result: only bits 8 and 2 of the mask matter.  */
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
        case 2: /* src != 0 -> one bit found */
        account_inline_branch(s, old_cc_op);
        /* Calculate cc value. */
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
    /* Load up the arguments of the comparison. */
    c->g1 = c->g2 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
    case CC_OP_LTUGTU_32:
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
        c->u.s64.b = tcg_const_i64(0);
    case CC_OP_LTUGTU_64:
        /* Compare cc_src/cc_dst directly; they are globals, not temps.  */
        c->g1 = c->g2 = true;
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        /* cc is computed: pick the cheapest test of the 4-valued cc.  */
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            c->u.s32.b = tcg_const_i32(3);
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            c->u.s32.b = tcg_const_i32(2);
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            c->u.s32.b = tcg_const_i32(1);
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
        case 0x8 | 0x4: /* cc < 2 */
            c->u.s32.b = tcg_const_i32(2);
        case 0x8: /* cc == 0 */
            c->u.s32.b = tcg_const_i32(0);
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            c->u.s32.b = tcg_const_i32(0);
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
        case 0x4: /* cc == 1 */
            c->u.s32.b = tcg_const_i32(1);
        case 0x2 | 0x1: /* cc > 1 */
            c->u.s32.b = tcg_const_i32(1);
        case 0x2: /* cc == 2 */
            c->u.s32.b = tcg_const_i32(2);
        case 0x1: /* cc == 3 */
            c->u.s32.b = tcg_const_i32(3);
            /* CC is masked by something else: (8 >> cc) & mask. */
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
791 static void free_compare(DisasCompare *c)
795 tcg_temp_free_i64(c->u.s64.a);
797 tcg_temp_free_i32(c->u.s32.a);
802 tcg_temp_free_i64(c->u.s64.b);
804 tcg_temp_free_i32(c->u.s32.b);
809 /* ====================================================================== */
810 /* Define the insn format enumeration. */
811 #define F0(N) FMT_##N,
812 #define F1(N, X1) F0(N)
813 #define F2(N, X1, X2) F0(N)
814 #define F3(N, X1, X2, X3) F0(N)
815 #define F4(N, X1, X2, X3, X4) F0(N)
816 #define F5(N, X1, X2, X3, X4, X5) F0(N)
819 #include "insn-format.def"
829 /* Define a structure to hold the decoded fields. We'll store each inside
830 an array indexed by an enum. In order to conserve memory, we'll arrange
831 for fields that do not exist at the same time to overlap, thus the "C"
832 for compact. For checking purposes there is an "O" for original index
833 as well that will be applied to availability bitmaps. */
835 enum DisasFieldIndexO {
858 enum DisasFieldIndexC {
892 unsigned presentC:16;
893 unsigned int presentO;
897 /* This is the way fields are to be accessed out of DisasFields. */
898 #define have_field(S, F) have_field1((S), FLD_O_##F)
899 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
901 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
903 return (f->presentO >> c) & 1;
906 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
907 enum DisasFieldIndexC c)
909 assert(have_field1(f, o));
913 /* Describe the layout of each field in each format. */
914 typedef struct DisasField {
918 unsigned int indexC:6;
919 enum DisasFieldIndexO indexO:8;
922 typedef struct DisasFormatInfo {
923 DisasField op[NUM_C_FIELD];
926 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
927 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
928 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
929 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
930 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
931 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
932 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
933 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
934 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
935 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
936 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
937 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
938 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
939 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
941 #define F0(N) { { } },
942 #define F1(N, X1) { { X1 } },
943 #define F2(N, X1, X2) { { X1, X2 } },
944 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
945 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
946 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
948 static const DisasFormatInfo format_info[] = {
949 #include "insn-format.def"
967 /* Generally, we'll extract operands into this structures, operate upon
968 them, and store them back. See the "in1", "in2", "prep", "wout" sets
969 of routines below for more details. */
971 bool g_out, g_out2, g_in1, g_in2;
972 TCGv_i64 out, out2, in1, in2;
976 /* Return values from translate_one, indicating the state of the TB. */
978 /* Continue the TB. */
980 /* We have emitted one or more goto_tb. No fixup required. */
982 /* We are not using a goto_tb (for whatever reason), but have updated
983 the PC (for whatever reason), so there's no need to do it again on
986 /* We are exiting the TB, but have neither emitted a goto_tb, nor
987 updated the PC for the next instruction to be executed. */
989 /* We are ending the TB with a noreturn function call, e.g. longjmp.
990 No following code will be executed. */
994 typedef enum DisasFacility {
995 FAC_Z, /* zarch (default) */
996 FAC_CASS, /* compare and swap and store */
997 FAC_CASS2, /* compare and swap and store 2*/
998 FAC_DFP, /* decimal floating point */
999 FAC_DFPR, /* decimal floating point rounding */
1000 FAC_DO, /* distinct operands */
1001 FAC_EE, /* execute extensions */
1002 FAC_EI, /* extended immediate */
1003 FAC_FPE, /* floating point extension */
1004 FAC_FPSSH, /* floating point support sign handling */
1005 FAC_FPRGR, /* FPR-GR transfer */
1006 FAC_GIE, /* general instructions extension */
1007 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1008 FAC_HW, /* high-word */
1009 FAC_IEEEE_SIM, /* IEEE exception sumilation */
1010 FAC_LOC, /* load/store on condition */
1011 FAC_LD, /* long displacement */
1012 FAC_PC, /* population count */
1013 FAC_SCF, /* store clock fast */
1014 FAC_SFLE, /* store facility list extended */
1020 DisasFacility fac:6;
1024 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1025 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1026 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1027 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1028 void (*help_cout)(DisasContext *, DisasOps *);
1029 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1034 /* ====================================================================== */
1035 /* Miscelaneous helpers, used by several operations. */
1037 static void help_l2_shift(DisasContext *s, DisasFields *f,
1038 DisasOps *o, int mask)
1040 int b2 = get_field(f, b2);
1041 int d2 = get_field(f, d2);
1044 o->in2 = tcg_const_i64(d2 & mask);
1046 o->in2 = get_address(s, 0, b2, d2);
1047 tcg_gen_andi_i64(o->in2, o->in2, mask);
1051 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1053 if (dest == s->next_pc) {
1056 if (use_goto_tb(s, dest)) {
1057 gen_update_cc_op(s);
1059 tcg_gen_movi_i64(psw_addr, dest);
1060 tcg_gen_exit_tb((tcg_target_long)s->tb);
1061 return EXIT_GOTO_TB;
1063 tcg_gen_movi_i64(psw_addr, dest);
1064 return EXIT_PC_UPDATED;
/* Emit a (possibly conditional) branch to an immediate target IMM
   (halfword-scaled) or register target CDEST, choosing among: a pair of
   goto_tb exits, a goto_tb fallthrough with dynamic taken branch, or a
   fully dynamic movcond update of psw_addr.  NOTE(review): several
   lines (ret/lab declarations, is_64 if/else scaffolding, labels,
   braces) are elided in this excerpt.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
    uint64_t dest = s->pc + 2 * imm;  /* immediate counts halfwords */
    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
    if (dest == s->next_pc) {
        /* Branch to next. */
    if (c->cond == TCG_COND_ALWAYS) {
        ret = help_goto_direct(s, dest);
    if (TCGV_IS_UNUSED_I64(cdest)) {
        /* E.g. bcr %r0 -> no branch. */
    if (c->cond == TCG_COND_ALWAYS) {
        tcg_gen_mov_i64(psw_addr, cdest);
        ret = EXIT_PC_UPDATED;
    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            gen_update_cc_op(s);
            lab = gen_new_label();
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            /* Branch not taken. */
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
            /* Branch taken. */
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
                tcg_gen_mov_i64(psw_addr, cdest);
            lab = gen_new_label();
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            /* Branch not taken. */
            gen_update_cc_op(s);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
            /* Taken: psw_addr already holds cdest, or load the imm.  */
                tcg_gen_movi_i64(psw_addr, dest);
            ret = EXIT_PC_UPDATED;
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit. */
        TCGv_i64 next = tcg_const_i64(s->next_pc);
            cdest = tcg_const_i64(dest);
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
            /* 32-bit compare widened so movcond_i64 can select the pc.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
            tcg_temp_free_i64(cdest);
        tcg_temp_free_i64(next);
        ret = EXIT_PC_UPDATED;
1193 /* ====================================================================== */
1194 /* The operations. These perform the bulk of the work for any insn,
1195 usually after the operands have been loaded and output initialized. */
1197 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1199 gen_helper_abs_i64(o->out, o->in2);
1203 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1205 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1209 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1211 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1215 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1217 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1218 tcg_gen_mov_i64(o->out2, o->in2);
1222 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1224 tcg_gen_add_i64(o->out, o->in1, o->in2);
1228 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1232 tcg_gen_add_i64(o->out, o->in1, o->in2);
1234 /* XXX possible optimization point */
1236 cc = tcg_temp_new_i64();
1237 tcg_gen_extu_i32_i64(cc, cc_op);
1238 tcg_gen_shri_i64(cc, cc, 1);
1240 tcg_gen_add_i64(o->out, o->out, cc);
1241 tcg_temp_free_i64(cc);
1245 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1247 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1251 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1253 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1257 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1259 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1260 return_low128(o->out2);
1264 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1266 tcg_gen_and_i64(o->out, o->in1, o->in2);
1270 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1272 int shift = s->insn->data & 0xff;
1273 int size = s->insn->data >> 8;
1274 uint64_t mask = ((1ull << size) - 1) << shift;
1277 tcg_gen_shli_i64(o->in2, o->in2, shift);
1278 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1279 tcg_gen_and_i64(o->out, o->in1, o->in2);
1281 /* Produce the CC from only the bits manipulated. */
1282 tcg_gen_andi_i64(cc_dst, o->out, mask);
1283 set_cc_nz_u64(s, cc_dst);
1287 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1289 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1290 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1291 tcg_gen_mov_i64(psw_addr, o->in2);
1292 return EXIT_PC_UPDATED;
1298 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1300 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1301 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1304 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1306 int m1 = get_field(s->fields, m1);
1307 bool is_imm = have_field(s->fields, i2);
1308 int imm = is_imm ? get_field(s->fields, i2) : 0;
1311 disas_jcc(s, &c, m1);
1312 return help_branch(s, &c, is_imm, imm, o->in2);
1315 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1317 int r1 = get_field(s->fields, r1);
1318 bool is_imm = have_field(s->fields, i2);
1319 int imm = is_imm ? get_field(s->fields, i2) : 0;
1323 c.cond = TCG_COND_NE;
1328 t = tcg_temp_new_i64();
1329 tcg_gen_subi_i64(t, regs[r1], 1);
1330 store_reg32_i64(r1, t);
1331 c.u.s32.a = tcg_temp_new_i32();
1332 c.u.s32.b = tcg_const_i32(0);
1333 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1334 tcg_temp_free_i64(t);
1336 return help_branch(s, &c, is_imm, imm, o->in2);
1339 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1341 int r1 = get_field(s->fields, r1);
1342 bool is_imm = have_field(s->fields, i2);
1343 int imm = is_imm ? get_field(s->fields, i2) : 0;
1346 c.cond = TCG_COND_NE;
1351 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1352 c.u.s64.a = regs[r1];
1353 c.u.s64.b = tcg_const_i64(0);
1355 return help_branch(s, &c, is_imm, imm, o->in2);
1358 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1360 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1365 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1367 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1372 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1374 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1379 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1381 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1382 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1383 tcg_temp_free_i32(m3);
1384 gen_set_cc_nz_f32(s, o->in2);
1388 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1390 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1391 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1392 tcg_temp_free_i32(m3);
1393 gen_set_cc_nz_f64(s, o->in2);
1397 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1399 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1400 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1401 tcg_temp_free_i32(m3);
1402 gen_set_cc_nz_f128(s, o->in1, o->in2);
1406 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1408 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1409 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1410 tcg_temp_free_i32(m3);
1411 gen_set_cc_nz_f32(s, o->in2);
1415 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1417 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1418 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1419 tcg_temp_free_i32(m3);
1420 gen_set_cc_nz_f64(s, o->in2);
1424 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1426 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1427 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1428 tcg_temp_free_i32(m3);
1429 gen_set_cc_nz_f128(s, o->in1, o->in2);
1433 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1435 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1436 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1437 tcg_temp_free_i32(m3);
1441 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1443 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1444 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1445 tcg_temp_free_i32(m3);
1449 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1451 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1452 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1453 tcg_temp_free_i32(m3);
1454 return_low128(o->out2);
1458 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1460 int r2 = get_field(s->fields, r2);
1461 TCGv_i64 len = tcg_temp_new_i64();
1463 potential_page_fault(s);
1464 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1466 return_low128(o->out);
1468 tcg_gen_add_i64(regs[r2], regs[r2], len);
1469 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1470 tcg_temp_free_i64(len);
1475 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1477 int l = get_field(s->fields, l1);
1482 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1483 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1486 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1487 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1490 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1491 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1494 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1495 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1498 potential_page_fault(s);
1499 vl = tcg_const_i32(l);
1500 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1501 tcg_temp_free_i32(vl);
1505 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1509 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1511 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1512 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1513 potential_page_fault(s);
1514 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1515 tcg_temp_free_i32(r1);
1516 tcg_temp_free_i32(r3);
1521 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1523 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1524 TCGv_i32 t1 = tcg_temp_new_i32();
1525 tcg_gen_trunc_i64_i32(t1, o->in1);
1526 potential_page_fault(s);
1527 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1529 tcg_temp_free_i32(t1);
1530 tcg_temp_free_i32(m3);
1534 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1536 potential_page_fault(s);
1537 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1539 return_low128(o->in2);
1543 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1545 int r3 = get_field(s->fields, r3);
1546 potential_page_fault(s);
1547 gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);
1552 static ExitStatus op_csg(DisasContext *s, DisasOps *o)
1554 int r3 = get_field(s->fields, r3);
1555 potential_page_fault(s);
1556 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);
1561 #ifndef CONFIG_USER_ONLY
/* CSP: COMPARE AND SWAP AND PURGE -- privileged; done in the helper. */
1562 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
1564 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1565 check_privileged(s);
1566 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
1567 tcg_temp_free_i32(r1);
/* CDS: COMPARE DOUBLE AND SWAP.  The even/odd 32-bit register pair
   r3/r3+1 is packed into one 64-bit value so the 64-bit CSG helper can
   perform the doubleword exchange. */
1573 static ExitStatus op_cds(DisasContext *s, DisasOps *o)
1575 int r3 = get_field(s->fields, r3);
1576 TCGv_i64 in3 = tcg_temp_new_i64();
1577 tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
1578 potential_page_fault(s);
1579 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
1580 tcg_temp_free_i64(in3);
/* CDSG: COMPARE DOUBLE AND SWAP (128-bit) -- helper-based for now. */
1585 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1587 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1588 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1589 potential_page_fault(s);
1590 /* XXX rewrite in tcg */
1591 gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
/* CVD: CONVERT TO DECIMAL -- helper produces the packed-decimal image of
   the low 32 bits of in1, which is then stored as a doubleword. */
1596 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
1598 TCGv_i64 t1 = tcg_temp_new_i64();
1599 TCGv_i32 t2 = tcg_temp_new_i32();
1600 tcg_gen_trunc_i64_i32(t2, o->in1);
1601 gen_helper_cvd(t1, t2);
1602 tcg_temp_free_i32(t2);
1603 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
1604 tcg_temp_free_i64(t1);
1608 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE -- privileged hypervisor call; the function code comes from
   the displacement field. */
1609 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
1613 check_privileged(s);
1614 potential_page_fault(s);
1616 /* We pretend the format is RX_a so that D2 is the field we want. */
1617 tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
1618 gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
1619 tcg_temp_free_i32(tmp);
/* Integer divides.  The helpers return quotient/remainder as a pair:
   one half in the destination temp, the other via return_low128. */
1624 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
1626 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
1627 return_low128(o->out);
1631 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
1633 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
1634 return_low128(o->out);
1638 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
1640 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
1641 return_low128(o->out);
/* 64-bit unsigned divide takes a 128-bit dividend in out:out2. */
1645 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
1647 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
1648 return_low128(o->out);
/* BFP divides: short (DEB), long (DDB) and extended (DXB, 128-bit). */
1652 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
1654 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
1658 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
1660 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
1664 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
1666 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1667 return_low128(o->out2);
1671 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
1673 int r2 = get_field(s->fields, r2);
1674 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
1678 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
1680 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
1684 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
1686 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
1687 tb->flags, (ab)use the tb->cs_base field as the address of
1688 the template in memory, and grab 8 bits of tb->flags/cflags for
1689 the contents of the register. We would then recognize all this
1690 in gen_intermediate_code_internal, generating code for exactly
1691 one instruction. This new TB then gets executed normally.
1693 On the other hand, this seems to be mostly used for modifying
1694 MVC inside of memcpy, which needs a helper call anyway. So
1695 perhaps this doesn't bear thinking about any further. */
1702 tmp = tcg_const_i64(s->next_pc);
1703 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
1704 tcg_temp_free_i64(tmp);
/* FLOGR: FIND LEFTMOST ONE.  R1 gets the bit number of the leftmost one
   (or 64 for zero input); R1+1 gets the input with that bit cleared. */
1710 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
1712 /* We'll use the original input for cc computation, since we get to
1713 compare that against 0, which ought to be better than comparing
1714 the real output against 64. It also lets cc_dst be a convenient
1715 temporary during our computation. */
1716 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
1718 /* R1 = IN ? CLZ(IN) : 64. */
1719 gen_helper_clz(o->out, o->in2);
1721 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
1722 value by 64, which is undefined. But since the shift is 64 iff the
1723 input is zero, we still get the correct result after and'ing. */
1724 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
1725 tcg_gen_shr_i64(o->out2, o->out2, o->out);
1726 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
/* ICM: INSERT CHARACTERS UNDER MASK.  Contiguous masks become one wide
   load + deposit; other masks fall back to byte-at-a-time loads.
   NOTE(review): the switch/case scaffolding dispatching on m3 is missing
   from this truncated view. */
1730 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
1732 int m3 = get_field(s->fields, m3);
1733 int pos, len, base = s->insn->data;
1734 TCGv_i64 tmp = tcg_temp_new_i64();
1739 /* Effectively a 32-bit load. */
1740 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
1747 /* Effectively a 16-bit load. */
1748 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
1756 /* Effectively an 8-bit load. */
1757 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
1762 pos = base + ctz32(m3) * 8;
1763 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
1764 ccm = ((1ull << len) - 1) << pos;
1768 /* This is going to be a sequence of loads and inserts. */
1769 pos = base + 32 - 8;
1773 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
1774 tcg_gen_addi_i64(o->in2, o->in2, 1);
1775 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
1778 m3 = (m3 << 1) & 0xf;
1784 tcg_gen_movi_i64(tmp, ccm);
1785 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
1786 tcg_temp_free_i64(tmp);
/* Immediate insert: deposit in2 into in1 at the shift/size encoded in
   the instruction table's data word (low byte = shift, high = size). */
1790 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
1792 int shift = s->insn->data & 0xff;
1793 int size = s->insn->data >> 8;
1794 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
/* IPM: INSERT PROGRAM MASK -- build bits 63-32 of R1 from the PSW
   program mask (bits extracted from psw_mask) and the current cc. */
1798 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
1803 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
1805 t1 = tcg_temp_new_i64();
1806 tcg_gen_shli_i64(t1, psw_mask, 20);
1807 tcg_gen_shri_i64(t1, t1, 36);
1808 tcg_gen_or_i64(o->out, o->out, t1);
1810 tcg_gen_extu_i32_i64(t1, cc_op);
1811 tcg_gen_shli_i64(t1, t1, 28);
1812 tcg_gen_or_i64(o->out, o->out, t1);
1813 tcg_temp_free_i64(t1);
1817 #ifndef CONFIG_USER_ONLY
/* IPTE: INVALIDATE PAGE TABLE ENTRY -- privileged, helper-based. */
1818 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
1820 check_privileged(s);
1821 gen_helper_ipte(cpu_env, o->in1, o->in2);
/* ISKE: INSERT STORAGE KEY EXTENDED -- privileged, helper-based. */
1825 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
1827 check_privileged(s);
1828 gen_helper_iske(o->out, cpu_env, o->in2);
/* BFP load-and-convert operations: LDEB (short->long), LEDB
   (long->short), LDXB/LEXB (extended->long/short, rounding), and
   LXDB/LXEB (long/short->extended, widening to a 128-bit pair). */
1833 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
1835 gen_helper_ldeb(o->out, cpu_env, o->in2);
1839 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
1841 gen_helper_ledb(o->out, cpu_env, o->in2);
1845 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
1847 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
1851 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
1853 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
1857 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
1859 gen_helper_lxdb(o->out, cpu_env, o->in2);
1860 return_low128(o->out2);
1864 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
1866 gen_helper_lxeb(o->out, cpu_env, o->in2);
1867 return_low128(o->out2);
/* LLGT: LOAD LOGICAL THIRTY ONE BITS -- mask to the low 31 bits. */
1871 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
1873 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
/* Plain memory loads of various widths and extensions. */
1877 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
1879 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
1883 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
1885 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
1889 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
1891 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
1895 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
1897 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
1901 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
1903 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
1907 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
1909 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
1913 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
1915 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
1919 #ifndef CONFIG_USER_ONLY
/* LCTL: LOAD CONTROL (32-bit control registers r1..r3) -- privileged. */
1920 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
1922 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1923 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1924 check_privileged(s);
1925 potential_page_fault(s);
1926 gen_helper_lctl(cpu_env, r1, o->in2, r3);
1927 tcg_temp_free_i32(r1);
1928 tcg_temp_free_i32(r3);
/* LCTLG: LOAD CONTROL (64-bit) -- privileged. */
1932 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
1934 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1935 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1936 check_privileged(s);
1937 potential_page_fault(s);
1938 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
1939 tcg_temp_free_i32(r1);
1940 tcg_temp_free_i32(r3);
/* LRA: LOAD REAL ADDRESS -- privileged address translation via helper. */
1943 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
1945 check_privileged(s);
1946 potential_page_fault(s);
1947 gen_helper_lra(o->out, cpu_env, o->in2);
/* LPSW: LOAD PSW (short, ESA format) -- load two words, widen the
   32-bit mask into the 64-bit internal form, and install the new PSW.
   Control flow is no longer sequential, hence EXIT_NORETURN. */
1952 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
1956 check_privileged(s);
1958 t1 = tcg_temp_new_i64();
1959 t2 = tcg_temp_new_i64();
1960 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
1961 tcg_gen_addi_i64(o->in2, o->in2, 4);
1962 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
1963 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
1964 tcg_gen_shli_i64(t1, t1, 32);
1965 gen_helper_load_psw(cpu_env, t1, t2);
1966 tcg_temp_free_i64(t1);
1967 tcg_temp_free_i64(t2);
1968 return EXIT_NORETURN;
/* LPSWE: LOAD PSW EXTENDED -- 16-byte (two doubleword) PSW. */
1971 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
1975 check_privileged(s);
1977 t1 = tcg_temp_new_i64();
1978 t2 = tcg_temp_new_i64();
1979 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
1980 tcg_gen_addi_i64(o->in2, o->in2, 8);
1981 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
1982 gen_helper_load_psw(cpu_env, t1, t2);
1983 tcg_temp_free_i64(t1);
1984 tcg_temp_free_i64(t2);
1985 return EXIT_NORETURN;
/* LAM: LOAD ACCESS MULTIPLE -- helper loads access regs r1..r3. */
1989 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
1991 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1992 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1993 potential_page_fault(s);
1994 gen_helper_lam(cpu_env, r1, o->in2, r3);
1995 tcg_temp_free_i32(r1);
1996 tcg_temp_free_i32(r3);
/* LM (32-bit): load registers r1..r3 from successive words.
   NOTE(review): the loop/termination statements are missing from this
   truncated view; only the per-register body is visible. */
2000 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2002 int r1 = get_field(s->fields, r1);
2003 int r3 = get_field(s->fields, r3);
2004 TCGv_i64 t = tcg_temp_new_i64();
2005 TCGv_i64 t4 = tcg_const_i64(4);
2008 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2009 store_reg32_i64(r1, t);
2013 tcg_gen_add_i64(o->in2, o->in2, t4);
2017 tcg_temp_free_i64(t);
2018 tcg_temp_free_i64(t4);
/* LMH: LOAD MULTIPLE HIGH -- like LM but writes the high halves. */
2022 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2024 int r1 = get_field(s->fields, r1);
2025 int r3 = get_field(s->fields, r3);
2026 TCGv_i64 t = tcg_temp_new_i64();
2027 TCGv_i64 t4 = tcg_const_i64(4);
2030 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2031 store_reg32h_i64(r1, t);
2035 tcg_gen_add_i64(o->in2, o->in2, t4);
2039 tcg_temp_free_i64(t);
2040 tcg_temp_free_i64(t4);
/* LMG: LOAD MULTIPLE (64-bit) -- full doubleword per register. */
2044 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2046 int r1 = get_field(s->fields, r1);
2047 int r3 = get_field(s->fields, r3);
2048 TCGv_i64 t8 = tcg_const_i64(8);
2051 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2055 tcg_gen_add_i64(o->in2, o->in2, t8);
2059 tcg_temp_free_i64(t8);
/* Generic move: alias in2 as the output, transferring "global" (don't
   free) ownership and clearing in2 so it isn't freed twice. */
2063 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2066 o->g_out = o->g_in2;
2067 TCGV_UNUSED_I64(o->in2);
/* 128-bit move: alias both input halves as the output pair. */
2072 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2076 o->g_out = o->g_in1;
2077 o->g_out2 = o->g_in2;
2078 TCGV_UNUSED_I64(o->in1);
2079 TCGV_UNUSED_I64(o->in2);
2080 o->g_in1 = o->g_in2 = false;
/* MVC: MOVE (characters), length l1, helper-based. */
2084 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2086 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2087 potential_page_fault(s);
2088 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2089 tcg_temp_free_i32(l);
/* MVCL: MOVE LONG -- register-pair described operands; cc from helper. */
2093 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2095 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2096 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2097 potential_page_fault(s);
2098 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2099 tcg_temp_free_i32(r1);
2100 tcg_temp_free_i32(r2);
/* MVCLE: MOVE LONG EXTENDED -- cc from helper. */
2105 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2107 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2108 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2109 potential_page_fault(s);
2110 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2111 tcg_temp_free_i32(r1);
2112 tcg_temp_free_i32(r3);
2117 #ifndef CONFIG_USER_ONLY
/* MVCP: MOVE TO PRIMARY -- privileged cross-space move; note the length
   is taken from the l1 field and used to index regs[]. */
2118 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2120 int r1 = get_field(s->fields, l1);
2121 check_privileged(s);
2122 potential_page_fault(s);
2123 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
/* MVCS: MOVE TO SECONDARY -- mirror image of MVCP. */
2128 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2130 int r1 = get_field(s->fields, l1);
2131 check_privileged(s);
2132 potential_page_fault(s);
2133 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
/* MVPG: MOVE PAGE -- helper-based, regs[0] carries the option bits. */
2139 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2141 potential_page_fault(s);
2142 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
/* MVST: MOVE STRING -- helper returns the updated addresses. */
2147 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2149 potential_page_fault(s);
2150 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2152 return_low128(o->in2);
/* Plain integer multiply (low 64 bits of the product). */
2156 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2158 tcg_gen_mul_i64(o->out, o->in1, o->in2);
/* 64x64->128 multiply via helper; low half via return_low128. */
2162 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2164 gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
2165 return_low128(o->out2);
/* BFP multiplies: MEEB (short), MDEB (short->long), MDB (long),
   MXB (extended) and MXDB (long->extended). */
2169 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2171 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2175 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2177 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2181 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2183 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2187 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2189 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2190 return_low128(o->out2);
2194 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2196 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2197 return_low128(o->out2);
/* BFP fused multiply-add/subtract: MAEB/MADB and MSEB/MSDB.  The short
   forms load f-reg r3 as a 32-bit value into a fresh i64 temp. */
2201 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2203 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2204 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2205 tcg_temp_free_i64(r3);
2209 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2211 int r3 = get_field(s->fields, r3);
2212 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2216 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2218 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2219 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2220 tcg_temp_free_i64(r3);
2224 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2226 int r3 = get_field(s->fields, r3);
2227 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
/* LNR family: LOAD NEGATIVE -- integer via helper; FP forms simply set
   the sign bit (0x80... for the width in question). */
2231 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2233 gen_helper_nabs_i64(o->out, o->in2);
2237 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2239 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2243 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2245 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2249 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2251 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2252 tcg_gen_mov_i64(o->out2, o->in2);
/* NC: AND (characters) -- storage-to-storage AND; cc from helper. */
2256 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2258 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2259 potential_page_fault(s);
2260 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2261 tcg_temp_free_i32(l);
/* LCR family: LOAD COMPLEMENT -- integer negate; FP forms flip the
   sign bit with XOR. */
2266 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2268 tcg_gen_neg_i64(o->out, o->in2);
2272 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2274 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2278 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2280 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
2284 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2286 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2287 tcg_gen_mov_i64(o->out2, o->in2);
/* OC: OR (characters) -- storage-to-storage OR; cc from helper. */
2291 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2293 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2294 potential_page_fault(s);
2295 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2296 tcg_temp_free_i32(l);
/* Register/register OR. */
2301 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2303 tcg_gen_or_i64(o->out, o->in1, o->in2);
/* OR immediate into a sub-field of in1; the insn table's data word
   encodes shift (low byte) and field size (high bits).  cc is derived
   from only the bits the immediate could have changed. */
2307 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2309 int shift = s->insn->data & 0xff;
2310 int size = s->insn->data >> 8;
2311 uint64_t mask = ((1ull << size) - 1) << shift;
2314 tcg_gen_shli_i64(o->in2, o->in2, shift);
2315 tcg_gen_or_i64(o->out, o->in1, o->in2);
2317 /* Produce the CC from only the bits manipulated. */
2318 tcg_gen_andi_i64(cc_dst, o->out, mask);
2319 set_cc_nz_u64(s, cc_dst);
2323 #ifndef CONFIG_USER_ONLY
/* PTLB: PURGE TLB -- privileged, helper flushes translation state. */
2324 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
2326 check_privileged(s);
2327 gen_helper_ptlb(cpu_env);
/* Byte-reversal (LRVR family) at 16/32/64-bit widths. */
2332 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2334 tcg_gen_bswap16_i64(o->out, o->in2);
2338 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2340 tcg_gen_bswap32_i64(o->out, o->in2);
2344 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2346 tcg_gen_bswap64_i64(o->out, o->in2);
/* RLL: ROTATE LEFT SINGLE LOGICAL (32-bit) -- done in i32 temps since
   TCG's rotl is per-width; result zero-extended back to i64. */
2350 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2352 TCGv_i32 t1 = tcg_temp_new_i32();
2353 TCGv_i32 t2 = tcg_temp_new_i32();
2354 TCGv_i32 to = tcg_temp_new_i32();
2355 tcg_gen_trunc_i64_i32(t1, o->in1);
2356 tcg_gen_trunc_i64_i32(t2, o->in2);
2357 tcg_gen_rotl_i32(to, t1, t2);
2358 tcg_gen_extu_i32_i64(o->out, to);
2359 tcg_temp_free_i32(t1);
2360 tcg_temp_free_i32(t2);
2361 tcg_temp_free_i32(to);
/* RLLG: 64-bit rotate left. */
2365 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2367 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
2371 #ifndef CONFIG_USER_ONLY
/* RRBE: RESET REFERENCE BIT EXTENDED -- privileged; cc from helper. */
2372 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
2374 check_privileged(s);
2375 gen_helper_rrbe(cc_op, cpu_env, o->in2);
/* SACF: SET ADDRESS SPACE CONTROL FAST -- changes addressing mode, so
   translation of this block cannot continue (EXIT_PC_STALE). */
2380 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
2382 check_privileged(s);
2383 gen_helper_sacf(cpu_env, o->in2);
2384 /* Addressing mode has changed, so end the block. */
2385 return EXIT_PC_STALE;
/* SAR: SET ACCESS -- write low 32 bits of in2 into access reg r1. */
2389 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
2391 int r1 = get_field(s->fields, r1);
2392 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
/* BFP subtract (SEB/SDB/SXB) and square root (SQEB/SQDB/SQXB); the
   extended forms operate on and return 128-bit pairs. */
2396 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
2398 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
2402 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
2404 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
2408 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
2410 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2411 return_low128(o->out2);
2415 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
2417 gen_helper_sqeb(o->out, cpu_env, o->in2);
2421 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
2423 gen_helper_sqdb(o->out, cpu_env, o->in2);
2427 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
2429 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
2430 return_low128(o->out2);
2434 #ifndef CONFIG_USER_ONLY
/* SERVC: SERVICE CALL (SCLP) -- privileged; cc from helper. */
2435 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
2437 check_privileged(s);
2438 potential_page_fault(s);
2439 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
/* SIGP: SIGNAL PROCESSOR -- privileged inter-CPU order; cc from helper. */
2444 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
2446 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2447 check_privileged(s);
2448 potential_page_fault(s);
2449 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
2450 tcg_temp_free_i32(r1);
/* SLA: SHIFT LEFT SINGLE (arithmetic).  insn->data is the sign-bit
   position (31 or 63), selecting the 32- vs 64-bit cc computation. */
2455 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
2457 uint64_t sign = 1ull << s->insn->data;
2458 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
2459 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
2460 tcg_gen_shl_i64(o->out, o->in1, o->in2);
2461 /* The arithmetic left shift is curious in that it does not affect
2462 the sign bit. Copy that over from the source unchanged. */
2463 tcg_gen_andi_i64(o->out, o->out, ~sign);
2464 tcg_gen_andi_i64(o->in1, o->in1, sign);
2465 tcg_gen_or_i64(o->out, o->out, o->in1);
/* Logical/arithmetic shifts: SLL, SRA, SRL -- direct TCG shifts. */
2469 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
2471 tcg_gen_shl_i64(o->out, o->in1, o->in2);
2475 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
2477 tcg_gen_sar_i64(o->out, o->in1, o->in2);
2481 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
2483 tcg_gen_shr_i64(o->out, o->in1, o->in2);
/* SFPC: SET FPC -- install a new floating-point control register. */
2487 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
2489 gen_helper_sfpc(cpu_env, o->in2);
2493 #ifndef CONFIG_USER_ONLY
/* SPKA: SET PSW KEY FROM ADDRESS -- deposit the 4-bit key from the
   address operand into the PSW key field. */
2494 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
2496 check_privileged(s);
2497 tcg_gen_shri_i64(o->in2, o->in2, 4);
2498 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
/* SSKE: SET STORAGE KEY EXTENDED -- privileged, helper-based. */
2502 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
2504 check_privileged(s);
2505 gen_helper_sske(cpu_env, o->in1, o->in2);
/* SSM: SET SYSTEM MASK -- replace PSW mask bits 0-7. */
2509 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
2511 check_privileged(s);
2512 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
/* STAP: STORE CPU ADDRESS. */
2516 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
2518 check_privileged(s);
2519 /* ??? Surely cpu address != cpu number. In any case the previous
2520 version of this stored more than the required half-word, so it
2521 is unlikely this has ever been tested. */
2522 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
/* STCK: STORE CLOCK -- TOD clock via helper; cc forced to 0. */
2526 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
2528 gen_helper_stck(o->out, cpu_env);
2529 /* ??? We don't implement clock states. */
2530 gen_op_movi_cc(s, 0);
/* STCKE: STORE CLOCK EXTENDED -- widen the 64-bit TOD value into the
   architected 104-bit layout across two stored doublewords. */
2534 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
2536 TCGv_i64 c1 = tcg_temp_new_i64();
2537 TCGv_i64 c2 = tcg_temp_new_i64();
2538 gen_helper_stck(c1, cpu_env);
2539 /* Shift the 64-bit value into its place as a zero-extended
2540 104-bit value. Note that "bit positions 64-103 are always
2541 non-zero so that they compare differently to STCK"; we set
2542 the least significant bit to 1. */
2543 tcg_gen_shli_i64(c2, c1, 56);
2544 tcg_gen_shri_i64(c1, c1, 8);
2545 tcg_gen_ori_i64(c2, c2, 0x10000);
2546 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
2547 tcg_gen_addi_i64(o->in2, o->in2, 8);
2548 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
2549 tcg_temp_free_i64(c1);
2550 tcg_temp_free_i64(c2);
2551 /* ??? We don't implement clock states. */
2552 gen_op_movi_cc(s, 0);
/* SCKC: SET CLOCK COMPARATOR -- privileged, helper-based. */
2556 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
2558 check_privileged(s);
2559 gen_helper_sckc(cpu_env, o->in2);
/* STCKC: STORE CLOCK COMPARATOR. */
2563 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
2565 check_privileged(s);
2566 gen_helper_stckc(o->out, cpu_env);
/* STCTG: STORE CONTROL (64-bit control registers r1..r3). */
2570 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
2572 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2573 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2574 check_privileged(s);
2575 potential_page_fault(s);
2576 gen_helper_stctg(cpu_env, r1, o->in2, r3);
2577 tcg_temp_free_i32(r1);
2578 tcg_temp_free_i32(r3);
/* STCTL: STORE CONTROL (32-bit). */
2582 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
2584 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2585 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2586 check_privileged(s);
2587 potential_page_fault(s);
2588 gen_helper_stctl(cpu_env, r1, o->in2, r3);
2589 tcg_temp_free_i32(r1);
2590 tcg_temp_free_i32(r3);
/* STIDP: STORE CPU ID -- here just the cpu number is provided. */
2594 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
2596 check_privileged(s);
2597 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
/* SPT: SET CPU TIMER. */
2601 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
2603 check_privileged(s);
2604 gen_helper_spt(cpu_env, o->in2);
/* STFL: STORE FACILITY LIST -- a fixed facility word is written to the
   architected low-core location (real address 200). */
2608 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
2611 /* We really ought to have more complete indication of facilities
2612 that we implement. Address this when STFLE is implemented. */
2613 check_privileged(s);
2614 f = tcg_const_i64(0xc0000000);
2615 a = tcg_const_i64(200);
2616 tcg_gen_qemu_st32(f, a, get_mem_index(s));
2617 tcg_temp_free_i64(f);
2618 tcg_temp_free_i64(a);
/* STPT: STORE CPU TIMER. */
2622 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
2624 check_privileged(s);
2625 gen_helper_stpt(o->out, cpu_env);
/* STSI: STORE SYSTEM INFORMATION -- function code in regs[0]/regs[1]. */
2629 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
2631 check_privileged(s);
2632 potential_page_fault(s);
2633 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
/* SPX: SET PREFIX. */
2638 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
2640 check_privileged(s);
2641 gen_helper_spx(cpu_env, o->in2);
/* Channel-subsystem instructions are not implemented; always report
   cc 3 ("not operational"). */
2645 static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
2647 check_privileged(s);
2648 /* Not operational. */
2649 gen_op_movi_cc(s, 3);
/* STPX: STORE PREFIX -- read the prefix register, masked to its
   architected alignment. */
2653 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
2655 check_privileged(s);
2656 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
2657 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
/* STNSM/STOSM: STORE THEN AND/OR SYSTEM MASK.  Opcode 0xac is STNSM
   (AND); otherwise STOSM (OR).  The old mask byte is stored first so
   that a fault-and-restart sees the unmodified system mask. */
2661 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
2663 uint64_t i2 = get_field(s->fields, i2);
2666 check_privileged(s);
2668 /* It is important to do what the instruction name says: STORE THEN.
2669 If we let the output hook perform the store then if we fault and
2670 restart, we'll have the wrong SYSTEM MASK in place. */
2671 t = tcg_temp_new_i64();
2672 tcg_gen_shri_i64(t, psw_mask, 56);
2673 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
2674 tcg_temp_free_i64(t);
2676 if (s->fields->op == 0xac) {
2677 tcg_gen_andi_i64(psw_mask, psw_mask,
2678 (i2 << 56) | 0x00ffffffffffffffull);
2680 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
/* STURA: STORE USING REAL ADDRESS -- privileged, helper-based. */
2685 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
2687 check_privileged(s);
2688 potential_page_fault(s);
2689 gen_helper_stura(cpu_env, o->in2, o->in1);
/* Plain memory stores of various widths. */
2694 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
2696 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
2700 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
2702 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
2706 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
2708 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
2712 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
2714 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
/* STAM: STORE ACCESS MULTIPLE -- access regs r1..r3 via helper. */
2718 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
2720 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2721 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2722 potential_page_fault(s);
2723 gen_helper_stam(cpu_env, r1, o->in2, r3);
2724 tcg_temp_free_i32(r1);
2725 tcg_temp_free_i32(r3);
/* STCM: STORE CHARACTERS UNDER MASK.  Contiguous masks become a single
   shift + wide store; otherwise byte-at-a-time shifts and stores.
   NOTE(review): the dispatching switch/loop statements are missing from
   this truncated view. */
2729 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
2731 int m3 = get_field(s->fields, m3);
2732 int pos, base = s->insn->data;
2733 TCGv_i64 tmp = tcg_temp_new_i64();
2735 pos = base + ctz32(m3) * 8;
2738 /* Effectively a 32-bit store. */
2739 tcg_gen_shri_i64(tmp, o->in1, pos);
2740 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
2746 /* Effectively a 16-bit store. */
2747 tcg_gen_shri_i64(tmp, o->in1, pos);
2748 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
2755 /* Effectively an 8-bit store. */
2756 tcg_gen_shri_i64(tmp, o->in1, pos);
2757 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
2761 /* This is going to be a sequence of shifts and stores. */
2762 pos = base + 32 - 8;
2765 tcg_gen_shri_i64(tmp, o->in1, pos);
2766 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
2767 tcg_gen_addi_i64(o->in2, o->in2, 1);
2769 m3 = (m3 << 1) & 0xf;
2774 tcg_temp_free_i64(tmp);
/* STM/STMG: STORE MULTIPLE -- element size (4 or 8) comes from the
   insn table's data word.  NOTE(review): loop statements are missing
   from this truncated view. */
2778 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
2780 int r1 = get_field(s->fields, r1);
2781 int r3 = get_field(s->fields, r3);
2782 int size = s->insn->data;
2783 TCGv_i64 tsize = tcg_const_i64(size);
2787 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
2789 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
2794 tcg_gen_add_i64(o->in2, o->in2, tsize);
2798 tcg_temp_free_i64(tsize);
/* STMH: STORE MULTIPLE HIGH -- shift the high halves down and store as
   words. */
2802 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
2804 int r1 = get_field(s->fields, r1);
2805 int r3 = get_field(s->fields, r3);
2806 TCGv_i64 t = tcg_temp_new_i64();
2807 TCGv_i64 t4 = tcg_const_i64(4);
2808 TCGv_i64 t32 = tcg_const_i64(32);
2811 tcg_gen_shl_i64(t, regs[r1], t32);
2812 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
2816 tcg_gen_add_i64(o->in2, o->in2, t4);
2820 tcg_temp_free_i64(t);
2821 tcg_temp_free_i64(t4);
2822 tcg_temp_free_i64(t32);
/* SRST: SEARCH STRING -- helper returns the updated addresses. */
2826 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
2828 potential_page_fault(s);
2829 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2831 return_low128(o->in2);
/* Plain integer subtract. */
2835 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
2837 tcg_gen_sub_i64(o->out, o->in1, o->in2);
/* Subtract with borrow: computed as in1 + ~in2 + borrow, where the
   borrow is extracted from the previously-computed cc. */
2841 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
2846 tcg_gen_not_i64(o->in2, o->in2);
2847 tcg_gen_add_i64(o->out, o->in1, o->in2);
2849 /* XXX possible optimization point */
2851 cc = tcg_temp_new_i64();
2852 tcg_gen_extu_i32_i64(cc, cc_op);
2853 tcg_gen_shri_i64(cc, cc, 1);
2854 tcg_gen_add_i64(o->out, o->out, cc);
2855 tcg_temp_free_i64(cc);
/* SVC: SUPERVISOR CALL -- record the svc code and instruction length in
   env, then raise the exception; no fall-through (EXIT_NORETURN). */
2859 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
2866 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
2867 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
2868 tcg_temp_free_i32(t);
2870 t = tcg_const_i32(s->next_pc - s->pc);
2871 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
2872 tcg_temp_free_i32(t);
2874 gen_exception(EXCP_SVC);
2875 return EXIT_NORETURN;
/* TCEB/TCDB/TCXB: TEST DATA CLASS (short/long/extended BFP); the
   helper writes the condition code directly. */
2878 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
2880 gen_helper_tceb(cc_op, o->in1, o->in2);
2885 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
2887 gen_helper_tcdb(cc_op, o->in1, o->in2);
2892 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
2894 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
2899 #ifndef CONFIG_USER_ONLY
/* TPROT: TEST PROTECTION -- cc from helper. */
2900 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
2902 potential_page_fault(s);
2903 gen_helper_tprot(cc_op, o->addr1, o->in2);
/* TR: TRANSLATE -- table-driven byte translation via helper. */
2909 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
2911 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2912 potential_page_fault(s);
2913 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
2914 tcg_temp_free_i32(l);
/* UNPK: UNPACK -- packed decimal to zoned, helper-based. */
2919 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
2921 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2922 potential_page_fault(s);
2923 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
2924 tcg_temp_free_i32(l);
/* XC: EXCLUSIVE OR (characters) -- storage-to-storage; cc from helper. */
2928 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
2930 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2931 potential_page_fault(s);
2932 gen_helper_xc(cc_op, cpu_env, l, o->addr1, o->in2);
2933 tcg_temp_free_i32(l);
/* Register/register XOR. */
2938 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
2940 tcg_gen_xor_i64(o->out, o->in1, o->in2);
/* XOR immediate into a sub-field of in1 (same encoding as op_ori);
   cc derived from only the affected bits. */
2944 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
2946 int shift = s->insn->data & 0xff;
2947 int size = s->insn->data >> 8;
2948 uint64_t mask = ((1ull << size) - 1) << shift;
2951 tcg_gen_shli_i64(o->in2, o->in2, shift);
2952 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2954 /* Produce the CC from only the bits manipulated. */
2955 tcg_gen_andi_i64(cc_dst, o->out, mask);
2956 set_cc_nz_u64(s, cc_dst);
/* Produce constant-zero outputs (one or both halves of a pair). */
2960 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
2962 o->out = tcg_const_i64(0);
2966 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
2968 o->out = tcg_const_i64(0);
2974 /* ====================================================================== */
2975 /* The "Cc OUTput" generators. Given the generated output (and in some cases
2976 the original inputs), update the various cc data structures in order to
2977 be able to compute the new condition code. */
/* Each cout_* helper stashes the values (inputs and/or output of the just
   executed operation) plus a CC_OP_* tag into the cc_* globals, so the
   condition code can be computed lazily later.  Naming convention:
   s/u suffix = signed/unsigned, c/b = add-with-carry / subtract-with-borrow,
   32/64 = operand width, f* = floating point. */
2979 static void cout_abs32(DisasContext *s, DisasOps *o)
2981 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
2984 static void cout_abs64(DisasContext *s, DisasOps *o)
2986 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
2989 static void cout_adds32(DisasContext *s, DisasOps *o)
2991 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
2994 static void cout_adds64(DisasContext *s, DisasOps *o)
2996 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
2999 static void cout_addu32(DisasContext *s, DisasOps *o)
3001 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
3004 static void cout_addu64(DisasContext *s, DisasOps *o)
3006 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
3009 static void cout_addc32(DisasContext *s, DisasOps *o)
3011 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
3014 static void cout_addc64(DisasContext *s, DisasOps *o)
3016 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
/* Comparisons only need the two inputs; the CC is derived from their
   relative order (LTGT = signed, LTUGTU = unsigned). */
3019 static void cout_cmps32(DisasContext *s, DisasOps *o)
3021 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
3024 static void cout_cmps64(DisasContext *s, DisasOps *o)
3026 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
3029 static void cout_cmpu32(DisasContext *s, DisasOps *o)
3031 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
3034 static void cout_cmpu64(DisasContext *s, DisasOps *o)
3036 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
3039 static void cout_f32(DisasContext *s, DisasOps *o)
3041 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
3044 static void cout_f64(DisasContext *s, DisasOps *o)
3046 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
/* 128-bit float results arrive as an out/out2 pair. */
3049 static void cout_f128(DisasContext *s, DisasOps *o)
3051 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
3054 static void cout_nabs32(DisasContext *s, DisasOps *o)
3056 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
3059 static void cout_nabs64(DisasContext *s, DisasOps *o)
3061 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
3064 static void cout_neg32(DisasContext *s, DisasOps *o)
3066 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
3069 static void cout_neg64(DisasContext *s, DisasOps *o)
3071 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
/* For a 32-bit non-zero test, mask off the high half first so stale
   upper bits of the 64-bit TCG value cannot leak into the CC. */
3074 static void cout_nz32(DisasContext *s, DisasOps *o)
3076 tcg_gen_ext32u_i64(cc_dst, o->out);
3077 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
3080 static void cout_nz64(DisasContext *s, DisasOps *o)
3082 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
/* Signed comparison of the result against zero. */
3085 static void cout_s32(DisasContext *s, DisasOps *o)
3087 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3090 static void cout_s64(DisasContext *s, DisasOps *o)
3092 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
3095 static void cout_subs32(DisasContext *s, DisasOps *o)
3097 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3100 static void cout_subs64(DisasContext *s, DisasOps *o)
3102 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3105 static void cout_subu32(DisasContext *s, DisasOps *o)
3107 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3110 static void cout_subu64(DisasContext *s, DisasOps *o)
3112 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3115 static void cout_subb32(DisasContext *s, DisasOps *o)
3117 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3120 static void cout_subb64(DisasContext *s, DisasOps *o)
3122 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
/* TEST UNDER MASK style CC: derived from in1 ANDed with mask in2. */
3125 static void cout_tm32(DisasContext *s, DisasOps *o)
3127 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3130 static void cout_tm64(DisasContext *s, DisasOps *o)
3132 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3135 /* ====================================================================== */
3136 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3137 with the TCG register to which we will write. Used in combination with
3138 the "wout" generators, in some cases we need a new temporary, and in
3139 some cases we can write to a TCG global. */
/* prep_new / prep_new_P: allocate fresh temporaries for the result;
   translate_one will free them after the write-out step. */
3141 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3143 o->out = tcg_temp_new_i64();
3146 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3148 o->out = tcg_temp_new_i64();
3149 o->out2 = tcg_temp_new_i64();
/* prep_r1: write directly into the TCG global for GPR r1. */
3152 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3154 o->out = regs[get_field(f, r1)];
/* prep_r1_P: target the even/odd GPR pair r1/r1+1.  g_out/g_out2 mark
   these as globals so translate_one does not free them. */
3158 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3160 /* ??? Specification exception: r1 must be even. */
3161 int r1 = get_field(f, r1);
3163 o->out2 = regs[(r1 + 1) & 15];
3164 o->g_out = o->g_out2 = true;
/* prep_f1: target FPR r1 directly. */
3167 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3169 o->out = fregs[get_field(f, r1)];
/* prep_x1: target the extended (128-bit) FP register pair r1/r1+2. */
3173 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3175 /* ??? Specification exception: r1 must be < 14. */
3176 int r1 = get_field(f, r1);
3178 o->out2 = fregs[(r1 + 2) & 15];
3179 o->g_out = o->g_out2 = true;
3182 /* ====================================================================== */
3183 /* The "Write OUTput" generators. These generally perform some non-trivial
3184 copy of data to TCG globals, or to main memory. The trivial cases are
3185 generally handled by having a "prep" generator install the TCG global
3186 as the destination of the operation. */
/* wout_r1: store the full 64-bit result into GPR r1. */
3188 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3190 store_reg(get_field(f, r1), o->out);
/* wout_r1_8 / _16: insert only the low 8/16 bits of the result into r1,
   preserving the remaining bits of the register. */
3193 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3195 int r1 = get_field(f, r1);
3196 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
3199 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3201 int r1 = get_field(f, r1);
3202 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
/* wout_r1_32: store to the low 32 bits of r1 (high half preserved by
   store_reg32_i64). */
3205 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3207 store_reg32_i64(get_field(f, r1), o->out);
/* wout_r1_P32: 32-bit results out/out2 go to the pair r1/r1+1. */
3210 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3212 /* ??? Specification exception: r1 must be even. */
3213 int r1 = get_field(f, r1);
3214 store_reg32_i64(r1, o->out);
3215 store_reg32_i64((r1 + 1) & 15, o->out2);
/* wout_r1_D32: split a doubleword result across the pair — low half to
   r1+1, high half to r1.  Note this clobbers o->out via the shift. */
3218 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3220 /* ??? Specification exception: r1 must be even. */
3221 int r1 = get_field(f, r1);
3222 store_reg32_i64((r1 + 1) & 15, o->out);
3223 tcg_gen_shri_i64(o->out, o->out, 32);
3224 store_reg32_i64(r1, o->out);
/* wout_e1 / wout_f1: short (32-bit) / long (64-bit) FP result to FPR r1. */
3227 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3229 store_freg32_i64(get_field(f, r1), o->out);
3232 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3234 store_freg(get_field(f, r1), o->out);
/* wout_x1: extended FP result to the FPR pair r1/r1+2. */
3237 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3239 /* ??? Specification exception: r1 must be < 14. */
3240 int f1 = get_field(s->fields, r1);
3241 store_freg(f1, o->out);
3242 store_freg((f1 + 2) & 15, o->out2);
/* Conditional write-out: skip the store when r1 == r2 (used by insns
   whose result is defined to be a no-op in that case). */
3245 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3247 if (get_field(f, r1) != get_field(f, r2)) {
3248 store_reg32_i64(get_field(f, r1), o->out);
3252 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
3254 if (get_field(f, r1) != get_field(f, r2)) {
3255 store_freg32_i64(get_field(f, r1), o->out);
/* Memory write-out: store the result at the address computed into addr1
   (or, for wout_m2_32, at the address in in2), at the given width. */
3259 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3261 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
3264 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3266 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
3269 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3271 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
3274 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3276 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
3279 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3281 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
3284 /* ====================================================================== */
3285 /* The "INput 1" generators. These load the first operand to an insn. */
/* in1_* naming: rN = from GPR N; _o = operate on the TCG global in place
   (g_in1 set elsewhere so it is not freed); _32s/_32u = sign/zero extend
   the low 32 bits into a temp; _sr32 = high 32 bits shifted down;
   rNp1 = the odd register of an even/odd pair; m1_* = load from memory
   at the la1-computed address. */
3287 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3289 o->in1 = load_reg(get_field(f, r1));
3292 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3294 o->in1 = regs[get_field(f, r1)];
3298 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3300 o->in1 = tcg_temp_new_i64();
3301 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
3304 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3306 o->in1 = tcg_temp_new_i64();
3307 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
3310 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
3312 o->in1 = tcg_temp_new_i64();
3313 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
3316 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
3318 /* ??? Specification exception: r1 must be even. */
3319 int r1 = get_field(f, r1);
3320 o->in1 = load_reg((r1 + 1) & 15);
3323 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3325 /* ??? Specification exception: r1 must be even. */
3326 int r1 = get_field(f, r1);
3327 o->in1 = tcg_temp_new_i64();
3328 tcg_gen_ext32s_i64(o->in1, regs[(r1 + 1) & 15]);
3331 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3333 /* ??? Specification exception: r1 must be even. */
3334 int r1 = get_field(f, r1);
3335 o->in1 = tcg_temp_new_i64();
3336 tcg_gen_ext32u_i64(o->in1, regs[(r1 + 1) & 15]);
/* in1_r1_D32: build a 64-bit doubleword from the pair — r1 supplies the
   high 32 bits, r1+1 the low 32 bits. */
3339 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3341 /* ??? Specification exception: r1 must be even. */
3342 int r1 = get_field(f, r1);
3343 o->in1 = tcg_temp_new_i64();
3344 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3347 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
3349 o->in1 = load_reg(get_field(f, r2));
3352 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
3354 o->in1 = load_reg(get_field(f, r3));
3357 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
3359 o->in1 = regs[get_field(f, r3)];
3363 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3365 o->in1 = tcg_temp_new_i64();
3366 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
3369 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3371 o->in1 = tcg_temp_new_i64();
3372 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
/* FP inputs: e = short (32-bit), f = long (64-bit), x = extended (pair). */
3375 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3377 o->in1 = load_freg32_i64(get_field(f, r1));
3380 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3382 o->in1 = fregs[get_field(f, r1)];
/* in1_x1_o: the 128-bit input is passed via the out/out2 slots here —
   presumably so the op helpers see the pair in a fixed place; note the
   g_out flags so the globals are not freed.  TODO(review): confirm
   against the op_* consumers, which are outside this view. */
3386 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3388 /* ??? Specification exception: r1 must be < 14. */
3389 int r1 = get_field(f, r1);
3391 o->out2 = fregs[(r1 + 2) & 15];
3392 o->g_out = o->g_out2 = true;
/* Effective-address inputs: la1 from b1/d1, la2 from x2/b2/d2. */
3395 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
3397 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
3400 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
3402 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
3403 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
/* Memory inputs at the la1 address, at various widths/extensions. */
3406 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3409 o->in1 = tcg_temp_new_i64();
3410 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
3413 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3416 o->in1 = tcg_temp_new_i64();
3417 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
3420 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3423 o->in1 = tcg_temp_new_i64();
3424 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
3427 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3430 o->in1 = tcg_temp_new_i64();
3431 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
3434 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3437 o->in1 = tcg_temp_new_i64();
3438 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
3441 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3444 o->in1 = tcg_temp_new_i64();
3445 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
3448 /* ====================================================================== */
3449 /* The "INput 2" generators. These load the second operand to an insn. */
/* in2_* loads the second operand.  Same naming scheme as in1_*:
   _o = TCG global used in place, _Ns/_Nu = sign/zero extension into a
   temp, m2_* = memory load at the a2 address, mri2_* = memory load at a
   PC-relative address, i2_* = immediate constant. */
3451 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3453 o->in2 = regs[get_field(f, r1)];
3457 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3459 o->in2 = tcg_temp_new_i64();
3460 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
3463 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3465 o->in2 = tcg_temp_new_i64();
3466 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
3469 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
3471 o->in2 = load_reg(get_field(f, r2));
3474 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3476 o->in2 = regs[get_field(f, r2)];
/* in2_r2_nz: load r2 only when non-zero (r0 as base/operand means
   "absent" in several s390x addressing contexts). */
3480 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
3482 int r2 = get_field(f, r2);
3484 o->in2 = load_reg(r2);
3488 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
3490 o->in2 = tcg_temp_new_i64();
3491 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
3494 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3496 o->in2 = tcg_temp_new_i64();
3497 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
3500 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3502 o->in2 = tcg_temp_new_i64();
3503 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
3506 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3508 o->in2 = tcg_temp_new_i64();
3509 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
3512 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
3514 o->in2 = load_reg(get_field(f, r3));
3517 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3519 o->in2 = tcg_temp_new_i64();
3520 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
3523 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3525 o->in2 = tcg_temp_new_i64();
3526 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
/* FP second operands: short, long, extended (pair via in1/in2). */
3529 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
3531 o->in2 = load_freg32_i64(get_field(f, r2));
3534 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3536 o->in2 = fregs[get_field(f, r2)];
3540 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
3542 /* ??? Specification exception: r1 must be < 14. */
3543 int r2 = get_field(f, r2)
3545 o->in2 = fregs[(r2 + 2) & 15];
3546 o->g_in1 = o->g_in2 = true;
/* in2_ra2: register value treated as a base address (no index/disp). */
3549 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
3551 o->in2 = get_address(s, 0, get_field(f, r2), 0);
/* in2_a2: full x2+b2+d2 effective address. */
3554 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
3556 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
3557 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
/* in2_ri2: PC-relative target; i2 is in halfwords, hence * 2. */
3560 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
3562 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
/* Shift amounts, masked to 31 or 63 bits by help_l2_shift. */
3565 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
3567 help_l2_shift(s, f, o, 31);
3570 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
3572 help_l2_shift(s, f, o, 63);
/* Memory loads: in2 holds the address on entry (set by the elided a2
   helper call) and is overwritten in place with the loaded value. */
3575 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3578 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
3581 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3584 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
3587 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3590 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
3593 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3596 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
3599 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3602 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
3605 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
3608 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
/* PC-relative memory loads (LRL and friends). */
3611 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3614 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
3617 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3620 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
3623 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3626 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
3629 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
3632 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
/* Immediate operands, with explicit zero-extension variants; _shl forms
   shift the immediate left by the per-insn data amount. */
3635 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
3637 o->in2 = tcg_const_i64(get_field(f, i2));
3640 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3642 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
3645 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3647 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
3650 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3652 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
3655 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
3657 uint64_t i2 = (uint16_t)get_field(f, i2);
3658 o->in2 = tcg_const_i64(i2 << s->insn->data);
3661 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
3663 uint64_t i2 = (uint32_t)get_field(f, i2);
3664 o->in2 = tcg_const_i64(i2 << s->insn->data);
3667 /* ====================================================================== */
3669 /* Find opc within the table of insns. This is formulated as a switch
3670 statement so that (1) we get compile-time notice of cut-paste errors
3671 for duplicated opcodes, and (2) the compiler generates the binary
3672 search tree, rather than us having to post-process the table. */
/* C() is D() with the extra data word defaulted to 0. */
3674 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
3675 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
/* First expansion of insn-data.def: one enum constant per insn, used to
   index insn_info[] below. */
3677 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
3679 enum DisasInsnEnum {
3680 #include "insn-data.def"
/* Second expansion: fill a DisasInsn record per insn, pasting the
   helper-family prefixes onto the names given in the .def file. */
3684 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
3689 .help_in1 = in1_##I1, \
3690 .help_in2 = in2_##I2, \
3691 .help_prep = prep_##P, \
3692 .help_wout = wout_##W, \
3693 .help_cout = cout_##CC, \
3694 .help_op = op_##OP, \
3698 /* Allow 0 to be used for NULL in the table below. */
3706 static const DisasInsn insn_info[] = {
3707 #include "insn-data.def"
/* Third expansion: switch cases mapping opcode -> table entry, letting
   the compiler both detect duplicate opcodes and build the dispatch. */
3711 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
3712 case OPC: return &insn_info[insn_ ## NM];
3714 static const DisasInsn *lookup_opc(uint16_t opc)
3717 #include "insn-data.def"
3726 /* Extract a field from the insn. The INSN should be left-aligned in
3727 the uint64_t so that we can more easily utilize the big-bit-endian
3728 definitions we extract from the Principles of Operation. */
3730 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
3738 /* Zero extract the field from the insn. */
3739 r = (insn << f->beg) >> (64 - f->size);
3741 /* Sign-extend, or un-swap the field as necessary. */
3743 case 0: /* unsigned */
3745 case 1: /* signed */
3746 assert(f->size <= 32);
/* m is the sign bit of the extracted field; the (elided) arithmetic
   below uses it to sign-extend r. */
3747 m = 1u << (f->size - 1);
/* The 20-bit displacement is stored as DL (low 12) followed by DH
   (high 8, signed); swap and sign-extend via the int8_t cast. */
3750 case 2: /* dl+dh split, signed 20 bit. */
3751 r = ((int8_t)r << 12) | (r >> 8);
3757 /* Validate that the "compressed" encoding we selected above is valid.
3758 I.e. we haven't made two different original fields overlap. */
3759 assert(((o->presentC >> f->indexC) & 1) == 0);
3760 o->presentC |= 1 << f->indexC;
3761 o->presentO |= 1 << f->indexO;
3763 o->c[f->indexC] = r;
3766 /* Lookup the insn at the current PC, extracting the operands into O and
3767 returning the info struct for the insn. Returns NULL for invalid insn. */
3769 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
3772 uint64_t insn, pc = s->pc;
3774 const DisasInsn *info;
/* The first halfword is enough to determine the instruction length
   (2/4/6 bytes) from the major opcode. */
3776 insn = ld_code2(env, pc)
3777 op = (insn >> 8) & 0xff;
3778 ilen = get_ilen(op);
3779 s->next_pc = s->pc + ilen;
/* Re-read the full insn, left-aligned in the 64-bit value so the
   big-bit-endian field definitions apply directly (see extract_field). */
3786 insn = ld_code4(env, pc) << 32;
3789 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
3795 /* We can't actually determine the insn format until we've looked up
3796 the full insn opcode. Which we can't do without locating the
3797 secondary opcode. Assume by default that OP2 is at bit 40; for
3798 those smaller insns that don't actually have a secondary opcode
3799 this will correctly result in OP2 = 0. */
3805 case 0xb2: /* S, RRF, RRE */
3806 case 0xb3: /* RRE, RRD, RRF */
3807 case 0xb9: /* RRE, RRF */
3808 case 0xe5: /* SSE, SIL */
/* Secondary opcode in the second byte. */
3809 op2 = (insn << 8) >> 56;
/* RIL/SSF: 4-bit secondary opcode in the low nibble of the first byte. */
3813 case 0xc0: /* RIL */
3814 case 0xc2: /* RIL */
3815 case 0xc4: /* RIL */
3816 case 0xc6: /* RIL */
3817 case 0xc8: /* SSF */
3818 case 0xcc: /* RIL */
3819 op2 = (insn << 12) >> 60;
/* SS-format insns have no secondary opcode. */
3821 case 0xd0 ... 0xdf: /* SS */
3827 case 0xee ... 0xf3: /* SS */
3828 case 0xf8 ... 0xfd: /* SS */
/* Default: secondary opcode at bit 40 (byte 5 of a 6-byte insn). */
3832 op2 = (insn << 40) >> 56;
3836 memset(f, 0, sizeof(*f));
3840 /* Lookup the instruction. */
3841 info = lookup_opc(op << 8 | op2);
3843 /* If we found it, extract the operands. */
3845 DisasFormat fmt = info->fmt;
3848 for (i = 0; i < NUM_C_FIELD; ++i) {
3849 extract_field(f, &format_info[fmt].op[i], insn);
/* Translate a single instruction at s->pc: decode it, then run the
   decoded insn's helper pipeline in fixed order
   (in1 -> in2 -> prep -> op -> wout -> cout), and finally release any
   temporaries the helpers allocated. */
3855 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
3857 const DisasInsn *insn;
3858 ExitStatus ret = NO_EXIT;
3862 /* Search for the insn in the table. */
3863 insn = extract_insn(env, s, &f);
3865 /* Not found means unimplemented/illegal opcode. */
3867 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
3869 gen_illegal_opcode(s);
3870 return EXIT_NORETURN;
3873 /* Set up the structures we use to communicate with the helpers. */
3876 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
/* Mark all operand slots unused so the cleanup below can tell which
   ones the helpers actually populated. */
3877 TCGV_UNUSED_I64(o.out);
3878 TCGV_UNUSED_I64(o.out2);
3879 TCGV_UNUSED_I64(o.in1);
3880 TCGV_UNUSED_I64(o.in2);
3881 TCGV_UNUSED_I64(o.addr1);
3883 /* Implement the instruction. */
3884 if (insn->help_in1) {
3885 insn->help_in1(s, &f, &o);
3887 if (insn->help_in2) {
3888 insn->help_in2(s, &f, &o);
3890 if (insn->help_prep) {
3891 insn->help_prep(s, &f, &o);
3893 if (insn->help_op) {
3894 ret = insn->help_op(s, &o);
3896 if (insn->help_wout) {
3897 insn->help_wout(s, &f, &o);
3899 if (insn->help_cout) {
3900 insn->help_cout(s, &o);
/* Free any temporaries created by the helpers.  Slots flagged g_*
   reference TCG globals (CPU registers) and must not be freed. */
3904 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
3905 tcg_temp_free_i64(o.out);
3907 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
3908 tcg_temp_free_i64(o.out2);
3910 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
3911 tcg_temp_free_i64(o.in1);
3913 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
3914 tcg_temp_free_i64(o.in2);
3916 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
3917 tcg_temp_free_i64(o.addr1);
3920 /* Advance to the next instruction. */
/* Main translation loop: translate instructions starting at tb->pc into
   one TranslationBlock, stopping at a page boundary, opcode-buffer
   exhaustion, the insn budget, or single-step.  When search_pc-style
   operation is requested (third argument), also record per-insn pc and
   cc_op so restore_state_to_opc can map host state back to guest state. */
3925 static inline void gen_intermediate_code_internal(CPUS390XState *env,
3926 TranslationBlock *tb,
3930 target_ulong pc_start;
3931 uint64_t next_page_start;
3932 uint16_t *gen_opc_end;
3934 int num_insns, max_insns;
/* In 31-bit mode the top bit of the PSW address is not part of the
   instruction address. */
3942 if (!(tb->flags & FLAG_MASK_64)) {
3943 pc_start &= 0x7fffffff;
/* Start every TB with the CC in dynamic (unknown) state. */
3948 dc.cc_op = CC_OP_DYNAMIC;
3949 do_debug = dc.singlestep_enabled = env->singlestep_enabled;
3950 dc.is_jmp = DISAS_NEXT;
3952 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
3954 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
3957 max_insns = tb->cflags & CF_COUNT_MASK;
3958 if (max_insns == 0) {
3959 max_insns = CF_COUNT_MASK;
/* Pad the opc->pc mapping so every opcode slot up to this insn has an
   entry, then record this insn's pc/cc_op/icount. */
3966 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
3970 tcg_ctx.gen_opc_instr_start[lj++] = 0;
3973 tcg_ctx.gen_opc_pc[lj] = dc.pc;
3974 gen_opc_cc_op[lj] = dc.cc_op;
3975 tcg_ctx.gen_opc_instr_start[lj] = 1;
3976 tcg_ctx.gen_opc_icount[lj] = num_insns;
/* The last insn of an I/O-ending TB must run with io access enabled. */
3978 if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
3982 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
3983 tcg_gen_debug_insn_start(dc.pc);
/* Stop translation at a breakpoint so the debug exception is raised
   with a stale-but-correct PSW address. */
3987 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
3988 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
3989 if (bp->pc == dc.pc) {
3990 status = EXIT_PC_STALE;
3996 if (status == NO_EXIT) {
3997 status = translate_one(env, &dc);
4000 /* If we reach a page boundary, are single stepping,
4001 or exhaust instruction count, stop generation. */
4002 if (status == NO_EXIT
4003 && (dc.pc >= next_page_start
4004 || tcg_ctx.gen_opc_ptr >= gen_opc_end
4005 || num_insns >= max_insns
4007 || env->singlestep_enabled)) {
4008 status = EXIT_PC_STALE;
4010 } while (status == NO_EXIT);
4012 if (tb->cflags & CF_LAST_IO) {
4021 update_psw_addr(&dc);
4023 case EXIT_PC_UPDATED:
/* When single-stepping with a known cc op, materialize the CC now;
   otherwise record the cc op so the next TB (which starts with
   CC_OP_DYNAMIC) can recover it from env. */
4024 if (singlestep && dc.cc_op != CC_OP_DYNAMIC) {
4025 gen_op_calc_cc(&dc);
4027 /* Next TB starts off with CC_OP_DYNAMIC,
4028 so make sure the cc op type is in env */
4029 gen_op_set_cc_op(&dc);
4032 gen_exception(EXCP_DEBUG);
4034 /* Generate the return instruction */
4042 gen_icount_end(tb, num_insns);
4043 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
/* Pad the tail of the opc->pc mapping for the search-pc case. */
4045 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4048 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4051 tb->size = dc.pc - pc_start;
4052 tb->icount = num_insns;
4055 #if defined(S390X_DEBUG_DISAS)
4056 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
4057 qemu_log("IN: %s\n", lookup_symbol(pc_start));
4058 log_target_disas(env, pc_start, dc.pc - pc_start, 1);
/* Public entry point: normal translation (no per-insn pc recording). */
4064 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4066 gen_intermediate_code_internal(env, tb, 0);
/* Public entry point: translation with the opc->pc map recorded, for
   restoring guest state from a host pc (see restore_state_to_opc). */
4069 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4071 gen_intermediate_code_internal(env, tb, 1);
4074 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4077 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4078 cc_op = gen_opc_cc_op[pc_pos];
4079 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {