4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
28 # define LOG_DISAS(...) do { } while (0)
32 #include "disas/disas.h"
35 #include "qemu/host-utils.h"
37 /* global register indexes */
38 static TCGv_ptr cpu_env;
40 #include "exec/gen-icount.h"
46 /* Information that (most) every instruction needs to manipulate. */
/* Forward typedefs for the per-TB disassembly state, the decode-table entry
   and the decoded-field container used throughout this translator. */
47 typedef struct DisasContext DisasContext;
48 typedef struct DisasInsn DisasInsn;
49 typedef struct DisasFields DisasFields;
/* DisasContext members (listing elided): the TB being translated, the current
   decode-table entry, and whether gdb single-step is active. */
52 struct TranslationBlock *tb;
53 const DisasInsn *insn;
57 bool singlestep_enabled;
60 /* Information carried about a condition to be evaluated. */
/* DisasCompare comparison operands: either a 64-bit or a 32-bit pair,
   selected by the (elided) is_64 flag. */
67 struct { TCGv_i64 a, b; } s64;
68 struct { TCGv_i32 a, b; } s32;
74 #ifdef DEBUG_INLINE_BRANCHES
/* Per-cc_op counters of branches we could/could not fold into an inline
   TCG comparison; dumped from cpu_dump_state when debugging is enabled. */
75 static uint64_t inline_branch_hit[CC_OP_MAX];
76 static uint64_t inline_branch_miss[CC_OP_MAX];
/* Compute the link value stored by branch-and-save style instructions.
   In 64-bit mode the PC is used as-is (elided fallthrough); in 31-bit mode
   the high bit is set to record the addressing mode, per the PSW format. */
79 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
81 if (!(s->tb->flags & FLAG_MASK_64)) {
82 if (s->tb->flags & FLAG_MASK_32) {
83 return pc | 0x80000000;
/* Dump PSW, general registers, floating point registers and (system mode)
   control registers to F for the 'info registers' monitor command. */
89 void cpu_dump_state(CPUS390XState *env, FILE *f, fprintf_function cpu_fprintf,
/* With S390X_DEBUG_DISAS the cc is printed symbolically ... */
95 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
96 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
/* ... otherwise as a raw hex value. */
98 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
99 env->psw.mask, env->psw.addr, env->cc_op);
/* General registers, four per output line (line-break logic elided). */
102 for (i = 0; i < 16; i++) {
103 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
105 cpu_fprintf(f, "\n");
/* Floating point registers, printed as raw 64-bit images. */
111 for (i = 0; i < 16; i++) {
112 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
114 cpu_fprintf(f, "\n");
120 #ifndef CONFIG_USER_ONLY
/* Control registers exist only in system emulation. */
121 for (i = 0; i < 16; i++) {
122 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
124 cpu_fprintf(f, "\n");
131 #ifdef DEBUG_INLINE_BRANCHES
/* NOTE(review): %10ld for a uint64_t is not portable to 32-bit hosts;
   PRIu64 would be correct -- debug-only code, confirm before changing. */
132 for (i = 0; i < CC_OP_MAX; i++) {
133 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
134 inline_branch_miss[i], inline_branch_hit[i]);
138 cpu_fprintf(f, "\n");
/* TCG global variables mirroring CPUS390XState fields; created once in
   s390x_translate_init and referenced by all generated code. */
141 static TCGv_i64 psw_addr;
142 static TCGv_i64 psw_mask;
/* Condition-code computation state: operation selector plus up to three
   operands (src, dst, result) consumed lazily by gen_op_calc_cc. */
144 static TCGv_i32 cc_op;
145 static TCGv_i64 cc_src;
146 static TCGv_i64 cc_dst;
147 static TCGv_i64 cc_vr;
/* Names "r0".."r15" and "f0".."f15" (max 3 chars + NUL) for the 16 general
   and 16 floating point register globals below. */
149 static char cpu_reg_names[32][4];
150 static TCGv_i64 regs[16];
151 static TCGv_i64 fregs[16];
/* Per-opcode cc_op values recorded for retranslation/restore. */
153 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
/* One-time creation of the TCG globals declared above, binding each to its
   offset inside CPUS390XState.  Called from CPU init. */
155 void s390x_translate_init(void)
159 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
160 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
161 offsetof(CPUS390XState, psw.addr),
163 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
164 offsetof(CPUS390XState, psw.mask),
167 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
169 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
171 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
173 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
/* General registers r0..r15; names live in cpu_reg_names[0..15]. */
176 for (i = 0; i < 16; i++) {
177 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
178 regs[i] = tcg_global_mem_new(TCG_AREG0,
179 offsetof(CPUS390XState, regs[i]),
/* Floating point registers f0..f15; names in cpu_reg_names[16..31]. */
183 for (i = 0; i < 16; i++) {
184 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
185 fregs[i] = tcg_global_mem_new(TCG_AREG0,
186 offsetof(CPUS390XState, fregs[i].d),
187 cpu_reg_names[i + 16]);
190 /* register helpers */
/* Return a fresh temp holding a copy of general register REG. */
195 static TCGv_i64 load_reg(int reg)
197 TCGv_i64 r = tcg_temp_new_i64();
198 tcg_gen_mov_i64(r, regs[reg])
/* Return a fresh temp holding float register REG's short (32-bit) operand,
   which s390 keeps in the high half of the 64-bit register. */
202 static TCGv_i64 load_freg32_i64(int reg)
204 TCGv_i64 r = tcg_temp_new_i64();
205 tcg_gen_shri_i64(r, fregs[reg], 32);
/* Store V into general register REG (full 64 bits). */
209 static void store_reg(int reg, TCGv_i64 v)
211 tcg_gen_mov_i64(regs[reg], v);
/* Store V into float register REG (full 64 bits). */
214 static void store_freg(int reg, TCGv_i64 v)
216 tcg_gen_mov_i64(fregs[reg], v);
219 static void store_reg32_i64(int reg, TCGv_i64 v)
221 /* 32 bit register writes keep the upper half */
222 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
/* Write V into the *high* 32 bits of REG, keeping the low half. */
225 static void store_reg32h_i64(int reg, TCGv_i64 v)
227 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
/* Write a short float: V goes into the high half of float register REG. */
230 static void store_freg32_i64(int reg, TCGv_i64 v)
232 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
/* Fetch the low 64 bits of a 128-bit helper result from env->retxl. */
235 static void return_low128(TCGv_i64 dest)
237 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
/* Flush the translation-time PC into the psw.addr global. */
240 static void update_psw_addr(DisasContext *s)
243 tcg_gen_movi_i64(psw_addr, s->pc);
/* Flush a statically-known cc_op into the cc_op global; dynamic/static
   values already live there and need no store. */
246 static void update_cc_op(DisasContext *s)
248 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
249 tcg_gen_movi_i32(cc_op, s->cc_op);
/* Mark that the next operation may fault (body elided in this listing). */
253 static void potential_page_fault(DisasContext *s)
/* Fetch 2/4/6 bytes of instruction text at PC, zero-extended to 64 bits;
   ld_code6 composes a 48-bit insn from a 2-byte and a 4-byte fetch. */
259 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
261 return (uint64_t)cpu_lduw_code(env, pc);
264 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
266 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
269 static inline uint64_t ld_code6(CPUS390XState *env, uint64_t pc)
271 return (ld_code2(env, pc) << 32) | ld_code4(env, pc + 2);
/* Map the PSW address-space-control bits (kept in tb->flags, shifted down
   by 32) to the softmmu index to use for data accesses. */
274 static int get_mem_index(DisasContext *s)
276 switch (s->tb->flags & FLAG_MASK_ASC) {
277 case PSW_ASC_PRIMARY >> 32:
279 case PSW_ASC_SECONDARY >> 32:
281 case PSW_ASC_HOME >> 32:
289 static void gen_exception(int excp)
291 TCGv_i32 tmp = tcg_const_i32(excp);
292 gen_helper_exception(cpu_env, tmp);
293 tcg_temp_free_i32(tmp);
/* Raise a program interrupt with interruption code CODE.  Records the code
   and instruction length in env for the exception handler, then (elided)
   advances the PC past the faulting insn before raising EXCP_PGM. */
296 static void gen_program_exception(DisasContext *s, int code)
300 /* Remember what pgm exception this was. */
301 tmp = tcg_const_i32(code);
302 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
303 tcg_temp_free_i32(tmp);
/* Instruction length = distance from insn start to next insn. */
305 tmp = tcg_const_i32(s->next_pc - s->pc);
306 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
307 tcg_temp_free_i32(tmp);
309 /* Advance past instruction. */
316 /* Trigger exception. */
317 gen_exception(EXCP_PGM);
320 static inline void gen_illegal_opcode(DisasContext *s)
322 gen_program_exception(s, PGM_SPECIFICATION);
/* If the TB was translated in problem state (PSTATE bit set in the
   flags copy of the PSW mask), raise a privileged-operation exception. */
325 static inline void check_privileged(DisasContext *s)
327 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
328 gen_program_exception(s, PGM_PRIVILEGED);
/* Compute an effective address from base B2, index X2 and displacement D2,
   returning a new temp.  Registers 0 mean "no base/index" (branches elided).
   In 31-bit mode the result is masked to 31 bits. */
332 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
336 /* 31-bitify the immediate part; register contents are dealt with below */
337 if (!(s->tb->flags & FLAG_MASK_64)) {
/* Case: both index and base present. */
343 tmp = tcg_const_i64(d2);
344 tcg_gen_add_i64(tmp, tmp, regs[x2]);
349 tcg_gen_add_i64(tmp, tmp, regs[b2]);
/* Case: base only. */
353 tmp = tcg_const_i64(d2);
354 tcg_gen_add_i64(tmp, tmp, regs[b2]);
/* Case: displacement only. */
359 tmp = tcg_const_i64(d2);
362 /* 31-bit mode mask if there are values loaded from registers */
363 if (!(s->tb->flags & FLAG_MASK_64) && (x2 || b2)) {
364 tcg_gen_andi_i64(tmp, tmp, 0x7fffffffUL);
/* True if cc_src/cc_dst/cc_vr currently hold live data that a new cc
   setter must discard first (condition list elided in this listing). */
370 static inline bool live_cc_data(DisasContext *s)
372 return (s->cc_op != CC_OP_DYNAMIC
373 && s->cc_op != CC_OP_STATIC
/* Record a constant condition code VAL (0..3) without any TCG data. */
377 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
379 if (live_cc_data(s)) {
380 tcg_gen_discard_i64(cc_src);
381 tcg_gen_discard_i64(cc_dst);
382 tcg_gen_discard_i64(cc_vr);
384 s->cc_op = CC_OP_CONST0 + val;
/* Record a one-operand cc computation: only cc_dst is needed. */
387 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
389 if (live_cc_data(s)) {
390 tcg_gen_discard_i64(cc_src);
391 tcg_gen_discard_i64(cc_vr);
393 tcg_gen_mov_i64(cc_dst, dst);
/* Record a two-operand cc computation: cc_src and cc_dst. */
397 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
400 if (live_cc_data(s)) {
401 tcg_gen_discard_i64(cc_vr);
403 tcg_gen_mov_i64(cc_src, src);
404 tcg_gen_mov_i64(cc_dst, dst);
/* Record a three-operand cc computation: src, dst and result. */
408 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
409 TCGv_i64 dst, TCGv_i64 vr)
411 tcg_gen_mov_i64(cc_src, src);
412 tcg_gen_mov_i64(cc_dst, dst);
413 tcg_gen_mov_i64(cc_vr, vr);
/* Convenience wrappers selecting the cc op for common result kinds. */
417 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
419 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
422 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
424 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
427 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
429 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
432 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
434 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
437 /* CC value is in env->cc_op */
438 static void set_cc_static(DisasContext *s)
440 if (live_cc_data(s)) {
441 tcg_gen_discard_i64(cc_src);
442 tcg_gen_discard_i64(cc_dst);
443 tcg_gen_discard_i64(cc_vr);
445 s->cc_op = CC_OP_STATIC;
448 /* calculates cc into cc_op */
/* Materialize the lazily-tracked condition code into the cc_op global,
   dispatching on s->cc_op to pick how many operands the helper needs.
   (switch scaffolding elided in this listing). */
449 static void gen_op_calc_cc(DisasContext *s)
451 TCGv_i32 local_cc_op;
454 TCGV_UNUSED_I32(local_cc_op);
455 TCGV_UNUSED_I64(dummy);
/* A zero placeholder for unused helper arguments. */
458 dummy = tcg_const_i64(0);
472 local_cc_op = tcg_const_i32(s->cc_op);
488 /* s->cc_op is the cc value */
489 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
492 /* env->cc_op already is the cc value */
/* 1-operand cases: only cc_dst matters. */
507 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
/* 2-operand cases. */
512 case CC_OP_LTUGTU_32:
513 case CC_OP_LTUGTU_64:
520 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
/* 3-operand cases. */
535 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
538 /* unknown operation - assume 3 arguments and cc_op in env */
539 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
/* Free whichever temporaries were actually allocated above. */
545 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
546 tcg_temp_free_i32(local_cc_op);
548 if (!TCGV_IS_UNUSED_I64(dummy)) {
549 tcg_temp_free_i64(dummy);
552 /* We now have cc in cc_op as constant */
/* Whether a direct goto_tb to DEST is allowed: the destination must lie on
   one of the (up to two) pages this TB touches, and chaining must not be
   disabled by single-stepping or a pending-I/O last instruction. */
556 static int use_goto_tb(DisasContext *s, uint64_t dest)
558 /* NOTE: we handle the case where the TB spans two pages here */
559 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
560 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
561 && !s->singlestep_enabled
562 && !(s->tb->cflags & CF_LAST_IO));
/* Debug counters: a branch we had to evaluate via the helper ... */
565 static void account_noninline_branch(DisasContext *s, int cc_op)
567 #ifdef DEBUG_INLINE_BRANCHES
568 inline_branch_miss[cc_op]++;
/* ... versus one folded into an inline TCG comparison. */
572 static void account_inline_branch(DisasContext *s, int cc_op)
574 #ifdef DEBUG_INLINE_BRANCHES
575 inline_branch_hit[cc_op]++;
579 /* Table of mask values to comparison codes, given a comparison as input.
580 For a true comparison CC=3 will never be set, but we treat this
581 conservatively for possible use when CC=3 indicates overflow. */
/* Index is the 4-bit branch mask: bit 8 = CC0 (EQ), 4 = CC1 (LT),
   2 = CC2 (GT), 1 = CC3 (unused here, hence NEVER in odd entries). */
582 static const TCGCond ltgt_cond[16] = {
583 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
584 TCG_COND_GT, TCG_COND_NEVER, /* | | GT | x */
585 TCG_COND_LT, TCG_COND_NEVER, /* | LT | | x */
586 TCG_COND_NE, TCG_COND_NEVER, /* | LT | GT | x */
587 TCG_COND_EQ, TCG_COND_NEVER, /* EQ | | | x */
588 TCG_COND_GE, TCG_COND_NEVER, /* EQ | | GT | x */
589 TCG_COND_LE, TCG_COND_NEVER, /* EQ | LT | | x */
590 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
593 /* Table of mask values to comparison codes, given a logic op as input.
594 For such, only CC=0 and CC=1 should be possible. */
/* Same indexing as above: bit 8 = CC0 (result zero), bit 4 = CC1
   (result nonzero); bits 2 and 1 are don't-cares for logic ops. */
595 static const TCGCond nz_cond[16] = {
597 TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER, TCG_COND_NEVER,
599 TCG_COND_NE, TCG_COND_NE, TCG_COND_NE, TCG_COND_NE,
601 TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ, TCG_COND_EQ,
602 /* EQ | NE | x | x */
603 TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS, TCG_COND_ALWAYS,
606 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
607 details required to generate a TCG comparison. */
608 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
611 enum cc_op old_cc_op = s->cc_op;
/* Trivial masks: branch always (15) or never (0); no operands needed. */
613 if (mask == 15 || mask == 0) {
614 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
617 c->g1 = c->g2 = true;
622 /* Find the TCG condition for the mask + cc op. */
/* Signed comparison cc ops map through ltgt_cond ... */
628 cond = ltgt_cond[mask];
629 if (cond == TCG_COND_NEVER) {
632 account_inline_branch(s, old_cc_op);
/* ... unsigned comparisons use the unsigned variant of the same cond. */
635 case CC_OP_LTUGTU_32:
636 case CC_OP_LTUGTU_64:
637 cond = tcg_unsigned_cond(ltgt_cond[mask]);
638 if (cond == TCG_COND_NEVER) {
641 account_inline_branch(s, old_cc_op);
/* Logic-result cc ops map through nz_cond. */
645 cond = nz_cond[mask];
646 if (cond == TCG_COND_NEVER) {
649 account_inline_branch(s, old_cc_op);
664 account_inline_branch(s, old_cc_op);
679 account_inline_branch(s, old_cc_op);
/* Test-under-mask style: only "zero" / "nonzero" bits of mask apply. */
683 switch (mask & 0xa) {
684 case 8: /* src == 0 -> no one bit found */
687 case 2: /* src != 0 -> one bit found */
693 account_inline_branch(s, old_cc_op);
/* No inline form found: fall back to computing the cc explicitly. */
698 /* Calculate cc value. */
703 /* Jump based on CC. We'll load up the real cond below;
704 the assignment here merely avoids a compiler warning. */
705 account_noninline_branch(s, old_cc_op);
706 old_cc_op = CC_OP_STATIC;
707 cond = TCG_COND_NEVER;
711 /* Load up the arguments of the comparison. */
713 c->g1 = c->g2 = false;
/* 32-bit one-operand compare against zero. */
717 c->u.s32.a = tcg_temp_new_i32();
718 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
719 c->u.s32.b = tcg_const_i32(0);
/* 32-bit two-operand compare: truncate cc_src/cc_dst. */
722 case CC_OP_LTUGTU_32:
724 c->u.s32.a = tcg_temp_new_i32();
725 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
726 c->u.s32.b = tcg_temp_new_i32();
727 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
/* 64-bit compare against zero. */
734 c->u.s64.b = tcg_const_i64(0);
/* 64-bit two-operand compare uses the cc globals directly. */
738 case CC_OP_LTUGTU_64:
741 c->g1 = c->g2 = true;
/* Test-under-mask: compare (src & dst) against zero. */
747 c->u.s64.a = tcg_temp_new_i64();
748 c->u.s64.b = tcg_const_i64(0);
749 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
/* CC_OP_STATIC: compare the materialized cc value itself, one case
   per mask pattern that reduces to a simple comparison. */
757 case 0x8 | 0x4 | 0x2: /* cc != 3 */
759 c->u.s32.b = tcg_const_i32(3);
761 case 0x8 | 0x4 | 0x1: /* cc != 2 */
763 c->u.s32.b = tcg_const_i32(2);
765 case 0x8 | 0x2 | 0x1: /* cc != 1 */
767 c->u.s32.b = tcg_const_i32(1);
769 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
772 c->u.s32.a = tcg_temp_new_i32();
773 c->u.s32.b = tcg_const_i32(0);
774 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
776 case 0x8 | 0x4: /* cc < 2 */
778 c->u.s32.b = tcg_const_i32(2);
780 case 0x8: /* cc == 0 */
782 c->u.s32.b = tcg_const_i32(0);
784 case 0x4 | 0x2 | 0x1: /* cc != 0 */
786 c->u.s32.b = tcg_const_i32(0);
788 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
791 c->u.s32.a = tcg_temp_new_i32();
792 c->u.s32.b = tcg_const_i32(0);
793 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
795 case 0x4: /* cc == 1 */
797 c->u.s32.b = tcg_const_i32(1);
799 case 0x2 | 0x1: /* cc > 1 */
801 c->u.s32.b = tcg_const_i32(1);
803 case 0x2: /* cc == 2 */
805 c->u.s32.b = tcg_const_i32(2);
807 case 0x1: /* cc == 3 */
809 c->u.s32.b = tcg_const_i32(3);
812 /* CC is masked by something else: (8 >> cc) & mask. */
815 c->u.s32.a = tcg_const_i32(8);
816 c->u.s32.b = tcg_const_i32(0);
817 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
818 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
/* Free the operand temporaries of C, unless flagged as globals (g1/g2);
   the 64- vs 32-bit branches are selected by the elided is_64 checks. */
829 static void free_compare(DisasCompare *c)
833 tcg_temp_free_i64(c->u.s64.a);
835 tcg_temp_free_i32(c->u.s32.a);
840 tcg_temp_free_i64(c->u.s64.b);
842 tcg_temp_free_i32(c->u.s32.b);
847 /* ====================================================================== */
848 /* Define the insn format enumeration. */
/* Each Fn macro collapses to the bare format name so that including
   insn-format.def yields one FMT_* enumerator per format. */
849 #define F0(N) FMT_##N,
850 #define F1(N, X1) F0(N)
851 #define F2(N, X1, X2) F0(N)
852 #define F3(N, X1, X2, X3) F0(N)
853 #define F4(N, X1, X2, X3, X4) F0(N)
854 #define F5(N, X1, X2, X3, X4, X5) F0(N)
857 #include "insn-format.def"
867 /* Define a structure to hold the decoded fields. We'll store each inside
868 an array indexed by an enum. In order to conserve memory, we'll arrange
869 for fields that do not exist at the same time to overlap, thus the "C"
870 for compact. For checking purposes there is an "O" for original index
871 as well that will be applied to availability bitmaps. */
873 enum DisasFieldIndexO {
896 enum DisasFieldIndexC {
/* Bitmaps of which fields were decoded, in compact and original index
   space respectively. */
930 unsigned presentC:16;
931 unsigned int presentO;
935 /* This is the way fields are to be accessed out of DisasFields. */
936 #define have_field(S, F) have_field1((S), FLD_O_##F)
937 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
/* Test presence via the original-index bitmap. */
939 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
941 return (f->presentO >> c) & 1;
/* Fetch a field value, asserting it was actually decoded. */
944 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
945 enum DisasFieldIndexC c)
947 assert(have_field1(f, o));
951 /* Describe the layout of each field in each format. */
/* One DisasField gives bit position/size (elided members), an extraction
   type, and the compact/original field indices. */
952 typedef struct DisasField {
956 unsigned int indexC:6;
957 enum DisasFieldIndexO indexO:8;
/* Per-format array of up to NUM_C_FIELD field descriptors. */
960 typedef struct DisasFormatInfo {
961 DisasField op[NUM_C_FIELD];
/* Field constructors: { beg, size, type, indexC, indexO }.  R/M are 4-bit
   register/mask fields; BD/BXD/BDL/BXDL build base(+index)+displacement
   groups (type 2 = displacement needing extra handling); I is a signed
   immediate (type 1), L an unsigned length. */
964 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
965 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
966 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
967 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
968 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
969 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
970 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
971 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
972 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
973 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
974 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
975 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
976 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
977 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
/* Re-expand the Fn macros to initializers and instantiate the table. */
979 #define F0(N) { { } },
980 #define F1(N, X1) { { X1 } },
981 #define F2(N, X1, X2) { { X1, X2 } },
982 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
983 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
984 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
986 static const DisasFormatInfo format_info[] = {
987 #include "insn-format.def"
1005 /* Generally, we'll extract operands into this structures, operate upon
1006 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1007 of routines below for more details. */
/* g_* flags mark operands that alias TCG globals and must not be freed. */
1009 bool g_out, g_out2, g_in1, g_in2;
1010 TCGv_i64 out, out2, in1, in2;
1014 /* Instructions can place constraints on their operands, raising specification
1015 exceptions if they are violated. To make this easy to automate, each "in1",
1016 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1017 of the following, or 0. To make this easy to document, we'll put the
1018 SPEC_<name> defines next to <name>. */
/* Bit flags, OR-able per helper. */
1020 #define SPEC_r1_even 1
1021 #define SPEC_r2_even 2
1022 #define SPEC_r1_f128 4
1023 #define SPEC_r2_f128 8
1025 /* Return values from translate_one, indicating the state of the TB. */
1027 /* Continue the TB. */
1029 /* We have emitted one or more goto_tb. No fixup required. */
1031 /* We are not using a goto_tb (for whatever reason), but have updated
1032 the PC (for whatever reason), so there's no need to do it again on
1035 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1036 updated the PC for the next instruction to be executed. */
1038 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1039 No following code will be executed. */
/* Facility bits gating availability of each instruction. */
1043 typedef enum DisasFacility {
1044 FAC_Z, /* zarch (default) */
1045 FAC_CASS, /* compare and swap and store */
1046 FAC_CASS2, /* compare and swap and store 2 */
1047 FAC_DFP, /* decimal floating point */
1048 FAC_DFPR, /* decimal floating point rounding */
1049 FAC_DO, /* distinct operands */
1050 FAC_EE, /* execute extensions */
1051 FAC_EI, /* extended immediate */
1052 FAC_FPE, /* floating point extension */
1053 FAC_FPSSH, /* floating point support sign handling */
1054 FAC_FPRGR, /* FPR-GR transfer */
1055 FAC_GIE, /* general instructions extension */
1056 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1057 FAC_HW, /* high-word */
1058 FAC_IEEEE_SIM, /* IEEE exception simulation */
1059 FAC_LOC, /* load/store on condition */
1060 FAC_LD, /* long displacement */
1061 FAC_PC, /* population count */
1062 FAC_SCF, /* store clock fast */
1063 FAC_SFLE, /* store facility list extended */
/* DisasInsn members: facility gate plus the per-stage helper hooks that
   translate_one composes (load inputs, prep output, op, cc, writeback). */
1069 DisasFacility fac:6;
1074 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1075 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1076 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1077 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1078 void (*help_cout)(DisasContext *, DisasOps *);
1079 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1084 /* ====================================================================== */
1085 /* Miscellaneous helpers, used by several operations. */
/* Load the shift-count operand into o->in2: a constant if no base register,
   otherwise the computed address, masked to the valid count range MASK. */
1087 static void help_l2_shift(DisasContext *s, DisasFields *f,
1088 DisasOps *o, int mask)
1090 int b2 = get_field(f, b2);
1091 int d2 = get_field(f, d2);
1094 o->in2 = tcg_const_i64(d2 & mask);
1096 o->in2 = get_address(s, 0, b2, d2);
1097 tcg_gen_andi_i64(o->in2, o->in2, mask);
/* Unconditional branch to DEST: fall through if it is the next insn,
   chain via goto_tb when allowed, else just update the PC and exit. */
1101 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1103 if (dest == s->next_pc) {
1106 if (use_goto_tb(s, dest)) {
1109 tcg_gen_movi_i64(psw_addr, dest);
1110 tcg_gen_exit_tb((tcg_target_long)s->tb);
1111 return EXIT_GOTO_TB;
1113 tcg_gen_movi_i64(psw_addr, dest);
1114 return EXIT_PC_UPDATED;
/* Generate a conditional branch, either to immediate offset IMM (halfword
   units) when IS_IMM, or to register destination CDEST.  Chooses between
   two goto_tb exits, one goto_tb + indirect, or movcond-based PC update. */
1118 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1119 bool is_imm, int imm, TCGv_i64 cdest)
1122 uint64_t dest = s->pc + 2 * imm;
1125 /* Take care of the special cases first. */
1126 if (c->cond == TCG_COND_NEVER) {
1131 if (dest == s->next_pc) {
1132 /* Branch to next. */
1136 if (c->cond == TCG_COND_ALWAYS) {
1137 ret = help_goto_direct(s, dest);
1141 if (TCGV_IS_UNUSED_I64(cdest)) {
1142 /* E.g. bcr %r0 -> no branch. */
1146 if (c->cond == TCG_COND_ALWAYS) {
1147 tcg_gen_mov_i64(psw_addr, cdest);
1148 ret = EXIT_PC_UPDATED;
/* General case: conditional with a usable fallthrough. */
1153 if (use_goto_tb(s, s->next_pc)) {
1154 if (is_imm && use_goto_tb(s, dest)) {
1155 /* Both exits can use goto_tb. */
1158 lab = gen_new_label();
1160 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1162 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1165 /* Branch not taken. */
1167 tcg_gen_movi_i64(psw_addr, s->next_pc);
1168 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
/* Branch taken (label target elided). */
1173 tcg_gen_movi_i64(psw_addr, dest);
1174 tcg_gen_exit_tb((tcg_target_long)s->tb + 1);
1178 /* Fallthru can use goto_tb, but taken branch cannot. */
1179 /* Store taken branch destination before the brcond. This
1180 avoids having to allocate a new local temp to hold it.
1181 We'll overwrite this in the not taken case anyway. */
1183 tcg_gen_mov_i64(psw_addr, cdest);
1186 lab = gen_new_label();
1188 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1190 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1193 /* Branch not taken. */
1196 tcg_gen_movi_i64(psw_addr, s->next_pc);
1197 tcg_gen_exit_tb((tcg_target_long)s->tb + 0);
/* Taken branch: PC already set (or set to immediate dest below). */
1201 tcg_gen_movi_i64(psw_addr, dest);
1203 ret = EXIT_PC_UPDATED;
1206 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1207 Most commonly we're single-stepping or some other condition that
1208 disables all use of goto_tb. Just update the PC and exit. */
1210 TCGv_i64 next = tcg_const_i64(s->next_pc);
1212 cdest = tcg_const_i64(dest);
/* 64-bit compare: select taken/fallthrough PC with one movcond. */
1216 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
/* 32-bit compare: widen the setcond result, then movcond on != 0. */
1219 TCGv_i32 t0 = tcg_temp_new_i32();
1220 TCGv_i64 t1 = tcg_temp_new_i64();
1221 TCGv_i64 z = tcg_const_i64(0);
1222 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1223 tcg_gen_extu_i32_i64(t1, t0);
1224 tcg_temp_free_i32(t0);
1225 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1226 tcg_temp_free_i64(t1);
1227 tcg_temp_free_i64(z);
1231 tcg_temp_free_i64(cdest);
1233 tcg_temp_free_i64(next);
1235 ret = EXIT_PC_UPDATED;
1243 /* ====================================================================== */
1244 /* The operations. These perform the bulk of the work for any insn,
1245 usually after the operands have been loaded and output initialized. */
/* Integer absolute value via helper. */
1247 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1249 gen_helper_abs_i64(o->out, o->in2);
/* Float abs = clear the sign bit; in2 holds the short float in the low
   32 bits (loaded via load_freg32_i64), so a 31-bit mask suffices. */
1253 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1255 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1259 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1261 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
/* 128-bit float abs: clear sign in the high half, copy the low half. */
1265 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1267 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1268 tcg_gen_mov_i64(o->out2, o->in2);
1272 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1274 tcg_gen_add_i64(o->out, o->in1, o->in2);
/* Add with carry: fold in bit 1 of the materialized cc (carry flag for
   logical adds: cc 2/3 mean carry). */
1278 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1282 tcg_gen_add_i64(o->out, o->in1, o->in2);
1284 /* XXX possible optimization point */
1286 cc = tcg_temp_new_i64();
1287 tcg_gen_extu_i32_i64(cc, cc_op);
1288 tcg_gen_shri_i64(cc, cc, 1);
1290 tcg_gen_add_i64(o->out, o->out, cc);
1291 tcg_temp_free_i64(cc);
/* IEEE float adds (short/long/extended) via helpers; axb returns the low
   half of the 128-bit result through env->retxl. */
1295 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1297 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1301 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1303 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1307 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1309 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1310 return_low128(o->out2);
1314 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1316 tcg_gen_and_i64(o->out, o->in1, o->in2);
/* AND-immediate on a sub-field of the register: s->insn->data packs the
   field size (high byte) and shift (low byte); bits outside the field are
   preserved by OR-ing the inverted mask into the immediate. */
1320 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1322 int shift = s->insn->data & 0xff;
1323 int size = s->insn->data >> 8;
1324 uint64_t mask = ((1ull << size) - 1) << shift;
1327 tcg_gen_shli_i64(o->in2, o->in2, shift);
1328 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1329 tcg_gen_and_i64(o->out, o->in1, o->in2);
1331 /* Produce the CC from only the bits manipulated. */
1332 tcg_gen_andi_i64(cc_dst, o->out, mask);
1333 set_cc_nz_u64(s, cc_dst);
/* Branch-and-save: store the link value, then branch to in2 if present
   (r0 as target register means "no branch"). */
1337 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1339 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1340 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1341 tcg_gen_mov_i64(psw_addr, o->in2);
1342 return EXIT_PC_UPDATED;
/* Branch-relative-and-save: link, then direct goto to pc + 2*i2. */
1348 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1350 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1351 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
/* Branch on condition: decode mask m1 into a comparison, then branch. */
1354 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1356 int m1 = get_field(s->fields, m1);
1357 bool is_imm = have_field(s->fields, i2);
1358 int imm = is_imm ? get_field(s->fields, i2) : 0;
1361 disas_jcc(s, &c, m1);
1362 return help_branch(s, &c, is_imm, imm, o->in2);
/* Branch on count (32-bit): decrement the low half of r1, branch while
   the decremented 32-bit value is non-zero. */
1365 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1367 int r1 = get_field(s->fields, r1);
1368 bool is_imm = have_field(s->fields, i2);
1369 int imm = is_imm ? get_field(s->fields, i2) : 0;
1373 c.cond = TCG_COND_NE;
/* Decrement in a temp, write back only the low 32 bits. */
1378 t = tcg_temp_new_i64();
1379 tcg_gen_subi_i64(t, regs[r1], 1);
1380 store_reg32_i64(r1, t);
1381 c.u.s32.a = tcg_temp_new_i32();
1382 c.u.s32.b = tcg_const_i32(0);
1383 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1384 tcg_temp_free_i64(t);
1386 return help_branch(s, &c, is_imm, imm, o->in2);
/* Branch on count (64-bit): decrement r1 in place, branch while != 0. */
1389 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1391 int r1 = get_field(s->fields, r1);
1392 bool is_imm = have_field(s->fields, i2);
1393 int imm = is_imm ? get_field(s->fields, i2) : 0;
1396 c.cond = TCG_COND_NE;
1401 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1402 c.u.s64.a = regs[r1];
1403 c.u.s64.b = tcg_const_i64(0);
1405 return help_branch(s, &c, is_imm, imm, o->in2);
/* Branch on index (32-bit, BXH/BXLE): add r3 to r1, compare the 32-bit
   sum against the odd register of the r3 pair; insn->data selects
   high (GT) vs low-or-equal (LE). */
1408 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1410 int r1 = get_field(s->fields, r1);
1411 int r3 = get_field(s->fields, r3);
1412 bool is_imm = have_field(s->fields, i2);
1413 int imm = is_imm ? get_field(s->fields, i2) : 0;
1417 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1422 t = tcg_temp_new_i64();
1423 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1424 c.u.s32.a = tcg_temp_new_i32();
1425 c.u.s32.b = tcg_temp_new_i32();
1426 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1427 tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
1428 store_reg32_i64(r1, t);
1429 tcg_temp_free_i64(t);
1431 return help_branch(s, &c, is_imm, imm, o->in2);
/* Branch on index (64-bit): same idea on full registers.  When r1 is
   itself the comparand register, snapshot it before the add. */
1434 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1436 int r1 = get_field(s->fields, r1);
1437 int r3 = get_field(s->fields, r3);
1438 bool is_imm = have_field(s->fields, i2);
1439 int imm = is_imm ? get_field(s->fields, i2) : 0;
1442 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1445 if (r1 == (r3 | 1)) {
1446 c.u.s64.b = load_reg(r3 | 1);
1449 c.u.s64.b = regs[r3 | 1];
1453 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1454 c.u.s64.a = regs[r1];
1457 return help_branch(s, &c, is_imm, imm, o->in2);
/* Compare-and-branch family: condition comes from the m3 mask via
   ltgt_cond; insn->data selects the unsigned flavor.  Branch target is
   either immediate i4 or address b4+d4. */
1460 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1462 int imm, m3 = get_field(s->fields, m3);
1466 /* Bit 3 of the m3 field is reserved and should be zero.
1467 Choose to ignore it wrt the ltgt_cond table above. */
1468 c.cond = ltgt_cond[m3 & 14];
1469 if (s->insn->data) {
1470 c.cond = tcg_unsigned_cond(c.cond);
/* Operands are the (global) in1/in2 loaded by the decode table. */
1472 c.is_64 = c.g1 = c.g2 = true;
1476 is_imm = have_field(s->fields, i4);
1478 imm = get_field(s->fields, i4);
1481 o->out = get_address(s, 0, get_field(s->fields, b4),
1482 get_field(s->fields, d4));
1485 return help_branch(s, &c, is_imm, imm, o->out);
/* IEEE float compares: helper leaves the cc in cc_op (set_cc_static call
   elided after each). */
1488 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1490 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1495 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1497 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1502 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1504 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
/* Convert float to signed 32-bit (cf*b): m3 holds the rounding mode;
   cc reflects the source value. */
1509 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1511 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1512 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1513 tcg_temp_free_i32(m3);
1514 gen_set_cc_nz_f32(s, o->in2);
1518 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1520 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1521 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1522 tcg_temp_free_i32(m3);
1523 gen_set_cc_nz_f64(s, o->in2);
1527 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1529 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1530 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1531 tcg_temp_free_i32(m3);
1532 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* Convert float to signed 64-bit (cg*b), same pattern. */
1536 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1538 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1539 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1540 tcg_temp_free_i32(m3);
1541 gen_set_cc_nz_f32(s, o->in2);
1545 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1547 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1548 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1549 tcg_temp_free_i32(m3);
1550 gen_set_cc_nz_f64(s, o->in2);
1554 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1556 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1557 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1558 tcg_temp_free_i32(m3);
1559 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* Convert float to unsigned 32-bit (clf*b): m3 is the rounding mode. */
1563 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1565 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1566 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1567 tcg_temp_free_i32(m3);
1568 gen_set_cc_nz_f32(s, o->in2);
1572 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1574 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1575 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1576 tcg_temp_free_i32(m3);
1577 gen_set_cc_nz_f64(s, o->in2);
1581 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1583 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1584 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1585 tcg_temp_free_i32(m3);
1586 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* Convert float to unsigned 64-bit (clg*b), same pattern. */
1590 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1592 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1593 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1594 tcg_temp_free_i32(m3);
1595 gen_set_cc_nz_f32(s, o->in2);
1599 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1601 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1602 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1603 tcg_temp_free_i32(m3);
1604 gen_set_cc_nz_f64(s, o->in2);
1608 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1610 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1611 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1612 tcg_temp_free_i32(m3);
1613 gen_set_cc_nz_f128(s, o->in1, o->in2);
/* CONVERT FROM FIXED / CONVERT FROM LOGICAL family: integer in in2 ->
   BFP result.  m3 again rides along to the helper (rounding-mode
   modifier, presumably -- confirm).  No CC update here: these ops do
   not set the condition code in the visible code.  The *xgb/*xlgb
   forms produce a 128-bit result whose low half is written back via
   return_low128(). */

1617 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1619 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1620 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1621 tcg_temp_free_i32(m3);

1625 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1627 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1628 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1629 tcg_temp_free_i32(m3);

1633 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1635 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1636 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1637 tcg_temp_free_i32(m3);
1638 return_low128(o->out2);

/* Logical (unsigned source) variants of the above. */
1642 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1644 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1645 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1646 tcg_temp_free_i32(m3);

1650 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1652 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1653 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1654 tcg_temp_free_i32(m3);

1658 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1660 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1661 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1662 tcg_temp_free_i32(m3);
1663 return_low128(o->out2);
/* CKSM (CHECKSUM): helper computes the checksum and returns the number
   of bytes consumed in 'len'; the even register of the r2 pair is
   advanced by len and the odd register (the remaining length) is
   decremented by it. */
1667 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1669 int r2 = get_field(s->fields, r2);
1670 TCGv_i64 len = tcg_temp_new_i64();
1672 potential_page_fault(s);
1673 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1675 return_low128(o->out);
1677 tcg_gen_add_i64(regs[r2], regs[r2], len);
1678 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1679 tcg_temp_free_i64(len);

/* CLC (COMPARE LOGICAL character): for operand lengths of exactly
   1/2/4/8 bytes the compare is inlined as two loads plus an unsigned
   64-bit compare CC; any other length falls back to the helper with
   the length as an i32 constant.  (The dispatching switch/case lines
   are elided from this view.) */
1684 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1686 int l = get_field(s->fields, l1);
1691 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1692 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1695 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1696 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1699 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1700 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1703 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1704 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1707 potential_page_fault(s);
1708 vl = tcg_const_i32(l);
1709 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1710 tcg_temp_free_i32(vl);
1714 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
/* CLCLE (COMPARE LOGICAL LONG EXTENDED): entirely in the helper, which
   also produces the CC. */
1718 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1720 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1721 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1722 potential_page_fault(s);
1723 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1724 tcg_temp_free_i32(r1);
1725 tcg_temp_free_i32(r3);

/* CLM (COMPARE LOGICAL UNDER MASK): truncate the r1 value to 32 bits
   for the helper; CC comes from the helper. */
1730 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1732 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1733 TCGv_i32 t1 = tcg_temp_new_i32();
1734 tcg_gen_trunc_i64_i32(t1, o->in1);
1735 potential_page_fault(s);
1736 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1738 tcg_temp_free_i32(t1);
1739 tcg_temp_free_i32(m3);

/* CLST (COMPARE LOGICAL STRING): regs[0] holds the terminator byte;
   helper returns updated addresses in in1 (and low128 -> in2). */
1743 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1745 potential_page_fault(s);
1746 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1748 return_low128(o->in2);

/* CPS (COPY SIGN): out = sign bit of in1 | magnitude of in2. */
1752 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1754 TCGv_i64 t = tcg_temp_new_i64();
1755 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1756 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1757 tcg_gen_or_i64(o->out, o->out, t);
1758 tcg_temp_free_i64(t);
/* CS / CSG (COMPARE AND SWAP, 32/64-bit): helper does the atomic
   compare-and-swap against memory; r3 supplies the replacement. */
1762 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1764 int r3 = get_field(s->fields, r3);
1765 potential_page_fault(s);
1766 gen_helper_cs(o->out, cpu_env, o->in1, o->in2, regs[r3]);

1771 static ExitStatus op_csg(DisasContext *s, DisasOps *o)
1773 int r3 = get_field(s->fields, r3);
1774 potential_page_fault(s);
1775 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, regs[r3]);

/* CSP (COMPARE AND SWAP AND PURGE) -- privileged. */
1780 #ifndef CONFIG_USER_ONLY
1781 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
1783 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1784 check_privileged(s);
1785 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
1786 tcg_temp_free_i32(r1);

/* CDS (COMPARE DOUBLE AND SWAP, 32-bit pair): build the 64-bit value
   from the even/odd halves of the r3 pair and reuse the 64-bit csg
   helper -- deliberate reuse, not a typo. */
1792 static ExitStatus op_cds(DisasContext *s, DisasOps *o)
1794 int r3 = get_field(s->fields, r3);
1795 TCGv_i64 in3 = tcg_temp_new_i64();
1796 tcg_gen_deposit_i64(in3, regs[r3 + 1], regs[r3], 32, 32);
1797 potential_page_fault(s);
1798 gen_helper_csg(o->out, cpu_env, o->in1, o->in2, in3);
1799 tcg_temp_free_i64(in3);

/* CDSG (COMPARE DOUBLE AND SWAP, 64-bit pair): helper-only for now. */
1804 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1806 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1807 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1808 potential_page_fault(s);
1809 /* XXX rewrite in tcg */
1810 gen_helper_cdsg(cc_op, cpu_env, r1, o->in2, r3);
/* CVD (CONVERT TO DECIMAL): truncate r1 to 32 bits, have the helper
   produce the packed-decimal doubleword, store it at the second
   operand address. */
1815 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
1817 TCGv_i64 t1 = tcg_temp_new_i64();
1818 TCGv_i32 t2 = tcg_temp_new_i32();
1819 tcg_gen_trunc_i64_i32(t2, o->in1);
1820 gen_helper_cvd(t1, t2);
1821 tcg_temp_free_i32(t2);
1822 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
1823 tcg_temp_free_i64(t1);

/* COMPARE AND TRAP family: branch past the trap when the (inverted)
   condition from m3 holds; otherwise set DXC in the FPC and raise a
   data exception.  insn->data selects the unsigned variant. */
1827 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
1829 int m3 = get_field(s->fields, m3);
1830 int lab = gen_new_label();
1834 /* Bit 3 of the m3 field is reserved and should be zero.
1835 Choose to ignore it wrt the ltgt_cond table above. */
1836 c = tcg_invert_cond(ltgt_cond[m3 & 14]);
1837 if (s->insn->data) {
1838 c = tcg_unsigned_cond(c);
1840 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
1842 /* Set DXC to 0xff. */
1843 t = tcg_temp_new_i32();
1844 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
1845 tcg_gen_ori_i32(t, t, 0xff00);
1846 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
1847 tcg_temp_free_i32(t);
1850 gen_program_exception(s, PGM_DATA);
/* DIAGNOSE -- privileged hypercall-style instruction; the function
   code comes from the d2 displacement (low 12 bits). */
1856 #ifndef CONFIG_USER_ONLY
1857 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
1861 check_privileged(s);
1862 potential_page_fault(s);
1864 /* We pretend the format is RX_a so that D2 is the field we want. */
1865 tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
1866 gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
1867 tcg_temp_free_i32(tmp);

/* Divide ops: the helper returns the quotient in out2 and the
   remainder via the low-128 convention into out.  divu64 additionally
   takes the current out/out2 pair as the 128-bit dividend. */
1872 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
1874 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
1875 return_low128(o->out);

1879 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
1881 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
1882 return_low128(o->out);

1886 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
1888 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
1889 return_low128(o->out);

1893 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
1895 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
1896 return_low128(o->out);
/* BFP divide: 32/64-bit via simple helper; 128-bit passes the
   out/out2 pair as one operand and returns the low half. */
1900 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
1902 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);

1906 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
1908 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);

1912 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
1914 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1915 return_low128(o->out2);

/* EAR: read access register r2 into the output. */
1919 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
1921 int r2 = get_field(s->fields, r2);
1922 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));

/* EFPC: extract the floating-point control register. */
1926 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
1928 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));

/* EXECUTE: implemented via helper; see the rationale below for why a
   pure-TCG implementation was not pursued. */
1932 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
1934 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
1935 tb->flags, (ab)use the tb->cs_base field as the address of
1936 the template in memory, and grab 8 bits of tb->flags/cflags for
1937 the contents of the register. We would then recognize all this
1938 in gen_intermediate_code_internal, generating code for exactly
1939 one instruction. This new TB then gets executed normally.
1941 On the other hand, this seems to be mostly used for modifying
1942 MVC inside of memcpy, which needs a helper call anyway. So
1943 perhaps this doesn't bear thinking about any further. */
1950 tmp = tcg_const_i64(s->next_pc);
1951 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
1952 tcg_temp_free_i64(tmp);
/* FLOGR (FIND LEFTMOST ONE): out = clz(in) (64 when in==0); out2 =
   input with the found bit cleared.  CC is computed from the raw
   input (see comment below). */
1958 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
1960 /* We'll use the original input for cc computation, since we get to
1961 compare that against 0, which ought to be better than comparing
1962 the real output against 64. It also lets cc_dst be a convenient
1963 temporary during our computation. */
1964 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
1966 /* R1 = IN ? CLZ(IN) : 64. */
1967 gen_helper_clz(o->out, o->in2);
1969 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
1970 value by 64, which is undefined. But since the shift is 64 iff the
1971 input is zero, we still get the correct result after and'ing. */
1972 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
1973 tcg_gen_shr_i64(o->out2, o->out2, o->out);
1974 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);

/* ICM (INSERT CHARACTERS UNDER MASK): contiguous masks 0xf/0xc/0x8
   become a single wider load + deposit; otherwise loop over set mask
   bits doing byte loads/deposits.  insn->data is the bit base (high
   vs low word).  CC comes from the mask of inserted bits vs result.
   (The switch/loop framing lines are elided in this view.) */
1978 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
1980 int m3 = get_field(s->fields, m3);
1981 int pos, len, base = s->insn->data;
1982 TCGv_i64 tmp = tcg_temp_new_i64();
1987 /* Effectively a 32-bit load. */
1988 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
1995 /* Effectively a 16-bit load. */
1996 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2004 /* Effectively an 8-bit load. */
2005 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2010 pos = base + ctz32(m3) * 8;
2011 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2012 ccm = ((1ull << len) - 1) << pos;
2016 /* This is going to be a sequence of loads and inserts. */
2017 pos = base + 32 - 8;
2021 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2022 tcg_gen_addi_i64(o->in2, o->in2, 1);
2023 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2026 m3 = (m3 << 1) & 0xf;
2032 tcg_gen_movi_i64(tmp, ccm);
2033 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2034 tcg_temp_free_i64(tmp);
/* Generic insert-immediate: shift/size packed into insn->data
   (low byte = shift, rest = field size). */
2038 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2040 int shift = s->insn->data & 0xff;
2041 int size = s->insn->data >> 8;
2042 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);

/* IPM (INSERT PROGRAM MASK): build bits 24-31 of r1 from the program
   mask (PSW bits via shift dance) and the current CC. */
2046 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2051 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2053 t1 = tcg_temp_new_i64();
2054 tcg_gen_shli_i64(t1, psw_mask, 20);
2055 tcg_gen_shri_i64(t1, t1, 36);
2056 tcg_gen_or_i64(o->out, o->out, t1);
2058 tcg_gen_extu_i32_i64(t1, cc_op);
2059 tcg_gen_shli_i64(t1, t1, 28);
2060 tcg_gen_or_i64(o->out, o->out, t1);
2061 tcg_temp_free_i64(t1);

/* IPTE / ISKE -- privileged page-table / storage-key ops. */
2065 #ifndef CONFIG_USER_ONLY
2066 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2068 check_privileged(s);
2069 gen_helper_ipte(cpu_env, o->in1, o->in2);

2073 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2075 check_privileged(s);
2076 gen_helper_iske(o->out, cpu_env, o->in2);
/* BFP load-and-convert (lengthen/round) ops; the 128-bit results use
   the low128 return convention. */
2081 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2083 gen_helper_ldeb(o->out, cpu_env, o->in2);

2087 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2089 gen_helper_ledb(o->out, cpu_env, o->in2);

2093 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2095 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);

2099 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2101 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);

2105 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2107 gen_helper_lxdb(o->out, cpu_env, o->in2);
2108 return_low128(o->out2);

2112 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2114 gen_helper_lxeb(o->out, cpu_env, o->in2);
2115 return_low128(o->out2);

/* LLGT: load 31-bit value (clear bit 32..63 except low 31). */
2119 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2121 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);

/* Plain memory loads of various widths/signedness into out. */
2125 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2127 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));

2131 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2133 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));

2137 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2139 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));

2143 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2145 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));

2149 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2151 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));

2155 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2157 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));

2161 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2163 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
/* LOAD ON CONDITION: when the disas_jcc condition is 64-bit, a single
   movcond does it; for a 32-bit condition, materialize the setcond,
   widen it, and movcond against zero.  (Branch framing lines are
   elided in this view.) */
2167 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2171 disas_jcc(s, &c, get_field(s->fields, m3));
2174 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2178 TCGv_i32 t32 = tcg_temp_new_i32();
2181 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2184 t = tcg_temp_new_i64();
2185 tcg_gen_extu_i32_i64(t, t32);
2186 tcg_temp_free_i32(t32);
2188 z = tcg_const_i64(0);
2189 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2190 tcg_temp_free_i64(t);
2191 tcg_temp_free_i64(z);

/* LCTL / LCTLG (LOAD CONTROL, 32/64-bit) -- privileged, helper-based. */
2197 #ifndef CONFIG_USER_ONLY
2198 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2200 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2201 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2202 check_privileged(s);
2203 potential_page_fault(s);
2204 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2205 tcg_temp_free_i32(r1);
2206 tcg_temp_free_i32(r3);

2210 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2212 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2213 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2214 check_privileged(s);
2215 potential_page_fault(s);
2216 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2217 tcg_temp_free_i32(r1);
2218 tcg_temp_free_i32(r3);

/* LRA (LOAD REAL ADDRESS) -- privileged address translation. */
2221 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2223 check_privileged(s);
2224 potential_page_fault(s);
2225 gen_helper_lra(o->out, cpu_env, o->in2);
/* LPSW: load a short-format (two 32-bit words) PSW from memory,
   widen the mask word into 64-bit PSW_MASK form, and install it.
   Installing a PSW ends the TB (EXIT_NORETURN). */
2230 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2234 check_privileged(s);
2236 t1 = tcg_temp_new_i64();
2237 t2 = tcg_temp_new_i64();
2238 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2239 tcg_gen_addi_i64(o->in2, o->in2, 4);
2240 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2241 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2242 tcg_gen_shli_i64(t1, t1, 32);
2243 gen_helper_load_psw(cpu_env, t1, t2);
2244 tcg_temp_free_i64(t1);
2245 tcg_temp_free_i64(t2);
2246 return EXIT_NORETURN;

/* LPSWE: same idea with the full 16-byte (two doubleword) PSW. */
2249 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2253 check_privileged(s);
2255 t1 = tcg_temp_new_i64();
2256 t2 = tcg_temp_new_i64();
2257 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2258 tcg_gen_addi_i64(o->in2, o->in2, 8);
2259 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2260 gen_helper_load_psw(cpu_env, t1, t2);
2261 tcg_temp_free_i64(t1);
2262 tcg_temp_free_i64(t2);
2263 return EXIT_NORETURN;
/* LAM (LOAD ACCESS MULTIPLE): helper-based. */
2267 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2269 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2270 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2271 potential_page_fault(s);
2272 gen_helper_lam(cpu_env, r1, o->in2, r3);
2273 tcg_temp_free_i32(r1);
2274 tcg_temp_free_i32(r3);

/* LM (32-bit LOAD MULTIPLE): loop r1..r3 loading a word into the low
   half of each register and stepping the address by 4.  (Loop control
   lines are elided in this view.) */
2278 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2280 int r1 = get_field(s->fields, r1);
2281 int r3 = get_field(s->fields, r3);
2282 TCGv_i64 t = tcg_temp_new_i64();
2283 TCGv_i64 t4 = tcg_const_i64(4);
2286 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2287 store_reg32_i64(r1, t);
2291 tcg_gen_add_i64(o->in2, o->in2, t4);
2295 tcg_temp_free_i64(t);
2296 tcg_temp_free_i64(t4);

/* LMH: same, but each word goes into the HIGH half of the register. */
2300 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2302 int r1 = get_field(s->fields, r1);
2303 int r3 = get_field(s->fields, r3);
2304 TCGv_i64 t = tcg_temp_new_i64();
2305 TCGv_i64 t4 = tcg_const_i64(4);
2308 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2309 store_reg32h_i64(r1, t);
2313 tcg_gen_add_i64(o->in2, o->in2, t4);
2317 tcg_temp_free_i64(t);
2318 tcg_temp_free_i64(t4);

/* LMG (64-bit LOAD MULTIPLE): full doubleword per register, step 8. */
2322 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2324 int r1 = get_field(s->fields, r1);
2325 int r3 = get_field(s->fields, r3);
2326 TCGv_i64 t8 = tcg_const_i64(8);
2329 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2333 tcg_gen_add_i64(o->in2, o->in2, t8);
2337 tcg_temp_free_i64(t8);
/* Register moves: steal the input temps as outputs (marking them
   unused so the generic cleanup does not double-free them). */
2341 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2344 o->g_out = o->g_in2;
2345 TCGV_UNUSED_I64(o->in2);

/* 128-bit move: takes both input halves, clears all "global" flags. */
2350 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2354 o->g_out = o->g_in1;
2355 o->g_out2 = o->g_in2;
2356 TCGV_UNUSED_I64(o->in1);
2357 TCGV_UNUSED_I64(o->in2);
2358 o->g_in1 = o->g_in2 = false;

/* MVC (MOVE character): helper with l1 length constant. */
2362 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2364 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2365 potential_page_fault(s);
2366 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2367 tcg_temp_free_i32(l);

/* MVCL (MOVE LONG): helper sets the CC. */
2371 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2373 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2374 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2375 potential_page_fault(s);
2376 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2377 tcg_temp_free_i32(r1);
2378 tcg_temp_free_i32(r2);

/* MVCLE (MOVE LONG EXTENDED): helper sets the CC. */
2383 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2385 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2386 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2387 potential_page_fault(s);
2388 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2389 tcg_temp_free_i32(r1);
2390 tcg_temp_free_i32(r3);
/* MVCP / MVCS (MOVE TO PRIMARY/SECONDARY) -- privileged cross-space
   moves.  Note r1 is deliberately read from the l1 field (the length
   register lives in that field position for these formats). */
2395 #ifndef CONFIG_USER_ONLY
2396 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2398 int r1 = get_field(s->fields, l1);
2399 check_privileged(s);
2400 potential_page_fault(s);
2401 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);

2406 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2408 int r1 = get_field(s->fields, l1);
2409 check_privileged(s);
2410 potential_page_fault(s);
2411 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);

/* MVPG (MOVE PAGE): regs[0] carries the function control bits. */
2417 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2419 potential_page_fault(s);
2420 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);

/* MVST (MOVE STRING): regs[0] is the terminator; helper returns
   updated addresses (in1, and low128 -> in2). */
2425 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2427 potential_page_fault(s);
2428 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2430 return_low128(o->in2);
/* Integer multiply, plain and 128-bit-result variants. */
2434 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2436 tcg_gen_mul_i64(o->out, o->in1, o->in2);

2440 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2442 gen_helper_mul128(o->out, cpu_env, o->in1, o->in2);
2443 return_low128(o->out2);

/* BFP multiply: 32/64-bit forms, plus widening (mdeb/mxdb) and the
   128-bit form that consumes the out/out2 pair. */
2447 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2449 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);

2453 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2455 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);

2459 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2461 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);

2465 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2467 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2468 return_low128(o->out2);

2472 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2474 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2475 return_low128(o->out2);

/* BFP multiply-add/-subtract: the 32-bit forms load the r3 float via
   load_freg32_i64 (owning temp, freed here); the 64-bit forms pass
   the fregs[] global directly. */
2479 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2481 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2482 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2483 tcg_temp_free_i64(r3);

2487 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2489 int r3 = get_field(s->fields, r3);
2490 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);

2494 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2496 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2497 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2498 tcg_temp_free_i64(r3);

2502 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2504 int r3 = get_field(s->fields, r3);
2505 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
/* LOAD NEGATIVE family: integer via helper; float variants just OR in
   the sign bit (32-bit floats live left-justified in the 64-bit freg,
   hence 0x80000000 for f32 vs bit 63 for f64/f128 high half). */
2509 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2511 gen_helper_nabs_i64(o->out, o->in2);

2515 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2517 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);

2521 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2523 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);

2527 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2529 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2530 tcg_gen_mov_i64(o->out2, o->in2);

/* NC (AND character): storage-to-storage AND via helper; CC set. */
2534 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2536 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2537 potential_page_fault(s);
2538 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2539 tcg_temp_free_i32(l);

/* LOAD COMPLEMENT family: integer negate; float variants XOR the
   sign bit (same layout note as above). */
2544 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2546 tcg_gen_neg_i64(o->out, o->in2);

2550 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2552 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);

2556 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2558 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);

2562 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2564 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2565 tcg_gen_mov_i64(o->out2, o->in2);

/* OC (OR character): storage-to-storage OR via helper; CC set. */
2569 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2571 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2572 potential_page_fault(s);
2573 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2574 tcg_temp_free_i32(l);

/* Register OR. */
2579 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2581 tcg_gen_or_i64(o->out, o->in1, o->in2);

/* OR-immediate into a subfield: shift/size packed into insn->data;
   CC derived only from the bits actually touched. */
2585 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2587 int shift = s->insn->data & 0xff;
2588 int size = s->insn->data >> 8;
2589 uint64_t mask = ((1ull << size) - 1) << shift;
2592 tcg_gen_shli_i64(o->in2, o->in2, shift);
2593 tcg_gen_or_i64(o->out, o->in1, o->in2);
2595 /* Produce the CC from only the bits manipulated. */
2596 tcg_gen_andi_i64(cc_dst, o->out, mask);
2597 set_cc_nz_u64(s, cc_dst);

/* POPCNT via helper. */
2601 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
2603 gen_helper_popcnt(o->out, o->in2);
/* PTLB (PURGE TLB) -- privileged. */
2607 #ifndef CONFIG_USER_ONLY
2608 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
2610 check_privileged(s);
2611 gen_helper_ptlb(cpu_env);

/* RISBG and friends (rotate-then-insert-selected-bits): compute the
   insert mask from i3/i4 (with wraparound), a keep mask for r1, and
   the rotate amount i5; emit either a deposit (contiguous case) or
   and/and/or.  risbhg/risblg restrict the affected half via pmask.
   Several case/brace lines are elided in this view -- do not assume
   the visible lines are consecutive. */
2616 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
2618 int i3 = get_field(s->fields, i3);
2619 int i4 = get_field(s->fields, i4);
2620 int i5 = get_field(s->fields, i5);
2621 int do_zero = i4 & 0x80;
2622 uint64_t mask, imask, pmask;
2625 /* Adjust the arguments for the specific insn. */
2626 switch (s->fields->op2) {
2627 case 0x55: /* risbg */
2632 case 0x5d: /* risbhg */
2635 pmask = 0xffffffff00000000ull;
2637 case 0x51: /* risblg */
2640 pmask = 0x00000000ffffffffull;
2646 /* MASK is the set of bits to be inserted from R2.
2647 Take care for I3/I4 wraparound. */
2650 mask ^= pmask >> i4 >> 1;
2652 mask |= ~(pmask >> i4 >> 1);
2656 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
2657 insns, we need to keep the other half of the register. */
2658 imask = ~mask | ~pmask;
2660 if (s->fields->op2 == 0x55) {
2667 /* In some cases we can implement this with deposit, which can be more
2668 efficient on some hosts. */
2669 if (~mask == imask && i3 <= i4) {
2670 if (s->fields->op2 == 0x5d) {
2673 /* Note that we rotate the bits to be inserted to the lsb, not to
2674 the position as described in the PoO. */
2677 rot = (i5 - pos) & 63;
2683 /* Rotate the input as necessary. */
2684 tcg_gen_rotli_i64(o->in2, o->in2, rot);
2686 /* Insert the selected bits into the output. */
2688 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
2689 } else if (imask == 0) {
2690 tcg_gen_andi_i64(o->out, o->in2, mask);
2692 tcg_gen_andi_i64(o->in2, o->in2, mask);
2693 tcg_gen_andi_i64(o->out, o->out, imask);
2694 tcg_gen_or_i64(o->out, o->out, o->in2);
/* RNSBG/ROSBG/RXSBG (rotate-then-AND/OR/XOR selected bits): build the
   i3/i4 mask (with wraparound), rotate in2 by i5, combine with r1 per
   op2, CC from the selected bits.  Test-only forms discard out. */
2699 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
2701 int i3 = get_field(s->fields, i3);
2702 int i4 = get_field(s->fields, i4);
2703 int i5 = get_field(s->fields, i5);
2706 /* If this is a test-only form, arrange to discard the result. */
2708 o->out = tcg_temp_new_i64();
2716 /* MASK is the set of bits to be operated on from R2.
2717 Take care for I3/I4 wraparound. */
2720 mask ^= ~0ull >> i4 >> 1;
2722 mask |= ~(~0ull >> i4 >> 1);
2725 /* Rotate the input as necessary. */
2726 tcg_gen_rotli_i64(o->in2, o->in2, i5);
2729 switch (s->fields->op2) {
2730 case 0x55: /* AND */
2731 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
2732 tcg_gen_and_i64(o->out, o->out, o->in2);
2735 tcg_gen_andi_i64(o->in2, o->in2, mask);
2736 tcg_gen_or_i64(o->out, o->out, o->in2);
2738 case 0x57: /* XOR */
2739 tcg_gen_andi_i64(o->in2, o->in2, mask);
2740 tcg_gen_xor_i64(o->out, o->out, o->in2);
2747 tcg_gen_andi_i64(cc_dst, o->out, mask);
2748 set_cc_nz_u64(s, cc_dst);

/* Byte-swap ops of each width. */
2752 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2754 tcg_gen_bswap16_i64(o->out, o->in2);

2758 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2760 tcg_gen_bswap32_i64(o->out, o->in2);

2764 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2766 tcg_gen_bswap64_i64(o->out, o->in2);

/* RLL (rotate left, 32-bit): done in i32 temps then zero-extended. */
2770 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2772 TCGv_i32 t1 = tcg_temp_new_i32();
2773 TCGv_i32 t2 = tcg_temp_new_i32();
2774 TCGv_i32 to = tcg_temp_new_i32();
2775 tcg_gen_trunc_i64_i32(t1, o->in1);
2776 tcg_gen_trunc_i64_i32(t2, o->in2);
2777 tcg_gen_rotl_i32(to, t1, t2);
2778 tcg_gen_extu_i32_i64(o->out, to);
2779 tcg_temp_free_i32(t1);
2780 tcg_temp_free_i32(t2);
2781 tcg_temp_free_i32(to);

/* RLLG (rotate left, 64-bit). */
2785 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2787 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
/* RRBE (RESET REFERENCE BIT EXTENDED) -- privileged; CC from helper. */
2791 #ifndef CONFIG_USER_ONLY
2792 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
2794 check_privileged(s);
2795 gen_helper_rrbe(cc_op, cpu_env, o->in2);

/* SAC/SACF (SET ADDRESS SPACE CONTROL): addressing mode changes, so
   the translation block must end (EXIT_PC_STALE). */
2800 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
2802 check_privileged(s);
2803 gen_helper_sacf(cpu_env, o->in2);
2804 /* Addressing mode has changed, so end the block. */
2805 return EXIT_PC_STALE;

/* SAR: store in2 into access register r1. */
2809 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
2811 int r1 = get_field(s->fields, r1);
2812 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));

/* BFP subtract: 32/64-bit via helper; 128-bit consumes out/out2. */
2816 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
2818 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);

2822 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
2824 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);

2828 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
2830 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2831 return_low128(o->out2);

/* BFP square root of each width. */
2835 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
2837 gen_helper_sqeb(o->out, cpu_env, o->in2);

2841 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
2843 gen_helper_sqdb(o->out, cpu_env, o->in2);

2847 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
2849 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
2850 return_low128(o->out2);
/* SERVC (SERVICE CALL) / SIGP (SIGNAL PROCESSOR) -- privileged,
   helper-based, CC from helper. */
2854 #ifndef CONFIG_USER_ONLY
2855 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
2857 check_privileged(s);
2858 potential_page_fault(s);
2859 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);

2864 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
2866 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2867 check_privileged(s);
2868 potential_page_fault(s);
2869 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
2870 tcg_temp_free_i32(r1);

/* STORE ON CONDITION: branch over the store when the disas_jcc
   condition fails; insn->data chooses 64- vs 32-bit store.  (Label
   and cleanup framing lines are elided in this view.) */
2875 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
2881 disas_jcc(s, &c, get_field(s->fields, m3));
2883 lab = gen_new_label();
2885 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
2887 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
2891 r1 = get_field(s->fields, r1);
2892 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
2893 if (s->insn->data) {
2894 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
2896 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
2898 tcg_temp_free_i64(a);

/* SLA (SHIFT LEFT ARITHMETIC): shift but preserve the sign bit from
   the source; CC op chosen by operand width (insn->data). */
2904 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
2906 uint64_t sign = 1ull << s->insn->data;
2907 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
2908 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
2909 tcg_gen_shl_i64(o->out, o->in1, o->in2);
2910 /* The arithmetic left shift is curious in that it does not affect
2911 the sign bit. Copy that over from the source unchanged. */
2912 tcg_gen_andi_i64(o->out, o->out, ~sign);
2913 tcg_gen_andi_i64(o->in1, o->in1, sign);
2914 tcg_gen_or_i64(o->out, o->out, o->in1);

/* Plain logical/arithmetic shifts. */
2918 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
2920 tcg_gen_shl_i64(o->out, o->in1, o->in2);

2924 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
2926 tcg_gen_sar_i64(o->out, o->in1, o->in2);

2930 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
2932 tcg_gen_shr_i64(o->out, o->in1, o->in2);
/* SFPC / SFASR: install a new FPC via helper. */
2936 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
2938 gen_helper_sfpc(cpu_env, o->in2);

2942 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
2944 gen_helper_sfas(cpu_env, o->in2);

/* SRNM/SRNMB/SRNMT (set rounding mode): compute the new mode value
   from b2+d2, deposit it into the proper FPC field (field position
   and width chosen by op2 -- the case bodies setting pos/len are
   elided in this view), then reinstall the FPC so fpu_status picks
   up the change. */
2948 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
2950 int b2 = get_field(s->fields, b2);
2951 int d2 = get_field(s->fields, d2);
2952 TCGv_i64 t1 = tcg_temp_new_i64();
2953 TCGv_i64 t2 = tcg_temp_new_i64();
2956 switch (s->fields->op2) {
2957 case 0x99: /* SRNM */
2960 case 0xb8: /* SRNMB */
2963 case 0xb9: /* SRNMT */
2968 mask = (1 << len) - 1;
2970 /* Insert the value into the appropriate field of the FPC. */
2972 tcg_gen_movi_i64(t1, d2 & mask);
2974 tcg_gen_addi_i64(t1, regs[b2], d2);
2975 tcg_gen_andi_i64(t1, t1, mask);
2977 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
2978 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
2979 tcg_temp_free_i64(t1);
2981 /* Then install the new FPC to set the rounding mode in fpu_status. */
2982 gen_helper_sfpc(cpu_env, t2);
2983 tcg_temp_free_i64(t2);
/* SPKA (SET PSW KEY FROM ADDRESS): deposit the 4-bit key into the
   PSW mask -- privileged. */
2987 #ifndef CONFIG_USER_ONLY
2988 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
2990 check_privileged(s);
2991 tcg_gen_shri_i64(o->in2, o->in2, 4);
2992 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);

/* SSKE (SET STORAGE KEY EXTENDED) -- privileged. */
2996 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
2998 check_privileged(s);
2999 gen_helper_sske(cpu_env, o->in1, o->in2);

/* SSM (SET SYSTEM MASK): replace PSW mask bits 0-7. */
3003 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3005 check_privileged(s);
3006 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);

/* STAP (STORE CPU ADDRESS) -- privileged. */
3010 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3012 check_privileged(s);
3013 /* ??? Surely cpu address != cpu number. In any case the previous
3014 version of this stored more than the required half-word, so it
3015 is unlikely this has ever been tested. */
3016 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));

/* STCK (STORE CLOCK): helper yields the TOD value; clock states are
   not modelled, so CC is forced to 0. */
3020 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3022 gen_helper_stck(o->out, cpu_env);
3023 /* ??? We don't implement clock states. */
3024 gen_op_movi_cc(s, 0);

/* STCKE (STORE CLOCK EXTENDED): widen the 64-bit TOD into the
   extended 104-bit layout (see comment) and store 16 bytes. */
3028 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3030 TCGv_i64 c1 = tcg_temp_new_i64();
3031 TCGv_i64 c2 = tcg_temp_new_i64();
3032 gen_helper_stck(c1, cpu_env);
3033 /* Shift the 64-bit value into its place as a zero-extended
3034 104-bit value. Note that "bit positions 64-103 are always
3035 non-zero so that they compare differently to STCK"; we set
3036 the least significant bit to 1. */
3037 tcg_gen_shli_i64(c2, c1, 56);
3038 tcg_gen_shri_i64(c1, c1, 8);
3039 tcg_gen_ori_i64(c2, c2, 0x10000);
3040 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3041 tcg_gen_addi_i64(o->in2, o->in2, 8);
3042 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3043 tcg_temp_free_i64(c1);
3044 tcg_temp_free_i64(c2);
3045 /* ??? We don't implement clock states. */
3046 gen_op_movi_cc(s, 0);
/* SCKC - SET CLOCK COMPARATOR (privileged). */
3050 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3052 check_privileged(s);
3053 gen_helper_sckc(cpu_env, o->in2);
/* STCKC - STORE CLOCK COMPARATOR (privileged). */
3057 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3059 check_privileged(s);
3060 gen_helper_stckc(o->out, cpu_env);
/* STCTG - STORE CONTROL (64-bit regs, privileged): store control
   registers r1..r3 at the operand address via helper. */
3064 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3066 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3067 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3068 check_privileged(s);
3069 potential_page_fault(s);
3070 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3071 tcg_temp_free_i32(r1);
3072 tcg_temp_free_i32(r3);
/* STCTL - STORE CONTROL (32-bit regs, privileged). */
3076 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3078 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3079 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3080 check_privileged(s);
3081 potential_page_fault(s);
3082 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3083 tcg_temp_free_i32(r1);
3084 tcg_temp_free_i32(r3);
/* STIDP - STORE CPU ID (privileged).  NOTE(review): stores cpu_num,
   the same field STAP uses — looks like a placeholder CPU id. */
3088 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3090 check_privileged(s);
3091 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
/* SPT - SET CPU TIMER (privileged). */
3095 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3097 check_privileged(s);
3098 gen_helper_spt(cpu_env, o->in2);
/* STFL - STORE FACILITY LIST (privileged): stores a hard-coded facility
   word at absolute address 200 (the architected low-core location). */
3102 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3105 /* We really ought to have more complete indication of facilities
3106 that we implement. Address this when STFLE is implemented. */
3107 check_privileged(s);
3108 f = tcg_const_i64(0xc0000000);
3109 a = tcg_const_i64(200);
3110 tcg_gen_qemu_st32(f, a, get_mem_index(s));
3111 tcg_temp_free_i64(f);
3112 tcg_temp_free_i64(a);
/* STPT - STORE CPU TIMER (privileged). */
3116 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3118 check_privileged(s);
3119 gen_helper_stpt(o->out, cpu_env);
/* STSI - STORE SYSTEM INFORMATION (privileged): helper consumes
   r0/r1 implicitly per the architecture and sets the CC. */
3123 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3125 check_privileged(s);
3126 potential_page_fault(s);
3127 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
/* SPX - SET PREFIX (privileged). */
3132 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3134 check_privileged(s);
3135 gen_helper_spx(cpu_env, o->in2);
/* Channel-subsystem instructions: unimplemented; report CC 3
   ("not operational"). */
3139 static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
3141 check_privileged(s);
3142 /* Not operational. */
3143 gen_op_movi_cc(s, 3);
/* STPX - STORE PREFIX (privileged): load prefix from env and mask
   to the architected prefix bits. */
3147 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3149 check_privileged(s);
3150 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3151 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
/* STNSM / STOSM - STORE THEN {AND,OR} SYSTEM MASK (privileged).
   Shared implementation; the opcode (0xac = STNSM) selects AND vs OR
   of the immediate into the system-mask byte of the PSW. */
3155 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3157 uint64_t i2 = get_field(s->fields, i2);
3160 check_privileged(s);
3162 /* It is important to do what the instruction name says: STORE THEN.
3163 If we let the output hook perform the store then if we fault and
3164 restart, we'll have the wrong SYSTEM MASK in place. */
3165 t = tcg_temp_new_i64();
3166 tcg_gen_shri_i64(t, psw_mask, 56);
3167 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3168 tcg_temp_free_i64(t);
3170 if (s->fields->op == 0xac) {
3171 tcg_gen_andi_i64(psw_mask, psw_mask,
3172 (i2 << 56) | 0x00ffffffffffffffull);
3174 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
/* STURA - STORE USING REAL ADDRESS (privileged): store bypassing
   dynamic address translation, via helper. */
3179 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3181 check_privileged(s);
3182 potential_page_fault(s);
3183 gen_helper_stura(cpu_env, o->in2, o->in1)
/* Plain stores of in1 to the address in in2, in 8/16/32/64-bit widths.
   Used by many STORE-type instructions via the insn table. */
3188 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3190 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3194 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3196 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3200 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3202 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3206 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3208 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
/* STAM - STORE ACCESS MULTIPLE: store access registers r1..r3. */
3212 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3214 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3215 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3216 potential_page_fault(s);
3217 gen_helper_stam(cpu_env, r1, o->in2, r3);
3218 tcg_temp_free_i32(r1);
3219 tcg_temp_free_i32(r3);
/* STCM - STORE CHARACTERS UNDER MASK: store the bytes of in1 selected
   by the m3 mask.  Contiguous masks (0xf/0xc/0x3, 0x8/0x4/0x2/0x1 etc.)
   collapse to a single sized store; sparse masks fall back to a
   byte-by-byte shift-and-store loop.  insn->data biases the bit
   position for the high-word variant (STCMH). */
3223 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3225 int m3 = get_field(s->fields, m3);
3226 int pos, base = s->insn->data;
3227 TCGv_i64 tmp = tcg_temp_new_i64();
3229 pos = base + ctz32(m3) * 8;
3232 /* Effectively a 32-bit store. */
3233 tcg_gen_shri_i64(tmp, o->in1, pos);
3234 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3240 /* Effectively a 16-bit store. */
3241 tcg_gen_shri_i64(tmp, o->in1, pos);
3242 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3249 /* Effectively an 8-bit store. */
3250 tcg_gen_shri_i64(tmp, o->in1, pos);
3251 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3255 /* This is going to be a sequence of shifts and stores. */
3256 pos = base + 32 - 8;
3259 tcg_gen_shri_i64(tmp, o->in1, pos);
3260 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3261 tcg_gen_addi_i64(o->in2, o->in2, 1);
3263 m3 = (m3 << 1) & 0xf;
3268 tcg_temp_free_i64(tmp);
/* STM/STMG - STORE MULTIPLE: store registers r1..r3 (wrapping) at
   consecutive addresses; insn->data selects 4- or 8-byte elements. */
3272 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3274 int r1 = get_field(s->fields, r1);
3275 int r3 = get_field(s->fields, r3);
3276 int size = s->insn->data;
3277 TCGv_i64 tsize = tcg_const_i64(size);
3281 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3283 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3288 tcg_gen_add_i64(o->in2, o->in2, tsize);
3292 tcg_temp_free_i64(tsize);
/* STMH - STORE MULTIPLE HIGH: store the high 32 bits of registers
   r1..r3 at consecutive 4-byte addresses.  The shl by 32 positions the
   high half where the 32-bit store will pick it up. */
3296 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3298 int r1 = get_field(s->fields, r1);
3299 int r3 = get_field(s->fields, r3);
3300 TCGv_i64 t = tcg_temp_new_i64();
3301 TCGv_i64 t4 = tcg_const_i64(4);
3302 TCGv_i64 t32 = tcg_const_i64(32);
3305 tcg_gen_shl_i64(t, regs[r1], t32);
3306 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3310 tcg_gen_add_i64(o->in2, o->in2, t4);
3314 tcg_temp_free_i64(t);
3315 tcg_temp_free_i64(t4);
3316 tcg_temp_free_i64(t32);
/* SRST - SEARCH STRING: helper scans for the byte in r0; returns the
   updated addresses (second result via the low-128 mechanism). */
3320 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3322 potential_page_fault(s);
3323 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3325 return_low128(o->in2);
/* Integer SUBTRACT: out = in1 - in2. */
3329 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3331 tcg_gen_sub_i64(o->out, o->in1, o->in2);
/* SUBTRACT WITH BORROW: computed as in1 + ~in2 + borrow, where the
   borrow is extracted from bit 1 of the current CC. */
3335 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3340 tcg_gen_not_i64(o->in2, o->in2);
3341 tcg_gen_add_i64(o->out, o->in1, o->in2);
3343 /* XXX possible optimization point */
3345 cc = tcg_temp_new_i64();
3346 tcg_gen_extu_i32_i64(cc, cc_op);
3347 tcg_gen_shri_i64(cc, cc, 1);
3348 tcg_gen_add_i64(o->out, o->out, cc);
3349 tcg_temp_free_i64(cc);
/* SVC - SUPERVISOR CALL: record the SVC code and instruction length
   in the CPU state, then raise the SVC exception.  Ends the TB. */
3353 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3360 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3361 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3362 tcg_temp_free_i32(t);
3364 t = tcg_const_i32(s->next_pc - s->pc);
3365 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3366 tcg_temp_free_i32(t);
3368 gen_exception(EXCP_SVC);
3369 return EXIT_NORETURN;
/* TCEB/TCDB/TCXB - TEST DATA CLASS (32/64/128-bit BFP): helpers set
   the CC directly. */
3374 gen_helper_tceb(cc_op, o->in1, o->in2);
3379 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3381 gen_helper_tcdb(cc_op, o->in1, o->in2);
3386 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3388 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3393 #ifndef CONFIG_USER_ONLY
/* TPROT - TEST PROTECTION (system-mode only): CC from helper. */
3394 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3396 potential_page_fault(s);
3397 gen_helper_tprot(cc_op, o->addr1, o->in2);
/* TR - TRANSLATE: table-driven byte translation of an l1+1 byte field. */
3403 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3405 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3406 potential_page_fault(s);
3407 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3408 tcg_temp_free_i32(l);
/* UNPK - UNPACK: convert packed decimal to zoned, via helper. */
3413 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3415 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3416 potential_page_fault(s);
3417 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3418 tcg_temp_free_i32(l);
/* XC - EXCLUSIVE OR (character): memory ^= memory over l1+1 bytes.
   The common idiom XC x,x (identical base and displacement) zeroes the
   field, so short self-XORs are inlined as stores of zero; everything
   else goes through the helper, which also computes the CC. */
3422 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3424 int d1 = get_field(s->fields, d1);
3425 int d2 = get_field(s->fields, d2);
3426 int b1 = get_field(s->fields, b1);
3427 int b2 = get_field(s->fields, b2);
3428 int l = get_field(s->fields, l1);
3431 o->addr1 = get_address(s, 0, b1, d1);
3433 /* If the addresses are identical, this is a store/memset of zero. */
3434 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
3435 o->in2 = tcg_const_i64(0);
3439 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
3442 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
3446 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
3449 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
3453 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
3456 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
3460 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
3462 gen_op_movi_cc(s, 0);
3466 /* But in general we'll defer to a helper. */
3467 o->in2 = get_address(s, 0, b2, d2);
3468 t32 = tcg_const_i32(l);
3469 potential_page_fault(s);
3470 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
3471 tcg_temp_free_i32(t32);
3476 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3478 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3482 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3484 int shift = s->insn->data & 0xff;
3485 int size = s->insn->data >> 8;
3486 uint64_t mask = ((1ull << size) - 1) << shift;
3489 tcg_gen_shli_i64(o->in2, o->in2, shift);
3490 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3492 /* Produce the CC from only the bits manipulated. */
3493 tcg_gen_andi_i64(cc_dst, o->out, mask);
3494 set_cc_nz_u64(s, cc_dst);
3498 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
3500 o->out = tcg_const_i64(0);
3504 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
3506 o->out = tcg_const_i64(0);
3512 /* ====================================================================== */
3513 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3514 the original inputs), update the various cc data structures in order to
3515 be able to compute the new condition code. */
/* "cout" helpers: each records the appropriate CC_OP plus the operands
   needed to (lazily) recompute the condition code later. */
3517 static void cout_abs32(DisasContext *s, DisasOps *o)
3519 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
3522 static void cout_abs64(DisasContext *s, DisasOps *o)
3524 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
3527 static void cout_adds32(DisasContext *s, DisasOps *o)
3529 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
3532 static void cout_adds64(DisasContext *s, DisasOps *o)
3534 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
3537 static void cout_addu32(DisasContext *s, DisasOps *o)
3539 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
3542 static void cout_addu64(DisasContext *s, DisasOps *o)
3544 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
3547 static void cout_addc32(DisasContext *s, DisasOps *o)
3549 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
3552 static void cout_addc64(DisasContext *s, DisasOps *o)
3554 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
3557 static void cout_cmps32(DisasContext *s, DisasOps *o)
3559 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
3562 static void cout_cmps64(DisasContext *s, DisasOps *o)
3564 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
3567 static void cout_cmpu32(DisasContext *s, DisasOps *o)
3569 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
3572 static void cout_cmpu64(DisasContext *s, DisasOps *o)
3574 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
3577 static void cout_f32(DisasContext *s, DisasOps *o)
3579 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
3582 static void cout_f64(DisasContext *s, DisasOps *o)
3584 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
3587 static void cout_f128(DisasContext *s, DisasOps *o)
3589 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
3592 static void cout_nabs32(DisasContext *s, DisasOps *o)
3594 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
3597 static void cout_nabs64(DisasContext *s, DisasOps *o)
3599 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
3602 static void cout_neg32(DisasContext *s, DisasOps *o)
3604 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
3607 static void cout_neg64(DisasContext *s, DisasOps *o)
3609 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
/* 32-bit NZ test: explicitly zero-extend so high garbage in out
   doesn't affect the CC. */
3612 static void cout_nz32(DisasContext *s, DisasOps *o)
3614 tcg_gen_ext32u_i64(cc_dst, o->out);
3615 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
3618 static void cout_nz64(DisasContext *s, DisasOps *o)
3620 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
3623 static void cout_s32(DisasContext *s, DisasOps *o)
3625 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3628 static void cout_s64(DisasContext *s, DisasOps *o)
3630 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
3633 static void cout_subs32(DisasContext *s, DisasOps *o)
3635 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3638 static void cout_subs64(DisasContext *s, DisasOps *o)
3640 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3643 static void cout_subu32(DisasContext *s, DisasOps *o)
3645 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3648 static void cout_subu64(DisasContext *s, DisasOps *o)
3650 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3653 static void cout_subb32(DisasContext *s, DisasOps *o)
3655 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3658 static void cout_subb64(DisasContext *s, DisasOps *o)
3660 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3663 static void cout_tm32(DisasContext *s, DisasOps *o)
3665 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3668 static void cout_tm64(DisasContext *s, DisasOps *o)
3670 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3673 /* ====================================================================== */
3674 /* The "PREPeration" generators. These initialize the DisasOps.OUT fields
3675 with the TCG register to which we will write. Used in combination with
3676 the "wout" generators, in some cases we need a new temporary, and in
3677 some cases we can write to a TCG global. */
/* "prep" helpers: choose the destination TCG value(s) before the op
   runs — either a fresh temporary or a global register.  g_out/g_out2
   flag globals so they are not freed afterwards. */
3679 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3681 o->out = tcg_temp_new_i64();
3683 #define SPEC_prep_new 0
/* Pair of fresh temporaries (128-bit results). */
3685 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3687 o->out = tcg_temp_new_i64();
3688 o->out2 = tcg_temp_new_i64();
3690 #define SPEC_prep_new_P 0
/* Write directly into GPR r1. */
3692 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3694 o->out = regs[get_field(f, r1)];
3697 #define SPEC_prep_r1 0
/* Even/odd GPR pair r1:r1+1 (requires even r1). */
3699 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3701 int r1 = get_field(f, r1);
3703 o->out2 = regs[r1 + 1];
3704 o->g_out = o->g_out2 = true;
3706 #define SPEC_prep_r1_P SPEC_r1_even
/* Write directly into FPR r1. */
3708 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3710 o->out = fregs[get_field(f, r1)];
3713 #define SPEC_prep_f1 0
/* 128-bit FP register pair r1:r1+2. */
3715 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3717 int r1 = get_field(f, r1);
3719 o->out2 = fregs[r1 + 2];
3720 o->g_out = o->g_out2 = true;
3722 #define SPEC_prep_x1 SPEC_r1_f128
3724 /* ====================================================================== */
3725 /* The "Write OUTput" generators. These generally perform some non-trivial
3726 copy of data to TCG globals, or to main memory. The trivial cases are
3727 generally handled by having a "prep" generator install the TCG global
3728 as the destination of the operation. */
/* "wout" helpers: commit the op's result(s) to registers or memory. */
3730 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3732 store_reg(get_field(f, r1), o->out);
3734 #define SPEC_wout_r1 0
/* Insert only the low 8 bits into GPR r1. */
3736 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3738 int r1 = get_field(f, r1);
3739 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
3741 #define SPEC_wout_r1_8 0
/* Insert only the low 16 bits into GPR r1. */
3743 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3745 int r1 = get_field(f, r1);
3746 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
3748 #define SPEC_wout_r1_16 0
3750 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3752 store_reg32_i64(get_field(f, r1), o->out);
3754 #define SPEC_wout_r1_32 0
/* 32-bit pair into even/odd GPRs r1 and r1+1. */
3756 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3758 int r1 = get_field(f, r1);
3759 store_reg32_i64(r1, o->out);
3760 store_reg32_i64(r1 + 1, o->out2);
3762 #define SPEC_wout_r1_P32 SPEC_r1_even
/* Split one 64-bit value across the even/odd pair: low half to r1+1,
   high half to r1. */
3764 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3766 int r1 = get_field(f, r1);
3767 store_reg32_i64(r1 + 1, o->out);
3768 tcg_gen_shri_i64(o->out, o->out, 32);
3769 store_reg32_i64(r1, o->out);
3771 #define SPEC_wout_r1_D32 SPEC_r1_even
3773 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3775 store_freg32_i64(get_field(f, r1), o->out);
3777 #define SPEC_wout_e1 0
3779 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3781 store_freg(get_field(f, r1), o->out);
3783 #define SPEC_wout_f1 0
/* 128-bit FP result into FPR pair r1:r1+2. */
3785 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3787 int f1 = get_field(s->fields, r1);
3788 store_freg(f1, o->out);
3789 store_freg(f1 + 2, o->out2);
3791 #define SPEC_wout_x1 SPEC_r1_f128
/* Conditional stores: skip the write when r1 == r2 (result already
   lives there). */
3793 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3795 if (get_field(f, r1) != get_field(f, r2)) {
3796 store_reg32_i64(get_field(f, r1), o->out);
3799 #define SPEC_wout_cond_r1r2_32 0
3801 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
3803 if (get_field(f, r1) != get_field(f, r2)) {
3804 store_freg32_i64(get_field(f, r1), o->out);
3807 #define SPEC_wout_cond_e1e2 0
/* Memory writebacks of out through addr1 (or in2) at various widths. */
3809 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3811 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
3813 #define SPEC_wout_m1_8 0
3815 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3817 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
3819 #define SPEC_wout_m1_16 0
3821 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3823 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
3825 #define SPEC_wout_m1_32 0
3827 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
3829 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
3831 #define SPEC_wout_m1_64 0
3833 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
3835 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
3837 #define SPEC_wout_m2_32 0
3839 /* ====================================================================== */
3840 /* The "INput 1" generators. These load the first operand to an insn. */
/* "in1" helpers: load the first operand from a register, register
   pair, or memory, with the required extension.  The *_o variants
   alias globals directly (no copy). */
3842 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3844 o->in1 = load_reg(get_field(f, r1));
3846 #define SPEC_in1_r1 0
3848 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3850 o->in1 = regs[get_field(f, r1)];
3853 #define SPEC_in1_r1_o 0
3855 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3857 o->in1 = tcg_temp_new_i64();
3858 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
3860 #define SPEC_in1_r1_32s 0
3862 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3864 o->in1 = tcg_temp_new_i64();
3865 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
3867 #define SPEC_in1_r1_32u 0
/* High 32 bits of GPR r1, shifted down. */
3869 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
3871 o->in1 = tcg_temp_new_i64();
3872 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
3874 #define SPEC_in1_r1_sr32 0
/* Odd register of the r1 even/odd pair. */
3876 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
3878 o->in1 = load_reg(get_field(f, r1) + 1);
3880 #define SPEC_in1_r1p1 SPEC_r1_even
3882 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3884 o->in1 = tcg_temp_new_i64();
3885 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
3887 #define SPEC_in1_r1p1_32s SPEC_r1_even
3889 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3891 o->in1 = tcg_temp_new_i64();
3892 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
3894 #define SPEC_in1_r1p1_32u SPEC_r1_even
/* 64-bit value assembled from the 32-bit halves of the r1 pair
   (r1 high, r1+1 low). */
3896 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3898 int r1 = get_field(f, r1);
3899 o->in1 = tcg_temp_new_i64();
3900 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
3902 #define SPEC_in1_r1_D32 SPEC_r1_even
3904 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
3906 o->in1 = load_reg(get_field(f, r2));
3908 #define SPEC_in1_r2 0
3910 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
3912 o->in1 = load_reg(get_field(f, r3));
3914 #define SPEC_in1_r3 0
3916 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
3918 o->in1 = regs[get_field(f, r3)];
3921 #define SPEC_in1_r3_o 0
3923 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
3925 o->in1 = tcg_temp_new_i64();
3926 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
3928 #define SPEC_in1_r3_32s 0
3930 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
3932 o->in1 = tcg_temp_new_i64();
3933 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
3935 #define SPEC_in1_r3_32u 0
3937 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
3939 o->in1 = load_freg32_i64(get_field(f, r1));
3941 #define SPEC_in1_e1 0
3943 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3945 o->in1 = fregs[get_field(f, r1)];
3948 #define SPEC_in1_f1_o 0
/* 128-bit FP operand in FPR pair r1:r1+2 (globals, not copied). */
3950 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
3952 int r1 = get_field(f, r1);
3954 o->out2 = fregs[r1 + 2];
3955 o->g_out = o->g_out2 = true;
3957 #define SPEC_in1_x1_o SPEC_r1_f128
3959 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
3961 o->in1 = fregs[get_field(f, r3)];
3964 #define SPEC_in1_f3_o 0
/* Effective-address computations into addr1. */
3966 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
3968 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
3970 #define SPEC_in1_la1 0
3972 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
3974 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
3975 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
3977 #define SPEC_in1_la2 0
/* Memory loads through addr1 at various widths/extensions. */
3979 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
3982 o->in1 = tcg_temp_new_i64();
3983 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
3985 #define SPEC_in1_m1_8u 0
3987 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
3990 o->in1 = tcg_temp_new_i64();
3991 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
3993 #define SPEC_in1_m1_16s 0
3995 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
3998 o->in1 = tcg_temp_new_i64();
3999 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4001 #define SPEC_in1_m1_16u 0
4003 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4006 o->in1 = tcg_temp_new_i64();
4007 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4009 #define SPEC_in1_m1_32s 0
4011 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4014 o->in1 = tcg_temp_new_i64();
4015 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4017 #define SPEC_in1_m1_32u 0
4019 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4022 o->in1 = tcg_temp_new_i64();
4023 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4025 #define SPEC_in1_m1_64 0
4027 /* ====================================================================== */
4028 /* The "INput 2" generators. These load the second operand to an insn. */
/* "in2" helpers: load the second operand — register, extension of a
   register, effective address, memory load, or immediate. */
4030 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4032 o->in2 = regs[get_field(f, r1)];
4035 #define SPEC_in2_r1_o 0
4037 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4039 o->in2 = tcg_temp_new_i64();
4040 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4042 #define SPEC_in2_r1_16u 0
4044 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4046 o->in2 = tcg_temp_new_i64();
4047 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4049 #define SPEC_in2_r1_32u 0
4051 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4053 o->in2 = load_reg(get_field(f, r2));
4055 #define SPEC_in2_r2 0
4057 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4059 o->in2 = regs[get_field(f, r2)];
4062 #define SPEC_in2_r2_o 0
/* Load r2 only when it is non-zero (r2 == 0 means "no operand" for
   some instructions). */
4064 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4066 int r2 = get_field(f, r2);
4068 o->in2 = load_reg(r2);
4071 #define SPEC_in2_r2_nz 0
4073 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4075 o->in2 = tcg_temp_new_i64();
4076 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4078 #define SPEC_in2_r2_8s 0
4080 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4082 o->in2 = tcg_temp_new_i64();
4083 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4085 #define SPEC_in2_r2_8u 0
4087 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4089 o->in2 = tcg_temp_new_i64();
4090 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4092 #define SPEC_in2_r2_16s 0
4094 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4096 o->in2 = tcg_temp_new_i64();
4097 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4099 #define SPEC_in2_r2_16u 0
4101 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4103 o->in2 = load_reg(get_field(f, r3));
4105 #define SPEC_in2_r3 0
4107 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4109 o->in2 = tcg_temp_new_i64();
4110 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4112 #define SPEC_in2_r2_32s 0
4114 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4116 o->in2 = tcg_temp_new_i64();
4117 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4119 #define SPEC_in2_r2_32u 0
4121 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4123 o->in2 = load_freg32_i64(get_field(f, r2));
4125 #define SPEC_in2_e2 0
4127 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4129 o->in2 = fregs[get_field(f, r2)];
4132 #define SPEC_in2_f2_o 0
/* 128-bit FP operand in FPR pair r2:r2+2 (globals, not copied). */
4134 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4136 int r2 = get_field(f, r2);
4138 o->in2 = fregs[r2 + 2];
4139 o->g_in1 = o->g_in2 = true;
4141 #define SPEC_in2_x2_o SPEC_r2_f128
/* Register-as-address: r2 with no base or displacement. */
4143 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4145 o->in2 = get_address(s, 0, get_field(f, r2), 0);
4147 #define SPEC_in2_ra2 0
4149 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4151 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4152 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4154 #define SPEC_in2_a2 0
/* PC-relative address: pc + 2 * signed i2 (halfword units). */
4156 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4158 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4160 #define SPEC_in2_ri2 0
/* Shift amounts, masked to 31/63 bits respectively. */
4162 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4164 help_l2_shift(s, f, o, 31);
4166 #define SPEC_in2_sh32 0
4168 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4170 help_l2_shift(s, f, o, 63);
4172 #define SPEC_in2_sh64 0
/* Memory loads through the in2 address (m2) at various widths. */
4174 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4177 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4179 #define SPEC_in2_m2_8u 0
4181 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4184 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4186 #define SPEC_in2_m2_16s 0
4188 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4191 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4193 #define SPEC_in2_m2_16u 0
4195 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4198 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4200 #define SPEC_in2_m2_32s 0
4202 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4205 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4207 #define SPEC_in2_m2_32u 0
4209 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4212 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4214 #define SPEC_in2_m2_64 0
/* PC-relative memory loads (mri2). */
4216 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4219 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4221 #define SPEC_in2_mri2_16u 0
4223 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4226 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4228 #define SPEC_in2_mri2_32s 0
4230 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4233 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4235 #define SPEC_in2_mri2_32u 0
4237 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4240 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4242 #define SPEC_in2_mri2_64 0
/* Immediates: raw, zero-extended, or zero-extended then shifted by
   the per-insn data amount. */
4244 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4246 o->in2 = tcg_const_i64(get_field(f, i2));
4248 #define SPEC_in2_i2 0
4250 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4252 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4254 #define SPEC_in2_i2_8u 0
4256 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4258 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4260 #define SPEC_in2_i2_16u 0
4262 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4264 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4266 #define SPEC_in2_i2_32u 0
4268 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4270 uint64_t i2 = (uint16_t)get_field(f, i2);
4271 o->in2 = tcg_const_i64(i2 << s->insn->data);
4273 #define SPEC_in2_i2_16u_shl 0
4275 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4277 uint64_t i2 = (uint32_t)get_field(f, i2);
4278 o->in2 = tcg_const_i64(i2 << s->insn->data);
4280 #define SPEC_in2_i2_32u_shl 0
4282 /* ====================================================================== */
4284 /* Find opc within the table of insns. This is formulated as a switch
4285 statement so that (1) we get compile-time notice of cut-paste errors
4286 for duplicated opcodes, and (2) the compiler generates the binary
4287 search tree, rather than us having to post-process the table. */
/* Instruction table machinery: insn-data.def is expanded three times
   with different definitions of D() — first to build the enum of insn
   indices, then the insn_info[] descriptor table, then the switch body
   of lookup_opc(). */
4289 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4290 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4292 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4294 enum DisasInsnEnum {
4295 #include "insn-data.def"
/* Second expansion: descriptor with spec flags and helper pointers. */
4299 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4303 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4305 .help_in1 = in1_##I1, \
4306 .help_in2 = in2_##I2, \
4307 .help_prep = prep_##P, \
4308 .help_wout = wout_##W, \
4309 .help_cout = cout_##CC, \
4310 .help_op = op_##OP, \
4314 /* Allow 0 to be used for NULL in the table below. */
4322 #define SPEC_in1_0 0
4323 #define SPEC_in2_0 0
4324 #define SPEC_prep_0 0
4325 #define SPEC_wout_0 0
4327 static const DisasInsn insn_info[] = {
4328 #include "insn-data.def"
/* Third expansion: one switch case per opcode, returning the entry. */
4332 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4333 case OPC: return &insn_info[insn_ ## NM];
4335 static const DisasInsn *lookup_opc(uint16_t opc)
4338 #include "insn-data.def"
4347 /* Extract a field from the insn. The INSN should be left-aligned in
4348 the uint64_t so that we can more easily utilize the big-bit-endian
4349 definitions we extract from the Principles of Operation. */
/* Extract one operand field from the (left-aligned) raw insn bits into
   the compressed DisasFields slot, applying sign-extension or the
   dl/dh un-swap depending on the field type. */
4351 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4359 /* Zero extract the field from the insn. */
4360 r = (insn << f->beg) >> (64 - f->size);
4362 /* Sign-extend, or un-swap the field as necessary. */
4364 case 0: /* unsigned */
4366 case 1: /* signed */
4367 assert(f->size <= 32);
4368 m = 1u << (f->size - 1);
4371 case 2: /* dl+dh split, signed 20 bit. */
4372 r = ((int8_t)r << 12) | (r >> 8);
4378 /* Validate that the "compressed" encoding we selected above is valid.
4379 I.e. we haven't made two different original fields overlap. */
4380 assert(((o->presentC >> f->indexC) & 1) == 0);
4381 o->presentC |= 1 << f->indexC;
4382 o->presentO |= 1 << f->indexO;
4384 o->c[f->indexC] = r;
4387 /* Lookup the insn at the current PC, extracting the operands into O and
4388 returning the info struct for the insn. Returns NULL for invalid insn. */
4390 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
4393 uint64_t insn, pc = s->pc;
4395 const DisasInsn *info;
/* The first two bytes always contain the major opcode, from which the
   total instruction length can be derived. */
4397 insn = ld_code2(env, pc);
4398 op = (insn >> 8) & 0xff;
4399 ilen = get_ilen(op);
4400 s->next_pc = s->pc + ilen;
/* Left-align the full insn image in the 64-bit INSN (as expected by
   extract_field).  These two loads look like the 4- and 6-byte length
   cases respectively; the selection logic is elided in this view. */
4407 insn = ld_code4(env, pc) << 32;
4410 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
4416 /* We can't actually determine the insn format until we've looked up
4417 the full insn opcode. Which we can't do without locating the
4418 secondary opcode. Assume by default that OP2 is at bit 40; for
4419 those smaller insns that don't actually have a secondary opcode
4420 this will correctly result in OP2 = 0. */
4426 case 0xb2: /* S, RRF, RRE */
4427 case 0xb3: /* RRE, RRD, RRF */
4428 case 0xb9: /* RRE, RRF */
4429 case 0xe5: /* SSE, SIL */
/* Secondary opcode occupies bits 8-15 of the left-aligned image. */
4430 op2 = (insn << 8) >> 56;
4434 case 0xc0: /* RIL */
4435 case 0xc2: /* RIL */
4436 case 0xc4: /* RIL */
4437 case 0xc6: /* RIL */
4438 case 0xc8: /* SSF */
4439 case 0xcc: /* RIL */
/* Secondary opcode is the 4-bit field at bits 12-15. */
4440 op2 = (insn << 12) >> 60;
4442 case 0xd0 ... 0xdf: /* SS */
4448 case 0xee ... 0xf3: /* SS */
4449 case 0xf8 ... 0xfd: /* SS */
/* Extracts the byte at bit 40 (the default OP2 position described
   above); its placement relative to the SS cases is elided here. */
4453 op2 = (insn << 40) >> 56;
4457 memset(f, 0, sizeof(*f));
4461 /* Lookup the instruction. */
4462 info = lookup_opc(op << 8 | op2);
4464 /* If we found it, extract the operands. */
4466 DisasFormat fmt = info->fmt;
4469 for (i = 0; i < NUM_C_FIELD; ++i) {
4470 extract_field(f, &format_info[fmt].op[i], insn);
/* Translate the single instruction at s->pc into TCG ops.  Returns the
   ExitStatus telling the caller whether translation may continue
   (NO_EXIT) or must stop (e.g. EXIT_NORETURN after an exception). */
4476 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
4478 const DisasInsn *insn;
4479 ExitStatus ret = NO_EXIT;
4483 /* Search for the insn in the table. */
4484 insn = extract_insn(env, s, &f);
4486 /* Not found means unimplemented/illegal opcode. */
4488 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
4490 gen_illegal_opcode(s);
4491 return EXIT_NORETURN;
4494 /* Check for insn specification exceptions. */
/* Each SPEC_* bit requests one static operand check; EXCP accumulates
   the exception to raise (the comparisons themselves are elided here). */
4496 int spec = insn->spec, excp = 0, r;
4498 if (spec & SPEC_r1_even) {
4499 r = get_field(&f, r1);
4501 excp = PGM_SPECIFICATION;
4504 if (spec & SPEC_r2_even) {
4505 r = get_field(&f, r2);
4507 excp = PGM_SPECIFICATION;
4510 if (spec & SPEC_r1_f128) {
4511 r = get_field(&f, r1);
4513 excp = PGM_SPECIFICATION;
4516 if (spec & SPEC_r2_f128) {
4517 r = get_field(&f, r2);
4519 excp = PGM_SPECIFICATION;
4523 gen_program_exception(s, excp);
4524 return EXIT_NORETURN;
4528 /* Set up the structures we use to communicate with the helpers. */
4531 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
/* Mark every operand temp as unused so the cleanup code below can tell
   which ones the helpers actually allocated. */
4532 TCGV_UNUSED_I64(o.out);
4533 TCGV_UNUSED_I64(o.out2);
4534 TCGV_UNUSED_I64(o.in1);
4535 TCGV_UNUSED_I64(o.in2);
4536 TCGV_UNUSED_I64(o.addr1);
4538 /* Implement the instruction.  Each phase is optional; the fixed order
   is: load inputs, prepare outputs, perform the operation, write the
   result back, then compute the condition code. */
4539 if (insn->help_in1) {
4540 insn->help_in1(s, &f, &o);
4542 if (insn->help_in2) {
4543 insn->help_in2(s, &f, &o);
4545 if (insn->help_prep) {
4546 insn->help_prep(s, &f, &o);
4548 if (insn->help_op) {
4549 ret = insn->help_op(s, &o);
4551 if (insn->help_wout) {
4552 insn->help_wout(s, &f, &o);
4554 if (insn->help_cout) {
4555 insn->help_cout(s, &o);
4558 /* Free any temporaries created by the helpers.  The g_* flags mark
   operands that alias globals and therefore must not be freed. */
4559 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
4560 tcg_temp_free_i64(o.out);
4562 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
4563 tcg_temp_free_i64(o.out2);
4565 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
4566 tcg_temp_free_i64(o.in1);
4568 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
4569 tcg_temp_free_i64(o.in2);
4571 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
4572 tcg_temp_free_i64(o.addr1);
4575 /* Advance to the next instruction. */
/* Main translation loop: translate instructions starting at the TB's
   entry PC until the TB must end -- page boundary, op buffer full,
   instruction budget exhausted, single-stepping, or an insn that ends
   the TB itself (status != NO_EXIT). */
4580 static inline void gen_intermediate_code_internal(CPUS390XState *env,
4581 TranslationBlock *tb,
4585 target_ulong pc_start;
4586 uint64_t next_page_start;
4587 uint16_t *gen_opc_end;
4589 int num_insns, max_insns;
/* In non-64-bit mode only 31 address bits of the PSW address are used. */
4597 if (!(tb->flags & FLAG_MASK_64)) {
4598 pc_start &= 0x7fffffff;
4603 dc.cc_op = CC_OP_DYNAMIC;
4604 do_debug = dc.singlestep_enabled = env->singlestep_enabled;
4606 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
4608 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
4611 max_insns = tb->cflags & CF_COUNT_MASK;
4612 if (max_insns == 0) {
4613 max_insns = CF_COUNT_MASK;
/* gen_opc_* bookkeeping (used by restore_state_to_opc): pad unused
   slots, then record pc/cc_op/icount for the insn about to be
   translated.  The guarding condition is elided in this view. */
4620 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4624 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4627 tcg_ctx.gen_opc_pc[lj] = dc.pc;
4628 gen_opc_cc_op[lj] = dc.cc_op;
4629 tcg_ctx.gen_opc_instr_start[lj] = 1;
4630 tcg_ctx.gen_opc_icount[lj] = num_insns;
4632 if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
4636 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4637 tcg_gen_debug_insn_start(dc.pc);
/* Stop before an insn that has a breakpoint on it, so the debug
   exception is raised with consistent CPU state. */
4641 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
4642 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
4643 if (bp->pc == dc.pc) {
4644 status = EXIT_PC_STALE;
4650 if (status == NO_EXIT) {
4651 status = translate_one(env, &dc);
4654 /* If we reach a page boundary, are single stepping,
4655 or exhaust instruction count, stop generation. */
4656 if (status == NO_EXIT
4657 && (dc.pc >= next_page_start
4658 || tcg_ctx.gen_opc_ptr >= gen_opc_end
4659 || num_insns >= max_insns
4661 || env->singlestep_enabled)) {
4662 status = EXIT_PC_STALE;
4664 } while (status == NO_EXIT);
4666 if (tb->cflags & CF_LAST_IO) {
/* EXIT_PC_STALE: dc.pc is correct but not yet in the PSW; write it. */
4675 update_psw_addr(&dc);
4677 case EXIT_PC_UPDATED:
4678 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
4679 cc op type is in env */
4681 /* Exit the TB, either by raising a debug exception or by return. */
4683 gen_exception(EXCP_DEBUG);
4692 gen_icount_end(tb, num_insns);
4693 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
/* Pad the remaining gen_opc_instr_start slots for the search-pc case
   (guarding condition elided in this view). */
4695 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
4698 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4701 tb->size = dc.pc - pc_start;
4702 tb->icount = num_insns;
4705 #if defined(S390X_DEBUG_DISAS)
4706 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
4707 qemu_log("IN: %s\n", lookup_symbol(pc_start));
4708 log_target_disas(env, pc_start, dc.pc - pc_start, 1);
/* Public entry point: translate one TB.  The final argument (whose
   parameter name is elided in this view) is 0 here and 1 in
   gen_intermediate_code_pc -- presumably the search-pc flag; confirm
   against the full signature. */
4714 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4716 gen_intermediate_code_internal(env, tb, 0);
/* Public entry point: same as gen_intermediate_code but with the final
   argument set to 1 (elided parameter -- presumably enables the
   gen_opc_* pc-search bookkeeping; confirm against the full signature). */
4719 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4721 gen_intermediate_code_internal(env, tb, 1);
/* Restore CPU state (psw.addr and the cc_op) from the gen_opc_* side
   arrays at op index PC_POS, e.g. after a fault inside a TB. */
4724 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4727 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4728 cc_op = gen_opc_cc_op[pc_pos];
/* DYNAMIC and STATIC are sentinel cc_op values carrying no restorable
   state; only a concrete cc_op is written back (write elided here). */
4729 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {