DEF_HELPER_FLAGS_2(fpack16, TCG_CALL_CONST | TCG_CALL_PURE, i32, i64, i64)
DEF_HELPER_FLAGS_3(fpack32, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64, i64)
DEF_HELPER_FLAGS_2(fpackfix, TCG_CALL_CONST | TCG_CALL_PURE, i32, i64, i64)
+DEF_HELPER_FLAGS_3(bshuffle, TCG_CALL_CONST | TCG_CALL_PURE, i64, i64, i64, i64)
#define VIS_HELPER(name) \
DEF_HELPER_FLAGS_2(f ## name ## 16, TCG_CALL_CONST | TCG_CALL_PURE, \
i64, i64, i64) \
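
For reference, the new DEF_HELPER_FLAGS_3 line expands via def-helper.h into roughly the prototype below (the parameter names are taken from the definition at the end of this patch). TCG_CALL_CONST | TCG_CALL_PURE tells the TCG optimizer that the helper neither reads nor writes CPU state beyond its arguments, so the call can be treated like a pure function:

    uint64_t helper_bshuffle(uint64_t gsr, uint64_t src1, uint64_t src2);
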
gen_movl_TN_reg(rd, cpu_dst);
break;
case 0x019: /* VIS II bmask */
- // XXX
- goto illegal_insn;
+ CHECK_FPU_FEATURE(dc, VIS2);
+ cpu_src1 = get_src1(insn, cpu_src1);
+ cpu_src2 = get_src2(insn, cpu_src2);
+ tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
+ gen_movl_TN_reg(rd, cpu_dst);
+ break;
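
As a host-side sketch of what the TCG ops above generate (illustration only, not part of the patch): BMASK writes the full sum to rd and deposits the sum's low 32 bits into bits 63:32 of the GSR, the GSR.mask field that BSHUFFLE consumes below.

    static uint64_t bmask_model(uint64_t *gsr, uint64_t src1, uint64_t src2)
    {
        uint64_t sum = src1 + src2;

        /* Mirrors tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32):
           keep GSR<31:0>, replace GSR<63:32> with the low half of the sum. */
        *gsr = (*gsr & 0xffffffffull) | (sum << 32);
        return sum;
    }
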
case 0x020: /* VIS I fcmple16 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
break;
case 0x04c: /* VIS II bshuffle */
- // XXX
- goto illegal_insn;
+ CHECK_FPU_FEATURE(dc, VIS2);
+ gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
+ break;
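
gen_gsr_fop_DDD is what routes cpu_gsr into the helper's first argument. It is not shown in this excerpt; presumably it is the GSR-consuming variant of gen_ne_fop_DDD used for fpmerge below, along these lines (a sketch, assuming the gen_load_fpr_D/gen_dest_fpr_D/gen_store_fpr_D interfaces used elsewhere in this file):

    static void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                                void (*gen)(TCGv_i64, TCGv_i64,
                                            TCGv_i64, TCGv_i64))
    {
        TCGv_i64 src1 = gen_load_fpr_D(dc, rs1);
        TCGv_i64 src2 = gen_load_fpr_D(dc, rs2);
        TCGv_i64 dst = gen_dest_fpr_D();

        /* Pass the GSR as the helper's first operand.  */
        gen(dst, cpu_gsr, src1, src2);
        gen_store_fpr_D(dc, rd, dst);
    }
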
case 0x04d: /* VIS I fexpand */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
return ret;
}
+
+uint64_t helper_bshuffle(uint64_t gsr, uint64_t src1, uint64_t src2)
+{
+ union {
+ uint64_t ll[2];
+ uint8_t b[16];
+ } s;
+ VIS64 r;
+ uint32_t i, mask, host;
+
+ /* Set up S such that we can index across all of the bytes. */
+#ifdef HOST_WORDS_BIGENDIAN
+ s.ll[0] = src1;
+ s.ll[1] = src2;
+ host = 0;
+#else
+ s.ll[1] = src1;
+ s.ll[0] = src2;
+ host = 15;
+#endif
+ mask = gsr >> 32;
+
+ for (i = 0; i < 8; ++i) {
+ unsigned e = (mask >> (28 - i*4)) & 0xf;
+ r.VIS_B64(i) = s.b[e ^ host];
+ }
+
+ return r.ll;
+}
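
A quick endian-independent way to read the loop: nibble i of GSR.mask, scanned from the most significant nibble down, selects byte e of the 16-byte concatenation src1:src2 (byte 0 being the most significant byte of src1; the ^ host fixup makes that numbering work on either host endianness), and the selected byte becomes byte i of the result, counting up from the least significant byte. So a mask of 0x76543210 reproduces src1 and 0xfedcba98 reproduces src2. A stand-alone model for checking this on the host (illustration only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t bshuffle_model(uint32_t mask, uint64_t src1, uint64_t src2)
    {
        uint64_t r = 0;
        unsigned i;

        for (i = 0; i < 8; i++) {
            unsigned e = (mask >> (28 - i * 4)) & 0xf;
            /* Byte e of the big-endian concatenation src1:src2.  */
            uint64_t src = e < 8 ? src1 : src2;
            uint8_t byte = src >> (56 - (e & 7) * 8);
            r |= (uint64_t)byte << (i * 8);
        }
        return r;
    }

    int main(void)
    {
        assert(bshuffle_model(0x76543210, 0x0011223344556677ull, 0) ==
               0x0011223344556677ull);
        assert(bshuffle_model(0xfedcba98, 0, 0x8899aabbccddeeffull) ==
               0x8899aabbccddeeffull);
        return 0;
    }
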