ARM TCG conversion 14/16.
author     pbrook <pbrook@c046a42c-6fe2-441c-8c8c-71466251a162>
           Mon, 31 Mar 2008 03:48:30 +0000 (03:48 +0000)
committer  pbrook <pbrook@c046a42c-6fe2-441c-8c8c-71466251a162>
           Mon, 31 Mar 2008 03:48:30 +0000 (03:48 +0000)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4151 c046a42c-6fe2-441c-8c8c-71466251a162

Makefile.target
target-arm/helper.c
target-arm/helpers.h
target-arm/neon_helper.c [new file with mode: 0644]
target-arm/op.c
target-arm/op_helper.c
target-arm/op_neon.h [deleted file]
target-arm/translate.c

index 5ac29a7d8261749ad34a23588de4d014cb83ef51..fef43646f36596fd412337d9bf3076d355d1c3c6 100644 (file)
@@ -211,7 +211,7 @@ LIBOBJS+= op_helper.o helper.o
 endif
 
 ifeq ($(TARGET_BASE_ARCH), arm)
-LIBOBJS+= op_helper.o helper.o
+LIBOBJS+= op_helper.o helper.o neon_helper.o
 endif
 
 ifeq ($(TARGET_BASE_ARCH), sh4)
index 48cd6c8925fc8ac45b70464ff52e4c3132b99ad5..a2dd7b15dc5554a9a2bfbe51f059ea2144024220 100644 (file)
@@ -256,30 +256,6 @@ void cpu_arm_close(CPUARMState *env)
     free(env);
 }
 
-/* Polynomial multiplication is like integer multiplcation except the
-   partial products are XORed, not added.  */
-uint32_t helper_neon_mul_p8(uint32_t op1, uint32_t op2)
-{
-    uint32_t mask;
-    uint32_t result;
-    result = 0;
-    while (op1) {
-        mask = 0;
-        if (op1 & 1)
-            mask |= 0xff;
-        if (op1 & (1 << 8))
-            mask |= (0xff << 8);
-        if (op1 & (1 << 16))
-            mask |= (0xff << 16);
-        if (op1 & (1 << 24))
-            mask |= (0xff << 24);
-        result ^= op2 & mask;
-        op1 = (op1 >> 1) & 0x7f7f7f7f;
-        op2 = (op2 << 1) & 0xfefefefe;
-    }
-    return result;
-}
-
 uint32_t cpsr_read(CPUARMState *env)
 {
     int ZF;
@@ -376,6 +352,11 @@ uint32_t HELPER(rbit)(uint32_t x)
     return x;
 }
 
+uint32_t HELPER(abs)(uint32_t x)
+{
+    return ((int32_t)x < 0) ? -x : x;
+}
+
 #if defined(CONFIG_USER_ONLY)
 
 void do_interrupt (CPUState *env)
index 8ebd25fae851ac97aaa40d63ccb4af9c92631148..1ae36eef4a8e808948dffe4d12186881d36409ce 100644 (file)
@@ -84,6 +84,7 @@ DEF_HELPER_1_1(double_saturate, uint32_t, (int32_t))
 DEF_HELPER_1_2(sdiv, int32_t, (int32_t, int32_t))
 DEF_HELPER_1_2(udiv, uint32_t, (uint32_t, uint32_t))
 DEF_HELPER_1_1(rbit, uint32_t, (uint32_t))
+DEF_HELPER_1_1(abs, uint32_t, (uint32_t))
 
 #define PAS_OP(pfx)  \
     DEF_HELPER_1_3(pfx ## add8, uint32_t, (uint32_t, uint32_t, uint32_t *)) \
@@ -208,6 +209,10 @@ DEF_HELPER_1_2(rsqrte_f32, float32, (float32, CPUState *))
 DEF_HELPER_1_2(recpe_u32, uint32_t, (uint32_t, CPUState *))
 DEF_HELPER_1_2(rsqrte_u32, uint32_t, (uint32_t, CPUState *))
 DEF_HELPER_1_4(neon_tbl, uint32_t, (uint32_t, uint32_t, uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_add_saturate_u64, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_add_saturate_s64, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_sub_saturate_u64, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_sub_saturate_s64, uint64_t, (uint64_t, uint64_t))
 
 DEF_HELPER_1_2(add_cc, uint32_t, (uint32_t, uint32_t))
 DEF_HELPER_1_2(adc_cc, uint32_t, (uint32_t, uint32_t))
@@ -223,6 +228,209 @@ DEF_HELPER_1_2(shr_cc, uint32_t, (uint32_t, uint32_t))
 DEF_HELPER_1_2(sar_cc, uint32_t, (uint32_t, uint32_t))
 DEF_HELPER_1_2(ror_cc, uint32_t, (uint32_t, uint32_t))
 
+/* neon_helper.c */
+DEF_HELPER_1_3(neon_qadd_u8, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qadd_s8, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qadd_u16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qadd_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qsub_u8, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qsub_s8, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qsub_u16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qsub_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
+
+DEF_HELPER_1_2(neon_hadd_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hadd_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hadd_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hadd_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hadd_s32, int32_t, (int32_t, int32_t))
+DEF_HELPER_1_2(neon_hadd_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rhadd_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rhadd_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rhadd_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rhadd_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rhadd_s32, int32_t, (int32_t, int32_t))
+DEF_HELPER_1_2(neon_rhadd_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hsub_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hsub_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hsub_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hsub_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_hsub_s32, int32_t, (int32_t, int32_t))
+DEF_HELPER_1_2(neon_hsub_u32, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_2(neon_cgt_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cgt_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cgt_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cgt_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cgt_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cgt_s32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cge_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cge_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cge_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cge_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cge_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cge_s32, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_2(neon_min_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_min_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_min_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_min_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_min_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_min_s32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_max_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_max_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_max_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_max_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_max_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_max_s32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmin_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmin_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmin_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmin_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmin_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmin_s32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmax_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmax_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmax_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmax_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmax_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_pmax_s32, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_2(neon_abd_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abd_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abd_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abd_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abd_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abd_s32, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_2(neon_shl_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_shl_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_shl_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_shl_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_shl_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_shl_s32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_shl_u64, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_shl_s64, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_rshl_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rshl_s8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rshl_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rshl_s16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rshl_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rshl_s32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_rshl_u64, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_rshl_s64, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_3(neon_qshl_u8, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qshl_s8, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qshl_u16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qshl_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qshl_u32, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qshl_s32, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qshl_u64, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_3(neon_qshl_s64, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_3(neon_qrshl_u8, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qrshl_s8, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qrshl_u16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qrshl_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qrshl_u32, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qrshl_s32, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qrshl_u64, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_3(neon_qrshl_s64, uint64_t, (CPUState *, uint64_t, uint64_t))
+
+DEF_HELPER_1_2(neon_add_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_add_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_padd_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_padd_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_sub_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_sub_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_mul_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_mul_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_mul_p8, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_2(neon_tst_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_tst_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_tst_u32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_ceq_u8, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_ceq_u16, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_ceq_u32, uint32_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_1(neon_abs_s8, uint32_t, (uint32_t))
+DEF_HELPER_1_1(neon_abs_s16, uint32_t, (uint32_t))
+DEF_HELPER_1_1(neon_clz_u8, uint32_t, (uint32_t))
+DEF_HELPER_1_1(neon_clz_u16, uint32_t, (uint32_t))
+DEF_HELPER_1_1(neon_cls_s8, uint32_t, (uint32_t))
+DEF_HELPER_1_1(neon_cls_s16, uint32_t, (uint32_t))
+DEF_HELPER_1_1(neon_cls_s32, uint32_t, (uint32_t))
+DEF_HELPER_1_1(neon_cnt_u8, uint32_t, (uint32_t))
+
+DEF_HELPER_1_3(neon_qdmulh_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qrdmulh_s16, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qdmulh_s32, uint32_t, (CPUState *, uint32_t, uint32_t))
+DEF_HELPER_1_3(neon_qrdmulh_s32, uint32_t, (CPUState *, uint32_t, uint32_t))
+
+DEF_HELPER_1_1(neon_narrow_u8, uint32_t, (uint64_t))
+DEF_HELPER_1_1(neon_narrow_u16, uint32_t, (uint64_t))
+DEF_HELPER_1_2(neon_narrow_sat_u8, uint32_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(neon_narrow_sat_s8, uint32_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(neon_narrow_sat_u16, uint32_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(neon_narrow_sat_s16, uint32_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(neon_narrow_sat_u32, uint32_t, (CPUState *, uint64_t))
+DEF_HELPER_1_2(neon_narrow_sat_s32, uint32_t, (CPUState *, uint64_t))
+DEF_HELPER_1_1(neon_narrow_high_u8, uint32_t, (uint64_t))
+DEF_HELPER_1_1(neon_narrow_high_u16, uint32_t, (uint64_t))
+DEF_HELPER_1_1(neon_narrow_round_high_u8, uint32_t, (uint64_t))
+DEF_HELPER_1_1(neon_narrow_round_high_u16, uint32_t, (uint64_t))
+DEF_HELPER_1_1(neon_widen_u8, uint64_t, (uint32_t))
+DEF_HELPER_1_1(neon_widen_s8, uint64_t, (uint32_t))
+DEF_HELPER_1_1(neon_widen_u16, uint64_t, (uint32_t))
+DEF_HELPER_1_1(neon_widen_s16, uint64_t, (uint32_t))
+
+DEF_HELPER_1_2(neon_addl_u16, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_addl_u32, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_paddl_u16, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_paddl_u32, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_subl_u16, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_subl_u32, uint64_t, (uint64_t, uint64_t))
+DEF_HELPER_1_3(neon_addl_saturate_s32, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_3(neon_addl_saturate_s64, uint64_t, (CPUState *, uint64_t, uint64_t))
+DEF_HELPER_1_2(neon_abdl_u16, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abdl_s16, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abdl_u32, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abdl_s32, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abdl_u64, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abdl_s64, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_mull_u8, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_mull_s8, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_mull_u16, uint64_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_mull_s16, uint64_t, (uint32_t, uint32_t))
+
+DEF_HELPER_1_1(neon_negl_u16, uint64_t, (uint64_t))
+DEF_HELPER_1_1(neon_negl_u32, uint64_t, (uint64_t))
+DEF_HELPER_1_1(neon_negl_u64, uint64_t, (uint64_t))
+
+DEF_HELPER_1_2(neon_qabs_s8, uint32_t, (CPUState *, uint32_t))
+DEF_HELPER_1_2(neon_qabs_s16, uint32_t, (CPUState *, uint32_t))
+DEF_HELPER_1_2(neon_qabs_s32, uint32_t, (CPUState *, uint32_t))
+DEF_HELPER_1_2(neon_qneg_s8, uint32_t, (CPUState *, uint32_t))
+DEF_HELPER_1_2(neon_qneg_s16, uint32_t, (CPUState *, uint32_t))
+DEF_HELPER_1_2(neon_qneg_s32, uint32_t, (CPUState *, uint32_t))
+
+DEF_HELPER_0_0(neon_trn_u8, void, (void))
+DEF_HELPER_0_0(neon_trn_u16, void, (void))
+DEF_HELPER_0_0(neon_unzip_u8, void, (void))
+DEF_HELPER_0_0(neon_zip_u8, void, (void))
+DEF_HELPER_0_0(neon_zip_u16, void, (void))
+
+DEF_HELPER_1_2(neon_min_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_max_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_abd_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_add_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_sub_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_mul_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_ceq_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cge_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_cgt_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_acge_f32, uint32_t, (uint32_t, uint32_t))
+DEF_HELPER_1_2(neon_acgt_f32, uint32_t, (uint32_t, uint32_t))
+
 #undef DEF_HELPER
 #undef DEF_HELPER_0_0
 #undef DEF_HELPER_0_1
diff --git a/target-arm/neon_helper.c b/target-arm/neon_helper.c
new file mode 100644 (file)
index 0000000..c8ab2b4
--- /dev/null
@@ -0,0 +1,1449 @@
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+#include "helpers.h"
+
+#define SIGNBIT (uint32_t)0x80000000
+#define SIGNBIT64 ((uint64_t)1 << 63)
+
+#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] |= CPSR_Q
+
+static float_status neon_float_status;
+#define NFS &neon_float_status
+
+/* Helper routines to perform bitwise copies between float and int.  */
+static inline float32 vfp_itos(uint32_t i)
+{
+    union {
+        uint32_t i;
+        float32 s;
+    } v;
+
+    v.i = i;
+    return v.s;
+}
+
+static inline uint32_t vfp_stoi(float32 s)
+{
+    union {
+        uint32_t i;
+        float32 s;
+    } v;
+
+    v.s = s;
+    return v.i;
+}
+
+#define NEON_TYPE1(name, type) \
+typedef struct \
+{ \
+    type v1; \
+} neon_##name;
+#ifdef WORDS_BIGENDIAN
+#define NEON_TYPE2(name, type) \
+typedef struct \
+{ \
+    type v2; \
+    type v1; \
+} neon_##name;
+#define NEON_TYPE4(name, type) \
+typedef struct \
+{ \
+    type v4; \
+    type v3; \
+    type v2; \
+    type v1; \
+} neon_##name;
+#else
+#define NEON_TYPE2(name, type) \
+typedef struct \
+{ \
+    type v1; \
+    type v2; \
+} neon_##name;
+#define NEON_TYPE4(name, type) \
+typedef struct \
+{ \
+    type v1; \
+    type v2; \
+    type v3; \
+    type v4; \
+} neon_##name;
+#endif
+
+NEON_TYPE4(s8, int8_t)
+NEON_TYPE4(u8, uint8_t)
+NEON_TYPE2(s16, int16_t)
+NEON_TYPE2(u16, uint16_t)
+NEON_TYPE1(s32, int32_t)
+NEON_TYPE1(u32, uint32_t)
+#undef NEON_TYPE4
+#undef NEON_TYPE2
+#undef NEON_TYPE1
+
+/* Copy from a uint32_t to a vector structure type.  */
+#define NEON_UNPACK(vtype, dest, val) do { \
+    union { \
+        vtype v; \
+        uint32_t i; \
+    } conv_u; \
+    conv_u.i = (val); \
+    dest = conv_u.v; \
+    } while(0)
+
+/* Copy from a vector structure type to a uint32_t.  */
+#define NEON_PACK(vtype, dest, val) do { \
+    union { \
+        vtype v; \
+        uint32_t i; \
+    } conv_u; \
+    conv_u.v = (val); \
+    dest = conv_u.i; \
+    } while(0)
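
A minimal sketch of how the two macros combine, assuming a little-endian host so that v1 is the least significant byte:

    neon_u8 vec;
    uint32_t out;
    NEON_UNPACK(neon_u8, vec, 0x04030201);  /* vec.v1 = 0x01 ... vec.v4 = 0x04 */
    vec.v1 += 0x10;
    NEON_PACK(neon_u8, out, vec);           /* out == 0x04030211 */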
+
+#define NEON_DO1 \
+    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
+#define NEON_DO2 \
+    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
+    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
+#define NEON_DO4 \
+    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
+    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
+    NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
+    NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);
+
+#define NEON_VOP_BODY(vtype, n) \
+{ \
+    uint32_t res; \
+    vtype vsrc1; \
+    vtype vsrc2; \
+    vtype vdest; \
+    NEON_UNPACK(vtype, vsrc1, arg1); \
+    NEON_UNPACK(vtype, vsrc2, arg2); \
+    NEON_DO##n; \
+    NEON_PACK(vtype, res, vdest); \
+    return res; \
+}
+
+#define NEON_VOP(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
+NEON_VOP_BODY(vtype, n)
+
+#define NEON_VOP_ENV(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(CPUState *env, uint32_t arg1, uint32_t arg2) \
+NEON_VOP_BODY(vtype, n)
+
+/* Pairwise operations.  */
+/* For 32-bit elements each segment only contains a single element, so
+   the elementwise and pairwise operations are the same.  */
+#define NEON_PDO2 \
+    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
+    NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
+#define NEON_PDO4 \
+    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
+    NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
+    NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
+    NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4); \
+
+#define NEON_POP(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
+{ \
+    uint32_t res; \
+    vtype vsrc1; \
+    vtype vsrc2; \
+    vtype vdest; \
+    NEON_UNPACK(vtype, vsrc1, arg1); \
+    NEON_UNPACK(vtype, vsrc2, arg2); \
+    NEON_PDO##n; \
+    NEON_PACK(vtype, res, vdest); \
+    return res; \
+}
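
For example, the pairwise add instantiated further down (padd_u8) takes both results from the first operand's adjacent lane pairs and both from the second's; a hypothetical call, assuming little-endian lane order and the helper_ prefix that HELPER() expands to:

    /* arg1 lanes {1, 2, 3, 4}, arg2 lanes {5, 6, 7, 8} */
    uint32_t r = helper_neon_padd_u8(0x04030201, 0x08070605);
    /* dest lanes {1+2, 3+4, 5+6, 7+8}, so r == 0x0f0b0703 */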
+
+/* Unary operators.  */
+#define NEON_VOP1(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
+{ \
+    vtype vsrc1; \
+    vtype vdest; \
+    NEON_UNPACK(vtype, vsrc1, arg); \
+    NEON_DO##n; \
+    NEON_PACK(vtype, arg, vdest); \
+    return arg; \
+}
+
+
+#define NEON_USAT(dest, src1, src2, type) do { \
+    uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
+    if (tmp != (type)tmp) { \
+        SET_QC(); \
+        dest = ~0; \
+    } else { \
+        dest = tmp; \
+    }} while(0)
+#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
+NEON_VOP_ENV(qadd_u8, neon_u8, 4)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
+NEON_VOP_ENV(qadd_u16, neon_u16, 2)
+#undef NEON_FN
+#undef NEON_USAT
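
The effect of the unsigned saturating pattern, sketched on one hypothetical call (given some CPUState *env): a byte lane whose sum does not fit in uint8_t is forced to 0xff and the sticky QC flag is raised:

    uint32_t r = helper_neon_qadd_u8(env, 0x000000f0, 0x00000020);
    /* low lane: 0xf0 + 0x20 overflows, so r == 0x000000ff and QC is set */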
+
+#define NEON_SSAT(dest, src1, src2, type) do { \
+    int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
+    if (tmp != (type)tmp) { \
+        SET_QC(); \
+        if (src2 > 0) { \
+            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
+        } else { \
+            tmp = 1 << (sizeof(type) * 8 - 1); \
+        } \
+    } \
+    dest = tmp; \
+    } while(0)
+#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
+NEON_VOP_ENV(qadd_s8, neon_s8, 4)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
+NEON_VOP_ENV(qadd_s16, neon_s16, 2)
+#undef NEON_FN
+#undef NEON_SSAT
+
+#define NEON_USAT(dest, src1, src2, type) do { \
+    uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
+    if (tmp != (type)tmp) { \
+        SET_QC(); \
+        dest = 0; \
+    } else { \
+        dest = tmp; \
+    }} while(0)
+#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
+NEON_VOP_ENV(qsub_u8, neon_u8, 4)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
+NEON_VOP_ENV(qsub_u16, neon_u16, 2)
+#undef NEON_FN
+#undef NEON_USAT
+
+#define NEON_SSAT(dest, src1, src2, type) do { \
+    int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
+    if (tmp != (type)tmp) { \
+        SET_QC(); \
+        if (src2 < 0) { \
+            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
+        } else { \
+            tmp = 1 << (sizeof(type) * 8 - 1); \
+        } \
+    } \
+    dest = tmp; \
+    } while(0)
+#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
+NEON_VOP_ENV(qsub_s8, neon_s8, 4)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
+NEON_VOP_ENV(qsub_s16, neon_s16, 2)
+#undef NEON_FN
+#undef NEON_SSAT
+
+#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
+NEON_VOP(hadd_s8, neon_s8, 4)
+NEON_VOP(hadd_u8, neon_u8, 4)
+NEON_VOP(hadd_s16, neon_s16, 2)
+NEON_VOP(hadd_u16, neon_u16, 2)
+#undef NEON_FN
+
+int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2)
+{
+    int32_t dest;
+
+    dest = (src1 >> 1) + (src2 >> 1);
+    if (src1 & src2 & 1)
+        dest++;
+    return dest;
+}
+
+uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2)
+{
+    uint32_t dest;
+
+    dest = (src1 >> 1) + (src2 >> 1);
+    if (src1 & src2 & 1)
+        dest++;
+    return dest;
+}
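
The 32-bit variants cannot widen before adding, so each operand is halved first; the src1 & src2 & 1 term restores the carry that would otherwise be lost when both discarded low bits are set. For instance:

    /* floor((0xffffffff + 1) / 2): the halves give 0x7fffffff + 0,
       and both low bits being set supplies the missing +1 */
    helper_neon_hadd_u32(0xffffffffu, 1u);  /* == 0x80000000 */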
+
+#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
+NEON_VOP(rhadd_s8, neon_s8, 4)
+NEON_VOP(rhadd_u8, neon_u8, 4)
+NEON_VOP(rhadd_s16, neon_s16, 2)
+NEON_VOP(rhadd_u16, neon_u16, 2)
+#undef NEON_FN
+
+int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2)
+{
+    int32_t dest;
+
+    dest = (src1 >> 1) + (src2 >> 1);
+    if ((src1 | src2) & 1)
+        dest++;
+    return dest;
+}
+
+uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2)
+{
+    uint32_t dest;
+
+    dest = (src1 >> 1) + (src2 >> 1);
+    if ((src1 | src2) & 1)
+        dest++;
+    return dest;
+}
+
+#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
+NEON_VOP(hsub_s8, neon_s8, 4)
+NEON_VOP(hsub_u8, neon_u8, 4)
+NEON_VOP(hsub_s16, neon_s16, 2)
+NEON_VOP(hsub_u16, neon_u16, 2)
+#undef NEON_FN
+
+int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2)
+{
+    int32_t dest;
+
+    dest = (src1 >> 1) - (src2 >> 1);
+    if ((~src1) & src2 & 1)
+        dest--;
+    return dest;
+}
+
+uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
+{
+    uint32_t dest;
+
+    dest = (src1 >> 1) - (src2 >> 1);
+    if ((~src1) & src2 & 1)
+        dest--;
+    return dest;
+}
+
+#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0
+NEON_VOP(cgt_s8, neon_s8, 4)
+NEON_VOP(cgt_u8, neon_u8, 4)
+NEON_VOP(cgt_s16, neon_s16, 2)
+NEON_VOP(cgt_u16, neon_u16, 2)
+NEON_VOP(cgt_s32, neon_s32, 1)
+NEON_VOP(cgt_u32, neon_u32, 1)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0
+NEON_VOP(cge_s8, neon_s8, 4)
+NEON_VOP(cge_u8, neon_u8, 4)
+NEON_VOP(cge_s16, neon_s16, 2)
+NEON_VOP(cge_u16, neon_u16, 2)
+NEON_VOP(cge_s32, neon_s32, 1)
+NEON_VOP(cge_u32, neon_u32, 1)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
+NEON_VOP(min_s8, neon_s8, 4)
+NEON_VOP(min_u8, neon_u8, 4)
+NEON_VOP(min_s16, neon_s16, 2)
+NEON_VOP(min_u16, neon_u16, 2)
+NEON_VOP(min_s32, neon_s32, 1)
+NEON_VOP(min_u32, neon_u32, 1)
+NEON_POP(pmin_s8, neon_s8, 4)
+NEON_POP(pmin_u8, neon_u8, 4)
+NEON_POP(pmin_s16, neon_s16, 2)
+NEON_POP(pmin_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
+NEON_VOP(max_s8, neon_s8, 4)
+NEON_VOP(max_u8, neon_u8, 4)
+NEON_VOP(max_s16, neon_s16, 2)
+NEON_VOP(max_u16, neon_u16, 2)
+NEON_VOP(max_s32, neon_s32, 1)
+NEON_VOP(max_u32, neon_u32, 1)
+NEON_POP(pmax_s8, neon_s8, 4)
+NEON_POP(pmax_u8, neon_u8, 4)
+NEON_POP(pmax_s16, neon_s16, 2)
+NEON_POP(pmax_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+    dest = (src1 > src2) ? (src1 - src2) : (src2 - src1)
+NEON_VOP(abd_s8, neon_s8, 4)
+NEON_VOP(abd_u8, neon_u8, 4)
+NEON_VOP(abd_s16, neon_s16, 2)
+NEON_VOP(abd_u16, neon_u16, 2)
+NEON_VOP(abd_s32, neon_s32, 1)
+NEON_VOP(abd_u32, neon_u32, 1)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) do { \
+    int8_t tmp; \
+    tmp = (int8_t)src2; \
+    if (tmp >= (int)sizeof(src1) * 8 || tmp <= -(int)sizeof(src1) * 8) { \
+        dest = 0; \
+    } else if (tmp < 0) { \
+        dest = src1 >> -tmp; \
+    } else { \
+        dest = src1 << tmp; \
+    }} while (0)
+NEON_VOP(shl_u8, neon_u8, 4)
+NEON_VOP(shl_u16, neon_u16, 2)
+NEON_VOP(shl_u32, neon_u32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop)
+{
+    int8_t shift = (int8_t)shiftop;
+    if (shift >= 64 || shift <= -64) {
+        val = 0;
+    } else if (shift < 0) {
+        val >>= -shift;
+    } else {
+        val <<= shift;
+    }
+    return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+    int8_t tmp; \
+    tmp = (int8_t)src2; \
+    if (tmp >= (int)sizeof(src1) * 8) { \
+        dest = 0; \
+    } else if (tmp <= -(int)sizeof(src1) * 8) { \
+        dest = src1 >> (sizeof(src1) * 8 - 1); \
+    } else if (tmp < 0) { \
+        dest = src1 >> -tmp; \
+    } else { \
+        dest = src1 << tmp; \
+    }} while (0)
+NEON_VOP(shl_s8, neon_s8, 4)
+NEON_VOP(shl_s16, neon_s16, 2)
+NEON_VOP(shl_s32, neon_s32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop)
+{
+    int8_t shift = (int8_t)shiftop;
+    int64_t val = valop;
+    if (shift >= 64) {
+        val = 0;
+    } else if (shift <= -64) {
+        val >>= 63;
+    } else if (shift < 0) {
+        val >>= -shift;
+    } else {
+        val <<= shift;
+    }
+    return val;
+}
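
Note that the VSHL shift count is a signed quantity taken from the low byte of the second operand, so negative counts shift right; two hypothetical checks of the 64-bit helpers:

    helper_neon_shl_u64(0x100, (uint64_t)-4);        /* == 0x10 */
    helper_neon_shl_s64((uint64_t)-8, (uint64_t)-2); /* == (uint64_t)-2 (arithmetic) */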
+
+#define NEON_FN(dest, src1, src2) do { \
+    int8_t tmp; \
+    tmp = (int8_t)src2; \
+    if (tmp >= (int)sizeof(src1) * 8) { \
+        dest = 0; \
+    } else if (tmp < -(int)sizeof(src1) * 8) { \
+        dest = src1 >> (sizeof(src1) * 8 - 1); \
+    } else if (tmp == -(int)sizeof(src1) * 8) { \
+        dest = src1 >> (-tmp - 1); \
+        dest++; \
+        dest >>= 1; \
+    } else if (tmp < 0) { \
+        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
+    } else { \
+        dest = src1 << tmp; \
+    }} while (0)
+NEON_VOP(rshl_s8, neon_s8, 4)
+NEON_VOP(rshl_s16, neon_s16, 2)
+NEON_VOP(rshl_s32, neon_s32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
+{
+    int8_t shift = (int8_t)shiftop;
+    int64_t val = valop;
+    if (shift >= 64) {
+        val = 0;
+    } else if (shift < -64) {
+        val >>= 63;
+    } else if (shift == -64) {
+        val >>= 63;
+        val++;
+        val >>= 1;
+    } else if (shift < 0) {
+        val = (val + ((int64_t)1 << (-1 - shift))) >> -shift;
+    } else {
+        val <<= shift;
+    }
+    return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+    int8_t tmp; \
+    tmp = (int8_t)src2; \
+    if (tmp >= sizeof(src1) * 8 || tmp < -sizeof(src1) * 8) { \
+        dest = 0; \
+    } else if (tmp == -sizeof(src1) * 8) { \
+        dest = src1 >> (tmp - 1); \
+    } else if (tmp < 0) { \
+        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
+    } else { \
+        dest = src1 << tmp; \
+    }} while (0)
+NEON_VOP(rshl_u8, neon_u8, 4)
+NEON_VOP(rshl_u16, neon_u16, 2)
+NEON_VOP(rshl_u32, neon_u32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
+{
+    int8_t shift = (int8_t)shiftop;
+    if (shift >= 64 || shift < -64) {
+        val = 0;
+    } else if (shift == -64) {
+        /* Rounding a 1-bit result just preserves that bit.  */
+        val >>= 63;
+    } else if (shift < 0) {
+        val = (val + ((uint64_t)1 << (-1 - shift))) >> -shift;
+    } else {
+    } else {
+        val <<= shift;
+    }
+    return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+    int8_t tmp; \
+    tmp = (int8_t)src2; \
+    if (tmp >= (int)sizeof(src1) * 8) { \
+        if (src1) { \
+            SET_QC(); \
+            dest = ~0; \
+        } else { \
+            dest = 0; \
+        } \
+    } else if (tmp <= -(int)sizeof(src1) * 8) { \
+        dest = 0; \
+    } else if (tmp < 0) { \
+        dest = src1 >> -tmp; \
+    } else { \
+        dest = src1 << tmp; \
+        if ((dest >> tmp) != src1) { \
+            SET_QC(); \
+            dest = ~0; \
+        } \
+    }} while (0)
+NEON_VOP_ENV(qshl_u8, neon_u8, 4)
+NEON_VOP_ENV(qshl_u16, neon_u16, 2)
+NEON_VOP_ENV(qshl_u32, neon_u32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_qshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
+{
+    int8_t shift = (int8_t)shiftop;
+    if (shift >= 64) {
+        if (val) {
+            val = ~(uint64_t)0;
+            SET_QC();
+        } else {
+            val = 0;
+        }
+    } else if (shift <= -64) {
+        val = 0;
+    } else if (shift < 0) {
+        val >>= -shift;
+    } else {
+        uint64_t tmp = val;
+        val <<= shift;
+        if ((val >> shift) != tmp) {
+            SET_QC();
+            val = ~(uint64_t)0;
+        }
+    }
+    return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+    int8_t tmp; \
+    tmp = (int8_t)src2; \
+    if (tmp >= (int)sizeof(src1) * 8) { \
+        if (src1) { \
+            SET_QC(); \
+            dest = (src1 >> 31) ^ ~(1u << (sizeof(src1) * 8 - 1)); \
+        } else { \
+            dest = 0; \
+        } \
+    } else if (tmp <= -(int)sizeof(src1) * 8) { \
+        dest = src1 >> 31; \
+    } else if (tmp < 0) { \
+        dest = src1 >> -tmp; \
+    } else { \
+        dest = src1 << tmp; \
+        if ((dest >> tmp) != src1) { \
+            SET_QC(); \
+            dest = (src1 >> 31) ^ ~(1u << (sizeof(src1) * 8 - 1)); \
+        } \
+    }} while (0)
+NEON_VOP_ENV(qshl_s8, neon_s8, 4)
+NEON_VOP_ENV(qshl_s16, neon_s16, 2)
+NEON_VOP_ENV(qshl_s32, neon_s32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_qshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
+{
+    int8_t shift = (uint8_t)shiftop;
+    int64_t val = valop;
+    if (shift >= 64) {
+        if (val) {
+            SET_QC();
+            val = (val >> 63) ^ ~SIGNBIT64;
+        }
+    } else if (shift <= -64) {
+        val >>= 63;
+    } else if (shift < 0) {
+        val >>= -shift;
+    } else {
+        int64_t tmp = val;
+        val <<= shift;
+        if ((val >> shift) != tmp) {
+            SET_QC();
+            val = (tmp >> 63) ^ ~SIGNBIT64;
+        }
+    }
+    return val;
+}
+
+
+/* FIXME: This is wrong.  */
+#define NEON_FN(dest, src1, src2) do { \
+    int8_t tmp; \
+    tmp = (int8_t)src2; \
+    if (tmp < 0) { \
+        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
+    } else { \
+        dest = src1 << tmp; \
+        if ((dest >> tmp) != src1) { \
+            SET_QC(); \
+            dest = ~0; \
+        } \
+    }} while (0)
+NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
+NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
+NEON_VOP_ENV(qrshl_u32, neon_u32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_qrshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
+{
+    int8_t shift = (int8_t)shiftop;
+    if (shift < 0) {
+        val = (val + ((uint64_t)1 << (-1 - shift))) >> -shift;
+    } else {
+        uint64_t tmp = val;
+        val <<= shift;
+        if ((val >> shift) != tmp) {
+            SET_QC();
+            val = ~0;
+        }
+    }
+    return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+    int8_t tmp; \
+    tmp = (int8_t)src2; \
+    if (tmp < 0) { \
+        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
+    } else { \
+        dest = src1 << tmp; \
+        if ((dest >> tmp) != src1) { \
+            SET_QC(); \
+            dest = src1 >> 31; \
+        } \
+    }} while (0)
+NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
+NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
+NEON_VOP_ENV(qrshl_s32, neon_s32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_qrshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
+{
+    int8_t shift = (uint8_t)shiftop;
+    int64_t val = valop;
+
+    if (shift < 0) {
+        val = (val + ((int64_t)1 << (-1 - shift))) >> -shift;
+    } else {
+        int64_t tmp = val;
+        val <<= shift;
+        if ((val >> shift) != tmp) {
+            SET_QC();
+            val = (tmp >> 63) ^ ~SIGNBIT64;
+        }
+    }
+    return val;
+}
+
+uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
+{
+    uint32_t mask;
+    mask = (a ^ b) & 0x80808080u;
+    a &= ~0x80808080u;
+    b &= ~0x80808080u;
+    return (a + b) ^ mask;
+}
+
+uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b)
+{
+    uint32_t mask;
+    mask = (a ^ b) & 0x80008000u;
+    a &= ~0x80008000u;
+    b &= ~0x80008000u;
+    return (a + b) ^ mask;
+}
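
Both functions use the usual SWAR trick: clearing the top bit of every lane guarantees the lane additions cannot carry into a neighbour, bit 7 (or 15) of the masked sum is then exactly the carry into that position, and XORing back the operands' top bits reconstructs the true result bit. A quick illustration:

    /* per-lane 0xff + 0x01 wraps to 0x00 without disturbing the
       adjacent lanes, unlike a plain 32-bit addition */
    helper_neon_add_u8(0x00ff00ff, 0x00010001);  /* == 0x00000000 */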
+
+#define NEON_FN(dest, src1, src2) dest = src1 + src2
+NEON_POP(padd_u8, neon_u8, 4)
+NEON_POP(padd_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = src1 - src2
+NEON_VOP(sub_u8, neon_u8, 4)
+NEON_VOP(sub_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = src1 * src2
+NEON_VOP(mul_u8, neon_u8, 4)
+NEON_VOP(mul_u16, neon_u16, 2)
+#undef NEON_FN
+
+/* Polynomial multiplication is like integer multiplication except the
+   partial products are XORed, not added.  */
+uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2)
+{
+    uint32_t mask;
+    uint32_t result;
+    result = 0;
+    while (op1) {
+        mask = 0;
+        if (op1 & 1)
+            mask |= 0xff;
+        if (op1 & (1 << 8))
+            mask |= (0xff << 8);
+        if (op1 & (1 << 16))
+            mask |= (0xff << 16);
+        if (op1 & (1 << 24))
+            mask |= (0xff << 24);
+        result ^= op2 & mask;
+        op1 = (op1 >> 1) & 0x7f7f7f7f;
+        op2 = (op2 << 1) & 0xfefefefe;
+    }
+    return result;
+}
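
A worked GF(2)[x] example on a single lane: each set bit of op1 contributes a shifted copy of op2, the copies are combined with XOR, and bits shifted past the top of a lane are discarded:

    /* (x + 1) * (x^2 + 1) = x^3 + x^2 + x + 1 */
    helper_neon_mul_p8(0x03, 0x05);  /* == 0x0f */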
+
+#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
+NEON_VOP(tst_u8, neon_u8, 4)
+NEON_VOP(tst_u16, neon_u16, 2)
+NEON_VOP(tst_u32, neon_u32, 1)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0
+NEON_VOP(ceq_u8, neon_u8, 4)
+NEON_VOP(ceq_u16, neon_u16, 2)
+NEON_VOP(ceq_u32, neon_u32, 1)
+#undef NEON_FN
+
+#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src
+NEON_VOP1(abs_s8, neon_s8, 4)
+NEON_VOP1(abs_s16, neon_s16, 2)
+#undef NEON_FN
+
+/* Count Leading Sign/Zero Bits.  */
+static inline int do_clz8(uint8_t x)
+{
+    int n;
+    for (n = 8; x; n--)
+        x >>= 1;
+    return n;
+}
+
+static inline int do_clz16(uint16_t x)
+{
+    int n;
+    for (n = 16; x; n--)
+        x >>= 1;
+    return n;
+}
+
+#define NEON_FN(dest, src, dummy) dest = do_clz8(src)
+NEON_VOP1(clz_u8, neon_u8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src, dummy) dest = do_clz16(src)
+NEON_VOP1(clz_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1
+NEON_VOP1(cls_s8, neon_s8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1
+NEON_VOP1(cls_s16, neon_s16, 2)
+#undef NEON_FN
+
+uint32_t HELPER(neon_cls_s32)(uint32_t x)
+{
+    int count;
+    if ((int32_t)x < 0)
+        x = ~x;
+    for (count = 32; x; count--)
+        x = x >> 1;
+    return count - 1;
+}
+
+/* Bit count.  */
+uint32_t HELPER(neon_cnt_u8)(uint32_t x)
+{
+    x = (x & 0x55555555) + ((x >>  1) & 0x55555555);
+    x = (x & 0x33333333) + ((x >>  2) & 0x33333333);
+    x = (x & 0x0f0f0f0f) + ((x >>  4) & 0x0f0f0f0f);
+    return x;
+}
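
This is the classic parallel popcount, stopped at byte granularity so that each lane ends up holding its own bit count:

    /* bytes {0x0f, 0xf0, 0x00, 0xff} -> counts {4, 4, 0, 8} */
    helper_neon_cnt_u8(0xff00f00f);  /* == 0x08000404 */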
+
+#define NEON_QDMULH16(dest, src1, src2, round) do { \
+    uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
+    if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
+        SET_QC(); \
+        tmp = (tmp >> 31) ^ ~SIGNBIT; \
+    } else { \
+        tmp <<= 1; \
+    } \
+    if (round) { \
+        int32_t old = tmp; \
+        tmp += 1 << 15; \
+        if ((int32_t)tmp < old) { \
+            SET_QC(); \
+            tmp = SIGNBIT - 1; \
+        } \
+    } \
+    dest = tmp >> 16; \
+    } while(0)
+#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
+NEON_VOP_ENV(qdmulh_s16, neon_s16, 2)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
+NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)
+#undef NEON_FN
+#undef NEON_QDMULH16
+
+#define NEON_QDMULH32(dest, src1, src2, round) do { \
+    uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
+    if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
+        SET_QC(); \
+        tmp = (tmp >> 63) ^ ~SIGNBIT64; \
+    } else { \
+        tmp <<= 1; \
+    } \
+    if (round) { \
+        int64_t old = tmp; \
+        tmp += (int64_t)1 << 31; \
+        if ((int64_t)tmp < old) { \
+            SET_QC(); \
+            tmp = SIGNBIT64 - 1; \
+        } \
+    } \
+    dest = tmp >> 32; \
+    } while(0)
+#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
+NEON_VOP_ENV(qdmulh_s32, neon_s32, 1)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
+NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)
+#undef NEON_FN
+#undef NEON_QDMULH32
+
+uint32_t HELPER(neon_narrow_u8)(uint64_t x)
+{
+    return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u)
+           | ((x >> 24) & 0xff000000u);
+}
+
+uint32_t HELPER(neon_narrow_u16)(uint64_t x)
+{
+    return (x & 0xffffu) | ((x >> 16) & 0xffff0000u);
+}
+
+uint32_t HELPER(neon_narrow_high_u8)(uint64_t x)
+{
+    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
+            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
+}
+
+uint32_t HELPER(neon_narrow_high_u16)(uint64_t x)
+{
+    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
+}
+
+uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x)
+{
+    x &= 0xff80ff80ff80ff80ull;
+    x += 0x0080008000800080ull;
+    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
+            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
+}
+
+uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
+{
+    x &= 0xffff8000ffff8000ull;
+    x += 0x0000800000008000ull;
+    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
+}
+
+uint32_t HELPER(neon_narrow_sat_u8)(CPUState *env, uint64_t x)
+{
+    uint16_t s;
+    uint8_t d;
+    uint32_t res = 0;
+#define SAT8(n) \
+    s = x >> n; \
+    if (s > 0xff) { \
+        d = 0xff; \
+        SET_QC(); \
+    } else  { \
+        d = s; \
+    } \
+    res |= (uint32_t)d << (n / 2);
+
+    SAT8(0);
+    SAT8(16);
+    SAT8(32);
+    SAT8(48);
+#undef SAT8
+    return res;
+}
+
+uint32_t HELPER(neon_narrow_sat_s8)(CPUState *env, uint64_t x)
+{
+    int16_t s;
+    uint8_t d;
+    uint32_t res = 0;
+#define SAT8(n) \
+    s = x >> n; \
+    if (s != (int8_t)s) { \
+        d = (s >> 15) ^ 0x7f; \
+        SET_QC(); \
+    } else  { \
+        d = s; \
+    } \
+    res |= (uint32_t)d << (n / 2);
+
+    SAT8(0);
+    SAT8(16);
+    SAT8(32);
+    SAT8(48);
+#undef SAT8
+    return res;
+}
+
+uint32_t HELPER(neon_narrow_sat_u16)(CPUState *env, uint64_t x)
+{
+    uint32_t high;
+    uint32_t low;
+    low = x;
+    if (low > 0xffff) {
+        low = 0xffff;
+        SET_QC();
+    }
+    high = x >> 32;
+    if (high > 0xffff) {
+        high = 0xffff;
+        SET_QC();
+    }
+    return low | (high << 16);
+}
+
+uint32_t HELPER(neon_narrow_sat_s16)(CPUState *env, uint64_t x)
+{
+    int32_t low;
+    int32_t high;
+    low = x;
+    if (low != (int16_t)low) {
+        low = (low >> 31) ^ 0x7fff;
+        SET_QC();
+    }
+    high = x >> 32;
+    if (high != (int16_t)high) {
+        high = (high >> 31) ^ 0x7fff;
+        SET_QC();
+    }
+    return (uint16_t)low | (high << 16);
+}
+
+uint32_t HELPER(neon_narrow_sat_u32)(CPUState *env, uint64_t x)
+{
+    if (x > 0xffffffffu) {
+        SET_QC();
+        return 0xffffffffu;
+    }
+    return x;
+}
+
+uint32_t HELPER(neon_narrow_sat_s32)(CPUState *env, uint64_t x)
+{
+    if ((int64_t)x != (int32_t)x) {
+        SET_QC();
+        return (x >> 63) ^ 0x7fffffff;
+    }
+    return x;
+}
+
+uint64_t HELPER(neon_widen_u8)(uint32_t x)
+{
+    uint64_t tmp;
+    uint64_t ret;
+    ret = (uint8_t)x;
+    tmp = (uint8_t)(x >> 8);
+    ret |= tmp << 16;
+    tmp = (uint8_t)(x >> 16);
+    ret |= tmp << 32;
+    tmp = (uint8_t)(x >> 24);
+    ret |= tmp << 48;
+    return ret;
+}
+
+uint64_t HELPER(neon_widen_s8)(uint32_t x)
+{
+    uint64_t tmp;
+    uint64_t ret;
+    ret = (uint16_t)(int8_t)x;
+    tmp = (uint16_t)(int8_t)(x >> 8);
+    ret |= tmp << 16;
+    tmp = (uint16_t)(int8_t)(x >> 16);
+    ret |= tmp << 32;
+    tmp = (uint16_t)(int8_t)(x >> 24);
+    ret |= tmp << 48;
+    return ret;
+}
+
+uint64_t HELPER(neon_widen_u16)(uint32_t x)
+{
+    uint64_t high = (uint16_t)(x >> 16);
+    return ((uint16_t)x) | (high << 32);
+}
+
+uint64_t HELPER(neon_widen_s16)(uint32_t x)
+{
+    uint64_t high = (int16_t)(x >> 16);
+    return ((uint32_t)(int16_t)x) | (high << 32);
+}
+
+uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b)
+{
+    uint64_t mask;
+    mask = (a ^ b) & 0x8000800080008000ull;
+    a &= ~0x8000800080008000ull;
+    b &= ~0x8000800080008000ull;
+    return (a + b) ^ mask;
+}
+
+uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
+{
+    uint64_t mask;
+    mask = (a ^ b) & 0x8000000080000000ull;
+    a &= ~0x8000000080000000ull;
+    b &= ~0x8000000080000000ull;
+    return (a + b) ^ mask;
+}
+
+uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
+{
+    uint64_t tmp;
+    uint64_t tmp2;
+
+    tmp = a & 0x0000ffff0000ffffull;
+    tmp += (a >> 16) & 0x0000ffff0000ffffull;
+    tmp2 = b & 0xffff0000ffff0000ull;
+    tmp2 += (b << 16) & 0xffff0000ffff0000ull;
+    return    ( tmp         & 0xffff)
+            | ((tmp  >> 16) & 0xffff0000ull)
+            | ((tmp2 << 16) & 0xffff00000000ull)
+            | ( tmp2        & 0xffff000000000000ull);
+}
+
+uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
+{
+    uint32_t low = a + (a >> 32);
+    uint32_t high = b + (b >> 32);
+    return low + ((uint64_t)high << 32);
+}
+
+uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b)
+{
+    uint64_t mask;
+    mask = (a ^ ~b) & 0x8000800080008000ull;
+    a |= 0x8000800080008000ull;
+    b &= ~0x8000800080008000ull;
+    return (a - b) ^ mask;
+}
+
+uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
+{
+    uint64_t mask;
+    mask = (a ^ ~b) & 0x8000000080000000ull;
+    a |= 0x8000000080000000ull;
+    b &= ~0x8000000080000000ull;
+    return (a - b) ^ mask;
+}
+
+uint64_t HELPER(neon_addl_saturate_s32)(CPUState *env, uint64_t a, uint64_t b)
+{
+    uint32_t x, y;
+    uint32_t low, high;
+
+    x = a;
+    y = b;
+    low = x + y;
+    if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
+        SET_QC();
+        low = ((int32_t)x >> 31) ^ ~SIGNBIT;
+    }
+    x = a >> 32;
+    y = b >> 32;
+    high = x + y;
+    if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
+        SET_QC();
+        high = ((int32_t)x >> 31) ^ ~SIGNBIT;
+    }
+    return low | ((uint64_t)high << 32);
+}
+
+uint64_t HELPER(neon_addl_saturate_s64)(CPUState *env, uint64_t a, uint64_t b)
+{
+    uint64_t result;
+
+    result = a + b;
+    if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) {
+        SET_QC();
+        result = ((int64_t)a >> 63) ^ ~SIGNBIT64;
+    }
+    return result;
+}
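
The overflow test is the standard two's-complement rule: signed addition overflows exactly when the operands have the same sign and the result's sign differs, and the saturated value then follows the first operand's sign. A hypothetical check (given some CPUState *env):

    helper_neon_addl_saturate_s64(env, 0x7fffffffffffffffull, 1);
    /* overflows positively: returns 0x7fffffffffffffff and sets QC */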
+
+#define DO_ABD(dest, x, y, type) do { \
+    type tmp_x = x; \
+    type tmp_y = y; \
+    dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
+    } while(0)
+
+uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b)
+{
+    uint64_t tmp;
+    uint64_t result;
+    DO_ABD(result, a, b, uint8_t);
+    DO_ABD(tmp, a >> 8, b >> 8, uint8_t);
+    result |= tmp << 16;
+    DO_ABD(tmp, a >> 16, b >> 16, uint8_t);
+    result |= tmp << 32;
+    DO_ABD(tmp, a >> 24, b >> 24, uint8_t);
+    result |= tmp << 48;
+    return result;
+}
+
+uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b)
+{
+    uint64_t tmp;
+    uint64_t result;
+    DO_ABD(result, a, b, int8_t);
+    DO_ABD(tmp, a >> 8, b >> 8, int8_t);
+    result |= tmp << 16;
+    DO_ABD(tmp, a >> 16, b >> 16, int8_t);
+    result |= tmp << 32;
+    DO_ABD(tmp, a >> 24, b >> 24, int8_t);
+    result |= tmp << 48;
+    return result;
+}
+
+uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b)
+{
+    uint64_t tmp;
+    uint64_t result;
+    DO_ABD(result, a, b, uint16_t);
+    DO_ABD(tmp, a >> 16, b >> 16, uint16_t);
+    return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b)
+{
+    uint64_t tmp;
+    uint64_t result;
+    DO_ABD(result, a, b, int16_t);
+    DO_ABD(tmp, a >> 16, b >> 16, int16_t);
+    return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b)
+{
+    uint64_t result;
+    DO_ABD(result, a, b, uint32_t);
+    return result;
+}
+
+uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b)
+{
+    uint64_t result;
+    DO_ABD(result, a, b, int32_t);
+    return result;
+}
+#undef DO_ABD
+
+/* Widening multiply. Named type is the source type.  */
+#define DO_MULL(dest, x, y, type1, type2) do { \
+    type1 tmp_x = x; \
+    type1 tmp_y = y; \
+    dest = (type2)((type2)tmp_x * (type2)tmp_y); \
+    } while(0)
+
+uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b)
+{
+    uint64_t tmp;
+    uint64_t result;
+
+    DO_MULL(result, a, b, uint8_t, uint16_t);
+    DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t);
+    result |= tmp << 16;
+    DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t);
+    result |= tmp << 32;
+    DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t);
+    result |= tmp << 48;
+    return result;
+}
+
+uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b)
+{
+    uint64_t tmp;
+    uint64_t result;
+
+    DO_MULL(result, a, b, int8_t, uint16_t);
+    DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t);
+    result |= tmp << 16;
+    DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t);
+    result |= tmp << 32;
+    DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t);
+    result |= tmp << 48;
+    return result;
+}
+
+uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b)
+{
+    uint64_t tmp;
+    uint64_t result;
+
+    DO_MULL(result, a, b, uint16_t, uint32_t);
+    DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
+    return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b)
+{
+    uint64_t tmp;
+    uint64_t result;
+
+    DO_MULL(result, a, b, int16_t, uint32_t);
+    DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t);
+    return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_negl_u16)(uint64_t x)
+{
+    uint16_t tmp;
+    uint64_t result;
+    result = (uint16_t)-x;
+    tmp = -(x >> 16);
+    result |= (uint64_t)tmp << 16;
+    tmp = -(x >> 32);
+    result |= (uint64_t)tmp << 32;
+    tmp = -(x >> 48);
+    result |= (uint64_t)tmp << 48;
+    return result;
+}
+
+uint64_t HELPER(neon_negl_u32)(uint64_t x)
+{
+    uint32_t low = -x;
+    uint32_t high = -(x >> 32);
+    return low | ((uint64_t)high << 32);
+}
+
+/* FIXME:  There should be a native op for this.  */
+uint64_t HELPER(neon_negl_u64)(uint64_t x)
+{
+    return -x;
+}
+
+/* Saturating sign manipulation.  */
+/* ??? Make these use NEON_VOP1 */
+#define DO_QABS8(x) do { \
+    if (x == (int8_t)0x80) { \
+        x = 0x7f; \
+        SET_QC(); \
+    } else if (x < 0) { \
+        x = -x; \
+    }} while (0)
+uint32_t HELPER(neon_qabs_s8)(CPUState *env, uint32_t x)
+{
+    neon_s8 vec;
+    NEON_UNPACK(neon_s8, vec, x);
+    DO_QABS8(vec.v1);
+    DO_QABS8(vec.v2);
+    DO_QABS8(vec.v3);
+    DO_QABS8(vec.v4);
+    NEON_PACK(neon_s8, x, vec);
+    return x;
+}
+#undef DO_QABS8
+
+#define DO_QNEG8(x) do { \
+    if (x == (int8_t)0x80) { \
+        x = 0x7f; \
+        SET_QC(); \
+    } else { \
+        x = -x; \
+    }} while (0)
+uint32_t HELPER(neon_qneg_s8)(CPUState *env, uint32_t x)
+{
+    neon_s8 vec;
+    NEON_UNPACK(neon_s8, vec, x);
+    DO_QNEG8(vec.v1);
+    DO_QNEG8(vec.v2);
+    DO_QNEG8(vec.v3);
+    DO_QNEG8(vec.v4);
+    NEON_PACK(neon_s8, x, vec);
+    return x;
+}
+#undef DO_QNEG8
+
+#define DO_QABS16(x) do { \
+    if (x == (int16_t)0x8000) { \
+        x = 0x7fff; \
+        SET_QC(); \
+    } else if (x < 0) { \
+        x = -x; \
+    }} while (0)
+uint32_t HELPER(neon_qabs_s16)(CPUState *env, uint32_t x)
+{
+    neon_s16 vec;
+    NEON_UNPACK(neon_s16, vec, x);
+    DO_QABS16(vec.v1);
+    DO_QABS16(vec.v2);
+    NEON_PACK(neon_s16, x, vec);
+    return x;
+}
+#undef DO_QABS16
+
+#define DO_QNEG16(x) do { \
+    if (x == (int16_t)0x8000) { \
+        x = 0x7fff; \
+        SET_QC(); \
+    } else { \
+        x = -x; \
+    }} while (0)
+uint32_t HELPER(neon_qneg_s16)(CPUState *env, uint32_t x)
+{
+    neon_s16 vec;
+    NEON_UNPACK(neon_s16, vec, x);
+    DO_QNEG16(vec.v1);
+    DO_QNEG16(vec.v2);
+    NEON_PACK(neon_s16, x, vec);
+    return x;
+}
+#undef DO_QNEG16
+
+uint32_t HELPER(neon_qabs_s32)(CPUState *env, uint32_t x)
+{
+    if (x == SIGNBIT) {
+        SET_QC();
+        x = ~SIGNBIT;
+    } else if ((int32_t)x < 0) {
+        x = -x;
+    }
+    return x;
+}
+
+uint32_t HELPER(neon_qneg_s32)(CPUState *env, uint32_t x)
+{
+    if (x == SIGNBIT) {
+        SET_QC();
+        x = ~SIGNBIT;
+    } else {
+        x = -x;
+    }
+    return x;
+}
+
+/* NEON Float helpers.  */
+uint32_t HELPER(neon_min_f32)(uint32_t a, uint32_t b)
+{
+    float32 f0 = vfp_itos(a);
+    float32 f1 = vfp_itos(b);
+    return (float32_compare_quiet(f0, f1, NFS) == -1) ? a : b;
+}
+
+uint32_t HELPER(neon_max_f32)(uint32_t a, uint32_t b)
+{
+    float32 f0 = vfp_itos(a);
+    float32 f1 = vfp_itos(b);
+    return (float32_compare_quiet(f0, f1, NFS) == 1) ? a : b;
+}
+
+uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b)
+{
+    float32 f0 = vfp_itos(a);
+    float32 f1 = vfp_itos(b);
+    return vfp_stoi((float32_compare_quiet(f0, f1, NFS) == 1)
+                    ? float32_sub(f0, f1, NFS)
+                    : float32_sub(f1, f0, NFS));
+}
+
+uint32_t HELPER(neon_add_f32)(uint32_t a, uint32_t b)
+{
+    return vfp_stoi(float32_add(vfp_itos(a), vfp_itos(b), NFS));
+}
+
+uint32_t HELPER(neon_sub_f32)(uint32_t a, uint32_t b)
+{
+    return vfp_stoi(float32_sub(vfp_itos(a), vfp_itos(b), NFS));
+}
+
+uint32_t HELPER(neon_mul_f32)(uint32_t a, uint32_t b)
+{
+    return vfp_stoi(float32_mul(vfp_itos(a), vfp_itos(b), NFS));
+}
+
+/* Floating point comparisons produce an integer result.  */
+#define NEON_VOP_FCMP(name, cmp) \
+uint32_t HELPER(neon_##name)(uint32_t a, uint32_t b) \
+{ \
+    if (float32_compare_quiet(vfp_itos(a), vfp_itos(b), NFS) cmp 0) \
+        return ~0; \
+    else \
+        return 0; \
+}
+
+NEON_VOP_FCMP(ceq_f32, ==)
+NEON_VOP_FCMP(cge_f32, >=)
+NEON_VOP_FCMP(cgt_f32, >)
+
+uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b)
+{
+    float32 f0 = float32_abs(vfp_itos(a));
+    float32 f1 = float32_abs(vfp_itos(b));
+    return (float32_compare_quiet(f0, f1, NFS) >= 0) ? ~0 : 0;
+}
+
+uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b)
+{
+    float32 f0 = float32_abs(vfp_itos(a));
+    float32 f1 = float32_abs(vfp_itos(b));
+    return (float32_compare_quiet(f0, f1, NFS) > 0) ? ~0 : 0;
+}
index 57086e349cefecd7005f17cd651ac9475f095e8c..3937de9a4389256eb9f690fd2f0a66bc95e914ed 100644 (file)
@@ -32,7 +32,5 @@
 #include "op_mem.h"
 #endif
 
-#include "op_neon.h"
-
 /* iwMMXt support */
 #include "op_iwmmxt.c"
index d1ce3a657c1050bdaa6adc039231a47d21175869..20dafae8a439db1dd0cd9abb8bd13db16cd6ff24 100644 (file)
@@ -20,6 +20,9 @@
 #include "exec.h"
 #include "helpers.h"
 
+#define SIGNBIT (uint32_t)0x80000000
+#define SIGNBIT64 ((uint64_t)1 << 63)
+
 void raise_exception(int tt)
 {
     env->exception_index = tt;
@@ -116,7 +119,8 @@ void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
 }
 #endif
 
-#define SIGNBIT (uint32_t)0x80000000
+/* FIXME: Pass an explicit pointer to QF in CPUState, and move the saturating
+   instructions into helper.c.  */
 uint32_t HELPER(add_setq)(uint32_t a, uint32_t b)
 {
     uint32_t res = a + b;
@@ -451,3 +455,114 @@ uint32_t HELPER(ror_cc)(uint32_t x, uint32_t i)
     }
 }
 
+uint64_t HELPER(neon_add_saturate_s64)(uint64_t src1, uint64_t src2)
+{
+    uint64_t res;
+
+    res = src1 + src2;
+    if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
+        env->QF = 1;
+        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
+    }
+    return res;
+}
+
+uint64_t HELPER(neon_add_saturate_u64)(uint64_t src1, uint64_t src2)
+{
+    uint64_t res;
+
+    res = src1 + src2;
+    if (res < src1) {
+        env->QF = 1;
+        res = ~(uint64_t)0;
+    }
+    return res;
+}
+
+uint64_t HELPER(neon_sub_saturate_s64)(uint64_t src1, uint64_t src2)
+{
+    uint64_t res;
+
+    res = src1 - src2;
+    if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
+        env->QF = 1;
+        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
+    }
+    return res;
+}
+
+uint64_t HELPER(neon_sub_saturate_u64)(uint64_t src1, uint64_t src2)
+{
+    uint64_t res;
+
+    if (src1 < src2) {
+        env->QF = 1;
+        res = 0;
+    } else {
+        res = src1 - src2;
+    }
+    return res;
+}
+
+/* These need to return a pair of values, so still use T0/T1.  */
+/* Transpose.  The argument order is rather strange, to avoid special-casing
+   the translation code.
+   On input T0 = rm, T1 = rd.  On output T0 = rd, T1 = rm.  */
+void HELPER(neon_trn_u8)(void)
+{
+    uint32_t rd;
+    uint32_t rm;
+    rd = ((T0 & 0x00ff00ff) << 8) | (T1 & 0x00ff00ff);
+    rm = ((T1 & 0xff00ff00) >> 8) | (T0 & 0xff00ff00);
+    T0 = rd;
+    T1 = rm;
+    FORCE_RET();
+}
+
+void HELPER(neon_trn_u16)(void)
+{
+    uint32_t rd;
+    uint32_t rm;
+    rd = (T0 << 16) | (T1 & 0xffff);
+    rm = (T1 >> 16) | (T0 & 0xffff0000);
+    T0 = rd;
+    T1 = rm;
+    FORCE_RET();
+}
+
+/* Worker routines for zip and unzip.  */
+void HELPER(neon_unzip_u8)(void)
+{
+    uint32_t rd;
+    uint32_t rm;
+    rd = (T0 & 0xff) | ((T0 >> 8) & 0xff00)
+         | ((T1 << 16) & 0xff0000) | ((T1 << 8) & 0xff000000);
+    rm = ((T0 >> 8) & 0xff) | ((T0 >> 16) & 0xff00)
+         | ((T1 << 8) & 0xff0000) | (T1 & 0xff000000);
+    T0 = rd;
+    T1 = rm;
+    FORCE_RET();
+}
+
+void HELPER(neon_zip_u8)(void)
+{
+    uint32_t rd;
+    uint32_t rm;
+    rd = (T0 & 0xff) | ((T1 << 8) & 0xff00)
+         | ((T0 << 16) & 0xff0000) | ((T1 << 24) & 0xff000000);
+    rm = ((T0 >> 16) & 0xff) | ((T1 >> 8) & 0xff00)
+         | ((T0 >> 8) & 0xff0000) | (T1 & 0xff000000);
+    T0 = rd;
+    T1 = rm;
+    FORCE_RET();
+}
+
+void HELPER(neon_zip_u16)(void)
+{
+    uint32_t tmp;
+
+    tmp = (T0 & 0xffff) | (T1 << 16);
+    T1 = (T1 & 0xffff0000) | (T0 >> 16);
+    T0 = tmp;
+    FORCE_RET();
+}
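
A worked zip_u16 example with hypothetical lane values: interleaving T0 = {0x5555, 0xaaaa} with T1 = {0x7777, 0xbbbb} (low lane first) yields T0 = {0x5555, 0x7777} and T1 = {0xaaaa, 0xbbbb}:

    /* before: T0 == 0xaaaa5555, T1 == 0xbbbb7777 */
    /* after:  T0 == 0x77775555, T1 == 0xbbbbaaaa */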
diff --git a/target-arm/op_neon.h b/target-arm/op_neon.h
deleted file mode 100644 (file)
index df3b7cb..0000000
+++ /dev/null
@@ -1,1690 +0,0 @@
-/*
- * ARM NEON vector operations.
- *
- * Copyright (c) 2007 CodeSourcery.
- * Written by Paul Brook
- *
- * This code is licenced under the GPL.
- */
-/* Note that for NEON an "l" prefix means it is a wide operation, unlike
-   scalar arm ops where it means a word size operation.  */
-
-#define SIGNBIT (uint32_t)0x80000000
-/* ??? NEON ops should probably have their own float status.  */
-#define NFS &env->vfp.fp_status
-#define NEON_OP(name) void OPPROTO op_neon_##name (void)
-
-/* Helper routines to perform bitwise copies between float and int.  */
-static inline float32 vfp_itos(uint32_t i)
-{
-    union {
-        uint32_t i;
-        float32 s;
-    } v;
-
-    v.i = i;
-    return v.s;
-}
-
-static inline uint32_t vfp_stoi(float32 s)
-{
-    union {
-        uint32_t i;
-        float32 s;
-    } v;
-
-    v.s = s;
-    return v.i;
-}
-
-NEON_OP(getreg_T0)
-{
-    T0 = *(uint32_t *)((char *) env + PARAM1);
-}
-
-NEON_OP(getreg_T1)
-{
-    T1 = *(uint32_t *)((char *) env + PARAM1);
-}
-
-NEON_OP(setreg_T0)
-{
-    *(uint32_t *)((char *) env + PARAM1) = T0;
-}
-
-NEON_OP(setreg_T1)
-{
-    *(uint32_t *)((char *) env + PARAM1) = T1;
-}
-
-#define NEON_TYPE1(name, type) \
-typedef struct \
-{ \
-    type v1; \
-} neon_##name;
-#ifdef WORDS_BIGENDIAN
-#define NEON_TYPE2(name, type) \
-typedef struct \
-{ \
-    type v2; \
-    type v1; \
-} neon_##name;
-#define NEON_TYPE4(name, type) \
-typedef struct \
-{ \
-    type v4; \
-    type v3; \
-    type v2; \
-    type v1; \
-} neon_##name;
-#else
-#define NEON_TYPE2(name, type) \
-typedef struct \
-{ \
-    type v1; \
-    type v2; \
-} neon_##name;
-#define NEON_TYPE4(name, type) \
-typedef struct \
-{ \
-    type v1; \
-    type v2; \
-    type v3; \
-    type v4; \
-} neon_##name;
-#endif
-
-NEON_TYPE4(s8, int8_t)
-NEON_TYPE4(u8, uint8_t)
-NEON_TYPE2(s16, int16_t)
-NEON_TYPE2(u16, uint16_t)
-NEON_TYPE1(s32, int32_t)
-NEON_TYPE1(u32, uint32_t)
-#undef NEON_TYPE4
-#undef NEON_TYPE2
-#undef NEON_TYPE1
-
-/* Copy from a uint32_t to a vector structure type.  */
-#define NEON_UNPACK(vtype, dest, val) do { \
-    union { \
-        vtype v; \
-        uint32_t i; \
-    } conv_u; \
-    conv_u.i = (val); \
-    dest = conv_u.v; \
-    } while(0)
-
-/* Copy from a vector structure type to a uint32_t.  */
-#define NEON_PACK(vtype, dest, val) do { \
-    union { \
-        vtype v; \
-        uint32_t i; \
-    } conv_u; \
-    conv_u.v = (val); \
-    dest = conv_u.i; \
-    } while(0)
-
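NEON_UNPACK and NEON_PACK are type puns through a union, so lane v1 is the least significant byte on little-endian hosts (hence the WORDS_BIGENDIAN variants above). A quick host-side illustration (assumes a little-endian host):

    #include <stdint.h>
    #include <stdio.h>

    /* Little-endian layout of the generated neon_u8 structure.  */
    typedef struct { uint8_t v1, v2, v3, v4; } neon_u8;

    int main(void)
    {
        union { neon_u8 v; uint32_t i; } conv_u;
        conv_u.i = 0x44332211;
        /* Prints 11 22 33 44: v1 is the low byte of the word.  */
        printf("%02x %02x %02x %02x\n",
               conv_u.v.v1, conv_u.v.v2, conv_u.v.v3, conv_u.v.v4);
        return 0;
    }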
-#define NEON_DO1 \
-    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
-#define NEON_DO2 \
-    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
-    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
-#define NEON_DO4 \
-    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
-    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
-    NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
-    NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);
-
-#define NEON_VOP(name, vtype, n) \
-NEON_OP(name) \
-{ \
-    vtype vsrc1; \
-    vtype vsrc2; \
-    vtype vdest; \
-    NEON_UNPACK(vtype, vsrc1, T0); \
-    NEON_UNPACK(vtype, vsrc2, T1); \
-    NEON_DO##n; \
-    NEON_PACK(vtype, T0, vdest); \
-    FORCE_RET(); \
-}
-
-#define NEON_VOP1(name, vtype, n) \
-NEON_OP(name) \
-{ \
-    vtype vsrc1; \
-    vtype vdest; \
-    NEON_UNPACK(vtype, vsrc1, T0); \
-    NEON_DO##n; \
-    NEON_PACK(vtype, T0, vdest); \
-    FORCE_RET(); \
-}
-
-/* Pairwise operations.  */
-/* For 32-bit elements each segment only contains a single element, so
-   the elementwise and pairwise operations are the same.  */
-#define NEON_PDO2 \
-    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
-    NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
-#define NEON_PDO4 \
-    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
-    NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
-    NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
-    NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4); \
-
-#define NEON_POP(name, vtype, n) \
-NEON_OP(name) \
-{ \
-    vtype vsrc1; \
-    vtype vsrc2; \
-    vtype vdest; \
-    NEON_UNPACK(vtype, vsrc1, T0); \
-    NEON_UNPACK(vtype, vsrc2, T1); \
-    NEON_PDO##n; \
-    NEON_PACK(vtype, T0, vdest); \
-    FORCE_RET(); \
-}
-
-#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
-NEON_VOP(hadd_s8, neon_s8, 4)
-NEON_VOP(hadd_u8, neon_u8, 4)
-NEON_VOP(hadd_s16, neon_s16, 2)
-NEON_VOP(hadd_u16, neon_u16, 2)
-#undef NEON_FN
-
-NEON_OP(hadd_s32)
-{
-    int32_t src1 = T0;
-    int32_t src2 = T1;
-    int32_t dest;
-
-    dest = (src1 >> 1) + (src2 >> 1);
-    if (src1 & src2 & 1)
-        dest++;
-    T0 = dest;
-    FORCE_RET();
-}
-
-NEON_OP(hadd_u32)
-{
-    uint32_t src1 = T0;
-    uint32_t src2 = T1;
-    uint32_t dest;
-
-    dest = (src1 >> 1) + (src2 >> 1);
-    if (src1 & src2 & 1)
-        dest++;
-    T0 = dest;
-    FORCE_RET();
-}
-
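The 32-bit halving add computes (src1 + src2) >> 1 without a 33-bit intermediate: both operands are pre-shifted and the carry out of the two discarded low bits is re-added. A quick host check of the identity (illustrative only):

    #include <stdint.h>
    #include <assert.h>

    static uint32_t hadd_u32(uint32_t a, uint32_t b)
    {
        /* (a >> 1) + (b >> 1), plus one if both low bits were set.  */
        return (a >> 1) + (b >> 1) + (a & b & 1);
    }

    int main(void)
    {
        /* A 64-bit reference cannot overflow, so compare against it.  */
        uint32_t a = 0xffffffffu, b = 0xfffffffdu;
        assert(hadd_u32(a, b) == (uint32_t)(((uint64_t)a + b) >> 1));
        return 0;
    }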
-#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
-NEON_VOP(rhadd_s8, neon_s8, 4)
-NEON_VOP(rhadd_u8, neon_u8, 4)
-NEON_VOP(rhadd_s16, neon_s16, 2)
-NEON_VOP(rhadd_u16, neon_u16, 2)
-#undef NEON_FN
-
-NEON_OP(rhadd_s32)
-{
-    int32_t src1 = T0;
-    int32_t src2 = T1;
-    int32_t dest;
-
-    dest = (src1 >> 1) + (src2 >> 1);
-    if ((src1 | src2) & 1)
-        dest++;
-    T0 = dest;
-    FORCE_RET();
-}
-
-NEON_OP(rhadd_u32)
-{
-    uint32_t src1 = T0;
-    uint32_t src2 = T1;
-    uint32_t dest;
-
-    dest = (src1 >> 1) + (src2 >> 1);
-    if ((src1 | src2) & 1)
-        dest++;
-    T0 = dest;
-    FORCE_RET();
-}
-
-#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
-NEON_VOP(hsub_s8, neon_s8, 4)
-NEON_VOP(hsub_u8, neon_u8, 4)
-NEON_VOP(hsub_s16, neon_s16, 2)
-NEON_VOP(hsub_u16, neon_u16, 2)
-#undef NEON_FN
-
-NEON_OP(hsub_s32)
-{
-    int32_t src1 = T0;
-    int32_t src2 = T1;
-    int32_t dest;
-
-    dest = (src1 >> 1) - (src2 >> 1);
-    if ((~src1) & src2 & 1)
-        dest--;
-    T0 = dest;
-    FORCE_RET();
-}
-
-NEON_OP(hsub_u32)
-{
-    uint32_t src1 = T0;
-    uint32_t src2 = T1;
-    uint32_t dest;
-
-    dest = (src1 >> 1) - (src2 >> 1);
-    if ((~src1) & src2 & 1)
-        dest--;
-    T0 = dest;
-    FORCE_RET();
-}
-
-#define NEON_USAT(dest, src1, src2, type) do { \
-    uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
-    if (tmp != (type)tmp) { \
-        env->QF = 1; \
-        dest = ~0; \
-    } else { \
-        dest = tmp; \
-    }} while(0)
-#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
-NEON_VOP(qadd_u8, neon_u8, 4)
-#undef NEON_FN
-#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
-NEON_VOP(qadd_u16, neon_u16, 2)
-#undef NEON_FN
-#undef NEON_USAT
-
-#define NEON_SSAT(dest, src1, src2, type) do { \
-    int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
-    if (tmp != (type)tmp) { \
-        env->QF = 1; \
-        if (src2 > 0) { \
-            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
-        } else { \
-            tmp = 1 << (sizeof(type) * 8 - 1); \
-        } \
-    } \
-    dest = tmp; \
-    } while(0)
-#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
-NEON_VOP(qadd_s8, neon_s8, 4)
-#undef NEON_FN
-#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
-NEON_VOP(qadd_s16, neon_s16, 2)
-#undef NEON_FN
-#undef NEON_SSAT
-
-#define NEON_USAT(dest, src1, src2, type) do { \
-    uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
-    if (tmp != (type)tmp) { \
-        env->QF = 1; \
-        dest = 0; \
-    } else { \
-        dest = tmp; \
-    }} while(0)
-#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
-NEON_VOP(qsub_u8, neon_u8, 4)
-#undef NEON_FN
-#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
-NEON_VOP(qsub_u16, neon_u16, 2)
-#undef NEON_FN
-#undef NEON_USAT
-
-#define NEON_SSAT(dest, src1, src2, type) do { \
-    int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
-    if (tmp != (type)tmp) { \
-        env->QF = 1; \
-        if (src2 < 0) { \
-            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
-        } else { \
-            tmp = 1 << (sizeof(type) * 8 - 1); \
-        } \
-    } \
-    dest = tmp; \
-    } while(0)
-#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
-NEON_VOP(qsub_s8, neon_s8, 4)
-#undef NEON_FN
-#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
-NEON_VOP(qsub_s16, neon_s16, 2)
-#undef NEON_FN
-#undef NEON_SSAT
-
-#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0
-NEON_VOP(cgt_s8, neon_s8, 4)
-NEON_VOP(cgt_u8, neon_u8, 4)
-NEON_VOP(cgt_s16, neon_s16, 2)
-NEON_VOP(cgt_u16, neon_u16, 2)
-NEON_VOP(cgt_s32, neon_s32, 1)
-NEON_VOP(cgt_u32, neon_u32, 1)
-#undef NEON_FN
-
-#define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0
-NEON_VOP(cge_s8, neon_s8, 4)
-NEON_VOP(cge_u8, neon_u8, 4)
-NEON_VOP(cge_s16, neon_s16, 2)
-NEON_VOP(cge_u16, neon_u16, 2)
-NEON_VOP(cge_s32, neon_s32, 1)
-NEON_VOP(cge_u32, neon_u32, 1)
-#undef NEON_FN
-
-#define NEON_FN(dest, src1, src2) do { \
-    int8_t tmp; \
-    tmp = (int8_t)src2; \
-    if (tmp < 0) { \
-        dest = src1 >> -tmp; \
-    } else { \
-        dest = src1 << tmp; \
-    }} while (0)
-NEON_VOP(shl_s8, neon_s8, 4)
-NEON_VOP(shl_u8, neon_u8, 4)
-NEON_VOP(shl_s16, neon_s16, 2)
-NEON_VOP(shl_u16, neon_u16, 2)
-NEON_VOP(shl_s32, neon_s32, 1)
-NEON_VOP(shl_u32, neon_u32, 1)
-#undef NEON_FN
-
-NEON_OP(shl_u64)
-{
-    int8_t shift = env->vfp.scratch[0];
-    uint64_t val = T0 | ((uint64_t)T1 << 32);
-    if (shift < 0) {
-        val >>= -shift;
-    } else {
-        val <<= shift;
-    }
-    T0 = val;
-    T1 = val >> 32;
-    FORCE_RET();
-}
-
-NEON_OP(shl_s64)
-{
-    int8_t shift = env->vfp.scratch[0];
-    int64_t val = T0 | ((uint64_t)T1 << 32);
-    if (shift < 0) {
-        val >>= -shift;
-    } else {
-        val <<= shift;
-    }
-    T0 = val;
-    T1 = val >> 32;
-    FORCE_RET();
-}
-
-#define NEON_FN(dest, src1, src2) do { \
-    int8_t tmp; \
-    tmp = (int8_t)src1; \
-    if (tmp < 0) { \
-        dest = (src2 + (1 << (-1 - tmp))) >> -tmp; \
-    } else { \
-        dest = src2 << tmp; \
-    }} while (0)
-
-NEON_VOP(rshl_s8, neon_s8, 4)
-NEON_VOP(rshl_u8, neon_u8, 4)
-NEON_VOP(rshl_s16, neon_s16, 2)
-NEON_VOP(rshl_u16, neon_u16, 2)
-NEON_VOP(rshl_s32, neon_s32, 1)
-NEON_VOP(rshl_u32, neon_u32, 1)
-#undef NEON_FN
-
-NEON_OP(rshl_u64)
-{
-    int8_t shift = env->vfp.scratch[0];
-    uint64_t val = T0 | ((uint64_t)T1 << 32);
-    if (shift < 0) {
-        val = (val + ((uint64_t)1 << (-1 - shift))) >> -shift;
-    } else {
-        val <<= shift;
-    }
-    T0 = val;
-    T1 = val >> 32;
-    FORCE_RET();
-}
-
-NEON_OP(rshl_s64)
-{
-    int8_t shift = env->vfp.scratch[0];
-    int64_t val = T0 | ((uint64_t)T1 << 32);
-    if (shift < 0) {
-        val = (val + ((int64_t)1 << (-1 - shift))) >> -shift;
-    } else {
-        val <<= shift;
-    }
-    T0 = val;
-    T1 = val >> 32;
-    FORCE_RET();
-}
-
-#define NEON_FN(dest, src1, src2) do { \
-    int8_t tmp; \
-    tmp = (int8_t)src1; \
-    if (tmp < 0) { \
-        dest = src2 >> -tmp; \
-    } else { \
-        dest = src2 << tmp; \
-        if ((dest >> tmp) != src2) { \
-            env->QF = 1; \
-            dest = ~0; \
-        } \
-    }} while (0)
-NEON_VOP(qshl_s8, neon_s8, 4)
-NEON_VOP(qshl_s16, neon_s16, 2)
-NEON_VOP(qshl_s32, neon_s32, 1)
-#undef NEON_FN
-
-NEON_OP(qshl_s64)
-{
-    int8_t shift = env->vfp.scratch[0];
-    int64_t val = T0 | ((uint64_t)T1 << 32);
-    if (shift < 0) {
-        val >>= -shift;
-    } else {
-        int64_t tmp = val;
-        val <<= shift;
-        if ((val >> shift) != tmp) {
-            env->QF = 1;
-            val = (tmp >> 63) ^ 0x7fffffffffffffffULL;
-        }
-    }
-    T0 = val;
-    T1 = val >> 32;
-    FORCE_RET();
-}
-
-#define NEON_FN(dest, src1, src2) do { \
-    int8_t tmp; \
-    tmp = (int8_t)src1; \
-    if (tmp < 0) { \
-        dest = src2 >> -tmp; \
-    } else { \
-        dest = src2 << tmp; \
-        if ((dest >> tmp) != src2) { \
-            env->QF = 1; \
-            dest = src2 >> 31; \
-        } \
-    }} while (0)
-NEON_VOP(qshl_u8, neon_u8, 4)
-NEON_VOP(qshl_u16, neon_u16, 2)
-NEON_VOP(qshl_u32, neon_u32, 1)
-#undef NEON_FN
-
-NEON_OP(qshl_u64)
-{
-    int8_t shift = env->vfp.scratch[0];
-    uint64_t val = T0 | ((uint64_t)T1 << 32);
-    if (shift < 0) {
-        val >>= -shift;
-    } else {
-        uint64_t tmp = val;
-        val <<= shift;
-        if ((val >> shift) != tmp) {
-            env->QF = 1;
-            val = ~(uint64_t)0;
-        }
-    }
-    T0 = val;
-    T1 = val >> 32;
-    FORCE_RET();
-}
-
-#define NEON_FN(dest, src1, src2) do { \
-    int8_t tmp; \
-    tmp = (int8_t)src1; \
-    if (tmp < 0) { \
-        dest = (src2 + (1 << (-1 - tmp))) >> -tmp; \
-    } else { \
-        dest = src2 << tmp; \
-        if ((dest >> tmp) != src2) { \
-            env->QF = 1; \
-            dest = ~0; \
-        } \
-    }} while (0)
-NEON_VOP(qrshl_s8, neon_s8, 4)
-NEON_VOP(qrshl_s16, neon_s16, 2)
-NEON_VOP(qrshl_s32, neon_s32, 1)
-#undef NEON_FN
-
-#define NEON_FN(dest, src1, src2) do { \
-    int8_t tmp; \
-    tmp = (int8_t)src1; \
-    if (tmp < 0) { \
-        dest = (src2 + (1 << (-1 - tmp))) >> -tmp; \
-    } else { \
-        dest = src2 << tmp; \
-        if ((dest >> tmp) != src2) { \
-            env->QF = 1; \
-            dest = src2 >> 31; \
-        } \
-    }} while (0)
-NEON_VOP(qrshl_u8, neon_u8, 4)
-NEON_VOP(qrshl_u16, neon_u16, 2)
-NEON_VOP(qrshl_u32, neon_u32, 1)
-#undef NEON_FN
-
-#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
-NEON_VOP(max_s8, neon_s8, 4)
-NEON_VOP(max_u8, neon_u8, 4)
-NEON_VOP(max_s16, neon_s16, 2)
-NEON_VOP(max_u16, neon_u16, 2)
-NEON_VOP(max_s32, neon_s32, 1)
-NEON_VOP(max_u32, neon_u32, 1)
-NEON_POP(pmax_s8, neon_s8, 4)
-NEON_POP(pmax_u8, neon_u8, 4)
-NEON_POP(pmax_s16, neon_s16, 2)
-NEON_POP(pmax_u16, neon_u16, 2)
-#undef NEON_FN
-
-NEON_OP(max_f32)
-{
-    float32 f0 = vfp_itos(T0);
-    float32 f1 = vfp_itos(T1);
-    T0 = (float32_compare_quiet(f0, f1, NFS) == 1) ? T0 : T1;
-    FORCE_RET();
-}
-
-#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
-NEON_VOP(min_s8, neon_s8, 4)
-NEON_VOP(min_u8, neon_u8, 4)
-NEON_VOP(min_s16, neon_s16, 2)
-NEON_VOP(min_u16, neon_u16, 2)
-NEON_VOP(min_s32, neon_s32, 1)
-NEON_VOP(min_u32, neon_u32, 1)
-NEON_POP(pmin_s8, neon_s8, 4)
-NEON_POP(pmin_u8, neon_u8, 4)
-NEON_POP(pmin_s16, neon_s16, 2)
-NEON_POP(pmin_u16, neon_u16, 2)
-#undef NEON_FN
-
-NEON_OP(min_f32)
-{
-    float32 f0 = vfp_itos(T0);
-    float32 f1 = vfp_itos(T1);
-    T0 = (float32_compare_quiet(f0, f1, NFS) == -1) ? T0 : T1;
-    FORCE_RET();
-}
-
-#define NEON_FN(dest, src1, src2) \
-    dest = (src1 > src2) ? (src1 - src2) : (src2 - src1)
-NEON_VOP(abd_s8, neon_s8, 4)
-NEON_VOP(abd_u8, neon_u8, 4)
-NEON_VOP(abd_s16, neon_s16, 2)
-NEON_VOP(abd_u16, neon_u16, 2)
-NEON_VOP(abd_s32, neon_s32, 1)
-NEON_VOP(abd_u32, neon_u32, 1)
-#undef NEON_FN
-
-NEON_OP(abd_f32)
-{
-    float32 f0 = vfp_itos(T0);
-    float32 f1 = vfp_itos(T1);
-    T0 = vfp_stoi((float32_compare_quiet(f0, f1, NFS) == 1)
-                  ? float32_sub(f0, f1, NFS)
-                  : float32_sub(f1, f0, NFS));
-    FORCE_RET();
-}
-
-#define NEON_FN(dest, src1, src2) dest = src1 + src2
-NEON_VOP(add_u8, neon_u8, 4)
-NEON_VOP(add_u16, neon_u16, 2)
-NEON_POP(padd_u8, neon_u8, 4)
-NEON_POP(padd_u16, neon_u16, 2)
-#undef NEON_FN
-
-NEON_OP(add_f32)
-{
-    T0 = vfp_stoi(float32_add(vfp_itos(T0), vfp_itos(T1), NFS));
-    FORCE_RET();
-}
-
-#define NEON_FN(dest, src1, src2) dest = src1 - src2
-NEON_VOP(sub_u8, neon_u8, 4)
-NEON_VOP(sub_u16, neon_u16, 2)
-#undef NEON_FN
-
-NEON_OP(sub_f32)
-{
-    T0 = vfp_stoi(float32_sub(vfp_itos(T0), vfp_itos(T1), NFS));
-    FORCE_RET();
-}
-
-#define NEON_FN(dest, src1, src2) dest = src2 - src1
-NEON_VOP(rsb_u8, neon_u8, 4)
-NEON_VOP(rsb_u16, neon_u16, 2)
-#undef NEON_FN
-
-NEON_OP(rsb_f32)
-{
-    T0 = vfp_stoi(float32_sub(vfp_itos(T1), vfp_itos(T0), NFS));
-    FORCE_RET();
-}
-
-#define NEON_FN(dest, src1, src2) dest = src1 * src2
-NEON_VOP(mul_u8, neon_u8, 4)
-NEON_VOP(mul_u16, neon_u16, 2)
-#undef NEON_FN
-
-NEON_OP(mul_f32)
-{
-    T0 = vfp_stoi(float32_mul(vfp_itos(T0), vfp_itos(T1), NFS));
-    FORCE_RET();
-}
-
-NEON_OP(mul_p8)
-{
-    T0 = helper_neon_mul_p8(T0, T1);
-}
-
-#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
-NEON_VOP(tst_u8, neon_u8, 4)
-NEON_VOP(tst_u16, neon_u16, 2)
-NEON_VOP(tst_u32, neon_u32, 1)
-#undef NEON_FN
-
-#define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0
-NEON_VOP(ceq_u8, neon_u8, 4)
-NEON_VOP(ceq_u16, neon_u16, 2)
-NEON_VOP(ceq_u32, neon_u32, 1)
-#undef NEON_FN
-
-#define NEON_QDMULH16(dest, src1, src2, round) do { \
-    uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
-    if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
-        env->QF = 1; \
-        tmp = (tmp >> 31) ^ ~SIGNBIT; \
-    } else { \
-        tmp <<= 1; \
-    } \
-    if (round) { \
-        int32_t old = tmp; \
-        tmp += 1 << 15; \
-        if ((int32_t)tmp < old) { \
-            env->QF = 1; \
-            tmp = SIGNBIT - 1; \
-        } \
-    } \
-    dest = tmp >> 16; \
-    } while(0)
-#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
-NEON_VOP(qdmulh_s16, neon_s16, 2)
-#undef NEON_FN
-#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
-NEON_VOP(qrdmulh_s16, neon_s16, 2)
-#undef NEON_FN
-#undef NEON_QDMULH16
-
-#define SIGNBIT64 ((uint64_t)1 << 63)
-#define NEON_QDMULH32(dest, src1, src2, round) do { \
-    uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
-    if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
-        env->QF = 1; \
-        tmp = (tmp >> 63) ^ ~SIGNBIT64; \
-    } else { \
-        tmp <<= 1; \
-    } \
-    if (round) { \
-        int64_t old = tmp; \
-        tmp += (int64_t)1 << 31; \
-        if ((int64_t)tmp < old) { \
-            env->QF = 1; \
-            tmp = SIGNBIT64 - 1; \
-        } \
-    } \
-    dest = tmp >> 32; \
-    } while(0)
-#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
-NEON_VOP(qdmulh_s32, neon_s32, 1)
-#undef NEON_FN
-#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
-NEON_VOP(qrdmulh_s32, neon_s32, 1)
-#undef NEON_FN
-#undef NEON_QDMULH32
-
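VQDMULH doubles the product and keeps the high half; the doubling can only overflow for (-0x8000) * (-0x8000), which is why a single sign-bit test suffices. A host-side sketch of the non-rounding 16-bit case (qdmulh_s16 is an illustrative standalone copy):

    #include <stdint.h>
    #include <assert.h>

    /* Saturating doubling multiply-high for one 16-bit lane, following
       NEON_QDMULH16 above with round == 0.  */
    static int16_t qdmulh_s16(int16_t a, int16_t b, int *qf)
    {
        uint32_t tmp = (uint32_t)((int32_t)a * b);
        if ((tmp ^ (tmp << 1)) & 0x80000000u) { /* only -0x8000 * -0x8000 */
            *qf = 1;
            tmp = 0x7fffffffu;
        } else {
            tmp <<= 1;
        }
        return (int16_t)(tmp >> 16);
    }

    int main(void)
    {
        int qf = 0;
        assert(qdmulh_s16(0x4000, 0x4000, &qf) == 0x2000);   /* 0.5 * 0.5 */
        assert(qdmulh_s16(-0x8000, -0x8000, &qf) == 0x7fff); /* saturates */
        assert(qf == 1);
        return 0;
    }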
-/* Floating point comparisons produce an integer result.  */
-#define NEON_VOP_FCMP(name, cmp) \
-NEON_OP(name) \
-{ \
-    if (float32_compare_quiet(vfp_itos(T0), vfp_itos(T1), NFS) cmp 0) \
-        T0 = -1; \
-    else \
-        T0 = 0; \
-    FORCE_RET(); \
-}
-
-NEON_VOP_FCMP(ceq_f32, ==)
-NEON_VOP_FCMP(cge_f32, >=)
-NEON_VOP_FCMP(cgt_f32, >)
-
-NEON_OP(acge_f32)
-{
-    float32 f0 = float32_abs(vfp_itos(T0));
-    float32 f1 = float32_abs(vfp_itos(T1));
-    T0 = (float32_compare_quiet(f0, f1,NFS) >= 0) ? -1 : 0;
-    FORCE_RET();
-}
-
-NEON_OP(acgt_f32)
-{
-    float32 f0 = float32_abs(vfp_itos(T0));
-    float32 f1 = float32_abs(vfp_itos(T1));
-    T0 = (float32_compare_quiet(f0, f1, NFS) > 0) ? -1 : 0;
-    FORCE_RET();
-}
-
-/* Narrowing instructions.  The named type is the destination type.  */
-NEON_OP(narrow_u8)
-{
-    T0 = (T0 & 0xff) | ((T0 >> 8) & 0xff00)
-         | ((T1 << 16) & 0xff0000) | (T1 << 24);
-    FORCE_RET();
-}
-
-NEON_OP(narrow_sat_u8)
-{
-    neon_u16 src;
-    neon_u8 dest;
-#define SAT8(d, s) \
-    if (s > 0xff) { \
-        d = 0xff; \
-        env->QF = 1; \
-    } else  { \
-        d = s; \
-    }
-
-    NEON_UNPACK(neon_u16, src, T0);
-    SAT8(dest.v1, src.v1);
-    SAT8(dest.v2, src.v2);
-    NEON_UNPACK(neon_u16, src, T1);
-    SAT8(dest.v3, src.v1);
-    SAT8(dest.v4, src.v2);
-    NEON_PACK(neon_u8, T0, dest);
-    FORCE_RET();
-#undef SAT8
-}
-
-NEON_OP(narrow_sat_s8)
-{
-    neon_s16 src;
-    neon_s8 dest;
-#define SAT8(d, s) \
-    if (s != (uint8_t)s) { \
-        d = (s >> 15) ^ 0x7f; \
-        env->QF = 1; \
-    } else  { \
-        d = s; \
-    }
-
-    NEON_UNPACK(neon_s16, src, T0);
-    SAT8(dest.v1, src.v1);
-    SAT8(dest.v2, src.v2);
-    NEON_UNPACK(neon_s16, src, T1);
-    SAT8(dest.v3, src.v1);
-    SAT8(dest.v4, src.v2);
-    NEON_PACK(neon_s8, T0, dest);
-    FORCE_RET();
-#undef SAT8
-}
-
-NEON_OP(narrow_u16)
-{
-    T0 = (T0 & 0xffff) | (T1 << 16);
-}
-
-NEON_OP(narrow_sat_u16)
-{
-    if (T0 > 0xffff) {
-        T0 = 0xffff;
-        env->QF = 1;
-    }
-    if (T1 > 0xffff) {
-        T1 = 0xffff;
-        env->QF = 1;
-    }
-    T0 |= T1 << 16;
-    FORCE_RET();
-}
-
-NEON_OP(narrow_sat_s16)
-{
-    if ((int32_t)T0 != (int16_t)T0) {
-        T0 = ((int32_t)T0 >> 31) ^ 0x7fff;
-        env->QF = 1;
-    }
-    if ((int32_t)T1 != (int16_t) T1) {
-        T1 = ((int32_t)T1 >> 31) ^ 0x7fff;
-        env->QF = 1;
-    }
-    T0 = (uint16_t)T0 | (T1 << 16);
-    FORCE_RET();
-}
-
-NEON_OP(narrow_sat_u32)
-{
-    if (T1) {
-        T0 = 0xffffffffu;
-        env->QF = 1;
-    }
-    FORCE_RET();
-}
-
-NEON_OP(narrow_sat_s32)
-{
-    int32_t sign = (int32_t)T1 >> 31;
-
-    if ((int32_t)T1 != sign) {
-        T0 = sign ^ 0x7fffffff;
-        env->QF = 1;
-    }
-    FORCE_RET();
-}
-
-/* Narrowing instructions.  Named type is the narrow type.  */
-NEON_OP(narrow_high_u8)
-{
-    T0 = ((T0 >> 8) & 0xff) | ((T0 >> 16) & 0xff00)
-        | ((T1 << 8) & 0xff0000) | (T1 & 0xff000000);
-    FORCE_RET();
-}
-
-NEON_OP(narrow_high_u16)
-{
-    T0 = (T0 >> 16) | (T1 & 0xffff0000);
-    FORCE_RET();
-}
-
-NEON_OP(narrow_high_round_u8)
-{
-    T0 = (((T0 + 0x80) >> 8) & 0xff) | (((T0 + 0x800000) >> 16) & 0xff00)
-        | (((T1 + 0x80) << 8) & 0xff0000) | ((T1 + 0x800000) & 0xff000000);
-    FORCE_RET();
-}
-
-NEON_OP(narrow_high_round_u16)
-{
-    T0 = ((T0 + 0x8000) >> 16) | ((T1 + 0x8000) & 0xffff0000);
-    FORCE_RET();
-}
-
-NEON_OP(narrow_high_round_u32)
-{
-    if (T0 >= 0x80000000u)
-        T0 = T1 + 1;
-    else
-        T0 = T1;
-    FORCE_RET();
-}
-
-/* Widening instructions.  Named type is source type.  */
-NEON_OP(widen_s8)
-{
-    uint32_t src;
-
-    src = T0;
-    T0 = (uint16_t)(int8_t)src | ((int8_t)(src >> 8) << 16);
-    T1 = (uint16_t)(int8_t)(src >> 16) | ((int8_t)(src >> 24) << 16);
-}
-
-NEON_OP(widen_u8)
-{
-    T1 = ((T0 >> 8) & 0xff0000) | ((T0 >> 16) & 0xff);
-    T0 = ((T0 << 8) & 0xff0000) | (T0 & 0xff);
-}
-
-NEON_OP(widen_s16)
-{
-    int32_t src;
-
-    src = T0;
-    T0 = (int16_t)src;
-    T1 = src >> 16;
-}
-
-NEON_OP(widen_u16)
-{
-    T1 = T0 >> 16;
-    T0 &= 0xffff;
-}
-
-NEON_OP(widen_s32)
-{
-    T1 = (int32_t)T0 >> 31;
-    FORCE_RET();
-}
-
-NEON_OP(widen_high_u8)
-{
-    T1 = (T0 & 0xff000000) | ((T0 >> 8) & 0xff00);
-    T0 = ((T0 << 16) & 0xff000000) | ((T0 << 8) & 0xff00);
-}
-
-NEON_OP(widen_high_u16)
-{
-    T1 = T0 & 0xffff0000;
-    T0 <<= 16;
-}
-
-/* Long operations.  The type is the wide type.  */
-NEON_OP(shll_u16)
-{
-    int shift = PARAM1;
-    uint32_t mask;
-
-    mask = 0xffff >> (16 - shift);
-    mask |= mask << 16;
-    mask = ~mask;
-
-    T0 = (T0 << shift) & mask;
-    T1 = (T1 << shift) & mask;
-    FORCE_RET();
-}
-
-NEON_OP(shll_u64)
-{
-    int shift = PARAM1;
-
-    T1 <<= shift;
-    T1 |= T0 >> (32 - shift);
-    T0 <<= shift;
-    FORCE_RET();
-}
-
-NEON_OP(addl_u16)
-{
-    uint32_t tmp;
-    uint32_t high;
-
-    tmp = env->vfp.scratch[0];
-    high = (T0 >> 16) + (tmp >> 16);
-    T0 = (uint16_t)(T0 + tmp);
-    T0 |= (high << 16);
-    tmp = env->vfp.scratch[1];
-    high = (T1 >> 16) + (tmp >> 16);
-    T1 = (uint16_t)(T1 + tmp);
-    T1 |= (high << 16);
-    FORCE_RET();
-}
-
-NEON_OP(addl_u32)
-{
-    T0 += env->vfp.scratch[0];
-    T1 += env->vfp.scratch[1];
-    FORCE_RET();
-}
-
-NEON_OP(addl_u64)
-{
-    uint64_t tmp;
-    tmp = T0 | ((uint64_t)T1 << 32);
-    tmp += env->vfp.scratch[0];
-    tmp += (uint64_t)env->vfp.scratch[1] << 32;
-    T0 = tmp;
-    T1 = tmp >> 32;
-    FORCE_RET();
-}
-
-NEON_OP(subl_u16)
-{
-    uint32_t tmp;
-    uint32_t high;
-
-    tmp = env->vfp.scratch[0];
-    high = (T0 >> 16) - (tmp >> 16);
-    T0 = (uint16_t)(T0 - tmp);
-    T0 |= (high << 16);
-    tmp = env->vfp.scratch[1];
-    high = (T1 >> 16) - (tmp >> 16);
-    T1 = (uint16_t)(T1 - tmp);
-    T1 |= (high << 16);
-    FORCE_RET();
-}
-
-NEON_OP(subl_u32)
-{
-    T0 -= env->vfp.scratch[0];
-    T1 -= env->vfp.scratch[1];
-    FORCE_RET();
-}
-
-NEON_OP(subl_u64)
-{
-    uint64_t tmp;
-    tmp = T0 | ((uint64_t)T1 << 32);
-    tmp -= env->vfp.scratch[0];
-    tmp -= (uint64_t)env->vfp.scratch[1] << 32;
-    T0 = tmp;
-    T1 = tmp >> 32;
-    FORCE_RET();
-}
-
-#define DO_ABD(dest, x, y, type) do { \
-    type tmp_x = x; \
-    type tmp_y = y; \
-    dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
-    } while(0)
-
-NEON_OP(abdl_u16)
-{
-    uint32_t tmp;
-    uint32_t low;
-    uint32_t high;
-
-    DO_ABD(low, T0, T1, uint8_t);
-    DO_ABD(tmp, T0 >> 8, T1 >> 8, uint8_t);
-    low |= tmp << 16;
-    DO_ABD(high, T0 >> 16, T1 >> 16, uint8_t);
-    DO_ABD(tmp, T0 >> 24, T1 >> 24, uint8_t);
-    high |= tmp << 16;
-    T0 = low;
-    T1 = high;
-    FORCE_RET();
-}
-
-NEON_OP(abdl_s16)
-{
-    uint32_t tmp;
-    uint32_t low;
-    uint32_t high;
-
-    DO_ABD(low, T0, T1, int8_t);
-    DO_ABD(tmp, T0 >> 8, T1 >> 8, int8_t);
-    low |= tmp << 16;
-    DO_ABD(high, T0 >> 16, T1 >> 16, int8_t);
-    DO_ABD(tmp, T0 >> 24, T1 >> 24, int8_t);
-    high |= tmp << 16;
-    T0 = low;
-    T1 = high;
-    FORCE_RET();
-}
-
-NEON_OP(abdl_u32)
-{
-    uint32_t low;
-    uint32_t high;
-
-    DO_ABD(low, T0, T1, uint16_t);
-    DO_ABD(high, T0 >> 16, T1 >> 16, uint16_t);
-    T0 = low;
-    T1 = high;
-    FORCE_RET();
-}
-
-NEON_OP(abdl_s32)
-{
-    uint32_t low;
-    uint32_t high;
-
-    DO_ABD(low, T0, T1, int16_t);
-    DO_ABD(high, T0 >> 16, T1 >> 16, int16_t);
-    T0 = low;
-    T1 = high;
-    FORCE_RET();
-}
-
-NEON_OP(abdl_u64)
-{
-    DO_ABD(T0, T0, T1, uint32_t);
-    T1 = 0;
-}
-
-NEON_OP(abdl_s64)
-{
-    DO_ABD(T0, T0, T1, int32_t);
-    T1 = 0;
-}
-#undef DO_ABD
-
-/* Widening multiply.  Named type is the source type.  */
-#define DO_MULL(dest, x, y, type1, type2) do { \
-    type1 tmp_x = x; \
-    type1 tmp_y = y; \
-    dest = (type2)((type2)tmp_x * (type2)tmp_y); \
-    } while(0)
-
-NEON_OP(mull_u8)
-{
-    uint32_t tmp;
-    uint32_t low;
-    uint32_t high;
-
-    DO_MULL(low, T0, T1, uint8_t, uint16_t);
-    DO_MULL(tmp, T0 >> 8, T1 >> 8, uint8_t, uint16_t);
-    low |= tmp << 16;
-    DO_MULL(high, T0 >> 16, T1 >> 16, uint8_t, uint16_t);
-    DO_MULL(tmp, T0 >> 24, T1 >> 24, uint8_t, uint16_t);
-    high |= tmp << 16;
-    T0 = low;
-    T1 = high;
-    FORCE_RET();
-}
-
-NEON_OP(mull_s8)
-{
-    uint32_t tmp;
-    uint32_t low;
-    uint32_t high;
-
-    DO_MULL(low, T0, T1, int8_t, uint16_t);
-    DO_MULL(tmp, T0 >> 8, T1 >> 8, int8_t, uint16_t);
-    low |= tmp << 16;
-    DO_MULL(high, T0 >> 16, T1 >> 16, int8_t, uint16_t);
-    DO_MULL(tmp, T0 >> 24, T1 >> 24, int8_t, uint16_t);
-    high |= tmp << 16;
-    T0 = low;
-    T1 = high;
-    FORCE_RET();
-}
-
-NEON_OP(mull_u16)
-{
-    uint32_t low;
-    uint32_t high;
-
-    DO_MULL(low, T0, T1, uint16_t, uint32_t);
-    DO_MULL(high, T0 >> 16, T1 >> 16, uint16_t, uint32_t);
-    T0 = low;
-    T1 = high;
-    FORCE_RET();
-}
-
-NEON_OP(mull_s16)
-{
-    uint32_t low;
-    uint32_t high;
-
-    DO_MULL(low, T0, T1, int16_t, uint32_t);
-    DO_MULL(high, T0 >> 16, T1 >> 16, int16_t, uint32_t);
-    T0 = low;
-    T1 = high;
-    FORCE_RET();
-}
-
-NEON_OP(addl_saturate_s32)
-{
-    uint32_t tmp;
-    uint32_t res;
-
-    tmp = env->vfp.scratch[0];
-    res = T0 + tmp;
-    if (((res ^ T0) & SIGNBIT) && !((T0 ^ tmp) & SIGNBIT)) {
-        env->QF = 1;
-        T0 = (T0 >> 31) ^ 0x7fffffff;
-    } else {
-      T0 = res;
-    }
-    tmp = env->vfp.scratch[1];
-    res = T1 + tmp;
-    if (((res ^ T1) & SIGNBIT) && !((T1 ^ tmp) & SIGNBIT)) {
-        env->QF = 1;
-        T1 = (T1 >> 31) ^ 0x7fffffff;
-    } else {
-      T1 = res;
-    }
-    FORCE_RET();
-}
-
-NEON_OP(addl_saturate_s64)
-{
-    uint64_t src1;
-    uint64_t src2;
-    uint64_t res;
-
-    src1 = T0 + ((uint64_t)T1 << 32);
-    src2 = env->vfp.scratch[0] + ((uint64_t)env->vfp.scratch[1] << 32);
-    res = src1 + src2;
-    if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
-        env->QF = 1;
-        T0 = ~(int64_t)src1 >> 63;
-        T1 = T0 ^ 0x80000000;
-    } else {
-      T0 = res;
-      T1 = res >> 32;
-    }
-    FORCE_RET();
-}
-
-NEON_OP(addl_saturate_u64)
-{
-    uint64_t src1;
-    uint64_t src2;
-    uint64_t res;
-
-    src1 = T0 + ((uint64_t)T1 << 32);
-    src2 = env->vfp.scratch[0] + ((uint64_t)env->vfp.scratch[1] << 32);
-    res = src1 + src2;
-    if (res < src1) {
-        env->QF = 1;
-        T0 = 0xffffffff;
-        T1 = 0xffffffff;
-    } else {
-      T0 = res;
-      T1 = res >> 32;
-    }
-    FORCE_RET();
-}
-
-NEON_OP(subl_saturate_s64)
-{
-    uint64_t src1;
-    uint64_t src2;
-    uint64_t res;
-
-    src1 = T0 + ((uint64_t)T1 << 32);
-    src2 = env->vfp.scratch[0] + ((uint64_t)env->vfp.scratch[1] << 32);
-    res = src1 - src2;
-    if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
-        env->QF = 1;
-        T0 = ~(int64_t)src1 >> 63;
-        T1 = T0 ^ 0x80000000;
-    } else {
-      T0 = res;
-      T1 = res >> 32;
-    }
-    FORCE_RET();
-}
-
-NEON_OP(subl_saturate_u64)
-{
-    uint64_t src1;
-    uint64_t src2;
-    uint64_t res;
-
-    src1 = T0 + ((uint64_t)T1 << 32);
-    src2 = env->vfp.scratch[0] + ((uint64_t)env->vfp.scratch[1] << 32);
-    if (src1 < src2) {
-        env->QF = 1;
-        T0 = 0;
-        T1 = 0;
-    } else {
-      res = src1 - src2;
-      T0 = res;
-      T1 = res >> 32;
-    }
-    FORCE_RET();
-}
-
-NEON_OP(negl_u16)
-{
-    uint32_t tmp;
-    tmp = T0 >> 16;
-    tmp = -tmp;
-    T0 = (-T0 & 0xffff) | (tmp << 16);
-    tmp = T1 >> 16;
-    tmp = -tmp;
-    T1 = (-T1 & 0xffff) | (tmp << 16);
-    FORCE_RET();
-}
-
-NEON_OP(negl_u32)
-{
-    T0 = -T0;
-    T1 = -T1;
-    FORCE_RET();
-}
-
-NEON_OP(negl_u64)
-{
-    uint64_t val;
-
-    val = T0 | ((uint64_t)T1 << 32);
-    val = -val;
-    T0 = val;
-    T1 = val >> 32;
-    FORCE_RET();
-}
-
-/* Scalar operations.  */
-NEON_OP(dup_low16)
-{
-    T0 = (T0 & 0xffff) | (T0 << 16);
-    FORCE_RET();
-}
-
-NEON_OP(dup_high16)
-{
-    T0 = (T0 >> 16) | (T0 & 0xffff0000);
-    FORCE_RET();
-}
-
-/* Helper for VEXT */
-NEON_OP(extract)
-{
-    int shift = PARAM1;
-    T0 = (T0 >> shift) | (T1 << (32 - shift));
-    FORCE_RET();
-}
-
-/* Pairwise add long.  Named type is source type.  */
-NEON_OP(paddl_s8)
-{
-    int8_t src1;
-    int8_t src2;
-    uint16_t result;
-    src1 = T0 >> 24;
-    src2 = T0 >> 16;
-    result = (uint16_t)src1 + src2;
-    src1 = T0 >> 8;
-    src2 = T0;
-    T0 = (uint16_t)((uint16_t)src1 + src2) | ((uint32_t)result << 16);
-    FORCE_RET();
-}
-
-NEON_OP(paddl_u8)
-{
-    uint8_t src1;
-    uint8_t src2;
-    uint16_t result;
-    src1 = T0 >> 24;
-    src2 = T0 >> 16;
-    result = (uint16_t)src1 + src2;
-    src1 = T0 >> 8;
-    src2 = T0;
-    T0 = (uint16_t)((uint16_t)src1 + src2) | ((uint32_t)result << 16);
-    FORCE_RET();
-}
-
-NEON_OP(paddl_s16)
-{
-    T0 = (uint32_t)(int16_t)T0 + (uint32_t)(int16_t)(T0 >> 16);
-    FORCE_RET();
-}
-
-NEON_OP(paddl_u16)
-{
-    T0 = (uint32_t)(uint16_t)T0 + (uint32_t)(uint16_t)(T0 >> 16);
-    FORCE_RET();
-}
-
-NEON_OP(paddl_s32)
-{
-    int64_t tmp;
-    tmp = (int64_t)(int32_t)T0 + (int64_t)(int32_t)T1;
-    T0 = tmp;
-    T1 = tmp >> 32;
-    FORCE_RET();
-}
-
-NEON_OP(paddl_u32)
-{
-    uint64_t tmp;
-    tmp = (uint64_t)T0 + (uint64_t)T1;
-    T0 = tmp;
-    T1 = tmp >> 32;
-    FORCE_RET();
-}
-
-/* Count Leading Sign/Zero Bits.  */
-static inline int do_clz8(uint8_t x)
-{
-    int n;
-    for (n = 8; x; n--)
-        x >>= 1;
-    return n;
-}
-
-static inline int do_clz16(uint16_t x)
-{
-    int n;
-    for (n = 16; x; n--)
-        x >>= 1;
-    return n;
-}
-
-NEON_OP(clz_u8)
-{
-    uint32_t result;
-    uint32_t tmp;
-
-    tmp = T0;
-    result = do_clz8(tmp);
-    result |= do_clz8(tmp >> 8) << 8;
-    result |= do_clz8(tmp >> 16) << 16;
-    result |= do_clz8(tmp >> 24) << 24;
-    T0 = result;
-    FORCE_RET();
-}
-
-NEON_OP(clz_u16)
-{
-    uint32_t result;
-    uint32_t tmp;
-    tmp = T0;
-    result = do_clz16(tmp);
-    result |= do_clz16(tmp >> 16) << 16;
-    T0 = result;
-    FORCE_RET();
-}
-
-NEON_OP(cls_s8)
-{
-    uint32_t result;
-    int8_t tmp;
-    tmp = T0;
-    result = do_clz8((tmp < 0) ? ~tmp : tmp) - 1;
-    tmp = T0 >> 8;
-    result |= (do_clz8((tmp < 0) ? ~tmp : tmp) - 1) << 8;
-    tmp = T0 >> 16;
-    result |= (do_clz8((tmp < 0) ? ~tmp : tmp) - 1) << 16;
-    tmp = T0 >> 24;
-    result |= (do_clz8((tmp < 0) ? ~tmp : tmp) - 1) << 24;
-    T0 = result;
-    FORCE_RET();
-}
-
-NEON_OP(cls_s16)
-{
-    uint32_t result;
-    int16_t tmp;
-    tmp = T0;
-    result = do_clz16((tmp < 0) ? ~tmp : tmp) - 1;
-    tmp = T0 >> 16;
-    result |= (do_clz16((tmp < 0) ? ~tmp : tmp) - 1) << 16;
-    T0 = result;
-    FORCE_RET();
-}
-
-NEON_OP(cls_s32)
-{
-    int count;
-    if ((int32_t)T0 < 0)
-        T0 = ~T0;
-    for (count = 32; T0 > 0; count--)
-        T0 = T0 >> 1;
-    T0 = count - 1;
-    FORCE_RET();
-}
-
-/* Bit count.  */
-NEON_OP(cnt_u8)
-{
-    T0 = (T0 & 0x55555555) + ((T0 >>  1) & 0x55555555);
-    T0 = (T0 & 0x33333333) + ((T0 >>  2) & 0x33333333);
-    T0 = (T0 & 0x0f0f0f0f) + ((T0 >>  4) & 0x0f0f0f0f);
-    FORCE_RET();
-}
-
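The bit-count op above is the classic SWAR reduction done in byte lanes, so no carries cross byte boundaries. A host check against per-byte expectations (sketch):

    #include <stdint.h>
    #include <assert.h>

    /* Per-byte population count, as in the cnt_u8 op above.  */
    static uint32_t cnt_u8(uint32_t x)
    {
        x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
        x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
        x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f);
        return x;
    }

    int main(void)
    {
        /* Each result byte is the popcount of the matching input byte.  */
        assert(cnt_u8(0xff0f0301) == 0x08040201);
        return 0;
    }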
-/* Saturating negation.  */
-/* ??? Make these use NEON_VOP1 */
-#define DO_QABS8(x) do { \
-    if (x == (int8_t)0x80) { \
-        x = 0x7f; \
-        env->QF = 1; \
-    } else if (x < 0) { \
-        x = -x; \
-    }} while (0)
-NEON_OP(qabs_s8)
-{
-    neon_s8 vec;
-    NEON_UNPACK(neon_s8, vec, T0);
-    DO_QABS8(vec.v1);
-    DO_QABS8(vec.v2);
-    DO_QABS8(vec.v3);
-    DO_QABS8(vec.v4);
-    NEON_PACK(neon_s8, T0, vec);
-    FORCE_RET();
-}
-#undef DO_QABS8
-
-#define DO_QNEG8(x) do { \
-    if (x == (int8_t)0x80) { \
-        x = 0x7f; \
-        env->QF = 1; \
-    } else { \
-        x = -x; \
-    }} while (0)
-NEON_OP(qneg_s8)
-{
-    neon_s8 vec;
-    NEON_UNPACK(neon_s8, vec, T0);
-    DO_QNEG8(vec.v1);
-    DO_QNEG8(vec.v2);
-    DO_QNEG8(vec.v3);
-    DO_QNEG8(vec.v4);
-    NEON_PACK(neon_s8, T0, vec);
-    FORCE_RET();
-}
-#undef DO_QNEG8
-
-#define DO_QABS16(x) do { \
-    if (x == (int16_t)0x8000) { \
-        x = 0x7fff; \
-        env->QF = 1; \
-    } else if (x < 0) { \
-        x = -x; \
-    }} while (0)
-NEON_OP(qabs_s16)
-{
-    neon_s16 vec;
-    NEON_UNPACK(neon_s16, vec, T0);
-    DO_QABS16(vec.v1);
-    DO_QABS16(vec.v2);
-    NEON_PACK(neon_s16, T0, vec);
-    FORCE_RET();
-}
-#undef DO_QABS16
-
-#define DO_QNEG16(x) do { \
-    if (x == (int16_t)0x8000) { \
-        x = 0x7fff; \
-        env->QF = 1; \
-    } else { \
-        x = -x; \
-    }} while (0)
-NEON_OP(qneg_s16)
-{
-    neon_s16 vec;
-    NEON_UNPACK(neon_s16, vec, T0);
-    DO_QNEG16(vec.v1);
-    DO_QNEG16(vec.v2);
-    NEON_PACK(neon_s16, T0, vec);
-    FORCE_RET();
-}
-#undef DO_QNEG16
-
-NEON_OP(qabs_s32)
-{
-    if (T0 == 0x80000000) {
-        T0 = 0x7fffffff;
-        env->QF = 1;
-    } else if ((int32_t)T0 < 0) {
-        T0 = -T0;
-    }
-    FORCE_RET();
-}
-
-NEON_OP(qneg_s32)
-{
-    if (T0 == 0x80000000) {
-        T0 = 0x7fffffff;
-        env->QF = 1;
-    } else {
-        T0 = -T0;
-    }
-    FORCE_RET();
-}
-
-/* Unary operations.  */
-#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src
-NEON_VOP1(abs_s8, neon_s8, 4)
-NEON_VOP1(abs_s16, neon_s16, 2)
-NEON_OP(abs_s32)
-{
-    if ((int32_t)T0 < 0)
-        T0 = -T0;
-    FORCE_RET();
-}
-#undef NEON_FN
-
-/* Transpose.  Argument order is rather strange to avoid special casing
-   the translation code.
-   On input T0 = rm, T1 = rd.  On output T0 = rd, T1 = rm.  */
-NEON_OP(trn_u8)
-{
-    uint32_t rd;
-    uint32_t rm;
-    rd = ((T0 & 0x00ff00ff) << 8) | (T1 & 0x00ff00ff);
-    rm = ((T1 & 0xff00ff00) >> 8) | (T0 & 0xff00ff00);
-    T0 = rd;
-    T1 = rm;
-    FORCE_RET();
-}
-
-NEON_OP(trn_u16)
-{
-    uint32_t rd;
-    uint32_t rm;
-    rd = (T0 << 16) | (T1 & 0xffff);
-    rm = (T1 >> 16) | (T0 & 0xffff0000);
-    T0 = rd;
-    T1 = rm;
-    FORCE_RET();
-}
-
-/* Worker routines for zip and unzip.  */
-NEON_OP(unzip_u8)
-{
-    uint32_t rd;
-    uint32_t rm;
-    rd = (T0 & 0xff) | ((T0 >> 8) & 0xff00)
-         | ((T1 << 16) & 0xff0000) | ((T1 << 8) & 0xff000000);
-    rm = ((T0 >> 8) & 0xff) | ((T0 >> 16) & 0xff00)
-         | ((T1 << 8) & 0xff0000) | (T1 & 0xff000000);
-    T0 = rd;
-    T1 = rm;
-    FORCE_RET();
-}
-
-NEON_OP(zip_u8)
-{
-    uint32_t rd;
-    uint32_t rm;
-    rd = (T0 & 0xff) | ((T1 << 8) & 0xff00)
-         | ((T0 << 8) & 0xff0000) | ((T1 << 16) & 0xff000000);
-    rm = ((T0 >> 16) & 0xff) | ((T1 >> 8) & 0xff00)
-         | ((T0 >> 8) & 0xff0000) | (T1 & 0xff000000);
-    T0 = rd;
-    T1 = rm;
-    FORCE_RET();
-}
-
-NEON_OP(zip_u16)
-{
-    uint32_t tmp;
-
-    tmp = (T0 & 0xffff) | (T1 << 16);
-    T1 = (T1 & 0xffff0000) | (T0 >> 16);
-    T0 = tmp;
-    FORCE_RET();
-}
-
-NEON_OP(dup_u8)
-{
-    T0 = (T0 >> PARAM1) & 0xff;
-    T0 |= T0 << 8;
-    T0 |= T0 << 16;
-    FORCE_RET();
-}
index a4b1df504b7befeaba6898f3046cf7d2f34cb76d..582dfa73bea3c2db2d86beedcaff969d95d08681 100644 (file)
@@ -77,6 +77,9 @@ extern FILE *logfile;
 extern int loglevel;
 
 static TCGv cpu_env;
+/* We reuse the same 64-bit temporaries for efficiency.  */
+static TCGv cpu_V0, cpu_V1;
+
 /* FIXME:  These should be removed.  */
 static TCGv cpu_T[2];
 static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
@@ -468,6 +471,9 @@ static inline void gen_op_bicl_T0_T1(void)
     gen_op_andl_T0_T1();
 }
 
+/* FIXME:  Implement this natively.  */
+#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
+
 /* FIXME:  Implement this natively.  */
 static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
 {
@@ -1166,8 +1172,13 @@ neon_reg_offset (int reg, int n)
     return vfp_reg_offset(0, sreg);
 }
 
-#define NEON_GET_REG(T, reg, n) gen_op_neon_getreg_##T(neon_reg_offset(reg, n))
-#define NEON_SET_REG(T, reg, n) gen_op_neon_setreg_##T(neon_reg_offset(reg, n))
+/* FIXME: Remove these.  */
+#define neon_T0 cpu_T[0]
+#define neon_T1 cpu_T[1]
+#define NEON_GET_REG(T, reg, n) \
+  tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
+#define NEON_SET_REG(T, reg, n) \
+  tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
 
 static TCGv neon_load_reg(int reg, int pass)
 {
@@ -1182,6 +1193,16 @@ static void neon_store_reg(int reg, int pass, TCGv var)
     dead_tmp(var);
 }
 
+static inline void neon_load_reg64(TCGv var, int reg)
+{
+    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
+}
+
+static inline void neon_store_reg64(TCGv var, int reg)
+{
+    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
+}
+
 #define tcg_gen_ld_f32 tcg_gen_ld_i32
 #define tcg_gen_ld_f64 tcg_gen_ld_i64
 #define tcg_gen_st_f32 tcg_gen_st_i32
@@ -2418,6 +2439,37 @@ vfp_enabled(CPUState * env)
     return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
 }
 
+static void gen_neon_dup_u8(TCGv var, int shift)
+{
+    TCGv tmp = new_tmp();
+    if (shift)
+        tcg_gen_shri_i32(var, var, shift);
+    tcg_gen_andi_i32(var, var, 0xff);
+    tcg_gen_shli_i32(tmp, var, 8);
+    tcg_gen_or_i32(var, var, tmp);
+    tcg_gen_shli_i32(tmp, var, 16);
+    tcg_gen_or_i32(var, var, tmp);
+    dead_tmp(tmp);
+}
+
+static void gen_neon_dup_low16(TCGv var)
+{
+    TCGv tmp = new_tmp();
+    tcg_gen_andi_i32(var, var, 0xffff);
+    tcg_gen_shli_i32(tmp, var, 16);
+    tcg_gen_or_i32(var, var, tmp);
+    dead_tmp(tmp);
+}
+
+static void gen_neon_dup_high16(TCGv var)
+{
+    TCGv tmp = new_tmp();
+    tcg_gen_andi_i32(var, var, 0xffff0000);
+    tcg_gen_shri_i32(tmp, var, 16);
+    tcg_gen_or_i32(var, var, tmp);
+    dead_tmp(tmp);
+}
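These generators broadcast a single element across all 32 bits. The equivalent host arithmetic for the byte case, as a standalone sketch (dup_u8 is an illustrative name):

    #include <stdint.h>
    #include <assert.h>

    /* What gen_neon_dup_u8 emits: extract one byte lane, then
       replicate it into all four byte positions.  */
    static uint32_t dup_u8(uint32_t var, int shift)
    {
        var = (var >> shift) & 0xff;
        var |= var << 8;
        var |= var << 16;
        return var;
    }

    int main(void)
    {
        assert(dup_u8(0x12345678, 8) == 0x56565656); /* lane 1 replicated */
        return 0;
    }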
+
 /* Disassemble a VFP instruction.  Returns nonzero if an error occured
    (ie. an undefined instruction).  */
 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
@@ -2425,6 +2477,7 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
     uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
     int dp, veclen;
     TCGv tmp;
+    TCGv tmp2;
 
     if (!arm_feature(env, ARM_FEATURE_VFP))
         return 1;
@@ -2468,66 +2521,66 @@ static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 }
                 if (insn & ARM_CP_RW_BIT) {
                     /* vfp->arm */
+                    tmp = neon_load_reg(rn, pass);
                     switch (size) {
                     case 0:
-                        NEON_GET_REG(T1, rn, pass);
                         if (offset)
-                            gen_op_shrl_T1_im(offset);
+                            tcg_gen_shri_i32(tmp, tmp, offset);
                         if (insn & (1 << 23))
-                            gen_uxtb(cpu_T[1]);
+                            gen_uxtb(tmp);
                         else
-                            gen_sxtb(cpu_T[1]);
+                            gen_sxtb(tmp);
                         break;
                     case 1:
-                        NEON_GET_REG(T1, rn, pass);
                         if (insn & (1 << 23)) {
                             if (offset) {
-                                gen_op_shrl_T1_im(16);
+                                tcg_gen_shri_i32(tmp, tmp, 16);
                             } else {
-                                gen_uxth(cpu_T[1]);
+                                gen_uxth(tmp);
                             }
                         } else {
                             if (offset) {
-                                gen_op_sarl_T1_im(16);
+                                tcg_gen_sari_i32(tmp, tmp, 16);
                             } else {
-                                gen_sxth(cpu_T[1]);
+                                gen_sxth(tmp);
                             }
                         }
                         break;
                     case 2:
-                        NEON_GET_REG(T1, rn, pass);
                         break;
                     }
-                    gen_movl_reg_T1(s, rd);
+                    store_reg(s, rd, tmp);
                 } else {
                     /* arm->vfp */
-                    gen_movl_T0_reg(s, rd);
+                    tmp = load_reg(s, rd);
                     if (insn & (1 << 23)) {
                         /* VDUP */
                         if (size == 0) {
-                            gen_op_neon_dup_u8(0);
+                            gen_neon_dup_u8(tmp, 0);
                         } else if (size == 1) {
-                            gen_op_neon_dup_low16();
+                            gen_neon_dup_low16(tmp);
                         }
-                        NEON_SET_REG(T0, rn, 0);
-                        NEON_SET_REG(T0, rn, 1);
+                        tmp2 = new_tmp();
+                        tcg_gen_mov_i32(tmp2, tmp);
+                        neon_store_reg(rn, 0, tmp2);
+                        neon_store_reg(rn, 1, tmp);
                     } else {
                         /* VMOV */
                         switch (size) {
                         case 0:
-                            tmp = neon_load_reg(rn, pass);
-                            gen_bfi(tmp, tmp, cpu_T[0], offset, 0xff);
-                            neon_store_reg(rn, pass, tmp);
+                            tmp2 = neon_load_reg(rn, pass);
+                            gen_bfi(tmp, tmp2, tmp, offset, 0xff);
+                            dead_tmp(tmp2);
                             break;
                         case 1:
-                            tmp = neon_load_reg(rn, pass);
-                            gen_bfi(tmp, tmp, cpu_T[0], offset, 0xffff);
-                            neon_store_reg(rn, pass, tmp);
+                            tmp2 = neon_load_reg(rn, pass);
+                            gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
+                            dead_tmp(tmp2);
                             break;
                         case 2:
-                            NEON_SET_REG(T0, rn, pass);
                             break;
                         }
+                        neon_store_reg(rn, pass, tmp);
                     }
                 }
             } else { /* !dp */
@@ -3210,179 +3263,90 @@ static void gen_nop_hint(DisasContext *s, int val)
     }
 }
 
-/* Neon shift by constant.  The actual ops are the same as used for variable
-   shifts.  [OP][U][SIZE]  */
-static GenOpFunc *gen_neon_shift_im[8][2][4] = {
-    { /* 0 */ /* VSHR */
-      {
-        gen_op_neon_shl_u8,
-        gen_op_neon_shl_u16,
-        gen_op_neon_shl_u32,
-        gen_op_neon_shl_u64
-      }, {
-        gen_op_neon_shl_s8,
-        gen_op_neon_shl_s16,
-        gen_op_neon_shl_s32,
-        gen_op_neon_shl_s64
-      }
-    }, { /* 1 */ /* VSRA */
-      {
-        gen_op_neon_shl_u8,
-        gen_op_neon_shl_u16,
-        gen_op_neon_shl_u32,
-        gen_op_neon_shl_u64
-      }, {
-        gen_op_neon_shl_s8,
-        gen_op_neon_shl_s16,
-        gen_op_neon_shl_s32,
-        gen_op_neon_shl_s64
-      }
-    }, { /* 2 */ /* VRSHR */
-      {
-        gen_op_neon_rshl_u8,
-        gen_op_neon_rshl_u16,
-        gen_op_neon_rshl_u32,
-        gen_op_neon_rshl_u64
-      }, {
-        gen_op_neon_rshl_s8,
-        gen_op_neon_rshl_s16,
-        gen_op_neon_rshl_s32,
-        gen_op_neon_rshl_s64
-      }
-    }, { /* 3 */ /* VRSRA */
-      {
-        gen_op_neon_rshl_u8,
-        gen_op_neon_rshl_u16,
-        gen_op_neon_rshl_u32,
-        gen_op_neon_rshl_u64
-      }, {
-        gen_op_neon_rshl_s8,
-        gen_op_neon_rshl_s16,
-        gen_op_neon_rshl_s32,
-        gen_op_neon_rshl_s64
-      }
-    }, { /* 4 */
-      {
-        NULL, NULL, NULL, NULL
-      }, { /* VSRI */
-        gen_op_neon_shl_u8,
-        gen_op_neon_shl_u16,
-        gen_op_neon_shl_u32,
-        gen_op_neon_shl_u64,
-      }
-    }, { /* 5 */
-      { /* VSHL */
-        gen_op_neon_shl_u8,
-        gen_op_neon_shl_u16,
-        gen_op_neon_shl_u32,
-        gen_op_neon_shl_u64,
-      }, { /* VSLI */
-        gen_op_neon_shl_u8,
-        gen_op_neon_shl_u16,
-        gen_op_neon_shl_u32,
-        gen_op_neon_shl_u64,
-      }
-    }, { /* 6 */ /* VQSHL */
-      {
-        gen_op_neon_qshl_u8,
-        gen_op_neon_qshl_u16,
-        gen_op_neon_qshl_u32,
-        gen_op_neon_qshl_u64
-      }, {
-        gen_op_neon_qshl_s8,
-        gen_op_neon_qshl_s16,
-        gen_op_neon_qshl_s32,
-        gen_op_neon_qshl_s64
-      }
-    }, { /* 7 */ /* VQSHLU */
-      {
-        gen_op_neon_qshl_u8,
-        gen_op_neon_qshl_u16,
-        gen_op_neon_qshl_u32,
-        gen_op_neon_qshl_u64
-      }, {
-        gen_op_neon_qshl_u8,
-        gen_op_neon_qshl_u16,
-        gen_op_neon_qshl_u32,
-        gen_op_neon_qshl_u64
-      }
-    }
-};
+/* These macros help make the code more readable when migrating from the
+   old dyngen helpers.  They should probably be removed when
+   T0/T1 are removed.  */
+#define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
+#define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]
 
-/* [R][U][size - 1] */
-static GenOpFunc *gen_neon_shift_im_narrow[2][2][3] = {
-    {
-      {
-        gen_op_neon_shl_u16,
-        gen_op_neon_shl_u32,
-        gen_op_neon_shl_u64
-      }, {
-        gen_op_neon_shl_s16,
-        gen_op_neon_shl_s32,
-        gen_op_neon_shl_s64
-      }
-    }, {
-      {
-        gen_op_neon_rshl_u16,
-        gen_op_neon_rshl_u32,
-        gen_op_neon_rshl_u64
-      }, {
-        gen_op_neon_rshl_s16,
-        gen_op_neon_rshl_s32,
-        gen_op_neon_rshl_s64
-      }
-    }
-};
-
-static inline void
-gen_op_neon_narrow_u32 ()
-{
-    /* No-op.  */
-}
-
-static GenOpFunc *gen_neon_narrow[3] = {
-    gen_op_neon_narrow_u8,
-    gen_op_neon_narrow_u16,
-    gen_op_neon_narrow_u32
-};
-
-static GenOpFunc *gen_neon_narrow_satu[3] = {
-    gen_op_neon_narrow_sat_u8,
-    gen_op_neon_narrow_sat_u16,
-    gen_op_neon_narrow_sat_u32
-};
-
-static GenOpFunc *gen_neon_narrow_sats[3] = {
-    gen_op_neon_narrow_sat_s8,
-    gen_op_neon_narrow_sat_s16,
-    gen_op_neon_narrow_sat_s32
-};
+#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
 
 static inline int gen_neon_add(int size)
 {
     switch (size) {
-    case 0: gen_op_neon_add_u8(); break;
-    case 1: gen_op_neon_add_u16(); break;
+    case 0: gen_helper_neon_add_u8(CPU_T001); break;
+    case 1: gen_helper_neon_add_u16(CPU_T001); break;
     case 2: gen_op_addl_T0_T1(); break;
     default: return 1;
     }
     return 0;
 }
 
-/* 32-bit pairwise ops end up the same as the elementsise versions.  */
-#define gen_op_neon_pmax_s32  gen_op_neon_max_s32
-#define gen_op_neon_pmax_u32  gen_op_neon_max_u32
-#define gen_op_neon_pmin_s32  gen_op_neon_min_s32
-#define gen_op_neon_pmin_u32  gen_op_neon_min_u32
+static inline void gen_neon_rsb(int size)
+{
+    switch (size) {
+    case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
+    case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
+    case 2: gen_op_rsbl_T0_T1(); break;
+    default: return;
+    }
+}
+
+/* 32-bit pairwise ops end up the same as the elementwise versions.  */
+#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
+#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
+#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
+#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
+
+/* FIXME: This is wrong.  They set the wrong overflow bit.  */
+#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
+#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
+#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
+#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
+
+#define GEN_NEON_INTEGER_OP_ENV(name) do { \
+    switch ((size << 1) | u) { \
+    case 0: \
+        gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+        break; \
+    case 1: \
+        gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+        break; \
+    case 2: \
+        gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+        break; \
+    case 3: \
+        gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+        break; \
+    case 4: \
+        gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+        break; \
+    case 5: \
+        gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+        break; \
+    default: return 1; \
+    }} while (0)
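The ((size << 1) | u) index used by these dispatch macros maps 0..5 onto the s8/u8/s16/u16/s32/u32 helper variants. A small host-side illustration of the mapping (it only prints the selected call; the qadd helper names are the ones declared in helpers.h):

    #include <stdio.h>

    int main(void)
    {
        static const char *name[6] = {
            "neon_qadd_s8", "neon_qadd_u8", "neon_qadd_s16",
            "neon_qadd_u16", "neon_qadd_s32", "neon_qadd_u32",
        };
        int size = 0, u = 1; /* 8-bit, unsigned */
        printf("gen_helper_%s(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1])\n",
               name[(size << 1) | u]);
        return 0;
    }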
 
 #define GEN_NEON_INTEGER_OP(name) do { \
     switch ((size << 1) | u) { \
-    case 0: gen_op_neon_##name##_s8(); break; \
-    case 1: gen_op_neon_##name##_u8(); break; \
-    case 2: gen_op_neon_##name##_s16(); break; \
-    case 3: gen_op_neon_##name##_u16(); break; \
-    case 4: gen_op_neon_##name##_s32(); break; \
-    case 5: gen_op_neon_##name##_u32(); break; \
+    case 0: \
+        gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
+        break; \
+    case 1: \
+        gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
+        break; \
+    case 2: \
+        gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
+        break; \
+    case 3: \
+        gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
+        break; \
+    case 4: \
+        gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
+        break; \
+    case 5: \
+        gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
+        break; \
     default: return 1; \
     }} while (0)
 
@@ -3392,7 +3356,7 @@ gen_neon_movl_scratch_T0(int scratch)
   uint32_t offset;
 
   offset = offsetof(CPUARMState, vfp.scratch[scratch]);
-  gen_op_neon_setreg_T0(offset);
+  tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
 }
 
 static inline void
@@ -3401,7 +3365,7 @@ gen_neon_movl_scratch_T1(int scratch)
   uint32_t offset;
 
   offset = offsetof(CPUARMState, vfp.scratch[scratch]);
-  gen_op_neon_setreg_T1(offset);
+  tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
 }
 
 static inline void
@@ -3410,7 +3374,7 @@ gen_neon_movl_T0_scratch(int scratch)
   uint32_t offset;
 
   offset = offsetof(CPUARMState, vfp.scratch[scratch]);
-  gen_op_neon_getreg_T0(offset);
+  tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
 }
 
 static inline void
@@ -3419,12 +3383,7 @@ gen_neon_movl_T1_scratch(int scratch)
   uint32_t offset;
 
   offset = offsetof(CPUARMState, vfp.scratch[scratch]);
-  gen_op_neon_getreg_T1(offset);
-}
-
-static inline void gen_op_neon_widen_u32(void)
-{
-    gen_op_movl_T1_im(0);
+  tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
 }
 
 static inline void gen_neon_get_scalar(int size, int reg)
@@ -3434,9 +3393,9 @@ static inline void gen_neon_get_scalar(int size, int reg)
     } else {
         NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
         if (reg & 1)
-            gen_op_neon_dup_low16();
+            gen_neon_dup_low16(cpu_T[0]);
         else
-            gen_op_neon_dup_high16();
+            gen_neon_dup_high16(cpu_T[0]);
     }
 }
 
@@ -3448,8 +3407,8 @@ static void gen_neon_unzip(int reg, int q, int tmp, int size)
         NEON_GET_REG(T0, reg, n);
         NEON_GET_REG(T1, reg, n + 1);
         switch (size) {
-        case 0: gen_op_neon_unzip_u8(); break;
-        case 1: gen_op_neon_zip_u16(); break; /* zip and unzip are the same.  */
+        case 0: gen_helper_neon_unzip_u8(); break;
+        case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same.  */
         case 2: /* no-op */; break;
         default: abort();
         }
@@ -3522,13 +3481,9 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 if (size == 2) {
                     if (load) {
                         tmp = gen_ld32(cpu_T[1], IS_USER(s));
-                        tcg_gen_mov_i32(cpu_T[0], tmp);
-                        dead_tmp(tmp);
-                        NEON_SET_REG(T0, rd, pass);
+                        neon_store_reg(rd, pass, tmp);
                     } else {
-                        NEON_GET_REG(T0, rd, pass);
-                        tmp = new_tmp();
-                        tcg_gen_mov_i32(tmp, cpu_T[0]);
+                        tmp = neon_load_reg(rd, pass);
                         gen_st32(tmp, cpu_T[1], IS_USER(s));
                     }
                     gen_op_addl_T1_im(stride);
@@ -3596,27 +3551,23 @@ static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 switch (size) {
                 case 0:
                     tmp = gen_ld8u(cpu_T[1], IS_USER(s));
-                    tcg_gen_mov_i32(cpu_T[0], tmp);
-                    dead_tmp(tmp);
-                    gen_op_neon_dup_u8(0);
+                    gen_neon_dup_u8(tmp, 0);
                     break;
                 case 1:
                     tmp = gen_ld16u(cpu_T[1], IS_USER(s));
-                    tcg_gen_mov_i32(cpu_T[0], tmp);
-                    dead_tmp(tmp);
-                    gen_op_neon_dup_low16();
+                    gen_neon_dup_low16(tmp);
                     break;
                 case 2:
                     tmp = gen_ld32(cpu_T[0], IS_USER(s));
-                    tcg_gen_mov_i32(cpu_T[0], tmp);
-                    dead_tmp(tmp);
                     break;
                 case 3:
                     return 1;
                 }
                 gen_op_addl_T1_im(1 << size);
-                NEON_SET_REG(T0, rd, 0);
-                NEON_SET_REG(T0, rd, 1);
+                tmp2 = new_tmp();
+                tcg_gen_mov_i32(tmp2, tmp);
+                neon_store_reg(rd, 0, tmp2);
+                neon_store_reg(rd, 1, tmp);
                 rd += stride;
             }
             stride = (1 << size) * nregs;
@@ -3707,12 +3658,158 @@ static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
     tcg_gen_or_i32(dest, t, f);
 }
 
+static inline void gen_neon_narrow(int size, TCGv dest, TCGv src)
+{
+    switch (size) {
+    case 0: gen_helper_neon_narrow_u8(dest, src); break;
+    case 1: gen_helper_neon_narrow_u16(dest, src); break;
+    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
+    default: abort();
+    }
+}
+
+static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv src)
+{
+    switch (size) {
+    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
+    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
+    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
+    default: abort();
+    }
+}
+
+static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv src)
+{
+    switch (size) {
+    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
+    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
+    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
+    default: abort();
+    }
+}
+
+static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
+                                         int q, int u)
+{
+    if (q) {
+        if (u) {
+            switch (size) {
+            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
+            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
+            default: abort();
+            }
+        } else {
+            switch (size) {
+            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
+            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
+            default: abort();
+            }
+        }
+    } else {
+        if (u) {
+            switch (size) {
+            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
+            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
+            default: abort();
+            }
+        } else {
+            switch (size) {
+            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
+            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
+            default: abort();
+            }
+        }
+    }
+}
+
+static inline void gen_neon_widen(TCGv dest, TCGv src, int size, int u)
+{
+    if (u) {
+        switch (size) {
+        case 0: gen_helper_neon_widen_u8(dest, src); break;
+        case 1: gen_helper_neon_widen_u16(dest, src); break;
+        case 2: tcg_gen_extu_i32_i64(dest, src); break;
+        default: abort();
+        }
+    } else {
+        switch (size) {
+        case 0: gen_helper_neon_widen_s8(dest, src); break;
+        case 1: gen_helper_neon_widen_s16(dest, src); break;
+        case 2: tcg_gen_ext_i32_i64(dest, src); break;
+        default: abort();
+        }
+    }
+    dead_tmp(src);
+}
+
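+/* Widening is the inverse of narrowing: each element is sign- or
+   zero-extended to twice its width, e.g. size == 1 with u == 0 turns
+   two signed 16-bit lanes into two signed 32-bit lanes.  */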
+static inline void gen_neon_addl(int size)
+{
+    switch (size) {
+    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
+    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
+    case 2: tcg_gen_add_i64(CPU_V001); break;
+    default: abort();
+    }
+}
+
+static inline void gen_neon_subl(int size)
+{
+    switch (size) {
+    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
+    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
+    case 2: tcg_gen_sub_i64(CPU_V001); break;
+    default: abort();
+    }
+}
+
+static inline void gen_neon_negl(TCGv var, int size)
+{
+    switch (size) {
+    case 0: gen_helper_neon_negl_u16(var, var); break;
+    case 1: gen_helper_neon_negl_u32(var, var); break;
+    case 2: gen_helper_neon_negl_u64(var, var); break;
+    default: abort();
+    }
+}
+
+static inline void gen_neon_addl_saturate(TCGv op0, TCGv op1, int size)
+{
+    switch (size) {
+    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
+    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
+    default: abort();
+    }
+}
+
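+/* The saturating helpers use the usual two's-complement rule:
+   overflow iff both operands have the same sign and the sum's sign
+   differs.  A host-side sketch of the s64 case (assuming the
+   neon_helper.c implementation, with QF as the sticky QC flag):
+
+       int64_t res = a + b;
+       if (((res ^ a) & INT64_MIN) && !((a ^ b) & INT64_MIN)) {
+           env->QF = 1;
+           res = (a < 0) ? INT64_MIN : INT64_MAX;
+       }
+*/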
+static inline void gen_neon_mull(TCGv dest, TCGv a, TCGv b, int size, int u)
+{
+    TCGv tmp;
+
+    switch ((size << 1) | u) {
+    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
+    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
+    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
+    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
+    case 4:
+        tmp = gen_muls_i64_i32(a, b);
+        tcg_gen_mov_i64(dest, tmp);
+        break;
+    case 5:
+        tmp = gen_mulu_i64_i32(a, b);
+        tcg_gen_mov_i64(dest, tmp);
+        break;
+    default: abort();
+    }
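+    /* gen_muls/gen_mulu_i64_i32 free their operands themselves, so
+       only the 8- and 16-bit helper paths still hold a and b.  */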
+    if (size < 2) {
+        dead_tmp(b);
+        dead_tmp(a);
+    }
+}
+
 /* Translate a NEON data processing instruction.  Return nonzero if the
    instruction is invalid.
-   In general we process vectors in 32-bit chunks.  This means we can reuse
-   some of the scalar ops, and hopefully the code generated for 32-bit
-   hosts won't be too awful.  The downside is that the few 64-bit operations
-   (mainly shifts) get complicated.  */
+   We process data in a mixture of 32-bit and 64-bit chunks.
+   Mostly we use 32-bit chunks so we can use normal scalar instructions.  */
 
 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
 {
@@ -3742,41 +3839,70 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
     if ((insn & (1 << 23)) == 0) {
         /* Three register same length.  */
         op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
-        if (size == 3 && (op == 1 || op == 5 || op == 16)) {
+        if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
+                          || op == 10 || op == 11 || op == 16)) {
+            /* 64-bit element instructions.  */
             for (pass = 0; pass < (q ? 2 : 1); pass++) {
-                NEON_GET_REG(T0, rm, pass * 2);
-                NEON_GET_REG(T1, rm, pass * 2 + 1);
-                gen_neon_movl_scratch_T0(0);
-                gen_neon_movl_scratch_T1(1);
-                NEON_GET_REG(T0, rn, pass * 2);
-                NEON_GET_REG(T1, rn, pass * 2 + 1);
+                neon_load_reg64(cpu_V0, rn + pass);
+                neon_load_reg64(cpu_V1, rm + pass);
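+                /* Note the operand order of the shifts below: the
+                   value comes from rm (cpu_V1) and the shift count
+                   from rn (cpu_V0), matching the rn/rm swap applied
+                   to the size < 3 cases further down.  */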
                 switch (op) {
                 case 1: /* VQADD */
                     if (u) {
-                        gen_op_neon_addl_saturate_u64();
+                        gen_helper_neon_add_saturate_u64(CPU_V001);
                     } else {
-                        gen_op_neon_addl_saturate_s64();
+                        gen_helper_neon_add_saturate_s64(CPU_V001);
                     }
                     break;
                 case 5: /* VQSUB */
                     if (u) {
-                        gen_op_neon_subl_saturate_u64();
+                        gen_helper_neon_sub_saturate_u64(CPU_V001);
                     } else {
-                        gen_op_neon_subl_saturate_s64();
+                        gen_helper_neon_sub_saturate_s64(CPU_V001);
+                    }
+                    break;
+                case 8: /* VSHL */
+                    if (u) {
+                        gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
+                    } else {
+                        gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
+                    }
+                    break;
+                case 9: /* VQSHL */
+                    if (u) {
+                        gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
+                                                 cpu_V1, cpu_V0);
+                    } else {
+                        gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
+                                                 cpu_V1, cpu_V0);
+                    }
+                    break;
+                case 10: /* VRSHL */
+                    if (u) {
+                        gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
+                    } else {
+                        gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
+                    }
+                    break;
+                case 11: /* VQRSHL */
+                    if (u) {
+                        gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
+                                                  cpu_V1, cpu_V0);
+                    } else {
+                        gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
+                                                  cpu_V1, cpu_V0);
                     }
                     break;
                 case 16:
                     if (u) {
-                        gen_op_neon_subl_u64();
+                        tcg_gen_sub_i64(CPU_V001);
                     } else {
-                        gen_op_neon_addl_u64();
+                        tcg_gen_add_i64(CPU_V001);
                     }
                     break;
                 default:
                     abort();
                 }
-                NEON_SET_REG(T0, rd, pass * 2);
-                NEON_SET_REG(T1, rd, pass * 2 + 1);
+                neon_store_reg64(cpu_V0, rd + pass);
             }
             return 0;
         }
@@ -3784,13 +3910,13 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
         case 8: /* VSHL */
         case 9: /* VQSHL */
         case 10: /* VRSHL */
-        case 11: /* VQSHL */
-            /* Shift operations have Rn and Rm reversed.  */
+        case 11: /* VQRSHL */
             {
-                int tmp;
-                tmp = rn;
+                int rtmp;
+                /* Shift instruction operands are reversed.  */
+                rtmp = rn;
                 rn = rm;
-                rm = tmp;
+                rm = rtmp;
                 pairwise = 0;
             }
             break;
@@ -3834,19 +3960,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
             GEN_NEON_INTEGER_OP(hadd);
             break;
         case 1: /* VQADD */
-            switch (size << 1| u) {
-            case 0: gen_op_neon_qadd_s8(); break;
-            case 1: gen_op_neon_qadd_u8(); break;
-            case 2: gen_op_neon_qadd_s16(); break;
-            case 3: gen_op_neon_qadd_u16(); break;
-            case 4:
-                gen_helper_add_saturate(cpu_T[0], cpu_T[0], cpu_T[1]);
-                break;
-            case 5:
-                gen_helper_add_usaturate(cpu_T[0], cpu_T[0], cpu_T[1]);
-                break;
-            default: abort();
-            }
+            GEN_NEON_INTEGER_OP_ENV(qadd);
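+            /* GEN_NEON_INTEGER_OP_ENV(qadd) expands to a switch on
+               (size << 1) | u picking the matching
+               gen_helper_neon_qadd_{s,u}{8,16,32} call, with cpu_env
+               passed so the helper can set the QC flag.  */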
             break;
         case 2: /* VRHADD */
             GEN_NEON_INTEGER_OP(rhadd);
@@ -3890,19 +4004,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
             GEN_NEON_INTEGER_OP(hsub);
             break;
         case 5: /* VQSUB */
-            switch ((size << 1) | u) {
-            case 0: gen_op_neon_qsub_s8(); break;
-            case 1: gen_op_neon_qsub_u8(); break;
-            case 2: gen_op_neon_qsub_s16(); break;
-            case 3: gen_op_neon_qsub_u16(); break;
-            case 4:
-                gen_helper_sub_saturate(cpu_T[0], cpu_T[0], cpu_T[1]);
-                break;
-            case 5:
-                gen_helper_sub_usaturate(cpu_T[0], cpu_T[0], cpu_T[1]);
-                break;
-            default: abort();
-            }
+            GEN_NEON_INTEGER_OP_ENV(qsub);
             break;
         case 6: /* VCGT */
             GEN_NEON_INTEGER_OP(cgt);
@@ -3911,76 +4013,16 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
             GEN_NEON_INTEGER_OP(cge);
             break;
         case 8: /* VSHL */
-            switch ((size << 1) | u) {
-            case 0: gen_op_neon_shl_s8(); break;
-            case 1: gen_op_neon_shl_u8(); break;
-            case 2: gen_op_neon_shl_s16(); break;
-            case 3: gen_op_neon_shl_u16(); break;
-            case 4: gen_op_neon_shl_s32(); break;
-            case 5: gen_op_neon_shl_u32(); break;
-#if 0
-            /* ??? Implementing these is tricky because the vector ops work
-               on 32-bit pieces.  */
-            case 6: gen_op_neon_shl_s64(); break;
-            case 7: gen_op_neon_shl_u64(); break;
-#else
-            case 6: case 7: cpu_abort(env, "VSHL.64 not implemented");
-#endif
-            }
+            GEN_NEON_INTEGER_OP(shl);
             break;
         case 9: /* VQSHL */
-            switch ((size << 1) | u) {
-            case 0: gen_op_neon_qshl_s8(); break;
-            case 1: gen_op_neon_qshl_u8(); break;
-            case 2: gen_op_neon_qshl_s16(); break;
-            case 3: gen_op_neon_qshl_u16(); break;
-            case 4: gen_op_neon_qshl_s32(); break;
-            case 5: gen_op_neon_qshl_u32(); break;
-#if 0
-            /* ??? Implementing these is tricky because the vector ops work
-               on 32-bit pieces.  */
-            case 6: gen_op_neon_qshl_s64(); break;
-            case 7: gen_op_neon_qshl_u64(); break;
-#else
-            case 6: case 7: cpu_abort(env, "VQSHL.64 not implemented");
-#endif
-            }
+            GEN_NEON_INTEGER_OP_ENV(qshl);
             break;
         case 10: /* VRSHL */
-            switch ((size << 1) | u) {
-            case 0: gen_op_neon_rshl_s8(); break;
-            case 1: gen_op_neon_rshl_u8(); break;
-            case 2: gen_op_neon_rshl_s16(); break;
-            case 3: gen_op_neon_rshl_u16(); break;
-            case 4: gen_op_neon_rshl_s32(); break;
-            case 5: gen_op_neon_rshl_u32(); break;
-#if 0
-            /* ??? Implementing these is tricky because the vector ops work
-               on 32-bit pieces.  */
-            case 6: gen_op_neon_rshl_s64(); break;
-            case 7: gen_op_neon_rshl_u64(); break;
-#else
-            case 6: case 7: cpu_abort(env, "VRSHL.64 not implemented");
-#endif
-            }
+            GEN_NEON_INTEGER_OP(rshl);
             break;
         case 11: /* VQRSHL */
-            switch ((size << 1) | u) {
-            case 0: gen_op_neon_qrshl_s8(); break;
-            case 1: gen_op_neon_qrshl_u8(); break;
-            case 2: gen_op_neon_qrshl_s16(); break;
-            case 3: gen_op_neon_qrshl_u16(); break;
-            case 4: gen_op_neon_qrshl_s32(); break;
-            case 5: gen_op_neon_qrshl_u32(); break;
-#if 0
-            /* ??? Implementing these is tricky because the vector ops work
-               on 32-bit pieces.  */
-            case 6: gen_op_neon_qrshl_s64(); break;
-            case 7: gen_op_neon_qrshl_u64(); break;
-#else
-            case 6: case 7: cpu_abort(env, "VQRSHL.64 not implemented");
-#endif
-            }
+            GEN_NEON_INTEGER_OP_ENV(qrshl);
             break;
         case 12: /* VMAX */
             GEN_NEON_INTEGER_OP(max);
@@ -4002,8 +4044,8 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     return 1;
             } else { /* VSUB */
                 switch (size) {
-                case 0: gen_op_neon_sub_u8(); break;
-                case 1: gen_op_neon_sub_u16(); break;
+                case 0: gen_helper_neon_sub_u8(CPU_T001); break;
+                case 1: gen_helper_neon_sub_u16(CPU_T001); break;
                 case 2: gen_op_subl_T0_T1(); break;
                 default: return 1;
                 }
@@ -4012,46 +4054,41 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
         case 17:
             if (!u) { /* VTST */
                 switch (size) {
-                case 0: gen_op_neon_tst_u8(); break;
-                case 1: gen_op_neon_tst_u16(); break;
-                case 2: gen_op_neon_tst_u32(); break;
+                case 0: gen_helper_neon_tst_u8(CPU_T001); break;
+                case 1: gen_helper_neon_tst_u16(CPU_T001); break;
+                case 2: gen_helper_neon_tst_u32(CPU_T001); break;
                 default: return 1;
                 }
             } else { /* VCEQ */
                 switch (size) {
-                case 0: gen_op_neon_ceq_u8(); break;
-                case 1: gen_op_neon_ceq_u16(); break;
-                case 2: gen_op_neon_ceq_u32(); break;
+                case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
+                case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
+                case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
                 default: return 1;
                 }
             }
             break;
         case 18: /* Multiply.  */
             switch (size) {
-            case 0: gen_op_neon_mul_u8(); break;
-            case 1: gen_op_neon_mul_u16(); break;
+            case 0: gen_helper_neon_mul_u8(CPU_T001); break;
+            case 1: gen_helper_neon_mul_u16(CPU_T001); break;
             case 2: gen_op_mul_T0_T1(); break;
             default: return 1;
             }
             NEON_GET_REG(T1, rd, pass);
             if (u) { /* VMLS */
-                switch (size) {
-                case 0: gen_op_neon_rsb_u8(); break;
-                case 1: gen_op_neon_rsb_u16(); break;
-                case 2: gen_op_rsbl_T0_T1(); break;
-                default: return 1;
-                }
+                gen_neon_rsb(size);
             } else { /* VMLA */
                 gen_neon_add(size);
             }
             break;
         case 19: /* VMUL */
             if (u) { /* polynomial */
-                gen_op_neon_mul_p8();
+                gen_helper_neon_mul_p8(CPU_T001);
             } else { /* Integer */
                 switch (size) {
-                case 0: gen_op_neon_mul_u8(); break;
-                case 1: gen_op_neon_mul_u16(); break;
+                case 0: gen_helper_neon_mul_u8(CPU_T001); break;
+                case 1: gen_helper_neon_mul_u16(CPU_T001); break;
                 case 2: gen_op_mul_T0_T1(); break;
                 default: return 1;
                 }
@@ -4066,14 +4103,14 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
         case 22: /* Multiply high.  */
             if (!u) { /* VQDMULH */
                 switch (size) {
-                case 1: gen_op_neon_qdmulh_s16(); break;
-                case 2: gen_op_neon_qdmulh_s32(); break;
+                case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
+                case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
                 default: return 1;
                 }
             } else { /* VQRDMULH */
                 switch (size) {
-                case 1: gen_op_neon_qrdmulh_s16(); break;
-                case 2: gen_op_neon_qrdmulh_s32(); break;
+                case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
+                case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
                 default: return 1;
                 }
             }
@@ -4082,8 +4119,8 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
             if (u)
                 return 1;
             switch (size) {
-            case 0: gen_op_neon_padd_u8(); break;
-            case 1: gen_op_neon_padd_u16(); break;
+            case 0: gen_helper_neon_padd_u8(CPU_T001); break;
+            case 1: gen_helper_neon_padd_u16(CPU_T001); break;
             case 2: gen_op_addl_T0_T1(); break;
             default: return 1;
             }
@@ -4091,55 +4128,55 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
         case 26: /* Floating point arithmetic.  */
             switch ((u << 2) | size) {
             case 0: /* VADD */
-                gen_op_neon_add_f32();
+                gen_helper_neon_add_f32(CPU_T001);
                 break;
             case 2: /* VSUB */
-                gen_op_neon_sub_f32();
+                gen_helper_neon_sub_f32(CPU_T001);
                 break;
             case 4: /* VPADD */
-                gen_op_neon_add_f32();
+                gen_helper_neon_add_f32(CPU_T001);
                 break;
             case 6: /* VABD */
-                gen_op_neon_abd_f32();
+                gen_helper_neon_abd_f32(CPU_T001);
                 break;
             default:
                 return 1;
             }
             break;
         case 27: /* Float multiply.  */
-            gen_op_neon_mul_f32();
+            gen_helper_neon_mul_f32(CPU_T001);
             if (!u) {
                 NEON_GET_REG(T1, rd, pass);
                 if (size == 0) {
-                    gen_op_neon_add_f32();
+                    gen_helper_neon_add_f32(CPU_T001);
                 } else {
-                    gen_op_neon_rsb_f32();
+                    gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
                 }
             }
             break;
         case 28: /* Float compare.  */
             if (!u) {
-                gen_op_neon_ceq_f32();
+                gen_helper_neon_ceq_f32(CPU_T001);
             } else {
                 if (size == 0)
-                    gen_op_neon_cge_f32();
+                    gen_helper_neon_cge_f32(CPU_T001);
                 else
-                    gen_op_neon_cgt_f32();
+                    gen_helper_neon_cgt_f32(CPU_T001);
             }
             break;
         case 29: /* Float compare absolute.  */
             if (!u)
                 return 1;
             if (size == 0)
-                gen_op_neon_acge_f32();
+                gen_helper_neon_acge_f32(CPU_T001);
             else
-                gen_op_neon_acgt_f32();
+                gen_helper_neon_acgt_f32(CPU_T001);
             break;
         case 30: /* Float min/max.  */
             if (size == 0)
-                gen_op_neon_max_f32();
+                gen_helper_neon_max_f32(CPU_T001);
             else
-                gen_op_neon_min_f32();
+                gen_helper_neon_min_f32(CPU_T001);
             break;
         case 31:
             if (size == 0)
@@ -4166,6 +4203,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 NEON_SET_REG(T0, rd, pass);
             }
         }
+        /* End of 3 register same size operations.  */
     } else if (insn & (1 << 4)) {
         if ((insn & 0x00380080) != 0) {
             /* Two registers and shift.  */
@@ -4212,181 +4250,221 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 }
 
                 for (pass = 0; pass < count; pass++) {
-                    if (size < 3) {
-                        /* Operands in T0 and T1.  */
-                        gen_op_movl_T1_im(imm);
-                        NEON_GET_REG(T0, rm, pass);
-                    } else {
-                        /* Operands in {T0, T1} and env->vfp.scratch.  */
-                        gen_op_movl_T0_im(imm);
-                        gen_neon_movl_scratch_T0(0);
-                        gen_op_movl_T0_im((int32_t)imm >> 31);
-                        gen_neon_movl_scratch_T0(1);
-                        NEON_GET_REG(T0, rm, pass * 2);
-                        NEON_GET_REG(T1, rm, pass * 2 + 1);
-                    }
-
-                    if (gen_neon_shift_im[op][u][size] == NULL)
-                        return 1;
-                    gen_neon_shift_im[op][u][size]();
-
-                    if (op == 1 || op == 3) {
-                        /* Accumulate.  */
-                        if (size == 3) {
-                            gen_neon_movl_scratch_T0(0);
-                            gen_neon_movl_scratch_T1(1);
-                            NEON_GET_REG(T0, rd, pass * 2);
-                            NEON_GET_REG(T1, rd, pass * 2 + 1);
-                            gen_op_neon_addl_u64();
-                        } else {
-                            NEON_GET_REG(T1, rd, pass);
-                            gen_neon_add(size);
-                        }
-                    } else if (op == 4 || (op == 5 && u)) {
-                        /* Insert */
-                        if (size == 3) {
-                            cpu_abort(env, "VS[LR]I.64 not implemented");
-                        }
-                        switch (size) {
-                        case 0:
-                            if (op == 4)
-                                imm = 0xff >> -shift;
+                    if (size == 3) {
+                        neon_load_reg64(cpu_V0, rm + pass);
+                        tcg_gen_movi_i64(cpu_V1, imm);
+                        switch (op) {
+                        case 0:  /* VSHR */
+                        case 1:  /* VSRA */
+                            if (u)
+                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
                             else
-                                imm = (uint8_t)(0xff << shift);
-                            imm |= imm << 8;
-                            imm |= imm << 16;
+                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
                             break;
-                        case 1:
-                            if (op == 4)
-                                imm = 0xffff >> -shift;
+                        case 2: /* VRSHR */
+                        case 3: /* VRSRA */
+                            if (u)
+                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
                             else
-                                imm = (uint16_t)(0xffff << shift);
-                            imm |= imm << 16;
+                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
                             break;
-                        case 2:
-                            if (op == 4)
-                                imm = 0xffffffffu >> -shift;
+                        case 4: /* VSRI */
+                            if (!u)
+                                return 1;
+                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
+                            break;
+                        case 5: /* VSHL, VSLI */
+                            gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
+                            break;
+                        case 6: /* VQSHL */
+                            if (u)
+                                gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
                             else
-                                imm = 0xffffffffu << shift;
+                                gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
+                            break;
+                        case 7: /* VQSHLU */
+                            gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
                             break;
-                        default:
-                            abort();
                         }
-                        tmp = neon_load_reg(rd, pass);
-                        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
-                        tcg_gen_andi_i32(tmp, tmp, ~imm);
-                        tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
-                    }
-                    if (size == 3) {
-                        NEON_SET_REG(T0, rd, pass * 2);
-                        NEON_SET_REG(T1, rd, pass * 2 + 1);
-                    } else {
+                        if (op == 1 || op == 3) {
+                            /* Accumulate.  */
+                            neon_load_reg64(cpu_V1, rd + pass);
+                            tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
+                        } else if (op == 4 || (op == 5 && u)) {
+                            /* Insert */
+                            cpu_abort(env, "VS[LR]I.64 not implemented");
+                        }
+                        neon_store_reg64(cpu_V0, rd + pass);
+                    } else { /* size < 3 */
+                        /* Operands in T0 and T1.  */
+                        gen_op_movl_T1_im(imm);
+                        NEON_GET_REG(T0, rm, pass);
+                        switch (op) {
+                        case 0:  /* VSHR */
+                        case 1:  /* VSRA */
+                            GEN_NEON_INTEGER_OP(shl);
+                            break;
+                        case 2: /* VRSHR */
+                        case 3: /* VRSRA */
+                            GEN_NEON_INTEGER_OP(rshl);
+                            break;
+                        case 4: /* VSRI */
+                            if (!u)
+                                return 1;
+                            GEN_NEON_INTEGER_OP(shl);
+                            break;
+                        case 5: /* VSHL, VSLI */
+                            switch (size) {
+                            case 0: gen_helper_neon_shl_u8(CPU_T001); break;
+                            case 1: gen_helper_neon_shl_u16(CPU_T001); break;
+                            case 2: gen_helper_neon_shl_u32(CPU_T001); break;
+                            default: return 1;
+                            }
+                            break;
+                        case 6: /* VQSHL */
+                            GEN_NEON_INTEGER_OP_ENV(qshl);
+                            break;
+                        case 7: /* VQSHLU */
+                            switch (size) {
+                            case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
+                            case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
+                            case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
+                            default: return 1;
+                            }
+                            break;
+                        }
+
+                        if (op == 1 || op == 3) {
+                            /* Accumulate.  */
+                            NEON_GET_REG(T1, rd, pass);
+                            gen_neon_add(size);
+                        } else if (op == 4 || (op == 5 && u)) {
+                            /* Insert */
+                            switch (size) {
+                            case 0:
+                                if (op == 4)
+                                    imm = 0xff >> -shift;
+                                else
+                                    imm = (uint8_t)(0xff << shift);
+                                imm |= imm << 8;
+                                imm |= imm << 16;
+                                break;
+                            case 1:
+                                if (op == 4)
+                                    imm = 0xffff >> -shift;
+                                else
+                                    imm = (uint16_t)(0xffff << shift);
+                                imm |= imm << 16;
+                                break;
+                            case 2:
+                                if (op == 4)
+                                    imm = 0xffffffffu >> -shift;
+                                else
+                                    imm = 0xffffffffu << shift;
+                                break;
+                            default:
+                                abort();
+                            }
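+                            /* e.g. VSLI.8 with shift == 3 gives
+                               imm = 0xf8f8f8f8: the shifted value
+                               supplies the bits under the mask and
+                               the old rd contents keep the low three
+                               bits of each byte.  */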
+                            tmp = neon_load_reg(rd, pass);
+                            tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
+                            tcg_gen_andi_i32(tmp, tmp, ~imm);
+                            tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
+                        }
                         NEON_SET_REG(T0, rd, pass);
                     }
                 } /* for pass */
             } else if (op < 10) {
-                /* Shift by immedaiate and narrow:
+                /* Shift by immediate and narrow:
                    VSHRN, VRSHRN, VQSHRN, VQRSHRN.  */
                 shift = shift - (1 << (size + 3));
                 size++;
-                if (size == 3) {
-                    count = q + 1;
-                } else {
-                    count = q ? 4: 2;
-                }
                 switch (size) {
                 case 1:
-                    imm = (uint16_t) shift;
+                    imm = (uint16_t)shift;
                     imm |= imm << 16;
+                    tmp2 = tcg_const_i32(imm);
                     break;
                 case 2:
+                    imm = (uint32_t)shift;
+                    tmp2 = tcg_const_i32(imm);
+                    break;
                 case 3:
-                    imm = shift;
+                    tmp2 = tcg_const_i64(shift);
                     break;
                 default:
                     abort();
                 }
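+                /* The count is negative here (a right shift), and
+                   for size == 1 it is replicated into both 16-bit
+                   lanes, e.g. shift = -5 becomes 0xfffbfffb, read as
+                   -5 by each lane of the 16-bit helpers.  */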
 
-                /* Processing MSB first means we need to do less shuffling at
-                   the end.  */
-                for (pass =  count - 1; pass >= 0; pass--) {
-                    /* Avoid clobbering the second operand before it has been
-                       written.  */
-                    n = pass;
-                    if (rd == rm)
-                        n ^= (count - 1);
-                    else
-                        n = pass;
-
-                    if (size < 3) {
-                        /* Operands in T0 and T1.  */
-                        gen_op_movl_T1_im(imm);
-                        NEON_GET_REG(T0, rm, n);
+                for (pass = 0; pass < 2; pass++) {
+                    if (size == 3) {
+                        neon_load_reg64(cpu_V0, rm + pass);
+                        if (q) {
+                            if (u)
+                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp2);
+                            else
+                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp2);
+                        } else {
+                            if (u)
+                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp2);
+                            else
+                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp2);
+                        }
                     } else {
-                        /* Operands in {T0, T1} and env->vfp.scratch.  */
-                        gen_op_movl_T0_im(imm);
-                        gen_neon_movl_scratch_T0(0);
-                        gen_op_movl_T0_im((int32_t)imm >> 31);
-                        gen_neon_movl_scratch_T0(1);
-                        NEON_GET_REG(T0, rm, n * 2);
-                        NEON_GET_REG(T1, rm, n * 2 + 1);
+                        tmp = neon_load_reg(rm + pass, 0);
+                        gen_neon_shift_narrow(size, tmp, tmp2, q, u);
+                        tcg_gen_extu_i32_i64(cpu_V0, tmp);
+                        dead_tmp(tmp);
+                        tmp = neon_load_reg(rm + pass, 1);
+                        gen_neon_shift_narrow(size, tmp, tmp2, q, u);
+                        tcg_gen_extu_i32_i64(cpu_V1, tmp);
+                        dead_tmp(tmp);
+                        tcg_gen_shli_i64(cpu_V1, cpu_V1, 32);
+                        tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                     }
-
-                    gen_neon_shift_im_narrow[q][u][size - 1]();
-
-                    if (size < 3 && (pass & 1) == 0) {
-                        gen_neon_movl_scratch_T0(0);
+                    tmp = new_tmp();
+                    if (op == 8 && !u) {
+                        gen_neon_narrow(size - 1, tmp, cpu_V0);
                     } else {
-                        uint32_t offset;
-
-                        if (size < 3)
-                            gen_neon_movl_T1_scratch(0);
-
-                        if (op == 8 && !u) {
-                            gen_neon_narrow[size - 1]();
-                        } else {
-                            if (op == 8)
-                                gen_neon_narrow_sats[size - 2]();
-                            else
-                                gen_neon_narrow_satu[size - 1]();
-                        }
-                        if (size == 3)
-                            offset = neon_reg_offset(rd, n);
+                        if (op == 8)
+                            gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
                         else
-                            offset = neon_reg_offset(rd, n >> 1);
-                        gen_op_neon_setreg_T0(offset);
+                            gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
+                    }
+                    if (pass == 0) {
+                        tmp3 = tmp;
+                    } else {
+                        neon_store_reg(rd, 0, tmp3);
+                        neon_store_reg(rd, 1, tmp);
                     }
                 } /* for pass */
             } else if (op == 10) {
                 /* VSHLL */
-                if (q)
+                if (q || size == 3)
                     return 1;
+                tmp = neon_load_reg(rm, 0);
+                tmp2 = neon_load_reg(rm, 1);
                 for (pass = 0; pass < 2; pass++) {
-                    /* Avoid clobbering the input operand.  */
-                    if (rd == rm)
-                        n = 1 - pass;
-                    else
-                        n = pass;
+                    if (pass == 1)
+                        tmp = tmp2;
+
+                    gen_neon_widen(cpu_V0, tmp, size, u);
 
-                    NEON_GET_REG(T0, rm, n);
-                    GEN_NEON_INTEGER_OP(widen);
                     if (shift != 0) {
                         /* The shift is less than the width of the source
-                           type, so in some cases we can just
-                           shift the whole register.  */
-                        if (size == 1 || (size == 0 && u)) {
-                            gen_op_shll_T0_im(shift);
-                            gen_op_shll_T1_im(shift);
-                        } else {
-                            switch (size) {
-                            case 0: gen_op_neon_shll_u16(shift); break;
-                            case 2: gen_op_neon_shll_u64(shift); break;
-                            default: abort();
+                           type, so we can just shift the whole register.  */
+                        tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
+                        if (size < 2 || !u) {
+                            uint64_t imm64;
+                            if (size == 0) {
+                                imm = (0xffu >> (8 - shift));
+                                imm |= imm << 16;
+                            } else {
+                                imm = 0xffff >> (16 - shift);
                             }
+                            imm64 = imm | (((uint64_t)imm) << 32);
+                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                         }
                     }
-                    NEON_SET_REG(T0, rd, n * 2);
-                    NEON_SET_REG(T1, rd, n * 2 + 1);
+                    neon_store_reg64(cpu_V0, rd + pass);
                 }
             } else if (op == 15 || op == 16) {
                 /* VCVT fixed-point.  */
@@ -4458,28 +4536,30 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
 
             for (pass = 0; pass < (q ? 4 : 2); pass++) {
                 if (op & 1 && op < 12) {
-                    NEON_GET_REG(T0, rd, pass);
+                    tmp = neon_load_reg(rd, pass);
                     if (invert) {
                         /* The immediate value has already been inverted, so
                            BIC becomes AND.  */
-                        gen_op_andl_T0_T1();
+                        tcg_gen_andi_i32(tmp, tmp, imm);
                     } else {
-                        gen_op_orl_T0_T1();
+                        tcg_gen_ori_i32(tmp, tmp, imm);
                     }
-                    NEON_SET_REG(T0, rd, pass);
                 } else {
+                    /* VMOV, VMVN.  */
+                    tmp = new_tmp();
                     if (op == 14 && invert) {
-                        uint32_t tmp;
-                        tmp = 0;
+                        uint32_t val;
+                        val = 0;
                         for (n = 0; n < 4; n++) {
                             if (imm & (1 << (n + (pass & 1) * 4)))
-                                tmp |= 0xff << (n * 8);
+                                val |= 0xff << (n * 8);
                         }
-                        gen_op_movl_T1_im(tmp);
+                        tcg_gen_movi_i32(tmp, val);
+                    } else {
+                        tcg_gen_movi_i32(tmp, imm);
                     }
-                    /* VMOV, VMVN.  */
-                    NEON_SET_REG(T1, rd, pass);
                 }
+                neon_store_reg(rd, pass, tmp);
             }
         }
     } else { /* (insn & 0x00800010 == 0x00800010) */
@@ -4513,6 +4593,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 src1_wide = neon_3reg_wide[op][1];
                 src2_wide = neon_3reg_wide[op][2];
 
+                if (size == 0 && (op == 9 || op == 11 || op == 13))
+                    return 1;
+
                 /* Avoid overlapping operands.  Wide source operands are
                    always aligned so will never overlap with wide
                    destinations in problematic ways.  */
@@ -4524,87 +4607,69 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     gen_neon_movl_scratch_T0(2);
                 }
                 for (pass = 0; pass < 2; pass++) {
-                    /* Load the second operand into env->vfp.scratch.
-                       Also widen narrow operands.  */
-                    if (src2_wide) {
-                        NEON_GET_REG(T0, rm, pass * 2);
-                        NEON_GET_REG(T1, rm, pass * 2 + 1);
+                    if (src1_wide) {
+                        neon_load_reg64(cpu_V0, rn + pass);
                     } else {
-                        if (pass == 1 && rd == rm) {
-                            if (prewiden) {
-                                gen_neon_movl_T0_scratch(2);
-                            } else {
-                                gen_neon_movl_T1_scratch(2);
-                            }
+                        if (pass == 1 && rd == rn) {
+                            gen_neon_movl_T0_scratch(2);
+                            tmp = new_tmp();
+                            tcg_gen_mov_i32(tmp, cpu_T[0]);
                         } else {
-                            if (prewiden) {
-                                NEON_GET_REG(T0, rm, pass);
-                            } else {
-                                NEON_GET_REG(T1, rm, pass);
-                            }
+                            tmp = neon_load_reg(rn, pass);
+                        }
+                        if (prewiden) {
+                            gen_neon_widen(cpu_V0, tmp, size, u);
                         }
                     }
-                    if (prewiden && !src2_wide) {
-                        GEN_NEON_INTEGER_OP(widen);
-                    }
-                    if (prewiden || src2_wide) {
-                        gen_neon_movl_scratch_T0(0);
-                        gen_neon_movl_scratch_T1(1);
-                    }
-
-                    /* Load the first operand.  */
-                    if (src1_wide) {
-                        NEON_GET_REG(T0, rn, pass * 2);
-                        NEON_GET_REG(T1, rn, pass * 2 + 1);
+                    if (src2_wide) {
+                        neon_load_reg64(cpu_V1, rm + pass);
                     } else {
-                        if (pass == 1 && rd == rn) {
+                        if (pass == 1 && rd == rm) {
                             gen_neon_movl_T0_scratch(2);
+                            tmp2 = new_tmp();
+                            tcg_gen_mov_i32(tmp2, cpu_T[0]);
                         } else {
-                            NEON_GET_REG(T0, rn, pass);
+                            tmp2 = neon_load_reg(rm, pass);
+                        }
+                        if (prewiden) {
+                            gen_neon_widen(cpu_V1, tmp2, size, u);
                         }
-                    }
-                    if (prewiden && !src1_wide) {
-                        GEN_NEON_INTEGER_OP(widen);
                     }
                     switch (op) {
                     case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
-                        switch (size) {
-                        case 0: gen_op_neon_addl_u16(); break;
-                        case 1: gen_op_neon_addl_u32(); break;
-                        case 2: gen_op_neon_addl_u64(); break;
-                        default: abort();
-                        }
+                        gen_neon_addl(size);
                         break;
                     case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
-                        switch (size) {
-                        case 0: gen_op_neon_subl_u16(); break;
-                        case 1: gen_op_neon_subl_u32(); break;
-                        case 2: gen_op_neon_subl_u64(); break;
-                        default: abort();
-                        }
+                        gen_neon_subl(size);
                         break;
                     case 5: case 7: /* VABAL, VABDL */
                         switch ((size << 1) | u) {
-                        case 0: gen_op_neon_abdl_s16(); break;
-                        case 1: gen_op_neon_abdl_u16(); break;
-                        case 2: gen_op_neon_abdl_s32(); break;
-                        case 3: gen_op_neon_abdl_u32(); break;
-                        case 4: gen_op_neon_abdl_s64(); break;
-                        case 5: gen_op_neon_abdl_u64(); break;
+                        case 0:
+                            gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
+                            break;
+                        case 1:
+                            gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
+                            break;
+                        case 2:
+                            gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
+                            break;
+                        case 3:
+                            gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
+                            break;
+                        case 4:
+                            gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
+                            break;
+                        case 5:
+                            gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
+                            break;
                         default: abort();
                         }
+                        dead_tmp(tmp2);
+                        dead_tmp(tmp);
                         break;
                     case 8: case 9: case 10: case 11: case 12: case 13:
                         /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
-                        switch ((size << 1) | u) {
-                        case 0: gen_op_neon_mull_s8(); break;
-                        case 1: gen_op_neon_mull_u8(); break;
-                        case 2: gen_op_neon_mull_s16(); break;
-                        case 3: gen_op_neon_mull_u16(); break;
-                        case 4: gen_op_imull_T0_T1(); break;
-                        case 5: gen_op_mull_T0_T1(); break;
-                        default: abort();
-                        }
+                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                         break;
                     case 14: /* Polynomial VMULL */
                         cpu_abort(env, "Polynomial VMULL not implemented");
@@ -4615,72 +4680,71 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
                         /* Accumulate.  */
                         if (op == 10 || op == 11) {
-                            switch (size) {
-                            case 0: gen_op_neon_negl_u16(); break;
-                            case 1: gen_op_neon_negl_u32(); break;
-                            case 2: gen_op_neon_negl_u64(); break;
-                            default: abort();
-                            }
+                            gen_neon_negl(cpu_V0, size);
                         }
 
-                        gen_neon_movl_scratch_T0(0);
-                        gen_neon_movl_scratch_T1(1);
-
                         if (op != 13) {
-                            NEON_GET_REG(T0, rd, pass * 2);
-                            NEON_GET_REG(T1, rd, pass * 2 + 1);
+                            neon_load_reg64(cpu_V1, rd + pass);
                         }
 
                         switch (op) {
                         case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
-                            switch (size) {
-                            case 0: gen_op_neon_addl_u16(); break;
-                            case 1: gen_op_neon_addl_u32(); break;
-                            case 2: gen_op_neon_addl_u64(); break;
-                            default: abort();
-                            }
+                            gen_neon_addl(size);
                             break;
                         case 9: case 11: /* VQDMLAL, VQDMLSL */
-                            switch (size) {
-                            case 1: gen_op_neon_addl_saturate_s32(); break;
-                            case 2: gen_op_neon_addl_saturate_s64(); break;
-                            default: abort();
-                            }
+                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
+                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
+                            break;
-                            /* Fall through.  */
                         case 13: /* VQDMULL */
-                            switch (size) {
-                            case 1: gen_op_neon_addl_saturate_s32(); break;
-                            case 2: gen_op_neon_addl_saturate_s64(); break;
-                            default: abort();
-                            }
+                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                             break;
                         default:
                             abort();
                         }
-                        NEON_SET_REG(T0, rd, pass * 2);
-                        NEON_SET_REG(T1, rd, pass * 2 + 1);
+                        neon_store_reg64(cpu_V0, rd + pass);
                     } else if (op == 4 || op == 6) {
                         /* Narrowing operation.  */
+                        tmp = new_tmp();
                         if (u) {
                             switch (size) {
-                            case 0: gen_op_neon_narrow_high_u8(); break;
-                            case 1: gen_op_neon_narrow_high_u16(); break;
-                            case 2: gen_op_movl_T0_T1(); break;
+                            case 0:
+                                gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
+                                break;
+                            case 1:
+                                gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
+                                break;
+                            case 2:
+                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
+                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+                                break;
                             default: abort();
                             }
                         } else {
                             switch (size) {
-                            case 0: gen_op_neon_narrow_high_round_u8(); break;
-                            case 1: gen_op_neon_narrow_high_round_u16(); break;
-                            case 2: gen_op_neon_narrow_high_round_u32(); break;
+                            case 0:
+                                gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
+                                break;
+                            case 1:
+                                gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
+                                break;
+                            case 2:
+                                tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
+                                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
+                                tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+                                break;
                             default: abort();
                             }
                         }
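+                        /* The rounding variants add half the weight
+                           of the discarded low half first (the
+                           1u << 31 addend above for size == 2), so
+                           the retained high half is rounded to
+                           nearest.  */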
-                        NEON_SET_REG(T0, rd, pass);
+                        if (pass == 0) {
+                            tmp3 = tmp;
+                        } else {
+                            neon_store_reg(rd, 0, tmp3);
+                            neon_store_reg(rd, 1, tmp);
+                        }
                     } else {
                         /* Write back the result.  */
-                        NEON_SET_REG(T0, rd, pass * 2);
-                        NEON_SET_REG(T1, rd, pass * 2 + 1);
+                        neon_store_reg64(cpu_V0, rd + pass);
                     }
                 }
             } else {
@@ -4702,22 +4766,22 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         NEON_GET_REG(T1, rn, pass);
                         if (op == 12) {
                             if (size == 1) {
-                                gen_op_neon_qdmulh_s16();
+                                gen_helper_neon_qdmulh_s16(CPU_T0E01);
                             } else {
-                                gen_op_neon_qdmulh_s32();
+                                gen_helper_neon_qdmulh_s32(CPU_T0E01);
                             }
                         } else if (op == 13) {
                             if (size == 1) {
-                                gen_op_neon_qrdmulh_s16();
+                                gen_helper_neon_qrdmulh_s16(CPU_T0E01);
                             } else {
-                                gen_op_neon_qrdmulh_s32();
+                                gen_helper_neon_qrdmulh_s32(CPU_T0E01);
                             }
                         } else if (op & 1) {
-                            gen_op_neon_mul_f32();
+                            gen_helper_neon_mul_f32(CPU_T001);
                         } else {
                             switch (size) {
-                            case 0: gen_op_neon_mul_u8(); break;
-                            case 1: gen_op_neon_mul_u16(); break;
+                            case 0: gen_helper_neon_mul_u8(CPU_T001); break;
+                            case 1: gen_helper_neon_mul_u16(CPU_T001); break;
                             case 2: gen_op_mul_T0_T1(); break;
                             default: return 1;
                             }
@@ -4730,18 +4794,13 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                                 gen_neon_add(size);
                                 break;
                             case 1:
-                                gen_op_neon_add_f32();
+                                gen_helper_neon_add_f32(CPU_T001);
                                 break;
                             case 4:
-                                switch (size) {
-                                case 0: gen_op_neon_rsb_u8(); break;
-                                case 1: gen_op_neon_rsb_u16(); break;
-                                case 2: gen_op_rsbl_T0_T1(); break;
-                                default: return 1;
-                                }
+                                gen_neon_rsb(size);
                                 break;
                             case 5:
-                                gen_op_neon_rsb_f32();
+                                gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
                                 break;
                             default:
                                 abort();
@@ -4756,81 +4815,46 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 case 7: /* VQDMLSL scalar */
                 case 10: /* VMULL scalar */
                 case 11: /* VQDMULL scalar */
-                    if (rd == rn) {
-                        /* Save overlapping operands before they are
-                           clobbered.  */
-                        NEON_GET_REG(T0, rn, 1);
-                        gen_neon_movl_scratch_T0(2);
-                    }
+                    if (size == 0 && (op == 3 || op == 7 || op == 11))
+                        return 1;
+
                     gen_neon_get_scalar(size, rm);
-                    gen_neon_movl_scratch_T0(3);
+                    NEON_GET_REG(T1, rn, 1);
+
                     for (pass = 0; pass < 2; pass++) {
-                        if (pass != 0) {
-                            gen_neon_movl_T0_scratch(3);
-                        }
-                        if (pass != 0 && rd == rn) {
-                            gen_neon_movl_T1_scratch(2);
+                        if (pass == 0) {
+                            tmp = neon_load_reg(rn, 0);
                         } else {
-                            NEON_GET_REG(T1, rn, pass);
-                        }
-                        switch ((size << 1) | u) {
-                        case 0: gen_op_neon_mull_s8(); break;
-                        case 1: gen_op_neon_mull_u8(); break;
-                        case 2: gen_op_neon_mull_s16(); break;
-                        case 3: gen_op_neon_mull_u16(); break;
-                        case 4: gen_op_imull_T0_T1(); break;
-                        case 5: gen_op_mull_T0_T1(); break;
-                        default: abort();
+                            tmp = new_tmp();
+                            tcg_gen_mov_i32(tmp, cpu_T[1]);
                         }
+                        tmp2 = new_tmp();
+                        tcg_gen_mov_i32(tmp2, cpu_T[0]);
+                        gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
                         if (op == 6 || op == 7) {
-                            switch (size) {
-                            case 0: gen_op_neon_negl_u16(); break;
-                            case 1: gen_op_neon_negl_u32(); break;
-                            case 2: gen_op_neon_negl_u64(); break;
-                            default: abort();
-                            }
+                            gen_neon_negl(cpu_V0, size);
+                        }
+                        if (op != 11) {
+                            neon_load_reg64(cpu_V1, rd + pass);
                         }
-                        gen_neon_movl_scratch_T0(0);
-                        gen_neon_movl_scratch_T1(1);
-                        NEON_GET_REG(T0, rd, pass * 2);
-                        NEON_GET_REG(T1, rd, pass * 2 + 1);
                         switch (op) {
                         case 2: case 6:
-                            switch (size) {
-                            case 0: gen_op_neon_addl_u16(); break;
-                            case 1: gen_op_neon_addl_u32(); break;
-                            case 2: gen_op_neon_addl_u64(); break;
-                            default: abort();
-                            }
+                            gen_neon_addl(size);
                             break;
                         case 3: case 7:
-                            switch (size) {
-                            case 1:
-                                gen_op_neon_addl_saturate_s32();
-                                gen_op_neon_addl_saturate_s32();
-                                break;
-                            case 2:
-                                gen_op_neon_addl_saturate_s64();
-                                gen_op_neon_addl_saturate_s64();
-                                break;
-                            default: abort();
-                            }
+                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
+                            gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
                             break;
                         case 10:
                             /* no-op */
                             break;
                         case 11:
-                            switch (size) {
-                            case 1: gen_op_neon_addl_saturate_s32(); break;
-                            case 2: gen_op_neon_addl_saturate_s64(); break;
-                            default: abort();
-                            }
+                            gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
                             break;
                         default:
                             abort();
                         }
-                        NEON_SET_REG(T0, rd, pass * 2);
-                        NEON_SET_REG(T1, rd, pass * 2 + 1);
+                        neon_store_reg64(cpu_V0, rd + pass);
                     }
                     break;
                 default: /* 14 and 15 are RESERVED */
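
For reference, the VQDML* paths above now go through gen_neon_addl_saturate, which for size == 2 amounts to a signed saturating 64-bit add that flags saturation. A minimal standalone sketch of that operation (the function name and the qc out-parameter standing in for the FPSCR QC bit are illustrative, not the helper's actual interface):

    #include <stdint.h>

    /* Signed saturating 64-bit add: clamp on overflow and record
       saturation.  Overflow occurs iff the operands share a sign and
       the result's sign differs.  */
    static uint64_t qadd_s64(uint64_t a, uint64_t b, int *qc)
    {
        uint64_t res = a + b;
        if (((res ^ a) & (1ULL << 63)) && !((a ^ b) & (1ULL << 63))) {
            res = ((int64_t)a < 0) ? (uint64_t)INT64_MIN : INT64_MAX;
            *qc = 1;
        }
        return res;
    }
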
@@ -4840,29 +4864,53 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
         } else { /* size == 3 */
             if (!u) {
                 /* Extract.  */
-                int reg;
                 imm = (insn >> 8) & 0xf;
-                reg = rn;
-                count = q ? 4 : 2;
-                n = imm >> 2;
-                NEON_GET_REG(T0, reg, n);
-                for (pass = 0; pass < count; pass++) {
-                    n++;
-                    if (n > count) {
-                        reg = rm;
-                        n -= count;
+                count = q + 1;
+
+                if (imm > 7 && !q)
+                    return 1;
+
+                if (imm == 0) {
+                    neon_load_reg64(cpu_V0, rn);
+                    if (q) {
+                        neon_load_reg64(cpu_V1, rn + 1);
                     }
-                    if (imm & 3) {
-                        NEON_GET_REG(T1, reg, n);
-                        gen_op_neon_extract((insn << 3) & 0x1f);
+                } else if (imm == 8) {
+                    neon_load_reg64(cpu_V0, rn + 1);
+                    if (q) {
+                        neon_load_reg64(cpu_V1, rm);
                     }
-                    /* ??? This is broken if rd and rm overlap */
-                    NEON_SET_REG(T0, rd, pass);
-                    if (imm & 3) {
-                        gen_op_movl_T0_T1();
+                } else if (q) {
+                    tmp = tcg_temp_new(TCG_TYPE_I64);
+                    if (imm < 8) {
+                        neon_load_reg64(cpu_V0, rn);
+                        neon_load_reg64(tmp, rn + 1);
+                    } else {
+                        neon_load_reg64(cpu_V0, rn + 1);
+                        neon_load_reg64(tmp, rm);
+                    }
+                    tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
+                    tcg_gen_shli_i64(cpu_V1, tmp, 64 - ((imm & 7) * 8));
+                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
+                    if (imm < 8) {
+                        neon_load_reg64(cpu_V1, rm);
                     } else {
-                        NEON_GET_REG(T0, reg, n);
+                        neon_load_reg64(cpu_V1, rm + 1);
+                        imm -= 8;
                     }
+                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
+                    tcg_gen_shri_i64(tmp, tmp, imm * 8);
+                    tcg_gen_or_i64(cpu_V1, cpu_V1, tmp);
+                } else {
+                    neon_load_reg64(cpu_V0, rn);
+                    tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
+                    neon_load_reg64(cpu_V1, rm);
+                    tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
+                    tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
+                }
+                neon_store_reg64(cpu_V0, rd);
+                if (q) {
+                    neon_store_reg64(cpu_V1, rd + 1);
                 }
             } else if ((insn & (1 << 11)) == 0) {
                 /* Two register misc.  */
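
The rewritten VEXT path above replaces per-word scratch shuffling with 64-bit shift-and-or sequences; per 64-bit lane the extraction it emits is equivalent to the following sketch (name and signature illustrative):

    #include <stdint.h>

    /* VEXT on one 64-bit lane: the low bytes of the result come from
       bytes imm..7 of lo, the remainder from the low bytes of hi.
       Assumes 0 < imm < 8, matching the guarded cases above.  */
    static uint64_t ext64(uint64_t lo, uint64_t hi, unsigned imm)
    {
        return (lo >> (imm * 8)) | (hi << (64 - imm * 8));
    }
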
@@ -4897,28 +4945,25 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     break;
                 case 4: case 5: /* VPADDL */
                 case 12: case 13: /* VPADAL */
-                    if (size < 2)
-                        goto elementwise;
                     if (size == 3)
                         return 1;
-                    for (pass = 0; pass < (q ? 2 : 1); pass++) {
-                        NEON_GET_REG(T0, rm, pass * 2);
-                        NEON_GET_REG(T1, rm, pass * 2 + 1);
-                        if (op & 1)
-                            gen_op_neon_paddl_u32();
-                        else
-                            gen_op_neon_paddl_s32();
+                    for (pass = 0; pass < q + 1; pass++) {
+                        tmp = neon_load_reg(rm, pass * 2);
+                        gen_neon_widen(cpu_V0, tmp, size, op & 1);
+                        tmp = neon_load_reg(rm, pass * 2 + 1);
+                        gen_neon_widen(cpu_V1, tmp, size, op & 1);
+                        switch (size) {
+                        case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
+                        case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
+                        case 2: tcg_gen_add_i64(CPU_V001); break;
+                        default: abort();
+                        }
                         if (op >= 12) {
                             /* Accumulate.  */
-                            gen_neon_movl_scratch_T0(0);
-                            gen_neon_movl_scratch_T1(1);
-
-                            NEON_GET_REG(T0, rd, pass * 2);
-                            NEON_GET_REG(T1, rd, pass * 2 + 1);
-                            gen_op_neon_addl_u64();
+                            neon_load_reg64(cpu_V1, rd + pass);
+                            gen_neon_addl(size);
                         }
-                        NEON_SET_REG(T0, rd, pass * 2);
-                        NEON_SET_REG(T1, rd, pass * 2 + 1);
+                        neon_store_reg64(cpu_V0, rd + pass);
                     }
                     break;
                 case 33: /* VTRN */
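
VPADDL/VPADAL now widen each 32-bit half of the source and add the widened vectors; the architectural effect for VPADDL.U8 on one 32-bit half is sketched below (this illustrates the operation, not the helper's implementation):

    #include <stdint.h>

    /* Pairwise add-long, u8 -> u16: sum adjacent byte pairs into
       16-bit lanes.  */
    static uint32_t paddl_u8(uint32_t x)
    {
        uint32_t lo = (x & 0xff) + ((x >> 8) & 0xff);
        uint32_t hi = ((x >> 16) & 0xff) + ((x >> 24) & 0xff);
        return lo | (hi << 16);
    }
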
@@ -4972,8 +5017,8 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         NEON_GET_REG(T0, rd, n);
                         NEON_GET_REG(T1, rd, n);
                         switch (size) {
-                        case 0: gen_op_neon_zip_u8(); break;
-                        case 1: gen_op_neon_zip_u16(); break;
+                        case 0: gen_helper_neon_zip_u8(); break;
+                        case 1: gen_helper_neon_zip_u16(); break;
                         case 2: /* no-op */; break;
                         default: abort();
                         }
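
The VZIP helpers interleave the elements of the two operands; for bytes within one 32-bit chunk pair the effect is (architectural sketch with illustrative names):

    #include <stdint.h>

    /* VZIP.8 on a 32-bit chunk pair: a0 a1 a2 a3 / b0 b1 b2 b3
       becomes a0 b0 a1 b1 / a2 b2 a3 b3.  */
    static void zip_u8(uint32_t *t0, uint32_t *t1)
    {
        uint32_t a = *t0, b = *t1;
        *t0 = (a & 0xff) | ((b & 0xff) << 8)
            | ((a & 0xff00) << 8) | ((b & 0xff00) << 16);
        *t1 = ((a >> 16) & 0xff) | (((b >> 16) & 0xff) << 8)
            | (((a >> 24) & 0xff) << 16) | (b & 0xff000000);
    }
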
@@ -4987,63 +5032,36 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     }
                     break;
                 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
+                    if (size == 3)
+                        return 1;
                     for (pass = 0; pass < 2; pass++) {
-                        if (rd == rm + 1) {
-                            n = 1 - pass;
-                        } else {
-                            n = pass;
-                        }
-                        NEON_GET_REG(T0, rm, n * 2);
-                        NEON_GET_REG(T1, rm, n * 2 + 1);
+                        neon_load_reg64(cpu_V0, rm + pass);
+                        tmp = new_tmp();
                         if (op == 36 && q == 0) {
-                            switch (size) {
-                            case 0: gen_op_neon_narrow_u8(); break;
-                            case 1: gen_op_neon_narrow_u16(); break;
-                            case 2: /* no-op */ break;
-                            default: return 1;
-                            }
+                            gen_neon_narrow(size, tmp, cpu_V0);
                         } else if (q) {
-                            switch (size) {
-                            case 0: gen_op_neon_narrow_sat_u8(); break;
-                            case 1: gen_op_neon_narrow_sat_u16(); break;
-                            case 2: gen_op_neon_narrow_sat_u32(); break;
-                            default: return 1;
-                            }
+                            gen_neon_narrow_satu(size, tmp, cpu_V0);
                         } else {
-                            switch (size) {
-                            case 0: gen_op_neon_narrow_sat_s8(); break;
-                            case 1: gen_op_neon_narrow_sat_s16(); break;
-                            case 2: gen_op_neon_narrow_sat_s32(); break;
-                            default: return 1;
-                            }
+                            gen_neon_narrow_sats(size, tmp, cpu_V0);
+                        }
+                        if (pass == 0) {
+                            tmp2 = tmp;
+                        } else {
+                            neon_store_reg(rd, 0, tmp2);
+                            neon_store_reg(rd, 1, tmp);
                         }
-                        NEON_SET_REG(T0, rd, n);
                     }
                     break;
                 case 38: /* VSHLL */
-                    if (q)
+                    if (q || size == 3)
                         return 1;
-                    if (rm == rd) {
-                        NEON_GET_REG(T0, rm, 1);
-                        gen_neon_movl_scratch_T0(0);
-                    }
+                    tmp = neon_load_reg(rm, 0);
+                    tmp2 = neon_load_reg(rm, 1);
                     for (pass = 0; pass < 2; pass++) {
-                        if (pass == 1 && rm == rd) {
-                            gen_neon_movl_T0_scratch(0);
-                        } else {
-                            NEON_GET_REG(T0, rm, pass);
-                        }
-                        switch (size) {
-                        case 0: gen_op_neon_widen_high_u8(); break;
-                        case 1: gen_op_neon_widen_high_u16(); break;
-                        case 2:
-                            gen_op_movl_T1_T0();
-                            gen_op_movl_T0_im(0);
-                            break;
-                        default: return 1;
-                        }
-                        NEON_SET_REG(T0, rd, pass * 2);
-                        NEON_SET_REG(T1, rd, pass * 2 + 1);
+                        if (pass == 1)
+                            tmp = tmp2;
+                        gen_neon_widen(cpu_V0, tmp, size, 1);
+                        neon_store_reg64(cpu_V0, rd + pass);
                     }
                     break;
                 default:
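
gen_neon_narrow_satu and gen_neon_narrow_sats halve each element's width, clamping out-of-range values and flagging saturation. Per element, the u32 -> u16 case reduces to (sketch; the qc out-parameter stands in for the QC bit):

    #include <stdint.h>

    /* Unsigned saturating narrow, u32 -> u16.  */
    static uint16_t narrow_sat_u16(uint32_t x, int *qc)
    {
        if (x > 0xffff) {
            *qc = 1;
            return 0xffff;
        }
        return (uint16_t)x;
    }
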
@@ -5068,37 +5086,18 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                                 return 1;
                             gen_rev16(cpu_T[0]);
                             break;
-                        case 4: case 5: /* VPADDL */
-                        case 12: case 13: /* VPADAL */
-                            switch ((size << 1) | (op & 1)) {
-                            case 0: gen_op_neon_paddl_s8(); break;
-                            case 1: gen_op_neon_paddl_u8(); break;
-                            case 2: gen_op_neon_paddl_s16(); break;
-                            case 3: gen_op_neon_paddl_u16(); break;
-                            default: abort();
-                            }
-                            if (op >= 12) {
-                                /* Accumulate */
-                                NEON_GET_REG(T1, rd, pass);
-                                switch (size) {
-                                case 0: gen_op_neon_add_u16(); break;
-                                case 1: gen_op_addl_T0_T1(); break;
-                                default: abort();
-                                }
-                            }
-                            break;
                         case 8: /* CLS */
                             switch (size) {
-                            case 0: gen_op_neon_cls_s8(); break;
-                            case 1: gen_op_neon_cls_s16(); break;
-                            case 2: gen_op_neon_cls_s32(); break;
+                            case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
+                            case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
+                            case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
                             default: return 1;
                             }
                             break;
                         case 9: /* CLZ */
                             switch (size) {
-                            case 0: gen_op_neon_clz_u8(); break;
-                            case 1: gen_op_neon_clz_u16(); break;
+                            case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
+                            case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
                             case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
                             default: return 1;
                             }
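
The CLS helpers count leading sign bits, i.e. how many bits directly below the sign bit equal it. One straightforward way to compute this (a sketch, not the helper's actual code):

    #include <stdint.h>

    /* Count leading sign bits: complement negative inputs, then the
       answer is the leading-zero count minus the sign bit itself.  */
    static int cls32(int32_t x)
    {
        uint32_t v = (x < 0) ? ~(uint32_t)x : (uint32_t)x;
        int n = 0;
        while (n < 32 && !(v & (0x80000000u >> n)))
            n++;
        return n - 1;
    }
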
@@ -5106,7 +5105,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         case 10: /* CNT */
                             if (size != 0)
                                 return 1;
-                            gen_op_neon_cnt_u8();
+                            gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
                             break;
                         case 11: /* VNOT */
                             if (size != 0)
@@ -5115,26 +5114,26 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                             break;
                         case 14: /* VQABS */
                             switch (size) {
-                            case 0: gen_op_neon_qabs_s8(); break;
-                            case 1: gen_op_neon_qabs_s16(); break;
-                            case 2: gen_op_neon_qabs_s32(); break;
+                            case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
+                            case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
+                            case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
                             default: return 1;
                             }
                             break;
                         case 15: /* VQNEG */
                             switch (size) {
-                            case 0: gen_op_neon_qneg_s8(); break;
-                            case 1: gen_op_neon_qneg_s16(); break;
-                            case 2: gen_op_neon_qneg_s32(); break;
+                            case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
+                            case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
+                            case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
                             default: return 1;
                             }
                             break;
                         case 16: case 19: /* VCGT #0, VCLE #0 */
                             gen_op_movl_T1_im(0);
                             switch(size) {
-                            case 0: gen_op_neon_cgt_s8(); break;
-                            case 1: gen_op_neon_cgt_s16(); break;
-                            case 2: gen_op_neon_cgt_s32(); break;
+                            case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
+                            case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
+                            case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
                             default: return 1;
                             }
                             if (op == 19)
@@ -5143,9 +5142,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         case 17: case 20: /* VCGE #0, VCLT #0 */
                             gen_op_movl_T1_im(0);
                             switch(size) {
-                            case 0: gen_op_neon_cge_s8(); break;
-                            case 1: gen_op_neon_cge_s16(); break;
-                            case 2: gen_op_neon_cge_s32(); break;
+                            case 0: gen_helper_neon_cge_s8(CPU_T001); break;
+                            case 1: gen_helper_neon_cge_s16(CPU_T001); break;
+                            case 2: gen_helper_neon_cge_s32(CPU_T001); break;
                             default: return 1;
                             }
                             if (op == 20)
@@ -5154,44 +5153,41 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         case 18: /* VCEQ #0 */
                             gen_op_movl_T1_im(0);
                             switch(size) {
-                            case 0: gen_op_neon_ceq_u8(); break;
-                            case 1: gen_op_neon_ceq_u16(); break;
-                            case 2: gen_op_neon_ceq_u32(); break;
+                            case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
+                            case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
+                            case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
                             default: return 1;
                             }
                             break;
                         case 22: /* VABS */
                             switch(size) {
-                            case 0: gen_op_neon_abs_s8(); break;
-                            case 1: gen_op_neon_abs_s16(); break;
-                            case 2: gen_op_neon_abs_s32(); break;
+                            case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
+                            case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
+                            case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
                             default: return 1;
                             }
                             break;
                         case 23: /* VNEG */
                             gen_op_movl_T1_im(0);
-                            switch(size) {
-                            case 0: gen_op_neon_rsb_u8(); break;
-                            case 1: gen_op_neon_rsb_u16(); break;
-                            case 2: gen_op_rsbl_T0_T1(); break;
-                            default: return 1;
-                            }
+                            if (size == 3)
+                                return 1;
+                            gen_neon_rsb(size);
                             break;
                         case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
                             gen_op_movl_T1_im(0);
-                            gen_op_neon_cgt_f32();
+                            gen_helper_neon_cgt_f32(CPU_T001);
                             if (op == 27)
                                 gen_op_notl_T0();
                             break;
                         case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
                             gen_op_movl_T1_im(0);
-                            gen_op_neon_cge_f32();
+                            gen_helper_neon_cge_f32(CPU_T001);
                             if (op == 28)
                                 gen_op_notl_T0();
                             break;
                         case 26: /* Float VCEQ #0 */
                             gen_op_movl_T1_im(0);
-                            gen_op_neon_ceq_f32();
+                            gen_helper_neon_ceq_f32(CPU_T001);
                             break;
                         case 30: /* Float VABS */
                             gen_vfp_abs(0);
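
The VCGT/VCGE/VCEQ helpers, integer and float alike, produce an all-ones or all-zero mask per element rather than a boolean, which is why VCLE/VCLT and their float counterparts are emitted as the inverse compare followed by gen_op_notl_T0. Per element (sketch):

    #include <stdint.h>

    /* Element compare yields a mask, not a 0/1 flag.  */
    static uint32_t cge_s32(int32_t a, int32_t b)
    {
        return (a >= b) ? 0xffffffffu : 0;
    }
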
@@ -5206,8 +5202,8 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         case 33: /* VTRN */
                             NEON_GET_REG(T1, rd, pass);
                             switch (size) {
-                            case 0: gen_op_neon_trn_u8(); break;
-                            case 1: gen_op_neon_trn_u16(); break;
+                            case 0: gen_helper_neon_trn_u8(); break;
+                            case 1: gen_helper_neon_trn_u16(); break;
                             case 2: abort();
                             default: return 1;
                             }
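
VTRN exchanges the odd elements of one operand with the even elements of the other; for bytes within a 32-bit chunk the exchange is (architectural sketch, illustrative names):

    #include <stdint.h>

    /* VTRN.8 on one 32-bit chunk of each register.  */
    static void trn_u8(uint32_t *rd, uint32_t *rm)
    {
        uint32_t d = *rd, m = *rm;
        *rd = (d & 0x00ff00ff) | ((m << 8) & 0xff00ff00);
        *rm = (m & 0xff00ff00) | ((d >> 8) & 0x00ff00ff);
    }
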
@@ -5281,12 +5277,12 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     NEON_SET_REG(T0, rm, 0);
                 }
                 if (insn & (1 << 16)) {
-                    gen_op_neon_dup_u8(((insn >> 17) & 3) * 8);
+                    gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
                 } else if (insn & (1 << 17)) {
                     if ((insn >> 18) & 1)
-                        gen_op_neon_dup_high16();
+                        gen_neon_dup_high16(cpu_T[0]);
                     else
-                        gen_op_neon_dup_low16();
+                        gen_neon_dup_low16(cpu_T[0]);
                 }
                 for (pass = 0; pass < (q ? 4 : 2); pass++) {
                     NEON_SET_REG(T0, rd, pass);
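
gen_neon_dup_u8 and gen_neon_dup_low16/gen_neon_dup_high16 replicate a single element across a 32-bit value before it is stored to every pass of the destination; the byte case is equivalent to (sketch):

    #include <stdint.h>

    /* Replicate byte lane (x >> shift) & 0xff across all four lanes.  */
    static uint32_t dup_u8(uint32_t x, unsigned shift)
    {
        x = (x >> shift) & 0xff;
        x |= x << 8;
        x |= x << 16;
        return x;
    }
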
@@ -8324,6 +8320,8 @@ static inline int gen_intermediate_code_internal(CPUState *env,
     cpu_F1s = tcg_temp_new(TCG_TYPE_I32);
     cpu_F0d = tcg_temp_new(TCG_TYPE_I64);
     cpu_F1d = tcg_temp_new(TCG_TYPE_I64);
+    cpu_V0 = cpu_F0d;
+    cpu_V1 = cpu_F1d;
     next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
     lj = -1;
     /* Reset the conditional execution bits immediately. This avoids