target-arm: Fix shift by immediate and narrow where src, dest overlap
author Peter Maydell <peter.maydell@linaro.org>
Mon, 21 Feb 2011 11:05:22 +0000 (11:05 +0000)
committer Aurelien Jarno <aurelien@aurel32.net>
Mon, 21 Feb 2011 14:39:03 +0000 (15:39 +0100)
For Neon shifts by immediate and narrow, correctly handle the case
where the source and destination registers overlap: the second pass
must use the original register contents, not the results of the
first pass.

This includes a refactoring to pull the size check outside the
loop rather than inside, since there is now very little common
code between the size == 3 and size != 3 cases.
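
To make the hazard concrete, the following minimal standalone sketch
(plain C, not QEMU/TCG code; the toy register file, SHIFT, vshrn() and
shift_narrow() are invented for illustration) models a two-pass shift
right and narrow where the destination D register aliases the second
source register: unless every source half is read before the first
result is stored, pass 1 operates on a value pass 0 already overwrote.

/*
 * Sketch of the overlap hazard.  A shift-right-and-narrow reads a quad
 * source made of D registers rm and rm+1 and writes the D register rd.
 * If rd aliases rm+1, storing the pass-0 result before pass 1 has read
 * rm+1 corrupts the second pass, so the source must be read up front.
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define SHIFT 8

static uint32_t regs[4][2];     /* toy register file: regs[r][half] */

/* narrow one 64-bit source element to 32 bits after a right shift */
static uint32_t shift_narrow(uint32_t hi, uint32_t lo)
{
    uint64_t v = ((uint64_t)hi << 32) | lo;
    return (uint32_t)(v >> SHIFT);
}

static void vshrn(int rd, int rm, int buggy)
{
    uint32_t in[2][2];
    /* fixed ordering: snapshot every source half before any store */
    for (int pass = 0; pass < 2; pass++) {
        in[pass][0] = regs[rm + pass][0];
        in[pass][1] = regs[rm + pass][1];
    }
    for (int pass = 0; pass < 2; pass++) {
        /* buggy ordering re-reads the register file inside the loop */
        uint32_t lo = buggy ? regs[rm + pass][0] : in[pass][0];
        uint32_t hi = buggy ? regs[rm + pass][1] : in[pass][1];
        regs[rd][pass] = shift_narrow(hi, lo); /* clobbers rm+1 if rd == rm+1 */
    }
}

int main(void)
{
    /* rd == rm + 1: the destination aliases the second source register */
    regs[2][0] = 0x100; regs[2][1] = 0;
    regs[3][0] = 0x200; regs[3][1] = 0;
    vshrn(3, 2, 1);
    printf("buggy ordering: 0x%" PRIx32 " 0x%" PRIx32 "\n",
           regs[3][0], regs[3][1]);            /* prints 0x1 0x0 */

    regs[2][0] = 0x100; regs[2][1] = 0;
    regs[3][0] = 0x200; regs[3][1] = 0;
    vshrn(3, 2, 0);
    printf("fixed ordering: 0x%" PRIx32 " 0x%" PRIx32 "\n",
           regs[3][0], regs[3][1]);            /* prints 0x1 0x2 */
    return 0;
}

The patch below applies the same idea in TCG terms: the 64-bit path
loads both source registers into cpu_V0 and cpu_V1 before the pass
loop, and the 32-bit path pre-loads the rm+1 halves into temporaries.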

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
target-arm/translate.c

index fa20e8443f6d90c4b00dfccca2a26eedd7850062..dbd958be783587645dbffa65b40d9b5381b104f0 100644
@@ -4804,64 +4804,68 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
 
                 shift = shift - (1 << (size + 3));
                 size++;
-                switch (size) {
-                case 1:
-                    imm = (uint16_t)shift;
-                    imm |= imm << 16;
-                    tmp2 = tcg_const_i32(imm);
-                    TCGV_UNUSED_I64(tmp64);
-                    break;
-                case 2:
-                    imm = (uint32_t)shift;
-                    tmp2 = tcg_const_i32(imm);
-                    TCGV_UNUSED_I64(tmp64);
-                    break;
-                case 3:
+                if (size == 3) {
                     tmp64 = tcg_const_i64(shift);
-                    TCGV_UNUSED(tmp2);
-                    break;
-                default:
-                    abort();
-                }
-
-                for (pass = 0; pass < 2; pass++) {
-                    if (size == 3) {
-                        neon_load_reg64(cpu_V0, rm + pass);
+                    neon_load_reg64(cpu_V0, rm);
+                    neon_load_reg64(cpu_V1, rm + 1);
+                    for (pass = 0; pass < 2; pass++) {
+                        TCGv_i64 in;
+                        if (pass == 0) {
+                            in = cpu_V0;
+                        } else {
+                            in = cpu_V1;
+                        }
                         if (q) {
                             if (input_unsigned) {
-                                gen_helper_neon_rshl_u64(cpu_V0, cpu_V0,
-                                                         tmp64);
+                                gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
                             } else {
-                                gen_helper_neon_rshl_s64(cpu_V0, cpu_V0,
-                                                         tmp64);
+                                gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
                             }
                         } else {
                             if (input_unsigned) {
-                                gen_helper_neon_shl_u64(cpu_V0, cpu_V0,
-                                                        tmp64);
+                                gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
                             } else {
-                                gen_helper_neon_shl_s64(cpu_V0, cpu_V0,
-                                                        tmp64);
+                                gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
                             }
                         }
+                        tmp = new_tmp();
+                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
+                        neon_store_reg(rd, pass, tmp);
+                    } /* for pass */
+                    tcg_temp_free_i64(tmp64);
+                } else {
+                    if (size == 1) {
+                        imm = (uint16_t)shift;
+                        imm |= imm << 16;
                     } else {
-                        tmp = neon_load_reg(rm + pass, 0);
+                        /* size == 2 */
+                        imm = (uint32_t)shift;
+                    }
+                    tmp2 = tcg_const_i32(imm);
+                    tmp4 = neon_load_reg(rm + 1, 0);
+                    tmp5 = neon_load_reg(rm + 1, 1);
+                    for (pass = 0; pass < 2; pass++) {
+                        if (pass == 0) {
+                            tmp = neon_load_reg(rm, 0);
+                        } else {
+                            tmp = tmp4;
+                        }
                         gen_neon_shift_narrow(size, tmp, tmp2, q,
                                               input_unsigned);
-                        tmp3 = neon_load_reg(rm + pass, 1);
+                        if (pass == 0) {
+                            tmp3 = neon_load_reg(rm, 1);
+                        } else {
+                            tmp3 = tmp5;
+                        }
                         gen_neon_shift_narrow(size, tmp3, tmp2, q,
                                               input_unsigned);
                         tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
                         dead_tmp(tmp);
                         dead_tmp(tmp3);
-                    }
-                    tmp = new_tmp();
-                    gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
-                    neon_store_reg(rd, pass, tmp);
-                } /* for pass */
-                if (size == 3) {
-                    tcg_temp_free_i64(tmp64);
-                } else {
+                        tmp = new_tmp();
+                        gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
+                        neon_store_reg(rd, pass, tmp);
+                    } /* for pass */
                     tcg_temp_free_i32(tmp2);
                 }
             } else if (op == 10) {