From: Aurelien Jarno
Date: Wed, 30 Sep 2009 21:09:35 +0000 (+0200)
Subject: tcg: add ext{8,16,32}u_i{32,64} TCG ops
X-Git-Tag: TizenStudio_2.0_p2.3.2~208^2~10398
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=cfc86988a830d89ed22433af83711847d7859b15;p=sdk%2Femulator%2Fqemu.git

tcg: add ext{8,16,32}u_i{32,64} TCG ops

Currently, zero-extension ops are implemented as an and op with a
constant. This is then caught in some backends and replaced by a
zero-extension instruction. While this works well on RISC machines, it
adds a useless register move on non-RISC machines.

Example on x86:

  ext16u_i32 r1, r2

is translated into

  mov    %eax,%ebx
  movzwl %bx, %ebx

while the optimized version should be:

  movzwl %ax, %ebx

This patch adds ext{8,16,32}u_i{32,64} TCG ops that can be implemented
in the backends to avoid emitting useless register moves.

Signed-off-by: Aurelien Jarno
---

diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index 7cb6934..faf2e8b 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -1189,16 +1189,22 @@ static inline void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg)
 #endif
 }
 
-/* These are currently just for convenience.
-   We assume a target will recognise these automatically .  */
 static inline void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg)
 {
+#ifdef TCG_TARGET_HAS_ext8u_i32
+    tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg);
+#else
     tcg_gen_andi_i32(ret, arg, 0xffu);
+#endif
 }
 
 static inline void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg)
 {
+#ifdef TCG_TARGET_HAS_ext16u_i32
+    tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg);
+#else
     tcg_gen_andi_i32(ret, arg, 0xffffu);
+#endif
 }
 
 /* Note: we assume the two high bytes are set to zero */
@@ -1358,17 +1364,29 @@ static inline void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg)
 
 static inline void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg)
 {
+#ifdef TCG_TARGET_HAS_ext8u_i64
+    tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg);
+#else
     tcg_gen_andi_i64(ret, arg, 0xffu);
+#endif
 }
 
 static inline void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg)
 {
+#ifdef TCG_TARGET_HAS_ext16u_i64
+    tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg);
+#else
     tcg_gen_andi_i64(ret, arg, 0xffffu);
+#endif
 }
 
 static inline void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg)
 {
+#ifdef TCG_TARGET_HAS_ext32u_i64
+    tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg);
+#else
     tcg_gen_andi_i64(ret, arg, 0xffffffffu);
+#endif
 }
 
 /* Note: we assume the target supports move between 32 and 64 bit
@@ -1382,7 +1400,7 @@ static inline void tcg_gen_trunc_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
    registers */
 static inline void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
 {
-    tcg_gen_andi_i64(ret, MAKE_TCGV_I64(GET_TCGV_I32(arg)), 0xffffffffu);
+    tcg_gen_ext32u_i64(ret, MAKE_TCGV_I64(GET_TCGV_I32(arg)));
 }
 
 /* Note: we assume the target supports move between 32 and 64 bit
diff --git a/tcg/tcg-opc.h b/tcg/tcg-opc.h
index 3a095fc..b7f3fd7 100644
--- a/tcg/tcg-opc.h
+++ b/tcg/tcg-opc.h
@@ -89,6 +89,12 @@ DEF2(ext8s_i32, 1, 1, 0, 0)
 #ifdef TCG_TARGET_HAS_ext16s_i32
 DEF2(ext16s_i32, 1, 1, 0, 0)
 #endif
+#ifdef TCG_TARGET_HAS_ext8u_i32
+DEF2(ext8u_i32, 1, 1, 0, 0)
+#endif
+#ifdef TCG_TARGET_HAS_ext16u_i32
+DEF2(ext16u_i32, 1, 1, 0, 0)
+#endif
 #ifdef TCG_TARGET_HAS_bswap16_i32
 DEF2(bswap16_i32, 1, 1, 0, 0)
 #endif
@@ -152,6 +158,15 @@ DEF2(ext16s_i64, 1, 1, 0, 0)
 #ifdef TCG_TARGET_HAS_ext32s_i64
 DEF2(ext32s_i64, 1, 1, 0, 0)
 #endif
+#ifdef TCG_TARGET_HAS_ext8u_i64
+DEF2(ext8u_i64, 1, 1, 0, 0)
+#endif
+#ifdef TCG_TARGET_HAS_ext16u_i64
+DEF2(ext16u_i64, 1, 1, 0, 0)
+#endif
+#ifdef TCG_TARGET_HAS_ext32u_i64
+DEF2(ext32u_i64, 1, 1, 0, 0)
+#endif
 #ifdef TCG_TARGET_HAS_bswap16_i64
 DEF2(bswap16_i64, 1, 1, 0, 0)
 #endif
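
As an illustration of how a backend can pick up the new ops (a hedged
sketch, not part of this patch: the real backend changes go in separate
commits), an i386-style backend could handle the new opcodes in its
tcg_out_op switch roughly as follows. The tcg_out_modrm helper and the
P_EXT 0x0f-prefix flag exist in tcg/i386/tcg-target.c; the exact
placement and the register constraints (the byte source needs a
byte-addressable 'q' register on i386) are assumptions here:

  /* Sketch for the tcg_out_op switch in tcg/i386/tcg-target.c.  */
  case INDEX_op_ext8u_i32:
      /* movzbl: zero-extend the low byte straight into args[0],
         with no separate register move beforehand.  */
      tcg_out_modrm(s, 0xb6 | P_EXT, args[0], args[1]);
      break;
  case INDEX_op_ext16u_i32:
      /* movzwl: the optimized form from the commit message,
         e.g. movzwl %ax,%ebx with no preceding mov.  */
      tcg_out_modrm(s, 0xb7 | P_EXT, args[0], args[1]);
      break;

The backend then advertises the ops with
#define TCG_TARGET_HAS_ext8u_i32 and #define TCG_TARGET_HAS_ext16u_i32
in its tcg-target.h, so that tcg_gen_ext{8,16}u_i32 above emit the new
opcodes instead of falling back to tcg_gen_andi_i32.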