From: Jakub Jelinek Date: Thu, 16 Jun 2022 08:58:58 +0000 (+0200) Subject: expand: Fix up IFN_ATOMIC_{BIT*,*CMP_0} expansion [PR105951] X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=6a27c430468cb85454b19cef881a1422580657ff;p=platform%2Fupstream%2Fgcc.git expand: Fix up IFN_ATOMIC_{BIT*,*CMP_0} expansion [PR105951] Both IFN_ATOMIC_BIT_TEST_AND_* and IFN_ATOMIC_*_FETCH_CMP_0 ifns are matched if their corresponding optab is implemented for the particular mode. The fact that those optabs are implemented doesn't guarantee they will succeed though, they can just FAIL in their expansion. The expansion in that case uses expand_atomic_fetch_op as fallback, but as has been reported and can be reproduced on the testcases, even those can fail and we didn't have any fallback after that. For IFN_ATOMIC_BIT_TEST_AND_* we actually have such calls. One is done whenever we lost lhs of the ifn at some point in between matching it in tree-ssa-ccp.cc and expansion. The following patch for that case just falls through and expands as if there was a lhs, creates a temporary for it. For the other expand_atomic_fetch_op call in the same expander and for the only expand_atomic_fetch_op call in the other, this falls back the hard way, by constructing a CALL_EXPR to the call from which the ifn has been matched and expanding that. Either it is lucky and manages to expand inline, or it emits a libatomic API call. So that we don't have to rediscover which builtin function to call in the fallback, we record at tree-ssa-ccp.cc time gimple_call_fn (call) in an extra argument to the ifn. 2022-06-16 Jakub Jelinek PR middle-end/105951 * tree-ssa-ccp.cc (optimize_atomic_bit_test_and, optimize_atomic_op_fetch_cmp_0): Remember gimple_call_fn (call) as last argument to the internal functions. * builtins.cc (expand_ifn_atomic_bit_test_and): Adjust for the extra call argument to ifns. 
If expand_atomic_fetch_op fails for the lhs == NULL_TREE case, fall through into the optab code with gen_reg_rtx (mode) as target. If second expand_atomic_fetch_op fails, construct a CALL_EXPR and expand that. (expand_ifn_atomic_op_fetch_cmp_0): Adjust for the extra call argument to ifns. If expand_atomic_fetch_op fails, construct a CALL_EXPR and expand that. * gcc.target/i386/pr105951-1.c: New test. * gcc.target/i386/pr105951-2.c: New test. --- diff --git a/gcc/builtins.cc b/gcc/builtins.cc index b9d89b4..971b18c 100644 --- a/gcc/builtins.cc +++ b/gcc/builtins.cc @@ -6224,7 +6224,7 @@ expand_ifn_atomic_bit_test_and (gcall *call) gcc_assert (flag_inline_atomics); - if (gimple_call_num_args (call) == 4) + if (gimple_call_num_args (call) == 5) model = get_memmodel (gimple_call_arg (call, 3)); rtx mem = get_builtin_sync_mem (ptr, mode); @@ -6250,15 +6250,19 @@ expand_ifn_atomic_bit_test_and (gcall *call) if (lhs == NULL_TREE) { - val = expand_simple_binop (mode, ASHIFT, const1_rtx, - val, NULL_RTX, true, OPTAB_DIRECT); + rtx val2 = expand_simple_binop (mode, ASHIFT, const1_rtx, + val, NULL_RTX, true, OPTAB_DIRECT); if (code == AND) - val = expand_simple_unop (mode, NOT, val, NULL_RTX, true); - expand_atomic_fetch_op (const0_rtx, mem, val, code, model, false); - return; + val2 = expand_simple_unop (mode, NOT, val2, NULL_RTX, true); + if (expand_atomic_fetch_op (const0_rtx, mem, val2, code, model, false)) + return; } - rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE); + rtx target; + if (lhs) + target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE); + else + target = gen_reg_rtx (mode); enum insn_code icode = direct_optab_handler (optab, mode); gcc_assert (icode != CODE_FOR_nothing); create_output_operand (&ops[0], target, mode); @@ -6277,6 +6281,22 @@ expand_ifn_atomic_bit_test_and (gcall *call) val = expand_simple_unop (mode, NOT, val, NULL_RTX, true); rtx result = expand_atomic_fetch_op (gen_reg_rtx (mode), mem, val, code, model, false); + if 
(!result) + { + bool is_atomic = gimple_call_num_args (call) == 5; + tree tcall = gimple_call_arg (call, 3 + is_atomic); + tree fndecl = gimple_call_addr_fndecl (tcall); + tree type = TREE_TYPE (TREE_TYPE (fndecl)); + tree exp = build_call_nary (type, tcall, 2 + is_atomic, ptr, + make_tree (type, val), + is_atomic + ? gimple_call_arg (call, 3) + : integer_zero_node); + result = expand_builtin (exp, gen_reg_rtx (mode), NULL_RTX, + mode, !lhs); + } + if (!lhs) + return; if (integer_onep (flag)) { result = expand_simple_binop (mode, ASHIFTRT, result, bitval, @@ -6308,7 +6328,7 @@ expand_ifn_atomic_op_fetch_cmp_0 (gcall *call) gcc_assert (flag_inline_atomics); - if (gimple_call_num_args (call) == 4) + if (gimple_call_num_args (call) == 5) model = get_memmodel (gimple_call_arg (call, 3)); rtx mem = get_builtin_sync_mem (ptr, mode); @@ -6369,6 +6389,21 @@ expand_ifn_atomic_op_fetch_cmp_0 (gcall *call) rtx result = expand_atomic_fetch_op (gen_reg_rtx (mode), mem, op, code, model, true); + if (!result) + { + bool is_atomic = gimple_call_num_args (call) == 5; + tree tcall = gimple_call_arg (call, 3 + is_atomic); + tree fndecl = gimple_call_addr_fndecl (tcall); + tree type = TREE_TYPE (TREE_TYPE (fndecl)); + tree exp = build_call_nary (type, tcall, + 2 + is_atomic, ptr, arg, + is_atomic + ? 
gimple_call_arg (call, 3) + : integer_zero_node); + result = expand_builtin (exp, gen_reg_rtx (mode), NULL_RTX, + mode, !lhs); + } + if (lhs) { result = emit_store_flag_force (target, comp, result, const0_rtx, mode, diff --git a/gcc/testsuite/gcc.target/i386/pr105951-1.c b/gcc/testsuite/gcc.target/i386/pr105951-1.c new file mode 100644 index 0000000..ff1c1db --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/pr105951-1.c @@ -0,0 +1,5 @@ +/* PR middle-end/105951 */ +/* { dg-do compile { target ia32 } } */ +/* { dg-options "-O2 -march=i386" } */ + +#include "pr98737-2.c" diff --git a/gcc/testsuite/gcc.target/i386/pr105951-2.c b/gcc/testsuite/gcc.target/i386/pr105951-2.c new file mode 100644 index 0000000..fed77f7 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/pr105951-2.c @@ -0,0 +1,5 @@ +/* PR middle-end/105951 */ +/* { dg-do compile { target ia32 } } */ +/* { dg-options "-O2 -march=i386" } */ + +#include "pr98737-4.c" diff --git a/gcc/tree-ssa-ccp.cc b/gcc/tree-ssa-ccp.cc index 81c9767..58e0fac 100644 --- a/gcc/tree-ssa-ccp.cc +++ b/gcc/tree-ssa-ccp.cc @@ -3789,11 +3789,12 @@ optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip, tree new_lhs = make_ssa_name (TREE_TYPE (lhs)); tree flag = build_int_cst (TREE_TYPE (lhs), use_bool); if (has_model_arg) - g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0), - bit, flag, gimple_call_arg (call, 2)); + g = gimple_build_call_internal (fn, 5, gimple_call_arg (call, 0), + bit, flag, gimple_call_arg (call, 2), + gimple_call_fn (call)); else - g = gimple_build_call_internal (fn, 3, gimple_call_arg (call, 0), - bit, flag); + g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0), + bit, flag, gimple_call_fn (call)); gimple_call_set_lhs (g, new_lhs); gimple_set_location (g, gimple_location (call)); gimple_move_vops (g, call); @@ -4003,14 +4004,16 @@ optimize_atomic_op_fetch_cmp_0 (gimple_stmt_iterator *gsip, gimple *g; tree flag = build_int_cst (TREE_TYPE (lhs), encoded); if (has_model_arg) - g = 
gimple_build_call_internal (fn, 4, flag, + g = gimple_build_call_internal (fn, 5, flag, gimple_call_arg (call, 0), gimple_call_arg (call, 1), - gimple_call_arg (call, 2)); + gimple_call_arg (call, 2), + gimple_call_fn (call)); else - g = gimple_build_call_internal (fn, 3, flag, + g = gimple_build_call_internal (fn, 4, flag, gimple_call_arg (call, 0), - gimple_call_arg (call, 1)); + gimple_call_arg (call, 1), + gimple_call_fn (call)); gimple_call_set_lhs (g, new_lhs); gimple_set_location (g, gimple_location (call)); gimple_move_vops (g, call);