void
ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
- rtx old_dst, rtx new_dst)
+ rtx old_dst, rtx new_dst, enum memmodel model)
{
enum machine_mode mode = GET_MODE (mem);
rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
if (!old_dst)
old_dst = gen_reg_rtx (mode);
- emit_insn (gen_memory_barrier ());
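+ /* ia64 has no acq_rel or seq_cst form of fetchadd, so those models
+ are synthesized as a full barrier followed by fetchadd.acq. */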
+ switch (model)
+ {
+ case MEMMODEL_ACQ_REL:
+ case MEMMODEL_SEQ_CST:
+ emit_insn (gen_memory_barrier ());
+ /* FALLTHRU */
+ case MEMMODEL_RELAXED:
+ case MEMMODEL_ACQUIRE:
+ case MEMMODEL_CONSUME:
+ if (mode == SImode)
+ icode = CODE_FOR_fetchadd_acq_si;
+ else
+ icode = CODE_FOR_fetchadd_acq_di;
+ break;
+ case MEMMODEL_RELEASE:
+ if (mode == SImode)
+ icode = CODE_FOR_fetchadd_rel_si;
+ else
+ icode = CODE_FOR_fetchadd_rel_di;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
- if (mode == SImode)
- icode = CODE_FOR_fetchadd_acq_si;
- else
- icode = CODE_FOR_fetchadd_acq_di;
emit_insn (GEN_FCN (icode) (old_dst, mem, val));
if (new_dst)
}
/* Because of the volatile mem read, we get an ld.acq, which is the
- front half of the full barrier. The end half is the cmpxchg.rel. */
- gcc_assert (MEM_VOLATILE_P (mem));
+ front half of the full barrier. The end half is the cmpxchg.rel.
+ For relaxed and release memory models, we don't need this. But we
+ also don't bother trying to prevent it either. */
+ gcc_assert (model == MEMMODEL_RELAXED
+ || model == MEMMODEL_RELEASE
+ || MEM_VOLATILE_P (mem));
old_reg = gen_reg_rtx (DImode);
cmp_reg = gen_reg_rtx (DImode);
if (new_dst)
emit_move_insn (new_dst, new_reg);
- switch (mode)
+ switch (model)
{
- case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
- case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
- case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
- case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
+ case MEMMODEL_RELAXED:
+ case MEMMODEL_ACQUIRE:
+ case MEMMODEL_CONSUME:
+ switch (mode)
+ {
+ case QImode: icode = CODE_FOR_cmpxchg_acq_qi; break;
+ case HImode: icode = CODE_FOR_cmpxchg_acq_hi; break;
+ case SImode: icode = CODE_FOR_cmpxchg_acq_si; break;
+ case DImode: icode = CODE_FOR_cmpxchg_acq_di; break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case MEMMODEL_RELEASE:
+ case MEMMODEL_ACQ_REL:
+ case MEMMODEL_SEQ_CST:
+ switch (mode)
+ {
+ case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
+ case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
+ case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
+ case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
default:
gcc_unreachable ();
}
case UNSPEC_PIC_CALL:
case UNSPEC_MF:
case UNSPEC_FETCHADD_ACQ:
+ case UNSPEC_FETCHADD_REL:
case UNSPEC_BSP_VALUE:
case UNSPEC_FLUSHRS:
case UNSPEC_BUNDLE_SELECTOR:
break;
case UNSPEC_CMPXCHG_ACQ:
+ case UNSPEC_CMPXCHG_REL:
need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
break;
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
+;; Conversion to C++11 memory model based on
+;; http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
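+;;
+;; In brief, the mapping used below is:
+;;   acquire and seq_cst loads  ->  ld.acq (via a volatile MEM)
+;;   release and seq_cst stores ->  st.rel (via a volatile MEM),
+;;                                  plus a trailing mf for seq_cst
+;;   non-relaxed fences         ->  mf
+;;   compare-and-swap           ->  cmpxchg.acq or cmpxchg.rel, with
+;;                                  a trailing mf for acq_rel/seq_cst
+;;
+;; For example, __atomic_store_n (&x, v, __ATOMIC_SEQ_CST) expands to
+;; "st.rel; mf", while __atomic_load_n (&x, __ATOMIC_SEQ_CST) needs
+;; only a single "ld.acq".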
+
(define_mode_iterator IMODE [QI HI SI DI])
(define_mode_iterator I124MODE [QI HI SI])
(define_mode_iterator I48MODE [SI DI])
(define_code_attr fetchop_name
[(plus "add") (minus "sub") (ior "ior") (xor "xor") (and "and")])
+(define_expand "mem_thread_fence"
+ [(match_operand:SI 0 "const_int_operand" "")] ;; model
+ ""
+{
+ if (INTVAL (operands[0]) != MEMMODEL_RELAXED)
+ emit_insn (gen_memory_barrier ());
+ DONE;
+})
+
(define_expand "memory_barrier"
[(set (match_dup 0)
(unspec:BLK [(match_dup 0)] UNSPEC_MF))]
"mf"
[(set_attr "itanium_class" "syst_m")])
-(define_insn "fetchadd_acq_<mode>"
- [(set (match_operand:I48MODE 0 "gr_register_operand" "=r")
- (match_operand:I48MODE 1 "not_postinc_memory_operand" "+S"))
- (set (match_dup 1)
- (unspec:I48MODE [(match_dup 1)
- (match_operand:I48MODE 2 "fetchadd_operand" "n")]
- UNSPEC_FETCHADD_ACQ))]
- ""
- "fetchadd<modesuffix>.acq %0 = %1, %2"
- [(set_attr "itanium_class" "sem")])
-
-(define_expand "sync_<fetchop_name><mode>"
- [(set (match_operand:IMODE 0 "memory_operand" "")
- (FETCHOP:IMODE (match_dup 0)
- (match_operand:IMODE 1 "general_operand" "")))]
- ""
-{
- ia64_expand_atomic_op (<CODE>, operands[0], operands[1], NULL, NULL);
- DONE;
-})
-
-(define_expand "sync_nand<mode>"
- [(set (match_operand:IMODE 0 "memory_operand" "")
- (not:IMODE
- (and:IMODE (match_dup 0)
- (match_operand:IMODE 1 "general_operand" ""))))]
+(define_expand "atomic_load<mode>"
+ [(match_operand:IMODE 0 "gr_register_operand" "") ;; output
+ (match_operand:IMODE 1 "memory_operand" "") ;; memory
+ (match_operand:SI 2 "const_int_operand" "")] ;; model
""
{
- ia64_expand_atomic_op (NOT, operands[0], operands[1], NULL, NULL);
- DONE;
-})
+ enum memmodel model = (enum memmodel) INTVAL (operands[2]);
-(define_expand "sync_old_<fetchop_name><mode>"
- [(set (match_operand:IMODE 0 "gr_register_operand" "")
- (FETCHOP:IMODE
- (match_operand:IMODE 1 "memory_operand" "")
- (match_operand:IMODE 2 "general_operand" "")))]
- ""
-{
- ia64_expand_atomic_op (<CODE>, operands[1], operands[2], operands[0], NULL);
+ /* Unless the memory model is relaxed, we want to emit ld.acq, which
+ will happen automatically for volatile memories. */
+ gcc_assert (model == MEMMODEL_RELAXED || MEM_VOLATILE_P (operands[1]));
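+ /* No trailing mf is required even for a seq_cst load; the seq_cst
+ store below supplies it. */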
+ emit_move_insn (operands[0], operands[1]);
DONE;
})
-(define_expand "sync_old_nand<mode>"
- [(set (match_operand:IMODE 0 "gr_register_operand" "")
- (not:IMODE
- (and:IMODE (match_operand:IMODE 1 "memory_operand" "")
- (match_operand:IMODE 2 "general_operand" ""))))]
+(define_expand "atomic_store<mode>"
+ [(match_operand:IMODE 0 "memory_operand" "") ;; memory
+ (match_operand:IMODE 1 "gr_reg_or_0_operand" "") ;; input
+ (match_operand:SI 2 "const_int_operand" "")] ;; model
""
{
- ia64_expand_atomic_op (NOT, operands[1], operands[2], operands[0], NULL);
- DONE;
-})
+ enum memmodel model = (enum memmodel) INTVAL (operands[2]);
-(define_expand "sync_new_<fetchop_name><mode>"
- [(set (match_operand:IMODE 0 "gr_register_operand" "")
- (FETCHOP:IMODE
- (match_operand:IMODE 1 "memory_operand" "")
- (match_operand:IMODE 2 "general_operand" "")))]
- ""
-{
- ia64_expand_atomic_op (<CODE>, operands[1], operands[2], NULL, operands[0]);
- DONE;
-})
+ /* Unless the memory model is relaxed, we want to emit st.rel, which
+ will happen automatically for volatile memories. */
+ gcc_assert (model == MEMMODEL_RELAXED || MEM_VOLATILE_P (operands[0]));
+ emit_move_insn (operands[0], operands[1]);
-(define_expand "sync_new_nand<mode>"
- [(set (match_operand:IMODE 0 "gr_register_operand" "")
- (not:IMODE
- (and:IMODE (match_operand:IMODE 1 "memory_operand" "")
- (match_operand:IMODE 2 "general_operand" ""))))]
- ""
-{
- ia64_expand_atomic_op (NOT, operands[1], operands[2], NULL, operands[0]);
+ /* Sequentially consistent stores need a subsequent MF. See
+ http://www.decadent.org.uk/pipermail/cpp-threads/2008-December/001952.html
+ for a discussion of why an MF is needed here, but not for atomic_load. */
+ if (model == MEMMODEL_SEQ_CST)
+ emit_insn (gen_memory_barrier ());
DONE;
})
-(define_expand "sync_compare_and_swap<mode>"
- [(match_operand:IMODE 0 "gr_register_operand" "")
- (match_operand:IMODE 1 "memory_operand" "")
- (match_operand:IMODE 2 "gr_register_operand" "")
- (match_operand:IMODE 3 "gr_register_operand" "")]
+(define_expand "atomic_compare_and_swap<mode>"
+ [(match_operand:DI 0 "gr_register_operand" "") ;; bool out
+ (match_operand:IMODE 1 "gr_register_operand" "") ;; val out
+ (match_operand:IMODE 2 "not_postinc_memory_operand" "") ;; memory
+ (match_operand:IMODE 3 "gr_register_operand" "") ;; expected
+ (match_operand:IMODE 4 "gr_reg_or_0_operand" "") ;; desired
+ (match_operand:SI 5 "const_int_operand" "") ;; is_weak
+ (match_operand:SI 6 "const_int_operand" "") ;; succ model
+ (match_operand:SI 7 "const_int_operand" "")] ;; fail model
""
{
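+ /* Operand 5 (is_weak) is ignored: ia64's cmpxchg instruction never
+ fails spuriously, so the compare-and-swap is always strong. The
+ failure memory model (operand 7) is likewise not examined. */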
+ enum memmodel model = (enum memmodel) INTVAL (operands[6]);
rtx ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
- rtx dst;
+ rtx dval, eval;
+
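+ /* cmpxchg compares memory against ar.ccv, so stage the zero-extended
+ expected value there first. */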
+ eval = gen_reg_rtx (DImode);
+ convert_move (eval, operands[3], 1);
+ emit_move_insn (ccv, eval);
- convert_move (ccv, operands[2], 1);
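+ /* The cmpxchg insns zero-extend their result into DImode, so narrow
+ modes read the old value back through a DImode scratch. */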
+ if (<MODE>mode == DImode)
+ dval = operands[1];
+ else
+ dval = gen_reg_rtx (DImode);
- dst = operands[0];
- if (GET_MODE (dst) != DImode)
- dst = gen_reg_rtx (DImode);
+ switch (model)
+ {
+ case MEMMODEL_RELAXED:
+ case MEMMODEL_ACQUIRE:
+ case MEMMODEL_CONSUME:
+ emit_insn (gen_cmpxchg_acq_<mode> (dval, operands[2], ccv, operands[4]));
+ break;
+ case MEMMODEL_RELEASE:
+ emit_insn (gen_cmpxchg_rel_<mode> (dval, operands[2], ccv, operands[4]));
+ break;
+ case MEMMODEL_ACQ_REL:
+ case MEMMODEL_SEQ_CST:
+ emit_insn (gen_cmpxchg_rel_<mode> (dval, operands[2], ccv, operands[4]));
+ emit_insn (gen_memory_barrier ());
+ break;
+ default:
+ gcc_unreachable ();
+ }
- emit_insn (gen_cmpxchg_rel_<mode> (dst, operands[1], ccv, operands[3]));
- emit_insn (gen_memory_barrier ());
+ if (<MODE>mode != DImode)
+ emit_move_insn (operands[1], gen_lowpart (<MODE>mode, dval));
- if (dst != operands[0])
- emit_move_insn (operands[0], gen_lowpart (<MODE>mode, dst));
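+ /* The boolean output is whether the old value read back from memory
+ equals the expected value. */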
+ emit_insn (gen_cstoredi4 (operands[0], gen_rtx_EQ (DImode, dval, eval),
+ dval, eval));
DONE;
})
-(define_insn "cmpxchg_rel_<mode>"
+(define_insn "cmpxchg_acq_<mode>"
[(set (match_operand:DI 0 "gr_register_operand" "=r")
(zero_extend:DI
(match_operand:I124MODE 1 "not_postinc_memory_operand" "+S")))
(match_operand:I124MODE 3 "gr_reg_or_0_operand" "rO")]
UNSPEC_CMPXCHG_ACQ))]
""
+ "cmpxchg<modesuffix>.acq %0 = %1, %r3, %2"
+ [(set_attr "itanium_class" "sem")])
+
+(define_insn "cmpxchg_rel_<mode>"
+ [(set (match_operand:DI 0 "gr_register_operand" "=r")
+ (zero_extend:DI
+ (match_operand:I124MODE 1 "not_postinc_memory_operand" "+S")))
+ (set (match_dup 1)
+ (unspec:I124MODE
+ [(match_dup 1)
+ (match_operand:DI 2 "ar_ccv_reg_operand" "")
+ (match_operand:I124MODE 3 "gr_reg_or_0_operand" "rO")]
+ UNSPEC_CMPXCHG_REL))]
+ ""
"cmpxchg<modesuffix>.rel %0 = %1, %r3, %2"
[(set_attr "itanium_class" "sem")])
-(define_insn "cmpxchg_rel_di"
+(define_insn "cmpxchg_acq_di"
[(set (match_operand:DI 0 "gr_register_operand" "=r")
(match_operand:DI 1 "not_postinc_memory_operand" "+S"))
(set (match_dup 1)
(match_operand:DI 3 "gr_reg_or_0_operand" "rO")]
UNSPEC_CMPXCHG_ACQ))]
""
+ "cmpxchg8.acq %0 = %1, %r3, %2"
+ [(set_attr "itanium_class" "sem")])
+
+(define_insn "cmpxchg_rel_di"
+ [(set (match_operand:DI 0 "gr_register_operand" "=r")
+ (match_operand:DI 1 "not_postinc_memory_operand" "+S"))
+ (set (match_dup 1)
+ (unspec:DI [(match_dup 1)
+ (match_operand:DI 2 "ar_ccv_reg_operand" "")
+ (match_operand:DI 3 "gr_reg_or_0_operand" "rO")]
+ UNSPEC_CMPXCHG_REL))]
+ ""
"cmpxchg8.rel %0 = %1, %r3, %2"
[(set_attr "itanium_class" "sem")])
-(define_insn "sync_lock_test_and_set<mode>"
+(define_expand "atomic_exchange<mode>"
+ [(match_operand:IMODE 0 "gr_register_operand" "") ;; output
+ (match_operand:IMODE 1 "not_postinc_memory_operand" "") ;; memory
+ (match_operand:IMODE 2 "gr_reg_or_0_operand" "") ;; input
+ (match_operand:SI 3 "const_int_operand" "")] ;; succ model
+ ""
+{
+ enum memmodel model = (enum memmodel) INTVAL (operands[3]);
+
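+ /* xchg only has acquire semantics, so any model with a release
+ component needs a full barrier ahead of it. */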
+ switch (model)
+ {
+ case MEMMODEL_RELAXED:
+ case MEMMODEL_ACQUIRE:
+ case MEMMODEL_CONSUME:
+ break;
+ case MEMMODEL_RELEASE:
+ case MEMMODEL_ACQ_REL:
+ case MEMMODEL_SEQ_CST:
+ emit_insn (gen_memory_barrier ());
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ emit_insn (gen_xchg_acq_<mode> (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+;; Note that XCHG is always memory model acquire.
+(define_insn "xchg_acq_<mode>"
[(set (match_operand:IMODE 0 "gr_register_operand" "=r")
(match_operand:IMODE 1 "not_postinc_memory_operand" "+S"))
(set (match_dup 1)
"xchg<modesuffix> %0 = %1, %r2"
[(set_attr "itanium_class" "sem")])
-(define_expand "sync_lock_release<mode>"
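+;; Each of the atomic operation expanders below funnels into
+;; ia64_expand_atomic_op; its OLD_DST and NEW_DST arguments select
+;; whether the value before the operation, after it, or neither is
+;; returned.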
+(define_expand "atomic_<fetchop_name><mode>"
[(set (match_operand:IMODE 0 "memory_operand" "")
- (match_operand:IMODE 1 "gr_reg_or_0_operand" ""))]
+ (FETCHOP:IMODE (match_dup 0)
+ (match_operand:IMODE 1 "nonmemory_operand" "")))
+ (use (match_operand:SI 2 "const_int_operand" ""))]
+ ""
+{
+ ia64_expand_atomic_op (<CODE>, operands[0], operands[1], NULL, NULL,
+ (enum memmodel) INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "atomic_nand<mode>"
+ [(set (match_operand:IMODE 0 "memory_operand" "")
+ (not:IMODE
+ (and:IMODE (match_dup 0)
+ (match_operand:IMODE 1 "nonmemory_operand" ""))))
+ (use (match_operand:SI 2 "const_int_operand" ""))]
+ ""
+{
+ ia64_expand_atomic_op (NOT, operands[0], operands[1], NULL, NULL,
+ (enum memmodel) INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "atomic_fetch_<fetchop_name><mode>"
+ [(set (match_operand:IMODE 0 "gr_register_operand" "")
+ (FETCHOP:IMODE
+ (match_operand:IMODE 1 "memory_operand" "")
+ (match_operand:IMODE 2 "nonmemory_operand" "")))
+ (use (match_operand:SI 3 "const_int_operand" ""))]
""
{
- gcc_assert (MEM_VOLATILE_P (operands[0]));
+ ia64_expand_atomic_op (<CODE>, operands[1], operands[2], operands[0], NULL,
+ (enum memmodel) INTVAL (operands[3]));
+ DONE;
+})
+
+(define_expand "atomic_fetch_nand<mode>"
+ [(set (match_operand:IMODE 0 "gr_register_operand" "")
+ (not:IMODE
+ (and:IMODE (match_operand:IMODE 1 "memory_operand" "")
+ (match_operand:IMODE 2 "nonmemory_operand" ""))))
+ (use (match_operand:SI 3 "const_int_operand" ""))]
+ ""
+{
+ ia64_expand_atomic_op (NOT, operands[1], operands[2], operands[0], NULL,
+ (enum memmodel) INTVAL (operands[3]));
+ DONE;
})
+
+(define_expand "atomic_<fetchop_name>_fetch<mode>"
+ [(set (match_operand:IMODE 0 "gr_register_operand" "")
+ (FETCHOP:IMODE
+ (match_operand:IMODE 1 "memory_operand" "")
+ (match_operand:IMODE 2 "nonmemory_operand" "")))
+ (use (match_operand:SI 3 "const_int_operand" ""))]
+ ""
+{
+ ia64_expand_atomic_op (<CODE>, operands[1], operands[2], NULL, operands[0],
+ (enum memmodel) INTVAL (operands[3]));
+ DONE;
+})
+
+(define_expand "atomic_nand_fetch<mode>"
+ [(set (match_operand:IMODE 0 "gr_register_operand" "")
+ (not:IMODE
+ (and:IMODE (match_operand:IMODE 1 "memory_operand" "")
+ (match_operand:IMODE 2 "nonmemory_operand" ""))))
+ (use (match_operand:SI 3 "const_int_operand" ""))]
+ ""
+{
+ ia64_expand_atomic_op (NOT, operands[1], operands[2], NULL, operands[0],
+ (enum memmodel) INTVAL (operands[3]));
+ DONE;
+})
+
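+;; Note that fetchadd accepts only a limited set of immediate
+;; increments (-16, -8, -4, -1, 1, 4, 8, 16), as enforced by the
+;; fetchadd_operand predicate.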
+(define_insn "fetchadd_acq_<mode>"
+ [(set (match_operand:I48MODE 0 "gr_register_operand" "=r")
+ (match_operand:I48MODE 1 "not_postinc_memory_operand" "+S"))
+ (set (match_dup 1)
+ (unspec:I48MODE [(match_dup 1)
+ (match_operand:I48MODE 2 "fetchadd_operand" "n")]
+ UNSPEC_FETCHADD_ACQ))]
+ ""
+ "fetchadd<modesuffix>.acq %0 = %1, %2"
+ [(set_attr "itanium_class" "sem")])
+
+(define_insn "fetchadd_rel_<mode>"
+ [(set (match_operand:I48MODE 0 "gr_register_operand" "=r")
+ (match_operand:I48MODE 1 "not_postinc_memory_operand" "+S"))
+ (set (match_dup 1)
+ (unspec:I48MODE [(match_dup 1)
+ (match_operand:I48MODE 2 "fetchadd_operand" "n")]
+ UNSPEC_FETCHADD_REL))]
+ ""
+ "fetchadd<modesuffix>.rel %0 = %1, %2"
+ [(set_attr "itanium_class" "sem")])