;;- Machine description for HP PA-RISC architecture for GCC compiler
;; Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
-;; 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+;; 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010
+;; Free Software Foundation, Inc.
;; Contributed by the Center for Software Science at the University
;; of Utah.
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
-;; the Free Software Foundation; either version 2, or (at your option)
+;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;; GCC is distributed in the hope that it will be useful,
;; GNU General Public License for more details.
;; You should have received a copy of the GNU General Public License
-;; along with GCC; see the file COPYING. If not, write to
-;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
-;; Boston, MA 02110-1301, USA.
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
;; This gcc Version 2 machine description is inspired by sparc.md and
;; mips.md.
(UNSPEC_TLSLDBASE 7)
(UNSPEC_TLSIE 8)
(UNSPEC_TLSLE 9)
+ (UNSPEC_TLSGD_PIC 10)
+ (UNSPEC_TLSLDM_PIC 11)
+ (UNSPEC_TLSIE_PIC 12)
])
;; UNSPEC_VOLATILE:
(UNSPECV_LONGJMP 5) ; builtin_longjmp
])
+;; Maximum pc-relative branch offsets.
+
+;; These numbers are a bit smaller than the maximum allowable offsets
+;; so that a few instructions may be inserted before the actual branch.
+
+(define_constants
+ [(MAX_12BIT_OFFSET 8184) ; 12-bit branch
+ (MAX_17BIT_OFFSET 262100) ; 17-bit branch
+ ])
+
+;; Mode and code iterators
+
+;; This mode iterator allows :P to be used for patterns that operate on
+;; pointer-sized quantities. Exactly one of the two alternatives will match.
+(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
+
+;; This attribute defines the condition prefix for word and double word
+;; add, compare, subtract and logical instructions.
+(define_mode_attr dwc [(SI "") (DI "*")])
+;; NOTE(review): "" appears to select the word form and "*" the
+;; doubleword form of the condition completer; confirm against the
+;; insn templates that use <dwc>.
+
;; Insn type. Used to default other attribute values.
;; type "unary" insns have one input operand (1) and one output operand (0)
;; type "binary" insns have two input operands (1,2) and one output (0)
+;; The fpstore_load and store_fpload types mark insns that combine a
+;; store with a following load; they receive dedicated pipeline
+;; reservations in the DFA descriptions below.
(define_attr "type"
- "move,unary,binary,shift,nullshift,compare,load,store,uncond_branch,btable_branch,branch,cbranch,fbranch,call,dyncall,fpload,fpstore,fpalu,fpcc,fpmulsgl,fpmuldbl,fpdivsgl,fpdivdbl,fpsqrtsgl,fpsqrtdbl,multi,milli,parallel_branch"
+ "move,unary,binary,shift,nullshift,compare,load,store,uncond_branch,btable_branch,branch,cbranch,fbranch,call,dyncall,fpload,fpstore,fpalu,fpcc,fpmulsgl,fpmuldbl,fpdivsgl,fpdivdbl,fpsqrtsgl,fpsqrtdbl,multi,milli,parallel_branch,fpstore_load,store_fpload"
 (const_string "binary"))
(define_attr "pa_combine_type"
(const_int 8) (const_int 4))
(eq_attr "type" "binary,shift,nullshift")
- (if_then_else (match_operand 2 "arith_operand" "")
+ (if_then_else (match_operand 2 "arith14_operand" "")
(const_int 4) (const_int 12))
(eq_attr "type" "move,unary,shift,nullshift")
- (if_then_else (match_operand 1 "arith_operand" "")
+ (if_then_else (match_operand 1 "arith14_operand" "")
(const_int 4) (const_int 8))]
(const_int 4)))
(eq_attr "length" "4"))
(const_string "true")
(eq_attr "type" "uncond_branch")
- (if_then_else (ne (symbol_ref "TARGET_JUMP_IN_DELAY")
- (const_int 0))
+ (if_then_else (match_test "TARGET_JUMP_IN_DELAY")
(const_string "true")
(const_string "false"))]
(const_string "false")))
(define_delay (eq_attr "type" "btable_branch,branch,parallel_branch")
 [(eq_attr "in_branch_delay" "true") (nil) (nil)])
-;; Floating point conditional branch delay slot description and
+;; Floating point conditional branch delay slot description.
(define_delay (eq_attr "type" "fbranch")
 [(eq_attr "in_branch_delay" "true")
 (eq_attr "in_nullified_branch_delay" "true")
 (attr_flag "backward"))])
+;; Unconditional branches get a delay slot only when pa_following_call
+;; returns false for the branch insn.
(define_delay (and (eq_attr "type" "uncond_branch")
- (eq (symbol_ref "following_call (insn)")
- (const_int 0)))
+ (not (match_test "pa_following_call (insn)")))
 [(eq_attr "in_branch_delay" "true") (nil) (nil)])
;; Memory. Disregarding Cache misses, the Mustang memory times are:
(eq_attr "cpu" "700"))
"mem_700*3")
-(define_insn_reservation "W11" 1
- (and (eq_attr "type" "!fpcc,fpalu,fpmulsgl,fpmuldbl,fpdivsgl,fpdivdbl,fpsqrtsgl,fpsqrtdbl,load,fpload,store,fpstore")
+;; Combined fp-store/load and store/fp-load insns occupy the memory
+;; unit for 5 and 6 cycles respectively on the 700.
+(define_insn_reservation "W11" 5
+ (and (eq_attr "type" "fpstore_load")
+ (eq_attr "cpu" "700"))
+ "mem_700*5")
+
+(define_insn_reservation "W12" 6
+ (and (eq_attr "type" "store_fpload")
+ (eq_attr "cpu" "700"))
+ "mem_700*6")
+
+;; Catch-all: single cycle for every type not matched above.
+(define_insn_reservation "W13" 1
+ (and (eq_attr "type" "!fpcc,fpalu,fpmulsgl,fpmuldbl,fpdivsgl,fpdivdbl,fpsqrtsgl,fpsqrtdbl,load,fpload,store,fpstore,fpstore_load,store_fpload")
 (eq_attr "cpu" "700"))
 "dummy_700")
;; We have a bypass for all computations in the FP unit which feed an
;; FP store as long as the sizes are the same.
-(define_bypass 2 "W1,W2" "W10" "hppa_fpstore_bypass_p")
-(define_bypass 9 "W3" "W10" "hppa_fpstore_bypass_p")
-(define_bypass 11 "W4" "W10" "hppa_fpstore_bypass_p")
-(define_bypass 13 "W5" "W10" "hppa_fpstore_bypass_p")
-(define_bypass 17 "W6" "W10" "hppa_fpstore_bypass_p")
+;; fpstore_load (W11) gets the same bypasses as a plain fp store (W10).
+(define_bypass 2 "W1,W2" "W10,W11" "pa_fpstore_bypass_p")
+(define_bypass 9 "W3" "W10,W11" "pa_fpstore_bypass_p")
+(define_bypass 11 "W4" "W10,W11" "pa_fpstore_bypass_p")
+(define_bypass 13 "W5" "W10,W11" "pa_fpstore_bypass_p")
+(define_bypass 17 "W6" "W10,W11" "pa_fpstore_bypass_p")
;; We have an "anti-bypass" for FP loads which feed an FP store.
-(define_bypass 4 "W8" "W10" "hppa_fpstore_bypass_p")
+(define_bypass 4 "W8,W12" "W10,W11" "pa_fpstore_bypass_p")
;; Function units for the 7100 and 7150. The 7100/7150 can dual-issue
;; floating point computations with non-floating point computations (fp loads
(eq_attr "cpu" "7100"))
"i_7100+mem_7100,mem_7100")
-(define_insn_reservation "X7" 1
- (and (eq_attr "type" "!fpcc,fpalu,fpmulsgl,fpmuldbl,fpdivsgl,fpsqrtsgl,fpdivdbl,fpsqrtdbl,load,fpload,store,fpstore")
+;; Combined store/load insns occupy the 7100 memory unit for an extra
+;; three cycles.
+(define_insn_reservation "X7" 4
+ (and (eq_attr "type" "fpstore_load")
+ (eq_attr "cpu" "7100"))
+ "i_7100+mem_7100,mem_7100*3")
+
+(define_insn_reservation "X8" 4
+ (and (eq_attr "type" "store_fpload")
+ (eq_attr "cpu" "7100"))
+ "i_7100+mem_7100,mem_7100*3")
+
+;; Catch-all: single cycle for every type not matched above.
+(define_insn_reservation "X9" 1
+ (and (eq_attr "type" "!fpcc,fpalu,fpmulsgl,fpmuldbl,fpdivsgl,fpsqrtsgl,fpdivdbl,fpsqrtdbl,load,fpload,store,fpstore,fpstore_load,store_fpload")
 (eq_attr "cpu" "7100"))
 "i_7100")
;; We have a bypass for all computations in the FP unit which feed an
;; FP store as long as the sizes are the same.
-(define_bypass 1 "X0" "X6" "hppa_fpstore_bypass_p")
-(define_bypass 7 "X1" "X6" "hppa_fpstore_bypass_p")
-(define_bypass 14 "X2" "X6" "hppa_fpstore_bypass_p")
+(define_bypass 1 "X0" "X6,X7" "pa_fpstore_bypass_p")
+(define_bypass 7 "X1" "X6,X7" "pa_fpstore_bypass_p")
+(define_bypass 14 "X2" "X6,X7" "pa_fpstore_bypass_p")
;; We have an "anti-bypass" for FP loads which feed an FP store.
-(define_bypass 3 "X4" "X6" "hppa_fpstore_bypass_p")
+(define_bypass 3 "X4,X8" "X6,X7" "pa_fpstore_bypass_p")
;; The 7100LC has three floating-point units: ALU, MUL, and DIV.
;; There's no value in modeling the ALU and MUL separately though
(eq_attr "cpu" "7100LC"))
"i1_7100lc+mem_7100lc,mem_7100lc")
-(define_insn_reservation "Y6" 1
+;; Combined store/load reservations; the pre-existing reservations
+;; below are renumbered (old Y6-Y11 become Y8-Y15) to make room.
+(define_insn_reservation "Y6" 4
+ (and (eq_attr "type" "fpstore_load")
+ (eq_attr "cpu" "7100LC"))
+ "i1_7100lc+mem_7100lc,mem_7100lc*3")
+
+(define_insn_reservation "Y7" 4
+ (and (eq_attr "type" "store_fpload")
+ (eq_attr "cpu" "7100LC"))
+ "i1_7100lc+mem_7100lc,mem_7100lc*3")
+
+(define_insn_reservation "Y8" 1
 (and (eq_attr "type" "shift,nullshift")
 (eq_attr "cpu" "7100LC,7200,7300"))
 "i1_7100lc")
-(define_insn_reservation "Y7" 1
+(define_insn_reservation "Y9" 1
 (and (eq_attr "type" "!fpcc,fpalu,fpmulsgl,fpmuldbl,fpdivsgl,fpsqrtsgl,fpdivdbl,fpsqrtdbl,load,fpload,store,fpstore,shift,nullshift")
 (eq_attr "cpu" "7100LC,7200,7300"))
 "(i0_7100lc|i1_7100lc)")
;; The 7200 has a store-load penalty
-(define_insn_reservation "Y8" 2
+(define_insn_reservation "Y10" 2
 (and (eq_attr "type" "store")
 (eq_attr "cpu" "7200"))
 "i1_7100lc,mem_7100lc")
-(define_insn_reservation "Y9" 2
+(define_insn_reservation "Y11" 2
 (and (eq_attr "type" "fpstore")
 (eq_attr "cpu" "7200"))
 "i1_7100lc,mem_7100lc")
+(define_insn_reservation "Y12" 4
+ (and (eq_attr "type" "fpstore_load")
+ (eq_attr "cpu" "7200"))
+ "i1_7100lc,mem_7100lc,i1_7100lc+mem_7100lc")
+
+(define_insn_reservation "Y13" 4
+ (and (eq_attr "type" "store_fpload")
+ (eq_attr "cpu" "7200"))
+ "i1_7100lc,mem_7100lc,i1_7100lc+mem_7100lc")
+
;; The 7300 has no penalty for store-store or store-load
-(define_insn_reservation "Y10" 2
+(define_insn_reservation "Y14" 2
 (and (eq_attr "type" "store")
 (eq_attr "cpu" "7300"))
 "i1_7100lc")
-(define_insn_reservation "Y11" 2
+(define_insn_reservation "Y15" 2
 (and (eq_attr "type" "fpstore")
 (eq_attr "cpu" "7300"))
 "i1_7100lc")
+(define_insn_reservation "Y16" 4
+ (and (eq_attr "type" "fpstore_load")
+ (eq_attr "cpu" "7300"))
+ "i1_7100lc,i1_7100lc+mem_7100lc")
+
+(define_insn_reservation "Y17" 4
+ (and (eq_attr "type" "store_fpload")
+ (eq_attr "cpu" "7300"))
+ "i1_7100lc,i1_7100lc+mem_7100lc")
+
;; We have an "anti-bypass" for FP loads which feed an FP store.
-(define_bypass 3 "Y3" "Y5,Y9,Y11" "hppa_fpstore_bypass_p")
+(define_bypass 3 "Y3,Y7,Y13,Y17" "Y5,Y6,Y11,Y12,Y15,Y16" "pa_fpstore_bypass_p")
;; Scheduling for the PA8000 is somewhat different than scheduling for a
;; traditional architecture.
(eq_attr "cpu" "8000"))
"im_8000,rm_8000+store_8000")
+;; Combined store/load insns make two passes through the 8000's memory
+;; issue/retire units; the remaining reservations are renumbered Z3-Z7.
+(define_insn_reservation "Z2" 0
+ (and (eq_attr "type" "fpstore_load,store_fpload")
+ (eq_attr "cpu" "8000"))
+ "im_8000,rm_8000+store_8000,im_8000,rm_8000")
+
;; We can issue and retire two non-memory operations per cycle with
;; a few exceptions (branches). This group catches those we want
;; to assume have zero latency.
-(define_insn_reservation "Z2" 0
+(define_insn_reservation "Z3" 0
 (and
- (eq_attr "type" "!load,fpload,store,fpstore,uncond_branch,btable_branch,branch,cbranch,fbranch,call,dyncall,multi,milli,parallel_branch,fpcc,fpalu,fpmulsgl,fpmuldbl,fpsqrtsgl,fpsqrtdbl,fpdivsgl,fpdivdbl")
+ (eq_attr "type" "!load,fpload,store,fpstore,uncond_branch,btable_branch,branch,cbranch,fbranch,call,dyncall,multi,milli,parallel_branch,fpcc,fpalu,fpmulsgl,fpmuldbl,fpsqrtsgl,fpsqrtdbl,fpdivsgl,fpdivdbl,fpstore_load,store_fpload")
 (eq_attr "cpu" "8000"))
 "inm_8000,rnm_8000")
;; Branches use both slots in the non-memory issue and
;; retirement unit.
-(define_insn_reservation "Z3" 0
+(define_insn_reservation "Z4" 0
 (and
 (eq_attr "type" "uncond_branch,btable_branch,branch,cbranch,fbranch,call,dyncall,multi,milli,parallel_branch")
 (eq_attr "cpu" "8000"))
;; They can issue/retire two at a time in the non-memory
;; units. We fix their latency at 2 cycles and they
;; are fully pipelined.
-(define_insn_reservation "Z4" 1
+(define_insn_reservation "Z5" 1
 (and
 (eq_attr "type" "fpcc,fpalu,fpmulsgl,fpmuldbl")
 (eq_attr "cpu" "8000"))
;; The fdivsqrt units are not pipelined and have a very long latency.
;; To keep the DFA from exploding, we do not show all the
;; reservations for the divsqrt unit.
-(define_insn_reservation "Z5" 17
+(define_insn_reservation "Z6" 17
 (and
 (eq_attr "type" "fpdivsgl,fpsqrtsgl")
 (eq_attr "cpu" "8000"))
 "inm_8000,fdivsqrt_8000*6,rnm_8000")
-(define_insn_reservation "Z6" 31
+(define_insn_reservation "Z7" 31
 (and
 (eq_attr "type" "fpdivdbl,fpsqrtdbl")
 (eq_attr "cpu" "8000"))
 "inm_8000,fdivsqrt_8000*6,rnm_8000")
+;; Operand and operator predicates and constraints
+;; (predicates.md defines the operand predicates; constraints.md the
+;; insn operand constraints).
+
(include "predicates.md")
+(include "constraints.md")
\f
;; Compare instructions.
;; This controls RTL generation and register allocation.
+;; Comparisons are expanded directly by the cbranch, cstore and movcc
+;; patterns; there are no separate cmpMODE expanders.
-;; We generate RTL for comparisons and branches by having the cmpxx
-;; patterns store away the operands. Then, the scc and bcc patterns
-;; emit RTL for both the compare and the branch.
-;;
-
-(define_expand "cmpdi"
- [(set (reg:CC 0)
- (compare:CC (match_operand:DI 0 "reg_or_0_operand" "")
- (match_operand:DI 1 "register_operand" "")))]
- "TARGET_64BIT"
-
- "
-{
- hppa_compare_op0 = operands[0];
- hppa_compare_op1 = operands[1];
- hppa_branch_type = CMP_SI;
- DONE;
-}")
-
-(define_expand "cmpsi"
- [(set (reg:CC 0)
- (compare:CC (match_operand:SI 0 "reg_or_0_operand" "")
- (match_operand:SI 1 "arith5_operand" "")))]
- ""
- "
-{
- hppa_compare_op0 = operands[0];
- hppa_compare_op1 = operands[1];
- hppa_branch_type = CMP_SI;
- DONE;
-}")
-
-(define_expand "cmpsf"
- [(set (reg:CCFP 0)
- (compare:CCFP (match_operand:SF 0 "reg_or_0_operand" "")
- (match_operand:SF 1 "reg_or_0_operand" "")))]
- "! TARGET_SOFT_FLOAT"
- "
-{
- hppa_compare_op0 = operands[0];
- hppa_compare_op1 = operands[1];
- hppa_branch_type = CMP_SF;
- DONE;
-}")
-
-(define_expand "cmpdf"
- [(set (reg:CCFP 0)
- (compare:CCFP (match_operand:DF 0 "reg_or_0_operand" "")
- (match_operand:DF 1 "reg_or_0_operand" "")))]
- "! TARGET_SOFT_FLOAT"
- "
-{
- hppa_compare_op0 = operands[0];
- hppa_compare_op1 = operands[1];
- hppa_branch_type = CMP_DF;
- DONE;
-}")
-
(define_insn ""
[(set (reg:CCFP 0)
(match_operator:CCFP 2 "comparison_operator"
;; scc insns.
-(define_expand "seq"
- [(set (match_operand:SI 0 "register_operand" "")
- (eq:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- /* set up operands from compare. */
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
- /* fall through and generate default code */
-}")
-
-(define_expand "sne"
- [(set (match_operand:SI 0 "register_operand" "")
- (ne:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "slt"
- [(set (match_operand:SI 0 "register_operand" "")
- (lt:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sgt"
- [(set (match_operand:SI 0 "register_operand" "")
- (gt:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sle"
- [(set (match_operand:SI 0 "register_operand" "")
- (le:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sge"
- [(set (match_operand:SI 0 "register_operand" "")
- (ge:SI (match_dup 1)
- (match_dup 2)))]
+;; Generic scc expander. Only word-mode integer conditions are
+;; handled; fp scc patterns rarely match and are not a win on the PA.
+(define_expand "cstoresi4"
+ [(set (match_operand:SI 0 "register_operand")
+ (match_operator:SI 1 "ordered_comparison_operator"
+ [(match_operand:SI 2 "reg_or_0_operand" "")
+ (match_operand:SI 3 "arith5_operand" "")]))]
 "!TARGET_64BIT"
- "
-{
- /* fp scc patterns rarely match, and are not a win on the PA. */
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sltu"
- [(set (match_operand:SI 0 "register_operand" "")
- (ltu:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sgtu"
- [(set (match_operand:SI 0 "register_operand" "")
- (gtu:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sleu"
- [(set (match_operand:SI 0 "register_operand" "")
- (leu:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "sgeu"
- [(set (match_operand:SI 0 "register_operand" "")
- (geu:SI (match_dup 1)
- (match_dup 2)))]
- "!TARGET_64BIT"
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
+ "")
;; Instruction canonicalization puts immediate operands second, which
;; is the reverse of what we want.
(define_expand "movsicc"
 [(set (match_operand:SI 0 "register_operand" "")
 (if_then_else:SI
- (match_operator 1 "comparison_operator"
- [(match_dup 4)
- (match_dup 5)])
+ (match_operand 1 "comparison_operator" "")
 (match_operand:SI 2 "reg_or_cint_move_operand" "")
 (match_operand:SI 3 "reg_or_cint_move_operand" "")))]
 ""
 "
{
- enum rtx_code code = GET_CODE (operands[1]);
-
- if (hppa_branch_type != CMP_SI)
- FAIL;
-
- if (GET_MODE (hppa_compare_op0) != GET_MODE (hppa_compare_op1)
- || GET_MODE (hppa_compare_op0) != GET_MODE (operands[0]))
+ /* Only SImode comparisons, with both operands the same mode, are
+ supported.  */
+ if (GET_MODE (XEXP (operands[1], 0)) != SImode
+ || GET_MODE (XEXP (operands[1], 0)) != GET_MODE (XEXP (operands[1], 1)))
 FAIL;
-
- /* operands[1] is currently the result of compare_from_rtx. We want to
- emit a compare of the original operands. */
- operands[1] = gen_rtx_fmt_ee (code, SImode, hppa_compare_op0, hppa_compare_op1);
- operands[4] = hppa_compare_op0;
- operands[5] = hppa_compare_op1;
}")
;; We used to accept any register for op1.
(define_expand "movdicc"
 [(set (match_operand:DI 0 "register_operand" "")
 (if_then_else:DI
- (match_operator 1 "comparison_operator"
- [(match_dup 4)
- (match_dup 5)])
+ (match_operand 1 "comparison_operator" "")
 (match_operand:DI 2 "reg_or_cint_move_operand" "")
 (match_operand:DI 3 "reg_or_cint_move_operand" "")))]
 "TARGET_64BIT"
 "
{
- enum rtx_code code = GET_CODE (operands[1]);
-
- if (hppa_branch_type != CMP_SI)
+ /* Only DImode comparisons, with both operands the same mode, are
+ supported.  */
+ if (GET_MODE (XEXP (operands[1], 0)) != DImode
+ || GET_MODE (XEXP (operands[1], 0)) != GET_MODE (XEXP (operands[1], 1)))
 FAIL;
-
- if (GET_MODE (hppa_compare_op0) != GET_MODE (hppa_compare_op1)
- || GET_MODE (hppa_compare_op0) != GET_MODE (operands[0]))
- FAIL;
-
- /* operands[1] is currently the result of compare_from_rtx. We want to
- emit a compare of the original operands. */
- operands[1] = gen_rtx_fmt_ee (code, DImode, hppa_compare_op0, hppa_compare_op1);
- operands[4] = hppa_compare_op0;
- operands[5] = hppa_compare_op1;
}")
; We need the first constraint alternative in order to avoid
;; Conditional Branches
-(define_expand "beq"
- [(set (pc)
- (if_then_else (eq (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (EQ, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- /* set up operands from compare. */
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
- /* fall through and generate default code */
-}")
-
-(define_expand "bne"
- [(set (pc)
- (if_then_else (ne (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (NE, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bgt"
- [(set (pc)
- (if_then_else (gt (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (GT, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "blt"
+;; Doubleword compare and branch; the second comparison operand must
+;; be a register (no immediates).
+(define_expand "cbranchdi4"
 [(set (pc)
- (if_then_else (lt (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "register_operand" "")])
+ (label_ref (match_operand 3 "" ""))
 (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (LT, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bge"
- [(set (pc)
- (if_then_else (ge (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (GE, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "ble"
- [(set (pc)
- (if_then_else (le (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- {
- emit_insn (gen_cmp_fp (LE, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
- }
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bgtu"
- [(set (pc)
- (if_then_else (gtu (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bltu"
- [(set (pc)
- (if_then_else (ltu (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bgeu"
- [(set (pc)
- (if_then_else (geu (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bleu"
- [(set (pc)
- (if_then_else (leu (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type != CMP_SI)
- FAIL;
- operands[1] = hppa_compare_op0;
- operands[2] = hppa_compare_op1;
-}")
-
-(define_expand "bltgt"
- [(set (pc)
- (if_then_else (ltgt (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (LTGT, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
-
-(define_expand "bunle"
- [(set (pc)
- (if_then_else (unle (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNLE, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
-
-(define_expand "bunlt"
- [(set (pc)
- (if_then_else (unlt (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNLT, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
-
-(define_expand "bunge"
- [(set (pc)
- (if_then_else (unge (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNGE, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
+ "TARGET_64BIT"
+ "")
-(define_expand "bungt"
+;; Word compare and branch; operand 2 accepts anything matched by
+;; arith5_operand.
+(define_expand "cbranchsi4"
 [(set (pc)
- (if_then_else (ungt (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
+ (if_then_else (match_operator 0 "ordered_comparison_operator"
+ [(match_operand:SI 1 "reg_or_0_operand" "")
+ (match_operand:SI 2 "arith5_operand" "")])
+ (label_ref (match_operand 3 "" ""))
 (pc)))]
 ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNGT, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
+ "")
-(define_expand "buneq"
+;; Single-float compare and branch; expansion is done entirely by
+;; pa_emit_bcond_fp.
+(define_expand "cbranchsf4"
 [(set (pc)
- (if_then_else (uneq (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand:SF 1 "reg_or_0_operand" "")
+ (match_operand:SF 2 "reg_or_0_operand" "")])
+ (label_ref (match_operand 3 "" ""))
 (pc)))]
 ""
 "
{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNEQ, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
+ pa_emit_bcond_fp (operands);
 DONE;
}")
-(define_expand "bunordered"
- [(set (pc)
- (if_then_else (unordered (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
- (pc)))]
- ""
- "
-{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (UNORDERED, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
- DONE;
-}")
-(define_expand "bordered"
+;; Double-float compare and branch; expansion is done entirely by
+;; pa_emit_bcond_fp.
+(define_expand "cbranchdf4"
 [(set (pc)
- (if_then_else (ordered (match_dup 1) (match_dup 2))
- (label_ref (match_operand 0 "" ""))
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand:DF 1 "reg_or_0_operand" "")
+ (match_operand:DF 2 "reg_or_0_operand" "")])
+ (label_ref (match_operand 3 "" ""))
 (pc)))]
 ""
 "
{
- if (hppa_branch_type == CMP_SI)
- FAIL;
- emit_insn (gen_cmp_fp (ORDERED, hppa_compare_op0, hppa_compare_op1));
- emit_bcond_fp (NE, operands[0]);
+ pa_emit_bcond_fp (operands);
 DONE;
}")
""
"*
{
- return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn);
+ return pa_output_cbranch (operands, 0, insn);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
(cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 8184))
+ (const_int MAX_12BIT_OFFSET))
(const_int 4)
(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 262100))
+ (const_int MAX_17BIT_OFFSET))
(const_int 8)
- (eq (symbol_ref "flag_pic") (const_int 0))
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
(const_int 20)]
(const_int 28)))])
""
"*
{
- return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn);
+ return pa_output_cbranch (operands, 1, insn);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
(cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 8184))
+ (const_int MAX_12BIT_OFFSET))
(const_int 4)
(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 262100))
+ (const_int MAX_17BIT_OFFSET))
(const_int 8)
- (eq (symbol_ref "flag_pic") (const_int 0))
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
(const_int 20)]
(const_int 28)))])
"TARGET_64BIT"
"*
{
- return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn);
+ return pa_output_cbranch (operands, 0, insn);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
(cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 8184))
+ (const_int MAX_12BIT_OFFSET))
(const_int 4)
(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 262100))
+ (const_int MAX_17BIT_OFFSET))
(const_int 8)
- (eq (symbol_ref "flag_pic") (const_int 0))
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
(const_int 20)]
(const_int 28)))])
"TARGET_64BIT"
"*
{
- return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn);
+ return pa_output_cbranch (operands, 1, insn);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
(cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 8184))
+ (const_int MAX_12BIT_OFFSET))
(const_int 4)
(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 262100))
+ (const_int MAX_17BIT_OFFSET))
(const_int 8)
- (eq (symbol_ref "flag_pic") (const_int 0))
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
(const_int 20)]
(const_int 28)))])
(define_insn ""
"TARGET_64BIT"
"*
{
- return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn);
+ return pa_output_cbranch (operands, 0, insn);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
(cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 8184))
+ (const_int MAX_12BIT_OFFSET))
(const_int 4)
(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 262100))
+ (const_int MAX_17BIT_OFFSET))
(const_int 8)
- (eq (symbol_ref "flag_pic") (const_int 0))
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
(const_int 20)]
(const_int 28)))])
"TARGET_64BIT"
"*
{
- return output_cbranch (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn);
+ return pa_output_cbranch (operands, 1, insn);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
(cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 8184))
+ (const_int MAX_12BIT_OFFSET))
(const_int 4)
(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 262100))
+ (const_int MAX_17BIT_OFFSET))
(const_int 8)
- (eq (symbol_ref "flag_pic") (const_int 0))
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
(const_int 20)]
(const_int 28)))])
""
"*
{
- return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn, 0);
+ return pa_output_bb (operands, 0, insn, 0);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
"TARGET_64BIT"
"*
{
- return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn, 0);
+ return pa_output_bb (operands, 0, insn, 0);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
""
"*
{
- return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn, 0);
+ return pa_output_bb (operands, 1, insn, 0);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
"TARGET_64BIT"
"*
{
- return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn, 0);
+ return pa_output_bb (operands, 1, insn, 0);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
""
"*
{
- return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn, 1);
+ return pa_output_bb (operands, 0, insn, 1);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
"TARGET_64BIT"
"*
{
- return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn, 1);
+ return pa_output_bb (operands, 0, insn, 1);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
""
"*
{
- return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn, 1);
+ return pa_output_bb (operands, 1, insn, 1);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
"TARGET_64BIT"
"*
{
- return output_bb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn, 1);
+ return pa_output_bb (operands, 1, insn, 1);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
;; Branch on Variable Bit patterns.
(define_insn ""
""
"*
{
- return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn, 0);
+ return pa_output_bvb (operands, 0, insn, 0);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
"TARGET_64BIT"
"*
{
- return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn, 0);
+ return pa_output_bvb (operands, 0, insn, 0);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
""
"*
{
- return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn, 0);
+ return pa_output_bvb (operands, 1, insn, 0);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
"TARGET_64BIT"
"*
{
- return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn, 0);
+ return pa_output_bvb (operands, 1, insn, 0);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
""
"*
{
- return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn, 1);
+ return pa_output_bvb (operands, 0, insn, 1);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
"TARGET_64BIT"
"*
{
- return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 0, insn, 1);
+ return pa_output_bvb (operands, 0, insn, 1);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
""
"*
{
- return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn, 1);
+ return pa_output_bvb (operands, 1, insn, 1);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (pc)
"TARGET_64BIT"
"*
{
- return output_bvb (operands, INSN_ANNULLED_BRANCH_P (insn),
- get_attr_length (insn), 1, insn, 1);
+ return pa_output_bvb (operands, 1, insn, 1);
}"
[(set_attr "type" "cbranch")
(set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
;; Floating point branches
+
+;; ??? Nullification is handled differently from other branches.
+;; If nullification is specified, the delay slot is nullified on any
+;; taken branch regardless of branch direction.
(define_insn ""
[(set (pc) (if_then_else (ne (reg:CCFP 0) (const_int 0))
(label_ref (match_operand 0 "" ""))
(pc)))]
- "! TARGET_SOFT_FLOAT"
+ "!TARGET_SOFT_FLOAT"
"*
{
- if (INSN_ANNULLED_BRANCH_P (insn))
- return \"ftest\;b,n %0\";
+ int length = get_attr_length (insn);
+ rtx xoperands[1];
+ int nullify, xdelay;
+
+ if (length < 16)
+ return \"ftest\;b%* %l0\";
+
+ if (dbr_sequence_length () == 0 || INSN_ANNULLED_BRANCH_P (insn))
+ {
+ nullify = 1;
+ xdelay = 0;
+ xoperands[0] = GEN_INT (length - 8);
+ }
+ else
+ {
+ nullify = 0;
+ xdelay = 1;
+ xoperands[0] = GEN_INT (length - 4);
+ }
+
+ if (nullify)
+ output_asm_insn (\"ftest\;add,tr %%r0,%%r0,%%r0\;b,n .+%0\", xoperands);
else
- return \"ftest\;b%* %0\";
+ output_asm_insn (\"ftest\;add,tr %%r0,%%r0,%%r0\;b .+%0\", xoperands);
+ return pa_output_lbranch (operands[0], insn, xdelay);
}"
- [(set_attr "type" "fbranch")
- (set_attr "length" "8")])
+[(set_attr "type" "fbranch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 32)
+ (not (match_test "flag_pic"))
+ (const_int 28)]
+ (const_int 36)))])
(define_insn ""
[(set (pc) (if_then_else (ne (reg:CCFP 0) (const_int 0))
(pc)
(label_ref (match_operand 0 "" ""))))]
- "! TARGET_SOFT_FLOAT"
+ "!TARGET_SOFT_FLOAT"
"*
{
- if (INSN_ANNULLED_BRANCH_P (insn))
- return \"ftest\;add,tr %%r0,%%r0,%%r0\;b,n %0\";
- else
+ int length = get_attr_length (insn);
+ rtx xoperands[1];
+ int nullify, xdelay;
+
+ if (length < 16)
return \"ftest\;add,tr %%r0,%%r0,%%r0\;b%* %0\";
+
+ if (dbr_sequence_length () == 0 || INSN_ANNULLED_BRANCH_P (insn))
+ {
+ nullify = 1;
+ xdelay = 0;
+ xoperands[0] = GEN_INT (length - 4);
+ }
+ else
+ {
+ nullify = 0;
+ xdelay = 1;
+ xoperands[0] = GEN_INT (length);
+ }
+
+ if (nullify)
+ output_asm_insn (\"ftest\;b,n .+%0\", xoperands);
+ else
+ output_asm_insn (\"ftest\;b .+%0\", xoperands);
+ return pa_output_lbranch (operands[0], insn, xdelay);
}"
- [(set_attr "type" "fbranch")
- (set_attr "length" "12")])
+[(set_attr "type" "fbranch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 12)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 28)
+ (not (match_test "flag_pic"))
+ (const_int 24)]
+ (const_int 32)))])
;; Move instructions
""
"
{
- if (emit_move_sequence (operands, SImode, 0))
+ if (pa_emit_move_sequence (operands, SImode, 0))
DONE;
}")
""
"
{
- if (emit_move_sequence (operands, SImode, operands[2]))
+ if (pa_emit_move_sequence (operands, SImode, operands[2]))
DONE;
/* We don't want the clobber emitted, so handle this ourselves. */
""
"
{
- if (emit_move_sequence (operands, SImode, operands[2]))
+ if (pa_emit_move_sequence (operands, SImode, operands[2]))
DONE;
/* We don't want the clobber emitted, so handle this ourselves. */
""
"
{
- if (emit_move_sequence (operands, SImode, operands[2]))
+ if (pa_emit_move_sequence (operands, SImode, operands[2]))
DONE;
/* We don't want the clobber emitted, so handle this ourselves. */
(define_insn ""
[(set (match_operand:SI 0 "move_dest_operand"
- "=r,r,r,r,r,r,Q,!*q,!r,!*f,*f,T,!r,!f")
+ "=r,r,r,r,r,r,Q,!*q,!r,!*f,*f,T,?r,?*f")
(match_operand:SI 1 "move_src_operand"
- "A,r,J,N,K,RQ,rM,!rM,!*q,!*fM,RT,*f,!f,!r"))]
+ "A,r,J,N,K,RQ,rM,!rM,!*q,!*fM,RT,*f,*f,r"))]
"(register_operand (operands[0], SImode)
|| reg_or_0_operand (operands[1], SImode))
&& !TARGET_SOFT_FLOAT
fstw%F0 %1,%0
{fstws|fstw} %1,-16(%%sp)\n\t{ldws|ldw} -16(%%sp),%0
{stws|stw} %1,-16(%%sp)\n\t{fldws|fldw} -16(%%sp),%0"
- [(set_attr "type" "load,move,move,move,shift,load,store,move,move,fpalu,fpload,fpstore,move,move")
+ [(set_attr "type" "load,move,move,move,shift,load,store,move,move,fpalu,fpload,fpstore,fpstore_load,store_fpload")
(set_attr "pa_combine_type" "addmove")
(set_attr "length" "4,4,4,4,4,4,4,4,4,4,4,4,8,8")])
}"
[(set_attr "type" "binary")
(set (attr "length")
- (if_then_else (eq (symbol_ref "TARGET_LONG_LOAD_STORE") (const_int 0))
+ (if_then_else (not (match_test "TARGET_LONG_LOAD_STORE"))
(const_int 4)
(const_int 8)))])
[(set (match_operand:SI 0 "register_operand" "=r")
(high:SI (match_operand 1 "" "")))]
"(!flag_pic || !symbolic_operand (operands[1], Pmode))
- && !is_function_label_plus_const (operands[1])"
+ && !pa_is_function_label_plus_const (operands[1])"
"*
{
if (symbolic_operand (operands[1], Pmode))
[(set (match_operand:SI 0 "register_operand" "=r")
(lo_sum:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "immediate_operand" "i")))]
- "!is_function_label_plus_const (operands[2])"
+ "!pa_is_function_label_plus_const (operands[2])"
"*
{
gcc_assert (!flag_pic || !symbolic_operand (operands[2], Pmode));
""
"
{
- if (emit_move_sequence (operands, HImode, 0))
+ if (pa_emit_move_sequence (operands, HImode, 0))
DONE;
}")
-(define_insn ""
- [(set (match_operand:HI 0 "move_dest_operand"
- "=r,r,r,r,r,Q,!*q,!r,!*f,!r,!f")
- (match_operand:HI 1 "move_src_operand"
- "r,J,N,K,RQ,rM,!rM,!*q,!*fM,!f,!r"))]
- "(register_operand (operands[0], HImode)
- || reg_or_0_operand (operands[1], HImode))
- && !TARGET_SOFT_FLOAT
- && !TARGET_64BIT"
- "@
- copy %1,%0
- ldi %1,%0
- ldil L'%1,%0
- {zdepi|depwi,z} %Z1,%0
- ldh%M1 %1,%0
- sth%M0 %r1,%0
- mtsar %r1
- {mfctl|mfctl,w} %sar,%0
- fcpy,sgl %f1,%0
- {fstws|fstw} %1,-16(%%sp)\n\t{ldws|ldw} -16(%%sp),%0
- {stws|stw} %1,-16(%%sp)\n\t{fldws|fldw} -16(%%sp),%0"
- [(set_attr "type" "move,move,move,shift,load,store,move,move,move,move,move")
- (set_attr "pa_combine_type" "addmove")
- (set_attr "length" "4,4,4,4,4,4,4,4,4,8,8")])
+;; Handle HImode input reloads requiring a general register as a
+;; scratch register.
+(define_expand "reload_inhi"
+ [(set (match_operand:HI 0 "register_operand" "=Z")
+ (match_operand:HI 1 "non_hard_reg_operand" ""))
+ (clobber (match_operand:HI 2 "register_operand" "=&r"))]
+ ""
+ "
+{
+ if (pa_emit_move_sequence (operands, HImode, operands[2]))
+ DONE;
-(define_insn ""
- [(set (match_operand:HI 0 "move_dest_operand"
- "=r,r,r,r,r,Q,!*q,!r,!*f")
- (match_operand:HI 1 "move_src_operand"
- "r,J,N,K,RQ,rM,!rM,!*q,!*fM"))]
- "(register_operand (operands[0], HImode)
- || reg_or_0_operand (operands[1], HImode))
- && !TARGET_SOFT_FLOAT
- && TARGET_64BIT"
- "@
- copy %1,%0
- ldi %1,%0
- ldil L'%1,%0
- {zdepi|depwi,z} %Z1,%0
- ldh%M1 %1,%0
- sth%M0 %r1,%0
- mtsar %r1
- {mfctl|mfctl,w} %sar,%0
- fcpy,sgl %f1,%0"
- [(set_attr "type" "move,move,move,shift,load,store,move,move,move")
- (set_attr "pa_combine_type" "addmove")
- (set_attr "length" "4,4,4,4,4,4,4,4,4")])
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+}")
+
+;; Handle HImode output reloads requiring a general register as a
+;; scratch register.
+(define_expand "reload_outhi"
+ [(set (match_operand:HI 0 "non_hard_reg_operand" "")
+ (match_operand:HI 1 "register_operand" "Z"))
+ (clobber (match_operand:HI 2 "register_operand" "=&r"))]
+ ""
+ "
+{
+ if (pa_emit_move_sequence (operands, HImode, operands[2]))
+ DONE;
+
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+}")
(define_insn ""
[(set (match_operand:HI 0 "move_dest_operand"
(match_operand:HI 1 "move_src_operand"
"r,J,N,K,RQ,rM,!rM,!*q"))]
"(register_operand (operands[0], HImode)
- || reg_or_0_operand (operands[1], HImode))
- && TARGET_SOFT_FLOAT"
+ || reg_or_0_operand (operands[1], HImode))"
"@
copy %1,%0
ldi %1,%0
""
"
{
- if (emit_move_sequence (operands, QImode, 0))
+ if (pa_emit_move_sequence (operands, QImode, 0))
DONE;
}")
-(define_insn ""
- [(set (match_operand:QI 0 "move_dest_operand"
- "=r,r,r,r,r,Q,!*q,!r,!*f,!r,!f")
- (match_operand:QI 1 "move_src_operand"
- "r,J,N,K,RQ,rM,!rM,!*q,!*fM,!f,!r"))]
- "(register_operand (operands[0], QImode)
- || reg_or_0_operand (operands[1], QImode))
- && !TARGET_SOFT_FLOAT
- && !TARGET_64BIT"
- "@
- copy %1,%0
- ldi %1,%0
- ldil L'%1,%0
- {zdepi|depwi,z} %Z1,%0
- ldb%M1 %1,%0
- stb%M0 %r1,%0
- mtsar %r1
- {mfctl|mfctl,w} %%sar,%0
- fcpy,sgl %f1,%0
- {fstws|fstw} %1,-16(%%sp)\n\t{ldws|ldw} -16(%%sp),%0
- {stws|stw} %1,-16(%%sp)\n\t{fldws|fldw} -16(%%sp),%0"
- [(set_attr "type" "move,move,move,shift,load,store,move,move,move,move,move")
- (set_attr "pa_combine_type" "addmove")
- (set_attr "length" "4,4,4,4,4,4,4,4,4,8,8")])
+;; Handle QImode input reloads requiring a general register as a
+;; scratch register.
+(define_expand "reload_inqi"
+ [(set (match_operand:QI 0 "register_operand" "=Z")
+ (match_operand:QI 1 "non_hard_reg_operand" ""))
+ (clobber (match_operand:QI 2 "register_operand" "=&r"))]
+ ""
+ "
+{
+ if (pa_emit_move_sequence (operands, QImode, operands[2]))
+ DONE;
-(define_insn ""
- [(set (match_operand:QI 0 "move_dest_operand"
- "=r,r,r,r,r,Q,!*q,!r,!*f")
- (match_operand:QI 1 "move_src_operand"
- "r,J,N,K,RQ,rM,!rM,!*q,!*fM"))]
- "(register_operand (operands[0], QImode)
- || reg_or_0_operand (operands[1], QImode))
- && !TARGET_SOFT_FLOAT
- && TARGET_64BIT"
- "@
- copy %1,%0
- ldi %1,%0
- ldil L'%1,%0
- {zdepi|depwi,z} %Z1,%0
- ldb%M1 %1,%0
- stb%M0 %r1,%0
- mtsar %r1
- {mfctl|mfctl,w} %%sar,%0
- fcpy,sgl %f1,%0"
- [(set_attr "type" "move,move,move,shift,load,store,move,move,move")
- (set_attr "pa_combine_type" "addmove")
- (set_attr "length" "4,4,4,4,4,4,4,4,4")])
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+}")
+
+;; Handle QImode output reloads requiring a general register as a
+;; scratch register.
+(define_expand "reload_outqi"
+ [(set (match_operand:QI 0 "non_hard_reg_operand" "")
+ (match_operand:QI 1 "register_operand" "Z"))
+ (clobber (match_operand:QI 2 "register_operand" "=&r"))]
+ ""
+ "
+{
+ if (pa_emit_move_sequence (operands, QImode, operands[2]))
+ DONE;
+
+ /* We don't want the clobber emitted, so handle this ourselves. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ DONE;
+}")
(define_insn ""
[(set (match_operand:QI 0 "move_dest_operand"
(match_operand:QI 1 "move_src_operand"
"r,J,N,K,RQ,rM,!rM,!*q"))]
"(register_operand (operands[0], QImode)
- || reg_or_0_operand (operands[1], QImode))
- && TARGET_SOFT_FLOAT"
+ || reg_or_0_operand (operands[1], QImode))"
"@
copy %1,%0
ldi %1,%0
(clobber (match_dup 6))
(clobber (match_dup 7))
(clobber (match_dup 8))
- (use (match_operand:SI 2 "arith_operand" ""))
+ (use (match_operand:SI 2 "arith14_operand" ""))
(use (match_operand:SI 3 "const_int_operand" ""))])]
"!TARGET_64BIT && optimize > 0"
"
size = INTVAL (operands[2]);
align = INTVAL (operands[3]);
- align = align > 4 ? 4 : align;
+ align = align > 4 ? 4 : (align ? align : 1);
/* If size/alignment is large, then use the library routines. */
if (size / align > 16)
FAIL;
/* This does happen, but not often enough to worry much about. */
- if (size / align < MOVE_RATIO)
+ if (size / align < MOVE_RATIO (optimize_insn_for_speed_p ()))
FAIL;
/* Fall through means we're going to use our block move pattern. */
}")
;; The operand constraints are written like this to support both compile-time
-;; and run-time determined byte counts. The expander and output_block_move
+;; and run-time determined byte counts. The expander and pa_output_block_move
;; only support compile-time determined counts at this time.
;;
;; If the count is run-time determined, the register with the byte count
(clobber (match_operand:SI 6 "register_operand" "=&r,&r")) ;item tmp2
(clobber (match_operand:SI 7 "register_operand" "=&r,&r")) ;item tmp3
(clobber (match_operand:SI 8 "register_operand" "=&r,&r")) ;item tmp4
- (use (match_operand:SI 4 "arith_operand" "J,2")) ;byte count
+ (use (match_operand:SI 4 "arith14_operand" "J,2")) ;byte count
(use (match_operand:SI 5 "const_int_operand" "n,n"))] ;alignment
"!TARGET_64BIT"
"#"
(clobber (match_operand:SI 6 "register_operand" ""))
(clobber (match_operand:SI 7 "register_operand" ""))
(clobber (match_operand:SI 8 "register_operand" ""))
- (use (match_operand:SI 4 "arith_operand" ""))
+ (use (match_operand:SI 4 "arith14_operand" ""))
(use (match_operand:SI 5 "const_int_operand" ""))])]
"!TARGET_64BIT && reload_completed && !flag_peephole2
&& GET_CODE (operands[0]) == MEM
(clobber (match_operand:SI 6 "register_operand" ""))
(clobber (match_operand:SI 7 "register_operand" ""))
(clobber (match_operand:SI 8 "register_operand" ""))
- (use (match_operand:SI 4 "arith_operand" ""))
+ (use (match_operand:SI 4 "arith14_operand" ""))
(use (match_operand:SI 5 "const_int_operand" ""))])]
"!TARGET_64BIT
&& GET_CODE (operands[0]) == MEM
(clobber (match_operand:SI 6 "register_operand" "=&r,&r")) ;item tmp2
(clobber (match_dup 0))
(clobber (match_dup 1))
- (use (match_operand:SI 4 "arith_operand" "J,2")) ;byte count
+ (use (match_operand:SI 4 "arith14_operand" "J,2")) ;byte count
(use (match_operand:SI 5 "const_int_operand" "n,n")) ;alignment
(const_int 0)]
"!TARGET_64BIT && reload_completed"
- "* return output_block_move (operands, !which_alternative);"
+ "* return pa_output_block_move (operands, !which_alternative);"
[(set_attr "type" "multi,multi")])
(define_expand "movmemdi"
(clobber (match_dup 6))
(clobber (match_dup 7))
(clobber (match_dup 8))
- (use (match_operand:DI 2 "arith_operand" ""))
+ (use (match_operand:DI 2 "arith14_operand" ""))
(use (match_operand:DI 3 "const_int_operand" ""))])]
"TARGET_64BIT && optimize > 0"
"
size = INTVAL (operands[2]);
align = INTVAL (operands[3]);
- align = align > 8 ? 8 : align;
+ align = align > 8 ? 8 : (align ? align : 1);
/* If size/alignment is large, then use the library routines. */
if (size / align > 16)
FAIL;
/* This does happen, but not often enough to worry much about. */
- if (size / align < MOVE_RATIO)
+ if (size / align < MOVE_RATIO (optimize_insn_for_speed_p ()))
FAIL;
/* Fall through means we're going to use our block move pattern. */
}")
;; The operand constraints are written like this to support both compile-time
-;; and run-time determined byte counts. The expander and output_block_move
+;; and run-time determined byte counts. The expander and pa_output_block_move
;; only support compile-time determined counts at this time.
;;
;; If the count is run-time determined, the register with the byte count
(clobber (match_operand:DI 6 "register_operand" "=&r,&r")) ;item tmp2
(clobber (match_operand:DI 7 "register_operand" "=&r,&r")) ;item tmp3
(clobber (match_operand:DI 8 "register_operand" "=&r,&r")) ;item tmp4
- (use (match_operand:DI 4 "arith_operand" "J,2")) ;byte count
+ (use (match_operand:DI 4 "arith14_operand" "J,2")) ;byte count
(use (match_operand:DI 5 "const_int_operand" "n,n"))] ;alignment
"TARGET_64BIT"
"#"
(clobber (match_operand:DI 6 "register_operand" ""))
(clobber (match_operand:DI 7 "register_operand" ""))
(clobber (match_operand:DI 8 "register_operand" ""))
- (use (match_operand:DI 4 "arith_operand" ""))
+ (use (match_operand:DI 4 "arith14_operand" ""))
(use (match_operand:DI 5 "const_int_operand" ""))])]
"TARGET_64BIT && reload_completed && !flag_peephole2
&& GET_CODE (operands[0]) == MEM
(clobber (match_operand:DI 6 "register_operand" ""))
(clobber (match_operand:DI 7 "register_operand" ""))
(clobber (match_operand:DI 8 "register_operand" ""))
- (use (match_operand:DI 4 "arith_operand" ""))
+ (use (match_operand:DI 4 "arith14_operand" ""))
(use (match_operand:DI 5 "const_int_operand" ""))])]
"TARGET_64BIT
&& GET_CODE (operands[0]) == MEM
(clobber (match_operand:DI 6 "register_operand" "=&r,&r")) ;item tmp2
(clobber (match_dup 0))
(clobber (match_dup 1))
- (use (match_operand:DI 4 "arith_operand" "J,2")) ;byte count
+ (use (match_operand:DI 4 "arith14_operand" "J,2")) ;byte count
(use (match_operand:DI 5 "const_int_operand" "n,n")) ;alignment
(const_int 0)]
"TARGET_64BIT && reload_completed"
- "* return output_block_move (operands, !which_alternative);"
+ "* return pa_output_block_move (operands, !which_alternative);"
[(set_attr "type" "multi,multi")])
(define_expand "setmemsi"
(match_operand 2 "const_int_operand" ""))
(clobber (match_dup 4))
(clobber (match_dup 5))
- (use (match_operand:SI 1 "arith_operand" ""))
+ (use (match_operand:SI 1 "arith14_operand" ""))
(use (match_operand:SI 3 "const_int_operand" ""))])]
"!TARGET_64BIT && optimize > 0"
"
FAIL;
/* This does happen, but not often enough to worry much about. */
- if (size / align < MOVE_RATIO)
+ if (size / align < MOVE_RATIO (optimize_insn_for_speed_p ()))
FAIL;
/* Fall through means we're going to use our block clear pattern. */
(const_int 0))
(clobber (match_operand:SI 1 "register_operand" "=&r,&r")) ;loop cnt/tmp
(clobber (match_operand:SI 4 "register_operand" "=&r,&r")) ;tmp1
- (use (match_operand:SI 2 "arith_operand" "J,1")) ;byte count
+ (use (match_operand:SI 2 "arith14_operand" "J,1")) ;byte count
(use (match_operand:SI 3 "const_int_operand" "n,n"))] ;alignment
"!TARGET_64BIT"
"#"
(const_int 0))
(clobber (match_operand:SI 1 "register_operand" ""))
(clobber (match_operand:SI 4 "register_operand" ""))
- (use (match_operand:SI 2 "arith_operand" ""))
+ (use (match_operand:SI 2 "arith14_operand" ""))
(use (match_operand:SI 3 "const_int_operand" ""))])]
"!TARGET_64BIT && reload_completed && !flag_peephole2
&& GET_CODE (operands[0]) == MEM
(const_int 0))
(clobber (match_operand:SI 1 "register_operand" ""))
(clobber (match_operand:SI 4 "register_operand" ""))
- (use (match_operand:SI 2 "arith_operand" ""))
+ (use (match_operand:SI 2 "arith14_operand" ""))
(use (match_operand:SI 3 "const_int_operand" ""))])]
"!TARGET_64BIT
&& GET_CODE (operands[0]) == MEM
(const_int 0))
(clobber (match_operand:SI 1 "register_operand" "=&r,&r")) ;loop cnt/tmp
(clobber (match_dup 0))
- (use (match_operand:SI 2 "arith_operand" "J,1")) ;byte count
+ (use (match_operand:SI 2 "arith14_operand" "J,1")) ;byte count
(use (match_operand:SI 3 "const_int_operand" "n,n")) ;alignment
(const_int 0)]
"!TARGET_64BIT && reload_completed"
- "* return output_block_clear (operands, !which_alternative);"
+ "* return pa_output_block_clear (operands, !which_alternative);"
[(set_attr "type" "multi,multi")])
(define_expand "setmemdi"
(match_operand 2 "const_int_operand" ""))
(clobber (match_dup 4))
(clobber (match_dup 5))
- (use (match_operand:DI 1 "arith_operand" ""))
+ (use (match_operand:DI 1 "arith14_operand" ""))
(use (match_operand:DI 3 "const_int_operand" ""))])]
"TARGET_64BIT && optimize > 0"
"
FAIL;
/* This does happen, but not often enough to worry much about. */
- if (size / align < MOVE_RATIO)
+ if (size / align < MOVE_RATIO (optimize_insn_for_speed_p ()))
FAIL;
/* Fall through means we're going to use our block clear pattern. */
(const_int 0))
(clobber (match_operand:DI 1 "register_operand" "=&r,&r")) ;loop cnt/tmp
(clobber (match_operand:DI 4 "register_operand" "=&r,&r")) ;item tmp1
- (use (match_operand:DI 2 "arith_operand" "J,1")) ;byte count
+ (use (match_operand:DI 2 "arith14_operand" "J,1")) ;byte count
(use (match_operand:DI 3 "const_int_operand" "n,n"))] ;alignment
"TARGET_64BIT"
"#"
(const_int 0))
(clobber (match_operand:DI 1 "register_operand" ""))
(clobber (match_operand:DI 4 "register_operand" ""))
- (use (match_operand:DI 2 "arith_operand" ""))
+ (use (match_operand:DI 2 "arith14_operand" ""))
(use (match_operand:DI 3 "const_int_operand" ""))])]
"TARGET_64BIT && reload_completed && !flag_peephole2
&& GET_CODE (operands[0]) == MEM
(const_int 0))
(clobber (match_operand:DI 1 "register_operand" ""))
(clobber (match_operand:DI 4 "register_operand" ""))
- (use (match_operand:DI 2 "arith_operand" ""))
+ (use (match_operand:DI 2 "arith14_operand" ""))
(use (match_operand:DI 3 "const_int_operand" ""))])]
"TARGET_64BIT
&& GET_CODE (operands[0]) == MEM
(const_int 0))
(clobber (match_operand:DI 1 "register_operand" "=&r,&r")) ;loop cnt/tmp
(clobber (match_dup 0))
- (use (match_operand:DI 2 "arith_operand" "J,1")) ;byte count
+ (use (match_operand:DI 2 "arith14_operand" "J,1")) ;byte count
(use (match_operand:DI 3 "const_int_operand" "n,n")) ;alignment
(const_int 0)]
"TARGET_64BIT && reload_completed"
- "* return output_block_clear (operands, !which_alternative);"
+ "* return pa_output_block_clear (operands, !which_alternative);"
[(set_attr "type" "multi,multi")])
\f
;; Floating point move insns
&& operands[1] != CONST0_RTX (DFmode)
&& !TARGET_64BIT
&& !TARGET_SOFT_FLOAT"
- "* return (which_alternative == 0 ? output_move_double (operands)
+ "* return (which_alternative == 0 ? pa_output_move_double (operands)
: \"fldd%F1 %1,%0\");"
[(set_attr "type" "move,fpload")
(set_attr "length" "16,4")])
""
"
{
- if (GET_CODE (operands[1]) == CONST_DOUBLE && TARGET_64BIT)
- operands[1] = force_const_mem (DFmode, operands[1]);
+ if (GET_CODE (operands[1]) == CONST_DOUBLE
+ && operands[1] != CONST0_RTX (DFmode))
+ {
+ /* Reject CONST_DOUBLE loads to all hard registers when
+ generating 64-bit code and to floating point registers
+ when generating 32-bit code. */
+ if (REG_P (operands[0])
+ && HARD_REGISTER_P (operands[0])
+ && (TARGET_64BIT || REGNO (operands[0]) >= 32))
+ FAIL;
+
+ if (TARGET_64BIT)
+ operands[1] = force_const_mem (DFmode, operands[1]);
+ }
- if (emit_move_sequence (operands, DFmode, 0))
+ if (pa_emit_move_sequence (operands, DFmode, 0))
DONE;
}")
""
"
{
- if (emit_move_sequence (operands, DFmode, operands[2]))
+ if (pa_emit_move_sequence (operands, DFmode, operands[2]))
DONE;
/* We don't want the clobber emitted, so handle this ourselves. */
""
"
{
- if (emit_move_sequence (operands, DFmode, operands[2]))
+ if (pa_emit_move_sequence (operands, DFmode, operands[2]))
DONE;
/* We don't want the clobber emitted, so handle this ourselves. */
(define_insn ""
[(set (match_operand:DF 0 "move_dest_operand"
- "=f,*r,Q,?o,?Q,f,*r,*r,!r,!f")
+ "=f,*r,Q,?o,?Q,f,*r,*r,?*r,?f")
(match_operand:DF 1 "reg_or_0_or_nonsymb_mem_operand"
- "fG,*rG,f,*r,*r,RQ,o,RQ,!f,!r"))]
+ "fG,*rG,f,*r,*r,RQ,o,RQ,f,*r"))]
"(register_operand (operands[0], DFmode)
|| reg_or_0_operand (operands[1], DFmode))
&& !(GET_CODE (operands[1]) == CONST_DOUBLE
|| operands[1] == CONST0_RTX (DFmode))
&& !(REG_P (operands[0]) && REG_P (operands[1])
&& FP_REG_P (operands[0]) ^ FP_REG_P (operands[1])))
- return output_fp_move_double (operands);
- return output_move_double (operands);
+ return pa_output_fp_move_double (operands);
+ return pa_output_move_double (operands);
}"
- [(set_attr "type" "fpalu,move,fpstore,store,store,fpload,load,load,move,move")
+ [(set_attr "type" "fpalu,move,fpstore,store,store,fpload,load,load,fpstore_load,store_fpload")
(set_attr "length" "4,8,4,8,16,4,8,16,12,12")])
(define_insn ""
;; DFmode move for 32-bit targets with software floating point: the
;; value lives entirely in general registers or memory, and
;; pa_output_move_double emits the two-word move sequence.
(define_insn ""
[(set (match_operand:DF 0 "move_dest_operand"
- "=r,?o,?Q,r,r,!r,!f")
+ "=r,?o,?Q,r,r")
(match_operand:DF 1 "reg_or_0_or_nonsymb_mem_operand"
- "rG,r,r,o,RQ,!f,!r"))]
+ "rG,r,r,o,RQ"))]
"(register_operand (operands[0], DFmode)
|| reg_or_0_operand (operands[1], DFmode))
&& !TARGET_64BIT
&& TARGET_SOFT_FLOAT"
"*
{
- return output_move_double (operands);
+ return pa_output_move_double (operands);
}"
- [(set_attr "type" "move,store,store,load,load,move,move")
- (set_attr "length" "8,8,16,8,16,12,12")])
+ [(set_attr "type" "move,store,store,load,load")
+ (set_attr "length" "8,8,16,8,16")])
(define_insn ""
[(set (match_operand:DF 0 "move_dest_operand"
"=!*r,*r,*r,*r,*r,Q,f,f,T")
(match_operand:DF 1 "move_src_operand"
- "!*r,J,N,K,RQ,*rM,fM,RT,f"))]
+ "!*r,J,N,K,RQ,*rG,fG,RT,f"))]
"(register_operand (operands[0], DFmode)
|| reg_or_0_operand (operands[1], DFmode))
&& !TARGET_SOFT_FLOAT && TARGET_64BIT"
""
"
{
- if (GET_CODE (operands[1]) == CONST_DOUBLE && TARGET_64BIT)
- operands[1] = force_const_mem (DImode, operands[1]);
+ /* Except for zero, we don't support loading a CONST_INT directly
+ to a hard floating-point register since a scratch register is
+ needed for the operation. While the operation could be handled
+ before register allocation, the simplest solution is to fail. */
+ if (TARGET_64BIT
+ && GET_CODE (operands[1]) == CONST_INT
+ && operands[1] != CONST0_RTX (DImode)
+ && REG_P (operands[0])
+ && HARD_REGISTER_P (operands[0])
+ && REGNO (operands[0]) >= 32)
+ FAIL;
- if (emit_move_sequence (operands, DImode, 0))
+ if (pa_emit_move_sequence (operands, DImode, 0))
DONE;
}")
""
"
{
- if (emit_move_sequence (operands, DImode, operands[2]))
+ if (pa_emit_move_sequence (operands, DImode, operands[2]))
DONE;
/* We don't want the clobber emitted, so handle this ourselves. */
""
"
{
- if (emit_move_sequence (operands, DImode, operands[2]))
+ if (pa_emit_move_sequence (operands, DImode, operands[2]))
DONE;
/* We don't want the clobber emitted, so handle this ourselves. */
""
"
{
- if (emit_move_sequence (operands, DImode, operands[2]))
+ if (pa_emit_move_sequence (operands, DImode, operands[2]))
DONE;
/* We don't want the clobber emitted, so handle this ourselves. */
switch (GET_CODE (op1))
{
case CONST_INT:
+#if HOST_BITS_PER_WIDE_INT <= 32
operands[0] = operand_subword (op0, 1, 0, DImode);
output_asm_insn (\"ldil L'%1,%0\", operands);
output_asm_insn (\"ldi -1,%0\", operands);
else
output_asm_insn (\"ldi 0,%0\", operands);
+#else
+ operands[0] = operand_subword (op0, 1, 0, DImode);
+ operands[1] = GEN_INT (INTVAL (op1) & 0xffffffff);
+ output_asm_insn (\"ldil L'%1,%0\", operands);
+
+ operands[0] = operand_subword (op0, 0, 0, DImode);
+ operands[1] = GEN_INT (INTVAL (op1) >> 32);
+ output_asm_insn (pa_singlemove_string (operands), operands);
+#endif
break;
case CONST_DOUBLE:
operands[0] = operand_subword (op0, 0, 0, DImode);
operands[1] = GEN_INT (CONST_DOUBLE_HIGH (op1));
- output_asm_insn (singlemove_string (operands), operands);
+ output_asm_insn (pa_singlemove_string (operands), operands);
break;
default:
return \"\";
}"
[(set_attr "type" "move")
- (set_attr "length" "8")])
+ (set_attr "length" "12")])
(define_insn ""
[(set (match_operand:DI 0 "move_dest_operand"
- "=r,o,Q,r,r,r,*f,*f,T,!r,!f")
+ "=r,o,Q,r,r,r,*f,*f,T,?r,?*f")
(match_operand:DI 1 "general_operand"
- "rM,r,r,o*R,Q,i,*fM,RT,*f,!f,!r"))]
+ "rM,r,r,o*R,Q,i,*fM,RT,*f,*f,r"))]
"(register_operand (operands[0], DImode)
|| reg_or_0_operand (operands[1], DImode))
&& !TARGET_64BIT
|| operands[1] == CONST0_RTX (DFmode))
&& !(REG_P (operands[0]) && REG_P (operands[1])
&& FP_REG_P (operands[0]) ^ FP_REG_P (operands[1])))
- return output_fp_move_double (operands);
- return output_move_double (operands);
+ return pa_output_fp_move_double (operands);
+ return pa_output_move_double (operands);
}"
[(set_attr "type"
- "move,store,store,load,load,multi,fpalu,fpload,fpstore,move,move")
+ "move,store,store,load,load,multi,fpalu,fpload,fpstore,fpstore_load,store_fpload")
(set_attr "length" "8,8,16,8,16,16,4,4,4,12,12")])
(define_insn ""
&& TARGET_SOFT_FLOAT"
"*
{
- return output_move_double (operands);
+ return pa_output_move_double (operands);
}"
[(set_attr "type" "move,store,store,load,load,multi")
(set_attr "length" "8,8,16,8,16,16")])
"!TARGET_64BIT"
"*
{
- /* Don't output a 64 bit constant, since we can't trust the assembler to
+ /* Don't output a 64-bit constant, since we can't trust the assembler to
handle it correctly. */
if (GET_CODE (operands[2]) == CONST_DOUBLE)
operands[2] = GEN_INT (CONST_DOUBLE_LOW (operands[2]));
+ else if (HOST_BITS_PER_WIDE_INT > 32
+ && GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffffffff);
if (which_alternative == 1)
output_asm_insn (\"copy %1,%0\", operands);
return \"ldo R'%G2(%R1),%R0\";
"GET_CODE (operands[1]) == CONST_DOUBLE
&& operands[1] != CONST0_RTX (SFmode)
&& ! TARGET_SOFT_FLOAT"
- "* return (which_alternative == 0 ? singlemove_string (operands)
+ "* return (which_alternative == 0 ? pa_singlemove_string (operands)
: \" fldw%F1 %1,%0\");"
[(set_attr "type" "move,fpload")
(set_attr "length" "8,4")])
""
"
{
- if (emit_move_sequence (operands, SFmode, 0))
+ /* Reject CONST_DOUBLE loads to floating point registers. */
+ if (GET_CODE (operands[1]) == CONST_DOUBLE
+ && operands[1] != CONST0_RTX (SFmode)
+ && REG_P (operands[0])
+ && HARD_REGISTER_P (operands[0])
+ && REGNO (operands[0]) >= 32)
+ FAIL;
+
+ if (pa_emit_move_sequence (operands, SFmode, 0))
DONE;
}")
""
"
{
- if (emit_move_sequence (operands, SFmode, operands[2]))
+ if (pa_emit_move_sequence (operands, SFmode, operands[2]))
DONE;
/* We don't want the clobber emitted, so handle this ourselves. */
""
"
{
- if (emit_move_sequence (operands, SFmode, operands[2]))
+ if (pa_emit_move_sequence (operands, SFmode, operands[2]))
DONE;
/* We don't want the clobber emitted, so handle this ourselves. */
(define_insn ""
[(set (match_operand:SF 0 "move_dest_operand"
- "=f,!*r,f,*r,Q,Q,!r,!f")
+ "=f,!*r,f,*r,Q,Q,?*r,?f")
(match_operand:SF 1 "reg_or_0_or_nonsymb_mem_operand"
- "fG,!*rG,RQ,RQ,f,*rG,!f,!r"))]
+ "fG,!*rG,RQ,RQ,f,*rG,f,*r"))]
"(register_operand (operands[0], SFmode)
|| reg_or_0_operand (operands[1], SFmode))
&& !TARGET_SOFT_FLOAT
stw%M0 %r1,%0
{fstws|fstw} %1,-16(%%sp)\n\t{ldws|ldw} -16(%%sp),%0
{stws|stw} %1,-16(%%sp)\n\t{fldws|fldw} -16(%%sp),%0"
- [(set_attr "type" "fpalu,move,fpload,load,fpstore,store,move,move")
+ [(set_attr "type" "fpalu,move,fpload,load,fpstore,store,fpstore_load,store_fpload")
(set_attr "pa_combine_type" "addmove")
(set_attr "length" "4,4,4,4,4,4,8,8")])
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r,r")
(plus:DI (match_operand:DI 1 "register_operand" "%r,r")
- (match_operand:DI 2 "arith_operand" "r,J")))]
+ (match_operand:DI 2 "arith14_operand" "r,J")))]
"TARGET_64BIT"
"@
add,l %1,%2,%0
[(set_attr "type" "binary")
(set_attr "length" "4")])
+;; addvdi3: signed DImode addition that traps on overflow. The trap_if
+;; compares the TImode sign-extended sum against the sign-extension of
+;; the DImode sum; inequality means the 64-bit add overflowed.
+(define_expand "addvdi3"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (plus:DI (match_operand:DI 1 "reg_or_0_operand" "")
+ (match_operand:DI 2 "arith11_operand" "")))
+ (trap_if (ne (plus:TI (sign_extend:TI (match_dup 1))
+ (sign_extend:TI (match_dup 2)))
+ (sign_extend:TI (plus:DI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))])]
+ ""
+ "")
+
+;; 64-bit form: a single add,tsv/addi,tsv traps on signed overflow.
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (plus:DI (match_operand:DI 1 "reg_or_0_operand" "%rM,rM")
+ (match_operand:DI 2 "arith11_operand" "r,I")))
+ (trap_if (ne (plus:TI (sign_extend:TI (match_dup 1))
+ (sign_extend:TI (match_dup 2)))
+ (sign_extend:TI (plus:DI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ "TARGET_64BIT"
+ "@
+ add,tsv,* %2,%1,%0
+ addi,tsv,* %2,%1,%0"
+ [(set_attr "type" "binary,binary")
+ (set_attr "length" "4,4")])
+
+;; 32-bit form: add the low words with addi, then fold the carry into
+;; the high word with the trapping add,c,tsv (or sub,b,tsv when the
+;; immediate is negative).
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "reg_or_0_operand" "%rM")
+ (match_operand:DI 2 "arith11_operand" "rI")))
+ (trap_if (ne (plus:TI (sign_extend:TI (match_dup 1))
+ (sign_extend:TI (match_dup 2)))
+ (sign_extend:TI (plus:DI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ "!TARGET_64BIT"
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if (INTVAL (operands[2]) >= 0)
+ return \"addi %2,%R1,%R0\;{addco|add,c,tsv} %1,%%r0,%0\";
+ else
+ return \"addi %2,%R1,%R0\;{subbo|sub,b,tsv} %1,%%r0,%0\";
+ }
+ else
+ return \"add %R2,%R1,%R0\;{addco|add,c,tsv} %2,%1,%0\";
+}"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
;; define_splits to optimize cases of adding a constant integer
;; to a register when the constant does not fit in 14 bits.
(define_split
(plus:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "const_int_operand" "")))
(clobber (match_operand:SI 4 "register_operand" ""))]
- "! cint_ok_for_move (INTVAL (operands[2]))
+ "! pa_cint_ok_for_move (INTVAL (operands[2]))
&& VAL_14_BITS_P (INTVAL (operands[2]) >> 1)"
[(set (match_dup 4) (plus:SI (match_dup 1) (match_dup 2)))
(set (match_dup 0) (plus:SI (match_dup 4) (match_dup 3)))]
(plus:SI (match_operand:SI 1 "register_operand" "")
(match_operand:SI 2 "const_int_operand" "")))
(clobber (match_operand:SI 4 "register_operand" ""))]
- "! cint_ok_for_move (INTVAL (operands[2]))"
+ "! pa_cint_ok_for_move (INTVAL (operands[2]))"
[(set (match_dup 4) (match_dup 2))
(set (match_dup 0) (plus:SI (mult:SI (match_dup 4) (match_dup 3))
(match_dup 1)))]
/* Try dividing the constant by 2, then 4, and finally 8 to see
if we can get a constant which can be loaded into a register
- in a single instruction (cint_ok_for_move).
+ in a single instruction (pa_cint_ok_for_move).
If that fails, try to negate the constant and subtract it
from our input operand. */
- if (intval % 2 == 0 && cint_ok_for_move (intval / 2))
+ if (intval % 2 == 0 && pa_cint_ok_for_move (intval / 2))
{
operands[2] = GEN_INT (intval / 2);
operands[3] = const2_rtx;
}
- else if (intval % 4 == 0 && cint_ok_for_move (intval / 4))
+ else if (intval % 4 == 0 && pa_cint_ok_for_move (intval / 4))
{
operands[2] = GEN_INT (intval / 4);
operands[3] = GEN_INT (4);
}
- else if (intval % 8 == 0 && cint_ok_for_move (intval / 8))
+ else if (intval % 8 == 0 && pa_cint_ok_for_move (intval / 8))
{
operands[2] = GEN_INT (intval / 8);
operands[3] = GEN_INT (8);
}
- else if (cint_ok_for_move (-intval))
+ else if (pa_cint_ok_for_move (-intval))
{
emit_insn (gen_rtx_SET (VOIDmode, operands[4], GEN_INT (-intval)));
emit_insn (gen_subsi3 (operands[0], operands[1], operands[4]));
(define_insn "addsi3"
[(set (match_operand:SI 0 "register_operand" "=r,r")
(plus:SI (match_operand:SI 1 "register_operand" "%r,r")
- (match_operand:SI 2 "arith_operand" "r,J")))]
+ (match_operand:SI 2 "arith14_operand" "r,J")))]
""
"@
{addl|add,l} %1,%2,%0
(set_attr "pa_combine_type" "addmove")
(set_attr "length" "4,4")])
+;; addvsi3: SImode addition that traps on signed overflow; the trap_if
+;; compares the DImode-widened sum with the sign-extended SImode sum.
+(define_insn "addvsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (plus:SI (match_operand:SI 1 "reg_or_0_operand" "%rM,rM")
+ (match_operand:SI 2 "arith11_operand" "r,I")))
+ (trap_if (ne (plus:DI (sign_extend:DI (match_dup 1))
+ (sign_extend:DI (match_dup 2)))
+ (sign_extend:DI (plus:SI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ ""
+ "@
+ {addo|add,tsv} %2,%1,%0
+ {addio|addi,tsv} %2,%1,%0"
+ [(set_attr "type" "binary,binary")
+ (set_attr "length" "4,4")])
+
;; subdi3: operand 1 may be an 11-bit signed immediate (arith11_operand);
;; operand 2 is a register or the constant zero.
(define_expand "subdi3"
[(set (match_operand:DI 0 "register_operand" "")
- (minus:DI (match_operand:DI 1 "register_operand" "")
- (match_operand:DI 2 "register_operand" "")))]
+ (minus:DI (match_operand:DI 1 "arith11_operand" "")
+ (match_operand:DI 2 "reg_or_0_operand" "")))]
""
"")
(define_insn ""
- [(set (match_operand:DI 0 "register_operand" "=r")
- (minus:DI (match_operand:DI 1 "register_operand" "r")
- (match_operand:DI 2 "register_operand" "r")))]
- "!TARGET_64BIT"
- "sub %R1,%R2,%R0\;{subb|sub,b} %1,%2,%0"
- [(set_attr "type" "binary")
- (set_attr "length" "8")])
-
-(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r,r,!q")
(minus:DI (match_operand:DI 1 "arith11_operand" "r,I,!U")
- (match_operand:DI 2 "register_operand" "r,r,!r")))]
+ (match_operand:DI 2 "reg_or_0_operand" "rM,rM,!rM")))]
"TARGET_64BIT"
"@
sub %1,%2,%0
[(set_attr "type" "binary,binary,move")
(set_attr "length" "4,4,4")])
+;; 32-bit DImode subtraction: subtract the low words, then propagate
+;; the borrow into the high words with sub,b. When operand 1 is a
+;; negative immediate, its implicit high word of -1 is first
+;; materialized with ldi -1 into the earlyclobbered destination.
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,&r")
+ (minus:DI (match_operand:DI 1 "arith11_operand" "r,I")
+ (match_operand:DI 2 "reg_or_0_operand" "rM,rM")))]
+ "!TARGET_64BIT"
+ "*
+{
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ if (INTVAL (operands[1]) >= 0)
+ return \"subi %1,%R2,%R0\;{subb|sub,b} %%r0,%2,%0\";
+ else
+ return \"ldi -1,%0\;subi %1,%R2,%R0\;{subb|sub,b} %0,%2,%0\";
+ }
+ else
+ return \"sub %R1,%R2,%R0\;{subb|sub,b} %1,%2,%0\";
+}"
+ [(set_attr "type" "binary")
+ (set (attr "length")
+ (if_then_else (eq_attr "alternative" "0")
+ (const_int 8)
+ (if_then_else (ge (symbol_ref "INTVAL (operands[1])")
+ (const_int 0))
+ (const_int 8)
+ (const_int 12))))])
+
+;; subvdi3: DImode subtraction that traps on signed overflow (TImode
+;; widened difference compared against the sign-extended DImode one).
+(define_expand "subvdi3"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (minus:DI (match_operand:DI 1 "arith11_operand" "")
+ (match_operand:DI 2 "reg_or_0_operand" "")))
+ (trap_if (ne (minus:TI (sign_extend:TI (match_dup 1))
+ (sign_extend:TI (match_dup 2)))
+ (sign_extend:TI (minus:DI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))])]
+ ""
+ "")
+
+;; 64-bit form: sub,tsv/subi,tsv trap on signed overflow.
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (minus:DI (match_operand:DI 1 "arith11_operand" "r,I")
+ (match_operand:DI 2 "reg_or_0_operand" "rM,rM")))
+ (trap_if (ne (minus:TI (sign_extend:TI (match_dup 1))
+ (sign_extend:TI (match_dup 2)))
+ (sign_extend:TI (minus:DI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ "TARGET_64BIT"
+ "@
+ {subo|sub,tsv} %1,%2,%0
+ {subio|subi,tsv} %1,%2,%0"
+ [(set_attr "type" "binary,binary")
+ (set_attr "length" "4,4")])
+
+;; 32-bit form: as the plain double-word subtract, but using the
+;; trapping sub,b,tsv for the high-word borrow step.
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,&r")
+ (minus:DI (match_operand:DI 1 "arith11_operand" "r,I")
+ (match_operand:DI 2 "reg_or_0_operand" "rM,rM")))
+ (trap_if (ne (minus:TI (sign_extend:TI (match_dup 1))
+ (sign_extend:TI (match_dup 2)))
+ (sign_extend:TI (minus:DI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ "!TARGET_64BIT"
+ "*
+{
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ if (INTVAL (operands[1]) >= 0)
+ return \"subi %1,%R2,%R0\;{subbo|sub,b,tsv} %%r0,%2,%0\";
+ else
+ return \"ldi -1,%0\;subi %1,%R2,%R0\;{subbo|sub,b,tsv} %0,%2,%0\";
+ }
+ else
+ return \"sub %R1,%R2,%R0\;{subbo|sub,b,tsv} %1,%2,%0\";
+}"
+ [(set_attr "type" "binary,binary")
+ (set (attr "length")
+ (if_then_else (eq_attr "alternative" "0")
+ (const_int 8)
+ (if_then_else (ge (symbol_ref "INTVAL (operands[1])")
+ (const_int 0))
+ (const_int 8)
+ (const_int 12))))])
+
(define_expand "subsi3"
[(set (match_operand:SI 0 "register_operand" "")
(minus:SI (match_operand:SI 1 "arith11_operand" "")
[(set_attr "type" "binary,binary,move")
(set_attr "length" "4,4,4")])
+;; subvsi3: SImode subtraction that traps on signed overflow
+;; (sub,tsv/subi,tsv).
+(define_insn "subvsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "arith11_operand" "rM,I")
+ (match_operand:SI 2 "reg_or_0_operand" "rM,rM")))
+ (trap_if (ne (minus:DI (sign_extend:DI (match_dup 1))
+ (sign_extend:DI (match_dup 2)))
+ (sign_extend:DI (minus:SI (match_dup 1)
+ (match_dup 2))))
+ (const_int 0))]
+ ""
+ "@
+ {subo|sub,tsv} %1,%2,%0
+ {subio|subi,tsv} %1,%2,%0"
+ [(set_attr "type" "binary,binary")
+ (set_attr "length" "4,4")])
+
;; Clobbering a "register_operand" instead of a match_scratch
;; in operand3 of millicode calls avoids spilling %r1 and
;; produces better code.
(clobber (reg:SI 25))
(clobber (reg:SI 31))]
"!TARGET_64BIT"
- "* return output_mul_insn (0, insn);"
+ "* return pa_output_mul_insn (0, insn);"
[(set_attr "type" "milli")
- (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_millicode_call (insn)"))])
(define_insn ""
[(set (reg:SI 29) (mult:SI (reg:SI 26) (reg:SI 25)))
(clobber (reg:SI 25))
(clobber (reg:SI 2))]
"TARGET_64BIT"
- "* return output_mul_insn (0, insn);"
+ "* return pa_output_mul_insn (0, insn);"
[(set_attr "type" "milli")
- (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_millicode_call (insn)"))])
(define_expand "muldi3"
[(set (match_operand:DI 0 "register_operand" "")
GEN_INT (32)));
emit_move_insn (op2shifted, gen_rtx_LSHIFTRT (DImode, operands[2],
GEN_INT (32)));
- op1r = gen_rtx_SUBREG (SImode, operands[1], 4);
- op2r = gen_rtx_SUBREG (SImode, operands[2], 4);
- op1l = gen_rtx_SUBREG (SImode, op1shifted, 4);
- op2l = gen_rtx_SUBREG (SImode, op2shifted, 4);
+ op1r = force_reg (SImode, gen_rtx_SUBREG (SImode, operands[1], 4));
+ op2r = force_reg (SImode, gen_rtx_SUBREG (SImode, operands[2], 4));
+ op1l = force_reg (SImode, gen_rtx_SUBREG (SImode, op1shifted, 4));
+ op2l = force_reg (SImode, gen_rtx_SUBREG (SImode, op2shifted, 4));
/* Emit multiplies for the cross products. */
emit_insn (gen_umulsidi3 (cross_product1, op2r, op1l));
operands[5] = gen_rtx_REG (SImode, 31);
operands[4] = gen_reg_rtx (SImode);
}
- if (GET_CODE (operands[2]) == CONST_INT && emit_hpdiv_const (operands, 0))
+ if (GET_CODE (operands[2]) == CONST_INT && pa_emit_hpdiv_const (operands, 0))
DONE;
}")
(clobber (reg:SI 31))]
"!TARGET_64BIT"
"*
- return output_div_insn (operands, 0, insn);"
+ return pa_output_div_insn (operands, 0, insn);"
[(set_attr "type" "milli")
- (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_millicode_call (insn)"))])
(define_insn ""
[(set (reg:SI 29)
(clobber (reg:SI 2))]
"TARGET_64BIT"
"*
- return output_div_insn (operands, 0, insn);"
+ return pa_output_div_insn (operands, 0, insn);"
[(set_attr "type" "milli")
- (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_millicode_call (insn)"))])
(define_expand "udivsi3"
[(set (reg:SI 26) (match_operand:SI 1 "move_src_operand" ""))
operands[5] = gen_rtx_REG (SImode, 31);
operands[4] = gen_reg_rtx (SImode);
}
- if (GET_CODE (operands[2]) == CONST_INT && emit_hpdiv_const (operands, 1))
+ if (GET_CODE (operands[2]) == CONST_INT && pa_emit_hpdiv_const (operands, 1))
DONE;
}")
(clobber (reg:SI 31))]
"!TARGET_64BIT"
"*
- return output_div_insn (operands, 1, insn);"
+ return pa_output_div_insn (operands, 1, insn);"
[(set_attr "type" "milli")
- (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_millicode_call (insn)"))])
(define_insn ""
[(set (reg:SI 29)
(clobber (reg:SI 2))]
"TARGET_64BIT"
"*
- return output_div_insn (operands, 1, insn);"
+ return pa_output_div_insn (operands, 1, insn);"
[(set_attr "type" "milli")
- (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_millicode_call (insn)"))])
(define_expand "modsi3"
[(set (reg:SI 26) (match_operand:SI 1 "move_src_operand" ""))
(clobber (reg:SI 31))]
"!TARGET_64BIT"
"*
- return output_mod_insn (0, insn);"
+ return pa_output_mod_insn (0, insn);"
[(set_attr "type" "milli")
- (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_millicode_call (insn)"))])
(define_insn ""
[(set (reg:SI 29) (mod:SI (reg:SI 26) (reg:SI 25)))
(clobber (reg:SI 2))]
"TARGET_64BIT"
"*
- return output_mod_insn (0, insn);"
+ return pa_output_mod_insn (0, insn);"
[(set_attr "type" "milli")
- (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_millicode_call (insn)"))])
(define_expand "umodsi3"
[(set (reg:SI 26) (match_operand:SI 1 "move_src_operand" ""))
(clobber (reg:SI 31))]
"!TARGET_64BIT"
"*
- return output_mod_insn (1, insn);"
+ return pa_output_mod_insn (1, insn);"
[(set_attr "type" "milli")
- (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_millicode_call (insn)"))])
(define_insn ""
[(set (reg:SI 29) (umod:SI (reg:SI 26) (reg:SI 25)))
(clobber (reg:SI 2))]
"TARGET_64BIT"
"*
- return output_mod_insn (1, insn);"
+ return pa_output_mod_insn (1, insn);"
[(set_attr "type" "milli")
- (set (attr "length") (symbol_ref "attr_length_millicode_call (insn)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_millicode_call (insn)"))])
;;- and instructions
;; We define DImode `and` so with DImode `not` we can get
(and:DI (match_operand:DI 1 "register_operand" "%?r,0")
(match_operand:DI 2 "and_operand" "rO,P")))]
"TARGET_64BIT"
- "* return output_64bit_and (operands); "
+ "* return pa_output_64bit_and (operands); "
[(set_attr "type" "binary")
(set_attr "length" "4")])
(and:SI (match_operand:SI 1 "register_operand" "%?r,0")
(match_operand:SI 2 "and_operand" "rO,P")))]
""
- "* return output_and (operands); "
+ "* return pa_output_and (operands); "
[(set_attr "type" "binary,shift")
(set_attr "length" "4,4")])
(define_expand "iordi3"
[(set (match_operand:DI 0 "register_operand" "")
(ior:DI (match_operand:DI 1 "register_operand" "")
- (match_operand:DI 2 "ior_operand" "")))]
+ (match_operand:DI 2 "reg_or_cint_ior_operand" "")))]
""
"
{
;; DImode inclusive-or with a constant that one instruction can form
;; (cint_ior_operand); pa_output_64bit_ior chooses the encoding.
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r,r")
(ior:DI (match_operand:DI 1 "register_operand" "0,0")
- (match_operand:DI 2 "ior_operand" "M,i")))]
+ (match_operand:DI 2 "cint_ior_operand" "M,i")))]
"TARGET_64BIT"
- "* return output_64bit_ior (operands); "
+ "* return pa_output_64bit_ior (operands); "
[(set_attr "type" "binary,shift")
(set_attr "length" "4,4")])
;; iorsi3: operand 2 is validated by reg_or_cint_ior_operand, so the
;; expander needs no fixup code.
(define_expand "iorsi3"
[(set (match_operand:SI 0 "register_operand" "")
(ior:SI (match_operand:SI 1 "register_operand" "")
- (match_operand:SI 2 "arith32_operand" "")))]
+ (match_operand:SI 2 "reg_or_cint_ior_operand" "")))]
""
- "
-{
- if (! (ior_operand (operands[2], SImode)
- || register_operand (operands[2], SImode)))
- operands[2] = force_reg (SImode, operands[2]);
-}")
+ "")
;; SImode inclusive-or with a single-instruction constant.
(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r,r")
(ior:SI (match_operand:SI 1 "register_operand" "0,0")
- (match_operand:SI 2 "ior_operand" "M,i")))]
+ (match_operand:SI 2 "cint_ior_operand" "M,i")))]
""
- "* return output_ior (operands); "
+ "* return pa_output_ior (operands); "
[(set_attr "type" "binary,shift")
(set_attr "length" "4,4")])
[(set_attr "type" "unary")
(set_attr "length" "4")])
+;; negvdi2: DImode negation that traps on signed overflow (i.e. when
+;; negating the most negative value).
+(define_expand "negvdi2"
+ [(parallel [(set (match_operand:DI 0 "register_operand" "")
+ (neg:DI (match_operand:DI 1 "register_operand" "")))
+ (trap_if (ne (neg:TI (sign_extend:TI (match_dup 1)))
+ (sign_extend:TI (neg:DI (match_dup 1))))
+ (const_int 0))])]
+ ""
+ "")
+
+;; 32-bit form: negate the low word, then subtract the high word from
+;; zero with borrow using the trapping sub,b,tsv.
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operand:DI 1 "register_operand" "r")))
+ (trap_if (ne (neg:TI (sign_extend:TI (match_dup 1)))
+ (sign_extend:TI (neg:DI (match_dup 1))))
+ (const_int 0))]
+ "!TARGET_64BIT"
+ "sub %%r0,%R1,%R0\;{subbo|sub,b,tsv} %%r0,%1,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "8")])
+
+;; 64-bit form: a single trapping subtract from %r0.
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operand:DI 1 "register_operand" "r")))
+ (trap_if (ne (neg:TI (sign_extend:TI (match_dup 1)))
+ (sign_extend:TI (neg:DI (match_dup 1))))
+ (const_int 0))]
+ "TARGET_64BIT"
+ "sub,tsv %%r0,%1,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "4")])
+
(define_insn "negsi2"
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (match_operand:SI 1 "register_operand" "r")))]
[(set_attr "type" "unary")
(set_attr "length" "4")])
+;; negvsi2: SImode negation that traps on signed overflow
+;; (trapping subtract from %r0).
+(define_insn "negvsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "register_operand" "r")))
+ (trap_if (ne (neg:DI (sign_extend:DI (match_dup 1)))
+ (sign_extend:DI (neg:SI (match_dup 1))))
+ (const_int 0))]
+ ""
+ "{subo|sub,tsv} %%r0,%1,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "4")])
+
(define_expand "one_cmpldi2"
[(set (match_operand:DI 0 "register_operand" "")
(not:DI (match_operand:DI 1 "register_operand" "")))]
;; Processors prior to PA 2.0 don't have a fneg instruction. Fast
;; negation can be done by subtracting from plus zero. However, this
;; violates the IEEE standard when negating plus and minus zero.
+;; The slow path toggles the sign bit in the general registers.
(define_expand "negdf2"
- [(parallel [(set (match_operand:DF 0 "register_operand" "")
- (neg:DF (match_operand:DF 1 "register_operand" "")))
- (use (match_dup 2))])]
- "! TARGET_SOFT_FLOAT"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (neg:DF (match_operand:DF 1 "register_operand" "")))]
+ "!TARGET_SOFT_FLOAT"
{
- if (TARGET_PA_20 || flag_unsafe_math_optimizations)
+ if (TARGET_PA_20 || !flag_signed_zeros)
emit_insn (gen_negdf2_fast (operands[0], operands[1]));
else
- {
- operands[2] = force_reg (DFmode,
- CONST_DOUBLE_FROM_REAL_VALUE (dconstm1, DFmode));
- emit_insn (gen_muldf3 (operands[0], operands[1], operands[2]));
- }
+ emit_insn (gen_negdf2_slow (operands[0], operands[1]));
DONE;
})
+;; negdf2_slow: IEEE-correct negation for pre-2.0 hardware. Toggles
+;; the sign bit of the most-significant word in a general register
+;; (and,< tests the sign; depi,tr/depi then set or clear the bit), and
+;; copies the low word only when the destination differs from the
+;; source.
+(define_insn "negdf2_slow"
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (neg:DF (match_operand:DF 1 "register_operand" "r")))]
+ "!TARGET_SOFT_FLOAT && !TARGET_PA_20"
+ "*
+{
+ if (rtx_equal_p (operands[0], operands[1]))
+ return \"and,< %1,%1,%0\;depi,tr 1,0,1,%0\;depi 0,0,1,%0\";
+ else
+ return \"and,< %1,%1,%0\;depi,tr 1,0,1,%0\;depi 0,0,1,%0\;copy %R1,%R0\";
+}"
+ [(set_attr "type" "multi")
+ (set (attr "length")
+ (if_then_else (match_test "rtx_equal_p (operands[0], operands[1])")
+ (const_int 12)
+ (const_int 16)))])
+
(define_insn "negdf2_fast"
[(set (match_operand:DF 0 "register_operand" "=f")
(neg:DF (match_operand:DF 1 "register_operand" "f")))]
- "! TARGET_SOFT_FLOAT && (TARGET_PA_20 || flag_unsafe_math_optimizations)"
+ "!TARGET_SOFT_FLOAT"
"*
{
if (TARGET_PA_20)
(set_attr "length" "4")])
(define_expand "negsf2"
- [(parallel [(set (match_operand:SF 0 "register_operand" "")
- (neg:SF (match_operand:SF 1 "register_operand" "")))
- (use (match_dup 2))])]
- "! TARGET_SOFT_FLOAT"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (neg:SF (match_operand:SF 1 "register_operand" "")))]
+ "!TARGET_SOFT_FLOAT"
{
- if (TARGET_PA_20 || flag_unsafe_math_optimizations)
+ if (TARGET_PA_20 || !flag_signed_zeros)
emit_insn (gen_negsf2_fast (operands[0], operands[1]));
else
- {
- operands[2] = force_reg (SFmode,
- CONST_DOUBLE_FROM_REAL_VALUE (dconstm1, SFmode));
- emit_insn (gen_mulsf3 (operands[0], operands[1], operands[2]));
- }
+ emit_insn (gen_negsf2_slow (operands[0], operands[1]));
DONE;
})
+;; negsf2_slow: IEEE-correct SFmode negation for pre-2.0 hardware;
+;; toggles the sign bit in a general register.
+(define_insn "negsf2_slow"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (neg:SF (match_operand:SF 1 "register_operand" "r")))]
+ "!TARGET_SOFT_FLOAT && !TARGET_PA_20"
+ "and,< %1,%1,%0\;depi,tr 1,0,1,%0\;depi 0,0,1,%0"
+ [(set_attr "type" "multi")
+ (set_attr "length" "12")])
+
(define_insn "negsf2_fast"
[(set (match_operand:SF 0 "register_operand" "=f")
(neg:SF (match_operand:SF 1 "register_operand" "f")))]
- "! TARGET_SOFT_FLOAT && (TARGET_PA_20 || flag_unsafe_math_optimizations)"
+ "!TARGET_SOFT_FLOAT"
"*
{
if (TARGET_PA_20)
;; PA 2.0 floating point instructions
; fmpyfadd patterns
+;; fmadf4: standard fused multiply-add pattern (fma(a,b,c)), mapped to
+;; fmpyfadd,dbl.
-(define_insn ""
+(define_insn "fmadf4"
[(set (match_operand:DF 0 "register_operand" "=f")
- (plus:DF (mult:DF (match_operand:DF 1 "register_operand" "f")
- (match_operand:DF 2 "register_operand" "f"))
- (match_operand:DF 3 "register_operand" "f")))]
+ (fma:DF (match_operand:DF 1 "register_operand" "f")
+ (match_operand:DF 2 "register_operand" "f")
+ (match_operand:DF 3 "register_operand" "f")))]
"TARGET_PA_20 && ! TARGET_SOFT_FLOAT"
"fmpyfadd,dbl %1,%2,%3,%0"
[(set_attr "type" "fpmuldbl")
(set_attr "length" "4")])
-(define_insn ""
- [(set (match_operand:DF 0 "register_operand" "=f")
- (plus:DF (match_operand:DF 1 "register_operand" "f")
- (mult:DF (match_operand:DF 2 "register_operand" "f")
- (match_operand:DF 3 "register_operand" "f"))))]
- "TARGET_PA_20 && ! TARGET_SOFT_FLOAT"
- "fmpyfadd,dbl %2,%3,%1,%0"
- [(set_attr "type" "fpmuldbl")
- (set_attr "length" "4")])
-
+;; fmasf4: single-precision fused multiply-add (fmpyfadd,sgl).
-(define_insn ""
+(define_insn "fmasf4"
[(set (match_operand:SF 0 "register_operand" "=f")
- (plus:SF (mult:SF (match_operand:SF 1 "register_operand" "f")
- (match_operand:SF 2 "register_operand" "f"))
- (match_operand:SF 3 "register_operand" "f")))]
+ (fma:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")
+ (match_operand:SF 3 "register_operand" "f")))]
"TARGET_PA_20 && ! TARGET_SOFT_FLOAT"
"fmpyfadd,sgl %1,%2,%3,%0"
[(set_attr "type" "fpmulsgl")
(set_attr "length" "4")])
-(define_insn ""
- [(set (match_operand:SF 0 "register_operand" "=f")
- (plus:SF (match_operand:SF 1 "register_operand" "f")
- (mult:SF (match_operand:SF 2 "register_operand" "f")
- (match_operand:SF 3 "register_operand" "f"))))]
- "TARGET_PA_20 && ! TARGET_SOFT_FLOAT"
- "fmpyfadd,sgl %2,%3,%1,%0"
- [(set_attr "type" "fpmulsgl")
- (set_attr "length" "4")])
-
; fmpynfadd patterns
+;; fnmadf4: fused negated multiply-add, fma(-a,b,c) -> fmpynfadd,dbl.
-(define_insn ""
+(define_insn "fnmadf4"
[(set (match_operand:DF 0 "register_operand" "=f")
- (minus:DF (match_operand:DF 1 "register_operand" "f")
- (mult:DF (match_operand:DF 2 "register_operand" "f")
- (match_operand:DF 3 "register_operand" "f"))))]
+ (fma:DF (neg:DF (match_operand:DF 1 "register_operand" "f"))
+ (match_operand:DF 2 "register_operand" "f")
+ (match_operand:DF 3 "register_operand" "f")))]
"TARGET_PA_20 && ! TARGET_SOFT_FLOAT"
- "fmpynfadd,dbl %2,%3,%1,%0"
+ "fmpynfadd,dbl %1,%2,%3,%0"
[(set_attr "type" "fpmuldbl")
(set_attr "length" "4")])
+;; fnmasf4: single-precision negated fused multiply-add (fmpynfadd,sgl).
-(define_insn ""
+(define_insn "fnmasf4"
[(set (match_operand:SF 0 "register_operand" "=f")
- (minus:SF (match_operand:SF 1 "register_operand" "f")
- (mult:SF (match_operand:SF 2 "register_operand" "f")
- (match_operand:SF 3 "register_operand" "f"))))]
+ (fma:SF (neg:SF (match_operand:SF 1 "register_operand" "f"))
+ (match_operand:SF 2 "register_operand" "f")
+ (match_operand:SF 3 "register_operand" "f")))]
"TARGET_PA_20 && ! TARGET_SOFT_FLOAT"
- "fmpynfadd,sgl %2,%3,%1,%0"
+ "fmpynfadd,sgl %1,%2,%3,%0"
[(set_attr "type" "fpmulsgl")
(set_attr "length" "4")])
[(set_attr "type" "fpalu")
(set_attr "length" "4")])
-;; Generating a fused multiply sequence is a win for this case as it will
-;; reduce the latency for the fused case without impacting the plain
-;; multiply case.
-;;
-;; Similar possibilities exist for fnegabs, shadd and other insns which
-;; perform two operations with the result of the first feeding the second.
;; Combined pattern: produce both -|x| and |x| of the same input with
;; two fpalu operations; split below into independent insns.
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
- (plus:DF (mult:DF (match_operand:DF 1 "register_operand" "f")
- (match_operand:DF 2 "register_operand" "f"))
- (match_operand:DF 3 "register_operand" "f")))
- (set (match_operand:DF 4 "register_operand" "=&f")
- (mult:DF (match_dup 1) (match_dup 2)))]
+ (neg:DF (abs:DF (match_operand:DF 1 "register_operand" "f"))))
+ (set (match_operand:DF 2 "register_operand" "=&f") (abs:DF (match_dup 1)))]
"(! TARGET_SOFT_FLOAT && TARGET_PA_20
- && ! (reg_overlap_mentioned_p (operands[4], operands[1])
- || reg_overlap_mentioned_p (operands[4], operands[2])))"
+ && ! reg_overlap_mentioned_p (operands[2], operands[1]))"
"#"
- [(set_attr "type" "fpmuldbl")
+ [(set_attr "type" "fpalu")
(set_attr "length" "8")])
-;; We want to split this up during scheduling since we want both insns
-;; to schedule independently.
;; Break the combined DFmode pattern into its two component insns.
(define_split
[(set (match_operand:DF 0 "register_operand" "")
- (plus:DF (mult:DF (match_operand:DF 1 "register_operand" "")
- (match_operand:DF 2 "register_operand" ""))
- (match_operand:DF 3 "register_operand" "")))
- (set (match_operand:DF 4 "register_operand" "")
- (mult:DF (match_dup 1) (match_dup 2)))]
+ (neg:DF (abs:DF (match_operand:DF 1 "register_operand" ""))))
+ (set (match_operand:DF 2 "register_operand" "") (abs:DF (match_dup 1)))]
"! TARGET_SOFT_FLOAT && TARGET_PA_20"
- [(set (match_dup 4) (mult:DF (match_dup 1) (match_dup 2)))
- (set (match_dup 0) (plus:DF (mult:DF (match_dup 1) (match_dup 2))
- (match_dup 3)))]
+ [(set (match_dup 2) (abs:DF (match_dup 1)))
+ (set (match_dup 0) (neg:DF (abs:DF (match_dup 1))))]
"")
;; SFmode variant of the combined -|x| / |x| pattern.
(define_insn ""
[(set (match_operand:SF 0 "register_operand" "=f")
- (plus:SF (mult:SF (match_operand:SF 1 "register_operand" "f")
- (match_operand:SF 2 "register_operand" "f"))
- (match_operand:SF 3 "register_operand" "f")))
- (set (match_operand:SF 4 "register_operand" "=&f")
- (mult:SF (match_dup 1) (match_dup 2)))]
+ (neg:SF (abs:SF (match_operand:SF 1 "register_operand" "f"))))
+ (set (match_operand:SF 2 "register_operand" "=&f") (abs:SF (match_dup 1)))]
"(! TARGET_SOFT_FLOAT && TARGET_PA_20
- && ! (reg_overlap_mentioned_p (operands[4], operands[1])
- || reg_overlap_mentioned_p (operands[4], operands[2])))"
+ && ! reg_overlap_mentioned_p (operands[2], operands[1]))"
"#"
- [(set_attr "type" "fpmuldbl")
+ [(set_attr "type" "fpalu")
(set_attr "length" "8")])
-;; We want to split this up during scheduling since we want both insns
-;; to schedule independently.
;; Break the combined SFmode pattern into its two component insns.
(define_split
[(set (match_operand:SF 0 "register_operand" "")
- (plus:SF (mult:SF (match_operand:SF 1 "register_operand" "")
- (match_operand:SF 2 "register_operand" ""))
- (match_operand:SF 3 "register_operand" "")))
- (set (match_operand:SF 4 "register_operand" "")
- (mult:SF (match_dup 1) (match_dup 2)))]
+ (neg:SF (abs:SF (match_operand:SF 1 "register_operand" ""))))
+ (set (match_operand:SF 2 "register_operand" "") (abs:SF (match_dup 1)))]
"! TARGET_SOFT_FLOAT && TARGET_PA_20"
- [(set (match_dup 4) (mult:SF (match_dup 1) (match_dup 2)))
- (set (match_dup 0) (plus:SF (mult:SF (match_dup 1) (match_dup 2))
- (match_dup 3)))]
+ [(set (match_dup 2) (abs:SF (match_dup 1)))
+ (set (match_dup 0) (neg:SF (abs:SF (match_dup 1))))]
"")
;; Negating a multiply can be faked by adding zero in a fused multiply-add
-;; instruction.
+;; instruction if we can ignore the sign of zero.
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
(neg:DF (mult:DF (match_operand:DF 1 "register_operand" "f")
(match_operand:DF 2 "register_operand" "f"))))]
- "! TARGET_SOFT_FLOAT && TARGET_PA_20"
+ "!TARGET_SOFT_FLOAT && TARGET_PA_20 && !flag_signed_zeros"
"fmpynfadd,dbl %1,%2,%%fr0,%0"
[(set_attr "type" "fpmuldbl")
(set_attr "length" "4")])
[(set (match_operand:SF 0 "register_operand" "=f")
(neg:SF (mult:SF (match_operand:SF 1 "register_operand" "f")
(match_operand:SF 2 "register_operand" "f"))))]
- "! TARGET_SOFT_FLOAT && TARGET_PA_20"
+ "!TARGET_SOFT_FLOAT && TARGET_PA_20 && !flag_signed_zeros"
"fmpynfadd,sgl %1,%2,%%fr0,%0"
[(set_attr "type" "fpmuldbl")
(set_attr "length" "4")])
(match_operand:DF 2 "register_operand" "f"))))
(set (match_operand:DF 3 "register_operand" "=&f")
(mult:DF (match_dup 1) (match_dup 2)))]
- "(! TARGET_SOFT_FLOAT && TARGET_PA_20
+ "(!TARGET_SOFT_FLOAT && TARGET_PA_20 && !flag_signed_zeros
&& ! (reg_overlap_mentioned_p (operands[3], operands[1])
|| reg_overlap_mentioned_p (operands[3], operands[2])))"
"#"
(match_operand:DF 2 "register_operand" ""))))
(set (match_operand:DF 3 "register_operand" "")
(mult:DF (match_dup 1) (match_dup 2)))]
- "! TARGET_SOFT_FLOAT && TARGET_PA_20"
+ "!TARGET_SOFT_FLOAT && TARGET_PA_20 && !flag_signed_zeros"
[(set (match_dup 3) (mult:DF (match_dup 1) (match_dup 2)))
(set (match_dup 0) (neg:DF (mult:DF (match_dup 1) (match_dup 2))))]
"")
(match_operand:SF 2 "register_operand" "f"))))
(set (match_operand:SF 3 "register_operand" "=&f")
(mult:SF (match_dup 1) (match_dup 2)))]
- "(! TARGET_SOFT_FLOAT && TARGET_PA_20
+ "(!TARGET_SOFT_FLOAT && TARGET_PA_20 && !flag_signed_zeros
&& ! (reg_overlap_mentioned_p (operands[3], operands[1])
|| reg_overlap_mentioned_p (operands[3], operands[2])))"
"#"
(match_operand:SF 2 "register_operand" ""))))
(set (match_operand:SF 3 "register_operand" "")
(mult:SF (match_dup 1) (match_dup 2)))]
- "! TARGET_SOFT_FLOAT && TARGET_PA_20"
+ "!TARGET_SOFT_FLOAT && TARGET_PA_20 && !flag_signed_zeros"
[(set (match_dup 3) (mult:SF (match_dup 1) (match_dup 2)))
(set (match_dup 0) (neg:SF (mult:SF (match_dup 1) (match_dup 2))))]
"")
-
-;; Now fused multiplies with the result of the multiply negated.
-(define_insn ""
- [(set (match_operand:DF 0 "register_operand" "=f")
- (plus:DF (neg:DF (mult:DF (match_operand:DF 1 "register_operand" "f")
- (match_operand:DF 2 "register_operand" "f")))
- (match_operand:DF 3 "register_operand" "f")))]
- "! TARGET_SOFT_FLOAT && TARGET_PA_20"
- "fmpynfadd,dbl %1,%2,%3,%0"
- [(set_attr "type" "fpmuldbl")
- (set_attr "length" "4")])
-
-(define_insn ""
- [(set (match_operand:SF 0 "register_operand" "=f")
- (plus:SF (neg:SF (mult:SF (match_operand:SF 1 "register_operand" "f")
- (match_operand:SF 2 "register_operand" "f")))
- (match_operand:SF 3 "register_operand" "f")))]
- "! TARGET_SOFT_FLOAT && TARGET_PA_20"
- "fmpynfadd,sgl %1,%2,%3,%0"
- [(set_attr "type" "fpmuldbl")
- (set_attr "length" "4")])
-
-(define_insn ""
- [(set (match_operand:DF 0 "register_operand" "=f")
- (plus:DF (neg:DF (mult:DF (match_operand:DF 1 "register_operand" "f")
- (match_operand:DF 2 "register_operand" "f")))
- (match_operand:DF 3 "register_operand" "f")))
- (set (match_operand:DF 4 "register_operand" "=&f")
- (mult:DF (match_dup 1) (match_dup 2)))]
- "(! TARGET_SOFT_FLOAT && TARGET_PA_20
- && ! (reg_overlap_mentioned_p (operands[4], operands[1])
- || reg_overlap_mentioned_p (operands[4], operands[2])))"
- "#"
- [(set_attr "type" "fpmuldbl")
- (set_attr "length" "8")])
-
-(define_split
- [(set (match_operand:DF 0 "register_operand" "")
- (plus:DF (neg:DF (mult:DF (match_operand:DF 1 "register_operand" "")
- (match_operand:DF 2 "register_operand" "")))
- (match_operand:DF 3 "register_operand" "")))
- (set (match_operand:DF 4 "register_operand" "")
- (mult:DF (match_dup 1) (match_dup 2)))]
- "! TARGET_SOFT_FLOAT && TARGET_PA_20"
- [(set (match_dup 4) (mult:DF (match_dup 1) (match_dup 2)))
- (set (match_dup 0) (plus:DF (neg:DF (mult:DF (match_dup 1) (match_dup 2)))
- (match_dup 3)))]
- "")
-
-(define_insn ""
- [(set (match_operand:SF 0 "register_operand" "=f")
- (plus:SF (neg:SF (mult:SF (match_operand:SF 1 "register_operand" "f")
- (match_operand:SF 2 "register_operand" "f")))
- (match_operand:SF 3 "register_operand" "f")))
- (set (match_operand:SF 4 "register_operand" "=&f")
- (mult:SF (match_dup 1) (match_dup 2)))]
- "(! TARGET_SOFT_FLOAT && TARGET_PA_20
- && ! (reg_overlap_mentioned_p (operands[4], operands[1])
- || reg_overlap_mentioned_p (operands[4], operands[2])))"
- "#"
- [(set_attr "type" "fpmuldbl")
- (set_attr "length" "8")])
-
-(define_split
- [(set (match_operand:SF 0 "register_operand" "")
- (plus:SF (neg:SF (mult:SF (match_operand:SF 1 "register_operand" "")
- (match_operand:SF 2 "register_operand" "")))
- (match_operand:SF 3 "register_operand" "")))
- (set (match_operand:SF 4 "register_operand" "")
- (mult:SF (match_dup 1) (match_dup 2)))]
- "! TARGET_SOFT_FLOAT && TARGET_PA_20"
- [(set (match_dup 4) (mult:SF (match_dup 1) (match_dup 2)))
- (set (match_dup 0) (plus:SF (neg:SF (mult:SF (match_dup 1) (match_dup 2)))
- (match_dup 3)))]
- "")
-
-(define_insn ""
- [(set (match_operand:DF 0 "register_operand" "=f")
- (minus:DF (match_operand:DF 3 "register_operand" "f")
- (mult:DF (match_operand:DF 1 "register_operand" "f")
- (match_operand:DF 2 "register_operand" "f"))))
- (set (match_operand:DF 4 "register_operand" "=&f")
- (mult:DF (match_dup 1) (match_dup 2)))]
- "(! TARGET_SOFT_FLOAT && TARGET_PA_20
- && ! (reg_overlap_mentioned_p (operands[4], operands[1])
- || reg_overlap_mentioned_p (operands[4], operands[2])))"
- "#"
- [(set_attr "type" "fpmuldbl")
- (set_attr "length" "8")])
-
-(define_split
- [(set (match_operand:DF 0 "register_operand" "")
- (minus:DF (match_operand:DF 3 "register_operand" "")
- (mult:DF (match_operand:DF 1 "register_operand" "")
- (match_operand:DF 2 "register_operand" ""))))
- (set (match_operand:DF 4 "register_operand" "")
- (mult:DF (match_dup 1) (match_dup 2)))]
- "! TARGET_SOFT_FLOAT && TARGET_PA_20"
- [(set (match_dup 4) (mult:DF (match_dup 1) (match_dup 2)))
- (set (match_dup 0) (minus:DF (match_dup 3)
- (mult:DF (match_dup 1) (match_dup 2))))]
- "")
-
-(define_insn ""
- [(set (match_operand:SF 0 "register_operand" "=f")
- (minus:SF (match_operand:SF 3 "register_operand" "f")
- (mult:SF (match_operand:SF 1 "register_operand" "f")
- (match_operand:SF 2 "register_operand" "f"))))
- (set (match_operand:SF 4 "register_operand" "=&f")
- (mult:SF (match_dup 1) (match_dup 2)))]
- "(! TARGET_SOFT_FLOAT && TARGET_PA_20
- && ! (reg_overlap_mentioned_p (operands[4], operands[1])
- || reg_overlap_mentioned_p (operands[4], operands[2])))"
- "#"
- [(set_attr "type" "fpmuldbl")
- (set_attr "length" "8")])
-
-(define_split
- [(set (match_operand:SF 0 "register_operand" "")
- (minus:SF (match_operand:SF 3 "register_operand" "")
- (mult:SF (match_operand:SF 1 "register_operand" "")
- (match_operand:SF 2 "register_operand" ""))))
- (set (match_operand:SF 4 "register_operand" "")
- (mult:SF (match_dup 1) (match_dup 2)))]
- "! TARGET_SOFT_FLOAT && TARGET_PA_20"
- [(set (match_dup 4) (mult:SF (match_dup 1) (match_dup 2)))
- (set (match_dup 0) (minus:SF (match_dup 3)
- (mult:SF (match_dup 1) (match_dup 2))))]
- "")
-
-(define_insn ""
- [(set (match_operand:DF 0 "register_operand" "=f")
- (neg:DF (abs:DF (match_operand:DF 1 "register_operand" "f"))))
- (set (match_operand:DF 2 "register_operand" "=&f") (abs:DF (match_dup 1)))]
- "(! TARGET_SOFT_FLOAT && TARGET_PA_20
- && ! reg_overlap_mentioned_p (operands[2], operands[1]))"
- "#"
- [(set_attr "type" "fpalu")
- (set_attr "length" "8")])
-
-(define_split
- [(set (match_operand:DF 0 "register_operand" "")
- (neg:DF (abs:DF (match_operand:DF 1 "register_operand" ""))))
- (set (match_operand:DF 2 "register_operand" "") (abs:DF (match_dup 1)))]
- "! TARGET_SOFT_FLOAT && TARGET_PA_20"
- [(set (match_dup 2) (abs:DF (match_dup 1)))
- (set (match_dup 0) (neg:DF (abs:DF (match_dup 1))))]
- "")
-
-(define_insn ""
- [(set (match_operand:SF 0 "register_operand" "=f")
- (neg:SF (abs:SF (match_operand:SF 1 "register_operand" "f"))))
- (set (match_operand:SF 2 "register_operand" "=&f") (abs:SF (match_dup 1)))]
- "(! TARGET_SOFT_FLOAT && TARGET_PA_20
- && ! reg_overlap_mentioned_p (operands[2], operands[1]))"
- "#"
- [(set_attr "type" "fpalu")
- (set_attr "length" "8")])
-
-(define_split
- [(set (match_operand:SF 0 "register_operand" "")
- (neg:SF (abs:SF (match_operand:SF 1 "register_operand" ""))))
- (set (match_operand:SF 2 "register_operand" "") (abs:SF (match_dup 1)))]
- "! TARGET_SOFT_FLOAT && TARGET_PA_20"
- [(set (match_dup 2) (abs:SF (match_dup 1)))
- (set (match_dup 0) (neg:SF (abs:SF (match_dup 1))))]
- "")
\f
;;- Shift instructions
(match_operand:SI 2 "register_operand" "q")))
(match_operand:SI 3 "register_operand" "0")))]
; accept ...0001...1, can this be generalized?
- "exact_log2 (INTVAL (operands[1]) + 1) >= 0"
+ "exact_log2 (INTVAL (operands[1]) + 1) > 0"
"*
{
int x = INTVAL (operands[1]);
(match_operand:DI 2 "register_operand" "q")))
(match_operand:DI 3 "register_operand" "0")))]
; accept ...0001...1, can this be generalized?
- "TARGET_64BIT && exact_log2 (INTVAL (operands[1]) + 1) >= 0"
+ "TARGET_64BIT && exact_log2 (INTVAL (operands[1]) + 1) > 0"
"*
{
int x = INTVAL (operands[1]);
(and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "const_int_operand" ""))
(match_operand:SI 3 "const_int_operand" "")))]
- "exact_log2 (1 + (INTVAL (operands[3]) >> (INTVAL (operands[2]) & 31))) >= 0"
+ "exact_log2 (1 + (INTVAL (operands[3]) >> (INTVAL (operands[2]) & 31))) > 0"
"*
{
int cnt = INTVAL (operands[2]) & 31;
\f
;; Unconditional and other jump instructions.
-;; This can only be used in a leaf function, so we do
-;; not need to use the PIC register when generating PIC code.
+;; Trivial return used when no epilogue is needed.
(define_insn "return"
[(return)
- (use (reg:SI 2))
- (const_int 0)]
- "hppa_can_use_return_insn_p ()"
+ (use (reg:SI 2))]
+ "pa_can_use_return_insn ()"
"*
{
if (TARGET_PA_20)
[(set_attr "type" "branch")
(set_attr "length" "4")])
-;; Emit a different pattern for functions which have non-trivial
-;; epilogues so as not to confuse jump and reorg.
+;; This is used for most returns.
(define_insn "return_internal"
[(return)
- (use (reg:SI 2))
- (const_int 1)]
+ (use (reg:SI 2))]
""
"*
{
(use (reg:SI 2))]
"!TARGET_NO_SPACE_REGS
&& !TARGET_PA_20
- && flag_pic && current_function_calls_eh_return"
+ && flag_pic && crtl->calls_eh_return"
"ldsid (%%sr0,%%r2),%%r1\;mtsp %%r1,%%sr0\;be%* 0(%%sr0,%%r2)"
[(set_attr "type" "branch")
(set_attr "length" "12")])
(define_expand "prologue"
[(const_int 0)]
""
- "hppa_expand_prologue ();DONE;")
+ "pa_expand_prologue ();DONE;")
(define_expand "sibcall_epilogue"
[(return)]
""
"
{
- hppa_expand_epilogue ();
+ pa_expand_epilogue ();
DONE;
}")
""
"
{
- /* Try to use the trivial return first. Else use the full
- epilogue. */
- if (hppa_can_use_return_insn_p ())
- emit_jump_insn (gen_return ());
+ rtx x;
+
+ /* Try to use the trivial return first. Else use the full epilogue. */
+ if (pa_can_use_return_insn ())
+ x = gen_return ();
else
{
- rtx x;
-
- hppa_expand_epilogue ();
+ pa_expand_epilogue ();
/* EH returns bypass the normal return stub. Thus, we must do an
interspace branch to return from functions that call eh_return.
using space registers. */
if (!TARGET_NO_SPACE_REGS
&& !TARGET_PA_20
- && flag_pic && current_function_calls_eh_return)
+ && flag_pic && crtl->calls_eh_return)
x = gen_return_external_pic ();
else
x = gen_return_internal ();
-
- emit_jump_insn (x);
}
+ emit_jump_insn (x);
DONE;
}")
"*
{
/* An unconditional branch which can reach its target. */
- if (get_attr_length (insn) != 24
- && get_attr_length (insn) != 16)
+ if (get_attr_length (insn) < 16)
return \"b%* %l0\";
- return output_lbranch (operands[0], insn);
+ return pa_output_lbranch (operands[0], insn, 1);
}"
[(set_attr "type" "uncond_branch")
(set_attr "pa_combine_type" "uncond_branch")
(set (attr "length")
- (cond [(eq (symbol_ref "jump_in_call_delay (insn)") (const_int 1))
+ (cond [(match_test "pa_jump_in_call_delay (insn)")
(if_then_else (lt (abs (minus (match_dup 0)
(plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8))
- (ge (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
- (const_int 262100))
- (if_then_else (eq (symbol_ref "flag_pic") (const_int 0))
- (const_int 16)
- (const_int 24))]
- (const_int 4)))])
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (const_int 8))
+ (lt (abs (minus (match_dup 0) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 4)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 20)
+ (not (match_test "flag_pic"))
+ (const_int 16)]
+ (const_int 24)))])
;;; Hope this is only within a function...
(define_insn "indirect_jump"
lab = copy_to_reg (lab);
- emit_insn (gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_MEM (BLKmode,
- gen_rtx_SCRATCH (VOIDmode))));
- emit_insn (gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_MEM (BLKmode,
- hard_frame_pointer_rtx)));
+ emit_clobber (gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)));
+ emit_clobber (gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx));
/* Restore the frame pointer. The virtual_stack_vars_rtx is saved
instead of the hard_frame_pointer_rtx in the save area. As a
result, an extra instruction is needed to adjust for the offset
- of the virtual stack variables and the frame pointer. */
+ of the virtual stack variables and the hard frame pointer. */
if (GET_CODE (fp) != REG)
fp = force_reg (Pmode, fp);
- emit_move_insn (virtual_stack_vars_rtx, fp);
+ emit_move_insn (hard_frame_pointer_rtx, plus_constant (fp, -8));
- emit_stack_restore (SAVE_NONLOCAL, stack, NULL_RTX);
+ emit_stack_restore (SAVE_NONLOCAL, stack);
- emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx));
- emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx));
+ emit_use (hard_frame_pointer_rtx);
+ emit_use (stack_pointer_rtx);
/* Nonlocal goto jumps are only used between functions in the same
translation unit. Thus, we can avoid the extra overhead of an
{
rtx index = gen_reg_rtx (SImode);
- operands[1] = GEN_INT (-INTVAL (operands[1]));
+ operands[1] = gen_int_mode (-INTVAL (operands[1]), SImode);
if (!INT_14_BITS (operands[1]))
operands[1] = force_reg (SImode, operands[1]);
emit_insn (gen_addsi3 (index, operands[0], operands[1]));
operands[0] = index;
}
- /* In 64bit mode we must make sure to wipe the upper bits of the register
- just in case the addition overflowed or we had random bits in the
- high part of the register. */
- if (TARGET_64BIT)
- {
- rtx index = gen_reg_rtx (DImode);
-
- emit_insn (gen_extendsidi2 (index, operands[0]));
- operands[0] = gen_rtx_SUBREG (SImode, index, 4);
- }
-
if (!INT_5_BITS (operands[2]))
operands[2] = force_reg (SImode, operands[2]);
then be worthwhile to split the casesi patterns to improve scheduling.
However, it's not clear that all this extra complexity is worth
the effort. */
- emit_insn (gen_cmpsi (operands[0], operands[2]));
- emit_jump_insn (gen_bgtu (operands[4]));
+ {
+ rtx test = gen_rtx_GTU (VOIDmode, operands[0], operands[2]);
+ emit_jump_insn (gen_cbranchsi4 (test, operands[0], operands[2], operands[4]));
+ }
+
+ /* In 64bit mode we must make sure to wipe the upper bits of the register
+ just in case the addition overflowed or we had random bits in the
+ high part of the register. */
+ if (TARGET_64BIT)
+ {
+ rtx index = gen_reg_rtx (DImode);
+
+ emit_insn (gen_extendsidi2 (index, operands[0]));
+ operands[0] = index;
+ }
if (TARGET_BIG_SWITCH)
{
if (TARGET_64BIT)
- {
- rtx tmp1 = gen_reg_rtx (DImode);
- rtx tmp2 = gen_reg_rtx (DImode);
-
- emit_jump_insn (gen_casesi64p (operands[0], operands[3],
- tmp1, tmp2));
- }
+ emit_jump_insn (gen_casesi64p (operands[0], operands[3]));
+ else if (flag_pic)
+ emit_jump_insn (gen_casesi32p (operands[0], operands[3]));
else
- {
- rtx tmp1 = gen_reg_rtx (SImode);
-
- if (flag_pic)
- {
- rtx tmp2 = gen_reg_rtx (SImode);
-
- emit_jump_insn (gen_casesi32p (operands[0], operands[3],
- tmp1, tmp2));
- }
- else
- emit_jump_insn (gen_casesi32 (operands[0], operands[3], tmp1));
- }
+ emit_jump_insn (gen_casesi32 (operands[0], operands[3]));
}
else
emit_jump_insn (gen_casesi0 (operands[0], operands[3]));
(mult:SI (match_operand:SI 0 "register_operand" "r")
(const_int 4))
(label_ref (match_operand 1 "" "")))))
- (clobber (match_operand:SI 2 "register_operand" "=&r"))]
- "!TARGET_64BIT && TARGET_BIG_SWITCH"
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "!flag_pic"
"ldil L'%l1,%2\;ldo R'%l1(%2),%2\;{ldwx|ldw},s %0(%2),%2\;bv,n %%r0(%2)"
[(set_attr "type" "multi")
(set_attr "length" "16")])
(mult:SI (match_operand:SI 0 "register_operand" "r")
(const_int 4))
(label_ref (match_operand 1 "" "")))))
- (clobber (match_operand:SI 2 "register_operand" "=&a"))
- (clobber (match_operand:SI 3 "register_operand" "=&r"))]
- "!TARGET_64BIT && TARGET_BIG_SWITCH"
- "{bl .+8,%2\;depi 0,31,2,%2|mfia %2}\;ldo {16|20}(%2),%2\;\
+ (clobber (match_scratch:SI 2 "=&r"))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ "flag_pic"
+ "{bl .+8,%2\;depi 0,31,2,%2|mfia %2}\;ldo {%l1-.|%l1+4-.}(%2),%2\;\
{ldwx|ldw},s %0(%2),%3\;{addl|add,l} %2,%3,%3\;bv,n %%r0(%3)"
[(set_attr "type" "multi")
(set (attr "length")
- (if_then_else (ne (symbol_ref "TARGET_PA_20") (const_int 0))
+ (if_then_else (match_test "TARGET_PA_20")
(const_int 20)
(const_int 24)))])
;;; 64-bit code, 32-bit relative branch table.
(define_insn "casesi64p"
[(set (pc) (mem:DI (plus:DI
- (mult:DI (sign_extend:DI
- (match_operand:SI 0 "register_operand" "r"))
+ (mult:DI (match_operand:DI 0 "register_operand" "r")
(const_int 8))
(label_ref (match_operand 1 "" "")))))
- (clobber (match_operand:DI 2 "register_operand" "=&r"))
- (clobber (match_operand:DI 3 "register_operand" "=&r"))]
- "TARGET_64BIT && TARGET_BIG_SWITCH"
- "mfia %2\;ldo 24(%2),%2\;ldw,s %0(%2),%3\;extrd,s %3,63,32,%3\;\
+ (clobber (match_scratch:DI 2 "=&r"))
+ (clobber (match_scratch:DI 3 "=&r"))]
+ ""
+ "mfia %2\;ldo %l1+4-.(%2),%2\;ldw,s %0(%2),%3\;extrd,s %3,63,32,%3\;\
add,l %2,%3,%3\;bv,n %%r0(%3)"
[(set_attr "type" "multi")
(set_attr "length" "24")])
""
"
{
- rtx op, call_insn;
+ rtx op;
rtx nb = operands[1];
if (TARGET_PORTABLE_RUNTIME)
the only method that we have for doing DImode multiplication
is with a libcall. This could be trouble if we haven't
allocated enough space for the outgoing arguments. */
- gcc_assert (INTVAL (nb) <= current_function_outgoing_args_size);
+ gcc_assert (INTVAL (nb) <= crtl->outgoing_args_size);
emit_move_insn (arg_pointer_rtx,
gen_rtx_PLUS (word_mode, stack_pointer_rtx,
need to have a use of the PIC register in the return pattern and
the final save/restore operation is not needed.
- I elected to just clobber %r4 in the PIC patterns and use it instead
+ I elected to just use register %r4 in the PIC patterns instead
of trying to force hppa_pic_save_rtx () to a callee saved register.
This might have required a new register class and constraint. It
was also simpler to just handle the restore from a register than a
generic pseudo. */
if (TARGET_64BIT)
{
+ rtx r4 = gen_rtx_REG (word_mode, 4);
if (GET_CODE (op) == SYMBOL_REF)
- call_insn = emit_call_insn (gen_call_symref_64bit (op, nb));
+ emit_call_insn (gen_call_symref_64bit (op, nb, r4));
else
{
op = force_reg (word_mode, op);
- call_insn = emit_call_insn (gen_call_reg_64bit (op, nb));
+ emit_call_insn (gen_call_reg_64bit (op, nb, r4));
}
}
else
if (GET_CODE (op) == SYMBOL_REF)
{
if (flag_pic)
- call_insn = emit_call_insn (gen_call_symref_pic (op, nb));
+ {
+ rtx r4 = gen_rtx_REG (word_mode, 4);
+ emit_call_insn (gen_call_symref_pic (op, nb, r4));
+ }
else
- call_insn = emit_call_insn (gen_call_symref (op, nb));
+ emit_call_insn (gen_call_symref (op, nb));
}
else
{
rtx tmpreg = gen_rtx_REG (word_mode, 22);
-
emit_move_insn (tmpreg, force_reg (word_mode, op));
if (flag_pic)
- call_insn = emit_call_insn (gen_call_reg_pic (nb));
+ {
+ rtx r4 = gen_rtx_REG (word_mode, 4);
+ emit_call_insn (gen_call_reg_pic (nb, r4));
+ }
else
- call_insn = emit_call_insn (gen_call_reg (nb));
+ emit_call_insn (gen_call_reg (nb));
}
}
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
"*
{
- output_arg_descriptor (insn);
- return output_call (insn, operands[0], 0);
+ pa_output_arg_descriptor (insn);
+ return pa_output_call (insn, operands[0], 0);
}"
[(set_attr "type" "call")
- (set (attr "length") (symbol_ref "attr_length_call (insn, 0)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_call (insn, 0)"))])
(define_insn "call_symref_pic"
- [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+ [(set (match_operand:SI 2 "register_operand" "=&r") (reg:SI 19))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" "i"))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 0))]
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
- "*
-{
- output_arg_descriptor (insn);
- return output_call (insn, operands[0], 0);
-}"
- [(set_attr "type" "call")
- (set (attr "length")
- (plus (symbol_ref "attr_length_call (insn, 0)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
(define_split
- [(parallel [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+ [(parallel [(set (match_operand:SI 2 "register_operand" "") (reg:SI 19))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" ""))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 0))])]
- "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:SI 4) (reg:SI 19))
+ "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 2) (reg:SI 19))
(parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 0))])
- (set (reg:SI 19) (reg:SI 4))]
+ (use (const_int 0))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+(define_split
+ [(parallel [(set (match_operand:SI 2 "register_operand" "") (reg:SI 19))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" ""))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 0))])]
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT && reload_completed"
- [(parallel [(call (mem:SI (match_dup 0))
+ [(set (match_dup 2) (reg:SI 19))
+ (parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 0))])]
+ (use (const_int 0))])
+ (set (reg:SI 19) (match_dup 2))]
"")
(define_insn "*call_symref_pic_post_reload"
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
"*
{
- output_arg_descriptor (insn);
- return output_call (insn, operands[0], 0);
+ pa_output_arg_descriptor (insn);
+ return pa_output_call (insn, operands[0], 0);
}"
[(set_attr "type" "call")
- (set (attr "length") (symbol_ref "attr_length_call (insn, 0)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_call (insn, 0)"))])
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
(define_insn "call_symref_64bit"
- [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+ [(set (match_operand:DI 2 "register_operand" "=&r") (reg:DI 27))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" "i"))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
- (use (reg:DI 27))
- (use (reg:DI 29))
- (use (const_int 0))]
- "TARGET_64BIT"
- "*
-{
- output_arg_descriptor (insn);
- return output_call (insn, operands[0], 0);
-}"
- [(set_attr "type" "call")
- (set (attr "length")
- (plus (symbol_ref "attr_length_call (insn, 0)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ (use (match_dup 2))
+ (use (reg:DI 27))
+ (use (reg:DI 29))
+ (use (const_int 0))]
+ "TARGET_64BIT"
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
(define_split
- [(parallel [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+ [(parallel [(set (match_operand:DI 2 "register_operand" "") (reg:DI 27))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" ""))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 0))])]
- "TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:DI 4) (reg:DI 27))
+ "TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 2) (reg:DI 27))
(parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 0))])
- (set (reg:DI 27) (reg:DI 4))]
+ (use (const_int 0))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(call (mem:SI (match_operand 0 "call_operand_address" ""))
+(define_split
+ [(parallel [(set (match_operand:DI 2 "register_operand" "") (reg:DI 27))
+ (call (mem:SI (match_operand 0 "call_operand_address" ""))
(match_operand 1 "" ""))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 0))])]
"TARGET_64BIT && reload_completed"
- [(parallel [(call (mem:SI (match_dup 0))
+ [(set (match_dup 2) (reg:DI 27))
+ (parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 0))])]
+ (use (const_int 0))])
+ (set (reg:DI 27) (match_dup 2))]
"")
(define_insn "*call_symref_64bit_post_reload"
"TARGET_64BIT"
"*
{
- output_arg_descriptor (insn);
- return output_call (insn, operands[0], 0);
+ pa_output_arg_descriptor (insn);
+ return pa_output_call (insn, operands[0], 0);
}"
[(set_attr "type" "call")
- (set (attr "length") (symbol_ref "attr_length_call (insn, 0)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_call (insn, 0)"))])
(define_insn "call_reg"
[(call (mem:SI (reg:SI 22))
"!TARGET_64BIT"
"*
{
- return output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
+ return pa_output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
}"
[(set_attr "type" "dyncall")
- (set (attr "length") (symbol_ref "attr_length_indirect_call (insn)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_indirect_call (insn)"))])
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
(define_insn "call_reg_pic"
- [(call (mem:SI (reg:SI 22))
+ [(set (match_operand:SI 1 "register_operand" "=&r") (reg:SI 19))
+ (call (mem:SI (reg:SI 22))
(match_operand 0 "" "i"))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 1))
(use (reg:SI 19))
(use (const_int 1))]
"!TARGET_64BIT"
- "*
-{
- return output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
-}"
- [(set_attr "type" "dyncall")
- (set (attr "length")
- (plus (symbol_ref "attr_length_indirect_call (insn)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
(define_split
- [(parallel [(call (mem:SI (reg:SI 22))
+ [(parallel [(set (match_operand:SI 1 "register_operand" "") (reg:SI 19))
+ (call (mem:SI (reg:SI 22))
(match_operand 0 "" ""))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 1))
(use (reg:SI 19))
(use (const_int 1))])]
- "!TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:SI 4) (reg:SI 19))
+ "!TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 1) (reg:SI 19))
(parallel [(call (mem:SI (reg:SI 22))
(match_dup 0))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 1))])
- (set (reg:SI 19) (reg:SI 4))]
+ (use (const_int 1))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(call (mem:SI (reg:SI 22))
+(define_split
+ [(parallel [(set (match_operand:SI 1 "register_operand" "") (reg:SI 19))
+ (call (mem:SI (reg:SI 22))
(match_operand 0 "" ""))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 1))
(use (reg:SI 19))
(use (const_int 1))])]
"!TARGET_64BIT && reload_completed"
- [(parallel [(call (mem:SI (reg:SI 22))
+ [(set (match_dup 1) (reg:SI 19))
+ (parallel [(call (mem:SI (reg:SI 22))
(match_dup 0))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 1))])]
+ (use (const_int 1))])
+ (set (reg:SI 19) (match_dup 1))]
"")
(define_insn "*call_reg_pic_post_reload"
"!TARGET_64BIT"
"*
{
- return output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
+ return pa_output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
}"
[(set_attr "type" "dyncall")
- (set (attr "length") (symbol_ref "attr_length_indirect_call (insn)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_indirect_call (insn)"))])
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
(define_insn "call_reg_64bit"
- [(call (mem:SI (match_operand:DI 0 "register_operand" "r"))
+ [(set (match_operand:DI 2 "register_operand" "=&r") (reg:DI 27))
+ (call (mem:SI (match_operand:DI 0 "register_operand" "r"))
(match_operand 1 "" "i"))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 1))]
"TARGET_64BIT"
- "*
-{
- return output_indirect_call (insn, operands[0]);
-}"
- [(set_attr "type" "dyncall")
- (set (attr "length")
- (plus (symbol_ref "attr_length_indirect_call (insn)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
(define_split
- [(parallel [(call (mem:SI (match_operand 0 "register_operand" ""))
+ [(parallel [(set (match_operand:DI 2 "register_operand" "") (reg:DI 27))
+ (call (mem:SI (match_operand 0 "register_operand" ""))
(match_operand 1 "" ""))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 1))])]
- "TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:DI 4) (reg:DI 27))
+ "TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 2) (reg:DI 27))
(parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 1))])
- (set (reg:DI 27) (reg:DI 4))]
+ (use (const_int 1))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(call (mem:SI (match_operand 0 "register_operand" ""))
+(define_split
+ [(parallel [(set (match_operand:DI 2 "register_operand" "") (reg:DI 27))
+ (call (mem:SI (match_operand 0 "register_operand" ""))
(match_operand 1 "" ""))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 2))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 1))])]
"TARGET_64BIT && reload_completed"
- [(parallel [(call (mem:SI (match_dup 0))
+ [(set (match_dup 2) (reg:DI 27))
+ (parallel [(call (mem:SI (match_dup 0))
(match_dup 1))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 1))])]
+ (use (const_int 1))])
+ (set (reg:DI 27) (match_dup 2))]
"")
(define_insn "*call_reg_64bit_post_reload"
[(call (mem:SI (match_operand:DI 0 "register_operand" "r"))
(match_operand 1 "" "i"))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
"TARGET_64BIT"
"*
{
- return output_indirect_call (insn, operands[0]);
+ return pa_output_indirect_call (insn, operands[0]);
}"
[(set_attr "type" "dyncall")
- (set (attr "length") (symbol_ref "attr_length_indirect_call (insn)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_indirect_call (insn)"))])
(define_expand "call_value"
[(parallel [(set (match_operand 0 "" "")
""
"
{
- rtx op, call_insn;
+ rtx op;
rtx dst = operands[0];
rtx nb = operands[2];
the only method that we have for doing DImode multiplication
is with a libcall. This could be trouble if we haven't
allocated enough space for the outgoing arguments. */
- gcc_assert (INTVAL (nb) <= current_function_outgoing_args_size);
+ gcc_assert (INTVAL (nb) <= crtl->outgoing_args_size);
emit_move_insn (arg_pointer_rtx,
gen_rtx_PLUS (word_mode, stack_pointer_rtx,
need to have a use of the PIC register in the return pattern and
the final save/restore operation is not needed.
- I elected to just clobber %r4 in the PIC patterns and use it instead
+ I elected to just use register %r4 in the PIC patterns instead
of trying to force hppa_pic_save_rtx () to a callee saved register.
This might have required a new register class and constraint. It
was also simpler to just handle the restore from a register than a
generic pseudo. */
if (TARGET_64BIT)
{
+ rtx r4 = gen_rtx_REG (word_mode, 4);
if (GET_CODE (op) == SYMBOL_REF)
- call_insn = emit_call_insn (gen_call_val_symref_64bit (dst, op, nb));
+ emit_call_insn (gen_call_val_symref_64bit (dst, op, nb, r4));
else
{
op = force_reg (word_mode, op);
- call_insn = emit_call_insn (gen_call_val_reg_64bit (dst, op, nb));
+ emit_call_insn (gen_call_val_reg_64bit (dst, op, nb, r4));
}
}
else
if (GET_CODE (op) == SYMBOL_REF)
{
if (flag_pic)
- call_insn = emit_call_insn (gen_call_val_symref_pic (dst, op, nb));
+ {
+ rtx r4 = gen_rtx_REG (word_mode, 4);
+ emit_call_insn (gen_call_val_symref_pic (dst, op, nb, r4));
+ }
else
- call_insn = emit_call_insn (gen_call_val_symref (dst, op, nb));
+ emit_call_insn (gen_call_val_symref (dst, op, nb));
}
else
{
rtx tmpreg = gen_rtx_REG (word_mode, 22);
-
emit_move_insn (tmpreg, force_reg (word_mode, op));
if (flag_pic)
- call_insn = emit_call_insn (gen_call_val_reg_pic (dst, nb));
+ {
+ rtx r4 = gen_rtx_REG (word_mode, 4);
+ emit_call_insn (gen_call_val_reg_pic (dst, nb, r4));
+ }
else
- call_insn = emit_call_insn (gen_call_val_reg (dst, nb));
+ emit_call_insn (gen_call_val_reg (dst, nb));
}
}
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
"*
{
- output_arg_descriptor (insn);
- return output_call (insn, operands[1], 0);
+ pa_output_arg_descriptor (insn);
+ return pa_output_call (insn, operands[1], 0);
}"
[(set_attr "type" "call")
- (set (attr "length") (symbol_ref "attr_length_call (insn, 0)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_call (insn, 0)"))])
(define_insn "call_val_symref_pic"
- [(set (match_operand 0 "" "")
+ [(set (match_operand:SI 3 "register_operand" "=&r") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "i")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 3))
(use (reg:SI 19))
(use (const_int 0))]
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
- "*
-{
- output_arg_descriptor (insn);
- return output_call (insn, operands[1], 0);
-}"
- [(set_attr "type" "call")
- (set (attr "length")
- (plus (symbol_ref "attr_length_call (insn, 0)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
(define_split
- [(parallel [(set (match_operand 0 "" "")
+ [(parallel [(set (match_operand:SI 3 "register_operand" "") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 3))
(use (reg:SI 19))
(use (const_int 0))])]
- "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:SI 4) (reg:SI 19))
+ "!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 3) (reg:SI 19))
(parallel [(set (match_dup 0)
(call (mem:SI (match_dup 1))
(match_dup 2)))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 0))])
- (set (reg:SI 19) (reg:SI 4))]
+ (use (const_int 0))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(set (match_operand 0 "" "")
+(define_split
+ [(parallel [(set (match_operand:SI 3 "register_operand" "") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 3))
(use (reg:SI 19))
(use (const_int 0))])]
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT && reload_completed"
- [(parallel [(set (match_dup 0)
+ [(set (match_dup 3) (reg:SI 19))
+ (parallel [(set (match_dup 0)
(call (mem:SI (match_dup 1))
(match_dup 2)))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 0))])]
+ (use (const_int 0))])
+ (set (reg:SI 19) (match_dup 3))]
"")
(define_insn "*call_val_symref_pic_post_reload"
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
"*
{
- output_arg_descriptor (insn);
- return output_call (insn, operands[1], 0);
+ pa_output_arg_descriptor (insn);
+ return pa_output_call (insn, operands[1], 0);
}"
[(set_attr "type" "call")
- (set (attr "length") (symbol_ref "attr_length_call (insn, 0)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_call (insn, 0)"))])
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
(define_insn "call_val_symref_64bit"
- [(set (match_operand 0 "" "")
+ [(set (match_operand:DI 3 "register_operand" "=&r") (reg:DI 27))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "i")))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 0))]
"TARGET_64BIT"
- "*
-{
- output_arg_descriptor (insn);
- return output_call (insn, operands[1], 0);
-}"
- [(set_attr "type" "call")
- (set (attr "length")
- (plus (symbol_ref "attr_length_call (insn, 0)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
(define_split
- [(parallel [(set (match_operand 0 "" "")
+ [(parallel [(set (match_operand:DI 3 "register_operand" "") (reg:DI 27))
+ (set (match_operand 0 "" "")
 (call (mem:SI (match_operand 1 "call_operand_address" ""))
 (match_operand 2 "" "")))
 (clobber (reg:DI 1))
 (clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
 (use (reg:DI 27))
 (use (reg:DI 29))
 (use (const_int 0))])]
- "TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:DI 4) (reg:DI 27))
+ "TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 3) (reg:DI 27))
 (parallel [(set (match_dup 0)
 (call (mem:SI (match_dup 1))
 (match_dup 2)))
 (clobber (reg:DI 1))
 (clobber (reg:DI 2))
 (use (reg:DI 27))
 (use (reg:DI 29))
- (use (const_int 0))])
- (set (reg:DI 27) (reg:DI 4))]
+ (use (const_int 0))])]
 "")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(set (match_operand 0 "" "")
+(define_split
+ [(parallel [(set (match_operand:DI 3 "register_operand" "") (reg:DI 27))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand 1 "call_operand_address" ""))
(match_operand 2 "" "")))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 0))])]
"TARGET_64BIT && reload_completed"
- [(parallel [(set (match_dup 0)
+ [(set (match_dup 3) (reg:DI 27))
+ (parallel [(set (match_dup 0)
(call (mem:SI (match_dup 1))
(match_dup 2)))
(clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 0))])]
+ (use (const_int 0))])
+ (set (reg:DI 27) (match_dup 3))]
"")
(define_insn "*call_val_symref_64bit_post_reload"
"TARGET_64BIT"
"*
{
- output_arg_descriptor (insn);
- return output_call (insn, operands[1], 0);
+ pa_output_arg_descriptor (insn);
+ return pa_output_call (insn, operands[1], 0);
}"
[(set_attr "type" "call")
- (set (attr "length") (symbol_ref "attr_length_call (insn, 0)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_call (insn, 0)"))])
(define_insn "call_val_reg"
[(set (match_operand 0 "" "")
"!TARGET_64BIT"
"*
{
- return output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
+ return pa_output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
}"
[(set_attr "type" "dyncall")
- (set (attr "length") (symbol_ref "attr_length_indirect_call (insn)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_indirect_call (insn)"))])
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
(define_insn "call_val_reg_pic"
- [(set (match_operand 0 "" "")
+ [(set (match_operand:SI 2 "register_operand" "=&r") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (reg:SI 22))
(match_operand 1 "" "i")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 1))]
"!TARGET_64BIT"
- "*
-{
- return output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
-}"
- [(set_attr "type" "dyncall")
- (set (attr "length")
- (plus (symbol_ref "attr_length_indirect_call (insn)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
(define_split
- [(parallel [(set (match_operand 0 "" "")
+ [(parallel [(set (match_operand:SI 2 "register_operand" "") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (reg:SI 22))
(match_operand 1 "" "")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 1))])]
- "!TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:SI 4) (reg:SI 19))
+ "!TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 2) (reg:SI 19))
(parallel [(set (match_dup 0)
(call (mem:SI (reg:SI 22))
(match_dup 1)))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 1))])
- (set (reg:SI 19) (reg:SI 4))]
+ (use (const_int 1))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(set (match_operand 0 "" "")
+(define_split
+ [(parallel [(set (match_operand:SI 2 "register_operand" "") (reg:SI 19))
+ (set (match_operand 0 "" "")
(call (mem:SI (reg:SI 22))
(match_operand 1 "" "")))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
- (clobber (reg:SI 4))
+ (use (match_dup 2))
(use (reg:SI 19))
(use (const_int 1))])]
"!TARGET_64BIT && reload_completed"
- [(parallel [(set (match_dup 0)
+ [(set (match_dup 2) (reg:SI 19))
+ (parallel [(set (match_dup 0)
(call (mem:SI (reg:SI 22))
(match_dup 1)))
(clobber (reg:SI 1))
(clobber (reg:SI 2))
(use (reg:SI 19))
- (use (const_int 1))])]
+ (use (const_int 1))])
+ (set (reg:SI 19) (match_dup 2))]
"")
(define_insn "*call_val_reg_pic_post_reload"
"!TARGET_64BIT"
"*
{
- return output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
+ return pa_output_indirect_call (insn, gen_rtx_REG (word_mode, 22));
}"
[(set_attr "type" "dyncall")
- (set (attr "length") (symbol_ref "attr_length_indirect_call (insn)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_indirect_call (insn)"))])
;; This pattern is split if it is necessary to save and restore the
;; PIC register.
(define_insn "call_val_reg_64bit"
- [(set (match_operand 0 "" "")
+ [(set (match_operand:DI 3 "register_operand" "=&r") (reg:DI 27))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand:DI 1 "register_operand" "r"))
(match_operand 2 "" "i")))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 1))]
"TARGET_64BIT"
- "*
-{
- return output_indirect_call (insn, operands[1]);
-}"
- [(set_attr "type" "dyncall")
- (set (attr "length")
- (plus (symbol_ref "attr_length_indirect_call (insn)")
- (symbol_ref "attr_length_save_restore_dltp (insn)")))])
-
-;; Split out the PIC register save and restore after reload. This is
-;; done only if the function returns. As the split is done after reload,
-;; there are some situations in which we unnecessarily save and restore
-;; %r4. This happens when there is a single call and the PIC register
-;; is "dead" after the call. This isn't easy to fix as the usage of
-;; the PIC register isn't completely determined until the reload pass.
+ "#")
+
+;; Split out the PIC register save and restore after reload. As the
+;; split is done after reload, there are some situations in which we
+;; unnecessarily save and restore %r4. This happens when there is a
+;; single call and the PIC register is not used after the call.
+;;
+;; The split has to be done since call_from_call_insn () can't handle
+;; the pattern as is. Noreturn calls are special because they have to
+;; terminate the basic block. The split has to contain more than one
+;; insn.
(define_split
- [(parallel [(set (match_operand 0 "" "")
+ [(parallel [(set (match_operand:DI 3 "register_operand" "") (reg:DI 27))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand:DI 1 "register_operand" ""))
(match_operand 2 "" "")))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 1))])]
- "TARGET_64BIT
- && reload_completed
- && !find_reg_note (insn, REG_NORETURN, NULL_RTX)"
- [(set (reg:DI 4) (reg:DI 27))
+ "TARGET_64BIT && reload_completed
+ && find_reg_note (insn, REG_NORETURN, NULL_RTX)"
+ [(set (match_dup 3) (reg:DI 27))
(parallel [(set (match_dup 0)
(call (mem:SI (match_dup 1))
(match_dup 2)))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 1))])
- (set (reg:DI 27) (reg:DI 4))]
+ (use (const_int 1))])]
"")
-;; Remove the clobber of register 4 when optimizing. This has to be
-;; done with a peephole optimization rather than a split because the
-;; split sequence for a call must be longer than one instruction.
-(define_peephole2
- [(parallel [(set (match_operand 0 "" "")
+(define_split
+ [(parallel [(set (match_operand:DI 3 "register_operand" "") (reg:DI 27))
+ (set (match_operand 0 "" "")
(call (mem:SI (match_operand:DI 1 "register_operand" ""))
(match_operand 2 "" "")))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
- (clobber (reg:DI 4))
+ (use (match_dup 3))
(use (reg:DI 27))
(use (reg:DI 29))
(use (const_int 1))])]
"TARGET_64BIT && reload_completed"
- [(parallel [(set (match_dup 0)
+ [(set (match_dup 3) (reg:DI 27))
+ (parallel [(set (match_dup 0)
(call (mem:SI (match_dup 1))
(match_dup 2)))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
- (use (const_int 1))])]
+ (use (const_int 1))])
+ (set (reg:DI 27) (match_dup 3))]
"")
(define_insn "*call_val_reg_64bit_post_reload"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:DI 1 "register_operand" "r"))
(match_operand 2 "" "i")))
+ (clobber (reg:DI 1))
(clobber (reg:DI 2))
(use (reg:DI 27))
(use (reg:DI 29))
"TARGET_64BIT"
"*
{
- return output_indirect_call (insn, operands[1]);
+ return pa_output_indirect_call (insn, operands[1]);
}"
[(set_attr "type" "dyncall")
- (set (attr "length") (symbol_ref "attr_length_indirect_call (insn)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_indirect_call (insn)"))])
;; Call subroutine returning any type.
the only method that we have for doing DImode multiplication
is with a libcall. This could be trouble if we haven't
allocated enough space for the outgoing arguments. */
- gcc_assert (INTVAL (nb) <= current_function_outgoing_args_size);
+ gcc_assert (INTVAL (nb) <= crtl->outgoing_args_size);
emit_move_insn (arg_pointer_rtx,
gen_rtx_PLUS (word_mode, stack_pointer_rtx,
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
"*
{
- output_arg_descriptor (insn);
- return output_call (insn, operands[0], 1);
+ pa_output_arg_descriptor (insn);
+ return pa_output_call (insn, operands[0], 1);
}"
[(set_attr "type" "call")
- (set (attr "length") (symbol_ref "attr_length_call (insn, 1)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_call (insn, 1)"))])
(define_insn "sibcall_internal_symref_64bit"
[(call (mem:SI (match_operand 0 "call_operand_address" ""))
"TARGET_64BIT"
"*
{
- output_arg_descriptor (insn);
- return output_call (insn, operands[0], 1);
+ pa_output_arg_descriptor (insn);
+ return pa_output_call (insn, operands[0], 1);
}"
[(set_attr "type" "call")
- (set (attr "length") (symbol_ref "attr_length_call (insn, 1)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_call (insn, 1)"))])
(define_expand "sibcall_value"
[(set (match_operand 0 "" "")
the only method that we have for doing DImode multiplication
is with a libcall. This could be trouble if we haven't
allocated enough space for the outgoing arguments. */
- gcc_assert (INTVAL (nb) <= current_function_outgoing_args_size);
+ gcc_assert (INTVAL (nb) <= crtl->outgoing_args_size);
emit_move_insn (arg_pointer_rtx,
gen_rtx_PLUS (word_mode, stack_pointer_rtx,
"!TARGET_PORTABLE_RUNTIME && !TARGET_64BIT"
"*
{
- output_arg_descriptor (insn);
- return output_call (insn, operands[1], 1);
+ pa_output_arg_descriptor (insn);
+ return pa_output_call (insn, operands[1], 1);
}"
[(set_attr "type" "call")
- (set (attr "length") (symbol_ref "attr_length_call (insn, 1)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_call (insn, 1)"))])
(define_insn "sibcall_value_internal_symref_64bit"
[(set (match_operand 0 "" "")
"TARGET_64BIT"
"*
{
- output_arg_descriptor (insn);
- return output_call (insn, operands[1], 1);
+ pa_output_arg_descriptor (insn);
+ return pa_output_call (insn, operands[1], 1);
}"
[(set_attr "type" "call")
- (set (attr "length") (symbol_ref "attr_length_call (insn, 1)"))])
+ (set (attr "length") (symbol_ref "pa_attr_length_call (insn, 1)"))])
(define_insn "nop"
[(const_int 0)]
(POINTER_SIZE * 2) / BITS_PER_UNIT));
rtx pv = gen_rtx_REG (Pmode, 1);
- emit_insn (gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_MEM (BLKmode,
- gen_rtx_SCRATCH (VOIDmode))));
- emit_insn (gen_rtx_CLOBBER (VOIDmode,
- gen_rtx_MEM (BLKmode,
- hard_frame_pointer_rtx)));
+ emit_clobber (gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)));
+ emit_clobber (gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx));
/* Restore the frame pointer. The virtual_stack_vars_rtx is saved
instead of the hard_frame_pointer_rtx in the save area. We need
- to adjust for the offset between these two values when we have
- a nonlocal_goto pattern. When we don't have a nonlocal_goto
- pattern, the receiver performs the adjustment. */
-#ifdef HAVE_nonlocal_goto
- if (HAVE_nonlocal_goto)
- emit_move_insn (virtual_stack_vars_rtx, force_reg (Pmode, fp));
- else
-#endif
- emit_move_insn (hard_frame_pointer_rtx, fp);
+ to adjust for the offset between these two values. */
+ if (GET_CODE (fp) != REG)
+ fp = force_reg (Pmode, fp);
+ emit_move_insn (hard_frame_pointer_rtx, plus_constant (fp, -8));
/* This bit is the same as expand_builtin_longjmp. */
- emit_stack_restore (SAVE_NONLOCAL, stack, NULL_RTX);
- emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx));
- emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx));
+ emit_stack_restore (SAVE_NONLOCAL, stack);
+ emit_use (hard_frame_pointer_rtx);
+ emit_use (stack_pointer_rtx);
/* Load the label we are jumping through into r1 so that we know
where to look for it when we get back to setjmp's function for
(plus:SI (match_dup 0) (match_dup 1)))
(clobber (match_scratch:SI 4 "=X,r,r"))]
""
- "* return output_dbra (operands, insn, which_alternative); "
+ "* return pa_output_dbra (operands, insn, which_alternative); "
;; Do not expect to understand this the first time through.
[(set_attr "type" "cbranch,multi,multi")
(set (attr "length")
(if_then_else (eq_attr "alternative" "0")
;; Loop counter in register case
;; Short branch has length of 4
-;; Long branch has length of 8
- (if_then_else (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8))
+;; Long branch has length of 8, 20, 24 or 28
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28))
;; Loop counter in FP reg case.
;; Extra goo to deal with additional reload insns.
(if_then_else (eq_attr "alternative" "1")
(if_then_else (lt (match_dup 3) (pc))
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 24))))
- (const_int 8184))
- (const_int 24)
- (const_int 28))
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 24)
- (const_int 28)))
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 24))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 24)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 24))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 28)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 44)
+ (not (match_test "flag_pic"))
+ (const_int 40)]
+ (const_int 48))
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 24)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 28)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 44)
+ (not (match_test "flag_pic"))
+ (const_int 40)]
+ (const_int 48)))
+
;; Loop counter in memory case.
;; Extra goo to deal with additional reload insns.
(if_then_else (lt (match_dup 3) (pc))
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
- (const_int 8184))
- (const_int 12)
- (const_int 16))
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 12)
- (const_int 16))))))])
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 12)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 16)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 32)
+ (not (match_test "flag_pic"))
+ (const_int 28)]
+ (const_int 36))
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 12)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 16)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 32)
+ (not (match_test "flag_pic"))
+ (const_int 28)]
+ (const_int 36))))))])
;; "movb"-style insn: copy operand 1 into operand 0 and conditionally
;; branch to the label referenced by operand 3 (the branch condition
;; part of the pattern is elided in this hunk — confirm against the
;; full file).  Assembler text comes from pa_output_movb with final
;; argument 0, i.e. the non-negated branch sense.
(define_insn ""
[(set (pc)
(set (match_operand:SI 0 "reg_before_reload_operand" "=!r,!*f,*m,!*q")
(match_dup 1))]
""
-"* return output_movb (operands, insn, which_alternative, 0); "
+"* return pa_output_movb (operands, insn, which_alternative, 0); "
;; Do not expect to understand this the first time through.
[(set_attr "type" "cbranch,multi,multi,multi")
(set (attr "length")
(if_then_else (eq_attr "alternative" "0")
;; Loop counter in register case
;; Short branch has length of 4
-;; Long branch has length of 8
- (if_then_else (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8))
+;; Long branch has length of 8, 20, 24 or 28
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28))
;; Loop counter in FP reg case.
;; Extra goo to deal with additional reload insns.
(if_then_else (eq_attr "alternative" "1")
(if_then_else (lt (match_dup 3) (pc))
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
- (const_int 8184))
- (const_int 12)
- (const_int 16))
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 12)
- (const_int 16)))
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 12)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 16)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 32)
+ (not (match_test "flag_pic"))
+ (const_int 28)]
+ (const_int 36))
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 12)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 16)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 32)
+ (not (match_test "flag_pic"))
+ (const_int 28)]
+ (const_int 36)))
+
;; Loop counter in memory or sar case.
;; Extra goo to deal with additional reload insns.
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 8)
- (const_int 12)))))])
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 8)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 12)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 28)
+ (not (match_test "flag_pic"))
+ (const_int 24)]
+ (const_int 32)))))])
;; Handle negated branch.
;; Negated-sense companion of the movb insn above: same copy-and-branch
;; behavior but pa_output_movb is called with final argument 1 so the
;; branch condition is inverted.  Parts of the pattern (the pc set and
;; its condition) are elided in this hunk — confirm against the full
;; file.
(define_insn ""
(set (match_operand:SI 0 "reg_before_reload_operand" "=!r,!*f,*m,!*q")
(match_dup 1))]
""
-"* return output_movb (operands, insn, which_alternative, 1); "
+"* return pa_output_movb (operands, insn, which_alternative, 1); "
;; Do not expect to understand this the first time through.
[(set_attr "type" "cbranch,multi,multi,multi")
(set (attr "length")
;; Loop counter in register case
;; Short branch has length of 4
;; Long branch has length of 8, 20, 24 or 28
- (if_then_else (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8))
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28))
;; Loop counter in FP reg case.
;; Extra goo to deal with additional reload insns.
(if_then_else (eq_attr "alternative" "1")
(if_then_else (lt (match_dup 3) (pc))
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
- (const_int 8184))
- (const_int 12)
- (const_int 16))
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 12)
- (const_int 16)))
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 12)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 12))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 16)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 32)
+ (not (match_test "flag_pic"))
+ (const_int 28)]
+ (const_int 36))
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 12)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 16)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 32)
+ (not (match_test "flag_pic"))
+ (const_int 28)]
+ (const_int 36)))
+
;; Loop counter in memory or SAR case.
;; Extra goo to deal with additional reload insns.
- (if_then_else
- (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 8)
- (const_int 12)))))])
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 8)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 12)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 28)
+ (not (match_test "flag_pic"))
+ (const_int 24)]
+ (const_int 32)))))])
;; Parallel add-and-branch (PA "addb"): the add part of the pattern is
;; elided in this hunk; operand 3 is the branch target label.  Output
;; now comes from pa_output_parallel_addb, which takes the insn itself
;; rather than a precomputed length, and the length attribute gains
;; long-branch sequences of 20/24/28 bytes beyond the old 4/8.
(define_insn ""
[(set (pc) (label_ref (match_operand 3 "" "" )))
"(reload_completed && operands[0] == operands[1]) || operands[0] == operands[2]"
"*
{
- return output_parallel_addb (operands, get_attr_length (insn));
+ return pa_output_parallel_addb (operands, insn);
}"
- [(set_attr "type" "parallel_branch")
- (set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+[(set_attr "type" "parallel_branch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 3) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
;; Parallel move-and-branch (PA "movb"), first variant: the move part
;; of the pattern is elided in this hunk; operand 2 is the branch
;; target label.  Output via pa_output_parallel_movb (now passed the
;; insn instead of its length); long-branch lengths 20/24/28 added.
(define_insn ""
[(set (pc) (label_ref (match_operand 2 "" "" )))
"reload_completed"
"*
{
- return output_parallel_movb (operands, get_attr_length (insn));
+ return pa_output_parallel_movb (operands, insn);
}"
- [(set_attr "type" "parallel_branch")
- (set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+[(set_attr "type" "parallel_branch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
;; Parallel move-and-branch, second variant (the elided move part
;; presumably differs in operand mode/class from the previous insn —
;; confirm against the full file).  Same output helper and the same
;; extended length computation as its siblings.
(define_insn ""
[(set (pc) (label_ref (match_operand 2 "" "" )))
"reload_completed"
"*
{
- return output_parallel_movb (operands, get_attr_length (insn));
+ return pa_output_parallel_movb (operands, insn);
}"
- [(set_attr "type" "parallel_branch")
- (set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+[(set_attr "type" "parallel_branch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
;; Parallel move-and-branch, third variant (move part elided in this
;; hunk).  Identical output helper and length handling to the other
;; parallel movb insns.
(define_insn ""
[(set (pc) (label_ref (match_operand 2 "" "" )))
"reload_completed"
"*
{
- return output_parallel_movb (operands, get_attr_length (insn));
+ return pa_output_parallel_movb (operands, insn);
}"
- [(set_attr "type" "parallel_branch")
- (set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+[(set_attr "type" "parallel_branch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
;; Parallel move-and-branch, fourth variant (move part elided in this
;; hunk).  Identical output helper and length handling to the other
;; parallel movb insns.
(define_insn ""
[(set (pc) (label_ref (match_operand 2 "" "" )))
"reload_completed"
"*
{
- return output_parallel_movb (operands, get_attr_length (insn));
+ return pa_output_parallel_movb (operands, insn);
}"
- [(set_attr "type" "parallel_branch")
- (set (attr "length")
- (if_then_else (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
- (const_int 8184))
- (const_int 4)
- (const_int 8)))])
+[(set_attr "type" "parallel_branch")
+ (set (attr "length")
+ (cond [(lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_12BIT_OFFSET))
+ (const_int 4)
+ (lt (abs (minus (match_dup 2) (plus (pc) (const_int 8))))
+ (const_int MAX_17BIT_OFFSET))
+ (const_int 8)
+ (match_test "TARGET_PORTABLE_RUNTIME")
+ (const_int 24)
+ (not (match_test "flag_pic"))
+ (const_int 20)]
+ (const_int 28)))])
(define_insn ""
[(set (match_operand 0 "register_operand" "=f")
(plus (match_operand 4 "register_operand" "f")
(match_operand 5 "register_operand" "f")))]
"TARGET_PA_11 && ! TARGET_SOFT_FLOAT
- && reload_completed && fmpyaddoperands (operands)"
+ && reload_completed && pa_fmpyaddoperands (operands)"
"*
{
if (GET_MODE (operands[0]) == DFmode)
(mult (match_operand 1 "register_operand" "f")
(match_operand 2 "register_operand" "f")))]
"TARGET_PA_11 && ! TARGET_SOFT_FLOAT
- && reload_completed && fmpyaddoperands (operands)"
+ && reload_completed && pa_fmpyaddoperands (operands)"
"*
{
if (GET_MODE (operands[0]) == DFmode)
(minus (match_operand 4 "register_operand" "f")
(match_operand 5 "register_operand" "f")))]
"TARGET_PA_11 && ! TARGET_SOFT_FLOAT
- && reload_completed && fmpysuboperands (operands)"
+ && reload_completed && pa_fmpysuboperands (operands)"
"*
{
if (GET_MODE (operands[0]) == DFmode)
(mult (match_operand 1 "register_operand" "f")
(match_operand 2 "register_operand" "f")))]
"TARGET_PA_11 && ! TARGET_SOFT_FLOAT
- && reload_completed && fmpysuboperands (operands)"
+ && reload_completed && pa_fmpysuboperands (operands)"
"*
{
if (GET_MODE (operands[0]) == DFmode)
[(set_attr "type" "fpalu")
(set_attr "length" "4")])
-;; Flush the I and D cache lines from the start address (operand0)
-;; to the end address (operand1). No lines are flushed if the end
-;; address is less than the start address (unsigned).
+;; The following two patterns are used by the trampoline code for nested
+;; functions. They flush the I and D cache lines from the start address
+;; (operand0) to the end address (operand1). No lines are flushed if the
+;; end address is less than the start address (unsigned).
;;
-;; Because the range of memory flushed is variable and the size of
-;; a MEM can only be a CONST_INT, the patterns specify that they
-;; perform an unspecified volatile operation on all memory.
+;; Because the range of memory flushed is variable and the size of a MEM
+;; can only be a CONST_INT, the patterns specify that they perform an
+;; unspecified volatile operation on all memory.
;;
;; The address range for an icache flush must lie within a single
;; space on targets with non-equivalent space registers.
;;
-;; This is used by the trampoline code for nested functions.
-;;
;; Operand 0 contains the start address.
;; Operand 1 contains the end address.
;; Operand 2 contains the line length to use.
-;; Operands 3 and 4 (icacheflush) are clobbered scratch registers.
;; Flush data-cache lines from the address in operand 0 up to the one
;; in operand 1, stepping by the line length in operand 2 (fdc,m with
;; a backward cmpb loop).  The insn becomes mode-iterated on P (see
;; the iterator in the file header) so the match_scratch gets an
;; explicit mode, and the runtime TARGET_64BIT test is replaced by the
;; <dwc> condition-completer attribute in a single template.
-(define_insn "dcacheflush"
+(define_insn "dcacheflush<P:mode>"
[(const_int 1)
(unspec_volatile [(mem:BLK (scratch))] UNSPECV_DCACHE)
(use (match_operand 0 "pmode_register_operand" "r"))
(use (match_operand 1 "pmode_register_operand" "r"))
(use (match_operand 2 "pmode_register_operand" "r"))
- (clobber (match_scratch 3 "=&0"))]
+ (clobber (match_scratch:P 3 "=&0"))]
""
- "*
-{
- if (TARGET_64BIT)
- return \"cmpb,*<<=,n %3,%1,.\;fdc,m %2(%3)\;sync\";
- else
- return \"cmpb,<<=,n %3,%1,.\;fdc,m %2(%3)\;sync\";
-}"
+ "cmpb,<dwc><<=,n %3,%1,.\;fdc,m %2(%3)\;sync"
[(set_attr "type" "multi")
(set_attr "length" "12")])
;; Flush instruction-cache lines over the same [operand 0, operand 1]
;; range (fic,m via %sr0, saving/restoring the space register through
;; the scratch registers in operands 3 and 4).  Like dcacheflush, the
;; pattern is now iterated over P and uses the <dwc> completer instead
;; of a runtime TARGET_64BIT test.
-(define_insn "icacheflush"
+(define_insn "icacheflush<P:mode>"
[(const_int 2)
(unspec_volatile [(mem:BLK (scratch))] UNSPECV_ICACHE)
(use (match_operand 0 "pmode_register_operand" "r"))
(use (match_operand 2 "pmode_register_operand" "r"))
(clobber (match_operand 3 "pmode_register_operand" "=&r"))
(clobber (match_operand 4 "pmode_register_operand" "=&r"))
- (clobber (match_scratch 5 "=&0"))]
+ (clobber (match_scratch:P 5 "=&0"))]
""
- "*
-{
- if (TARGET_64BIT)
- return \"mfsp %%sr0,%4\;ldsid (%5),%3\;mtsp %3,%%sr0\;cmpb,*<<=,n %5,%1,.\;fic,m %2(%%sr0,%5)\;sync\;mtsp %4,%%sr0\;nop\;nop\;nop\;nop\;nop\;nop\";
- else
- return \"mfsp %%sr0,%4\;ldsid (%5),%3\;mtsp %3,%%sr0\;cmpb,<<=,n %5,%1,.\;fic,m %2(%%sr0,%5)\;sync\;mtsp %4,%%sr0\;nop\;nop\;nop\;nop\;nop\;nop\";
-}"
+ "mfsp %%sr0,%4\;ldsid (%5),%3\;mtsp %3,%%sr0\;cmpb,<dwc><<=,n %5,%1,.\;fic,m %2(%%sr0,%5)\;sync\;mtsp %4,%%sr0\;nop\;nop\;nop\;nop\;nop\;nop"
[(set_attr "type" "multi")
(set_attr "length" "52")])
""
"*
{
- extern int frame_pointer_needed;
-
+
/* We need two different versions depending on whether or not we
need a frame pointer. Also note that we return to the instruction
immediately after the branch rather than two instructions after the
""
"*
{
- extern int frame_pointer_needed;
/* We need two different versions depending on whether or not we
need a frame pointer. Also note that we return to the instruction
output_asm_insn (\"{comb|cmpb},<<,n %%r26,%%r31,.+%1\", xoperands);
/* Finally, call $$sh_func_adrs to extract the function's real address. */
- return output_millicode_call (insn,
- gen_rtx_SYMBOL_REF (SImode,
- \"$$sh_func_adrs\"));
+ return pa_output_millicode_call (insn,
+ gen_rtx_SYMBOL_REF (SImode,
+ \"$$sh_func_adrs\"));
}"
[(set_attr "type" "multi")
(set (attr "length")
- (plus (symbol_ref "attr_length_millicode_call (insn)")
+ (plus (symbol_ref "pa_attr_length_millicode_call (insn)")
(const_int 20)))])
;; On the PA, the PIC register is call clobbered, so it must
{
addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
GEN_INT (TARGET_64BIT ? -8 : -4));
- emit_move_insn (gen_rtx_MEM (word_mode, addr), frame_pointer_rtx);
+ emit_move_insn (gen_rtx_MEM (word_mode, addr), hard_frame_pointer_rtx);
}
if (!TARGET_64BIT && flag_pic)
{
(match_operand 2 "const_int_operand" "")]
"TARGET_PA_20"
{
- int locality = INTVAL (operands[2]);
-
- gcc_assert (locality >= 0 && locality <= 3);
-
- /* Change operand[0] to a MEM as we don't have the infrastructure
- to output all the supported address modes for ldw/ldd when we use
- the address directly. However, we do have it for MEMs. */
- operands[0] = gen_rtx_MEM (QImode, operands[0]);
-
- /* If the address isn't valid for the prefetch, replace it. */
- if (locality)
- {
- if (!prefetch_nocc_operand (operands[0], QImode))
- operands[0]
- = replace_equiv_address (operands[0],
- copy_to_mode_reg (Pmode,
- XEXP (operands[0], 0)));
- emit_insn (gen_prefetch_nocc (operands[0], operands[1], operands[2]));
- }
- else
- {
- if (!prefetch_cc_operand (operands[0], QImode))
- operands[0]
- = replace_equiv_address (operands[0],
- copy_to_mode_reg (Pmode,
- XEXP (operands[0], 0)));
- emit_insn (gen_prefetch_cc (operands[0], operands[1], operands[2]));
- }
+ operands[0] = copy_addr_to_reg (operands[0]);
+ emit_insn (gen_prefetch_20 (operands[0], operands[1], operands[2]));
DONE;
})
;; PA 2.0 prefetch insn.  The old prefetch_cc/prefetch_nocc pair
;; (split on whether locality was zero, with MEM operands) is merged
;; into a single prefetch_20 that takes the address in a register and
;; selects at output time: ldw vs ldd from operand 1 (read vs write)
;; and the ,sl cache-control completer from operand 2 (nonzero
;; locality).
-(define_insn "prefetch_cc"
- [(prefetch (match_operand:QI 0 "prefetch_cc_operand" "RW")
+(define_insn "prefetch_20"
+ [(prefetch (match_operand 0 "pmode_register_operand" "r")
(match_operand:SI 1 "const_int_operand" "n")
(match_operand:SI 2 "const_int_operand" "n"))]
- "TARGET_PA_20 && operands[2] == const0_rtx"
+ "TARGET_PA_20"
{
- /* The SL cache-control completor indicates good spatial locality but
+ /* The SL cache-control completer indicates good spatial locality but
poor temporal locality. The ldw instruction with a target of general
register 0 prefetches a cache line for a read. The ldd instruction
prefetches a cache line for a write. */
- static const char * const instr[2] = {
- "ldw%M0,sl %0,%%r0",
- "ldd%M0,sl %0,%%r0"
- };
- int read_or_write = INTVAL (operands[1]);
-
- gcc_assert (read_or_write >= 0 && read_or_write <= 1);
-
- return instr [read_or_write];
-}
- [(set_attr "type" "load")
- (set_attr "length" "4")])
-
-(define_insn "prefetch_nocc"
- [(prefetch (match_operand:QI 0 "prefetch_nocc_operand" "A,RQ")
- (match_operand:SI 1 "const_int_operand" "n,n")
- (match_operand:SI 2 "const_int_operand" "n,n"))]
- "TARGET_PA_20 && operands[2] != const0_rtx"
-{
- /* The ldw instruction with a target of general register 0 prefetches
- a cache line for a read. The ldd instruction prefetches a cache line
- for a write. */
static const char * const instr[2][2] = {
{
- "ldw RT'%A0,%%r0",
- "ldd RT'%A0,%%r0",
+ "ldw,sl 0(%0),%%r0",
+ "ldd,sl 0(%0),%%r0"
},
{
- "ldw%M0 %0,%%r0",
- "ldd%M0 %0,%%r0",
+ "ldw 0(%0),%%r0",
+ "ldd 0(%0),%%r0"
}
};
- int read_or_write = INTVAL (operands[1]);
-
- gcc_assert (which_alternative == 0 || which_alternative == 1);
- gcc_assert (read_or_write >= 0 && read_or_write <= 1);
+ int read_or_write = INTVAL (operands[1]) == 0 ? 0 : 1;
+ int locality = INTVAL (operands[2]) == 0 ? 0 : 1;
- return instr [which_alternative][read_or_write];
+ return instr [locality][read_or_write];
}
[(set_attr "type" "load")
(set_attr "length" "4")])
-
;; TLS Support
;; TLS global-dynamic: load the GD index for symbol operand 1 into
;; operand 0, non-PIC form only — the old runtime flag_pic branch is
;; removed and the addressing goes through %r27 (now an explicit use);
;; the PIC case moves to the new tgd_load_pic insn below.  Clobbers
;; %r1 (addil target).
(define_insn "tgd_load"
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec:SI [(match_operand 1 "tgd_symbolic_operand" "")] UNSPEC_TLSGD))
- (clobber (reg:SI 1))]
+ (clobber (reg:SI 1))
+ (use (reg:SI 27))]
""
"*
{
- if (flag_pic)
- return \"addil LT'%1-$tls_gdidx$,%%r19\;ldo RT'%1-$tls_gdidx$(%%r1),%0\";
- else
- return \"addil LR'%1-$tls_gdidx$,%%r27\;ldo RR'%1-$tls_gdidx$(%%r1),%0\";
+ return \"addil LR'%1-$tls_gdidx$,%%r27\;ldo RR'%1-$tls_gdidx$(%%r1),%0\";
+}"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
;; PIC counterpart of tgd_load: same GD index load but addressed off
;; the PIC register %r19 (explicit use) with the LT'/RT' relocations,
;; distinguished by the new UNSPEC_TLSGD_PIC.  Clobbers %r1.
+(define_insn "tgd_load_pic"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "tgd_symbolic_operand" "")] UNSPEC_TLSGD_PIC))
+ (clobber (reg:SI 1))
+ (use (reg:SI 19))]
+ ""
+ "*
+{
+ return \"addil LT'%1-$tls_gdidx$,%%r19\;ldo RT'%1-$tls_gdidx$(%%r1),%0\";
+}"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
;; TLS local-dynamic: load the LD module index for symbol operand 1
;; into operand 0, non-PIC form via %r27 (explicit use); the PIC case
;; is split out into tld_load_pic below.  Clobbers %r1.
(define_insn "tld_load"
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec:SI [(match_operand 1 "tld_symbolic_operand" "")] UNSPEC_TLSLDM))
- (clobber (reg:SI 1))]
+ (clobber (reg:SI 1))
+ (use (reg:SI 27))]
""
"*
{
- if (flag_pic)
- return \"addil LT'%1-$tls_ldidx$,%%r19\;ldo RT'%1-$tls_ldidx$(%%r1),%0\";
- else
- return \"addil LR'%1-$tls_ldidx$,%%r27\;ldo RR'%1-$tls_ldidx$(%%r1),%0\";
+ return \"addil LR'%1-$tls_ldidx$,%%r27\;ldo RR'%1-$tls_ldidx$(%%r1),%0\";
+}"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
;; PIC counterpart of tld_load: addressed off %r19 (explicit use) with
;; LT'/RT' relocations, using the new UNSPEC_TLSLDM_PIC.  Clobbers %r1.
+(define_insn "tld_load_pic"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "tld_symbolic_operand" "")] UNSPEC_TLSLDM_PIC))
+ (clobber (reg:SI 1))
+ (use (reg:SI 19))]
+ ""
+ "*
+{
+ return \"addil LT'%1-$tls_ldidx$,%%r19\;ldo RT'%1-$tls_ldidx$(%%r1),%0\";
+}"
[(set_attr "type" "multi")
(set_attr "length" "8")])
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec:SI [(const_int 0)] UNSPEC_TP))]
""
- "{mfctl|mfctl,w} %%cr27,%0"
+ "mfctl %%cr27,%0"
[(set_attr "type" "multi")
(set_attr "length" "4")])
;; TLS initial-exec: load the IE offset for symbol operand 1 into
;; operand 0 (note ldw, not ldo — this reads the GOT-style slot),
;; non-PIC form via %r27 (explicit use); PIC case split out into
;; tie_load_pic below.  Clobbers %r1.
(define_insn "tie_load"
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec:SI [(match_operand 1 "tie_symbolic_operand" "")] UNSPEC_TLSIE))
- (clobber (reg:SI 1))]
+ (clobber (reg:SI 1))
+ (use (reg:SI 27))]
""
"*
{
- if (flag_pic)
- return \"addil LT'%1-$tls_ieoff$,%%r19\;ldw RT'%1-$tls_ieoff$(%%r1),%0\";
- else
- return \"addil LR'%1-$tls_ieoff$,%%r27\;ldw RR'%1-$tls_ieoff$(%%r1),%0\";
+ return \"addil LR'%1-$tls_ieoff$,%%r27\;ldw RR'%1-$tls_ieoff$(%%r1),%0\";
+}"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
+
;; PIC counterpart of tie_load: addressed off %r19 (explicit use) with
;; LT'/RT' relocations, using the new UNSPEC_TLSIE_PIC.  Clobbers %r1.
+(define_insn "tie_load_pic"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand 1 "tie_symbolic_operand" "")] UNSPEC_TLSIE_PIC))
+ (clobber (reg:SI 1))
+ (use (reg:SI 19))]
+ ""
+ "*
+{
+ return \"addil LT'%1-$tls_ieoff$,%%r19\;ldw RT'%1-$tls_ieoff$(%%r1),%0\";
+}"
[(set_attr "type" "multi")
(set_attr "length" "8")])