1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
47 /* Each optab contains info on how this target machine
48 can perform a particular operation
49 for all sizes and kinds of operands.
51 The operation to be performed is often specified
52 by passing one of these optabs as an argument.
54 See expr.h for documentation of these optabs. */
56 optab optab_table[OTI_MAX];
58 rtx libfunc_table[LTI_MAX];
60 /* Tables of patterns for extending one integer mode to another. */
61 enum insn_code extendtab[MAX_MACHINE_MODE][MAX_MACHINE_MODE][2];
63 /* Tables of patterns for converting between fixed and floating point. */
64 enum insn_code fixtab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
65 enum insn_code fixtrunctab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
66 enum insn_code floattab[NUM_MACHINE_MODES][NUM_MACHINE_MODES][2];
68 /* Contains the optab used for each rtx code. */
69 optab code_to_optab[NUM_RTX_CODE + 1];
71 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
72 gives the gen_function to make a branch to test that condition. */
74 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
76 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
77 gives the insn code to make a store-condition insn
78 to test that condition. */
80 enum insn_code setcc_gen_code[NUM_RTX_CODE];
82 #ifdef HAVE_conditional_move
83 /* Indexed by the machine mode, gives the insn code to make a conditional
84 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
85 setcc_gen_code to cut down on the number of named patterns. Consider a day
86 when a lot more rtx codes are conditional (eg: for the ARM). */
88 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
91 /* The insn generating function can not take an rtx_code argument.
92 TRAP_RTX is used as an rtx argument. Its code is replaced with
93 the code to be used in the trap insn and all other fields are ignored. */
94 static GTY(()) rtx trap_rtx;
96 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
97 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
99 static int expand_cmplxdiv_straight (rtx, rtx, rtx, rtx, rtx, rtx,
100 enum machine_mode, int,
101 enum optab_methods, enum mode_class,
103 static int expand_cmplxdiv_wide (rtx, rtx, rtx, rtx, rtx, rtx,
104 enum machine_mode, int, enum optab_methods,
105 enum mode_class, optab);
106 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
107 enum machine_mode *, int *,
108 enum can_compare_purpose);
109 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
111 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
112 static rtx ftruncify (rtx);
113 static optab new_optab (void);
114 static inline optab init_optab (enum rtx_code);
115 static inline optab init_optabv (enum rtx_code);
116 static void init_libfuncs (optab, int, int, const char *, int);
117 static void init_integral_libfuncs (optab, const char *, int);
118 static void init_floating_libfuncs (optab, const char *, int);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx expand_vector_binop (enum machine_mode, optab, rtx, rtx, rtx, int,
125 static rtx expand_vector_unop (enum machine_mode, optab, rtx, rtx, int);
126 static rtx widen_clz (enum machine_mode, rtx, rtx);
127 static rtx expand_parity (enum machine_mode, rtx, rtx);
129 #ifndef HAVE_conditional_trap
130 #define HAVE_conditional_trap 0
131 #define gen_conditional_trap(a,b) (abort (), NULL_RTX)
134 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
135 the result of operation CODE applied to OP0 (and OP1 if it is a binary
138 If the last insn does not set TARGET, don't do anything, but return 1.
140 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
141 don't add the REG_EQUAL note but return 0. Our caller can then try
142 again, ensuring that TARGET is not one of the operands. */
/* Attach a REG_EQUAL note recording (CODE OP0 [OP1]) to the last insn of
   the sequence INSNS, provided that insn is the one that sets TARGET.
   NOTE(review): this view of the function is missing interior lines
   (return statements, braces); comments below describe only what the
   visible lines establish.  */
145 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
147 rtx last_insn, insn, set;
/* Bail out early for a sequence of fewer than two insns (a REG_EQUAL
   note would add no information there).  */
152 || NEXT_INSN (insns) == NULL_RTX)
/* Only unary ('1'), binary ('2'), commutative ('c') and comparison ('<')
   rtx classes can be expressed as a REG_EQUAL note.  */
155 if (GET_RTX_CLASS (code) != '1' && GET_RTX_CLASS (code) != '2'
156 && GET_RTX_CLASS (code) != 'c' && GET_RTX_CLASS (code) != '<')
/* A ZERO_EXTRACT destination writes only part of TARGET, so an
   equality note for the whole register would be wrong.  */
159 if (GET_CODE (target) == ZERO_EXTRACT)
/* Walk to the last insn of the sequence.  */
162 for (last_insn = insns;
163 NEXT_INSN (last_insn) != NULL_RTX;
164 last_insn = NEXT_INSN (last_insn))
167 set = single_set (last_insn);
/* The last insn must set TARGET itself (or TARGET wrapped in a
   STRICT_LOW_PART); otherwise the note would not describe TARGET.  */
171 if (! rtx_equal_p (SET_DEST (set), target)
172 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
173 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
174 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
177 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
178 besides the last insn. */
179 if (reg_overlap_mentioned_p (target, op0)
180 || (op1 && reg_overlap_mentioned_p (target, op1)))
/* Scan backwards over every insn before the last one; a clobber of
   TARGET there would make OP0/OP1 stale in the note.  */
182 insn = PREV_INSN (last_insn);
183 while (insn != NULL_RTX)
185 if (reg_set_p (target, insn))
188 insn = PREV_INSN (insn);
/* Build the note: unary codes take one operand, all others two.
   Operands are copied so the note owns independent rtl.  */
192 if (GET_RTX_CLASS (code) == '1')
193 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
195 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
197 set_unique_reg_note (last_insn, REG_EQUAL, note);
202 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
203 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
204 not actually do a sign-extend or zero-extend, but can leave the
205 higher-order bits of the result rtx undefined, for example, in the case
206 of logical operations, but not right shifts. */
/* Return OP widened to MODE.  OLDMODE is OP's nominal mode, UNSIGNEDP
   selects zero- vs sign-extension, and NO_EXTEND permits leaving the
   high-order bits undefined (valid for e.g. logical ops).
   NOTE(review): some interior lines (returns, the first arm of the
   extension condition) are missing from this view.  */
209 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
210 int unsignedp, int no_extend)
214 /* If we don't have to extend and this is a constant, return it. */
215 if (no_extend && GET_MODE (op) == VOIDmode)
218 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
219 extend since it will be more efficient to do so unless the signedness of
220 a promoted object differs from our extension. */
222 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
223 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
224 return convert_modes (mode, oldmode, op, unsignedp);
226 /* If MODE is no wider than a single word, we return a paradoxical
228 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
/* Paradoxical SUBREG: high bits are undefined, which NO_EXTEND allows.  */
229 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
231 /* Otherwise, get an object of MODE, clobber it, and set the low-order
/* The CLOBBER tells flow analysis the whole register is (re)defined
   before we store only the low part.  */
234 result = gen_reg_rtx (mode);
235 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
236 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
240 /* Generate code to perform a straightforward complex divide. */
/* Emit RTL for the textbook complex division (a+ib)/(c+id):
   realr = (ac+bd)/(cc+dd), imagr = (bc-ad)/(cc+dd).
   SUBMODE is the component mode; BINOPTAB supplies the division
   (a trapping variant when sdivv_optab).  CLASS distinguishes
   float division from integer TRUNC_DIV.
   NOTE(review): this view is missing interior lines, including the
   if/else scaffolding that selects between the imag0 == 0 special
   case and the general case, and the failure-return paths.  */
243 expand_cmplxdiv_straight (rtx real0, rtx real1, rtx imag0, rtx imag1,
244 rtx realr, rtx imagr, enum machine_mode submode,
245 int unsignedp, enum optab_methods methods,
246 enum mode_class class, optab binoptab)
/* For trapping (overflow-checking) division use the trapping
   variants of the component operations throughout.  */
252 optab this_add_optab = add_optab;
253 optab this_sub_optab = sub_optab;
254 optab this_neg_optab = neg_optab;
255 optab this_mul_optab = smul_optab;
257 if (binoptab == sdivv_optab)
259 this_add_optab = addv_optab;
260 this_sub_optab = subv_optab;
261 this_neg_optab = negv_optab;
262 this_mul_optab = smulv_optab;
265 /* Don't fetch these from memory more than once. */
266 real0 = force_reg (submode, real0);
267 real1 = force_reg (submode, real1);
270 imag0 = force_reg (submode, imag0);
272 imag1 = force_reg (submode, imag1);
274 /* Divisor: c*c + d*d. */
275 temp1 = expand_binop (submode, this_mul_optab, real1, real1,
276 NULL_RTX, unsignedp, methods);
278 temp2 = expand_binop (submode, this_mul_optab, imag1, imag1,
279 NULL_RTX, unsignedp, methods);
/* Any expand_* returning 0 means the operation could not be emitted;
   the (elided) code paths propagate that failure.  */
281 if (temp1 == 0 || temp2 == 0)
284 divisor = expand_binop (submode, this_add_optab, temp1, temp2,
285 NULL_RTX, unsignedp, methods);
/* Special case: purely real numerator (b == 0).  */
291 /* Mathematically, ((a)(c-id))/divisor. */
292 /* Computationally, (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)). */
294 /* Calculate the dividend. */
295 real_t = expand_binop (submode, this_mul_optab, real0, real1,
296 NULL_RTX, unsignedp, methods);
298 imag_t = expand_binop (submode, this_mul_optab, real0, imag1,
299 NULL_RTX, unsignedp, methods);
301 if (real_t == 0 || imag_t == 0)
/* Negate a*d to get the imaginary-part numerator.  */
304 imag_t = expand_unop (submode, this_neg_optab, imag_t,
305 NULL_RTX, unsignedp);
/* General case: full complex numerator.  */
309 /* Mathematically, ((a+ib)(c-id))/divider. */
310 /* Calculate the dividend. */
311 temp1 = expand_binop (submode, this_mul_optab, real0, real1,
312 NULL_RTX, unsignedp, methods);
314 temp2 = expand_binop (submode, this_mul_optab, imag0, imag1,
315 NULL_RTX, unsignedp, methods);
317 if (temp1 == 0 || temp2 == 0)
/* real numerator = a*c + b*d.  */
320 real_t = expand_binop (submode, this_add_optab, temp1, temp2,
321 NULL_RTX, unsignedp, methods);
323 temp1 = expand_binop (submode, this_mul_optab, imag0, real1,
324 NULL_RTX, unsignedp, methods);
326 temp2 = expand_binop (submode, this_mul_optab, real0, imag1,
327 NULL_RTX, unsignedp, methods);
329 if (temp1 == 0 || temp2 == 0)
/* imag numerator = b*c - a*d.  */
332 imag_t = expand_binop (submode, this_sub_optab, temp1, temp2,
333 NULL_RTX, unsignedp, methods);
335 if (real_t == 0 || imag_t == 0)
/* Final divisions: float modes use BINOPTAB directly, integer
   complex modes go through expand_divmod with truncating division.  */
339 if (class == MODE_COMPLEX_FLOAT)
340 res = expand_binop (submode, binoptab, real_t, divisor,
341 realr, unsignedp, methods);
343 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
344 real_t, divisor, realr, unsignedp);
350 emit_move_insn (realr, res);
352 if (class == MODE_COMPLEX_FLOAT)
353 res = expand_binop (submode, binoptab, imag_t, divisor,
354 imagr, unsignedp, methods);
356 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
357 imag_t, divisor, imagr, unsignedp);
363 emit_move_insn (imagr, res);
368 /* Generate code to perform a wide-input-range-acceptable complex divide. */
/* Emit RTL for complex division (a+ib)/(c+id) using ratio scaling
   ("Smith's" style): compare |c| with |d|, scale numerator and
   denominator by d/c or c/d so the intermediate products stay in
   range.  Emits a runtime branch (lab1/lab2) between the two cases.
   NOTE(review): interior lines are missing from this view, including
   the if/else scaffolding around the imag0 == 0 special cases and the
   failure-return paths.  */
371 expand_cmplxdiv_wide (rtx real0, rtx real1, rtx imag0, rtx imag1, rtx realr,
372 rtx imagr, enum machine_mode submode, int unsignedp,
373 enum optab_methods methods, enum mode_class class,
378 rtx temp1, temp2, lab1, lab2;
379 enum machine_mode mode;
/* Use trapping component operations when a trapping divide was
   requested, exactly as in expand_cmplxdiv_straight.  */
381 optab this_add_optab = add_optab;
382 optab this_sub_optab = sub_optab;
383 optab this_neg_optab = neg_optab;
384 optab this_mul_optab = smul_optab;
386 if (binoptab == sdivv_optab)
388 this_add_optab = addv_optab;
389 this_sub_optab = subv_optab;
390 this_neg_optab = negv_optab;
391 this_mul_optab = smulv_optab;
394 /* Don't fetch these from memory more than once. */
395 real0 = force_reg (submode, real0);
396 real1 = force_reg (submode, real1);
399 imag0 = force_reg (submode, imag0);
401 imag1 = force_reg (submode, imag1);
403 /* XXX What's an "unsigned" complex number? */
/* Compute |c| and |d| to decide which ratio keeps values in range.  */
411 temp1 = expand_abs (submode, real1, NULL_RTX, unsignedp, 1);
412 temp2 = expand_abs (submode, imag1, NULL_RTX, unsignedp, 1);
415 if (temp1 == 0 || temp2 == 0)
/* Branch to lab1 when |c| < |d|; fall through for |c| >= |d|.  */
418 mode = GET_MODE (temp1);
419 lab1 = gen_label_rtx ();
420 emit_cmp_and_jump_insns (temp1, temp2, LT, NULL_RTX,
421 mode, unsignedp, lab1);
423 /* |c| >= |d|; use ratio d/c to scale dividend and divisor. */
425 if (class == MODE_COMPLEX_FLOAT)
426 ratio = expand_binop (submode, binoptab, imag1, real1,
427 NULL_RTX, unsignedp, methods);
429 ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode,
430 imag1, real1, NULL_RTX, unsignedp);
435 /* Calculate divisor. */
/* divisor = c + d*(d/c).  */
437 temp1 = expand_binop (submode, this_mul_optab, imag1, ratio,
438 NULL_RTX, unsignedp, methods);
443 divisor = expand_binop (submode, this_add_optab, temp1, real1,
444 NULL_RTX, unsignedp, methods);
449 /* Calculate dividend. */
/* Special case: purely real numerator (b == 0).  */
455 /* Compute a / (c+id) as a / (c+d(d/c)) + i (-a(d/c)) / (c+d(d/c)). */
457 imag_t = expand_binop (submode, this_mul_optab, real0, ratio,
458 NULL_RTX, unsignedp, methods);
463 imag_t = expand_unop (submode, this_neg_optab, imag_t,
464 NULL_RTX, unsignedp);
466 if (real_t == 0 || imag_t == 0)
/* General case for |c| >= |d|.  */
471 /* Compute (a+ib)/(c+id) as
472 (a+b(d/c))/(c+d(d/c) + i(b-a(d/c))/(c+d(d/c)). */
474 temp1 = expand_binop (submode, this_mul_optab, imag0, ratio,
475 NULL_RTX, unsignedp, methods);
480 real_t = expand_binop (submode, this_add_optab, temp1, real0,
481 NULL_RTX, unsignedp, methods);
483 temp1 = expand_binop (submode, this_mul_optab, real0, ratio,
484 NULL_RTX, unsignedp, methods);
489 imag_t = expand_binop (submode, this_sub_optab, imag0, temp1,
490 NULL_RTX, unsignedp, methods);
492 if (real_t == 0 || imag_t == 0)
/* Divide numerators by the scaled divisor and store the results.  */
496 if (class == MODE_COMPLEX_FLOAT)
497 res = expand_binop (submode, binoptab, real_t, divisor,
498 realr, unsignedp, methods);
500 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
501 real_t, divisor, realr, unsignedp);
507 emit_move_insn (realr, res);
509 if (class == MODE_COMPLEX_FLOAT)
510 res = expand_binop (submode, binoptab, imag_t, divisor,
511 imagr, unsignedp, methods);
513 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
514 imag_t, divisor, imagr, unsignedp);
520 emit_move_insn (imagr, res);
/* Skip over the |d| > |c| case.  */
522 lab2 = gen_label_rtx ();
523 emit_jump_insn (gen_jump (lab2));
528 /* |d| > |c|; use ratio c/d to scale dividend and divisor. */
530 if (class == MODE_COMPLEX_FLOAT)
531 ratio = expand_binop (submode, binoptab, real1, imag1,
532 NULL_RTX, unsignedp, methods);
534 ratio = expand_divmod (0, TRUNC_DIV_EXPR, submode,
535 real1, imag1, NULL_RTX, unsignedp);
540 /* Calculate divisor. */
/* divisor = c*(c/d) + d.  */
542 temp1 = expand_binop (submode, this_mul_optab, real1, ratio,
543 NULL_RTX, unsignedp, methods);
548 divisor = expand_binop (submode, this_add_optab, temp1, imag1,
549 NULL_RTX, unsignedp, methods);
554 /* Calculate dividend. */
/* Special case: purely real numerator (b == 0).  */
558 /* Compute a / (c+id) as a(c/d) / (c(c/d)+d) + i (-a) / (c(c/d)+d). */
560 real_t = expand_binop (submode, this_mul_optab, real0, ratio,
561 NULL_RTX, unsignedp, methods);
563 imag_t = expand_unop (submode, this_neg_optab, real0,
564 NULL_RTX, unsignedp);
566 if (real_t == 0 || imag_t == 0)
/* General case for |d| > |c|.  */
571 /* Compute (a+ib)/(c+id) as
572 (a(c/d)+b)/(c(c/d)+d) + i (b(c/d)-a)/(c(c/d)+d). */
574 temp1 = expand_binop (submode, this_mul_optab, real0, ratio,
575 NULL_RTX, unsignedp, methods);
580 real_t = expand_binop (submode, this_add_optab, temp1, imag0,
581 NULL_RTX, unsignedp, methods);
583 temp1 = expand_binop (submode, this_mul_optab, imag0, ratio,
584 NULL_RTX, unsignedp, methods);
589 imag_t = expand_binop (submode, this_sub_optab, temp1, real0,
590 NULL_RTX, unsignedp, methods);
592 if (real_t == 0 || imag_t == 0)
/* Divide numerators by the scaled divisor and store the results.  */
596 if (class == MODE_COMPLEX_FLOAT)
597 res = expand_binop (submode, binoptab, real_t, divisor,
598 realr, unsignedp, methods);
600 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
601 real_t, divisor, realr, unsignedp);
607 emit_move_insn (realr, res);
609 if (class == MODE_COMPLEX_FLOAT)
610 res = expand_binop (submode, binoptab, imag_t, divisor,
611 imagr, unsignedp, methods);
613 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
614 imag_t, divisor, imagr, unsignedp);
620 emit_move_insn (imagr, res);
627 /* Wrapper around expand_binop which takes an rtx code to specify
628 the operation to perform, not an optab pointer. All other
629 arguments are the same. */
/* Convenience wrapper for expand_binop: look up the optab for rtx
   code CODE via the code_to_optab[] table and expand with it.  All
   other arguments are passed through unchanged.
   NOTE(review): this view is missing at least the return type line
   and braces; presumably there is also a check for a null optab
   between the lookup and the call — confirm against the full file.  */
631 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
632 rtx op1, rtx target, int unsignedp,
633 enum optab_methods methods)
635 optab binop = code_to_optab[(int) code];
639 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
642 /* Generate code to perform an operation specified by BINOPTAB
643 on operands OP0 and OP1, with result having machine-mode MODE.
645 UNSIGNEDP is for the case where we have to widen the operands
646 to perform the operation. It says to use zero-extension.
648 If TARGET is nonzero, the value
649 is generated there, if it is convenient to do so.
650 In all cases an rtx is returned for the locus of the value;
651 this may or may not be TARGET. */
654 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
655 rtx target, int unsignedp, enum optab_methods methods)
657 enum optab_methods next_methods
658 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
659 ? OPTAB_WIDEN : methods);
660 enum mode_class class;
661 enum machine_mode wider_mode;
663 int commutative_op = 0;
664 int shift_op = (binoptab->code == ASHIFT
665 || binoptab->code == ASHIFTRT
666 || binoptab->code == LSHIFTRT
667 || binoptab->code == ROTATE
668 || binoptab->code == ROTATERT);
669 rtx entry_last = get_last_insn ();
672 class = GET_MODE_CLASS (mode);
674 op0 = protect_from_queue (op0, 0);
675 op1 = protect_from_queue (op1, 0);
677 target = protect_from_queue (target, 1);
681 /* Load duplicate non-volatile operands once. */
682 if (rtx_equal_p (op0, op1) && ! volatile_refs_p (op0))
684 op0 = force_not_mem (op0);
689 op0 = force_not_mem (op0);
690 op1 = force_not_mem (op1);
694 /* If subtracting an integer constant, convert this into an addition of
695 the negated constant. */
697 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
699 op1 = negate_rtx (mode, op1);
700 binoptab = add_optab;
703 /* If we are inside an appropriately-short loop and one operand is an
704 expensive constant, force it into a register. */
705 if (CONSTANT_P (op0) && preserve_subexpressions_p ()
706 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
707 op0 = force_reg (mode, op0);
709 if (CONSTANT_P (op1) && preserve_subexpressions_p ()
710 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
711 op1 = force_reg (mode, op1);
713 /* Record where to delete back to if we backtrack. */
714 last = get_last_insn ();
716 /* If operation is commutative,
717 try to make the first operand a register.
718 Even better, try to make it the same as the target.
719 Also try to make the last operand a constant. */
720 if (GET_RTX_CLASS (binoptab->code) == 'c'
721 || binoptab == smul_widen_optab
722 || binoptab == umul_widen_optab
723 || binoptab == smul_highpart_optab
724 || binoptab == umul_highpart_optab)
728 if (((target == 0 || GET_CODE (target) == REG)
729 ? ((GET_CODE (op1) == REG
730 && GET_CODE (op0) != REG)
732 : rtx_equal_p (op1, target))
733 || GET_CODE (op0) == CONST_INT)
741 /* If we can do it with a three-operand insn, do so. */
743 if (methods != OPTAB_MUST_WIDEN
744 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
746 int icode = (int) binoptab->handlers[(int) mode].insn_code;
747 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
748 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
750 rtx xop0 = op0, xop1 = op1;
755 temp = gen_reg_rtx (mode);
757 /* If it is a commutative operator and the modes would match
758 if we would swap the operands, we can save the conversions. */
761 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
762 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
766 tmp = op0; op0 = op1; op1 = tmp;
767 tmp = xop0; xop0 = xop1; xop1 = tmp;
771 /* In case the insn wants input operands in modes different from
772 those of the actual operands, convert the operands. It would
773 seem that we don't need to convert CONST_INTs, but we do, so
774 that they're properly zero-extended, sign-extended or truncated
777 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
778 xop0 = convert_modes (mode0,
779 GET_MODE (op0) != VOIDmode
784 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
785 xop1 = convert_modes (mode1,
786 GET_MODE (op1) != VOIDmode
791 /* Now, if insn's predicates don't allow our operands, put them into
794 if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0)
795 && mode0 != VOIDmode)
796 xop0 = copy_to_mode_reg (mode0, xop0);
798 if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1)
799 && mode1 != VOIDmode)
800 xop1 = copy_to_mode_reg (mode1, xop1);
802 if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
803 temp = gen_reg_rtx (mode);
805 pat = GEN_FCN (icode) (temp, xop0, xop1);
808 /* If PAT is composed of more than one insn, try to add an appropriate
809 REG_EQUAL note to it. If we can't because TEMP conflicts with an
810 operand, call ourselves again, this time without a target. */
811 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
812 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
814 delete_insns_since (last);
815 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
823 delete_insns_since (last);
826 /* If this is a multiply, see if we can do a widening operation that
827 takes operands of this mode and makes a wider mode. */
829 if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
830 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
831 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
832 != CODE_FOR_nothing))
834 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
835 unsignedp ? umul_widen_optab : smul_widen_optab,
836 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
840 if (GET_MODE_CLASS (mode) == MODE_INT)
841 return gen_lowpart (mode, temp);
843 return convert_to_mode (mode, temp, unsignedp);
847 /* Look for a wider mode of the same class for which we think we
848 can open-code the operation. Check for a widening multiply at the
849 wider mode as well. */
851 if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
852 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
853 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
854 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
856 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
857 || (binoptab == smul_optab
858 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
859 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
860 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
861 != CODE_FOR_nothing)))
863 rtx xop0 = op0, xop1 = op1;
866 /* For certain integer operations, we need not actually extend
867 the narrow operands, as long as we will truncate
868 the results to the same narrowness. */
870 if ((binoptab == ior_optab || binoptab == and_optab
871 || binoptab == xor_optab
872 || binoptab == add_optab || binoptab == sub_optab
873 || binoptab == smul_optab || binoptab == ashl_optab)
874 && class == MODE_INT)
877 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
879 /* The second operand of a shift must always be extended. */
880 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
881 no_extend && binoptab != ashl_optab);
883 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
884 unsignedp, OPTAB_DIRECT);
887 if (class != MODE_INT)
890 target = gen_reg_rtx (mode);
891 convert_move (target, temp, 0);
895 return gen_lowpart (mode, temp);
898 delete_insns_since (last);
902 /* These can be done a word at a time. */
903 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
905 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
906 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
912 /* If TARGET is the same as one of the operands, the REG_EQUAL note
913 won't be accurate, so use a new target. */
914 if (target == 0 || target == op0 || target == op1)
915 target = gen_reg_rtx (mode);
919 /* Do the actual arithmetic. */
920 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
922 rtx target_piece = operand_subword (target, i, 1, mode);
923 rtx x = expand_binop (word_mode, binoptab,
924 operand_subword_force (op0, i, mode),
925 operand_subword_force (op1, i, mode),
926 target_piece, unsignedp, next_methods);
931 if (target_piece != x)
932 emit_move_insn (target_piece, x);
935 insns = get_insns ();
938 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
940 if (binoptab->code != UNKNOWN)
942 = gen_rtx_fmt_ee (binoptab->code, mode,
943 copy_rtx (op0), copy_rtx (op1));
947 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
952 /* Synthesize double word shifts from single word shifts. */
953 if ((binoptab == lshr_optab || binoptab == ashl_optab
954 || binoptab == ashr_optab)
956 && GET_CODE (op1) == CONST_INT
957 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
958 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
959 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
960 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
962 rtx insns, inter, equiv_value;
963 rtx into_target, outof_target;
964 rtx into_input, outof_input;
965 int shift_count, left_shift, outof_word;
967 /* If TARGET is the same as one of the operands, the REG_EQUAL note
968 won't be accurate, so use a new target. */
969 if (target == 0 || target == op0 || target == op1)
970 target = gen_reg_rtx (mode);
974 shift_count = INTVAL (op1);
976 /* OUTOF_* is the word we are shifting bits away from, and
977 INTO_* is the word that we are shifting bits towards, thus
978 they differ depending on the direction of the shift and
981 left_shift = binoptab == ashl_optab;
982 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
984 outof_target = operand_subword (target, outof_word, 1, mode);
985 into_target = operand_subword (target, 1 - outof_word, 1, mode);
987 outof_input = operand_subword_force (op0, outof_word, mode);
988 into_input = operand_subword_force (op0, 1 - outof_word, mode);
990 if (shift_count >= BITS_PER_WORD)
992 inter = expand_binop (word_mode, binoptab,
994 GEN_INT (shift_count - BITS_PER_WORD),
995 into_target, unsignedp, next_methods);
997 if (inter != 0 && inter != into_target)
998 emit_move_insn (into_target, inter);
1000 /* For a signed right shift, we must fill the word we are shifting
1001 out of with copies of the sign bit. Otherwise it is zeroed. */
1002 if (inter != 0 && binoptab != ashr_optab)
1003 inter = CONST0_RTX (word_mode);
1004 else if (inter != 0)
1005 inter = expand_binop (word_mode, binoptab,
1007 GEN_INT (BITS_PER_WORD - 1),
1008 outof_target, unsignedp, next_methods);
1010 if (inter != 0 && inter != outof_target)
1011 emit_move_insn (outof_target, inter);
1016 optab reverse_unsigned_shift, unsigned_shift;
1018 /* For a shift of less then BITS_PER_WORD, to compute the carry,
1019 we must do a logical shift in the opposite direction of the
1022 reverse_unsigned_shift = (left_shift ? lshr_optab : ashl_optab);
1024 /* For a shift of less than BITS_PER_WORD, to compute the word
1025 shifted towards, we need to unsigned shift the orig value of
1028 unsigned_shift = (left_shift ? ashl_optab : lshr_optab);
1030 carries = expand_binop (word_mode, reverse_unsigned_shift,
1032 GEN_INT (BITS_PER_WORD - shift_count),
1033 0, unsignedp, next_methods);
1038 inter = expand_binop (word_mode, unsigned_shift, into_input,
1039 op1, 0, unsignedp, next_methods);
1042 inter = expand_binop (word_mode, ior_optab, carries, inter,
1043 into_target, unsignedp, next_methods);
1045 if (inter != 0 && inter != into_target)
1046 emit_move_insn (into_target, inter);
1049 inter = expand_binop (word_mode, binoptab, outof_input,
1050 op1, outof_target, unsignedp, next_methods);
1052 if (inter != 0 && inter != outof_target)
1053 emit_move_insn (outof_target, inter);
1056 insns = get_insns ();
1061 if (binoptab->code != UNKNOWN)
1062 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1066 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1071 /* Synthesize double word rotates from single word shifts. */
1072 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1073 && class == MODE_INT
1074 && GET_CODE (op1) == CONST_INT
1075 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1076 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1077 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1079 rtx insns, equiv_value;
1080 rtx into_target, outof_target;
1081 rtx into_input, outof_input;
1083 int shift_count, left_shift, outof_word;
1085 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1086 won't be accurate, so use a new target. */
1087 if (target == 0 || target == op0 || target == op1)
1088 target = gen_reg_rtx (mode);
1092 shift_count = INTVAL (op1);
1094 /* OUTOF_* is the word we are shifting bits away from, and
1095 INTO_* is the word that we are shifting bits towards, thus
1096 they differ depending on the direction of the shift and
1097 WORDS_BIG_ENDIAN. */
1099 left_shift = (binoptab == rotl_optab);
1100 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1102 outof_target = operand_subword (target, outof_word, 1, mode);
1103 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1105 outof_input = operand_subword_force (op0, outof_word, mode);
1106 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1108 if (shift_count == BITS_PER_WORD)
1110 /* This is just a word swap. */
1111 emit_move_insn (outof_target, into_input);
1112 emit_move_insn (into_target, outof_input);
1117 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1118 rtx first_shift_count, second_shift_count;
1119 optab reverse_unsigned_shift, unsigned_shift;
1121 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1122 ? lshr_optab : ashl_optab);
1124 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1125 ? ashl_optab : lshr_optab);
1127 if (shift_count > BITS_PER_WORD)
1129 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1130 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1134 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1135 second_shift_count = GEN_INT (shift_count);
1138 into_temp1 = expand_binop (word_mode, unsigned_shift,
1139 outof_input, first_shift_count,
1140 NULL_RTX, unsignedp, next_methods);
1141 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1142 into_input, second_shift_count,
1143 NULL_RTX, unsignedp, next_methods);
1145 if (into_temp1 != 0 && into_temp2 != 0)
1146 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1147 into_target, unsignedp, next_methods);
1151 if (inter != 0 && inter != into_target)
1152 emit_move_insn (into_target, inter);
1154 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1155 into_input, first_shift_count,
1156 NULL_RTX, unsignedp, next_methods);
1157 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1158 outof_input, second_shift_count,
1159 NULL_RTX, unsignedp, next_methods);
1161 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1162 inter = expand_binop (word_mode, ior_optab,
1163 outof_temp1, outof_temp2,
1164 outof_target, unsignedp, next_methods);
1166 if (inter != 0 && inter != outof_target)
1167 emit_move_insn (outof_target, inter);
1170 insns = get_insns ();
1175 if (binoptab->code != UNKNOWN)
1176 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1180 /* We can't make this a no conflict block if this is a word swap,
1181 because the word swap case fails if the input and output values
1182 are in the same register. */
1183 if (shift_count != BITS_PER_WORD)
1184 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1193 /* These can be done a word at a time by propagating carries. */
1194 if ((binoptab == add_optab || binoptab == sub_optab)
1195 && class == MODE_INT
1196 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1197 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1200 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1201 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1202 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1203 rtx xop0, xop1, xtarget;
1205 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1206 value is one of those, use it. Otherwise, use 1 since it is the
1207 one easiest to get. */
1208 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1209 int normalizep = STORE_FLAG_VALUE;
1214 /* Prepare the operands. */
1215 xop0 = force_reg (mode, op0);
1216 xop1 = force_reg (mode, op1);
1218 xtarget = gen_reg_rtx (mode);
1220 if (target == 0 || GET_CODE (target) != REG)
1223 /* Indicate for flow that the entire target reg is being set. */
1224 if (GET_CODE (target) == REG)
1225 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1227 /* Do the actual arithmetic. */
1228 for (i = 0; i < nwords; i++)
1230 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1231 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1232 rtx op0_piece = operand_subword_force (xop0, index, mode);
1233 rtx op1_piece = operand_subword_force (xop1, index, mode);
1236 /* Main add/subtract of the input operands. */
1237 x = expand_binop (word_mode, binoptab,
1238 op0_piece, op1_piece,
1239 target_piece, unsignedp, next_methods);
1245 /* Store carry from main add/subtract. */
1246 carry_out = gen_reg_rtx (word_mode);
1247 carry_out = emit_store_flag_force (carry_out,
1248 (binoptab == add_optab
1251 word_mode, 1, normalizep);
1258 /* Add/subtract previous carry to main result. */
1259 newx = expand_binop (word_mode,
1260 normalizep == 1 ? binoptab : otheroptab,
1262 NULL_RTX, 1, next_methods);
1266 /* Get out carry from adding/subtracting carry in. */
1267 rtx carry_tmp = gen_reg_rtx (word_mode);
1268 carry_tmp = emit_store_flag_force (carry_tmp,
1269 (binoptab == add_optab
1272 word_mode, 1, normalizep);
1274 /* Logical-ior the two poss. carry together. */
1275 carry_out = expand_binop (word_mode, ior_optab,
1276 carry_out, carry_tmp,
1277 carry_out, 0, next_methods);
1281 emit_move_insn (target_piece, newx);
1284 carry_in = carry_out;
1287 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1289 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1290 || ! rtx_equal_p (target, xtarget))
1292 rtx temp = emit_move_insn (target, xtarget);
1294 set_unique_reg_note (temp,
1296 gen_rtx_fmt_ee (binoptab->code, mode,
1307 delete_insns_since (last);
1310 /* If we want to multiply two two-word values and have normal and widening
1311 multiplies of single-word values, we can do this with three smaller
1312 multiplications. Note that we do not make a REG_NO_CONFLICT block here
1313 because we are not operating on one word at a time.
1315 The multiplication proceeds as follows:
1316 _______________________
1317 [__op0_high_|__op0_low__]
1318 _______________________
1319 * [__op1_high_|__op1_low__]
1320 _______________________________________________
1321 _______________________
1322 (1) [__op0_low__*__op1_low__]
1323 _______________________
1324 (2a) [__op0_low__*__op1_high_]
1325 _______________________
1326 (2b) [__op0_high_*__op1_low__]
1327 _______________________
1328 (3) [__op0_high_*__op1_high_]
1331 This gives a 4-word result. Since we are only interested in the
1332 lower 2 words, partial result (3) and the upper words of (2a) and
1333 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1334 calculated using non-widening multiplication.
1336 (1), however, needs to be calculated with an unsigned widening
1337 multiplication. If this operation is not directly supported we
1338 try using a signed widening multiplication and adjust the result.
1339 This adjustment works as follows:
1341 If both operands are positive then no adjustment is needed.
1343 If the operands have different signs, for example op0_low < 0 and
1344 op1_low >= 0, the instruction treats the most significant bit of
1345 op0_low as a sign bit instead of a bit with significance
1346 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1347 with 2**BITS_PER_WORD - op0_low, and two's complements the
1348 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1351 Similarly, if both operands are negative, we need to add
1352 (op0_low + op1_low) * 2**BITS_PER_WORD.
1354 We use a trick to adjust quickly. We logically shift op0_low right
1355 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1356 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1357 logical shift exists, we do an arithmetic right shift and subtract
1360 if (binoptab == smul_optab
1361 && class == MODE_INT
1362 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1363 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1364 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1365 && ((umul_widen_optab->handlers[(int) mode].insn_code
1366 != CODE_FOR_nothing)
1367 || (smul_widen_optab->handlers[(int) mode].insn_code
1368 != CODE_FOR_nothing)))
1370 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1371 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1372 rtx op0_high = operand_subword_force (op0, high, mode);
1373 rtx op0_low = operand_subword_force (op0, low, mode);
1374 rtx op1_high = operand_subword_force (op1, high, mode);
1375 rtx op1_low = operand_subword_force (op1, low, mode);
1377 rtx op0_xhigh = NULL_RTX;
1378 rtx op1_xhigh = NULL_RTX;
1380 /* If the target is the same as one of the inputs, don't use it. This
1381 prevents problems with the REG_EQUAL note. */
1382 if (target == op0 || target == op1
1383 || (target != 0 && GET_CODE (target) != REG))
1386 /* Multiply the two lower words to get a double-word product.
1387 If unsigned widening multiplication is available, use that;
1388 otherwise use the signed form and compensate. */
1390 if (umul_widen_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1392 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1393 target, 1, OPTAB_DIRECT);
1395 /* If we didn't succeed, delete everything we did so far. */
1397 delete_insns_since (last);
1399 op0_xhigh = op0_high, op1_xhigh = op1_high;
1403 && smul_widen_optab->handlers[(int) mode].insn_code
1404 != CODE_FOR_nothing)
1406 rtx wordm1 = GEN_INT (BITS_PER_WORD - 1);
1407 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1408 target, 1, OPTAB_DIRECT);
1409 op0_xhigh = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1410 NULL_RTX, 1, next_methods);
1412 op0_xhigh = expand_binop (word_mode, add_optab, op0_high,
1413 op0_xhigh, op0_xhigh, 0, next_methods);
1416 op0_xhigh = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1417 NULL_RTX, 0, next_methods);
1419 op0_xhigh = expand_binop (word_mode, sub_optab, op0_high,
1420 op0_xhigh, op0_xhigh, 0,
1424 op1_xhigh = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1425 NULL_RTX, 1, next_methods);
1427 op1_xhigh = expand_binop (word_mode, add_optab, op1_high,
1428 op1_xhigh, op1_xhigh, 0, next_methods);
1431 op1_xhigh = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1432 NULL_RTX, 0, next_methods);
1434 op1_xhigh = expand_binop (word_mode, sub_optab, op1_high,
1435 op1_xhigh, op1_xhigh, 0,
1440 /* If we have been able to directly compute the product of the
1441 low-order words of the operands and perform any required adjustments
1442 of the operands, we proceed by trying two more multiplications
1443 and then computing the appropriate sum.
1445 We have checked above that the required addition is provided.
1446 Full-word addition will normally always succeed, especially if
1447 it is provided at all, so we don't worry about its failure. The
1448 multiplication may well fail, however, so we do handle that. */
1450 if (product && op0_xhigh && op1_xhigh)
1452 rtx product_high = operand_subword (product, high, 1, mode);
1453 rtx temp = expand_binop (word_mode, binoptab, op0_low, op1_xhigh,
1454 NULL_RTX, 0, OPTAB_DIRECT);
1456 if (!REG_P (product_high))
1457 product_high = force_reg (word_mode, product_high);
1460 temp = expand_binop (word_mode, add_optab, temp, product_high,
1461 product_high, 0, next_methods);
1463 if (temp != 0 && temp != product_high)
1464 emit_move_insn (product_high, temp);
1467 temp = expand_binop (word_mode, binoptab, op1_low, op0_xhigh,
1468 NULL_RTX, 0, OPTAB_DIRECT);
1471 temp = expand_binop (word_mode, add_optab, temp,
1472 product_high, product_high,
1475 if (temp != 0 && temp != product_high)
1476 emit_move_insn (product_high, temp);
1478 emit_move_insn (operand_subword (product, high, 1, mode), product_high);
1482 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1484 temp = emit_move_insn (product, product);
1485 set_unique_reg_note (temp,
1487 gen_rtx_fmt_ee (MULT, mode,
1496 /* If we get here, we couldn't do it for some reason even though we
1497 originally thought we could. Delete anything we've emitted in
1500 delete_insns_since (last);
1503 /* Open-code the vector operations if we have no hardware support
1505 if (class == MODE_VECTOR_INT || class == MODE_VECTOR_FLOAT)
1506 return expand_vector_binop (mode, binoptab, op0, op1, target,
1507 unsignedp, methods);
1509 /* We need to open-code the complex type operations: '+, -, * and /' */
1511 /* At this point we allow operations between two similar complex
1512 numbers, and also if one of the operands is not a complex number
1513 but rather of MODE_FLOAT or MODE_INT. However, the caller
1514 must make sure that the MODE of the non-complex operand matches
1515 the SUBMODE of the complex operand. */
1517 if (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT)
1519 rtx real0 = 0, imag0 = 0;
1520 rtx real1 = 0, imag1 = 0;
1521 rtx realr, imagr, res;
1526 /* Find the correct mode for the real and imaginary parts. */
1527 enum machine_mode submode = GET_MODE_INNER(mode);
1529 if (submode == BLKmode)
1533 target = gen_reg_rtx (mode);
1537 realr = gen_realpart (submode, target);
1538 imagr = gen_imagpart (submode, target);
1540 if (GET_MODE (op0) == mode)
1542 real0 = gen_realpart (submode, op0);
1543 imag0 = gen_imagpart (submode, op0);
1548 if (GET_MODE (op1) == mode)
1550 real1 = gen_realpart (submode, op1);
1551 imag1 = gen_imagpart (submode, op1);
1556 if (real0 == 0 || real1 == 0 || ! (imag0 != 0 || imag1 != 0))
1559 switch (binoptab->code)
1562 /* (a+ib) + (c+id) = (a+c) + i(b+d) */
1564 /* (a+ib) - (c+id) = (a-c) + i(b-d) */
1565 res = expand_binop (submode, binoptab, real0, real1,
1566 realr, unsignedp, methods);
1570 else if (res != realr)
1571 emit_move_insn (realr, res);
1573 if (imag0 != 0 && imag1 != 0)
1574 res = expand_binop (submode, binoptab, imag0, imag1,
1575 imagr, unsignedp, methods);
1576 else if (imag0 != 0)
1578 else if (binoptab->code == MINUS)
1579 res = expand_unop (submode,
1580 binoptab == subv_optab ? negv_optab : neg_optab,
1581 imag1, imagr, unsignedp);
1587 else if (res != imagr)
1588 emit_move_insn (imagr, res);
1594 /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
1596 if (imag0 != 0 && imag1 != 0)
1600 /* Don't fetch these from memory more than once. */
1601 real0 = force_reg (submode, real0);
1602 real1 = force_reg (submode, real1);
1603 imag0 = force_reg (submode, imag0);
1604 imag1 = force_reg (submode, imag1);
1606 temp1 = expand_binop (submode, binoptab, real0, real1, NULL_RTX,
1607 unsignedp, methods);
1609 temp2 = expand_binop (submode, binoptab, imag0, imag1, NULL_RTX,
1610 unsignedp, methods);
1612 if (temp1 == 0 || temp2 == 0)
1617 binoptab == smulv_optab ? subv_optab : sub_optab,
1618 temp1, temp2, realr, unsignedp, methods));
1622 else if (res != realr)
1623 emit_move_insn (realr, res);
1625 temp1 = expand_binop (submode, binoptab, real0, imag1,
1626 NULL_RTX, unsignedp, methods);
1628 /* Avoid expanding redundant multiplication for the common
1629 case of squaring a complex number. */
1630 if (rtx_equal_p (real0, real1) && rtx_equal_p (imag0, imag1))
1633 temp2 = expand_binop (submode, binoptab, real1, imag0,
1634 NULL_RTX, unsignedp, methods);
1636 if (temp1 == 0 || temp2 == 0)
1641 binoptab == smulv_optab ? addv_optab : add_optab,
1642 temp1, temp2, imagr, unsignedp, methods));
1646 else if (res != imagr)
1647 emit_move_insn (imagr, res);
1653 /* Don't fetch these from memory more than once. */
1654 real0 = force_reg (submode, real0);
1655 real1 = force_reg (submode, real1);
1657 res = expand_binop (submode, binoptab, real0, real1,
1658 realr, unsignedp, methods);
1661 else if (res != realr)
1662 emit_move_insn (realr, res);
1665 res = expand_binop (submode, binoptab,
1666 real1, imag0, imagr, unsignedp, methods);
1668 res = expand_binop (submode, binoptab,
1669 real0, imag1, imagr, unsignedp, methods);
1673 else if (res != imagr)
1674 emit_move_insn (imagr, res);
1681 /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
1685 /* (a+ib) / (c+i0) = (a/c) + i(b/c) */
1687 /* Don't fetch these from memory more than once. */
1688 real1 = force_reg (submode, real1);
1690 /* Simply divide the real and imaginary parts by `c' */
1691 if (class == MODE_COMPLEX_FLOAT)
1692 res = expand_binop (submode, binoptab, real0, real1,
1693 realr, unsignedp, methods);
1695 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
1696 real0, real1, realr, unsignedp);
1700 else if (res != realr)
1701 emit_move_insn (realr, res);
1703 if (class == MODE_COMPLEX_FLOAT)
1704 res = expand_binop (submode, binoptab, imag0, real1,
1705 imagr, unsignedp, methods);
1707 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
1708 imag0, real1, imagr, unsignedp);
1712 else if (res != imagr)
1713 emit_move_insn (imagr, res);
1719 switch (flag_complex_divide_method)
1722 ok = expand_cmplxdiv_straight (real0, real1, imag0, imag1,
1723 realr, imagr, submode,
1729 ok = expand_cmplxdiv_wide (real0, real1, imag0, imag1,
1730 realr, imagr, submode,
1750 if (binoptab->code != UNKNOWN)
1752 = gen_rtx_fmt_ee (binoptab->code, mode,
1753 copy_rtx (op0), copy_rtx (op1));
1757 emit_no_conflict_block (seq, target, op0, op1, equiv_value);
1763 /* It can't be open-coded in this mode.
1764 Use a library call if one is available and caller says that's ok. */
1766 if (binoptab->handlers[(int) mode].libfunc
1767 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1771 enum machine_mode op1_mode = mode;
1778 op1_mode = word_mode;
1779 /* Specify unsigned here,
1780 since negative shift counts are meaningless. */
1781 op1x = convert_to_mode (word_mode, op1, 1);
1784 if (GET_MODE (op0) != VOIDmode
1785 && GET_MODE (op0) != mode)
1786 op0 = convert_to_mode (mode, op0, unsignedp);
1788 /* Pass 1 for NO_QUEUE so we don't lose any increments
1789 if the libcall is cse'd or moved. */
1790 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1791 NULL_RTX, LCT_CONST, mode, 2,
1792 op0, mode, op1x, op1_mode);
1794 insns = get_insns ();
1797 target = gen_reg_rtx (mode);
1798 emit_libcall_block (insns, target, value,
1799 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1804 delete_insns_since (last);
1806 /* It can't be done in this mode. Can we do it in a wider mode? */
1808 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1809 || methods == OPTAB_MUST_WIDEN))
1811 /* Caller says, don't even try. */
1812 delete_insns_since (entry_last);
1816 /* Compute the value of METHODS to pass to recursive calls.
1817 Don't allow widening to be tried recursively. */
1819 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1821 /* Look for a wider mode of the same class for which it appears we can do
1824 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1826 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1827 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1829 if ((binoptab->handlers[(int) wider_mode].insn_code
1830 != CODE_FOR_nothing)
1831 || (methods == OPTAB_LIB
1832 && binoptab->handlers[(int) wider_mode].libfunc))
1834 rtx xop0 = op0, xop1 = op1;
1837 /* For certain integer operations, we need not actually extend
1838 the narrow operands, as long as we will truncate
1839 the results to the same narrowness. */
1841 if ((binoptab == ior_optab || binoptab == and_optab
1842 || binoptab == xor_optab
1843 || binoptab == add_optab || binoptab == sub_optab
1844 || binoptab == smul_optab || binoptab == ashl_optab)
1845 && class == MODE_INT)
1848 xop0 = widen_operand (xop0, wider_mode, mode,
1849 unsignedp, no_extend);
1851 /* The second operand of a shift must always be extended. */
1852 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1853 no_extend && binoptab != ashl_optab);
1855 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1856 unsignedp, methods);
1859 if (class != MODE_INT)
1862 target = gen_reg_rtx (mode);
1863 convert_move (target, temp, 0);
1867 return gen_lowpart (mode, temp);
1870 delete_insns_since (last);
1875 delete_insns_since (entry_last);
1879 /* Like expand_binop, but for open-coding vectors binops. */
/* MODE is a vector mode; OP0 and OP1 are the vector operands, TARGET
   (if nonzero) is a suggested place for the result, and UNSIGNEDP and
   METHODS are interpreted as for expand_binop.  The operation is
   open-coded by operating on one element (or, when a suitable
   narrower vector mode exists, one sub-vector) at a time.  */
1882 expand_vector_binop (enum machine_mode mode, optab binoptab, rtx op0,
1883 rtx op1, rtx target, int unsignedp,
1884 enum optab_methods methods)
1886 enum machine_mode submode, tmode;
1887 int size, elts, subsize, subbitsize, i;
1888 rtx t, a, b, res, seq;
1889 enum mode_class class;
1891 class = GET_MODE_CLASS (mode);
/* SIZE is the size in bytes of the whole vector; SUBMODE is the mode
   of a single element.  */
1893 size = GET_MODE_SIZE (mode);
1894 submode = GET_MODE_INNER (mode);
1896 /* Search for the widest vector mode with the same inner mode that is
1897 still narrower than MODE and that allows to open-code this operator.
1898 Note, if we find such a mode and the handler later decides it can't
1899 do the expansion, we'll be called recursively with the narrower mode. */
1900 for (tmode = GET_CLASS_NARROWEST_MODE (class);
1901 GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode);
1902 tmode = GET_MODE_WIDER_MODE (tmode))
1904 if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode)
1905 && binoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)
/* For some operation codes the whole vector can instead be treated as
   a single wide integer, if an integer mode of that size exists.  */
1909 switch (binoptab->code)
1914 tmode = int_mode_for_mode (mode);
1915 if (tmode != BLKmode)
/* From here on: the generic element-by-element expansion.  */
1921 subsize = GET_MODE_SIZE (submode);
1922 subbitsize = GET_MODE_BITSIZE (submode);
1923 elts = size / subsize;
1925 /* If METHODS is OPTAB_DIRECT, we don't insist on the exact mode,
1926 but that we operate on more than one element at a time. */
1927 if (subsize == GET_MODE_UNIT_SIZE (mode) && methods == OPTAB_DIRECT)
1932 /* Errors can leave us with a const0_rtx as operand. */
1933 if (GET_MODE (op0) != mode)
1934 op0 = copy_to_mode_reg (mode, op0);
1935 if (GET_MODE (op1) != mode)
1936 op1 = copy_to_mode_reg (mode, op1);
1939 target = gen_reg_rtx (mode);
1941 for (i = 0; i < elts; ++i)
1943 /* If this is part of a register, and not the first item in the
1944 word, we can't store using a SUBREG - that would clobber
1946 And storing with a SUBREG is only possible for the least
1947 significant part, hence we can't do it for big endian
1948 (unless we want to permute the evaluation order. */
1949 if (GET_CODE (target) == REG
1950 && (BYTES_BIG_ENDIAN
1951 ? subsize < UNITS_PER_WORD
1952 : ((i * subsize) % UNITS_PER_WORD) != 0))
/* T is where element I of the result goes; A and B are the element-I
   sources.  Constants are sliced directly with simplify_gen_subreg;
   anything else is fetched with extract_bit_field.  */
1955 t = simplify_gen_subreg (submode, target, mode, i * subsize);
1956 if (CONSTANT_P (op0))
1957 a = simplify_gen_subreg (submode, op0, mode, i * subsize);
1959 a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp,
1960 NULL_RTX, submode, submode, size);
1961 if (CONSTANT_P (op1))
1962 b = simplify_gen_subreg (submode, op1, mode, i * subsize);
1964 b = extract_bit_field (op1, subbitsize, i * subbitsize, unsignedp,
1965 NULL_RTX, submode, submode, size);
/* Integer element division goes through expand_divmod (truncating
   division); floating division and every other code go through
   expand_binop on the element mode.  */
1967 if (binoptab->code == DIV)
1969 if (class == MODE_VECTOR_FLOAT)
1970 res = expand_binop (submode, binoptab, a, b, t,
1971 unsignedp, methods);
1973 res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
1974 a, b, t, unsignedp);
1977 res = expand_binop (submode, binoptab, a, b, t,
1978 unsignedp, methods);
/* Put the element result into place: by plain move when a subreg of
   TARGET was usable, otherwise via store_bit_field.  */
1984 emit_move_insn (t, res);
1986 store_bit_field (target, subbitsize, i * subbitsize, submode, res,
2002 /* Like expand_unop but for open-coding vector unops. */
/* MODE is a vector mode; OP0 is the operand, TARGET (if nonzero) is a
   suggested place for the result, UNSIGNEDP as for expand_unop.  The
   operation is open-coded one element (or narrower sub-vector) at a
   time.  */
2005 expand_vector_unop (enum machine_mode mode, optab unoptab, rtx op0,
2006 rtx target, int unsignedp)
2008 enum machine_mode submode, tmode;
2009 int size, elts, subsize, subbitsize, i;
/* SIZE is the byte size of the whole vector; SUBMODE the element mode.  */
2012 size = GET_MODE_SIZE (mode);
2013 submode = GET_MODE_INNER (mode);
2015 /* Search for the widest vector mode with the same inner mode that is
2016 still narrower than MODE and that allows to open-code this operator.
2017 Note, if we find such a mode and the handler later decides it can't
2018 do the expansion, we'll be called recursively with the narrower mode. */
2019 for (tmode = GET_CLASS_NARROWEST_MODE (GET_MODE_CLASS (mode));
2020 GET_MODE_SIZE (tmode) < GET_MODE_SIZE (mode);
2021 tmode = GET_MODE_WIDER_MODE (tmode))
2023 if (GET_MODE_INNER (tmode) == GET_MODE_INNER (mode)
2024 && unoptab->handlers[(int) tmode].insn_code != CODE_FOR_nothing)
2027 /* If there is no negate operation, try doing a subtract from zero. */
2028 if (unoptab == neg_optab && GET_MODE_CLASS (submode) == MODE_INT
2029 /* Avoid infinite recursion when an
2030 error has left us with the wrong mode. */
2031 && GET_MODE (op0) == mode)
2034 temp = expand_binop (mode, sub_optab, CONST0_RTX (mode), op0,
2035 target, unsignedp, OPTAB_DIRECT);
/* One's complement of a whole vector can also be done on the vector
   viewed as one wide integer, if such an integer mode exists.  */
2040 if (unoptab == one_cmpl_optab)
2042 tmode = int_mode_for_mode (mode);
2043 if (tmode != BLKmode)
/* From here on: the generic element-by-element expansion.  */
2047 subsize = GET_MODE_SIZE (submode);
2048 subbitsize = GET_MODE_BITSIZE (submode);
2049 elts = size / subsize;
2051 /* Errors can leave us with a const0_rtx as operand. */
2052 if (GET_MODE (op0) != mode)
2053 op0 = copy_to_mode_reg (mode, op0);
2056 target = gen_reg_rtx (mode);
2060 for (i = 0; i < elts; ++i)
2062 /* If this is part of a register, and not the first item in the
2063 word, we can't store using a SUBREG - that would clobber
2065 And storing with a SUBREG is only possible for the least
2066 significant part, hence we can't do it for big endian
2067 (unless we want to permute the evaluation order. */
2068 if (GET_CODE (target) == REG
2069 && (BYTES_BIG_ENDIAN
2070 ? subsize < UNITS_PER_WORD
2071 : ((i * subsize) % UNITS_PER_WORD) != 0))
/* T is where element I of the result goes; A is the element-I source,
   taken via subreg for constants, extract_bit_field otherwise.  */
2074 t = simplify_gen_subreg (submode, target, mode, i * subsize);
2075 if (CONSTANT_P (op0))
2076 a = simplify_gen_subreg (submode, op0, mode, i * subsize);
2078 a = extract_bit_field (op0, subbitsize, i * subbitsize, unsignedp,
2079 t, submode, submode, size);
2081 res = expand_unop (submode, unoptab, a, t, unsignedp);
/* Put the element result into place: plain move when a subreg of
   TARGET was usable, otherwise store_bit_field.  */
2084 emit_move_insn (t, res);
2086 store_bit_field (target, subbitsize, i * subbitsize, submode, res,
2097 /* Expand a binary operator which has both signed and unsigned forms.
2098 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2101 If we widen unsigned operands, we may use a signed wider operation instead
2102 of an unsigned wider operation, since the result would be the same. */
/* Strategy ladder, each step gated by METHODS: (1) direct insn in
   MODE, (2) widen using a signed optab, (3) widen using the unsigned
   optab, (4) library call in MODE, (5) widen to a library call.  */
2105 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2106 rtx op0, rtx op1, rtx target, int unsignedp,
2107 enum optab_methods methods)
/* The optab matching the requested signedness, for the direct and
   direct-libcall attempts.  */
2110 optab direct_optab = unsignedp ? uoptab : soptab;
2111 struct optab wide_soptab;
2113 /* Do it without widening, if possible. */
2114 temp = expand_binop (mode, direct_optab, op0, op1, target,
2115 unsignedp, OPTAB_DIRECT);
2116 if (temp || methods == OPTAB_DIRECT)
2119 /* Try widening to a signed int. Make a fake signed optab that
2120 hides any signed insn for direct use. */
/* The copy of *SOPTAB has its MODE entry cleared so expand_binop
   cannot just use a signed insn or libfunc at the original width --
   only genuinely wider modes are considered.  */
2121 wide_soptab = *soptab;
2122 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
2123 wide_soptab.handlers[(int) mode].libfunc = 0;
2125 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2126 unsignedp, OPTAB_WIDEN);
2128 /* For unsigned operands, try widening to an unsigned int. */
2129 if (temp == 0 && unsignedp)
2130 temp = expand_binop (mode, uoptab, op0, op1, target,
2131 unsignedp, OPTAB_WIDEN);
2132 if (temp || methods == OPTAB_WIDEN)
2135 /* Use the right width lib call if that exists. */
2136 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2137 if (temp || methods == OPTAB_LIB)
2140 /* Must widen and use a lib call, use either signed or unsigned. */
2141 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2142 unsignedp, methods);
/* Last resort: widen via the unsigned optab.  */
2146 return expand_binop (mode, uoptab, op0, op1, target,
2147 unsignedp, methods);
2151 /* Generate code to perform an operation specified by BINOPTAB
2152 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2153 We assume that the order of the operands for the instruction
2154 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2155 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2157 Either TARG0 or TARG1 may be zero, but what that means is that
2158 the result is not actually wanted. We will generate it into
2159 a dummy pseudo-reg and discard it. They may not both be zero.
2161 Returns 1 if this operation can be performed; 0 if not. */
2164 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
/* The operation mode is taken from whichever target is present.  */
2167 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2168 enum mode_class class;
2169 enum machine_mode wider_mode;
/* Remember the insn stream position on entry so a total failure can
   discard everything emitted here.  */
2170 rtx entry_last = get_last_insn ();
2173 class = GET_MODE_CLASS (mode);
2175 op0 = protect_from_queue (op0, 0);
2176 op1 = protect_from_queue (op1, 0);
2180 op0 = force_not_mem (op0);
2181 op1 = force_not_mem (op1);
2184 /* If we are inside an appropriately-short loop and one operand is an
2185 expensive constant, force it into a register. */
2186 if (CONSTANT_P (op0) && preserve_subexpressions_p ()
2187 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
2188 op0 = force_reg (mode, op0);
2190 if (CONSTANT_P (op1) && preserve_subexpressions_p ()
2191 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
2192 op1 = force_reg (mode, op1);
/* Provide dummy pseudos for any result the caller didn't want.  */
2195 targ0 = protect_from_queue (targ0, 1);
2197 targ0 = gen_reg_rtx (mode);
2199 targ1 = protect_from_queue (targ1, 1);
2201 targ1 = gen_reg_rtx (mode);
2203 /* Record where to go back to if we fail. */
2204 last = get_last_insn ();
/* First choice: a direct insn pattern for this mode.  */
2206 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2208 int icode = (int) binoptab->handlers[(int) mode].insn_code;
2209 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2210 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2212 rtx xop0 = op0, xop1 = op1;
2214 /* In case the insn wants input operands in modes different from
2215 those of the actual operands, convert the operands. It would
2216 seem that we don't need to convert CONST_INTs, but we do, so
2217 that they're properly zero-extended, sign-extended or truncated
2220 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2221 xop0 = convert_modes (mode0,
2222 GET_MODE (op0) != VOIDmode
2227 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2228 xop1 = convert_modes (mode1,
2229 GET_MODE (op1) != VOIDmode
2234 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2235 if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
2236 xop0 = copy_to_mode_reg (mode0, xop0)
2238 if (! (*insn_data[icode].operand[2].predicate) (xop1, mode1))
2239 xop1 = copy_to_mode_reg (mode1, xop1);
2241 /* We could handle this, but we should always be called with a pseudo
2242 for our targets and all insns should take them as outputs. */
2243 if (! (*insn_data[icode].operand[0].predicate) (targ0, mode)
2244 || ! (*insn_data[icode].operand[3].predicate) (targ1, mode))
2247 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2254 delete_insns_since (last);
2257 /* It can't be done in this mode. Can we do it in a wider mode? */
2259 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2261 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2262 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2264 if (binoptab->handlers[(int) wider_mode].insn_code
2265 != CODE_FOR_nothing)
/* Recurse at the wider mode with converted operands, then narrow
   both results back into the caller's targets.  */
2267 rtx t0 = gen_reg_rtx (wider_mode);
2268 rtx t1 = gen_reg_rtx (wider_mode);
2269 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2270 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2272 if (expand_twoval_binop (binoptab, cop0, cop1,
2275 convert_move (targ0, t0, unsignedp);
2276 convert_move (targ1, t1, unsignedp);
2280 delete_insns_since (last);
/* All attempts failed; discard everything emitted since entry.  */
2285 delete_insns_since (entry_last);
2289 /* Wrapper around expand_unop which takes an rtx code to specify
2290 the operation to perform, not an optab pointer. All other
2291 arguments are the same. */
2293 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2294 rtx target, int unsignedp)
/* Map the rtx CODE to its optab via the code_to_optab table (indexed
   by rtx code), then delegate to expand_unop.  */
2296 optab unop = code_to_optab[(int) code];
2300 return expand_unop (mode, unop, op0, target, unsignedp);
/* Try computing clz of a narrow MODE through a wider mode that has a
   clz pattern, then subtract the extra leading zero bits contributed
   by the widening -- i.e. the formula stated just below.  */
2306 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2308 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2310 enum mode_class class = GET_MODE_CLASS (mode);
2311 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2313 enum machine_mode wider_mode;
/* Scan successively wider modes for one with a clz insn.  */
2314 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2315 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2317 if (clz_optab->handlers[(int) wider_mode].insn_code
2318 != CODE_FOR_nothing)
2320 rtx xop0, temp, last;
/* Remember the stream position so a failed attempt can be undone.  */
2322 last = get_last_insn ();
2325 target = gen_reg_rtx (mode);
/* Zero-extend the operand (unsignedp == true, no_extend == false),
   take clz in the wider mode, then subtract the width difference.  */
2326 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2327 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2329 temp = expand_binop (wider_mode, sub_optab, temp,
2330 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2331 - GET_MODE_BITSIZE (mode)),
2332 target, true, OPTAB_DIRECT);
2334 delete_insns_since (last);
2343 /* Try calculating (parity x) as (and (popcount x) 1), where
2344 popcount can also be done in a wider mode. */
2346 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2348 enum mode_class class = GET_MODE_CLASS (mode);
2349 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2351 enum machine_mode wider_mode;
/* Unlike widen_clz, the scan starts at MODE itself: a popcount insn
   at the original width is usable directly.  */
2352 for (wider_mode = mode; wider_mode != VOIDmode;
2353 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2355 if (popcount_optab->handlers[(int) wider_mode].insn_code
2356 != CODE_FOR_nothing)
2358 rtx xop0, temp, last;
/* Remember the stream position so a failed attempt can be undone.  */
2360 last = get_last_insn ();
2363 target = gen_reg_rtx (mode);
/* Zero-extend (parity is unaffected by added zero bits), take the
   popcount, then mask with 1 to get the parity bit.  */
2364 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2365 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2368 temp = expand_binop (wider_mode, and_optab, temp, GEN_INT (1),
2369 target, true, OPTAB_DIRECT);
2371 delete_insns_since (last);
2380 /* Generate code to perform an operation specified by UNOPTAB
2381 on operand OP0, with result having machine-mode MODE.
2383 UNSIGNEDP is for the case where we have to widen the operands
2384 to perform the operation. It says to use zero-extension.
2386 If TARGET is nonzero, the value
2387 is generated there, if it is convenient to do so.
2388 In all cases an rtx is returned for the locus of the value;
2389 this may or may not be TARGET.
   The expander tries, in order: a direct insn for MODE, open-coding
   in a wider mode, mode-specific open-coded tricks, a library call in
   MODE, a library call in a wider mode, and finally (for NEG) a
   subtract from zero.  */
2392 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2395 enum mode_class class;
2396 enum machine_mode wider_mode;
2398 rtx last = get_last_insn ();
2401 class = GET_MODE_CLASS (mode);
2403 op0 = protect_from_queue (op0, 0);
2407 op0 = force_not_mem (op0);
2411 target = protect_from_queue (target, 1);
/* Attempt 1: the target has a direct insn pattern for MODE.  */
2413 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2415 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2416 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2422 temp = gen_reg_rtx (mode);
2424 if (GET_MODE (xop0) != VOIDmode
2425 && GET_MODE (xop0) != mode0)
2426 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2428 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2430 if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
2431 xop0 = copy_to_mode_reg (mode0, xop0);
2433 if (! (*insn_data[icode].operand[0].predicate) (temp, mode))
2434 temp = gen_reg_rtx (mode);
2436 pat = GEN_FCN (icode) (temp, xop0);
/* If a REG_EQUAL note cannot be attached to the multi-insn sequence,
   throw the insns away and re-expand without reusing TARGET.  */
2439 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2440 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2442 delete_insns_since (last);
2443 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2451 delete_insns_since (last);
2454 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2456 /* Widening clz needs special treatment. */
2457 if (unoptab == clz_optab)
2459 temp = widen_clz (mode, op0, target);
/* Attempt 2: open-code in a wider mode and truncate back to MODE.  */
2466 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2467 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2468 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2470 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2474 /* For certain operations, we need not actually extend
2475 the narrow operand, as long as we will truncate the
2476 results to the same narrowness. */
2478 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2479 (unoptab == neg_optab
2480 || unoptab == one_cmpl_optab)
2481 && class == MODE_INT);
2483 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
/* Integer results can be truncated via a lowpart; other classes
   need an explicit conversion move.  */
2488 if (class != MODE_INT)
2491 target = gen_reg_rtx (mode);
2492 convert_move (target, temp, 0);
2496 return gen_lowpart (mode, temp);
2499 delete_insns_since (last);
2503 /* These can be done a word at a time. */
2504 if (unoptab == one_cmpl_optab
2505 && class == MODE_INT
2506 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2507 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2512 if (target == 0 || target == op0)
2513 target = gen_reg_rtx (mode)
2517 /* Do the actual arithmetic. */
2518 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2520 rtx target_piece = operand_subword (target, i, 1, mode);
2521 rtx x = expand_unop (word_mode, unoptab,
2522 operand_subword_force (op0, i, mode),
2523 target_piece, unsignedp);
2525 if (target_piece != x)
2526 emit_move_insn (target_piece, x);
2529 insns = get_insns ();
2532 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2533 gen_rtx_fmt_e (unoptab->code, mode,
2538 /* Open-code the complex negation operation. */
2539 else if (unoptab->code == NEG
2540 && (class == MODE_COMPLEX_FLOAT || class == MODE_COMPLEX_INT))
2546 /* Find the correct mode for the real and imaginary parts. */
2547 enum machine_mode submode = GET_MODE_INNER (mode);
2549 if (submode == BLKmode)
2553 target = gen_reg_rtx (mode);
/* Negate the imaginary and real parts independently.  */
2557 target_piece = gen_imagpart (submode, target);
2558 x = expand_unop (submode, unoptab,
2559 gen_imagpart (submode, op0),
2560 target_piece, unsignedp);
2561 if (target_piece != x)
2562 emit_move_insn (target_piece, x);
2564 target_piece = gen_realpart (submode, target);
2565 x = expand_unop (submode, unoptab,
2566 gen_realpart (submode, op0),
2567 target_piece, unsignedp);
2568 if (target_piece != x)
2569 emit_move_insn (target_piece, x);
2574 emit_no_conflict_block (seq, target, op0, 0,
2575 gen_rtx_fmt_e (unoptab->code, mode,
2580 /* Try negating floating point values by flipping the sign bit. */
2581 if (unoptab->code == NEG && class == MODE_FLOAT
2582 && GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT)
2584 const struct real_format *fmt = real_format_for_mode[mode - QFmode];
2585 enum machine_mode imode = int_mode_for_mode (mode);
2586 int bitpos = (fmt != 0) ? fmt->signbit : -1;
2588 if (imode != BLKmode && bitpos >= 0 && fmt->has_signed_zero)
2590 HOST_WIDE_INT hi, lo;
2591 rtx last = get_last_insn ();
2593 /* Handle targets with different FP word orders. */
2594 if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
2596 int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
2597 int word = nwords - (bitpos / BITS_PER_WORD) - 1;
2598 bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD;
/* Build the single-bit sign mask as a double-word constant.  */
2601 if (bitpos < HOST_BITS_PER_WIDE_INT)
2604 lo = (HOST_WIDE_INT) 1 << bitpos;
2608 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2611 temp = expand_binop (imode, xor_optab,
2612 gen_lowpart (imode, op0),
2613 immed_double_const (lo, hi, imode),
2614 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2616 return gen_lowpart (mode, temp);
2617 delete_insns_since (last);
2621 /* Try calculating parity (x) as popcount (x) % 2. */
2622 if (unoptab == parity_optab)
2624 temp = expand_parity (mode, op0, target);
2630 /* Now try a library call in this mode. */
2631 if (unoptab->handlers[(int) mode].libfunc)
2635 enum machine_mode outmode = mode;
2637 /* All of these functions return small values. Thus we choose to
2638 have them return something that isn't a double-word. */
2639 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2640 || unoptab == popcount_optab || unoptab == parity_optab)
2641 outmode = TYPE_MODE (integer_type_node);
2645 /* Pass 1 for NO_QUEUE so we don't lose any increments
2646 if the libcall is cse'd or moved. */
2647 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2648 NULL_RTX, LCT_CONST, outmode,
2650 insns = get_insns ();
2653 target = gen_reg_rtx (outmode);
2654 emit_libcall_block (insns, target, value,
2655 gen_rtx_fmt_e (unoptab->code, mode, op0));
/* Vector modes are handled by a dedicated expander.  */
2660 if (class == MODE_VECTOR_FLOAT || class == MODE_VECTOR_INT)
2661 return expand_vector_unop (mode, unoptab, op0, target, unsignedp);
2663 /* It can't be done in this mode. Can we do it in a wider mode? */
2665 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2667 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2668 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2670 if ((unoptab->handlers[(int) wider_mode].insn_code
2671 != CODE_FOR_nothing)
2672 || unoptab->handlers[(int) wider_mode].libfunc)
2676 /* For certain operations, we need not actually extend
2677 the narrow operand, as long as we will truncate the
2678 results to the same narrowness. */
2680 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2681 (unoptab == neg_optab
2682 || unoptab == one_cmpl_optab)
2683 && class == MODE_INT);
2685 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2688 /* If we are generating clz using wider mode, adjust the
2690 if (unoptab == clz_optab && temp != 0)
2691 temp = expand_binop (wider_mode, sub_optab, temp,
2692 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2693 - GET_MODE_BITSIZE (mode)),
2694 target, true, OPTAB_DIRECT);
2698 if (class != MODE_INT)
2701 target = gen_reg_rtx (mode);
2702 convert_move (target, temp, 0);
2706 return gen_lowpart (mode, temp);
2709 delete_insns_since (last);
2714 /* If there is no negate operation, try doing a subtract from zero.
2715 The US Software GOFAST library needs this. */
2716 if (unoptab->code == NEG)
2719 temp = expand_binop (mode,
2720 unoptab == negv_optab ? subv_optab : sub_optab,
2721 CONST0_RTX (mode), op0,
2722 target, unsignedp, OPTAB_LIB_WIDEN);
2730 /* Emit code to compute the absolute value of OP0, with result to
2731 TARGET if convenient. (TARGET may be 0.) The return value says
2732 where the result actually is to be found.
2734 MODE is the mode of the operand; the mode of the result is
2735 different but can be deduced from MODE.
   This variant never emits a conditional jump; it returns 0 if no
   jump-free expansion is possible.  */
2740 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2741 int result_unsignedp)
/* NOTE(review): the condition guarding this assignment is elided in
   this view -- likely !flag_trapv; confirm against the full source.  */
2746 result_unsignedp = 1;
2748 /* First try to do it with a special abs instruction. */
2749 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2754 /* For floating point modes, try clearing the sign bit. */
2755 if (GET_MODE_CLASS (mode) == MODE_FLOAT
2756 && GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT)
2758 const struct real_format *fmt = real_format_for_mode[mode - QFmode];
2759 enum machine_mode imode = int_mode_for_mode (mode);
2760 int bitpos = (fmt != 0) ? fmt->signbit : -1;
2762 if (imode != BLKmode && bitpos >= 0)
2764 HOST_WIDE_INT hi, lo;
2765 rtx last = get_last_insn ();
2767 /* Handle targets with different FP word orders. */
2768 if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
2770 int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
2771 int word = nwords - (bitpos / BITS_PER_WORD) - 1;
2772 bitpos = word * BITS_PER_WORD + bitpos % BITS_PER_WORD;
/* Build a mask with only the sign bit set, then AND with its
   complement to clear that bit.  */
2775 if (bitpos < HOST_BITS_PER_WIDE_INT)
2778 lo = (HOST_WIDE_INT) 1 << bitpos;
2782 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2785 temp = expand_binop (imode, and_optab,
2786 gen_lowpart (imode, op0),
2787 immed_double_const (~lo, ~hi, imode),
2788 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2790 return gen_lowpart (mode, temp);
2791 delete_insns_since (last);
2795 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2796 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2798 rtx last = get_last_insn ();
2800 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2802 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2808 delete_insns_since (last);
2811 /* If this machine has expensive jumps, we can do integer absolute
2812 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2813 where W is the width of MODE. */
2815 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2817 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2818 size_int (GET_MODE_BITSIZE (mode) - 1),
2821 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2824 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2825 temp, extended, target, 0, OPTAB_LIB_WIDEN);
/* Emit code to compute the absolute value of OP0, like
   expand_abs_nojump, but fall back to a compare-against-zero and
   conditional negate when no jump-free expansion exists.  SAFE
   nonzero says TARGET may be kept live across the emitted jump;
   otherwise a fresh pseudo is used.  */
2835 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2836 int result_unsignedp, int safe)
/* NOTE(review): the guard for this assignment is elided here --
   likely !flag_trapv, as in expand_abs_nojump; confirm.  */
2841 result_unsignedp = 1;
2843 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2847 /* If that does not win, use conditional jump and negate. */
2849 /* It is safe to use the target if it is the same
2850 as the source if this is also a pseudo register */
2851 if (op0 == target && GET_CODE (op0) == REG
2852 && REGNO (op0) >= FIRST_PSEUDO_REGISTER
2855 op1 = gen_label_rtx ();
2856 if (target == 0 || ! safe
2857 || GET_MODE (target) != mode
2858 || (GET_CODE (target) == MEM && MEM_VOLATILE_P (target))
2859 || (GET_CODE (target) == REG
2860 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2861 target = gen_reg_rtx (mode);
2863 emit_move_insn (target, op0);
2866 /* If this mode is an integer too wide to compare properly,
2867 compare word by word. Rely on CSE to optimize constant cases. */
2868 if (GET_MODE_CLASS (mode) == MODE_INT
2869 && ! can_compare_p (GE, mode, ccp_jump))
2870 do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
2873 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2874 NULL_RTX, NULL_RTX, op1);
/* Fall-through path: the value was negative, so negate it into
   TARGET; the jump above skips this when OP0 >= 0.  */
2876 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2879 emit_move_insn (target, op0);
2885 /* Emit code to compute the absolute value of OP0, with result to
2886 TARGET if convenient. (TARGET may be 0.) The return value says
2887 where the result actually is to be found.
2889 MODE is the mode of the operand; the mode of the result is
2890 different but can be deduced from MODE.
2892 UNSIGNEDP is relevant for complex integer modes.
   The result mode is the component (SUBMODE) of the complex MODE:
   abs of a complex value is a real magnitude.  */
2895 expand_complex_abs (enum machine_mode mode, rtx op0, rtx target,
2898 enum mode_class class = GET_MODE_CLASS (mode);
2899 enum machine_mode wider_mode;
2901 rtx entry_last = get_last_insn ();
2904 optab this_abs_optab;
2906 /* Find the correct mode for the real and imaginary parts. */
2907 enum machine_mode submode = GET_MODE_INNER (mode);
2909 if (submode == BLKmode)
2912 op0 = protect_from_queue (op0, 0);
2916 op0 = force_not_mem (op0);
2919 last = get_last_insn ();
2922 target = protect_from_queue (target, 1);
/* Use the trapping variant for signed integer modes under -ftrapv.  */
2924 this_abs_optab = ! unsignedp && flag_trapv
2925 && (GET_MODE_CLASS(mode) == MODE_INT)
2926 ? absv_optab : abs_optab;
/* Attempt 1: a direct insn pattern for this complex mode.  */
2928 if (this_abs_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2930 int icode = (int) this_abs_optab->handlers[(int) mode].insn_code;
2931 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2937 temp = gen_reg_rtx (submode);
2939 if (GET_MODE (xop0) != VOIDmode
2940 && GET_MODE (xop0) != mode0)
2941 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2943 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2945 if (! (*insn_data[icode].operand[1].predicate) (xop0, mode0))
2946 xop0 = copy_to_mode_reg (mode0, xop0);
2948 if (! (*insn_data[icode].operand[0].predicate) (temp, submode))
2949 temp = gen_reg_rtx (submode);
2951 pat = GEN_FCN (icode) (temp, xop0);
/* If the equal-note cannot be attached, discard the insns and
   re-expand without reusing TARGET.  */
2954 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2955 && ! add_equal_note (pat, temp, this_abs_optab->code, xop0,
2958 delete_insns_since (last);
2959 return expand_unop (mode, this_abs_optab, op0, NULL_RTX,
2968 delete_insns_since (last);
2971 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2973 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2974 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2976 if (this_abs_optab->handlers[(int) wider_mode].insn_code
2977 != CODE_FOR_nothing)
2981 xop0 = convert_modes (wider_mode, mode, xop0, unsignedp);
2982 temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp);
/* Complex-int results can use a lowpart truncation; other
   classes need a real conversion move.  */
2986 if (class != MODE_COMPLEX_INT)
2989 target = gen_reg_rtx (submode);
2990 convert_move (target, temp, 0);
2994 return gen_lowpart (submode, temp);
2997 delete_insns_since (last);
3001 /* Open-code the complex absolute-value operation
3002 if we can open-code sqrt. Otherwise it's not worth while. */
3003 if (sqrt_optab->handlers[(int) submode].insn_code != CODE_FOR_nothing
3006 rtx real, imag, total;
3008 real = gen_realpart (submode, op0);
3009 imag = gen_imagpart (submode, op0);
3011 /* Square both parts. */
3012 real = expand_mult (submode, real, real, NULL_RTX, 0);
3013 imag = expand_mult (submode, imag, imag, NULL_RTX, 0);
3015 /* Sum the parts. */
3016 total = expand_binop (submode, add_optab, real, imag, NULL_RTX,
3017 0, OPTAB_LIB_WIDEN);
3019 /* Get sqrt in TARGET. Set TARGET to where the result is. */
3020 target = expand_unop (submode, sqrt_optab, total, target, 0);
3022 delete_insns_since (last);
3027 /* Now try a library call in this mode. */
3028 if (this_abs_optab->handlers[(int) mode].libfunc)
3035 /* Pass 1 for NO_QUEUE so we don't lose any increments
3036 if the libcall is cse'd or moved. */
3037 value = emit_library_call_value (abs_optab->handlers[(int) mode].libfunc,
3038 NULL_RTX, LCT_CONST, submode, 1, op0, mode);
3039 insns = get_insns ();
3042 target = gen_reg_rtx (submode);
3043 emit_libcall_block (insns, target, value,
3044 gen_rtx_fmt_e (this_abs_optab->code, mode, op0));
3049 /* It can't be done in this mode. Can we do it in a wider mode? */
3051 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
3052 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3054 if ((this_abs_optab->handlers[(int) wider_mode].insn_code
3055 != CODE_FOR_nothing)
3056 || this_abs_optab->handlers[(int) wider_mode].libfunc)
3060 xop0 = convert_modes (wider_mode, mode, xop0, unsignedp);
3062 temp = expand_complex_abs (wider_mode, xop0, NULL_RTX, unsignedp);
3066 if (class != MODE_COMPLEX_INT)
3069 target = gen_reg_rtx (submode);
3070 convert_move (target, temp, 0);
3074 return gen_lowpart (submode, temp);
3077 delete_insns_since (last);
/* Total failure: remove everything emitted since entry.  */
3081 delete_insns_since (entry_last);
3085 /* Generate an instruction whose insn-code is INSN_CODE,
3086 with two operands: an output TARGET and an input OP0.
3087 TARGET *must* be nonzero, and the output is always stored there.
3088 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3089 the value that is stored into TARGET.
   If the insn's output predicate rejects TARGET, the result is
   computed in a fresh pseudo and copied to TARGET afterward.  */
3092 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3095 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3098 temp = target = protect_from_queue (target, 1);
3100 op0 = protect_from_queue (op0, 0);
3102 /* Sign and zero extension from memory is often done specially on
3103 RISC machines, so forcing into a register here can pessimize
3105 if (flag_force_mem && code != SIGN_EXTEND && code != ZERO_EXTEND)
3106 op0 = force_not_mem (op0);
3108 /* Now, if insn does not accept our operands, put them into pseudos. */
3110 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
3111 op0 = copy_to_mode_reg (mode0, op0);
3113 if (! (*insn_data[icode].operand[0].predicate) (temp, GET_MODE (temp))
3114 || (flag_force_mem && GET_CODE (temp) == MEM))
3115 temp = gen_reg_rtx (GET_MODE (temp));
3117 pat = GEN_FCN (icode) (temp, op0);
/* Attach a REG_EQUAL note describing the computed value when the
   pattern expanded to multiple insns.  */
3119 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3120 add_equal_note (pat, temp, code, op0, NULL_RTX);
/* Copy into the caller's TARGET if we computed into a pseudo.  */
3125 emit_move_insn (target, temp);
3128 /* Emit code to perform a series of operations on a multi-word quantity, one
3131 Such a block is preceded by a CLOBBER of the output, consists of multiple
3132 insns, each setting one word of the output, and followed by a SET copying
3133 the output to itself.
3135 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3136 note indicating that it doesn't conflict with the (also multi-word)
3137 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3140 INSNS is a block of code generated to perform the operation, not including
3141 the CLOBBER and final copy. All insns that compute intermediate values
3142 are first emitted, followed by the block as described above.
3144 TARGET, OP0, and OP1 are the output and inputs of the operations,
3145 respectively. OP1 may be zero for a unary operation.
3147 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3150 If TARGET is not a register, INSNS is simply emitted with no special
3151 processing. Likewise if anything in INSNS is not an INSN or if
3152 there is a libcall block inside INSNS.
3154 The final insn emitted is returned. */
3157 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3159 rtx prev, next, first, last, insn;
/* Bail out to plain emission when TARGET isn't a register, during
   reload, or when INSNS contains anything but simple insns.  */
3161 if (GET_CODE (target) != REG || reload_in_progress)
3162 return emit_insn (insns);
3164 for (insn = insns; insn; insn = NEXT_INSN (insn))
3165 if (GET_CODE (insn) != INSN
3166 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3167 return emit_insn (insns);
3169 /* First emit all insns that do not store into words of the output and remove
3170 these from the list. */
3171 for (insn = insns; insn; insn = next)
3176 next = NEXT_INSN (insn);
3178 /* Some ports (cris) create an libcall regions at their own. We must
3179 avoid any potential nesting of LIBCALLs. */
3180 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3181 remove_note (insn, note);
3182 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3183 remove_note (insn, note);
/* Find the SET of this insn, looking inside a PARALLEL if needed.  */
3185 if (GET_CODE (PATTERN (insn)) == SET || GET_CODE (PATTERN (insn)) == USE
3186 || GET_CODE (PATTERN (insn)) == CLOBBER)
3187 set = PATTERN (insn);
3188 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3190 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
3191 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
3193 set = XVECEXP (PATTERN (insn), 0, i);
/* Insns that do not write TARGET are unlinked from the list and
   emitted ahead of the protected block.  */
3201 if (! reg_overlap_mentioned_p (target, SET_DEST (set)))
3203 if (PREV_INSN (insn))
3204 NEXT_INSN (PREV_INSN (insn)) = next;
3209 PREV_INSN (next) = PREV_INSN (insn);
3215 prev = get_last_insn ();
3217 /* Now write the CLOBBER of the output, followed by the setting of each
3218 of the words, followed by the final copy. */
3219 if (target != op0 && target != op1)
3220 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3222 for (insn = insns; insn; insn = next)
3224 next = NEXT_INSN (insn);
/* Mark each word-setting insn as not conflicting with the inputs.  */
3227 if (op1 && GET_CODE (op1) == REG)
3228 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3231 if (op0 && GET_CODE (op0) == REG)
3232 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
/* The closing copy of TARGET to itself carries the REG_EQUAL note,
   but only if a move insn exists for TARGET's mode.  */
3236 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3237 != CODE_FOR_nothing)
3239 last = emit_move_insn (target, target);
3241 set_unique_reg_note (last, REG_EQUAL, equiv);
3245 last = get_last_insn ();
3247 /* Remove any existing REG_EQUAL note from "last", or else it will
3248 be mistaken for a note referring to the full contents of the
3249 alleged libcall value when found together with the REG_RETVAL
3250 note added below. An existing note can come from an insn
3251 expansion at "last". */
3252 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3256 first = get_insns ();
3258 first = NEXT_INSN (prev);
3260 /* Encapsulate the block so it gets manipulated as a unit. */
3261 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3263 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
3268 /* Emit code to make a call to a constant function or a library call.
3270 INSNS is a list containing all insns emitted in the call.
3271 These insns leave the result in RESULT. Our block is to copy RESULT
3272 to TARGET, which is logically equivalent to EQUIV.
3274 We first emit any insns that set a pseudo on the assumption that these are
3275 loading constants into registers; doing so allows them to be safely cse'ed
3276 between blocks. Then we emit all the other insns in the block, followed by
3277 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3278 note with an operand of EQUIV.
3280 Moving assignments to pseudos outside of the block is done to improve
3281 the generated code, but is not required to generate correct code,
3282 hence being unable to move an assignment is not grounds for not making
3283 a libcall block. There are two reasons why it is safe to leave these
3284 insns inside the block: First, we know that these pseudos cannot be
3285 used in generated RTL outside the block since they are created for
3286 temporary purposes within the block. Second, CSE will not record the
3287 values of anything set inside a libcall block, so we know they must
3288 be dead at the end of the block.
3290 Except for the first group of insns (the ones setting pseudos), the
3291 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3294 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3296 rtx final_dest = target;
3297 rtx prev, next, first, last, insn;
3299 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3300 into a MEM later. Protect the libcall block from this change. */
3301 if (! REG_P (target) || REG_USERVAR_P (target))
3302 target = gen_reg_rtx (GET_MODE (target));
3304 /* If we're using non-call exceptions, a libcall corresponding to an
3305 operation that may trap may also trap. */
3306 if (flag_non_call_exceptions && may_trap_p (equiv))
3308 for (insn = insns; insn; insn = NEXT_INSN (insn))
3309 if (GET_CODE (insn) == CALL_INSN)
3311 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3313 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3314 remove_note (insn, note);
3318 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3319 reg note to indicate that this call cannot throw or execute a nonlocal
3320 goto (unless there is already a REG_EH_REGION note, in which case
3322 for (insn = insns; insn; insn = NEXT_INSN (insn))
3323 if (GET_CODE (insn) == CALL_INSN)
3325 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3328 XEXP (note, 0) = GEN_INT (-1);
3330 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, GEN_INT (-1),
3334 /* First emit all insns that set pseudos. Remove them from the list as
3335 we go. Avoid insns that set pseudos which were referenced in previous
3336 insns. These can be generated by move_by_pieces, for example,
3337 to update an address. Similarly, avoid insns that reference things
3338 set in previous insns. */
3340 for (insn = insns; insn; insn = next)
3342 rtx set = single_set (insn);
3345 /* Some ports (cris) create an libcall regions at their own. We must
3346 avoid any potential nesting of LIBCALLs. */
3347 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3348 remove_note (insn, note);
3349 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3350 remove_note (insn, note);
3352 next = NEXT_INSN (insn);
/* Hoist an insn out of the block only if it sets a pseudo and
   nothing before it in the block depends on or modifies what it
   reads and writes.  */
3354 if (set != 0 && GET_CODE (SET_DEST (set)) == REG
3355 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
3357 || ((! INSN_P(insns)
3358 || ! reg_mentioned_p (SET_DEST (set), PATTERN (insns)))
3359 && ! reg_used_between_p (SET_DEST (set), insns, insn)
3360 && ! modified_in_p (SET_SRC (set), insns)
3361 && ! modified_between_p (SET_SRC (set), insns, insn))))
3363 if (PREV_INSN (insn))
3364 NEXT_INSN (PREV_INSN (insn)) = next;
3369 PREV_INSN (next) = PREV_INSN (insn);
3374 /* Some ports use a loop to copy large arguments onto the stack.
3375 Don't move anything outside such a loop. */
3376 if (GET_CODE (insn) == CODE_LABEL)
3380 prev = get_last_insn ();
3382 /* Write the remaining insns followed by the final copy. */
3384 for (insn = insns; insn; insn = next)
3386 next = NEXT_INSN (insn);
/* The final copy RESULT -> TARGET carries the REG_EQUAL note,
   but only if TARGET's mode has a move insn.  */
3391 last = emit_move_insn (target, result);
3392 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3393 != CODE_FOR_nothing)
3394 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3397 /* Remove any existing REG_EQUAL note from "last", or else it will
3398 be mistaken for a note referring to the full contents of the
3399 libcall value when found together with the REG_RETVAL note added
3400 below. An existing note can come from an insn expansion at
3402 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
/* If TARGET was replaced by a fresh pseudo above, copy the value
   back to the caller's original destination.  */
3405 if (final_dest != target)
3406 emit_move_insn (final_dest, target);
3409 first = get_insns ();
3411 first = NEXT_INSN (prev);
3413 /* Encapsulate the block so it gets manipulated as a unit. */
3414 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3416 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3417 when the encapsulated region would not be in one basic block,
3418 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3420 bool attach_libcall_retval_notes = true;
3421 next = NEXT_INSN (last);
3422 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3423 if (control_flow_insn_p (insn))
3425 attach_libcall_retval_notes = false;
3429 if (attach_libcall_retval_notes)
3431 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3433 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3439 /* Generate code to store zero in X.
   Thin named wrapper around emit_move_insn with const0_rtx.  */
3442 emit_clr_insn (rtx x)
3444 emit_move_insn (x, const0_rtx);
3447 /* Generate code to store 1 in X
3448 assuming it contains zero beforehand.
   Thin named wrapper around emit_move_insn with const1_rtx.  */
3451 emit_0_to_1_insn (rtx x)
3453 emit_move_insn (x, const1_rtx);
3456 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3457 PURPOSE describes how this comparison will be used. CODE is the rtx
3458 comparison code we will be using.
3460 ??? Actually, CODE is slightly weaker than that. A target is still
3461 required to implement all of the normal bcc operations, but not
3462 required to implement all (or any) of the unordered bcc operations. */
3465 can_compare_p (enum rtx_code code, enum machine_mode mode,
3466 enum can_compare_purpose purpose)
/* Walk MODE and successively wider modes until one supports the
   comparison for the requested PURPOSE.  */
3470 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3472 if (purpose == ccp_jump)
3473 return bcc_gen_fctn[(int) code] != NULL;
3474 else if (purpose == ccp_store_flag)
3475 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3477 /* There's only one cmov entry point, and it's allowed to fail. */
/* No plain cmp insn: look for a combined compare-and-<purpose>
   pattern instead.  */
3480 if (purpose == ccp_jump
3481 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3483 if (purpose == ccp_cmov
3484 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3486 if (purpose == ccp_store_flag
3487 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3490 mode = GET_MODE_WIDER_MODE (mode);
3492 while (mode != VOIDmode);
3497 /* This function is called when we are going to emit a compare instruction that
3498 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3500 *PMODE is the mode of the inputs (in case they are const_int).
3501 *PUNSIGNEDP nonzero says that the operands are unsigned;
3502 this matters if they need to be widened.
3504 If they have mode BLKmode, then SIZE specifies the size of both operands.
3506 This function performs all the setup necessary so that the caller only has
3507 to emit a single comparison insn. This setup can involve doing a BLKmode
3508 comparison or emitting a library call to perform the comparison if no insn
3509 is available to handle it.
3510 The values which are passed in through pointers can be modified; the caller
3511 should perform the comparison on the modified values. */
3514 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3515 enum machine_mode *pmode, int *punsignedp,
3516 enum can_compare_purpose purpose)
3518 enum machine_mode mode = *pmode;
3519 rtx x = *px, y = *py;
3520 int unsignedp = *punsignedp;
3521 enum mode_class class;
3523 class = GET_MODE_CLASS (mode);
3525 /* They could both be VOIDmode if both args are immediate constants,
3526 but we should fold that at an earlier stage.
3527 With no special code here, this will call abort,
3528 reminding the programmer to implement such folding. */
3530 if (mode != BLKmode && flag_force_mem)
3532 /* Load duplicate non-volatile operands once. */
3533 if (rtx_equal_p (x, y) && ! volatile_refs_p (x))
3535 x = force_not_mem (x);
3540 x = force_not_mem (x);
3541 y = force_not_mem (y);
3545 /* If we are inside an appropriately-short loop and one operand is an
3546 expensive constant, force it into a register. */
3547 if (CONSTANT_P (x) && preserve_subexpressions_p ()
3548 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3549 x = force_reg (mode, x);
3551 if (CONSTANT_P (y) && preserve_subexpressions_p ()
3552 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3553 y = force_reg (mode, y);
3556 /* Abort if we have a non-canonical comparison. The RTL documentation
3557 states that canonical comparisons are required only for targets which
3559 if (CONSTANT_P (x) && ! CONSTANT_P (y))
3563 /* Don't let both operands fail to indicate the mode. */
3564 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3565 x = force_reg (mode, x);
3567 /* Handle all BLKmode compares. */
3569 if (mode == BLKmode)
3572 enum machine_mode result_mode;
3573 rtx opalign ATTRIBUTE_UNUSED
3574 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3577 x = protect_from_queue (x, 0);
3578 y = protect_from_queue (y, 0);
/* Try the target's cmpstr patterns, preferring the one with the
   smallest size operand that can represent SIZE.  */
3582 #ifdef HAVE_cmpstrqi
3584 && GET_CODE (size) == CONST_INT
3585 && INTVAL (size) < (1 << GET_MODE_BITSIZE (QImode)))
3587 result_mode = insn_data[(int) CODE_FOR_cmpstrqi].operand[0].mode;
3588 result = gen_reg_rtx (result_mode);
3589 emit_insn (gen_cmpstrqi (result, x, y, size, opalign));
3593 #ifdef HAVE_cmpstrhi
3595 && GET_CODE (size) == CONST_INT
3596 && INTVAL (size) < (1 << GET_MODE_BITSIZE (HImode)))
3598 result_mode = insn_data[(int) CODE_FOR_cmpstrhi].operand[0].mode;
3599 result = gen_reg_rtx (result_mode);
3600 emit_insn (gen_cmpstrhi (result, x, y, size, opalign));
3604 #ifdef HAVE_cmpstrsi
3607 result_mode = insn_data[(int) CODE_FOR_cmpstrsi].operand[0].mode;
3608 result = gen_reg_rtx (result_mode);
3609 size = protect_from_queue (size, 0);
3610 emit_insn (gen_cmpstrsi (result, x, y,
3611 convert_to_mode (SImode, size, 1),
/* No cmpstr pattern matched: fall back to a library call --
   memcmp when the target provides the mem functions, else bcmp.  */
3617 #ifdef TARGET_MEM_FUNCTIONS
3618 result = emit_library_call_value (memcmp_libfunc, NULL_RTX, LCT_PURE_MAKE_BLOCK,
3619 TYPE_MODE (integer_type_node), 3,
3620 XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
3621 convert_to_mode (TYPE_MODE (sizetype), size,
3622 TREE_UNSIGNED (sizetype)),
3623 TYPE_MODE (sizetype));
3625 result = emit_library_call_value (bcmp_libfunc, NULL_RTX, LCT_PURE_MAKE_BLOCK,
3626 TYPE_MODE (integer_type_node), 3,
3627 XEXP (x, 0), Pmode, XEXP (y, 0), Pmode,
3628 convert_to_mode (TYPE_MODE (integer_type_node),
3630 TREE_UNSIGNED (integer_type_node)),
3631 TYPE_MODE (integer_type_node));
3634 result_mode = TYPE_MODE (integer_type_node);
3638 *pmode = result_mode;
3644 if (can_compare_p (*pcomparison, mode, purpose))
3647 /* Handle a lib call just for the mode we are using. */
3649 if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
3651 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3654 /* If we want unsigned, and this mode has a distinct unsigned
3655 comparison routine, use that. */
3656 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3657 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3659 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3660 word_mode, 2, x, mode, y, mode);
3662 /* Integer comparison returns a result that must be compared against 1,
3663 so that even if we do an unsigned compare afterward,
3664 there is still a value that can represent the result "less than". */
/* Floating-point comparisons with no insn support go through the
   float compare library routines.  */
3671 if (class == MODE_FLOAT)
3672 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3678 /* Before emitting an insn with code ICODE, make sure that X, which is going
3679 to be used for operand OPNUM of the insn, is converted from mode MODE to
3680 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3681 that it is accepted by the operand predicate. Return the new value. */
3684 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3685 enum machine_mode wider_mode, int unsignedp)
/* Flush any queued autoincrement side effects from X before using it.  */
3687 x = protect_from_queue (x, 0);
/* Widen X when the comparison is being carried out in a wider mode.  */
3689 if (mode != wider_mode)
3690 x = convert_modes (wider_mode, mode, x, unsignedp);
/* If the insn's predicate rejects X, force it into a register of the
   mode that operand slot expects.  */
3692 if (! (*insn_data[icode].operand[opnum].predicate)
3693 (x, insn_data[icode].operand[opnum].mode))
3694 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3698 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3699 we can do the comparison.
3700 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3701 be NULL_RTX which indicates that only a comparison is to be generated. */
3704 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3705 enum rtx_code comparison, int unsignedp, rtx label)
3707 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3708 enum mode_class class = GET_MODE_CLASS (mode);
3709 enum machine_mode wider_mode = mode;
/* Each iteration tries WIDER_MODE, then widens it via GET_MODE_WIDER_MODE
   below until VOIDmode is reached (loop terminator at the bottom).  */
3711 /* Try combined insns first. */
3714 enum insn_code icode;
3715 PUT_MODE (test, wider_mode);
/* First choice: a combined compare-and-branch (cbranch) pattern whose
   operand 0 accepts the whole TEST rtx.  */
3719 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3721 if (icode != CODE_FOR_nothing
3722 && (*insn_data[icode].operand[0].predicate) (test, wider_mode))
3724 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3725 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3726 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
/* Second choice: a single-operand "tst" pattern when Y is the zero
   constant of this mode, followed by the branch-on-condition insn.  */
3731 /* Handle some compares against zero. */
3732 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3733 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3735 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3736 emit_insn (GEN_FCN (icode) (x));
3738 emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));
/* Third choice: a plain two-operand compare insn plus the branch.  */
3742 /* Handle compares for which there is a directly suitable insn. */
3744 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3745 if (icode != CODE_FOR_nothing)
3747 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3748 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3749 emit_insn (GEN_FCN (icode) (x, y));
3751 emit_jump_insn ((*bcc_gen_fctn[(int) comparison]) (label));
/* Only integer, float and complex-float classes may be retried in a
   wider mode; anything else stops here.  */
3755 if (class != MODE_INT && class != MODE_FLOAT
3756 && class != MODE_COMPLEX_FLOAT)
3759 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3761 while (wider_mode != VOIDmode);
3766 /* Generate code to compare X with Y so that the condition codes are
3767 set and to jump to LABEL if the condition is true. If X is a
3768 constant and Y is not a constant, then the comparison is swapped to
3769 ensure that the comparison RTL has the canonical form.
3771 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3772 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3773 the proper branch condition code.
3775 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3777 MODE is the mode of the inputs (in case they are const_int).
3779 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3780 be passed unchanged to emit_cmp_insn, then potentially converted into an
3781 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3784 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3785 enum machine_mode mode, int unsignedp, rtx label)
3787 rtx op0 = x, op1 = y;
3789 /* Swap operands and condition to ensure canonical RTL. */
3790 if (swap_commutative_operands_p (x, y))
3792 /* If we're not emitting a branch, this means some caller
/* Swapping the operands requires mirroring the comparison code too.  */
3798 comparison = swap_condition (comparison);
3802 /* If OP0 is still a constant, then both X and Y must be constants. Force
3803 X into a register to avoid aborting in emit_cmp_insn due to non-canonical
3805 if (CONSTANT_P (op0))
3806 op0 = force_reg (mode, op0);
/* For unsigned operands, switch to the unsigned variant of the
   comparison code so the proper branch insn is selected.  */
3811 comparison = unsigned_condition (comparison);
3813 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3815 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3818 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3821 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3822 enum machine_mode mode, int unsignedp)
/* Passing 0 as the LABEL argument suppresses the branch; only the
   comparison insns are emitted.  */
3824 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3827 /* Emit a library call comparison between floating point X and Y.
3828 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3831 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3832 enum machine_mode *pmode, int *punsignedp)
3834 enum rtx_code comparison = *pcomparison;
3836 rtx x = *px = protect_from_queue (*px, 0);
3837 rtx y = *py = protect_from_queue (*py, 0);
3838 enum machine_mode mode = GET_MODE (x);
/* Pick the comparison library routine by operand mode and rtx code.
   In each GT/GE/LT/LE arm below, when the preferred routine is absent
   (libfunc == NULL_RTX) the operands are swapped and the mirrored
   routine is used instead.  First group: HFmode routines.  */
3846 libfunc = eqhf2_libfunc;
3850 libfunc = nehf2_libfunc;
3854 libfunc = gthf2_libfunc;
3855 if (libfunc == NULL_RTX)
3857 tmp = x; x = y; y = tmp;
3859 libfunc = lthf2_libfunc;
3864 libfunc = gehf2_libfunc;
3865 if (libfunc == NULL_RTX)
3867 tmp = x; x = y; y = tmp;
3869 libfunc = lehf2_libfunc;
3874 libfunc = lthf2_libfunc;
3875 if (libfunc == NULL_RTX)
3877 tmp = x; x = y; y = tmp;
3879 libfunc = gthf2_libfunc;
3884 libfunc = lehf2_libfunc;
3885 if (libfunc == NULL_RTX)
3887 tmp = x; x = y; y = tmp;
3889 libfunc = gehf2_libfunc;
3894 libfunc = unordhf2_libfunc;
/* SFmode routines, same selection scheme.  */
3900 else if (mode == SFmode)
3904 libfunc = eqsf2_libfunc;
3908 libfunc = nesf2_libfunc;
3912 libfunc = gtsf2_libfunc;
3913 if (libfunc == NULL_RTX)
3915 tmp = x; x = y; y = tmp;
3917 libfunc = ltsf2_libfunc;
3922 libfunc = gesf2_libfunc;
3923 if (libfunc == NULL_RTX)
3925 tmp = x; x = y; y = tmp;
3927 libfunc = lesf2_libfunc;
3932 libfunc = ltsf2_libfunc;
3933 if (libfunc == NULL_RTX)
3935 tmp = x; x = y; y = tmp;
3937 libfunc = gtsf2_libfunc;
3942 libfunc = lesf2_libfunc;
3943 if (libfunc == NULL_RTX)
3945 tmp = x; x = y; y = tmp;
3947 libfunc = gesf2_libfunc;
3952 libfunc = unordsf2_libfunc;
/* DFmode routines.  */
3958 else if (mode == DFmode)
3962 libfunc = eqdf2_libfunc;
3966 libfunc = nedf2_libfunc;
3970 libfunc = gtdf2_libfunc;
3971 if (libfunc == NULL_RTX)
3973 tmp = x; x = y; y = tmp;
3975 libfunc = ltdf2_libfunc;
3980 libfunc = gedf2_libfunc;
3981 if (libfunc == NULL_RTX)
3983 tmp = x; x = y; y = tmp;
3985 libfunc = ledf2_libfunc;
3990 libfunc = ltdf2_libfunc;
3991 if (libfunc == NULL_RTX)
3993 tmp = x; x = y; y = tmp;
3995 libfunc = gtdf2_libfunc;
4000 libfunc = ledf2_libfunc;
4001 if (libfunc == NULL_RTX)
4003 tmp = x; x = y; y = tmp;
4005 libfunc = gedf2_libfunc;
4010 libfunc = unorddf2_libfunc;
/* XFmode routines.  */
4016 else if (mode == XFmode)
4020 libfunc = eqxf2_libfunc;
4024 libfunc = nexf2_libfunc;
4028 libfunc = gtxf2_libfunc;
4029 if (libfunc == NULL_RTX)
4031 tmp = x; x = y; y = tmp;
4033 libfunc = ltxf2_libfunc;
4038 libfunc = gexf2_libfunc;
4039 if (libfunc == NULL_RTX)
4041 tmp = x; x = y; y = tmp;
4043 libfunc = lexf2_libfunc;
4048 libfunc = ltxf2_libfunc;
4049 if (libfunc == NULL_RTX)
4051 tmp = x; x = y; y = tmp;
4053 libfunc = gtxf2_libfunc;
4058 libfunc = lexf2_libfunc;
4059 if (libfunc == NULL_RTX)
4061 tmp = x; x = y; y = tmp;
4063 libfunc = gexf2_libfunc;
4068 libfunc = unordxf2_libfunc;
/* TFmode routines.  */
4074 else if (mode == TFmode)
4078 libfunc = eqtf2_libfunc;
4082 libfunc = netf2_libfunc;
4086 libfunc = gttf2_libfunc;
4087 if (libfunc == NULL_RTX)
4089 tmp = x; x = y; y = tmp;
4091 libfunc = lttf2_libfunc;
4096 libfunc = getf2_libfunc;
4097 if (libfunc == NULL_RTX)
4099 tmp = x; x = y; y = tmp;
4101 libfunc = letf2_libfunc;
4106 libfunc = lttf2_libfunc;
4107 if (libfunc == NULL_RTX)
4109 tmp = x; x = y; y = tmp;
4111 libfunc = gttf2_libfunc;
4116 libfunc = letf2_libfunc;
4117 if (libfunc == NULL_RTX)
4119 tmp = x; x = y; y = tmp;
4121 libfunc = getf2_libfunc;
4126 libfunc = unordtf2_libfunc;
/* No routine for this mode at all: widen both operands to the first
   wider mode that has a compare insn or a compare libfunc, then retry
   recursively.  */
4134 enum machine_mode wider_mode;
4136 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
4137 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
4139 if ((cmp_optab->handlers[(int) wider_mode].insn_code
4140 != CODE_FOR_nothing)
4141 || (cmp_optab->handlers[(int) wider_mode].libfunc != 0))
4143 x = protect_from_queue (x, 0);
4144 y = protect_from_queue (y, 0);
4145 *px = convert_to_mode (wider_mode, x, 0);
4146 *py = convert_to_mode (wider_mode, y, 0);
4147 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
/* Emit the selected library call; both operands are passed in MODE and
   the result comes back in word_mode.  */
4157 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
4158 word_mode, 2, x, mode, y, mode);
/* UNORDERED routines, and targets where FLOAT_LIB_COMPARE_RETURNS_BOOL
   holds, return a boolean rather than a three-way result.  */
4162 if (comparison == UNORDERED)
4164 #ifdef FLOAT_LIB_COMPARE_RETURNS_BOOL
4165 else if (FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4171 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4174 emit_indirect_jump (rtx loc)
/* Force LOC into a Pmode register when the indirect_jump pattern's
   operand predicate rejects it as-is.  */
4176 if (! ((*insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate)
4178 loc = copy_to_mode_reg (Pmode, loc);
4180 emit_jump_insn (gen_indirect_jump (loc));
4184 #ifdef HAVE_conditional_move
4186 /* Emit a conditional move instruction if the machine supports one for that
4187 condition and machine mode.
4189 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4190 the mode to use should they be constants. If it is VOIDmode, they cannot
4193 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4194 should be stored there. MODE is the mode to use should they be constants.
4195 If it is VOIDmode, they cannot both be constants.
4197 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4198 is not supported. */
4201 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4202 enum machine_mode cmode, rtx op2, rtx op3,
4203 enum machine_mode mode, int unsignedp)
4205 rtx tem, subtarget, comparison, insn;
4206 enum insn_code icode;
4207 enum rtx_code reversed;
4209 /* If one operand is constant, make it the second one. Only do this
4210 if the other operand is not constant as well. */
4212 if (swap_commutative_operands_p (op0, op1))
4217 code = swap_condition (code);
4220 /* get_condition will prefer to generate LT and GT even if the old
4221 comparison was against zero, so undo that canonicalization here since
4222 comparisons against zero are cheaper. */
4223 if (code == LT && GET_CODE (op1) == CONST_INT && INTVAL (op1) == 1)
4224 code = LE, op1 = const0_rtx;
4225 else if (code == GT && GET_CODE (op1) == CONST_INT && INTVAL (op1) == -1)
4226 code = GE, op1 = const0_rtx;
4228 if (cmode == VOIDmode)
4229 cmode = GET_MODE (op0);
/* If the value operands would be more canonical swapped and the
   comparison can be reversed, swap OP2/OP3 together with reversing CODE.  */
4231 if (swap_commutative_operands_p (op2, op3)
4232 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4241 if (mode == VOIDmode)
4242 mode = GET_MODE (op2);
/* Conditional-move support is recorded per mode; CODE_FOR_nothing
   means this target has no movcc pattern for MODE.  */
4244 icode = movcc_gen_code[mode];
4246 if (icode == CODE_FOR_nothing)
4251 op2 = force_not_mem (op2);
4252 op3 = force_not_mem (op3);
4256 target = protect_from_queue (target, 1);
4258 target = gen_reg_rtx (mode);
4264 op2 = protect_from_queue (op2, 0);
4265 op3 = protect_from_queue (op3, 0);
4267 /* If the insn doesn't accept these operands, put them in pseudos. */
4269 if (! (*insn_data[icode].operand[0].predicate)
4270 (subtarget, insn_data[icode].operand[0].mode))
4271 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4273 if (! (*insn_data[icode].operand[2].predicate)
4274 (op2, insn_data[icode].operand[2].mode))
4275 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4277 if (! (*insn_data[icode].operand[3].predicate)
4278 (op3, insn_data[icode].operand[3].mode))
4279 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4281 /* Everything should now be in the suitable form, so emit the compare insn
4282 and then the conditional move. */
4285 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4287 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4288 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4289 return NULL and let the caller figure out how best to deal with this
4291 if (GET_CODE (comparison) != code)
4294 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4296 /* If that failed, then give up. */
/* The pattern may have targeted SUBTARGET instead of TARGET; copy the
   result over if so.  */
4302 if (subtarget != target)
4303 convert_move (target, subtarget, 0);
4308 /* Return nonzero if a conditional move of mode MODE is supported.
4310 This function is for combine so it can tell whether an insn that looks
4311 like a conditional move is actually supported by the hardware. If we
4312 guess wrong we lose a bit on optimization, but that's it. */
4313 /* ??? sparc64 supports conditionally moving integers values based on fp
4314 comparisons, and vice versa. How do we handle them? */
4317 can_conditionally_move_p (enum machine_mode mode)
/* A mode is supported exactly when a movcc pattern is registered for it.  */
4319 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4325 #endif /* HAVE_conditional_move */
4327 /* Emit a conditional addition instruction if the machine supports one for that
4328 condition and machine mode.
4330 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4331 the mode to use should they be constants. If it is VOIDmode, they cannot
4334 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4335 should be stored there. MODE is the mode to use should they be constants.
4336 If it is VOIDmode, they cannot both be constants.
4338 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4339 is not supported. */
4342 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4343 enum machine_mode cmode, rtx op2, rtx op3,
4344 enum machine_mode mode, int unsignedp)
4346 rtx tem, subtarget, comparison, insn;
4347 enum insn_code icode;
4348 enum rtx_code reversed;
4350 /* If one operand is constant, make it the second one. Only do this
4351 if the other operand is not constant as well. */
4353 if (swap_commutative_operands_p (op0, op1))
4358 code = swap_condition (code);
4361 /* get_condition will prefer to generate LT and GT even if the old
4362 comparison was against zero, so undo that canonicalization here since
4363 comparisons against zero are cheaper. */
4364 if (code == LT && GET_CODE (op1) == CONST_INT && INTVAL (op1) == 1)
4365 code = LE, op1 = const0_rtx;
4366 else if (code == GT && GET_CODE (op1) == CONST_INT && INTVAL (op1) == -1)
4367 code = GE, op1 = const0_rtx;
4369 if (cmode == VOIDmode)
4370 cmode = GET_MODE (op0);
/* If the value operands would be more canonical swapped and the
   comparison can be reversed, swap OP2/OP3 together with reversing CODE.  */
4372 if (swap_commutative_operands_p (op2, op3)
4373 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4382 if (mode == VOIDmode)
4383 mode = GET_MODE (op2);
/* This mirrors emit_conditional_move, but selects the conditional-add
   (addcc) pattern instead of movcc.  */
4385 icode = addcc_optab->handlers[(int) mode].insn_code;
4387 if (icode == CODE_FOR_nothing)
4392 op2 = force_not_mem (op2);
4393 op3 = force_not_mem (op3);
4397 target = protect_from_queue (target, 1);
4399 target = gen_reg_rtx (mode);
4405 op2 = protect_from_queue (op2, 0);
4406 op3 = protect_from_queue (op3, 0);
4408 /* If the insn doesn't accept these operands, put them in pseudos. */
4410 if (! (*insn_data[icode].operand[0].predicate)
4411 (subtarget, insn_data[icode].operand[0].mode))
4412 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4414 if (! (*insn_data[icode].operand[2].predicate)
4415 (op2, insn_data[icode].operand[2].mode))
4416 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4418 if (! (*insn_data[icode].operand[3].predicate)
4419 (op3, insn_data[icode].operand[3].mode))
4420 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4422 /* Everything should now be in the suitable form, so emit the compare insn
4423 and then the conditional move. */
4426 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4428 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4429 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4430 return NULL and let the caller figure out how best to deal with this
4432 if (GET_CODE (comparison) != code)
4435 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4437 /* If that failed, then give up. */
/* The pattern may have targeted SUBTARGET instead of TARGET; copy the
   result over if so.  */
4443 if (subtarget != target)
4444 convert_move (target, subtarget, 0);
4449 /* These functions attempt to generate an insn body, rather than
4450 emitting the insn, but if the gen function already emits them, we
4451 make no attempt to turn them back into naked patterns.
4453 They do not protect from queued increments,
4454 because they may be used 1) in protect_from_queue itself
4455 and 2) in other passes where there is no queue. */
4457 /* Generate and return an insn body to add Y to X. */
4460 gen_add2_insn (rtx x, rtx y)
4462 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
/* All three operand slots of the add pattern (dest, addend 1 = X,
   addend 2 = Y) must satisfy their predicates before building the body.  */
4464 if (! ((*insn_data[icode].operand[0].predicate)
4465 (x, insn_data[icode].operand[0].mode))
4466 || ! ((*insn_data[icode].operand[1].predicate)
4467 (x, insn_data[icode].operand[1].mode))
4468 || ! ((*insn_data[icode].operand[2].predicate)
4469 (y, insn_data[icode].operand[2].mode)))
4472 return (GEN_FCN (icode) (x, x, y));
4475 /* Generate and return an insn body to add r1 and c,
4476 storing the result in r0. */
4478 gen_add3_insn (rtx r0, rtx r1, rtx c)
4480 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
/* Unlike gen_add2_insn, also guard against the mode having no add
   pattern at all (CODE_FOR_nothing).  */
4482 if (icode == CODE_FOR_nothing
4483 || ! ((*insn_data[icode].operand[0].predicate)
4484 (r0, insn_data[icode].operand[0].mode))
4485 || ! ((*insn_data[icode].operand[1].predicate)
4486 (r1, insn_data[icode].operand[1].mode))
4487 || ! ((*insn_data[icode].operand[2].predicate)
4488 (c, insn_data[icode].operand[2].mode)))
4491 return (GEN_FCN (icode) (r0, r1, c));
/* Report whether an add insn exists for X and Y (same shape as
   gen_add2_insn, but only a yes/no query).  */
4495 have_add2_insn (rtx x, rtx y)
/* VOIDmode cannot select a pattern.  */
4499 if (GET_MODE (x) == VOIDmode)
4502 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4504 if (icode == CODE_FOR_nothing)
/* The pattern exists; operands must additionally pass its predicates.  */
4507 if (! ((*insn_data[icode].operand[0].predicate)
4508 (x, insn_data[icode].operand[0].mode))
4509 || ! ((*insn_data[icode].operand[1].predicate)
4510 (x, insn_data[icode].operand[1].mode))
4511 || ! ((*insn_data[icode].operand[2].predicate)
4512 (y, insn_data[icode].operand[2].mode)))
4518 /* Generate and return an insn body to subtract Y from X. */
4521 gen_sub2_insn (rtx x, rtx y)
4523 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
/* Same predicate checks as gen_add2_insn, using the subtract pattern.  */
4525 if (! ((*insn_data[icode].operand[0].predicate)
4526 (x, insn_data[icode].operand[0].mode))
4527 || ! ((*insn_data[icode].operand[1].predicate)
4528 (x, insn_data[icode].operand[1].mode))
4529 || ! ((*insn_data[icode].operand[2].predicate)
4530 (y, insn_data[icode].operand[2].mode)))
4533 return (GEN_FCN (icode) (x, x, y));
4536 /* Generate and return an insn body to subtract r1 and c,
4537 storing the result in r0. */
4539 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4541 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
/* Guard against the mode having no subtract pattern, then check each
   operand against its predicate.  */
4543 if (icode == CODE_FOR_nothing
4544 || ! ((*insn_data[icode].operand[0].predicate)
4545 (r0, insn_data[icode].operand[0].mode))
4546 || ! ((*insn_data[icode].operand[1].predicate)
4547 (r1, insn_data[icode].operand[1].mode))
4548 || ! ((*insn_data[icode].operand[2].predicate)
4549 (c, insn_data[icode].operand[2].mode)))
4552 return (GEN_FCN (icode) (r0, r1, c));
/* Report whether a subtract insn exists for X and Y (counterpart of
   have_add2_insn, using sub_optab).  */
4556 have_sub2_insn (rtx x, rtx y)
/* VOIDmode cannot select a pattern.  */
4560 if (GET_MODE (x) == VOIDmode)
4563 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4565 if (icode == CODE_FOR_nothing)
/* The pattern exists; operands must additionally pass its predicates.  */
4568 if (! ((*insn_data[icode].operand[0].predicate)
4569 (x, insn_data[icode].operand[0].mode))
4570 || ! ((*insn_data[icode].operand[1].predicate)
4571 (x, insn_data[icode].operand[1].mode))
4572 || ! ((*insn_data[icode].operand[2].predicate)
4573 (y, insn_data[icode].operand[2].mode)))
4579 /* Generate the body of an instruction to copy Y into X.
4580 It may be a list of insns, if one insn isn't enough. */
4583 gen_move_insn (rtx x, rtx y)
/* Delegate to emit_move_insn_1, which may emit a multi-insn sequence.  */
4588 emit_move_insn_1 (x, y);
4594 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4595 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4596 no such operation exists, CODE_FOR_nothing will be returned. */
4599 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
/* On targets with HAVE_ptr_extend, pointer extension has its own
   dedicated pattern; otherwise consult the extension table, indexed by
   destination mode, source mode, and signedness.  */
4602 #ifdef HAVE_ptr_extend
4604 return CODE_FOR_ptr_extend;
4607 return extendtab[(int) to_mode][(int) from_mode][unsignedp != 0];
4610 /* Generate the body of an insn to extend Y (with mode MFROM)
4611 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4614 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4615 enum machine_mode mfrom, int unsignedp)
/* Look up the extend pattern in extendtab and build its insn body.  */
4617 return (GEN_FCN (extendtab[(int) mto][(int) mfrom][unsignedp != 0]) (x, y));
4620 /* can_fix_p and can_float_p say whether the target machine
4621 can directly convert a given fixed point type to
4622 a given floating point type, or vice versa.
4623 The returned value is the CODE_FOR_... value to use,
4624 or CODE_FOR_nothing if these modes cannot be directly converted.
4626 *TRUNCP_PTR is set to 1 if it is necessary to output
4627 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4629 static enum insn_code
4630 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4631 int unsignedp, int *truncp_ptr)
/* Preferred: a fix-with-truncation pattern, which needs no separate
   FTRUNC step.  */
4634 if (fixtrunctab[(int) fltmode][(int) fixmode][unsignedp != 0]
4635 != CODE_FOR_nothing)
4636 return fixtrunctab[(int) fltmode][(int) fixmode][unsignedp != 0];
/* Otherwise a plain fix pattern is usable provided an FTRUNC insn
   exists for FLTMODE (*TRUNCP_PTR is presumably set to 1 here — the
   intervening line is not visible in this view; confirm in the
   original source).  */
4638 if (ftrunc_optab->handlers[(int) fltmode].insn_code != CODE_FOR_nothing)
4641 return fixtab[(int) fltmode][(int) fixmode][unsignedp != 0];
4643 return CODE_FOR_nothing;
/* Return the insn code for converting FIXMODE to FLTMODE (see the
   can_fix_p block comment above), or CODE_FOR_nothing.  */
4646 static enum insn_code
4647 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4650 return floattab[(int) fltmode][(int) fixmode][unsignedp != 0];
4653 /* Generate code to convert FROM to floating point
4654 and store in TO. FROM must be fixed point and not VOIDmode.
4655 UNSIGNEDP nonzero means regard FROM as unsigned.
4656 Normally this is done by correcting the final value
4657 if it is negative. */
4660 expand_float (rtx to, rtx from, int unsignedp)
4662 enum insn_code icode;
4664 enum machine_mode fmode, imode;
4666 /* Crash now, because we won't be able to decide which mode to use. */
4667 if (GET_MODE (from) == VOIDmode)
4670 /* Look for an insn to do the conversion. Do it in the specified
4671 modes if possible; otherwise convert either input, output or both to
4672 wider mode. If the integer mode is wider than the mode of FROM,
4673 we can do the conversion signed even if the input is unsigned. */
4675 for (fmode = GET_MODE (to); fmode != VOIDmode;
4676 fmode = GET_MODE_WIDER_MODE (fmode))
4677 for (imode = GET_MODE (from); imode != VOIDmode;
4678 imode = GET_MODE_WIDER_MODE (imode))
4680 int doing_unsigned = unsignedp;
/* Skip wider float modes whose significand cannot represent FROM
   exactly; they would introduce rounding differences.  */
4682 if (fmode != GET_MODE (to)
4683 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4686 icode = can_float_p (fmode, imode, unsignedp);
/* A signed conversion from a wider integer mode also handles
   unsigned sources, since the widened value is nonnegative.  */
4687 if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
4688 icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;
4690 if (icode != CODE_FOR_nothing)
4692 to = protect_from_queue (to, 1);
4693 from = protect_from_queue (from, 0);
4695 if (imode != GET_MODE (from))
4696 from = convert_to_mode (imode, from, unsignedp);
4698 if (fmode != GET_MODE (to))
4699 target = gen_reg_rtx (fmode);
4701 emit_unop_insn (icode, target, from,
4702 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4705 convert_move (to, target, 0);
4710 /* Unsigned integer, and no way to convert directly.
4711 Convert as signed, then conditionally adjust the result. */
4714 rtx label = gen_label_rtx ();
4716 REAL_VALUE_TYPE offset;
4720 to = protect_from_queue (to, 1);
4721 from = protect_from_queue (from, 0);
4724 from = force_not_mem (from);
4726 /* Look for a usable floating mode FMODE wider than the source and at
4727 least as wide as the target. Using FMODE will avoid rounding woes
4728 with unsigned values greater than the signed maximum value. */
4730 for (fmode = GET_MODE (to); fmode != VOIDmode;
4731 fmode = GET_MODE_WIDER_MODE (fmode))
4732 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4733 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4736 if (fmode == VOIDmode)
4738 /* There is no such mode. Pretend the target is wide enough. */
4739 fmode = GET_MODE (to);
4741 /* Avoid double-rounding when TO is narrower than FROM. */
4742 if ((significand_size (fmode) + 1)
4743 < GET_MODE_BITSIZE (GET_MODE (from)))
4746 rtx neglabel = gen_label_rtx ();
4748 /* Don't use TARGET if it isn't a register, is a hard register,
4749 or is the wrong mode. */
4750 if (GET_CODE (target) != REG
4751 || REGNO (target) < FIRST_PSEUDO_REGISTER
4752 || GET_MODE (target) != fmode)
4753 target = gen_reg_rtx (fmode);
4755 imode = GET_MODE (from);
4756 do_pending_stack_adjust ();
4758 /* Test whether the sign bit is set. */
4759 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4762 /* The sign bit is not set. Convert as signed. */
4763 expand_float (target, from, 0);
4764 emit_jump_insn (gen_jump (label));
4767 /* The sign bit is set.
4768 Convert to a usable (positive signed) value by shifting right
4769 one bit, while remembering if a nonzero bit was shifted
4770 out; i.e., compute (from & 1) | (from >> 1). */
4772 emit_label (neglabel);
4773 temp = expand_binop (imode, and_optab, from, const1_rtx,
4774 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4775 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4777 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4779 expand_float (target, temp, 0);
4781 /* Multiply by 2 to undo the shift above. */
4782 temp = expand_binop (fmode, add_optab, target, target,
4783 target, 0, OPTAB_LIB_WIDEN);
4785 emit_move_insn (target, temp);
4787 do_pending_stack_adjust ();
4793 /* If we are about to do some arithmetic to correct for an
4794 unsigned operand, do it in a pseudo-register. */
4796 if (GET_MODE (to) != fmode
4797 || GET_CODE (to) != REG || REGNO (to) < FIRST_PSEUDO_REGISTER)
4798 target = gen_reg_rtx (fmode);
4800 /* Convert as signed integer to floating. */
4801 expand_float (target, from, 0);
4803 /* If FROM is negative (and therefore TO is negative),
4804 correct its value by 2**bitwidth. */
4806 do_pending_stack_adjust ();
4807 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
/* Negative path: add 2**bitwidth(FROM) to undo the signed
   misinterpretation.  */
4811 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4812 temp = expand_binop (fmode, add_optab, target,
4813 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4814 target, 0, OPTAB_LIB_WIDEN);
4816 emit_move_insn (target, temp);
4818 do_pending_stack_adjust ();
4823 /* No hardware instruction available; call a library routine to convert from
4824 SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode. */
4830 to = protect_from_queue (to, 1);
4831 from = protect_from_queue (from, 0);
/* The library routines start at SImode; widen narrower sources first.  */
4833 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4834 from = convert_to_mode (SImode, from, unsignedp);
4837 from = force_not_mem (from);
/* Select the float conversion libfunc by (destination, source) mode.  */
4839 if (GET_MODE (to) == SFmode)
4841 if (GET_MODE (from) == SImode)
4842 libfcn = floatsisf_libfunc;
4843 else if (GET_MODE (from) == DImode)
4844 libfcn = floatdisf_libfunc;
4845 else if (GET_MODE (from) == TImode)
4846 libfcn = floattisf_libfunc;
4850 else if (GET_MODE (to) == DFmode)
4852 if (GET_MODE (from) == SImode)
4853 libfcn = floatsidf_libfunc;
4854 else if (GET_MODE (from) == DImode)
4855 libfcn = floatdidf_libfunc;
4856 else if (GET_MODE (from) == TImode)
4857 libfcn = floattidf_libfunc;
4861 else if (GET_MODE (to) == XFmode)
4863 if (GET_MODE (from) == SImode)
4864 libfcn = floatsixf_libfunc;
4865 else if (GET_MODE (from) == DImode)
4866 libfcn = floatdixf_libfunc;
4867 else if (GET_MODE (from) == TImode)
4868 libfcn = floattixf_libfunc;
4872 else if (GET_MODE (to) == TFmode)
4874 if (GET_MODE (from) == SImode)
4875 libfcn = floatsitf_libfunc;
4876 else if (GET_MODE (from) == DImode)
4877 libfcn = floatditf_libfunc;
4878 else if (GET_MODE (from) == TImode)
4879 libfcn = floattitf_libfunc;
/* Emit the call and wrap it in a libcall block carrying a FLOAT
   equivalent note so later passes can recognize the computation.  */
4888 value = emit_library_call_value (libfcn, NULL_RTX, LCT_CONST,
4889 GET_MODE (to), 1, from,
4891 insns = get_insns ();
4894 emit_libcall_block (insns, target, value,
4895 gen_rtx_FLOAT (GET_MODE (to), from));
4900 /* Copy result to requested destination
4901 if we have been computing in a temp location. */
4905 if (GET_MODE (target) == GET_MODE (to))
4906 emit_move_insn (to, target);
4908 convert_move (to, target, 0);
4912 /* expand_fix: generate code to convert FROM to fixed point
4913 and store in TO. FROM must be floating point. */
/* Helper used by expand_fix: apply the ftrunc operation to X in X's own
   mode, returning the result in a fresh pseudo-register.  (The function
   header line for ftruncify is not visible in this view.)  */
4918 rtx temp = gen_reg_rtx (GET_MODE (x));
4919 return expand_unop (GET_MODE (x), ftrunc_optab, x, temp, 0);
4923 expand_fix (rtx to, rtx from, int unsignedp)
4925 enum insn_code icode;
4927 enum machine_mode fmode, imode;
4931 /* We first try to find a pair of modes, one real and one integer, at
4932 least as wide as FROM and TO, respectively, in which we can open-code
4933 this conversion. If the integer mode is wider than the mode of TO,
4934 we can do the conversion either signed or unsigned. */
4936 for (fmode = GET_MODE (from); fmode != VOIDmode;
4937 fmode = GET_MODE_WIDER_MODE (fmode))
4938 for (imode = GET_MODE (to); imode != VOIDmode;
4939 imode = GET_MODE_WIDER_MODE (imode))
4941 int doing_unsigned = unsignedp;
4943 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4944 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4945 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4947 if (icode != CODE_FOR_nothing)
4949 to = protect_from_queue (to, 1);
4950 from = protect_from_queue (from, 0);
4952 if (fmode != GET_MODE (from))
4953 from = convert_to_mode (fmode, from, 0);
4956 from = ftruncify (from);
4958 if (imode != GET_MODE (to))
4959 target = gen_reg_rtx (imode);
4961 emit_unop_insn (icode, target, from,
4962 doing_unsigned ? UNSIGNED_FIX : FIX);
4964 convert_move (to, target, unsignedp);
4969 /* For an unsigned conversion, there is one more way to do it.
4970 If we have a signed conversion, we generate code that compares
4971 the real value to the largest representable positive number. If it
4972 is smaller, the conversion is done normally. Otherwise, subtract
4973 one plus the highest signed number, convert, and add it back.
4975 We only need to check all real modes, since we know we didn't find
4976 anything with a wider integer mode.
4978 This code used to extend FP value into mode wider than the destination.
4979 This is not needed. Consider, for instance conversion from SFmode
4982 The hot path through the code is dealing with inputs smaller than 2^63
4983 and doing just the conversion, so there is no bits to lose.
4985 In the other path we know the value is positive in the range 2^63..2^64-1
4986 inclusive. (for other inputs, overflow happens and the result is undefined)
4987 So we know that the most important bit set in mantissa corresponds to
4988 2^63. The subtraction of 2^63 should not generate any rounding as it
4989 simply clears out that bit. The rest is trivial. */
4991 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4992 for (fmode = GET_MODE (from); fmode != VOIDmode;
4993 fmode = GET_MODE_WIDER_MODE (fmode))
4994 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4998 REAL_VALUE_TYPE offset;
4999 rtx limit, lab1, lab2, insn;
5001 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
5002 real_2expN (&offset, bitsize - 1);
5003 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
5004 lab1 = gen_label_rtx ();
5005 lab2 = gen_label_rtx ();
5008 to = protect_from_queue (to, 1);
5009 from = protect_from_queue (from, 0);
5012 from = force_not_mem (from);
5014 if (fmode != GET_MODE (from))
5015 from = convert_to_mode (fmode, from, 0);
5017 /* See if we need to do the subtraction. */
5018 do_pending_stack_adjust ();
5019 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5022 /* If not, do the signed "fix" and branch around fixup code. */
5023 expand_fix (to, from, 0);
5024 emit_jump_insn (gen_jump (lab2));
5027 /* Otherwise, subtract 2**(N-1), convert to signed number,
5028 then add 2**(N-1). Do the addition using XOR since this
5029 will often generate better code. */
5031 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5032 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5033 expand_fix (to, target, 0);
5034 target = expand_binop (GET_MODE (to), xor_optab, to,
5036 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5038 to, 1, OPTAB_LIB_WIDEN);
5041 emit_move_insn (to, target);
5045 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
5046 != CODE_FOR_nothing)
5048 /* Make a place for a REG_NOTE and add it. */
5049 insn = emit_move_insn (to, to);
5050 set_unique_reg_note (insn,
5052 gen_rtx_fmt_e (UNSIGNED_FIX,
5060 /* We can't do it with an insn, so use a library call. But first ensure
5061 that the mode of TO is at least as wide as SImode, since those are the
5062 only library calls we know about. */
5064 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5066 target = gen_reg_rtx (SImode);
5068 expand_fix (target, from, unsignedp);
5070 else if (GET_MODE (from) == SFmode)
5072 if (GET_MODE (to) == SImode)
5073 libfcn = unsignedp ? fixunssfsi_libfunc : fixsfsi_libfunc;
5074 else if (GET_MODE (to) == DImode)
5075 libfcn = unsignedp ? fixunssfdi_libfunc : fixsfdi_libfunc;
5076 else if (GET_MODE (to) == TImode)
5077 libfcn = unsignedp ? fixunssfti_libfunc : fixsfti_libfunc;
5081 else if (GET_MODE (from) == DFmode)
5083 if (GET_MODE (to) == SImode)
5084 libfcn = unsignedp ? fixunsdfsi_libfunc : fixdfsi_libfunc;
5085 else if (GET_MODE (to) == DImode)
5086 libfcn = unsignedp ? fixunsdfdi_libfunc : fixdfdi_libfunc;
5087 else if (GET_MODE (to) == TImode)
5088 libfcn = unsignedp ? fixunsdfti_libfunc : fixdfti_libfunc;
5092 else if (GET_MODE (from) == XFmode)
5094 if (GET_MODE (to) == SImode)
5095 libfcn = unsignedp ? fixunsxfsi_libfunc : fixxfsi_libfunc;
5096 else if (GET_MODE (to) == DImode)
5097 libfcn = unsignedp ? fixunsxfdi_libfunc : fixxfdi_libfunc;
5098 else if (GET_MODE (to) == TImode)
5099 libfcn = unsignedp ? fixunsxfti_libfunc : fixxfti_libfunc;
5103 else if (GET_MODE (from) == TFmode)
5105 if (GET_MODE (to) == SImode)
5106 libfcn = unsignedp ? fixunstfsi_libfunc : fixtfsi_libfunc;
5107 else if (GET_MODE (to) == DImode)
5108 libfcn = unsignedp ? fixunstfdi_libfunc : fixtfdi_libfunc;
5109 else if (GET_MODE (to) == TImode)
5110 libfcn = unsignedp ? fixunstfti_libfunc : fixtfti_libfunc;
5122 to = protect_from_queue (to, 1);
5123 from = protect_from_queue (from, 0);
5126 from = force_not_mem (from);
5130 value = emit_library_call_value (libfcn, NULL_RTX, LCT_CONST,
5131 GET_MODE (to), 1, from,
5133 insns = get_insns ();
5136 emit_libcall_block (insns, target, value,
5137 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5138 GET_MODE (to), from));
5143 if (GET_MODE (to) == GET_MODE (target))
5144 emit_move_insn (to, target);
5146 convert_move (to, target, 0);
5150 /* Report whether we have an instruction to perform the operation
5151 specified by CODE on operands of mode MODE. */
/* Returns nonzero iff CODE maps to an optab at all AND that optab's
   handler for MODE names a real insn pattern (not CODE_FOR_nothing).
   The null check matters: only some rtx codes are registered in
   code_to_optab (see init_optab vs. init_optabv below).  */
5153 have_insn_for (enum rtx_code code, enum machine_mode mode)
5155 return (code_to_optab[(int) code] != 0
5156 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
5157 != CODE_FOR_nothing));
5160 /* Create a blank optab. */
/* Allocates a garbage-collected struct optab and marks every per-mode
   handler as unsupported: no insn pattern (CODE_FOR_nothing) and no
   library fallback (null libfunc).  Callers fill in real entries later.  */
5165 optab op = (optab) ggc_alloc (sizeof (struct optab));
5166 for (i = 0; i < NUM_MACHINE_MODES; i++)
5168 op->handlers[i].insn_code = CODE_FOR_nothing;
5169 op->handlers[i].libfunc = 0;
5175 /* Same, but fill in its code as CODE, and write it into the
5176 code_to_optab table. */
5178 init_optab (enum rtx_code code)
/* Registering in code_to_optab is what lets have_insn_for () and other
   rtx-code-driven lookups find this optab.  */
5180 optab op = new_optab ();
5182 code_to_optab[(int) code] = op;
5186 /* Same, but fill in its code as CODE, and do _not_ write it into
5187 the code_to_optab table. */
/* Used for the "v" variants (addv_optab, subv_optab, ... — see
   init_optabs below); skipping code_to_optab keeps the plain,
   non-"v" optab as the canonical one for each rtx code.  */
5189 init_optabv (enum rtx_code code)
5191 optab op = new_optab ();
5196 /* Initialize the libfunc fields of an entire group of entries in some
5197 optab. Each entry is set equal to a string consisting of a leading
5198 pair of underscores followed by a generic operation name followed by
5199 a mode name (downshifted to lower case) followed by a single character
5200 representing the number of operands for the given operation (which is
5201 usually one of the characters '2', '3', or '4').
5203 OPTABLE is the table in which libfunc fields are to be initialized.
5204 FIRST_MODE is the first machine mode index in the given optab to
5206 LAST_MODE is the last machine mode index in the given optab to
5208 OPNAME is the generic (string) name of the operation.
5209 SUFFIX is the character which specifies the number of operands for
5210 the given generic operation.
5214 init_libfuncs (optab optable, int first_mode, int last_mode,
5215 const char *opname, int suffix)
5218 unsigned opname_len = strlen (opname);
// Walk the inclusive machine-mode range [first_mode, last_mode].
5220 for (mode = first_mode; (int) mode <= (int) last_mode;
5221 mode = (enum machine_mode) ((int) mode + 1))
5223 const char *mname = GET_MODE_NAME (mode);
5224 unsigned mname_len = strlen (mname);
// Stack buffer sized for "__" + opname + mode name + suffix char + NUL.
5225 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
5232 for (q = opname; *q; )
// Append the mode name downshifted to lower case (e.g. "SF" -> "sf").
5234 for (q = mname; *q; q++)
5235 *p++ = TOLOWER (*q);
// Intern the assembled name (e.g. "__addsf3") as this mode's libfunc.
5239 optable->handlers[(int) mode].libfunc
5240 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
5244 /* Initialize the libfunc fields of an entire group of entries in some
5245 optab which correspond to all integer mode operations. The parameters
5246 have the same meaning as similarly named ones for the `init_libfuncs'
5247 routine. (See above). */
/* Covers modes from word_mode up to the wider of double-word size or
   LONG_LONG_TYPE_SIZE — NOTE(review): presumably matching the widest
   integer mode libgcc provides routines for; confirm against libgcc.  */
5250 init_integral_libfuncs (optab optable, const char *opname, int suffix)
5252 int maxsize = 2*BITS_PER_WORD;
5253 if (maxsize < LONG_LONG_TYPE_SIZE)
5254 maxsize = LONG_LONG_TYPE_SIZE;
5255 init_libfuncs (optable, word_mode,
5256 mode_for_size (maxsize, MODE_INT, 0),
5260 /* Initialize the libfunc fields of an entire group of entries in some
5261 optab which correspond to all real mode operations. The parameters
5262 have the same meaning as similarly named ones for the `init_libfuncs'
5263 routine. (See above). */
/* Registers libfuncs only for the modes of the C types float, double
   and long double; the != guards skip duplicates when two types share
   a mode, and VOIDmode marks a type node that does not exist yet.  */
5266 init_floating_libfuncs (optab optable, const char *opname, int suffix)
5268 enum machine_mode fmode, dmode, lmode;
5270 fmode = float_type_node ? TYPE_MODE (float_type_node) : VOIDmode;
5271 dmode = double_type_node ? TYPE_MODE (double_type_node) : VOIDmode;
5272 lmode = long_double_type_node ? TYPE_MODE (long_double_type_node) : VOIDmode;
5274 if (fmode != VOIDmode)
5275 init_libfuncs (optable, fmode, fmode, opname, suffix);
5276 if (dmode != fmode && dmode != VOIDmode)
5277 init_libfuncs (optable, dmode, dmode, opname, suffix);
5278 if (lmode != dmode && lmode != VOIDmode)
5279 init_libfuncs (optable, lmode, lmode, opname, suffix);
/* Build the SYMBOL_REF used to call the library routine NAME.  A throwaway
   external FUNCTION_DECL is created only so targetm.encode_section_info
   can stamp target-specific flags onto the symbol.  */
5283 init_one_libfunc (const char *name)
5287 /* Create a FUNCTION_DECL that can be passed to
5288 targetm.encode_section_info. */
5289 /* ??? We don't have any type information except for this is
5290 a function. Pretend this is "int foo()". */
5291 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
5292 build_function_type (integer_type_node, NULL_TREE));
/* Mark it compiler-generated and an external public symbol so nothing
   tries to emit a body for it.  */
5293 DECL_ARTIFICIAL (decl) = 1;
5294 DECL_EXTERNAL (decl) = 1;
5295 TREE_PUBLIC (decl) = 1;
5297 symbol = XEXP (DECL_RTL (decl), 0);
5299 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
5300 are the flags assigned by targetm.encode_section_info. */
5301 SYMBOL_REF_DECL (symbol) = 0;
5306 /* Call this once to initialize the contents of the optabs
5307 appropriately for the current target machine. */
/* Every conversion/compare table is reset to CODE_FOR_nothing first, so
   the target only has to record the operations it actually supports.  */
5312 unsigned int i, j, k;
5314 /* Start by initializing all tables to contain CODE_FOR_nothing. */
5316 for (i = 0; i < ARRAY_SIZE (fixtab); i++)
5317 for (j = 0; j < ARRAY_SIZE (fixtab[0]); j++)
5318 for (k = 0; k < ARRAY_SIZE (fixtab[0][0]); k++)
5319 fixtab[i][j][k] = CODE_FOR_nothing;
5321 for (i = 0; i < ARRAY_SIZE (fixtrunctab); i++)
5322 for (j = 0; j < ARRAY_SIZE (fixtrunctab[0]); j++)
5323 for (k = 0; k < ARRAY_SIZE (fixtrunctab[0][0]); k++)
5324 fixtrunctab[i][j][k] = CODE_FOR_nothing;
5326 for (i = 0; i < ARRAY_SIZE (floattab); i++)
5327 for (j = 0; j < ARRAY_SIZE (floattab[0]); j++)
5328 for (k = 0; k < ARRAY_SIZE (floattab[0][0]); k++)
5329 floattab[i][j][k] = CODE_FOR_nothing;
5331 for (i = 0; i < ARRAY_SIZE (extendtab); i++)
5332 for (j = 0; j < ARRAY_SIZE (extendtab[0]); j++)
5333 for (k = 0; k < ARRAY_SIZE (extendtab[0][0]); k++)
5334 extendtab[i][j][k] = CODE_FOR_nothing;
5336 for (i = 0; i < NUM_RTX_CODE; i++)
5337 setcc_gen_code[i] = CODE_FOR_nothing;
5339 #ifdef HAVE_conditional_move
5340 for (i = 0; i < NUM_MACHINE_MODES; i++)
5341 movcc_gen_code[i] = CODE_FOR_nothing;
/* Allocate one optab per operation.  UNKNOWN means no rtx code maps back
   to the optab through code_to_optab; init_optabv additionally keeps the
   "v" (separate overflow-handling) variants out of that table.  */
5344 add_optab = init_optab (PLUS);
5345 addv_optab = init_optabv (PLUS);
5346 sub_optab = init_optab (MINUS);
5347 subv_optab = init_optabv (MINUS);
5348 smul_optab = init_optab (MULT);
5349 smulv_optab = init_optabv (MULT);
5350 smul_highpart_optab = init_optab (UNKNOWN);
5351 umul_highpart_optab = init_optab (UNKNOWN);
5352 smul_widen_optab = init_optab (UNKNOWN);
5353 umul_widen_optab = init_optab (UNKNOWN);
5354 sdiv_optab = init_optab (DIV);
5355 sdivv_optab = init_optabv (DIV);
5356 sdivmod_optab = init_optab (UNKNOWN);
5357 udiv_optab = init_optab (UDIV);
5358 udivmod_optab = init_optab (UNKNOWN);
5359 smod_optab = init_optab (MOD);
5360 umod_optab = init_optab (UMOD);
5361 ftrunc_optab = init_optab (UNKNOWN);
5362 and_optab = init_optab (AND);
5363 ior_optab = init_optab (IOR);
5364 xor_optab = init_optab (XOR);
5365 ashl_optab = init_optab (ASHIFT);
5366 ashr_optab = init_optab (ASHIFTRT);
5367 lshr_optab = init_optab (LSHIFTRT);
5368 rotl_optab = init_optab (ROTATE);
5369 rotr_optab = init_optab (ROTATERT);
5370 smin_optab = init_optab (SMIN);
5371 smax_optab = init_optab (SMAX);
5372 umin_optab = init_optab (UMIN);
5373 umax_optab = init_optab (UMAX);
5374 pow_optab = init_optab (UNKNOWN);
5375 atan2_optab = init_optab (UNKNOWN);
5377 /* These three have codes assigned exclusively for the sake of
5379 mov_optab = init_optab (SET);
5380 movstrict_optab = init_optab (STRICT_LOW_PART);
5381 cmp_optab = init_optab (COMPARE);
5383 ucmp_optab = init_optab (UNKNOWN);
5384 tst_optab = init_optab (UNKNOWN);
5385 neg_optab = init_optab (NEG);
5386 negv_optab = init_optabv (NEG);
5387 abs_optab = init_optab (ABS);
5388 absv_optab = init_optabv (ABS);
5389 addcc_optab = init_optab (UNKNOWN);
5390 one_cmpl_optab = init_optab (NOT);
5391 ffs_optab = init_optab (FFS);
5392 clz_optab = init_optab (CLZ);
5393 ctz_optab = init_optab (CTZ);
5394 popcount_optab = init_optab (POPCOUNT);
5395 parity_optab = init_optab (PARITY);
5396 sqrt_optab = init_optab (SQRT);
5397 floor_optab = init_optab (UNKNOWN);
5398 ceil_optab = init_optab (UNKNOWN);
5399 round_optab = init_optab (UNKNOWN);
5400 trunc_optab = init_optab (UNKNOWN);
5401 nearbyint_optab = init_optab (UNKNOWN);
5402 sin_optab = init_optab (UNKNOWN);
5403 cos_optab = init_optab (UNKNOWN);
5404 exp_optab = init_optab (UNKNOWN);
5405 log_optab = init_optab (UNKNOWN);
5406 tan_optab = init_optab (UNKNOWN);
5407 atan_optab = init_optab (UNKNOWN);
5408 strlen_optab = init_optab (UNKNOWN);
5409 cbranch_optab = init_optab (UNKNOWN);
5410 cmov_optab = init_optab (UNKNOWN);
5411 cstore_optab = init_optab (UNKNOWN);
5412 push_optab = init_optab (UNKNOWN);
5414 for (i = 0; i < NUM_MACHINE_MODES; i++)
5416 movstr_optab[i] = CODE_FOR_nothing;
5417 clrstr_optab[i] = CODE_FOR_nothing;
5419 #ifdef HAVE_SECONDARY_RELOADS
5420 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5424 /* Fill in the optabs with the insns we support. */
5427 #ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
5428 /* This flag says the same insns that convert to a signed fixnum
5429 also convert validly to an unsigned one. */
5430 for (i = 0; i < NUM_MACHINE_MODES; i++)
5431 for (j = 0; j < NUM_MACHINE_MODES; j++)
5432 fixtrunctab[i][j][1] = fixtrunctab[i][j][0];
5435 /* Initialize the optabs with the names of the library functions. */
5436 init_integral_libfuncs (add_optab, "add", '3');
5437 init_floating_libfuncs (add_optab, "add", '3');
5438 init_integral_libfuncs (addv_optab, "addv", '3');
/* NOTE(review): the "v" optabs reuse the plain names for floating modes
   ("add", not "addv") — presumably because the FP libcalls do not have
   separate trapping variants; confirm against libgcc's exported names.  */
5439 init_floating_libfuncs (addv_optab, "add", '3');
5440 init_integral_libfuncs (sub_optab, "sub", '3');
5441 init_floating_libfuncs (sub_optab, "sub", '3');
5442 init_integral_libfuncs (subv_optab, "subv", '3');
5443 init_floating_libfuncs (subv_optab, "sub", '3');
5444 init_integral_libfuncs (smul_optab, "mul", '3');
5445 init_floating_libfuncs (smul_optab, "mul", '3');
5446 init_integral_libfuncs (smulv_optab, "mulv", '3');
5447 init_floating_libfuncs (smulv_optab, "mul", '3');
5448 init_integral_libfuncs (sdiv_optab, "div", '3');
5449 init_floating_libfuncs (sdiv_optab, "div", '3');
5450 init_integral_libfuncs (sdivv_optab, "divv", '3');
5451 init_integral_libfuncs (udiv_optab, "udiv", '3');
5452 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5453 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5454 init_integral_libfuncs (smod_optab, "mod", '3');
5455 init_integral_libfuncs (umod_optab, "umod", '3');
5456 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5457 init_integral_libfuncs (and_optab, "and", '3');
5458 init_integral_libfuncs (ior_optab, "ior", '3');
5459 init_integral_libfuncs (xor_optab, "xor", '3');
5460 init_integral_libfuncs (ashl_optab, "ashl", '3');
5461 init_integral_libfuncs (ashr_optab, "ashr", '3');
5462 init_integral_libfuncs (lshr_optab, "lshr", '3');
5463 init_integral_libfuncs (smin_optab, "min", '3');
5464 init_floating_libfuncs (smin_optab, "min", '3');
5465 init_integral_libfuncs (smax_optab, "max", '3');
5466 init_floating_libfuncs (smax_optab, "max", '3');
5467 init_integral_libfuncs (umin_optab, "umin", '3');
5468 init_integral_libfuncs (umax_optab, "umax", '3');
5469 init_integral_libfuncs (neg_optab, "neg", '2');
5470 init_floating_libfuncs (neg_optab, "neg", '2');
5471 init_integral_libfuncs (negv_optab, "negv", '2');
5472 init_floating_libfuncs (negv_optab, "neg", '2');
5473 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5474 init_integral_libfuncs (ffs_optab, "ffs", '2');
5475 init_integral_libfuncs (clz_optab, "clz", '2');
5476 init_integral_libfuncs (ctz_optab, "ctz", '2');
5477 init_integral_libfuncs (popcount_optab, "popcount", '2');
5478 init_integral_libfuncs (parity_optab, "parity", '2');
5480 /* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */
5481 init_integral_libfuncs (cmp_optab, "cmp", '2');
5482 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5483 init_floating_libfuncs (cmp_optab, "cmp", '2');
/* Targets may override the default mul/div/mod libcall names through
   these optional macros.  */
5485 #ifdef MULSI3_LIBCALL
5486 smul_optab->handlers[(int) SImode].libfunc
5487 = init_one_libfunc (MULSI3_LIBCALL);
5489 #ifdef MULDI3_LIBCALL
5490 smul_optab->handlers[(int) DImode].libfunc
5491 = init_one_libfunc (MULDI3_LIBCALL);
5494 #ifdef DIVSI3_LIBCALL
5495 sdiv_optab->handlers[(int) SImode].libfunc
5496 = init_one_libfunc (DIVSI3_LIBCALL);
5498 #ifdef DIVDI3_LIBCALL
5499 sdiv_optab->handlers[(int) DImode].libfunc
5500 = init_one_libfunc (DIVDI3_LIBCALL);
5503 #ifdef UDIVSI3_LIBCALL
5504 udiv_optab->handlers[(int) SImode].libfunc
5505 = init_one_libfunc (UDIVSI3_LIBCALL);
5507 #ifdef UDIVDI3_LIBCALL
5508 udiv_optab->handlers[(int) DImode].libfunc
5509 = init_one_libfunc (UDIVDI3_LIBCALL);
5512 #ifdef MODSI3_LIBCALL
5513 smod_optab->handlers[(int) SImode].libfunc
5514 = init_one_libfunc (MODSI3_LIBCALL);
5516 #ifdef MODDI3_LIBCALL
5517 smod_optab->handlers[(int) DImode].libfunc
5518 = init_one_libfunc (MODDI3_LIBCALL);
5521 #ifdef UMODSI3_LIBCALL
5522 umod_optab->handlers[(int) SImode].libfunc
5523 = init_one_libfunc (UMODSI3_LIBCALL);
5525 #ifdef UMODDI3_LIBCALL
5526 umod_optab->handlers[(int) DImode].libfunc
5527 = init_one_libfunc (UMODDI3_LIBCALL);
5530 /* Use cabs for DC complex abs, since systems generally have cabs.
5531 Don't define any libcall for SCmode, so that cabs will be used. */
5532 abs_optab->handlers[(int) DCmode].libfunc
5533 = init_one_libfunc ("cabs");
5535 /* The ffs function operates on `int'. */
5536 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5537 = init_one_libfunc ("ffs");
/* Float-widening and float-narrowing conversion routines.  */
5539 extendsfdf2_libfunc = init_one_libfunc ("__extendsfdf2");
5540 extendsfxf2_libfunc = init_one_libfunc ("__extendsfxf2");
5541 extendsftf2_libfunc = init_one_libfunc ("__extendsftf2");
5542 extenddfxf2_libfunc = init_one_libfunc ("__extenddfxf2");
5543 extenddftf2_libfunc = init_one_libfunc ("__extenddftf2");
5545 truncdfsf2_libfunc = init_one_libfunc ("__truncdfsf2");
5546 truncxfsf2_libfunc = init_one_libfunc ("__truncxfsf2");
5547 trunctfsf2_libfunc = init_one_libfunc ("__trunctfsf2");
5548 truncxfdf2_libfunc = init_one_libfunc ("__truncxfdf2");
5549 trunctfdf2_libfunc = init_one_libfunc ("__trunctfdf2");
5551 abort_libfunc = init_one_libfunc ("abort");
5552 memcpy_libfunc = init_one_libfunc ("memcpy");
5553 memmove_libfunc = init_one_libfunc ("memmove");
5554 bcopy_libfunc = init_one_libfunc ("bcopy");
5555 memcmp_libfunc = init_one_libfunc ("memcmp");
5556 bcmp_libfunc = init_one_libfunc ("__gcc_bcmp");
5557 memset_libfunc = init_one_libfunc ("memset");
5558 bzero_libfunc = init_one_libfunc ("bzero");
5559 setbits_libfunc = init_one_libfunc ("__setbits");
/* Exception-handling runtime entry points: the resume symbol differs
   between setjmp/longjmp-based and table-driven unwinding.  */
5561 unwind_resume_libfunc = init_one_libfunc (USING_SJLJ_EXCEPTIONS
5562 ? "_Unwind_SjLj_Resume"
5563 : "_Unwind_Resume");
5564 #ifndef DONT_USE_BUILTIN_SETJMP
5565 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5566 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5568 setjmp_libfunc = init_one_libfunc ("setjmp");
5569 longjmp_libfunc = init_one_libfunc ("longjmp");
5571 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5572 unwind_sjlj_unregister_libfunc
5573 = init_one_libfunc ("_Unwind_SjLj_Unregister");
/* Per-float-mode comparison libcalls (eq/ne/gt/ge/lt/le/unord) for HF,
   SF, DF, XF and TF modes.  */
5575 eqhf2_libfunc = init_one_libfunc ("__eqhf2");
5576 nehf2_libfunc = init_one_libfunc ("__nehf2");
5577 gthf2_libfunc = init_one_libfunc ("__gthf2");
5578 gehf2_libfunc = init_one_libfunc ("__gehf2");
5579 lthf2_libfunc = init_one_libfunc ("__lthf2");
5580 lehf2_libfunc = init_one_libfunc ("__lehf2");
5581 unordhf2_libfunc = init_one_libfunc ("__unordhf2");
5583 eqsf2_libfunc = init_one_libfunc ("__eqsf2");
5584 nesf2_libfunc = init_one_libfunc ("__nesf2");
5585 gtsf2_libfunc = init_one_libfunc ("__gtsf2");
5586 gesf2_libfunc = init_one_libfunc ("__gesf2");
5587 ltsf2_libfunc = init_one_libfunc ("__ltsf2");
5588 lesf2_libfunc = init_one_libfunc ("__lesf2");
5589 unordsf2_libfunc = init_one_libfunc ("__unordsf2");
5591 eqdf2_libfunc = init_one_libfunc ("__eqdf2");
5592 nedf2_libfunc = init_one_libfunc ("__nedf2");
5593 gtdf2_libfunc = init_one_libfunc ("__gtdf2");
5594 gedf2_libfunc = init_one_libfunc ("__gedf2");
5595 ltdf2_libfunc = init_one_libfunc ("__ltdf2");
5596 ledf2_libfunc = init_one_libfunc ("__ledf2");
5597 unorddf2_libfunc = init_one_libfunc ("__unorddf2");
5599 eqxf2_libfunc = init_one_libfunc ("__eqxf2");
5600 nexf2_libfunc = init_one_libfunc ("__nexf2");
5601 gtxf2_libfunc = init_one_libfunc ("__gtxf2");
5602 gexf2_libfunc = init_one_libfunc ("__gexf2");
5603 ltxf2_libfunc = init_one_libfunc ("__ltxf2");
5604 lexf2_libfunc = init_one_libfunc ("__lexf2");
5605 unordxf2_libfunc = init_one_libfunc ("__unordxf2");
5607 eqtf2_libfunc = init_one_libfunc ("__eqtf2");
5608 netf2_libfunc = init_one_libfunc ("__netf2");
5609 gttf2_libfunc = init_one_libfunc ("__gttf2");
5610 getf2_libfunc = init_one_libfunc ("__getf2");
5611 lttf2_libfunc = init_one_libfunc ("__lttf2");
5612 letf2_libfunc = init_one_libfunc ("__letf2");
5613 unordtf2_libfunc = init_one_libfunc ("__unordtf2");
/* int/float conversion libcalls: float<mode>, fix<modes> and the
   unsigned fixuns<modes> family.  */
5615 floatsisf_libfunc = init_one_libfunc ("__floatsisf");
5616 floatdisf_libfunc = init_one_libfunc ("__floatdisf");
5617 floattisf_libfunc = init_one_libfunc ("__floattisf");
5619 floatsidf_libfunc = init_one_libfunc ("__floatsidf");
5620 floatdidf_libfunc = init_one_libfunc ("__floatdidf");
5621 floattidf_libfunc = init_one_libfunc ("__floattidf");
5623 floatsixf_libfunc = init_one_libfunc ("__floatsixf");
5624 floatdixf_libfunc = init_one_libfunc ("__floatdixf");
5625 floattixf_libfunc = init_one_libfunc ("__floattixf");
5627 floatsitf_libfunc = init_one_libfunc ("__floatsitf");
5628 floatditf_libfunc = init_one_libfunc ("__floatditf");
5629 floattitf_libfunc = init_one_libfunc ("__floattitf");
5631 fixsfsi_libfunc = init_one_libfunc ("__fixsfsi");
5632 fixsfdi_libfunc = init_one_libfunc ("__fixsfdi");
5633 fixsfti_libfunc = init_one_libfunc ("__fixsfti");
5635 fixdfsi_libfunc = init_one_libfunc ("__fixdfsi");
5636 fixdfdi_libfunc = init_one_libfunc ("__fixdfdi");
5637 fixdfti_libfunc = init_one_libfunc ("__fixdfti");
5639 fixxfsi_libfunc = init_one_libfunc ("__fixxfsi");
5640 fixxfdi_libfunc = init_one_libfunc ("__fixxfdi");
5641 fixxfti_libfunc = init_one_libfunc ("__fixxfti");
5643 fixtfsi_libfunc = init_one_libfunc ("__fixtfsi");
5644 fixtfdi_libfunc = init_one_libfunc ("__fixtfdi");
5645 fixtfti_libfunc = init_one_libfunc ("__fixtfti");
5647 fixunssfsi_libfunc = init_one_libfunc ("__fixunssfsi");
5648 fixunssfdi_libfunc = init_one_libfunc ("__fixunssfdi");
5649 fixunssfti_libfunc = init_one_libfunc ("__fixunssfti");
5651 fixunsdfsi_libfunc = init_one_libfunc ("__fixunsdfsi");
5652 fixunsdfdi_libfunc = init_one_libfunc ("__fixunsdfdi");
5653 fixunsdfti_libfunc = init_one_libfunc ("__fixunsdfti");
5655 fixunsxfsi_libfunc = init_one_libfunc ("__fixunsxfsi");
5656 fixunsxfdi_libfunc = init_one_libfunc ("__fixunsxfdi");
5657 fixunsxfti_libfunc = init_one_libfunc ("__fixunsxfti");
5659 fixunstfsi_libfunc = init_one_libfunc ("__fixunstfsi");
5660 fixunstfdi_libfunc = init_one_libfunc ("__fixunstfdi");
5661 fixunstfti_libfunc = init_one_libfunc ("__fixunstfti");
5663 /* For function entry/exit instrumentation. */
5664 profile_function_entry_libfunc
5665 = init_one_libfunc ("__cyg_profile_func_enter");
5666 profile_function_exit_libfunc
5667 = init_one_libfunc ("__cyg_profile_func_exit");
5669 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5670 gcov_init_libfunc = init_one_libfunc ("__gcov_init");
/* Shared placeholder comparison rtx; gen_cond_trap () overwrites its
   rtx code and operands on every call.  */
5672 if (HAVE_conditional_trap)
5673 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5675 #ifdef INIT_TARGET_OPTABS
5676 /* Allow the target to add more libcalls or rename some, etc. */
5681 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5682 CODE. Return 0 on failure. */
/* Failure cases visible below: no conditional trap on this target, OP1
   has no mode (VOIDmode), or there is no compare pattern for the mode.  */
5685 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5686 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5688 enum machine_mode mode = GET_MODE (op1);
5689 enum insn_code icode;
5692 if (!HAVE_conditional_trap)
5695 if (mode == VOIDmode)
/* Look up the target's compare pattern for this mode.  */
5698 icode = cmp_optab->handlers[(int) mode].insn_code;
5699 if (icode == CODE_FOR_nothing)
/* Legitimize both operands for the compare pattern, emit the compare,
   then emit the trap conditioned on the shared trap_rtx (its code is
   set to CODE here; see init_optabs where trap_rtx is created).  */
5703 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5704 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5705 emit_insn (GEN_FCN (icode) (op1, op2));
5707 PUT_CODE (trap_rtx, code);
5708 insn = gen_conditional_trap (trap_rtx, tcode);
/* The emitted sequence is collected and returned to the caller.  */
5712 insn = get_insns ();
5719 #include "gt-optabs.h"