#include "langhooks.h"
#include "intl.h"
#include "tm_p.h"
+#include "target.h"
/* Decide whether a function's arguments should be processed
from first to last or from last to first.
static bool float_extend_from_mem[NUM_MACHINE_MODES][NUM_MACHINE_MODES];
-/* If a memory-to-memory move would take MOVE_RATIO or more simple
- move-instruction sequences, we will do a movstr or libcall instead. */
-
-#ifndef MOVE_RATIO
-#if defined (HAVE_movstrqi) || defined (HAVE_movstrhi) || defined (HAVE_movstrsi) || defined (HAVE_movstrdi) || defined (HAVE_movstrti)
-#define MOVE_RATIO 2
-#else
-/* If we are optimizing for space (-Os), cut down the default move ratio. */
-#define MOVE_RATIO (optimize_size ? 3 : 15)
-#endif
-#endif
-
/* This macro is used to determine whether move_by_pieces should be called
to perform a structure copy. */
#ifndef MOVE_BY_PIECES_P
(move_by_pieces_ninsns (SIZE, ALIGN) < (unsigned int) MOVE_RATIO)
#endif
-/* If a clear memory operation would take CLEAR_RATIO or more simple
- move-instruction sequences, we will do a clrstr or libcall instead. */
-
-#ifndef CLEAR_RATIO
-#if defined (HAVE_clrstrqi) || defined (HAVE_clrstrhi) || defined (HAVE_clrstrsi) || defined (HAVE_clrstrdi) || defined (HAVE_clrstrti)
-#define CLEAR_RATIO 2
-#else
-/* If we are optimizing for space, cut down the default clear ratio. */
-#define CLEAR_RATIO (optimize_size ? 3 : 15)
-#endif
-#endif
-
/* This macro is used to determine whether clear_by_pieces should be
called to clear storage. */
#ifndef CLEAR_BY_PIECES_P
rtx value, insns;
convert_optab tab;
- if (GET_MODE_BITSIZE (from_mode) < GET_MODE_BITSIZE (to_mode))
+ if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode))
tab = sext_optab;
- else if (GET_MODE_BITSIZE (from_mode) > GET_MODE_BITSIZE (to_mode))
+ else if (GET_MODE_PRECISION (from_mode) > GET_MODE_PRECISION (to_mode))
tab = trunc_optab;
else
abort ();
1, from, from_mode);
insns = get_insns ();
end_sequence ();
- emit_libcall_block (insns, to, value, gen_rtx_FLOAT_TRUNCATE (to_mode,
- from));
+ emit_libcall_block (insns, to, value,
+ tab == trunc_optab ? gen_rtx_FLOAT_TRUNCATE (to_mode,
+ from)
+ : gen_rtx_FLOAT_EXTEND (to_mode, from));
return;
}
/* A subroutine of emit_block_move.  Returns true when calling the
   block-move libcall (memcpy) cannot clobber outgoing argument slots
   that have already been set up on the stack -- NOTE(review): purpose
   inferred from the checks below and the BLOCK_OP_CALL_PARM machinery;
   confirm against emit_block_move's callers.  */
static bool
block_move_libcall_safe_for_call_parm (void)
{
+  /* If arguments are pushed on the stack, then they're safe.  */
  if (PUSH_ARGS)
    return true;
-  else
-    {
-      /* Check to see whether memcpy takes all register arguments.  */
-      static enum {
-	takes_regs_uninit, takes_regs_no, takes_regs_yes
-      } takes_regs = takes_regs_uninit;
-      switch (takes_regs)
-	{
-	case takes_regs_uninit:
-	  {
-	    CUMULATIVE_ARGS args_so_far;
-	    tree fn, arg;
-
-	    fn = emit_block_move_libcall_fn (false);
-	    INIT_CUMULATIVE_ARGS (args_so_far, TREE_TYPE (fn), NULL_RTX, 0);
-
-	    arg = TYPE_ARG_TYPES (TREE_TYPE (fn));
-	    for ( ; arg != void_list_node ; arg = TREE_CHAIN (arg))
-	      {
-		enum machine_mode mode = TYPE_MODE (TREE_VALUE (arg));
-		rtx tmp = FUNCTION_ARG (args_so_far, mode, NULL_TREE, 1);
-		if (!tmp || !REG_P (tmp))
-		  goto fail_takes_regs;
-#ifdef FUNCTION_ARG_PARTIAL_NREGS
-		if (FUNCTION_ARG_PARTIAL_NREGS (args_so_far, mode,
-						NULL_TREE, 1))
-		  goto fail_takes_regs;
+  /* If registers go on the stack anyway, any argument is sure to clobber
+     an outgoing argument.  */
+#if defined (REG_PARM_STACK_SPACE) && defined (OUTGOING_REG_PARM_STACK_SPACE)
+  {
+    tree fn = emit_block_move_libcall_fn (false);
+    (void) fn;
+    if (REG_PARM_STACK_SPACE (fn) != 0)
+      return false;
+  }
#endif
-		FUNCTION_ARG_ADVANCE (args_so_far, mode, NULL_TREE, 1);
-	      }
-	  }
-	  takes_regs = takes_regs_yes;
-	  /* FALLTHRU */
-	case takes_regs_yes:
-	  return true;
+  /* If any argument goes in memory, then it might clobber an outgoing
+     argument.  */
+  {
+    CUMULATIVE_ARGS args_so_far;
+    tree fn, arg;
-	fail_takes_regs:
-	  takes_regs = takes_regs_no;
-	  /* FALLTHRU */
-	case takes_regs_no:
-	  return false;
+    fn = emit_block_move_libcall_fn (false);
+    INIT_CUMULATIVE_ARGS (args_so_far, TREE_TYPE (fn), NULL_RTX, 0);
-	default:
-	  abort ();
-	}
-    }
+    arg = TYPE_ARG_TYPES (TREE_TYPE (fn));
+    for ( ; arg != void_list_node ; arg = TREE_CHAIN (arg))
+      {
+	enum machine_mode mode = TYPE_MODE (TREE_VALUE (arg));
+	rtx tmp = FUNCTION_ARG (args_so_far, mode, NULL_TREE, 1);
+	if (!tmp || !REG_P (tmp))
+	  return false;
+#ifdef FUNCTION_ARG_PARTIAL_NREGS
+	if (FUNCTION_ARG_PARTIAL_NREGS (args_so_far, mode,
+					NULL_TREE, 1))
+	  return false;
+#endif
+	FUNCTION_ARG_ADVANCE (args_so_far, mode, NULL_TREE, 1);
+      }
+  }
+  return true;
}
/* A subroutine of emit_block_move. Expand a movstr pattern;
/* Emit code to move a block ORIG_SRC of type TYPE to a block DST,
where DST is non-consecutive registers represented by a PARALLEL.
SSIZE represents the total size of block ORIG_SRC in bytes, or -1
- if not known. */
+ if not known. */
void
emit_group_load (rtx dst, rtx orig_src, tree type ATTRIBUTE_UNUSED, int ssize)
set of registers starting with SRCREG into TGTBLK. If TGTBLK
is null, a stack temporary is created. TGTBLK is returned.
- The primary purpose of this routine is to handle functions
- that return BLKmode structures in registers. Some machines
- (the PA for example) want to return all small structures
- in registers regardless of the structure's alignment. */
+ The purpose of this routine is to handle functions that return
+ BLKmode structures in registers. Some machines (the PA for example)
+ want to return all small structures in registers regardless of the
+ structure's alignment. */
rtx
copy_blkmode_from_reg (rtx tgtblk, rtx srcreg, tree type)
unsigned HOST_WIDE_INT bytes = int_size_in_bytes (type);
rtx src = NULL, dst = NULL;
unsigned HOST_WIDE_INT bitsize = MIN (TYPE_ALIGN (type), BITS_PER_WORD);
- unsigned HOST_WIDE_INT bitpos, xbitpos, big_endian_correction = 0;
+ unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0;
if (tgtblk == 0)
{
&& GET_MODE_SIZE (GET_MODE (srcreg)) < UNITS_PER_WORD)
srcreg = convert_to_mode (word_mode, srcreg, TREE_UNSIGNED (type));
- /* Structures whose size is not a multiple of a word are aligned
- to the least significant byte (to the right). On a BYTES_BIG_ENDIAN
- machine, this means we must skip the empty high order bytes when
- calculating the bit offset. */
- if (BYTES_BIG_ENDIAN
- && bytes % UNITS_PER_WORD)
- big_endian_correction
+ /* If the structure doesn't take up a whole number of words, see whether
+ SRCREG is padded on the left or on the right. If it's on the left,
+ set PADDING_CORRECTION to the number of bits to skip.
+
+     In most ABIs, the structure will be returned at the least significant
+     end of the register, which translates to right padding on little-endian
+     targets and left padding on big-endian targets.  The opposite
+ holds if the structure is returned at the most significant
+ end of the register. */
+ if (bytes % UNITS_PER_WORD != 0
+ && (targetm.calls.return_in_msb (type)
+ ? !BYTES_BIG_ENDIAN
+ : BYTES_BIG_ENDIAN))
+ padding_correction
= (BITS_PER_WORD - ((bytes % UNITS_PER_WORD) * BITS_PER_UNIT));
   /* Copy the structure BITSIZE bits at a time.
We could probably emit more efficient code for machines which do not use
strict alignment, but it doesn't seem worth the effort at the current
time. */
- for (bitpos = 0, xbitpos = big_endian_correction;
+ for (bitpos = 0, xbitpos = padding_correction;
bitpos < bytes * BITS_PER_UNIT;
bitpos += bitsize, xbitpos += bitsize)
{
/* We need a new source operand each time xbitpos is on a
- word boundary and when xbitpos == big_endian_correction
+ word boundary and when xbitpos == padding_correction
(the first time through). */
if (xbitpos % BITS_PER_WORD == 0
- || xbitpos == big_endian_correction)
+ || xbitpos == padding_correction)
src = operand_subword_force (srcreg, xbitpos / BITS_PER_WORD,
GET_MODE (srcreg));
rtx temp;
int used = partial * UNITS_PER_WORD;
- int offset = used % (PARM_BOUNDARY / BITS_PER_UNIT);
+ int offset;
int skip;
+ if (reg && GET_CODE (reg) == PARALLEL)
+ {
+ /* Use the size of the elt to compute offset. */
+ rtx elt = XEXP (XVECEXP (reg, 0, 0), 0);
+ used = partial * GET_MODE_SIZE (GET_MODE (elt));
+ offset = used % (PARM_BOUNDARY / BITS_PER_UNIT);
+ }
+ else
+ offset = used % (PARM_BOUNDARY / BITS_PER_UNIT);
+
if (size == 0)
abort ();
}
if (TREE_CODE (to) == COMPONENT_REF
- && TREE_READONLY (TREE_OPERAND (to, 1)))
+ && TREE_READONLY (TREE_OPERAND (to, 1))
+ /* We can't assert that a MEM won't be set more than once
+ if the component is not addressable because another
+ non-addressable component may be referenced by the same MEM. */
+ && ! (GET_CODE (to_rtx) == MEM && ! can_address_p (to)))
{
if (to_rtx == orig_to_rtx)
to_rtx = copy_rtx (to_rtx);
store_expr (tree exp, rtx target, int want_value)
{
rtx temp;
+ rtx alt_rtl = NULL_RTX;
int dont_return_target = 0;
int dont_store_target = 0;
}
else
{
- temp = expand_expr (exp, target, GET_MODE (target),
- want_value & 2 ? EXPAND_STACK_PARM : EXPAND_NORMAL);
+ temp = expand_expr_real (exp, target, GET_MODE (target),
+ (want_value & 2
+ ? EXPAND_STACK_PARM : EXPAND_NORMAL),
+ &alt_rtl);
/* Return TARGET if it's a specified hardware register.
If TARGET is a volatile mem ref, either return TARGET
or return a reg copied *from* TARGET; ANSI requires this.
|| side_effects_p (target))))
&& TREE_CODE (exp) != ERROR_MARK
&& ! dont_store_target
- /* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET,
- but TARGET is not valid memory reference, TEMP will differ
- from TARGET although it is really the same location. */
- && (TREE_CODE_CLASS (TREE_CODE (exp)) != 'd'
- || target != DECL_RTL_IF_SET (exp))
+ /* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET,
+ but TARGET is not valid memory reference, TEMP will differ
+ from TARGET although it is really the same location. */
+ && !(alt_rtl && rtx_equal_p (alt_rtl, target))
/* If there's nothing to copy, don't bother. Don't call expr_size
unless necessary, because some front-ends (C++) expr_size-hook
aborts on objects that are not supposed to be bit-copied or
highest_pow2_factor (offset));
}
- if (TREE_READONLY (field))
+ /* If the constructor has been cleared, setting RTX_UNCHANGING_P
+ on the MEM might lead to scheduling the clearing after the
+ store. */
+ if (TREE_READONLY (field) && !cleared)
{
if (GET_CODE (to_rtx) == MEM)
to_rtx = copy_rtx (to_rtx);
int const_bounds_p;
HOST_WIDE_INT minelt = 0;
HOST_WIDE_INT maxelt = 0;
+ int icode = 0;
+ rtx *vector = NULL;
+ int elt_size = 0;
+ unsigned n_elts = 0;
/* Vectors are like arrays, but the domain is stored via an array
type indirectly. */
it always will. */
domain = TYPE_DEBUG_REPRESENTATION_TYPE (type);
domain = TYPE_DOMAIN (TREE_TYPE (TYPE_FIELDS (domain)));
+ if (REG_P (target) && VECTOR_MODE_P (GET_MODE (target)))
+ {
+ enum machine_mode mode = GET_MODE (target);
+
+ icode = (int) vec_init_optab->handlers[mode].insn_code;
+ if (icode != CODE_FOR_nothing)
+ {
+ unsigned int i;
+
+ elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ n_elts = (GET_MODE_SIZE (mode) / elt_size);
+ vector = alloca (n_elts);
+ for (i = 0; i < n_elts; i++)
+ vector [i] = CONST0_RTX (GET_MODE_INNER (mode));
+ }
+ }
}
const_bounds_p = (TYPE_MIN_VALUE (domain)
need_to_clear = 1;
}
- if (need_to_clear && size > 0)
+ if (need_to_clear && size > 0 && !vector)
{
if (! cleared)
{
HOST_WIDE_INT lo, hi, count;
tree position;
+ if (vector)
+ abort ();
+
/* If the range is constant and "small", unroll the loop. */
if (const_bounds_p
&& host_integerp (lo_index, 0)
{
tree position;
+ if (vector)
+ abort ();
+
if (index == 0)
index = ssize_int (1);
xtarget = adjust_address (xtarget, mode, 0);
store_expr (value, xtarget, 0);
}
+ else if (vector)
+ {
+ int pos;
+
+ if (index != 0)
+ pos = tree_low_cst (index, 0) - minelt;
+ else
+ pos = i;
+ vector[pos] = expand_expr (value, NULL_RTX, VOIDmode, 0);
+ }
else
{
if (index != 0)
target = copy_rtx (target);
MEM_KEEP_ALIAS_SET_P (target) = 1;
}
-
store_constructor_field (target, bitsize, bitpos, mode, value,
type, cleared, get_alias_set (elttype));
-
}
}
+ if (vector)
+ {
+ emit_insn (GEN_FCN (icode) (target,
+ gen_rtx_PARALLEL (GET_MODE (target),
+ gen_rtvec_v (n_elts, vector))));
+ }
}
/* Set constructor assignments. */
case '<':
if (!safe_from_p (x, TREE_OPERAND (exp, 1), 0))
return 0;
- /* FALLTHRU */
+ /* Fall through. */
case '1':
return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
return 0;
}
}
-
-#ifdef MAX_INTEGER_COMPUTATION_MODE
-
-void
-check_max_integer_computation_mode (tree exp)
-{
- enum tree_code code;
- enum machine_mode mode;
-
- /* Strip any NOPs that don't change the mode. */
- STRIP_NOPS (exp);
- code = TREE_CODE (exp);
-
- /* We must allow conversions of constants to MAX_INTEGER_COMPUTATION_MODE. */
- if (code == NOP_EXPR
- && TREE_CODE (TREE_OPERAND (exp, 0)) == INTEGER_CST)
- return;
-
- /* First check the type of the overall operation. We need only look at
- unary, binary and relational operations. */
- if (TREE_CODE_CLASS (code) == '1'
- || TREE_CODE_CLASS (code) == '2'
- || TREE_CODE_CLASS (code) == '<')
- {
- mode = TYPE_MODE (TREE_TYPE (exp));
- if (GET_MODE_CLASS (mode) == MODE_INT
- && mode > MAX_INTEGER_COMPUTATION_MODE)
- internal_error ("unsupported wide integer operation");
- }
-
- /* Check operand of a unary op. */
- if (TREE_CODE_CLASS (code) == '1')
- {
- mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
- if (GET_MODE_CLASS (mode) == MODE_INT
- && mode > MAX_INTEGER_COMPUTATION_MODE)
- internal_error ("unsupported wide integer operation");
- }
-
- /* Check operands of a binary/comparison op. */
- if (TREE_CODE_CLASS (code) == '2' || TREE_CODE_CLASS (code) == '<')
- {
- mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)));
- if (GET_MODE_CLASS (mode) == MODE_INT
- && mode > MAX_INTEGER_COMPUTATION_MODE)
- internal_error ("unsupported wide integer operation");
-
- mode = TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 1)));
- if (GET_MODE_CLASS (mode) == MODE_INT
- && mode > MAX_INTEGER_COMPUTATION_MODE)
- internal_error ("unsupported wide integer operation");
- }
-}
-#endif
\f
/* Return the highest power of two that EXP is known to be a multiple of.
This is used in updating alignment of MEMs in array references. */
marked TARGET so that it's safe from being trashed by libcalls. We
don't want to use TARGET for anything but the final result;
Intermediate values must go elsewhere. Additionally, calls to
- emit_block_move will be flagged with BLOCK_OP_CALL_PARM. */
+ emit_block_move will be flagged with BLOCK_OP_CALL_PARM.
+
+ If EXP is a VAR_DECL whose DECL_RTL was a MEM with an invalid
+ address, and ALT_RTL is non-NULL, then *ALT_RTL is set to the
+ DECL_RTL of the VAR_DECL. *ALT_RTL is also set if EXP is a
+ COMPOUND_EXPR whose second argument is such a VAR_DECL, and so on
+ recursively. */
rtx
-expand_expr (tree exp, rtx target, enum machine_mode tmode,
- enum expand_modifier modifier)
+expand_expr_real (tree exp, rtx target, enum machine_mode tmode,
+ enum expand_modifier modifier, rtx *alt_rtl)
{
rtx op0, op1, temp;
tree type = TREE_TYPE (exp);
target = 0;
}
-#ifdef MAX_INTEGER_COMPUTATION_MODE
- /* Only check stuff here if the mode we want is different from the mode
- of the expression; if it's the same, check_max_integer_computation_mode
- will handle it. Do we really need to check this stuff at all? */
-
- if (target
- && GET_MODE (target) != mode
- && TREE_CODE (exp) != INTEGER_CST
- && TREE_CODE (exp) != PARM_DECL
- && TREE_CODE (exp) != ARRAY_REF
- && TREE_CODE (exp) != ARRAY_RANGE_REF
- && TREE_CODE (exp) != COMPONENT_REF
- && TREE_CODE (exp) != BIT_FIELD_REF
- && TREE_CODE (exp) != INDIRECT_REF
- && TREE_CODE (exp) != CALL_EXPR
- && TREE_CODE (exp) != VAR_DECL
- && TREE_CODE (exp) != RTL_EXPR)
- {
- enum machine_mode mode = GET_MODE (target);
-
- if (GET_MODE_CLASS (mode) == MODE_INT
- && mode > MAX_INTEGER_COMPUTATION_MODE)
- internal_error ("unsupported wide integer operation");
- }
-
- if (tmode != mode
- && TREE_CODE (exp) != INTEGER_CST
- && TREE_CODE (exp) != PARM_DECL
- && TREE_CODE (exp) != ARRAY_REF
- && TREE_CODE (exp) != ARRAY_RANGE_REF
- && TREE_CODE (exp) != COMPONENT_REF
- && TREE_CODE (exp) != BIT_FIELD_REF
- && TREE_CODE (exp) != INDIRECT_REF
- && TREE_CODE (exp) != VAR_DECL
- && TREE_CODE (exp) != CALL_EXPR
- && TREE_CODE (exp) != RTL_EXPR
- && GET_MODE_CLASS (tmode) == MODE_INT
- && tmode > MAX_INTEGER_COMPUTATION_MODE)
- internal_error ("unsupported wide integer operation");
-
- check_max_integer_computation_mode (exp);
-#endif
-
/* If will do cse, generate all results into pseudo registers
since 1) that allows cse to find more things
and 2) otherwise cse could produce an insn the machine
XEXP (DECL_RTL (exp), 0))
|| (flag_force_addr
&& GET_CODE (XEXP (DECL_RTL (exp), 0)) != REG)))
- temp = replace_equiv_address (DECL_RTL (exp),
- copy_rtx (XEXP (DECL_RTL (exp), 0)));
+ {
+ if (alt_rtl)
+ *alt_rtl = DECL_RTL (exp);
+ temp = replace_equiv_address (DECL_RTL (exp),
+ copy_rtx (XEXP (DECL_RTL (exp), 0)));
+ }
/* If we got something, return it. But first, set the alignment
if the address is a register. */
}
preserve_rtl_expr_result (RTL_EXPR_RTL (exp));
free_temps_for_rtl_expr (exp);
+ if (alt_rtl)
+ *alt_rtl = RTL_EXPR_ALT_RTL (exp);
return RTL_EXPR_RTL (exp);
case CONSTRUCTOR:
&& modifier != EXPAND_MEMORY
&& TREE_READONLY (array) && ! TREE_SIDE_EFFECTS (array)
&& TREE_CODE (array) == VAR_DECL && DECL_INITIAL (array)
- && TREE_CODE (DECL_INITIAL (array)) != ERROR_MARK)
+ && TREE_CODE (DECL_INITIAL (array)) != ERROR_MARK
+ && targetm.binds_local_p (array))
{
if (TREE_CODE (index) == INTEGER_CST)
{
(TYPE_QUALS (TREE_TYPE (tem))
| TYPE_QUAL_CONST));
rtx memloc = assign_temp (nt, 1, 1, 1);
-
+
emit_move_insn (memloc, op0);
op0 = memloc;
}
offset_rtx = convert_to_mode (ptr_mode, offset_rtx, 0);
#endif
- /* A constant address in OP0 can have VOIDmode, we must not try
- to call force_reg for that case. Avoid that case. */
- if (GET_CODE (op0) == MEM
- && GET_MODE (op0) == BLKmode
+ if (GET_MODE (op0) == BLKmode
+ /* A constant address in OP0 can have VOIDmode, we must
+ not try to call force_reg in that case. */
&& GET_MODE (XEXP (op0, 0)) != VOIDmode
&& bitsize != 0
&& (bitpos % bitsize) == 0
fetch it as a bit field. */
|| (mode1 != BLKmode
&& (((TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
- || (bitpos % GET_MODE_ALIGNMENT (mode) != 0))
+ || (bitpos % GET_MODE_ALIGNMENT (mode) != 0)
+ || (GET_CODE (op0) == MEM
+ && (MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode1)
+ || (bitpos % GET_MODE_ALIGNMENT (mode1) != 0))))
&& ((modifier == EXPAND_CONST_ADDRESS
|| modifier == EXPAND_INITIALIZER)
? STRICT_ALIGNMENT
if (DECL_BUILT_IN_CLASS (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
== BUILT_IN_FRONTEND)
return (*lang_hooks.expand_expr) (exp, original_target,
- tmode, modifier);
+ tmode, modifier,
+ alt_rtl);
else
return expand_builtin (exp, target, subtarget, tmode, ignore);
}
}
if (target == 0)
- target = assign_temp (type, 0, 1, 1);
+ {
+ if (TYPE_MODE (type) != BLKmode)
+ target = gen_reg_rtx (TYPE_MODE (type));
+ else
+ target = assign_temp (type, 0, 1, 1);
+ }
if (GET_CODE (target) == MEM)
/* Store data into beginning of memory target. */
op0 = expand_expr (TREE_OPERAND (exp, 0), subtarget, VOIDmode,
EXPAND_SUM);
- /* If we knew for certain that this is arithmetic for an array
- reference, and we knew the bounds of the array, then we could
- apply the distributive law across (PLUS X C) for constant C.
- Without such knowledge, we risk overflowing the computation
- when both X and C are large, but X+C isn't. */
- /* ??? Could perhaps special-case EXP being unsigned and C being
- positive. In that case we are certain that X+C is no smaller
- than X and so the transformed expression will overflow iff the
- original would have. */
-
if (GET_CODE (op0) != REG)
op0 = force_operand (op0, NULL_RTX);
if (GET_CODE (op0) != REG)
case COMPOUND_EXPR:
expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode, 0);
emit_queue ();
- return expand_expr (TREE_OPERAND (exp, 1),
- (ignore ? const0_rtx : target),
- VOIDmode, modifier);
+ return expand_expr_real (TREE_OPERAND (exp, 1),
+ (ignore ? const0_rtx : target),
+ VOIDmode, modifier, alt_rtl);
case COND_EXPR:
/* If we would have a "singleton" (see below) were it not for a
abort ();
default:
- return (*lang_hooks.expand_expr) (exp, original_target, tmode, modifier);
+ return (*lang_hooks.expand_expr) (exp, original_target, tmode, modifier,
+ alt_rtl);
}
/* Here to do an ordinary binary operator, generating an instruction
{
/* We have a true reference to the value in OP0.
If there is an insn to add or subtract in this mode, queue it.
- Queueing the increment insn avoids the register shuffling
+ Queuing the increment insn avoids the register shuffling
that often results if we must increment now and first save
the old value for subsequent use. */
{
tree type = (*lang_hooks.types.type_for_mode) (mode, unsignedp);
return expand_expr (fold_single_bit_test (code == NE ? NE_EXPR : EQ_EXPR,
- arg0, arg1, type),
+ arg0, arg1, type),
target, VOIDmode, EXPAND_NORMAL);
}
if (mode != Pmode)
index = convert_to_mode (Pmode, index, 1);
- /* Don't let a MEM slip thru, because then INDEX that comes
+ /* Don't let a MEM slip through, because then INDEX that comes
out of PIC_CASE_VECTOR_ADDRESS won't be a valid address,
and break_out_memory_refs will go to work on it and mess it up. */
#ifdef PIC_CASE_VECTOR_ADDRESS