/* Convert tree expression to rtl instructions, for GNU compiler.
- Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
- 2012 Free Software Foundation, Inc.
+ Copyright (C) 1988-2013 Free Software Foundation, Inc.
This file is part of GCC.
#include "libfuncs.h"
#include "recog.h"
#include "reload.h"
-#include "output.h"
#include "typeclass.h"
#include "toplev.h"
#include "langhooks.h"
#include "intl.h"
#include "tm_p.h"
#include "tree-iterator.h"
-#include "tree-pass.h"
-#include "tree-flow.h"
+#include "tree-ssa.h"
#include "target.h"
#include "common/common-target.h"
#include "timevar.h"
#include "df.h"
#include "diagnostic.h"
-#include "ssaexpand.h"
+#include "tree-outof-ssa.h"
#include "target-globals.h"
#include "params.h"
int reverse;
};
-static void move_by_pieces_1 (rtx (*) (rtx, ...), enum machine_mode,
+static void move_by_pieces_1 (insn_gen_fn, machine_mode,
struct move_by_pieces_d *);
static bool block_move_libcall_safe_for_call_parm (void);
static bool emit_block_move_via_movmem (rtx, rtx, rtx, unsigned, unsigned, HOST_WIDE_INT);
static rtx clear_by_pieces_1 (void *, HOST_WIDE_INT, enum machine_mode);
static void clear_by_pieces (rtx, unsigned HOST_WIDE_INT, unsigned int);
static void store_by_pieces_1 (struct store_by_pieces_d *, unsigned int);
-static void store_by_pieces_2 (rtx (*) (rtx, ...), enum machine_mode,
+static void store_by_pieces_2 (insn_gen_fn, machine_mode,
struct store_by_pieces_d *);
static tree clear_storage_libcall_fn (int);
static rtx compress_float_constant (rtx, rtx);
static rtx get_subtarget (rtx);
static void store_constructor_field (rtx, unsigned HOST_WIDE_INT,
HOST_WIDE_INT, enum machine_mode,
- tree, tree, int, alias_set_type);
+ tree, int, alias_set_type);
static void store_constructor (tree, rtx, int, HOST_WIDE_INT);
static rtx store_field (rtx, HOST_WIDE_INT, HOST_WIDE_INT,
unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
- enum machine_mode,
- tree, tree, alias_set_type, bool);
+ enum machine_mode, tree, alias_set_type, bool);
static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (const_tree, const_tree);
#ifdef PUSH_ROUNDING
static void emit_single_push_insn (enum machine_mode, rtx, tree);
#endif
-static void do_tablejump (rtx, enum machine_mode, rtx, rtx, rtx);
+static void do_tablejump (rtx, enum machine_mode, rtx, rtx, rtx, int);
static rtx const_vector_from_tree (tree);
static void write_complex_part (rtx, rtx, bool);
(move_by_pieces_ninsns (SIZE, ALIGN, STORE_MAX_PIECES + 1) \
< (unsigned int) MOVE_RATIO (optimize_insn_for_speed_p ()))
#endif
-
-/* SLOW_UNALIGNED_ACCESS is nonzero if unaligned accesses are very slow. */
-
-#ifndef SLOW_UNALIGNED_ACCESS
-#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) STRICT_ALIGNMENT
-#endif
\f
/* This is run to set up which modes can be used
directly in memory and to initialize the block move optab. It is run
/* No special multiword conversion insn; do it by hand. */
start_sequence ();
- /* Since we will turn this into a no conflict block, we must ensure
- that the source does not overlap the target. */
+ /* Since we will turn this into a no conflict block, we must ensure that
+ the source does not overlap the target, so force it into an isolated
+ register when that might be the case.  Likewise for any MEM input, since the
+ conversion sequence might require several references to it and we
+ must ensure we're getting the same value every time. */
- if (reg_overlap_mentioned_p (to, from))
+ if (MEM_P (from) || reg_overlap_mentioned_p (to, from))
from = force_reg (from_mode, from);
/* Get a copy of FROM widened to a word, if necessary. */
if (!((MEM_P (from)
&& ! MEM_VOLATILE_P (from)
&& direct_load[(int) to_mode]
- && ! mode_dependent_address_p (XEXP (from, 0)))
+ && ! mode_dependent_address_p (XEXP (from, 0),
+ MEM_ADDR_SPACE (from)))
|| REG_P (from)
|| GET_CODE (from) == SUBREG))
from = force_reg (from_mode, from);
if (!((MEM_P (from)
&& ! MEM_VOLATILE_P (from)
&& direct_load[(int) to_mode]
- && ! mode_dependent_address_p (XEXP (from, 0)))
+ && ! mode_dependent_address_p (XEXP (from, 0),
+ MEM_ADDR_SPACE (from)))
|| REG_P (from)
|| GET_CODE (from) == SUBREG))
from = force_reg (from_mode, from);
make the high-order word of the constant zero, not all ones. */
if (unsignedp && GET_MODE_CLASS (mode) == MODE_INT
- && GET_MODE_BITSIZE (mode) == 2 * HOST_BITS_PER_WIDE_INT
+ && GET_MODE_BITSIZE (mode) == HOST_BITS_PER_DOUBLE_INT
&& CONST_INT_P (x) && INTVAL (x) < 0)
{
- double_int val = uhwi_to_double_int (INTVAL (x));
+ double_int val = double_int::from_uhwi (INTVAL (x));
/* We need to zero extend VAL. */
if (oldmode != VOIDmode)
- val = double_int_zext (val, GET_MODE_BITSIZE (oldmode));
+ val = val.zext (GET_MODE_BITSIZE (oldmode));
return immed_double_int_const (val, mode);
}
&& GET_MODE_PRECISION (mode) <= HOST_BITS_PER_WIDE_INT)
|| (GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_CLASS (oldmode) == MODE_INT
- && (GET_CODE (x) == CONST_DOUBLE
+ && (CONST_DOUBLE_AS_INT_P (x)
|| (GET_MODE_PRECISION (mode) <= GET_MODE_PRECISION (oldmode)
&& ((MEM_P (x) && ! MEM_VOLATILE_P (x)
&& direct_load[(int) mode])
succeed. */
int
-can_move_by_pieces (unsigned HOST_WIDE_INT len,
+can_move_by_pieces (unsigned HOST_WIDE_INT len ATTRIBUTE_UNUSED,
unsigned int align ATTRIBUTE_UNUSED)
{
return MOVE_BY_PIECES_P (len, align);
/* First move what we can in the largest integer mode, then go to
successively smaller modes. */
- while (max_size > 1)
+ while (max_size > 1 && data.len > 0)
{
enum machine_mode mode = widest_int_mode_for_size (max_size);
align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
- while (max_size > 1)
+ while (max_size > 1 && l > 0)
{
enum machine_mode mode;
enum insn_code icode;
to make a move insn for that mode. DATA has all the other info. */
static void
-move_by_pieces_1 (rtx (*genfun) (rtx, ...), enum machine_mode mode,
+move_by_pieces_1 (insn_gen_fn genfun, machine_mode mode,
struct move_by_pieces_d *data)
{
unsigned int size = GET_MODE_SIZE (mode);
if (HAVE_PRE_DECREMENT && data->explicit_inc_to < 0)
emit_insn (gen_add2_insn (data->to_addr,
- GEN_INT (-(HOST_WIDE_INT)size)));
+ gen_int_mode (-(HOST_WIDE_INT) size,
+ GET_MODE (data->to_addr))));
if (HAVE_PRE_DECREMENT && data->explicit_inc_from < 0)
emit_insn (gen_add2_insn (data->from_addr,
- GEN_INT (-(HOST_WIDE_INT)size)));
+ gen_int_mode (-(HOST_WIDE_INT) size,
+ GET_MODE (data->from_addr))));
if (data->to)
emit_insn ((*genfun) (to1, from1));
}
if (HAVE_POST_INCREMENT && data->explicit_inc_to > 0)
- emit_insn (gen_add2_insn (data->to_addr, GEN_INT (size)));
+ emit_insn (gen_add2_insn (data->to_addr,
+ gen_int_mode (size,
+ GET_MODE (data->to_addr))));
if (HAVE_POST_INCREMENT && data->explicit_inc_from > 0)
- emit_insn (gen_add2_insn (data->from_addr, GEN_INT (size)));
+ emit_insn (gen_add2_insn (data->from_addr,
+ gen_int_mode (size,
+ GET_MODE (data->from_addr))));
if (! data->reverse)
data->offset += size;
emit_label (top_label);
tmp = convert_modes (x_addr_mode, iter_mode, iter, true);
- x_addr = gen_rtx_PLUS (x_addr_mode, x_addr, tmp);
+ x_addr = simplify_gen_binary (PLUS, x_addr_mode, x_addr, tmp);
if (x_addr_mode != y_addr_mode)
tmp = convert_modes (y_addr_mode, iter_mode, iter, true);
- y_addr = gen_rtx_PLUS (y_addr_mode, y_addr, tmp);
+ y_addr = simplify_gen_binary (PLUS, y_addr_mode, y_addr, tmp);
x = change_address (x, QImode, x_addr);
y = change_address (y, QImode, y_addr);
emit_label (cmp_label);
emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
- true, top_label);
+ true, top_label, REG_BR_PROB_BASE * 90 / 100);
}
\f
/* Copy all or part of a value X into registers starting at REGNO.
emit_move_insn (orig_dst, dst);
}
-/* Generate code to copy a BLKmode object of TYPE out of a
- set of registers starting with SRCREG into TGTBLK. If TGTBLK
- is null, a stack temporary is created. TGTBLK is returned.
-
- The purpose of this routine is to handle functions that return
- BLKmode structures in registers. Some machines (the PA for example)
- want to return all small structures in registers regardless of the
- structure's alignment. */
+/* Return a form of X that does not use a PARALLEL. TYPE is the type
+ of the value stored in X. */
rtx
-copy_blkmode_from_reg (rtx tgtblk, rtx srcreg, tree type)
+maybe_emit_group_store (rtx x, tree type)
+{
+ enum machine_mode mode = TYPE_MODE (type);
+ gcc_checking_assert (GET_MODE (x) == VOIDmode || GET_MODE (x) == mode);
+ if (GET_CODE (x) == PARALLEL)
+ {
+ rtx result = gen_reg_rtx (mode);
+ emit_group_store (result, x, type, int_size_in_bytes (type));
+ return result;
+ }
+ return x;
+}
+
+/* Copy a BLKmode object of TYPE out of a register SRCREG into TARGET.
+
+ This is used on targets that return BLKmode values in registers. */
+
+void
+copy_blkmode_from_reg (rtx target, rtx srcreg, tree type)
{
unsigned HOST_WIDE_INT bytes = int_size_in_bytes (type);
rtx src = NULL, dst = NULL;
unsigned HOST_WIDE_INT bitsize = MIN (TYPE_ALIGN (type), BITS_PER_WORD);
unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0;
+ enum machine_mode mode = GET_MODE (srcreg);
+ enum machine_mode tmode = GET_MODE (target);
enum machine_mode copy_mode;
- if (tgtblk == 0)
- {
- tgtblk = assign_temp (build_qualified_type (type,
- (TYPE_QUALS (type)
- | TYPE_QUAL_CONST)),
- 1, 1);
- preserve_temp_slots (tgtblk);
- }
-
- /* This code assumes srcreg is at least a full word. If it isn't, copy it
- into a new pseudo which is a full word. */
-
- if (GET_MODE (srcreg) != BLKmode
- && GET_MODE_SIZE (GET_MODE (srcreg)) < UNITS_PER_WORD)
- srcreg = convert_to_mode (word_mode, srcreg, TYPE_UNSIGNED (type));
+ /* BLKmode registers created in the back-end shouldn't have survived. */
+ gcc_assert (mode != BLKmode);
/* If the structure doesn't take up a whole number of words, see whether
SRCREG is padded on the left or on the right. If it's on the left,
padding_correction
= (BITS_PER_WORD - ((bytes % UNITS_PER_WORD) * BITS_PER_UNIT));
+ /* We can use a single move if we have an exact mode for the size. */
+ else if (MEM_P (target)
+ && (!SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (target))
+ || MEM_ALIGN (target) >= GET_MODE_ALIGNMENT (mode))
+ && bytes == GET_MODE_SIZE (mode))
+ {
+ emit_move_insn (adjust_address (target, mode, 0), srcreg);
+ return;
+ }
+
+ /* And if we additionally have the same mode for a register. */
+ else if (REG_P (target)
+ && GET_MODE (target) == mode
+ && bytes == GET_MODE_SIZE (mode))
+ {
+ emit_move_insn (target, srcreg);
+ return;
+ }
+
+ /* This code assumes srcreg is at least a full word. If it isn't, copy it
+ into a new pseudo which is a full word. */
+ if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
+ {
+ srcreg = convert_to_mode (word_mode, srcreg, TYPE_UNSIGNED (type));
+ mode = word_mode;
+ }
+
/* Copy the structure BITSIZE bits at a time. If the target lives in
memory, take care of not reading/writing past its end by selecting
a copy mode suited to BITSIZE. This should always be possible given
how it is computed.
+ If the target lives in a register, make sure not to select a copy mode
+ larger than the mode of the register.
+
We could probably emit more efficient code for machines which do not use
strict alignment, but it doesn't seem worth the effort at the current
time. */
copy_mode = word_mode;
- if (MEM_P (tgtblk))
+ if (MEM_P (target))
{
enum machine_mode mem_mode = mode_for_size (bitsize, MODE_INT, 1);
if (mem_mode != BLKmode)
copy_mode = mem_mode;
}
+ else if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
+ copy_mode = tmode;
for (bitpos = 0, xbitpos = padding_correction;
bitpos < bytes * BITS_PER_UNIT;
/* We need a new source operand each time xbitpos is on a
word boundary and when xbitpos == padding_correction
(the first time through). */
- if (xbitpos % BITS_PER_WORD == 0
- || xbitpos == padding_correction)
- src = operand_subword_force (srcreg, xbitpos / BITS_PER_WORD,
- GET_MODE (srcreg));
+ if (xbitpos % BITS_PER_WORD == 0 || xbitpos == padding_correction)
+ src = operand_subword_force (srcreg, xbitpos / BITS_PER_WORD, mode);
/* We need a new destination operand each time bitpos is on
a word boundary. */
- if (bitpos % BITS_PER_WORD == 0)
- dst = operand_subword (tgtblk, bitpos / BITS_PER_WORD, 1, BLKmode);
+ if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
+ dst = target;
+ else if (bitpos % BITS_PER_WORD == 0)
+ dst = operand_subword (target, bitpos / BITS_PER_WORD, 1, tmode);
/* Use xbitpos for the source extraction (right justified) and
bitpos for the destination store (left justified). */
xbitpos % BITS_PER_WORD, 1, false,
NULL_RTX, copy_mode, copy_mode));
}
-
- return tgtblk;
}
/* Copy BLKmode value SRC into a register of mode MODE. Return the
{
l = len;
max_size = STORE_MAX_PIECES + 1;
- while (max_size > 1)
+ while (max_size > 1 && l > 0)
{
mode = widest_int_mode_for_size (max_size);
/* First store what we can in the largest integer mode, then go to
successively smaller modes. */
- while (max_size > 1)
+ while (max_size > 1 && data->len > 0)
{
enum machine_mode mode = widest_int_mode_for_size (max_size);
to make a move insn for that mode. DATA has all the other info. */
static void
-store_by_pieces_2 (rtx (*genfun) (rtx, ...), enum machine_mode mode,
+store_by_pieces_2 (insn_gen_fn genfun, machine_mode mode,
struct store_by_pieces_d *data)
{
unsigned int size = GET_MODE_SIZE (mode);
if (HAVE_PRE_DECREMENT && data->explicit_inc_to < 0)
emit_insn (gen_add2_insn (data->to_addr,
- GEN_INT (-(HOST_WIDE_INT) size)));
+ gen_int_mode (-(HOST_WIDE_INT) size,
+ GET_MODE (data->to_addr))));
cst = (*data->constfun) (data->constfundata, data->offset, mode);
emit_insn ((*genfun) (to1, cst));
if (HAVE_POST_INCREMENT && data->explicit_inc_to > 0)
- emit_insn (gen_add2_insn (data->to_addr, GEN_INT (size)));
+ emit_insn (gen_add2_insn (data->to_addr,
+ gen_int_mode (size,
+ GET_MODE (data->to_addr))));
if (! data->reverse)
data->offset += size;
/* Do not use anti_adjust_stack, since we don't want to update
stack_pointer_delta. */
temp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
- GEN_INT (adjust), stack_pointer_rtx,
+ gen_int_mode (adjust, Pmode), stack_pointer_rtx,
0, OPTAB_LIB_WIDEN);
if (temp != stack_pointer_rtx)
emit_move_insn (stack_pointer_rtx, temp);
if (push_operand (x, mode))
return emit_move_complex_push (mode, x, y);
- /* See if we can coerce the target into moving both values at once. */
-
- /* Move floating point as parts. */
+ /* See if we can coerce the target into moving both values at once, except
+ for floating point where we favor moving as parts if this is easy. */
if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
- && optab_handler (mov_optab, GET_MODE_INNER (mode)) != CODE_FOR_nothing)
+ && optab_handler (mov_optab, GET_MODE_INNER (mode)) != CODE_FOR_nothing
+ && !(REG_P (x)
+ && HARD_REGISTER_P (x)
+ && hard_regno_nregs[REGNO(x)][mode] == 1)
+ && !(REG_P (y)
+ && HARD_REGISTER_P (y)
+ && hard_regno_nregs[REGNO(y)][mode] == 1))
try_int = false;
/* Not possible if the values are inherently not adjacent. */
else if (GET_CODE (x) == CONCAT || GET_CODE (y) == CONCAT)
fits within a HOST_WIDE_INT. */
if (!CONSTANT_P (y) || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
{
- rtx ret = emit_move_via_integer (mode, x, y, false);
+ rtx ret = emit_move_via_integer (mode, x, y, lra_in_progress);
+
if (ret)
- return ret;
+ {
+ if (! lra_in_progress || recog (PATTERN (ret), ret, 0) >= 0)
+ return ret;
+ }
}
return emit_move_multi_word (mode, x, y);
{
temp = copy_to_mode_reg (Pmode, size);
if (extra != 0)
- temp = expand_binop (Pmode, add_optab, temp, GEN_INT (extra),
+ temp = expand_binop (Pmode, add_optab, temp,
+ gen_int_mode (extra, Pmode),
temp, 0, OPTAB_LIB_WIDEN);
anti_adjust_stack (temp);
}
add_reg_note (insn, REG_ARGS_SIZE, GEN_INT (args_size));
#ifdef STACK_GROWS_DOWNWARD
- this_delta = -this_delta;
+ this_delta = -(unsigned HOST_WIDE_INT) this_delta;
#endif
args_size -= this_delta;
}
add_optab,
#endif
stack_pointer_rtx,
- GEN_INT (rounded_size),
+ gen_int_mode (rounded_size, Pmode),
NULL_RTX, 0, OPTAB_LIB_WIDEN));
offset = (HOST_WIDE_INT) padding_size;
previous value. */
offset -= (HOST_WIDE_INT) rounded_size;
#endif
- dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (offset));
+ dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+ gen_int_mode (offset, Pmode));
}
else
{
#ifdef STACK_GROWS_DOWNWARD
/* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */
dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- GEN_INT (-(HOST_WIDE_INT) rounded_size));
+ gen_int_mode (-(HOST_WIDE_INT) rounded_size,
+ Pmode));
#else
/* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */
dest_addr = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
- GEN_INT (rounded_size));
+ gen_int_mode (rounded_size, Pmode));
#endif
dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
}
size = GEN_INT (INTVAL (size) - used);
else
size = expand_binop (GET_MODE (size), sub_optab, size,
- GEN_INT (used), NULL_RTX, 0,
- OPTAB_LIB_WIDEN);
+ gen_int_mode (used, GET_MODE (size)),
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
}
/* Get the address of the stack space.
binop = code == BIT_IOR_EXPR ? ior_optab : xor_optab;
if (bitpos + bitsize != str_bitsize)
{
- rtx mask = GEN_INT (((unsigned HOST_WIDE_INT) 1 << bitsize) - 1);
+ rtx mask = gen_int_mode (((unsigned HOST_WIDE_INT) 1 << bitsize) - 1,
+ str_mode);
value = expand_and (str_mode, value, mask, NULL_RTX);
}
value = expand_shift (LSHIFT_EXPR, str_mode, value, bitpos, NULL_RTX, 1);
*bitend = *bitstart + tree_low_cst (DECL_SIZE (repr), 1) - 1;
}
+/* Returns true if ADDR is an ADDR_EXPR of a DECL that does not reside
+ in memory and has non-BLKmode. DECL_RTL must not be a MEM; if
+ DECL_RTL was not set yet, return NORTL. */
+
+static inline bool
+addr_expr_of_non_mem_decl_p_1 (tree addr, bool nortl)
+{
+ if (TREE_CODE (addr) != ADDR_EXPR)
+ return false;
+
+ tree base = TREE_OPERAND (addr, 0);
+
+ if (!DECL_P (base)
+ || TREE_ADDRESSABLE (base)
+ || DECL_MODE (base) == BLKmode)
+ return false;
+
+ if (!DECL_RTL_SET_P (base))
+ return nortl;
+
+ return (!MEM_P (DECL_RTL (base)));
+}
+
/* Returns true if the MEM_REF REF refers to an object that does not
reside in memory and has non-BLKmode. */
-static bool
+static inline bool
mem_ref_refers_to_non_mem_p (tree ref)
{
tree base = TREE_OPERAND (ref, 0);
- if (TREE_CODE (base) != ADDR_EXPR)
- return false;
- base = TREE_OPERAND (base, 0);
- return (DECL_P (base)
- && !TREE_ADDRESSABLE (base)
- && DECL_MODE (base) != BLKmode
- && DECL_RTL_SET_P (base)
- && !MEM_P (DECL_RTL (base)));
+ return addr_expr_of_non_mem_decl_p_1 (base, false);
+}
+
+/* Return TRUE iff OP is an ADDR_EXPR of a DECL that's not
+ addressable. This is very much like mem_ref_refers_to_non_mem_p,
+ but instead of the MEM_REF, it takes its base, and it doesn't
+ assume a DECL is in memory just because its RTL is not set yet. */
+
+bool
+addr_expr_of_non_mem_decl_p (tree op)
+{
+ return addr_expr_of_non_mem_decl_p_1 (op, true);
}
/* Expand an assignment that stores the value of FROM into TO. If NONTEMPORAL
|| TREE_CODE (to) == TARGET_MEM_REF)
&& mode != BLKmode
&& !mem_ref_refers_to_non_mem_p (to)
- && ((align = get_object_or_type_alignment (to))
+ && ((align = get_object_alignment (to))
< GET_MODE_ALIGNMENT (mode))
&& (((icode = optab_handler (movmisalign_optab, mode))
!= CODE_FOR_nothing)
int unsignedp;
int volatilep = 0;
tree tem;
- bool misalignp;
- rtx mem = NULL_RTX;
push_temp_slots ();
tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1,
&& DECL_BIT_FIELD_TYPE (TREE_OPERAND (to, 1)))
get_bit_range (&bitregion_start, &bitregion_end, to, &bitpos, &offset);
- /* If we are going to use store_bit_field and extract_bit_field,
- make sure to_rtx will be safe for multiple use. */
- mode = TYPE_MODE (TREE_TYPE (tem));
- if (TREE_CODE (tem) == MEM_REF
- && mode != BLKmode
- && ((align = get_object_or_type_alignment (tem))
- < GET_MODE_ALIGNMENT (mode))
- && ((icode = optab_handler (movmisalign_optab, mode))
- != CODE_FOR_nothing))
- {
- struct expand_operand ops[2];
-
- misalignp = true;
- to_rtx = gen_reg_rtx (mode);
- mem = expand_expr (tem, NULL_RTX, VOIDmode, EXPAND_WRITE);
-
- /* If the misaligned store doesn't overwrite all bits, perform
- rmw cycle on MEM. */
- if (bitsize != GET_MODE_BITSIZE (mode))
- {
- create_input_operand (&ops[0], to_rtx, mode);
- create_fixed_operand (&ops[1], mem);
- /* The movmisalign<mode> pattern cannot fail, else the assignment
- would silently be omitted. */
- expand_insn (icode, 2, ops);
-
- mem = copy_rtx (mem);
- }
- }
- else
- {
- misalignp = false;
- to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, EXPAND_WRITE);
- }
+ to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, EXPAND_WRITE);
/* If the bitfield is volatile, we want to access it in the
field's mode, not the computed mode.
else if (bitpos + bitsize <= mode_bitsize / 2)
result = store_field (XEXP (to_rtx, 0), bitsize, bitpos,
bitregion_start, bitregion_end,
- mode1, from, TREE_TYPE (tem),
+ mode1, from,
get_alias_set (to), nontemporal);
else if (bitpos >= mode_bitsize / 2)
result = store_field (XEXP (to_rtx, 1), bitsize,
bitpos - mode_bitsize / 2,
bitregion_start, bitregion_end,
mode1, from,
- TREE_TYPE (tem), get_alias_set (to),
- nontemporal);
+ get_alias_set (to), nontemporal);
else if (bitpos == 0 && bitsize == mode_bitsize)
{
rtx from_rtx;
result = store_field (temp, bitsize, bitpos,
bitregion_start, bitregion_end,
mode1, from,
- TREE_TYPE (tem), get_alias_set (to),
- nontemporal);
+ get_alias_set (to), nontemporal);
emit_move_insn (XEXP (to_rtx, 0), read_complex_part (temp, false));
emit_move_insn (XEXP (to_rtx, 1), read_complex_part (temp, true));
}
/* If the field is at offset zero, we could have been given the
DECL_RTX of the parent struct. Don't munge it. */
to_rtx = shallow_copy_rtx (to_rtx);
-
set_mem_attributes_minus_bitpos (to_rtx, to, 0, bitpos);
-
- /* Deal with volatile and readonly fields. The former is only
- done for MEM. Also set MEM_KEEP_ALIAS_SET_P if needed. */
if (volatilep)
MEM_VOLATILE_P (to_rtx) = 1;
- if (component_uses_parent_alias_set (to))
- MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
}
if (optimize_bitfield_assignment_op (bitsize, bitpos,
result = store_field (to_rtx, bitsize, bitpos,
bitregion_start, bitregion_end,
mode1, from,
- TREE_TYPE (tem), get_alias_set (to),
- nontemporal);
- }
-
- if (misalignp)
- {
- struct expand_operand ops[2];
-
- create_fixed_operand (&ops[0], mem);
- create_input_operand (&ops[1], to_rtx, mode);
- /* The movmisalign<mode> pattern cannot fail, else the assignment
- would silently be omitted. */
- expand_insn (icode, 2, ops);
+ get_alias_set (to), nontemporal);
}
if (result)
/* Handle calls that return values in multiple non-contiguous locations.
The Irix 6 ABI has examples of this. */
if (GET_CODE (to_rtx) == PARALLEL)
- emit_group_load (to_rtx, value, TREE_TYPE (from),
- int_size_in_bytes (TREE_TYPE (from)));
+ {
+ if (GET_CODE (value) == PARALLEL)
+ emit_group_move (to_rtx, value);
+ else
+ emit_group_load (to_rtx, value, TREE_TYPE (from),
+ int_size_in_bytes (TREE_TYPE (from)));
+ }
+ else if (GET_CODE (value) == PARALLEL)
+ emit_group_store (to_rtx, value, TREE_TYPE (from),
+ int_size_in_bytes (TREE_TYPE (from)));
else if (GET_MODE (to_rtx) == BLKmode)
- emit_block_move (to_rtx, value, expr_size (from), BLOCK_OP_NORMAL);
+ {
+ /* Handle calls that return BLKmode values in registers. */
+ if (REG_P (value))
+ copy_blkmode_from_reg (to_rtx, value, TREE_TYPE (from));
+ else
+ emit_block_move (to_rtx, value, expr_size (from), BLOCK_OP_NORMAL);
+ }
else
{
if (POINTER_TYPE_P (TREE_TYPE (to)))
rtx temp;
push_temp_slots ();
- if (REG_P (to_rtx) && TYPE_MODE (TREE_TYPE (from)) == BLKmode)
+
+ /* If the source is itself a return value, it still is in a pseudo at
+ this point so we can move it back to the return register directly. */
+ if (REG_P (to_rtx)
+ && TYPE_MODE (TREE_TYPE (from)) == BLKmode
+ && TREE_CODE (from) != CALL_EXPR)
temp = copy_blkmode_to_reg (GET_MODE (to_rtx), from);
else
temp = expand_expr (from, NULL_RTX, GET_MODE (to_rtx), EXPAND_NORMAL);
+ /* Handle calls that return values in multiple non-contiguous locations.
+ The Irix 6 ABI has examples of this. */
if (GET_CODE (to_rtx) == PARALLEL)
- emit_group_load (to_rtx, temp, TREE_TYPE (from),
- int_size_in_bytes (TREE_TYPE (from)));
+ {
+ if (GET_CODE (temp) == PARALLEL)
+ emit_group_move (to_rtx, temp);
+ else
+ emit_group_load (to_rtx, temp, TREE_TYPE (from),
+ int_size_in_bytes (TREE_TYPE (from)));
+ }
else if (temp)
emit_move_insn (to_rtx, temp);
{
rtx temp;
rtx alt_rtl = NULL_RTX;
- location_t loc = EXPR_LOCATION (exp);
+ location_t loc = curr_insn_location ();
if (VOID_TYPE_P (TREE_TYPE (exp)))
{
supposed to be bit-copied or bit-initialized. */
&& expr_size (exp) != const0_rtx)
{
- if (GET_MODE (temp) != GET_MODE (target)
- && GET_MODE (temp) != VOIDmode)
+ if (GET_MODE (temp) != GET_MODE (target) && GET_MODE (temp) != VOIDmode)
{
- int unsignedp = TYPE_UNSIGNED (TREE_TYPE (exp));
- if (GET_MODE (target) == BLKmode
- && GET_MODE (temp) == BLKmode)
- emit_block_move (target, temp, expr_size (exp),
- (call_param_p
- ? BLOCK_OP_CALL_PARM
- : BLOCK_OP_NORMAL));
- else if (GET_MODE (target) == BLKmode)
- store_bit_field (target, INTVAL (expr_size (exp)) * BITS_PER_UNIT,
- 0, 0, 0, GET_MODE (temp), temp);
+ if (GET_MODE (target) == BLKmode)
+ {
+ /* Handle calls that return BLKmode values in registers. */
+ if (REG_P (temp) && TREE_CODE (exp) == CALL_EXPR)
+ copy_blkmode_from_reg (target, temp, TREE_TYPE (exp));
+ else
+ store_bit_field (target,
+ INTVAL (expr_size (exp)) * BITS_PER_UNIT,
+ 0, 0, 0, GET_MODE (temp), temp);
+ }
else
- convert_move (target, temp, unsignedp);
+ convert_move (target, temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
}
else if (GET_MODE (temp) == BLKmode && TREE_CODE (exp) == STRING_CST)
/* Handle calls that return values in multiple non-contiguous locations.
The Irix 6 ABI has examples of this. */
else if (GET_CODE (target) == PARALLEL)
- emit_group_load (target, temp, TREE_TYPE (exp),
- int_size_in_bytes (TREE_TYPE (exp)));
+ {
+ if (GET_CODE (temp) == PARALLEL)
+ emit_group_move (target, temp);
+ else
+ emit_group_load (target, temp, TREE_TYPE (exp),
+ int_size_in_bytes (TREE_TYPE (exp)));
+ }
+ else if (GET_CODE (temp) == PARALLEL)
+ emit_group_store (target, temp, TREE_TYPE (exp),
+ int_size_in_bytes (TREE_TYPE (exp)));
else if (GET_MODE (temp) == BLKmode)
emit_block_move (target, temp, expr_size (exp),
(call_param_p
? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
- else if (nontemporal
- && emit_storent_insn (target, temp))
- /* If we managed to emit a nontemporal store, there is nothing else to
- do. */
+ /* If we emit a nontemporal store, there is nothing else to do. */
+ else if (nontemporal && emit_storent_insn (target, temp))
;
else
{
{
HOST_WIDE_INT mult = 1;
- if (TREE_CODE (purpose) == RANGE_EXPR)
+ if (purpose && TREE_CODE (purpose) == RANGE_EXPR)
{
tree lo_index = TREE_OPERAND (purpose, 0);
tree hi_index = TREE_OPERAND (purpose, 1);
\f
/* Helper function for store_constructor.
TARGET, BITSIZE, BITPOS, MODE, EXP are as for store_field.
- TYPE is the type of the CONSTRUCTOR, not the element type.
CLEARED is as for store_constructor.
ALIAS_SET is the alias set to use for any stores.
static void
store_constructor_field (rtx target, unsigned HOST_WIDE_INT bitsize,
HOST_WIDE_INT bitpos, enum machine_mode mode,
- tree exp, tree type, int cleared,
- alias_set_type alias_set)
+ tree exp, int cleared, alias_set_type alias_set)
{
if (TREE_CODE (exp) == CONSTRUCTOR
/* We can only call store_constructor recursively if the size and
store_constructor (exp, target, cleared, bitsize / BITS_PER_UNIT);
}
else
- store_field (target, bitsize, bitpos, 0, 0, mode, exp, type, alias_set,
- false);
+ store_field (target, bitsize, bitpos, 0, 0, mode, exp, alias_set, false);
}
/* Store the value of constructor EXP into the rtx TARGET.
register whose mode size isn't equal to SIZE since
clear_storage can't handle this case. */
else if (size > 0
- && (((int)VEC_length (constructor_elt, CONSTRUCTOR_ELTS (exp))
+ && (((int)vec_safe_length (CONSTRUCTOR_ELTS (exp))
!= fields_length (type))
|| mostly_zeros_p (exp))
&& (!REG_P (target)
}
store_constructor_field (to_rtx, bitsize, bitpos, mode,
- value, type, cleared,
+ value, cleared,
get_alias_set (TREE_TYPE (field)));
}
break;
}
store_constructor_field
- (target, bitsize, bitpos, mode, value, type, cleared,
+ (target, bitsize, bitpos, mode, value, cleared,
get_alias_set (elttype));
}
}
MEM_KEEP_ALIAS_SET_P (target) = 1;
}
store_constructor_field (target, bitsize, bitpos, mode, value,
- type, cleared, get_alias_set (elttype));
+ cleared, get_alias_set (elttype));
}
}
break;
constructor_elt *ce;
int i;
int need_to_clear;
- int icode = 0;
+ int icode = CODE_FOR_nothing;
tree elttype = TREE_TYPE (type);
int elt_size = tree_low_cst (TYPE_SIZE (elttype), 1);
enum machine_mode eltmode = TYPE_MODE (elttype);
/* Store each element of the constructor into the corresponding
element of TARGET, determined by counting the elements. */
for (idx = 0, i = 0;
- VEC_iterate (constructor_elt, CONSTRUCTOR_ELTS (exp), idx, ce);
+ vec_safe_iterate (CONSTRUCTOR_ELTS (exp), idx, &ce);
idx++, i += bitsize / elt_size)
{
HOST_WIDE_INT eltpos;
? TYPE_MODE (TREE_TYPE (value))
: eltmode;
bitpos = eltpos * elt_size;
- store_constructor_field (target, bitsize, bitpos,
- value_mode, value, type,
- cleared, alias);
+ store_constructor_field (target, bitsize, bitpos, value_mode,
+ value, cleared, alias);
}
}
Always return const0_rtx unless we have something particular to
return.
- TYPE is the type of the underlying object,
-
ALIAS_SET is the alias set for the destination. This value will
(in general) be different from that for TARGET, since TARGET is a
reference to the containing structure.
store_field (rtx target, HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
unsigned HOST_WIDE_INT bitregion_start,
unsigned HOST_WIDE_INT bitregion_end,
- enum machine_mode mode, tree exp, tree type,
+ enum machine_mode mode, tree exp,
alias_set_type alias_set, bool nontemporal)
{
if (TREE_CODE (exp) == ERROR_MARK)
if (bitsize == 0)
return expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
- /* If we are storing into an unaligned field of an aligned union that is
- in a register, we may have the mode of TARGET being an integer mode but
- MODE == BLKmode. In that case, get an aligned object whose size and
- alignment are the same as TARGET and store TARGET into it (we can avoid
- the store if the field being stored is the entire width of TARGET). Then
- call ourselves recursively to store the field into a BLKmode version of
- that object. Finally, load from the object into TARGET. This is not
- very efficient in general, but should only be slightly more expensive
- than the otherwise-required unaligned accesses. Perhaps this can be
- cleaned up later. It's tempting to make OBJECT readonly, but it's set
- twice, once with emit_move_insn and once via store_field. */
-
- if (mode == BLKmode
- && (REG_P (target) || GET_CODE (target) == SUBREG))
- {
- rtx object = assign_temp (type, 1, 1);
- rtx blk_object = adjust_address (object, BLKmode, 0);
-
- if (bitsize != (HOST_WIDE_INT) GET_MODE_BITSIZE (GET_MODE (target)))
- emit_move_insn (object, target);
-
- store_field (blk_object, bitsize, bitpos,
- bitregion_start, bitregion_end,
- mode, exp, type, MEM_ALIAS_SET (blk_object), nontemporal);
-
- emit_move_insn (target, object);
-
- /* We want to return the BLKmode version of the data. */
- return blk_object;
- }
-
if (GET_CODE (target) == CONCAT)
{
/* We're storing into a struct containing a single __complex. */
return const0_rtx;
}
+ /* Handle calls that return values in multiple non-contiguous locations.
+ The Irix 6 ABI has examples of this. */
+ if (GET_CODE (temp) == PARALLEL)
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
+ rtx temp_target;
+ if (mode == BLKmode)
+ mode = smallest_mode_for_size (size * BITS_PER_UNIT, MODE_INT);
+ temp_target = gen_reg_rtx (mode);
+ emit_group_store (temp_target, temp, TREE_TYPE (exp), size);
+ temp = temp_target;
+ }
+ else if (mode == BLKmode)
+ {
+ /* Handle calls that return BLKmode values in registers. */
+ if (REG_P (temp) && TREE_CODE (exp) == CALL_EXPR)
+ {
+ rtx temp_target = gen_reg_rtx (GET_MODE (temp));
+ copy_blkmode_from_reg (temp_target, temp, TREE_TYPE (exp));
+ temp = temp_target;
+ }
+ else
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
+ rtx temp_target;
+ mode = smallest_mode_for_size (size * BITS_PER_UNIT, MODE_INT);
+ temp_target = gen_reg_rtx (mode);
+ temp_target
+ = extract_bit_field (temp, size * BITS_PER_UNIT, 0, 1,
+ false, temp_target, mode, mode);
+ temp = temp_target;
+ }
+ }
+
/* Store the value in the bitfield. */
store_bit_field (target, bitsize, bitpos,
bitregion_start, bitregion_end,
switch (TREE_CODE (exp))
{
case BIT_FIELD_REF:
- bit_offset
- = double_int_add (bit_offset,
- tree_to_double_int (TREE_OPERAND (exp, 2)));
+ bit_offset += tree_to_double_int (TREE_OPERAND (exp, 2));
break;
case COMPONENT_REF:
break;
offset = size_binop (PLUS_EXPR, offset, this_offset);
- bit_offset = double_int_add (bit_offset,
- tree_to_double_int
- (DECL_FIELD_BIT_OFFSET (field)));
+ bit_offset += tree_to_double_int (DECL_FIELD_BIT_OFFSET (field));
/* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
}
break;
case IMAGPART_EXPR:
- bit_offset = double_int_add (bit_offset,
- uhwi_to_double_int (*pbitsize));
+ bit_offset += double_int::from_uhwi (*pbitsize);
break;
case VIEW_CONVERT_EXPR:
if (!integer_zerop (off))
{
double_int boff, coff = mem_ref_offset (exp);
- boff = double_int_lshift (coff,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
- bit_offset = double_int_add (bit_offset, boff);
+ boff = coff.lshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT));
+ bit_offset += boff;
}
exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
}
if (TREE_CODE (offset) == INTEGER_CST)
{
double_int tem = tree_to_double_int (offset);
- tem = double_int_sext (tem, TYPE_PRECISION (sizetype));
- tem = double_int_lshift (tem,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
- tem = double_int_add (tem, bit_offset);
- if (double_int_fits_in_shwi_p (tem))
- {
- *pbitpos = double_int_to_shwi (tem);
+ tem = tem.sext (TYPE_PRECISION (sizetype));
+ tem = tem.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
+ tem += bit_offset;
+ if (tem.fits_shwi ())
+ {
+ *pbitpos = tem.to_shwi ();
*poffset = offset = NULL_TREE;
}
}
if (offset)
{
/* Avoid returning a negative bitpos as this may wreak havoc later. */
- if (double_int_negative_p (bit_offset))
+ if (bit_offset.is_negative ())
{
double_int mask
- = double_int_mask (BITS_PER_UNIT == 8
+ = double_int::mask (BITS_PER_UNIT == 8
? 3 : exact_log2 (BITS_PER_UNIT));
- double_int tem = double_int_and_not (bit_offset, mask);
+ double_int tem = bit_offset.and_not (mask);
/* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
Subtract it from BIT_OFFSET and add it (scaled) to OFFSET. */
- bit_offset = double_int_sub (bit_offset, tem);
- tem = double_int_rshift (tem,
- BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT, true);
+ bit_offset -= tem;
+ tem = tem.arshift (BITS_PER_UNIT == 8
+ ? 3 : exact_log2 (BITS_PER_UNIT),
+ HOST_BITS_PER_DOUBLE_INT);
offset = size_binop (PLUS_EXPR, offset,
double_int_to_tree (sizetype, tem));
}
- *pbitpos = double_int_to_shwi (bit_offset);
+ *pbitpos = bit_offset.to_shwi ();
*poffset = offset;
}
return exp;
}
-/* Given an expression EXP that may be a COMPONENT_REF, an ARRAY_REF or an
- ARRAY_RANGE_REF, look for whether EXP or any nested component-refs within
- EXP is marked as PACKED. */
-
-bool
-contains_packed_reference (const_tree exp)
-{
- bool packed_p = false;
-
- while (1)
- {
- switch (TREE_CODE (exp))
- {
- case COMPONENT_REF:
- {
- tree field = TREE_OPERAND (exp, 1);
- packed_p = DECL_PACKED (field)
- || TYPE_PACKED (TREE_TYPE (field))
- || TYPE_PACKED (TREE_TYPE (exp));
- if (packed_p)
- goto done;
- }
- break;
-
- case BIT_FIELD_REF:
- case ARRAY_REF:
- case ARRAY_RANGE_REF:
- case REALPART_EXPR:
- case IMAGPART_EXPR:
- case VIEW_CONVERT_EXPR:
- break;
-
- default:
- goto done;
- }
- exp = TREE_OPERAND (exp, 0);
- }
- done:
- return packed_p;
-}
-
/* Return a tree of sizetype representing the size, in bytes, of the element
of EXP, an ARRAY_REF or an ARRAY_RANGE_REF. */
constructor_elt *ce;
unsigned HOST_WIDE_INT idx;
- FOR_EACH_VEC_ELT (constructor_elt, CONSTRUCTOR_ELTS (exp), idx, ce)
+ FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (exp), idx, ce)
if ((ce->index != NULL_TREE && !safe_from_p (x, ce->index, 0))
|| !safe_from_p (x, ce->value, 0))
return 0;
inner = TREE_OPERAND (exp, 0);
break;
+ case COMPOUND_LITERAL_EXPR:
+ /* Allow COMPOUND_LITERAL_EXPR in initializers, if e.g.
+ rtl_for_decl_init is called on DECL_INITIAL with
+ COMPOUND_LITERAL_EXPRs in it, they aren't gimplified. */
+ if (modifier == EXPAND_INITIALIZER
+ && COMPOUND_LITERAL_EXPR_DECL (exp))
+ return expand_expr_addr_expr_1 (COMPOUND_LITERAL_EXPR_DECL (exp),
+ target, tmode, modifier, as);
+ /* FALLTHRU */
default:
/* If the object is a DECL, then expand it for its rtl. Don't bypass
expand_expr, as that can have various side effects; LABEL_DECLs for
of such an object. */
gcc_assert ((bitpos % BITS_PER_UNIT) == 0);
+ result = convert_memory_address_addr_space (tmode, result, as);
result = plus_constant (tmode, result, bitpos / BITS_PER_UNIT);
if (modifier < EXPAND_SUM)
result = force_operand (result, target);
return ret ? ret : const0_rtx;
}
- /* If this is an expression of some kind and it has an associated line
- number, then emit the line number before expanding the expression.
-
- We need to save and restore the file and line information so that
- errors discovered during expansion are emitted with the right
- information. It would be better of the diagnostic routines
- used the file/line information embedded in the tree nodes rather
- than globals. */
- if (cfun && EXPR_HAS_LOCATION (exp))
- {
- location_t saved_location = input_location;
- location_t saved_curr_loc = get_curr_insn_source_location ();
- tree saved_block = get_curr_insn_block ();
- input_location = EXPR_LOCATION (exp);
- set_curr_insn_source_location (input_location);
-
- /* Record where the insns produced belong. */
- set_curr_insn_block (TREE_BLOCK (exp));
-
- ret = expand_expr_real_1 (exp, target, tmode, modifier, alt_rtl);
-
- input_location = saved_location;
- set_curr_insn_block (saved_block);
- set_curr_insn_source_location (saved_curr_loc);
- }
- else
- {
- ret = expand_expr_real_1 (exp, target, tmode, modifier, alt_rtl);
- }
-
+ ret = expand_expr_real_1 (exp, target, tmode, modifier, alt_rtl);
return ret;
}
tree type = TREE_TYPE (treeop1);
int unsignedp = TYPE_UNSIGNED (type);
enum machine_mode mode = TYPE_MODE (type);
-
- temp = assign_temp (type, 0, 1);
+ enum machine_mode orig_mode = mode;
/* If we cannot do a conditional move on the mode, try doing it
with the promoted mode. */
if (!can_conditionally_move_p (mode))
- mode = promote_mode (type, mode, &unsignedp);
-
- if (!can_conditionally_move_p (mode))
- return NULL_RTX;
+ {
+ mode = promote_mode (type, mode, &unsignedp);
+ if (!can_conditionally_move_p (mode))
+ return NULL_RTX;
+ temp = assign_temp (type, 0, 0); /* Use promoted mode for temp. */
+ }
+ else
+ temp = assign_temp (type, 0, 1);
start_sequence ();
expand_operands (treeop1, treeop2,
rtx seq = get_insns ();
end_sequence ();
emit_insn (seq);
- return temp;
+ return convert_modes (orig_mode, mode, temp, 0);
}
/* Otherwise discard the sequence and fall back to code with
(treeop0))
* BITS_PER_UNIT),
(HOST_WIDE_INT) GET_MODE_BITSIZE (mode)),
- 0, 0, 0, TYPE_MODE (valtype), treeop0,
- type, 0, false);
+ 0, 0, 0, TYPE_MODE (valtype), treeop0, 0, false);
}
/* Return the entire union. */
else
expand_operands (treeop0, treeop1, NULL_RTX, &op1, &op0,
EXPAND_NORMAL);
+ /* op0 and op1 might still be constant, despite the above
+ != INTEGER_CST check. Handle it. */
+ if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
+ {
+ op0 = convert_modes (innermode, mode, op0, true);
+ op1 = convert_modes (innermode, mode, op1, false);
+ return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1,
+ target, unsignedp));
+ }
goto binop3;
}
}
{
expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
EXPAND_NORMAL);
+ /* op0 and op1 might still be constant, despite the above
+ != INTEGER_CST check. Handle it. */
+ if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
+ {
+ widen_mult_const:
+ op0 = convert_modes (innermode, mode, op0, zextend_p);
+ op1
+ = convert_modes (innermode, mode, op1,
+ TYPE_UNSIGNED (TREE_TYPE (treeop1)));
+ return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1,
+ target,
+ unsignedp));
+ }
temp = expand_widening_mult (mode, op0, op1, target,
unsignedp, this_optab);
return REDUCE_BIT_FIELD (temp);
op0 = expand_normal (treeop0);
if (TREE_CODE (treeop1) == INTEGER_CST)
op1 = convert_modes (innermode, mode,
- expand_normal (treeop1), unsignedp);
+ expand_normal (treeop1),
+ TYPE_UNSIGNED (TREE_TYPE (treeop1)));
else
op1 = expand_normal (treeop1);
+ /* op0 and op1 might still be constant, despite the above
+ != INTEGER_CST check. Handle it. */
+ if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
+ goto widen_mult_const;
temp = expand_binop (mode, other_optab, op0, op1, target,
unsignedp, OPTAB_LIB_WIDEN);
hipart = gen_highpart (innermode, temp);
case RDIV_EXPR:
goto binop;
+ case MULT_HIGHPART_EXPR:
+ expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
+ temp = expand_mult_highpart (mode, op0, op1, target, unsignedp);
+ gcc_assert (temp);
+ return temp;
+
case TRUNC_MOD_EXPR:
case FLOOR_MOD_EXPR:
case CEIL_MOD_EXPR:
if (reduce_bit_field && TYPE_UNSIGNED (type))
temp = expand_binop (mode, xor_optab, op0,
immed_double_int_const
- (double_int_mask (TYPE_PRECISION (type)), mode),
+ (double_int::mask (TYPE_PRECISION (type)), mode),
target, 1, OPTAB_LIB_WIDEN);
else
temp = expand_unop (mode, one_cmpl_optab, op0, target, 1);
if (!target)
target = gen_reg_rtx (TYPE_MODE (type));
+ else
+ /* If target overlaps with op1, then either we need to force
+ op1 into a pseudo (if target also overlaps with op0),
+ or write the complex parts in reverse order. */
+ switch (GET_CODE (target))
+ {
+ case CONCAT:
+ if (reg_overlap_mentioned_p (XEXP (target, 0), op1))
+ {
+ if (reg_overlap_mentioned_p (XEXP (target, 1), op0))
+ {
+ complex_expr_force_op1:
+ temp = gen_reg_rtx (GET_MODE_INNER (GET_MODE (target)));
+ emit_move_insn (temp, op1);
+ op1 = temp;
+ break;
+ }
+ complex_expr_swap_order:
+ /* Move the imaginary (op1) and real (op0) parts to their
+ location. */
+ write_complex_part (target, op1, true);
+ write_complex_part (target, op0, false);
+
+ return target;
+ }
+ break;
+ case MEM:
+ temp = adjust_address_nv (target,
+ GET_MODE_INNER (GET_MODE (target)), 0);
+ if (reg_overlap_mentioned_p (temp, op1))
+ {
+ enum machine_mode imode = GET_MODE_INNER (GET_MODE (target));
+ temp = adjust_address_nv (target, imode,
+ GET_MODE_SIZE (imode));
+ if (reg_overlap_mentioned_p (temp, op0))
+ goto complex_expr_force_op1;
+ goto complex_expr_swap_order;
+ }
+ break;
+ default:
+ if (reg_overlap_mentioned_p (target, op1))
+ {
+ if (reg_overlap_mentioned_p (target, op0))
+ goto complex_expr_force_op1;
+ goto complex_expr_swap_order;
+ }
+ break;
+ }
/* Move the real (op0) and imaginary (op1) parts to their location. */
write_complex_part (target, op0, false);
case VEC_WIDEN_MULT_HI_EXPR:
case VEC_WIDEN_MULT_LO_EXPR:
- {
- tree oprnd0 = treeop0;
- tree oprnd1 = treeop1;
-
- expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
- target = expand_widen_pattern_expr (ops, op0, op1, NULL_RTX,
- target, unsignedp);
- gcc_assert (target);
- return target;
- }
-
+ case VEC_WIDEN_MULT_EVEN_EXPR:
+ case VEC_WIDEN_MULT_ODD_EXPR:
case VEC_WIDEN_LSHIFT_HI_EXPR:
case VEC_WIDEN_LSHIFT_LO_EXPR:
- {
- tree oprnd0 = treeop0;
- tree oprnd1 = treeop1;
-
- expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
- target = expand_widen_pattern_expr (ops, op0, op1, NULL_RTX,
- target, unsignedp);
- gcc_assert (target);
- return target;
- }
+ expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+ target = expand_widen_pattern_expr (ops, op0, op1, NULL_RTX,
+ target, unsignedp);
+ gcc_assert (target);
+ return target;
case VEC_PACK_TRUNC_EXPR:
case VEC_PACK_SAT_EXPR:
}
#undef REDUCE_BIT_FIELD
+
+/* Return TRUE if expression STMT is suitable for replacement.
+ Never consider memory loads as replaceable, because those don't ever lead
+ into constant expressions. */
+
+static bool
+stmt_is_replaceable_p (gimple stmt)
+{
+ if (ssa_is_replaceable_p (stmt))
+ {
+ /* Don't move around loads. */
+ /* NOTE(review): this accepts any non-single-rhs assign, and single-rhs
+ assigns only when the rhs is a plain gimple value (i.e. not a memory
+ reference) — confirm against the gimple predicate docs. */
+ if (!gimple_assign_single_p (stmt)
+ || is_gimple_val (gimple_assign_rhs1 (stmt)))
+ return true;
+ }
+ return false;
+}
+
rtx
expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
enum expand_modifier modifier, rtx *alt_rtl)
}
if (TREE_CODE_CLASS (code) == tcc_unary
- || code == COMPONENT_REF || code == INDIRECT_REF)
+ || code == BIT_FIELD_REF
+ || code == COMPONENT_REF
+ || code == INDIRECT_REF)
return expand_expr (treeop0, const0_rtx, VOIDmode,
modifier);
expand_expr (treeop1, const0_rtx, VOIDmode, modifier);
return const0_rtx;
}
- else if (code == BIT_FIELD_REF)
- {
- expand_expr (treeop0, const0_rtx, VOIDmode, modifier);
- expand_expr (treeop1, const0_rtx, VOIDmode, modifier);
- expand_expr (treeop2, const0_rtx, VOIDmode, modifier);
- return const0_rtx;
- }
target = 0;
}
base variable. This unnecessarily allocates a pseudo, see how we can
reuse it, if partition base vars have it set already. */
if (!currently_expanding_to_rtl)
- return expand_expr_real_1 (SSA_NAME_VAR (exp), target, tmode, modifier,
- NULL);
+ {
+ tree var = SSA_NAME_VAR (exp);
+ if (var && DECL_RTL_SET_P (var))
+ return DECL_RTL (var);
+ return gen_raw_REG (TYPE_MODE (TREE_TYPE (exp)),
+ LAST_VIRTUAL_REGISTER + 1);
+ }
g = get_gimple_for_ssa_name (exp);
/* For EXPAND_INITIALIZER try harder to get something simpler. */
g = SSA_NAME_DEF_STMT (exp);
if (g)
{
- rtx r = expand_expr_real (gimple_assign_rhs_to_tree (g), target,
- tmode, modifier, NULL);
+ rtx r;
+ location_t saved_loc = curr_insn_location ();
+
+ set_curr_insn_location (gimple_location (g));
+ r = expand_expr_real (gimple_assign_rhs_to_tree (g), target,
+ tmode, modifier, NULL);
+ set_curr_insn_location (saved_loc);
if (REG_P (r) && !REG_EXPR (r))
set_reg_attrs_for_decl_rtl (SSA_NAME_VAR (exp), r);
return r;
/* Variables inherited from containing functions should have
been lowered by this point. */
context = decl_function_context (exp);
- gcc_assert (!context
+ gcc_assert (SCOPE_FILE_SCOPE_P (context)
|| context == current_function_decl
|| TREE_STATIC (exp)
|| DECL_EXTERNAL (exp)
}
if (!tmp)
{
- VEC(constructor_elt,gc) *v;
+ vec<constructor_elt, va_gc> *v;
unsigned i;
- v = VEC_alloc (constructor_elt, gc, VECTOR_CST_NELTS (exp));
+ vec_alloc (v, VECTOR_CST_NELTS (exp));
for (i = 0; i < VECTOR_CST_NELTS (exp); ++i)
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, VECTOR_CST_ELT (exp, i));
tmp = build_constructor (type, v);
with non-BLKmode values. */
gcc_assert (GET_MODE (ret) != BLKmode);
- val = build_decl (EXPR_LOCATION (exp),
+ val = build_decl (curr_insn_location (),
VAR_DECL, NULL, TREE_TYPE (exp));
DECL_ARTIFICIAL (val) = 1;
DECL_IGNORED_P (val) = 1;
temp = gen_rtx_MEM (mode, op0);
set_mem_attributes (temp, exp, 0);
set_mem_addr_space (temp, as);
- align = get_object_or_type_alignment (exp);
+ align = get_object_alignment (exp);
if (modifier != EXPAND_WRITE
+ && modifier != EXPAND_MEMORY
&& mode != BLKmode
&& align < GET_MODE_ALIGNMENT (mode)
/* If the target does not have special handling for unaligned
create_output_operand (&ops[0], NULL_RTX, mode);
create_fixed_operand (&ops[1], temp);
expand_insn (icode, 2, ops);
- return ops[0].value;
+ temp = ops[0].value;
}
return temp;
}
if (mem_ref_refers_to_non_mem_p (exp))
{
HOST_WIDE_INT offset = mem_ref_offset (exp).low;
- tree bit_offset;
- tree bftype;
base = TREE_OPERAND (base, 0);
if (offset == 0
- && host_integerp (TYPE_SIZE (TREE_TYPE (exp)), 1)
+ && host_integerp (TYPE_SIZE (type), 1)
&& (GET_MODE_BITSIZE (DECL_MODE (base))
- == TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (exp)))))
- return expand_expr (build1 (VIEW_CONVERT_EXPR,
- TREE_TYPE (exp), base),
+ == TREE_INT_CST_LOW (TYPE_SIZE (type))))
+ return expand_expr (build1 (VIEW_CONVERT_EXPR, type, base),
target, tmode, modifier);
- bit_offset = bitsize_int (offset * BITS_PER_UNIT);
- bftype = TREE_TYPE (base);
- if (TYPE_MODE (TREE_TYPE (exp)) != BLKmode)
- bftype = TREE_TYPE (exp);
- else
+ if (TYPE_MODE (type) == BLKmode)
{
temp = assign_stack_temp (DECL_MODE (base),
GET_MODE_SIZE (DECL_MODE (base)));
store_expr (base, temp, 0, false);
temp = adjust_address (temp, BLKmode, offset);
- set_mem_size (temp, int_size_in_bytes (TREE_TYPE (exp)));
+ set_mem_size (temp, int_size_in_bytes (type));
return temp;
}
- return expand_expr (build3 (BIT_FIELD_REF, bftype,
- base,
- TYPE_SIZE (TREE_TYPE (exp)),
- bit_offset),
- target, tmode, modifier);
+ exp = build3 (BIT_FIELD_REF, type, base, TYPE_SIZE (type),
+ bitsize_int (offset * BITS_PER_UNIT));
+ return expand_expr (exp, target, tmode, modifier);
}
address_mode = targetm.addr_space.address_mode (as);
base = TREE_OPERAND (exp, 0);
gimple_assign_rhs1 (def_stmt), mask);
TREE_OPERAND (exp, 0) = base;
}
- align = get_object_or_type_alignment (exp);
+ align = get_object_alignment (exp);
op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_SUM);
op0 = memory_address_addr_space (address_mode, op0, as);
if (!integer_zerop (TREE_OPERAND (exp, 1)))
if (TREE_THIS_VOLATILE (exp))
MEM_VOLATILE_P (temp) = 1;
if (modifier != EXPAND_WRITE
+ && modifier != EXPAND_MEMORY
&& mode != BLKmode
&& align < GET_MODE_ALIGNMENT (mode))
{
create_output_operand (&ops[0], NULL_RTX, mode);
create_fixed_operand (&ops[1], temp);
expand_insn (icode, 2, ops);
- return ops[0].value;
+ temp = ops[0].value;
}
else if (SLOW_UNALIGNED_ACCESS (mode, align))
temp = extract_bit_field (temp, GET_MODE_BITSIZE (mode),
{
tree array = treeop0;
tree index = treeop1;
+ tree init;
/* Fold an expression like: "foo"[2].
This is not done in fold so it won't happen inside &.
&& modifier != EXPAND_INITIALIZER
&& modifier != EXPAND_MEMORY
&& TREE_READONLY (array) && ! TREE_SIDE_EFFECTS (array)
- && TREE_CODE (array) == VAR_DECL && DECL_INITIAL (array)
- && TREE_CODE (DECL_INITIAL (array)) != ERROR_MARK
- && const_value_known_p (array))
+ && TREE_CODE (index) == INTEGER_CST
+ && (TREE_CODE (array) == VAR_DECL
+ || TREE_CODE (array) == CONST_DECL)
+ && (init = ctor_for_folding (array)) != error_mark_node)
{
- if (TREE_CODE (index) == INTEGER_CST)
+ if (TREE_CODE (init) == CONSTRUCTOR)
{
- tree init = DECL_INITIAL (array);
-
- if (TREE_CODE (init) == CONSTRUCTOR)
- {
- unsigned HOST_WIDE_INT ix;
- tree field, value;
+ unsigned HOST_WIDE_INT ix;
+ tree field, value;
+
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (init), ix,
+ field, value)
+ if (tree_int_cst_equal (field, index))
+ {
+ if (TREE_SIDE_EFFECTS (value))
+ break;
- FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (init), ix,
- field, value)
- if (tree_int_cst_equal (field, index))
+ if (TREE_CODE (value) == CONSTRUCTOR)
{
- if (TREE_SIDE_EFFECTS (value))
+ /* If VALUE is a CONSTRUCTOR, this
+ optimization is only useful if
+ this doesn't store the CONSTRUCTOR
+ into memory. If it does, it is more
+ efficient to just load the data from
+ the array directly. */
+ rtx ret = expand_constructor (value, target,
+ modifier, true);
+ if (ret == NULL_RTX)
break;
-
- if (TREE_CODE (value) == CONSTRUCTOR)
- {
- /* If VALUE is a CONSTRUCTOR, this
- optimization is only useful if
- this doesn't store the CONSTRUCTOR
- into memory. If it does, it is more
- efficient to just load the data from
- the array directly. */
- rtx ret = expand_constructor (value, target,
- modifier, true);
- if (ret == NULL_RTX)
- break;
- }
-
- return expand_expr (fold (value), target, tmode,
- modifier);
}
- }
- else if(TREE_CODE (init) == STRING_CST)
- {
- tree index1 = index;
- tree low_bound = array_ref_low_bound (exp);
- index1 = fold_convert_loc (loc, sizetype,
- treeop1);
- /* Optimize the special-case of a zero lower bound.
-
- We convert the low_bound to sizetype to avoid some problems
- with constant folding. (E.g. suppose the lower bound is 1,
- and its mode is QI. Without the conversion,l (ARRAY
- +(INDEX-(unsigned char)1)) becomes ((ARRAY+(-(unsigned char)1))
- +INDEX), which becomes (ARRAY+255+INDEX). Opps!) */
-
- if (! integer_zerop (low_bound))
- index1 = size_diffop_loc (loc, index1,
+ return
+ expand_expr (fold (value), target, tmode, modifier);
+ }
+ }
+ else if (TREE_CODE (init) == STRING_CST)
+ {
+ tree low_bound = array_ref_low_bound (exp);
+ tree index1 = fold_convert_loc (loc, sizetype, treeop1);
+
+ /* Optimize the special case of a zero lower bound.
+
+ We convert the lower bound to sizetype to avoid problems
+ with constant folding. E.g. suppose the lower bound is
+ 1 and its mode is QI. Without the conversion
+ (ARRAY + (INDEX - (unsigned char)1))
+ becomes
+ (ARRAY + (-(unsigned char)1) + INDEX)
+ which becomes
+ (ARRAY + 255 + INDEX). Oops! */
+ if (!integer_zerop (low_bound))
+ index1 = size_diffop_loc (loc, index1,
fold_convert_loc (loc, sizetype,
low_bound));
- if (0 > compare_tree_int (index1,
- TREE_STRING_LENGTH (init)))
- {
- tree type = TREE_TYPE (TREE_TYPE (init));
- enum machine_mode mode = TYPE_MODE (type);
-
- if (GET_MODE_CLASS (mode) == MODE_INT
- && GET_MODE_SIZE (mode) == 1)
- return gen_int_mode (TREE_STRING_POINTER (init)
- [TREE_INT_CST_LOW (index1)],
- mode);
- }
+ if (compare_tree_int (index1, TREE_STRING_LENGTH (init)) < 0)
+ {
+ tree type = TREE_TYPE (TREE_TYPE (init));
+ enum machine_mode mode = TYPE_MODE (type);
+
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) == 1)
+ return gen_int_mode (TREE_STRING_POINTER (init)
+ [TREE_INT_CST_LOW (index1)],
+ mode);
}
}
}
if (TYPE_UNSIGNED (TREE_TYPE (field)))
{
- op1 = GEN_INT (((HOST_WIDE_INT) 1 << bitsize) - 1);
+ op1 = gen_int_mode (((HOST_WIDE_INT) 1 << bitsize) - 1,
+ imode);
op0 = expand_and (imode, op0, op1, target);
}
else
orig_op0 = op0
= expand_expr (tem,
(TREE_CODE (TREE_TYPE (tem)) == UNION_TYPE
+ && COMPLETE_TYPE_P (TREE_TYPE (tem))
&& (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem)))
!= INTEGER_CST)
&& modifier != EXPAND_STACK_PARM
? target : NULL_RTX),
VOIDmode,
- (modifier == EXPAND_INITIALIZER
- || modifier == EXPAND_CONST_ADDRESS
- || modifier == EXPAND_STACK_PARM)
- ? modifier : EXPAND_NORMAL);
-
+ modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier);
/* If the bitfield is volatile, we want to access it in the
field's mode, not the computed mode.
&& GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
&& GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT
&& modifier != EXPAND_CONST_ADDRESS
- && modifier != EXPAND_INITIALIZER)
+ && modifier != EXPAND_INITIALIZER
+ && modifier != EXPAND_MEMORY)
/* If the field is volatile, we always want an aligned
access. Do this in following two situations:
1. the access is not already naturally
|| (MEM_P (op0)
&& (MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode1)
|| (bitpos % GET_MODE_ALIGNMENT (mode1) != 0))))
+ && modifier != EXPAND_MEMORY
&& ((modifier == EXPAND_CONST_ADDRESS
|| modifier == EXPAND_INITIALIZER)
? STRICT_ALIGNMENT
|| modifier == EXPAND_CONST_ADDRESS
|| modifier == EXPAND_INITIALIZER)
return op0;
- else if (target == 0)
+
+ if (target == 0)
target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
convert_move (target, op0, unsignedp);
/* If we are converting to BLKmode, try to avoid an intermediate
temporary by fetching an inner memory reference. */
if (mode == BLKmode
- && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) == INTEGER_CST
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
&& TYPE_MODE (TREE_TYPE (treeop0)) != BLKmode
&& handled_component_p (treeop0))
{
if (!offset
&& (bitpos % BITS_PER_UNIT) == 0
&& bitsize >= 0
- && compare_tree_int (TYPE_SIZE (TREE_TYPE (exp)), bitsize) == 0)
+ && compare_tree_int (TYPE_SIZE (type), bitsize) == 0)
{
/* See the normal_inner_ref case for the rationale. */
orig_op0
&& modifier != EXPAND_STACK_PARM
? target : NULL_RTX),
VOIDmode,
- (modifier == EXPAND_INITIALIZER
- || modifier == EXPAND_CONST_ADDRESS
- || modifier == EXPAND_STACK_PARM)
- ? modifier : EXPAND_NORMAL);
+ modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier);
if (MEM_P (orig_op0))
{
}
if (!op0)
- op0 = expand_expr (treeop0,
- NULL_RTX, VOIDmode, modifier);
+ op0 = expand_expr (treeop0, NULL_RTX, VOIDmode, modifier);
/* If the input and output modes are both the same, we are done. */
if (mode == GET_MODE (op0))
{
enum insn_code icode;
- op0 = copy_rtx (op0);
-
if (TYPE_ALIGN_OK (type))
- set_mem_align (op0, MAX (MEM_ALIGN (op0), TYPE_ALIGN (type)));
+ {
+ /* ??? Copying the MEM without substantially changing it might
+ run afoul of the code handling volatile memory references in
+ store_expr, which assumes that TARGET is returned unmodified
+ if it has been used. */
+ op0 = copy_rtx (op0);
+ set_mem_align (op0, MAX (MEM_ALIGN (op0), TYPE_ALIGN (type)));
+ }
else if (mode != BLKmode
&& MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode)
/* If the target does have special handling for unaligned
value ? label : 0,
value ? 0 : label, -1);
expand_assignment (lhs, build_int_cst (TREE_TYPE (rhs), value),
- MOVE_NONTEMPORAL (exp));
+ false);
do_pending_stack_adjust ();
emit_label (label);
return const0_rtx;
}
- expand_assignment (lhs, rhs, MOVE_NONTEMPORAL (exp));
+ expand_assignment (lhs, rhs, false);
return const0_rtx;
}
}
else if (TYPE_UNSIGNED (type))
{
- rtx mask = immed_double_int_const (double_int_mask (prec),
+ rtx mask = immed_double_int_const (double_int::mask (prec),
GET_MODE (exp));
return expand_and (GET_MODE (exp), exp, mask, target);
}
|| TREE_CODE (array) == CONST_DECL)
{
int length;
+ tree init = ctor_for_folding (array);
/* Variables initialized to string literals can be handled too. */
- if (!const_value_known_p (array)
- || !DECL_INITIAL (array)
- || TREE_CODE (DECL_INITIAL (array)) != STRING_CST)
+ if (init == error_mark_node
+ || !init
+ || TREE_CODE (init) != STRING_CST)
return 0;
/* Avoid const char foo[4] = "abcde"; */
if (DECL_SIZE_UNIT (array) == NULL_TREE
|| TREE_CODE (DECL_SIZE_UNIT (array)) != INTEGER_CST
- || (length = TREE_STRING_LENGTH (DECL_INITIAL (array))) <= 0
+ || (length = TREE_STRING_LENGTH (init)) <= 0
|| compare_tree_int (DECL_SIZE_UNIT (array), length) < 0)
return 0;
return 0;
*ptr_offset = offset;
- return DECL_INITIAL (array);
+ return init;
}
return 0;
return expand_vec_cond_expr (ops->type, ifexp, if_true, if_false, target);
}
- /* For vector typed comparisons emit code to generate the desired
- all-ones or all-zeros mask. Conveniently use the VEC_COND_EXPR
- expander for this. */
- if (TREE_CODE (ops->type) == VECTOR_TYPE)
- {
- tree ifexp = build2 (ops->code, ops->type, arg0, arg1);
- tree if_true = constant_boolean_node (true, ops->type);
- tree if_false = constant_boolean_node (false, ops->type);
- return expand_vec_cond_expr (ops->type, ifexp, if_true, if_false, target);
- }
-
/* Get the rtx comparison code to use. We know that EXP is a comparison
operation of some type. Some comparisons against 1 and -1 can be
converted to comparisons with zero. Do so here so that the tests
#endif
/* Attempt to generate a casesi instruction. Returns 1 if successful,
- 0 otherwise (i.e. if there is no casesi instruction). */
+ 0 otherwise (i.e. if there is no casesi instruction).
+
+ DEFAULT_PROBABILITY is the probability of jumping to the default
+ label. */
int
try_casesi (tree index_type, tree index_expr, tree minval, tree range,
- rtx table_label ATTRIBUTE_UNUSED, rtx default_label,
- rtx fallback_label ATTRIBUTE_UNUSED)
+ rtx table_label, rtx default_label, rtx fallback_label,
+ int default_probability)
{
struct expand_operand ops[5];
enum machine_mode index_mode = SImode;
index = expand_normal (index_expr);
if (default_label)
emit_cmp_and_jump_insns (rangertx, index, LTU, NULL_RTX,
- omode, 1, default_label);
+ omode, 1, default_label,
+ default_probability);
/* Now we can safely truncate. */
index = convert_to_mode (index_mode, index, 0);
}
TABLE_LABEL is a CODE_LABEL rtx for the table itself.
DEFAULT_LABEL is a CODE_LABEL rtx to jump to if the
- index value is out of range. */
+ index value is out of range.
+ DEFAULT_PROBABILITY is the probability of jumping to
+ the default label. */
static void
do_tablejump (rtx index, enum machine_mode mode, rtx range, rtx table_label,
- rtx default_label)
+ rtx default_label, int default_probability)
{
rtx temp, vector;
if (default_label)
emit_cmp_and_jump_insns (index, range, GTU, NULL_RTX, mode, 1,
- default_label);
+ default_label, default_probability);
+
/* If index is in range, it must fit in Pmode.
Convert to Pmode so we can index with it. */
GET_MODE_SIZE, because this indicates how large insns are. The other
uses should all be Pmode, because they are addresses. This code
could fail if addresses and insns are not the same size. */
- index = gen_rtx_PLUS (Pmode,
- gen_rtx_MULT (Pmode, index,
- GEN_INT (GET_MODE_SIZE (CASE_VECTOR_MODE))),
- gen_rtx_LABEL_REF (Pmode, table_label));
+ index = gen_rtx_PLUS
+ (Pmode,
+ gen_rtx_MULT (Pmode, index,
+ gen_int_mode (GET_MODE_SIZE (CASE_VECTOR_MODE), Pmode)),
+ gen_rtx_LABEL_REF (Pmode, table_label));
#ifdef PIC_CASE_VECTOR_ADDRESS
if (flag_pic)
index = PIC_CASE_VECTOR_ADDRESS (index);
int
try_tablejump (tree index_type, tree index_expr, tree minval, tree range,
- rtx table_label, rtx default_label)
+ rtx table_label, rtx default_label, int default_probability)
{
rtx index;
TYPE_MODE (TREE_TYPE (range)),
expand_normal (range),
TYPE_UNSIGNED (TREE_TYPE (range))),
- table_label, default_label);
+ table_label, default_label, default_probability);
return 1;
}
case UI_TARGET:
unwind_and_version = "_v0";
break;
+ case UI_SEH:
+ unwind_and_version = "_seh0";
+ break;
default:
gcc_unreachable ();
}