/* Convert tree expression to rtl instructions, for GNU compiler.
Copyright (C) 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
This file is part of GCC.
#include "tree-pass.h"
#include "tree-flow.h"
#include "target.h"
+#include "common/common-target.h"
#include "timevar.h"
#include "df.h"
#include "diagnostic.h"
#include "ssaexpand.h"
#include "target-globals.h"
+#include "params.h"
/* Decide whether a function's arguments should be processed
from first to last or from last to first.
HOST_WIDE_INT, enum machine_mode,
tree, tree, int, alias_set_type);
static void store_constructor (tree, rtx, int, HOST_WIDE_INT);
-static rtx store_field (rtx, HOST_WIDE_INT, HOST_WIDE_INT, enum machine_mode,
+static rtx store_field (rtx, HOST_WIDE_INT, HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
+ enum machine_mode,
tree, tree, alias_set_type, bool);
static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (const_tree, const_tree);
PUT_MODE (mem, srcmode);
- if ((*insn_data[ic].operand[1].predicate) (mem, srcmode))
+ if (insn_operand_matches (ic, 1, mem))
float_extend_from_mem[mode][srcmode] = true;
}
}
TO here. */
if (GET_CODE (from) == SUBREG && SUBREG_PROMOTED_VAR_P (from)
- && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (from)))
- >= GET_MODE_SIZE (to_mode))
+ && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (from)))
+ >= GET_MODE_PRECISION (to_mode))
&& SUBREG_PROMOTED_UNSIGNED_P (from) == unsignedp)
from = gen_lowpart (to_mode, from), from_mode = to_mode;
/* Now both modes are integers. */
/* Handle expanding beyond a word. */
- if (GET_MODE_BITSIZE (from_mode) < GET_MODE_BITSIZE (to_mode)
- && GET_MODE_BITSIZE (to_mode) > BITS_PER_WORD)
+ if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode)
+ && GET_MODE_PRECISION (to_mode) > BITS_PER_WORD)
{
rtx insns;
rtx lowpart;
return;
}
/* Next, try converting via full word. */
- else if (GET_MODE_BITSIZE (from_mode) < BITS_PER_WORD
+ else if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD
&& ((code = can_extend_p (to_mode, word_mode, unsignedp))
!= CODE_FOR_nothing))
{
from = force_reg (from_mode, from);
/* Get a copy of FROM widened to a word, if necessary. */
- if (GET_MODE_BITSIZE (from_mode) < BITS_PER_WORD)
+ if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD)
lowpart_mode = word_mode;
else
lowpart_mode = from_mode;
}
/* Truncating multi-word to a word or less. */
- if (GET_MODE_BITSIZE (from_mode) > BITS_PER_WORD
- && GET_MODE_BITSIZE (to_mode) <= BITS_PER_WORD)
+ if (GET_MODE_PRECISION (from_mode) > BITS_PER_WORD
+ && GET_MODE_PRECISION (to_mode) <= BITS_PER_WORD)
{
if (!((MEM_P (from)
&& ! MEM_VOLATILE_P (from)
/* For truncation, usually we can just refer to FROM in a narrower mode. */
if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode)
- && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (to_mode),
- GET_MODE_BITSIZE (from_mode)))
+ && TRULY_NOOP_TRUNCATION_MODES_P (to_mode, from_mode))
{
if (!((MEM_P (from)
&& ! MEM_VOLATILE_P (from)
}
/* Handle extension. */
- if (GET_MODE_BITSIZE (to_mode) > GET_MODE_BITSIZE (from_mode))
+ if (GET_MODE_PRECISION (to_mode) > GET_MODE_PRECISION (from_mode))
{
/* Convert directly if that works. */
if ((code = can_extend_p (to_mode, from_mode, unsignedp))
{
enum machine_mode intermediate;
rtx tmp;
- tree shift_amount;
+ int shift_amount;
/* Search for a mode to convert via. */
for (intermediate = from_mode; intermediate != VOIDmode;
if (((can_extend_p (to_mode, intermediate, unsignedp)
!= CODE_FOR_nothing)
|| (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate)
- && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (to_mode),
- GET_MODE_BITSIZE (intermediate))))
+ && TRULY_NOOP_TRUNCATION_MODES_P (to_mode, intermediate)))
&& (can_extend_p (intermediate, from_mode, unsignedp)
!= CODE_FOR_nothing))
{
/* No suitable intermediate mode.
Generate what we need with shifts. */
- shift_amount = build_int_cst (NULL_TREE,
- GET_MODE_BITSIZE (to_mode)
- - GET_MODE_BITSIZE (from_mode));
+ shift_amount = (GET_MODE_PRECISION (to_mode)
+ - GET_MODE_PRECISION (from_mode));
from = gen_lowpart (to_mode, force_reg (from_mode, from));
tmp = expand_shift (LSHIFT_EXPR, to_mode, from, shift_amount,
to, unsignedp);
??? Code above formerly short-circuited this, for most integer
mode pairs, with a force_reg in from_mode followed by a recursive
call to this routine. Appears always to have been wrong. */
- if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode))
+ if (GET_MODE_PRECISION (to_mode) < GET_MODE_PRECISION (from_mode))
{
rtx temp = force_reg (to_mode, gen_lowpart (to_mode, from));
emit_move_insn (to, temp);
wider than HOST_BITS_PER_WIDE_INT, we must be narrowing the operand. */
if ((CONST_INT_P (x)
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ && GET_MODE_PRECISION (mode) <= HOST_BITS_PER_WIDE_INT)
|| (GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_CLASS (oldmode) == MODE_INT
&& (GET_CODE (x) == CONST_DOUBLE
- || (GET_MODE_SIZE (mode) <= GET_MODE_SIZE (oldmode)
+ || (GET_MODE_PRECISION (mode) <= GET_MODE_PRECISION (oldmode)
&& ((MEM_P (x) && ! MEM_VOLATILE_P (x)
&& direct_load[(int) mode])
|| (REG_P (x)
&& (! HARD_REGISTER_P (x)
|| HARD_REGNO_MODE_OK (REGNO (x), mode))
- && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
- GET_MODE_BITSIZE (GET_MODE (x)))))))))
+ && TRULY_NOOP_TRUNCATION_MODES_P (mode,
+ GET_MODE (x))))))))
{
/* ?? If we don't know OLDMODE, we have to assume here that
X does not need sign- or zero-extension. This may not be
the case, but it's the best we can do. */
if (CONST_INT_P (x) && oldmode != VOIDmode
- && GET_MODE_SIZE (mode) > GET_MODE_SIZE (oldmode))
+ && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (oldmode))
{
HOST_WIDE_INT val = INTVAL (x);
- int width = GET_MODE_BITSIZE (oldmode);
/* We must sign or zero-extend in this case. Start by
zero-extending, then sign extend if we need to. */
- val &= ((HOST_WIDE_INT) 1 << width) - 1;
+ val &= GET_MODE_MASK (oldmode);
if (! unsignedp
- && (val & ((HOST_WIDE_INT) 1 << (width - 1))))
- val |= (HOST_WIDE_INT) (-1) << width;
+ && val_signbit_known_set_p (oldmode, val))
+ val |= ~GET_MODE_MASK (oldmode);
return gen_int_mode (val, mode);
}
return temp;
}
\f
+/* Return the largest alignment we can use for doing a move (or store)
+ of MAX_PIECES bytes. ALIGN is the largest alignment we could use. */
+
+static unsigned int
+alignment_for_piecewise_move (unsigned int max_pieces, unsigned int align)
+{
+ enum machine_mode tmode;
+
+ tmode = mode_for_size (max_pieces * BITS_PER_UNIT, MODE_INT, 1);
+ if (align >= GET_MODE_ALIGNMENT (tmode))
+ align = GET_MODE_ALIGNMENT (tmode);
+ else
+ {
+ enum machine_mode tmode, xmode;
+
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
+ tmode != VOIDmode;
+ xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) > max_pieces
+ || SLOW_UNALIGNED_ACCESS (tmode, align))
+ break;
+
+ align = MAX (align, GET_MODE_ALIGNMENT (xmode));
+ }
+
+ return align;
+}
+
+/* Return the widest integer mode narrower than SIZE bytes. If no such
+ mode can be found, return VOIDmode. */
+
+static enum machine_mode
+widest_int_mode_for_size (unsigned int size)
+{
+ enum machine_mode tmode, mode = VOIDmode;
+
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) < size)
+ mode = tmode;
+
+ return mode;
+}
+
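An editorial aside on the two helpers just added (not part of the change itself): the by-pieces callers below seed them with MOVE_MAX_PIECES + 1 or STORE_MAX_PIECES + 1, so the strict "<" test selects modes of at most that many bytes. A worked example, assuming a 64-bit target where MOVE_MAX_PIECES is 8:

/* Hypothetical mode sequence on such a target:
     widest_int_mode_for_size (9) == DImode  (8 bytes)
     widest_int_mode_for_size (8) == SImode  (4 bytes)
     widest_int_mode_for_size (4) == HImode  (2 bytes)
     widest_int_mode_for_size (2) == QImode  (1 byte)
   i.e. exactly the descending sequence the by-pieces loops below step
   through until MAX_SIZE drops to 1.  */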
/* STORE_MAX_PIECES is the number of bytes at a time that we can
store efficiently. Due to internal GCC limitations, this is
MOVE_MAX_PIECES limited by the number of bytes GCC can represent
= targetm.addr_space.address_mode (MEM_ADDR_SPACE (from));
rtx to_addr, from_addr = XEXP (from, 0);
unsigned int max_size = MOVE_MAX_PIECES + 1;
- enum machine_mode mode = VOIDmode, tmode;
enum insn_code icode;
align = MIN (to ? MEM_ALIGN (to) : align, MEM_ALIGN (from));
if (!(data.autinc_from && data.autinc_to)
&& move_by_pieces_ninsns (len, align, max_size) > 2)
{
- /* Find the mode of the largest move... */
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ /* Find the mode of the largest move...
+ MODE might not be used depending on the definitions of the
+ USE_* macros below. */
+ enum machine_mode mode ATTRIBUTE_UNUSED
+ = widest_int_mode_for_size (max_size);
if (USE_LOAD_PRE_DECREMENT (mode) && data.reverse && ! data.autinc_from)
{
data.to_addr = copy_to_mode_reg (to_addr_mode, to_addr);
}
- tmode = mode_for_size (MOVE_MAX_PIECES * BITS_PER_UNIT, MODE_INT, 1);
- if (align >= GET_MODE_ALIGNMENT (tmode))
- align = GET_MODE_ALIGNMENT (tmode);
- else
- {
- enum machine_mode xmode;
-
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > MOVE_MAX_PIECES
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
-
- align = MAX (align, GET_MODE_ALIGNMENT (xmode));
- }
+ align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
/* First move what we can in the largest integer mode, then go to
successively smaller modes. */
while (max_size > 1)
{
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ enum machine_mode mode = widest_int_mode_for_size (max_size);
if (mode == VOIDmode)
break;
unsigned int max_size)
{
unsigned HOST_WIDE_INT n_insns = 0;
- enum machine_mode tmode;
-
- tmode = mode_for_size (MOVE_MAX_PIECES * BITS_PER_UNIT, MODE_INT, 1);
- if (align >= GET_MODE_ALIGNMENT (tmode))
- align = GET_MODE_ALIGNMENT (tmode);
- else
- {
- enum machine_mode tmode, xmode;
-
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > MOVE_MAX_PIECES
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
- align = MAX (align, GET_MODE_ALIGNMENT (xmode));
- }
+ align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
while (max_size > 1)
{
- enum machine_mode mode = VOIDmode;
+ enum machine_mode mode;
enum insn_code icode;
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ mode = widest_int_mode_for_size (max_size);
if (mode == VOIDmode)
break;
{
x = shallow_copy_rtx (x);
y = shallow_copy_rtx (y);
- set_mem_size (x, size);
- set_mem_size (y, size);
+ set_mem_size (x, INTVAL (size));
+ set_mem_size (y, INTVAL (size));
}
if (CONST_INT_P (size) && MOVE_BY_PIECES_P (INTVAL (size), align))
else if (may_use_call
&& ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x))
&& ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (y)))
- retval = emit_block_move_via_libcall (x, y, size,
- method == BLOCK_OP_TAILCALL);
+ {
+ /* Since x and y are passed to a libcall, mark the corresponding
+ tree EXPR as addressable. */
+ tree y_expr = MEM_EXPR (y);
+ tree x_expr = MEM_EXPR (x);
+ if (y_expr)
+ mark_addressable (y_expr);
+ if (x_expr)
+ mark_addressable (x_expr);
+ retval = emit_block_move_via_libcall (x, y, size,
+ method == BLOCK_OP_TAILCALL);
+ }
+
else
emit_block_move_via_loop (x, y, size, align);
/* If any argument goes in memory, then it might clobber an outgoing
argument. */
{
- CUMULATIVE_ARGS args_so_far;
+ CUMULATIVE_ARGS args_so_far_v;
+ cumulative_args_t args_so_far;
tree fn, arg;
fn = emit_block_move_libcall_fn (false);
- INIT_CUMULATIVE_ARGS (args_so_far, TREE_TYPE (fn), NULL_RTX, 0, 3);
+ INIT_CUMULATIVE_ARGS (args_so_far_v, TREE_TYPE (fn), NULL_RTX, 0, 3);
+ args_so_far = pack_cumulative_args (&args_so_far_v);
arg = TYPE_ARG_TYPES (TREE_TYPE (fn));
for ( ; arg != void_list_node ; arg = TREE_CHAIN (arg))
{
enum machine_mode mode = TYPE_MODE (TREE_VALUE (arg));
- rtx tmp = targetm.calls.function_arg (&args_so_far, mode,
+ rtx tmp = targetm.calls.function_arg (args_so_far, mode,
NULL_TREE, true);
if (!tmp || !REG_P (tmp))
return false;
- if (targetm.calls.arg_partial_bytes (&args_so_far, mode, NULL, 1))
+ if (targetm.calls.arg_partial_bytes (args_so_far, mode, NULL, 1))
return false;
- targetm.calls.function_arg_advance (&args_so_far, mode,
+ targetm.calls.function_arg_advance (args_so_far, mode,
NULL_TREE, true);
}
}
emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align,
unsigned int expected_align, HOST_WIDE_INT expected_size)
{
- rtx opalign = GEN_INT (align / BITS_PER_UNIT);
int save_volatile_ok = volatile_ok;
enum machine_mode mode;
mode = GET_MODE_WIDER_MODE (mode))
{
enum insn_code code = direct_optab_handler (movmem_optab, mode);
- insn_operand_predicate_fn pred;
if (code != CODE_FOR_nothing
/* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
&& ((CONST_INT_P (size)
&& ((unsigned HOST_WIDE_INT) INTVAL (size)
<= (GET_MODE_MASK (mode) >> 1)))
- || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)
- && ((pred = insn_data[(int) code].operand[0].predicate) == 0
- || (*pred) (x, BLKmode))
- && ((pred = insn_data[(int) code].operand[1].predicate) == 0
- || (*pred) (y, BLKmode))
- && ((pred = insn_data[(int) code].operand[3].predicate) == 0
- || (*pred) (opalign, VOIDmode)))
- {
- rtx op2;
- rtx last = get_last_insn ();
- rtx pat;
-
- op2 = convert_to_mode (mode, size, 1);
- pred = insn_data[(int) code].operand[2].predicate;
- if (pred != 0 && ! (*pred) (op2, mode))
- op2 = copy_to_mode_reg (mode, op2);
+ || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD))
+ {
+ struct expand_operand ops[6];
+ unsigned int nops;
/* ??? When called via emit_block_move_for_call, it'd be
nice if there were some way to inform the backend, so
that it doesn't fail the expansion because it thinks
emitting the libcall would be more efficient. */
-
- if (insn_data[(int) code].n_operands == 4)
- pat = GEN_FCN ((int) code) (x, y, op2, opalign);
- else
- pat = GEN_FCN ((int) code) (x, y, op2, opalign,
- GEN_INT (expected_align
- / BITS_PER_UNIT),
- GEN_INT (expected_size));
- if (pat)
+ nops = insn_data[(int) code].n_generator_args;
+ gcc_assert (nops == 4 || nops == 6);
+
+ create_fixed_operand (&ops[0], x);
+ create_fixed_operand (&ops[1], y);
+ /* The check above guarantees that this size conversion is valid. */
+ create_convert_operand_to (&ops[2], size, mode, true);
+ create_integer_operand (&ops[3], align / BITS_PER_UNIT);
+ if (nops == 6)
+ {
+ create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
+ create_integer_operand (&ops[5], expected_size);
+ }
+ if (maybe_expand_insn (code, nops, ops))
{
- emit_insn (pat);
volatile_ok = save_volatile_ok;
return true;
}
- else
- delete_insns_since (last);
}
}
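A short editorial note on the expand_operand interface used above; the same idiom recurs in the setmem and push expanders later in this change.

/* Editorial: each create_*_operand call describes one operand of the
   named pattern; maybe_expand_insn then checks the operand predicates,
   copies values into registers where required, and emits the insn,
   returning false if the pattern cannot be used.  This broadly
   replaces the hand-written predicate checks and the
   delete_insns_since cleanup that were removed above.  */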
if (nregs == 0)
return;
- if (CONSTANT_P (x) && ! LEGITIMATE_CONSTANT_P (x))
+ if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
x = validize_mem (force_const_mem (mode, x));
/* See if the machine can do this with a load multiple insn. */
&& (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode))
tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
(bytepos % slen0) * BITS_PER_UNIT,
- 1, NULL_RTX, mode, mode);
+ 1, false, NULL_RTX, mode, mode);
}
else
{
mem = assign_stack_temp (GET_MODE (src), slen, 0);
emit_move_insn (mem, src);
tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
- 0, 1, NULL_RTX, mode, mode);
+ 0, 1, false, NULL_RTX, mode, mode);
}
}
/* FIXME: A SIMD parallel will eventually lead to a subreg of a
tmps[i] = src;
else
tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT,
- bytepos * BITS_PER_UNIT, 1, NULL_RTX,
+ bytepos * BITS_PER_UNIT, 1, false, NULL_RTX,
mode, mode);
if (shift)
tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
- build_int_cst (NULL_TREE, shift), tmps[i], 0);
+ shift, tmps[i], 0);
}
}
{
int shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
- build_int_cst (NULL_TREE, shift),
- tmps[i], 0);
+ shift, tmps[i], 0);
}
bytelen = adj_bytelen;
}
emit_move_insn (adjust_address (dest, mode, bytepos), tmps[i]);
else
store_bit_field (dest, bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
- mode, tmps[i]);
+ 0, 0, mode, tmps[i]);
}
/* Copy from the pseudo into the (probable) hard reg. */
/* Use xbitpos for the source extraction (right justified) and
bitpos for the destination store (left justified). */
- store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, copy_mode,
+ store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, 0, 0, copy_mode,
extract_bit_field (src, bitsize,
- xbitpos % BITS_PER_WORD, 1,
+ xbitpos % BITS_PER_WORD, 1, false,
NULL_RTX, copy_mode, copy_mode));
}
unsigned HOST_WIDE_INT l;
unsigned int max_size;
HOST_WIDE_INT offset = 0;
- enum machine_mode mode, tmode;
+ enum machine_mode mode;
enum insn_code icode;
int reverse;
- rtx cst;
+ /* cst is set but not used if LEGITIMATE_CONSTANT doesn't use it. */
+ rtx cst ATTRIBUTE_UNUSED;
if (len == 0)
return 1;
: STORE_BY_PIECES_P (len, align)))
return 0;
- tmode = mode_for_size (STORE_MAX_PIECES * BITS_PER_UNIT, MODE_INT, 1);
- if (align >= GET_MODE_ALIGNMENT (tmode))
- align = GET_MODE_ALIGNMENT (tmode);
- else
- {
- enum machine_mode xmode;
-
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > STORE_MAX_PIECES
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
-
- align = MAX (align, GET_MODE_ALIGNMENT (xmode));
- }
+ align = alignment_for_piecewise_move (STORE_MAX_PIECES, align);
/* We would first store what we can in the largest integer mode, then go to
successively smaller modes. */
reverse++)
{
l = len;
- mode = VOIDmode;
max_size = STORE_MAX_PIECES + 1;
while (max_size > 1)
{
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ mode = widest_int_mode_for_size (max_size);
if (mode == VOIDmode)
break;
offset -= size;
cst = (*constfun) (constfundata, offset, mode);
- if (!LEGITIMATE_CONSTANT_P (cst))
+ if (!targetm.legitimate_constant_p (mode, cst))
return 0;
if (!reverse)
= targetm.addr_space.address_mode (MEM_ADDR_SPACE (data->to));
rtx to_addr = XEXP (data->to, 0);
unsigned int max_size = STORE_MAX_PIECES + 1;
- enum machine_mode mode = VOIDmode, tmode;
enum insn_code icode;
data->offset = 0;
if (!data->autinc_to
&& move_by_pieces_ninsns (data->len, align, max_size) > 2)
{
- /* Determine the main mode we'll be using. */
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ /* Determine the main mode we'll be using.
+ MODE might not be used depending on the definitions of the
+ USE_* macros below. */
+ enum machine_mode mode ATTRIBUTE_UNUSED
+ = widest_int_mode_for_size (max_size);
if (USE_STORE_PRE_DECREMENT (mode) && data->reverse && ! data->autinc_to)
{
data->to_addr = copy_to_mode_reg (to_addr_mode, to_addr);
}
- tmode = mode_for_size (STORE_MAX_PIECES * BITS_PER_UNIT, MODE_INT, 1);
- if (align >= GET_MODE_ALIGNMENT (tmode))
- align = GET_MODE_ALIGNMENT (tmode);
- else
- {
- enum machine_mode xmode;
-
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT), xmode = tmode;
- tmode != VOIDmode;
- xmode = tmode, tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) > STORE_MAX_PIECES
- || SLOW_UNALIGNED_ACCESS (tmode, align))
- break;
-
- align = MAX (align, GET_MODE_ALIGNMENT (xmode));
- }
+ align = alignment_for_piecewise_move (STORE_MAX_PIECES, align);
/* First store what we can in the largest integer mode, then go to
successively smaller modes. */
while (max_size > 1)
{
- for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
- tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
- if (GET_MODE_SIZE (tmode) < max_size)
- mode = tmode;
+ enum machine_mode mode = widest_int_mode_for_size (max_size);
if (mode == VOIDmode)
break;
including more than one in the machine description unless
the more limited one has some advantage. */
- rtx opalign = GEN_INT (align / BITS_PER_UNIT);
enum machine_mode mode;
if (expected_align < align)
mode = GET_MODE_WIDER_MODE (mode))
{
enum insn_code code = direct_optab_handler (setmem_optab, mode);
- insn_operand_predicate_fn pred;
if (code != CODE_FOR_nothing
/* We don't need MODE to be narrower than
&& ((CONST_INT_P (size)
&& ((unsigned HOST_WIDE_INT) INTVAL (size)
<= (GET_MODE_MASK (mode) >> 1)))
- || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD)
- && ((pred = insn_data[(int) code].operand[0].predicate) == 0
- || (*pred) (object, BLKmode))
- && ((pred = insn_data[(int) code].operand[3].predicate) == 0
- || (*pred) (opalign, VOIDmode)))
- {
- rtx opsize, opchar;
- enum machine_mode char_mode;
- rtx last = get_last_insn ();
- rtx pat;
-
- opsize = convert_to_mode (mode, size, 1);
- pred = insn_data[(int) code].operand[1].predicate;
- if (pred != 0 && ! (*pred) (opsize, mode))
- opsize = copy_to_mode_reg (mode, opsize);
-
- opchar = val;
- char_mode = insn_data[(int) code].operand[2].mode;
- if (char_mode != VOIDmode)
- {
- opchar = convert_to_mode (char_mode, opchar, 1);
- pred = insn_data[(int) code].operand[2].predicate;
- if (pred != 0 && ! (*pred) (opchar, char_mode))
- opchar = copy_to_mode_reg (char_mode, opchar);
- }
-
- if (insn_data[(int) code].n_operands == 4)
- pat = GEN_FCN ((int) code) (object, opsize, opchar, opalign);
- else
- pat = GEN_FCN ((int) code) (object, opsize, opchar, opalign,
- GEN_INT (expected_align
- / BITS_PER_UNIT),
- GEN_INT (expected_size));
- if (pat)
+ || GET_MODE_BITSIZE (mode) >= BITS_PER_WORD))
+ {
+ struct expand_operand ops[6];
+ unsigned int nops;
+
+ nops = insn_data[(int) code].n_generator_args;
+ gcc_assert (nops == 4 || nops == 6);
+
+ create_fixed_operand (&ops[0], object);
+ /* The check above guarantees that this size conversion is valid. */
+ create_convert_operand_to (&ops[1], size, mode, true);
+ create_convert_operand_from (&ops[2], val, byte_mode, true);
+ create_integer_operand (&ops[3], align / BITS_PER_UNIT);
+ if (nops == 6)
{
- emit_insn (pat);
- return true;
+ create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
+ create_integer_operand (&ops[5], expected_size);
}
- else
- delete_insns_since (last);
+ if (maybe_expand_insn (code, nops, ops))
+ return true;
}
}
gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
}
- store_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0, imode, val);
+ store_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0, 0, 0, imode, val);
}
/* Extract one of the components of the complex value CPLX. Extract the
}
return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
- true, NULL_RTX, imode, imode);
+ true, false, NULL_RTX, imode, imode);
}
\f
/* A subroutine of emit_move_insn_1. Yet another lowpart generator.
y_cst = y;
- if (!LEGITIMATE_CONSTANT_P (y))
+ if (!targetm.legitimate_constant_p (mode, y))
{
y = force_const_mem (mode, y);
&& (set = single_set (last_insn)) != NULL_RTX
&& SET_DEST (set) == x
&& ! rtx_equal_p (y_cst, SET_SRC (set)))
- set_unique_reg_note (last_insn, REG_EQUAL, y_cst);
+ set_unique_reg_note (last_insn, REG_EQUAL, copy_rtx (y_cst));
return last_insn;
}
REAL_VALUE_FROM_CONST_DOUBLE (r, y);
- if (LEGITIMATE_CONSTANT_P (y))
+ if (targetm.legitimate_constant_p (dstmode, y))
oldcost = rtx_cost (y, SET, speed);
else
oldcost = rtx_cost (force_const_mem (dstmode, y), SET, speed);
trunc_y = CONST_DOUBLE_FROM_REAL_VALUE (r, srcmode);
- if (LEGITIMATE_CONSTANT_P (trunc_y))
+ if (targetm.legitimate_constant_p (srcmode, trunc_y))
{
/* Skip if the target needs extra instructions to perform
the extension. */
- if (! (*insn_data[ic].operand[1].predicate) (trunc_y, srcmode))
+ if (!insn_operand_matches (ic, 1, trunc_y))
continue;
/* This is valid, but may not be cheaper than the original. */
newcost = rtx_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y), SET, speed);
return memory_address (GET_CLASS_NARROWEST_MODE (MODE_INT), temp);
}
-#ifdef PUSH_ROUNDING
+/* A utility routine that returns the base of an auto-inc memory, or NULL. */
+
+static rtx
+mem_autoinc_base (rtx mem)
+{
+ if (MEM_P (mem))
+ {
+ rtx addr = XEXP (mem, 0);
+ if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
+ return XEXP (addr, 0);
+ }
+ return NULL;
+}
+
+/* A utility routine used here, in reload, and in try_split. The insns
+ after PREV up to and including LAST are known to adjust the stack,
+ with a final value of END_ARGS_SIZE. Iterate backward from LAST
+ placing notes as appropriate. PREV may be NULL, indicating the
+ entire insn sequence prior to LAST should be scanned.
+
+ The set of allowed stack pointer modifications is small:
+ (1) One or more auto-inc style memory references (aka pushes),
+ (2) One or more addition/subtraction with the SP as destination,
+ (3) A single move insn with the SP as destination,
+ (4) A call_pop insn.
+
+ Insns in the sequence that do not modify the SP are ignored.
+
+ The return value is the amount of adjustment that can be trivially
+ verified, via immediate operand or auto-inc. If the adjustment
+ cannot be trivially extracted, the return value is INT_MIN. */
+
+int
+fixup_args_size_notes (rtx prev, rtx last, int end_args_size)
+{
+ int args_size = end_args_size;
+ bool saw_unknown = false;
+ rtx insn;
+
+ for (insn = last; insn != prev; insn = PREV_INSN (insn))
+ {
+ rtx dest, set, pat;
+ HOST_WIDE_INT this_delta = 0;
+ int i;
+
+ if (!NONDEBUG_INSN_P (insn))
+ continue;
+ pat = PATTERN (insn);
+ set = NULL;
+
+ /* Look for a call_pop pattern. */
+ if (CALL_P (insn))
+ {
+ /* We're not supposed to see non-pop call patterns here. */
+ gcc_assert (GET_CODE (pat) == PARALLEL);
+
+ /* All call_pop have a stack pointer adjust in the parallel.
+ The call itself is always first, and the stack adjust is
+ usually last, so search from the end. */
+ for (i = XVECLEN (pat, 0) - 1; i > 0; --i)
+ {
+ set = XVECEXP (pat, 0, i);
+ if (GET_CODE (set) != SET)
+ continue;
+ dest = SET_DEST (set);
+ if (dest == stack_pointer_rtx)
+ break;
+ }
+ /* We'd better have found the stack pointer adjust. */
+ gcc_assert (i > 0);
+ /* Fall through to process the extracted SET and DEST
+ as if it was a standalone insn. */
+ }
+ else if (GET_CODE (pat) == SET)
+ set = pat;
+ else if ((set = single_set (insn)) != NULL)
+ ;
+ else if (GET_CODE (pat) == PARALLEL)
+ {
+ /* ??? Some older ports use a parallel with a stack adjust
+ and a store for a PUSH_ROUNDING pattern, rather than a
+ PRE/POST_MODIFY rtx. Don't force them to update yet... */
+ /* ??? See h8300 and m68k, pushqi1. */
+ for (i = XVECLEN (pat, 0) - 1; i >= 0; --i)
+ {
+ set = XVECEXP (pat, 0, i);
+ if (GET_CODE (set) != SET)
+ continue;
+ dest = SET_DEST (set);
+ if (dest == stack_pointer_rtx)
+ break;
+
+ /* We do not expect an auto-inc of the sp in the parallel. */
+ gcc_checking_assert (mem_autoinc_base (dest)
+ != stack_pointer_rtx);
+ gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
+ != stack_pointer_rtx);
+ }
+ if (i < 0)
+ continue;
+ }
+ else
+ continue;
+ dest = SET_DEST (set);
+
+ /* Look for direct modifications of the stack pointer. */
+ if (dest == stack_pointer_rtx)
+ {
+ gcc_assert (!saw_unknown);
+ /* Look for a trivial adjustment, otherwise assume nothing. */
+ if (GET_CODE (SET_SRC (set)) == PLUS
+ && XEXP (SET_SRC (set), 0) == stack_pointer_rtx
+ && CONST_INT_P (XEXP (SET_SRC (set), 1)))
+ this_delta = INTVAL (XEXP (SET_SRC (set), 1));
+ else
+ saw_unknown = true;
+ }
+ /* Otherwise only think about autoinc patterns. */
+ else if (mem_autoinc_base (dest) == stack_pointer_rtx)
+ {
+ rtx addr = XEXP (dest, 0);
+ gcc_assert (!saw_unknown);
+ switch (GET_CODE (addr))
+ {
+ case PRE_INC:
+ case POST_INC:
+ this_delta = GET_MODE_SIZE (GET_MODE (dest));
+ break;
+ case PRE_DEC:
+ case POST_DEC:
+ this_delta = -GET_MODE_SIZE (GET_MODE (dest));
+ break;
+ case PRE_MODIFY:
+ case POST_MODIFY:
+ addr = XEXP (addr, 1);
+ gcc_assert (GET_CODE (addr) == PLUS);
+ gcc_assert (XEXP (addr, 0) == stack_pointer_rtx);
+ gcc_assert (CONST_INT_P (XEXP (addr, 1)));
+ this_delta = INTVAL (XEXP (addr, 1));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else
+ continue;
+ add_reg_note (insn, REG_ARGS_SIZE, GEN_INT (args_size));
+#ifdef STACK_GROWS_DOWNWARD
+ this_delta = -this_delta;
+#endif
+ args_size -= this_delta;
+ }
+
+ return saw_unknown ? INT_MIN : args_size;
+}
+
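The following sketch (editorial, not part of the change) illustrates the annotation this routine produces; the two pushes are hypothetical SImode pushes on a STACK_GROWS_DOWNWARD target.

/* Sketch: after emitting

     (set (mem:SI (pre_dec (reg sp))) (reg:SI A))   ; push, 4 bytes
     (set (mem:SI (pre_dec (reg sp))) (reg:SI B))   ; push, 4 bytes

   fixup_args_size_notes (prev, last, 8) walks backward from LAST,
   attaches (REG_ARGS_SIZE 8) to the second push and (REG_ARGS_SIZE 4)
   to the first, and returns 0 -- the argument-block size in effect
   before the sequence.  */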
+#ifdef PUSH_ROUNDING
/* Emit single push insn. */
static void
-emit_single_push_insn (enum machine_mode mode, rtx x, tree type)
+emit_single_push_insn_1 (enum machine_mode mode, rtx x, tree type)
{
rtx dest_addr;
unsigned rounded_size = PUSH_ROUNDING (GET_MODE_SIZE (mode));
rtx dest;
enum insn_code icode;
- insn_operand_predicate_fn pred;
stack_pointer_delta += PUSH_ROUNDING (GET_MODE_SIZE (mode));
/* If there is push pattern, use it. Otherwise try old way of throwing
icode = optab_handler (push_optab, mode);
if (icode != CODE_FOR_nothing)
{
- if (((pred = insn_data[(int) icode].operand[0].predicate)
- && !((*pred) (x, mode))))
- x = force_reg (mode, x);
- emit_insn (GEN_FCN (icode) (x));
- return;
+ struct expand_operand ops[1];
+
+ create_input_operand (&ops[0], x, mode);
+ if (maybe_expand_insn (icode, 1, ops))
+ return;
}
if (GET_MODE_SIZE (mode) == rounded_size)
dest_addr = gen_rtx_fmt_e (STACK_PUSH_CODE, Pmode, stack_pointer_rtx);
}
emit_move_insn (dest, x);
}
+
+/* Emit and annotate a single push insn. */
+
+static void
+emit_single_push_insn (enum machine_mode mode, rtx x, tree type)
+{
+ int delta, old_delta = stack_pointer_delta;
+ rtx prev = get_last_insn ();
+ rtx last;
+
+ emit_single_push_insn_1 (mode, x, type);
+
+ last = get_last_insn ();
+
+ /* Notice the common case where we emitted exactly one insn. */
+ if (PREV_INSN (last) == prev)
+ {
+ add_reg_note (last, REG_ARGS_SIZE, GEN_INT (stack_pointer_delta));
+ return;
+ }
+
+ delta = fixup_args_size_notes (prev, last, stack_pointer_delta);
+ gcc_assert (delta == INT_MIN || delta == old_delta);
+}
#endif
/* Generate code to push X onto the stack, assuming it has mode MODE and
|| align >= BIGGEST_ALIGNMENT
|| (PUSH_ROUNDING (align / BITS_PER_UNIT)
== (align / BITS_PER_UNIT)))
- && PUSH_ROUNDING (INTVAL (size)) == INTVAL (size))
+ && (HOST_WIDE_INT) PUSH_ROUNDING (INTVAL (size)) == INTVAL (size))
{
/* Push padding now if padding above and stack grows down,
or if padding below and stack grows up.
by setting SKIP to 0. */
skip = (reg_parm_stack_space == 0) ? 0 : not_stack;
- if (CONSTANT_P (x) && ! LEGITIMATE_CONSTANT_P (x))
+ if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
x = validize_mem (force_const_mem (mode, x));
/* If X is a hard register in a non-integer mode, copy it into a pseudo;
static bool
optimize_bitfield_assignment_op (unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitpos,
+ unsigned HOST_WIDE_INT bitregion_start,
+ unsigned HOST_WIDE_INT bitregion_end,
enum machine_mode mode1, rtx str_rtx,
tree to, tree src)
{
tree op0, op1;
rtx value, result;
optab binop;
+ gimple srcstmt;
+ enum tree_code code;
if (mode1 != VOIDmode
|| bitsize >= BITS_PER_WORD
return false;
STRIP_NOPS (src);
- if (!BINARY_CLASS_P (src)
- || TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE)
+ if (TREE_CODE (src) != SSA_NAME)
+ return false;
+ if (TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE)
+ return false;
+
+ srcstmt = get_gimple_for_ssa_name (src);
+ if (!srcstmt
+ || TREE_CODE_CLASS (gimple_assign_rhs_code (srcstmt)) != tcc_binary)
return false;
- op0 = TREE_OPERAND (src, 0);
- op1 = TREE_OPERAND (src, 1);
- STRIP_NOPS (op0);
+ code = gimple_assign_rhs_code (srcstmt);
+
+ op0 = gimple_assign_rhs1 (srcstmt);
+
+ /* If OP0 is an SSA_NAME, then we want to walk the use-def chain
+ to find its initialization. Hopefully the initialization will
+ be from a bitfield load. */
+ if (TREE_CODE (op0) == SSA_NAME)
+ {
+ gimple op0stmt = get_gimple_for_ssa_name (op0);
+
+ /* We want to eventually have OP0 be the same as TO, which
+ should be a bitfield. */
+ if (!op0stmt
+ || !is_gimple_assign (op0stmt)
+ || gimple_assign_rhs_code (op0stmt) != TREE_CODE (to))
+ return false;
+ op0 = gimple_assign_rhs1 (op0stmt);
+ }
+
+ op1 = gimple_assign_rhs2 (srcstmt);
if (!operand_equal_p (to, op0, 0))
return false;
if (str_bitsize == 0 || str_bitsize > BITS_PER_WORD)
str_mode = word_mode;
str_mode = get_best_mode (bitsize, bitpos,
+ bitregion_start, bitregion_end,
MEM_ALIGN (str_rtx), str_mode, 0);
if (str_mode == VOIDmode)
return false;
if (BYTES_BIG_ENDIAN)
bitpos = str_bitsize - bitpos - bitsize;
- switch (TREE_CODE (src))
+ switch (code)
{
case PLUS_EXPR:
case MINUS_EXPR:
set_mem_expr (str_rtx, 0);
}
- binop = TREE_CODE (src) == PLUS_EXPR ? add_optab : sub_optab;
+ binop = code == PLUS_EXPR ? add_optab : sub_optab;
if (bitsize == 1 && bitpos + bitsize != str_bitsize)
{
value = expand_and (str_mode, value, const1_rtx, NULL);
binop = xor_optab;
}
value = expand_shift (LSHIFT_EXPR, str_mode, value,
- build_int_cst (NULL_TREE, bitpos),
- NULL_RTX, 1);
+ bitpos, NULL_RTX, 1);
result = expand_binop (str_mode, binop, str_rtx,
value, str_rtx, 1, OPTAB_WIDEN);
if (result != str_rtx)
set_mem_expr (str_rtx, 0);
}
- binop = TREE_CODE (src) == BIT_IOR_EXPR ? ior_optab : xor_optab;
+ binop = code == BIT_IOR_EXPR ? ior_optab : xor_optab;
if (bitpos + bitsize != GET_MODE_BITSIZE (GET_MODE (str_rtx)))
{
rtx mask = GEN_INT (((unsigned HOST_WIDE_INT) 1 << bitsize)
NULL_RTX);
}
value = expand_shift (LSHIFT_EXPR, GET_MODE (str_rtx), value,
- build_int_cst (NULL_TREE, bitpos),
- NULL_RTX, 1);
+ bitpos, NULL_RTX, 1);
result = expand_binop (GET_MODE (str_rtx), binop, str_rtx,
value, str_rtx, 1, OPTAB_WIDEN);
if (result != str_rtx)
return false;
}
+/* In the C++ memory model, consecutive bit fields in a structure are
+ considered one memory location.
+
+ Given a COMPONENT_REF, this function returns the bit range of
+ consecutive bits to which this COMPONENT_REF belongs. The
+ values are returned in *BITSTART and *BITEND. If either the C++
+ memory model is not activated, or this memory access is not thread
+ visible, 0 is returned in *BITSTART and *BITEND.
+
+ EXP is the COMPONENT_REF.
+ INNERDECL is the actual object being referenced.
+ BITPOS is the position in bits where the bit field starts within the structure.
+ BITSIZE is size in bits of the field being referenced in EXP.
+
+ For example, while storing into FOO.A here...
+
+ struct {
+ BIT 0:
+ unsigned int a : 4;
+ unsigned int b : 1;
+ BIT 8:
+ unsigned char c;
+ unsigned int d : 6;
+ } foo;
+
+ ...we are not allowed to store past <b>, so for the layout above we
+ return a range of 0..7 (because no one cares if we store into the
+ padding). */
+
+static void
+get_bit_range (unsigned HOST_WIDE_INT *bitstart,
+ unsigned HOST_WIDE_INT *bitend,
+ tree exp, tree innerdecl,
+ HOST_WIDE_INT bitpos, HOST_WIDE_INT bitsize)
+{
+ tree field, record_type, fld;
+ bool found_field = false;
+ bool prev_field_is_bitfield;
+
+ gcc_assert (TREE_CODE (exp) == COMPONENT_REF);
+
+ /* If other threads can't see this value, no need to restrict stores. */
+ if (ALLOW_STORE_DATA_RACES
+ || ((TREE_CODE (innerdecl) == MEM_REF
+ || TREE_CODE (innerdecl) == TARGET_MEM_REF)
+ && !ptr_deref_may_alias_global_p (TREE_OPERAND (innerdecl, 0)))
+ || (DECL_P (innerdecl)
+ && (DECL_THREAD_LOCAL_P (innerdecl)
+ || !TREE_STATIC (innerdecl))))
+ {
+ *bitstart = *bitend = 0;
+ return;
+ }
+
+ /* Bit field we're storing into. */
+ field = TREE_OPERAND (exp, 1);
+ record_type = DECL_FIELD_CONTEXT (field);
+
+ /* Count the contiguous bitfields for the memory location that
+ contains FIELD. */
+ *bitstart = 0;
+ prev_field_is_bitfield = true;
+ for (fld = TYPE_FIELDS (record_type); fld; fld = DECL_CHAIN (fld))
+ {
+ tree t, offset;
+ enum machine_mode mode;
+ int unsignedp, volatilep;
+
+ if (TREE_CODE (fld) != FIELD_DECL)
+ continue;
+
+ t = build3 (COMPONENT_REF, TREE_TYPE (exp),
+ unshare_expr (TREE_OPERAND (exp, 0)),
+ fld, NULL_TREE);
+ get_inner_reference (t, &bitsize, &bitpos, &offset,
+ &mode, &unsignedp, &volatilep, true);
+
+ if (field == fld)
+ found_field = true;
+
+ if (DECL_BIT_FIELD_TYPE (fld) && bitsize > 0)
+ {
+ if (!prev_field_is_bitfield)
+ {
+ *bitstart = bitpos;
+ prev_field_is_bitfield = true;
+ }
+ }
+ else
+ {
+ prev_field_is_bitfield = false;
+ if (found_field)
+ break;
+ }
+ }
+ gcc_assert (found_field);
+
+ if (fld)
+ {
+ /* We found the end of the bit field sequence. Include the
+ padding up to the next field and be done. */
+ *bitend = bitpos - 1;
+ }
+ else
+ {
+ /* If this is the last element in the structure, include the padding
+ at the end of the structure. */
+ *bitend = TREE_INT_CST_LOW (TYPE_SIZE (record_type)) - 1;
+ }
+}
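A worked example for the layout in the comment above may help (editorial; assumes the record occupies 32 bits):

/* Storing to foo.a or foo.b yields the region [0, 7]: the <a,b> run
   plus the padding before <c>.  Storing to foo.d yields [16, 31]:
   <d> starts a new run at bit 16 and is the last field, so the region
   extends through the tail padding to TYPE_SIZE - 1.  */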
/* Expand an assignment that stores the value of FROM into TO. If NONTEMPORAL
is true, try generating a nontemporal store. */
rtx to_rtx = 0;
rtx result;
enum machine_mode mode;
- int align, icode;
+ int align;
+ enum insn_code icode;
/* Don't crash if the lhs of the assignment was erroneous. */
if (TREE_CODE (to) == ERROR_MARK)
{
- result = expand_normal (from);
+ expand_normal (from);
return;
}
&& ((icode = optab_handler (movmisalign_optab, mode))
!= CODE_FOR_nothing))
{
- enum machine_mode address_mode, op_mode1;
- rtx insn, reg, op0, mem;
+ struct expand_operand ops[2];
+ enum machine_mode address_mode;
+ rtx reg, op0, mem;
reg = expand_expr (from, NULL_RTX, VOIDmode, EXPAND_NORMAL);
reg = force_not_mem (reg);
if (TREE_THIS_VOLATILE (to))
MEM_VOLATILE_P (mem) = 1;
- op_mode1 = insn_data[icode].operand[1].mode;
- if (! (*insn_data[icode].operand[1].predicate) (reg, op_mode1)
- && op_mode1 != VOIDmode)
- reg = copy_to_mode_reg (op_mode1, reg);
-
- insn = GEN_FCN (icode) (mem, reg);
+ create_fixed_operand (&ops[0], mem);
+ create_input_operand (&ops[1], reg, mode);
/* The movmisalign<mode> pattern cannot fail, else the assignment would
silently be omitted. */
- gcc_assert (insn != NULL_RTX);
- emit_insn (insn);
+ expand_insn (icode, 2, ops);
return;
}
{
enum machine_mode mode1;
HOST_WIDE_INT bitsize, bitpos;
+ unsigned HOST_WIDE_INT bitregion_start = 0;
+ unsigned HOST_WIDE_INT bitregion_end = 0;
tree offset;
int unsignedp;
int volatilep = 0;
tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1,
&unsignedp, &volatilep, true);
+ if (TREE_CODE (to) == COMPONENT_REF
+ && DECL_BIT_FIELD_TYPE (TREE_OPERAND (to, 1)))
+ get_bit_range (&bitregion_start, &bitregion_end,
+ to, tem, bitpos, bitsize);
+
/* If we are going to use store_bit_field and extract_bit_field,
make sure to_rtx will be safe for multiple use. */
to_rtx = expand_normal (tem);
/* If the bitfield is volatile, we want to access it in the
- field's mode, not the computed mode. */
- if (volatilep
- && GET_CODE (to_rtx) == MEM
- && flag_strict_volatile_bitfields > 0)
- to_rtx = adjust_address (to_rtx, mode1, 0);
+ field's mode, not the computed mode.
+ If a MEM has VOIDmode (external with incomplete type),
+ use BLKmode for it instead. */
+ if (MEM_P (to_rtx))
+ {
+ if (volatilep && flag_strict_volatile_bitfields > 0)
+ to_rtx = adjust_address (to_rtx, mode1, 0);
+ else if (GET_MODE (to_rtx) == VOIDmode)
+ to_rtx = adjust_address (to_rtx, BLKmode, 0);
+ }
if (offset != 0)
{
if (!MEM_P (to_rtx)
&& GET_MODE (to_rtx) != BLKmode
&& (unsigned HOST_WIDE_INT) bitpos
- >= GET_MODE_BITSIZE (GET_MODE (to_rtx)))
+ >= GET_MODE_PRECISION (GET_MODE (to_rtx)))
{
expand_normal (from);
result = NULL;
/* Handle expand_expr of a complex value returning a CONCAT. */
else if (GET_CODE (to_rtx) == CONCAT)
{
- if (COMPLEX_MODE_P (TYPE_MODE (TREE_TYPE (from))))
+ unsigned short mode_bitsize = GET_MODE_BITSIZE (GET_MODE (to_rtx));
+ if (COMPLEX_MODE_P (TYPE_MODE (TREE_TYPE (from)))
+ && bitpos == 0
+ && bitsize == mode_bitsize)
+ result = store_expr (from, to_rtx, false, nontemporal);
+ else if (bitsize == mode_bitsize / 2
+ && (bitpos == 0 || bitpos == mode_bitsize / 2))
+ result = store_expr (from, XEXP (to_rtx, bitpos != 0), false,
+ nontemporal);
+ else if (bitpos + bitsize <= mode_bitsize / 2)
+ result = store_field (XEXP (to_rtx, 0), bitsize, bitpos,
+ bitregion_start, bitregion_end,
+ mode1, from, TREE_TYPE (tem),
+ get_alias_set (to), nontemporal);
+ else if (bitpos >= mode_bitsize / 2)
+ result = store_field (XEXP (to_rtx, 1), bitsize,
+ bitpos - mode_bitsize / 2,
+ bitregion_start, bitregion_end,
+ mode1, from,
+ TREE_TYPE (tem), get_alias_set (to),
+ nontemporal);
+ else if (bitpos == 0 && bitsize == mode_bitsize)
{
- gcc_assert (bitpos == 0);
- result = store_expr (from, to_rtx, false, nontemporal);
+ rtx from_rtx;
+ result = expand_normal (from);
+ from_rtx = simplify_gen_subreg (GET_MODE (to_rtx), result,
+ TYPE_MODE (TREE_TYPE (from)), 0);
+ emit_move_insn (XEXP (to_rtx, 0),
+ read_complex_part (from_rtx, false));
+ emit_move_insn (XEXP (to_rtx, 1),
+ read_complex_part (from_rtx, true));
}
else
{
- gcc_assert (bitpos == 0 || bitpos == GET_MODE_BITSIZE (mode1));
- result = store_expr (from, XEXP (to_rtx, bitpos != 0), false,
- nontemporal);
+ rtx temp = assign_stack_temp (GET_MODE (to_rtx),
+ GET_MODE_SIZE (GET_MODE (to_rtx)),
+ 0);
+ write_complex_part (temp, XEXP (to_rtx, 0), false);
+ write_complex_part (temp, XEXP (to_rtx, 1), true);
+ result = store_field (temp, bitsize, bitpos,
+ bitregion_start, bitregion_end,
+ mode1, from,
+ TREE_TYPE (tem), get_alias_set (to),
+ nontemporal);
+ emit_move_insn (XEXP (to_rtx, 0), read_complex_part (temp, false));
+ emit_move_insn (XEXP (to_rtx, 1), read_complex_part (temp, true));
}
}
else
MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
}
- if (optimize_bitfield_assignment_op (bitsize, bitpos, mode1,
+ if (optimize_bitfield_assignment_op (bitsize, bitpos,
+ bitregion_start, bitregion_end,
+ mode1,
to_rtx, to, from))
result = NULL;
else
- result = store_field (to_rtx, bitsize, bitpos, mode1, from,
+ result = store_field (to_rtx, bitsize, bitpos,
+ bitregion_start, bitregion_end,
+ mode1, from,
TREE_TYPE (tem), get_alias_set (to),
nontemporal);
}
bool
emit_storent_insn (rtx to, rtx from)
{
- enum machine_mode mode = GET_MODE (to), imode;
+ struct expand_operand ops[2];
+ enum machine_mode mode = GET_MODE (to);
enum insn_code code = optab_handler (storent_optab, mode);
- rtx pattern;
if (code == CODE_FOR_nothing)
return false;
- imode = insn_data[code].operand[0].mode;
- if (!insn_data[code].operand[0].predicate (to, imode))
- return false;
-
- imode = insn_data[code].operand[1].mode;
- if (!insn_data[code].operand[1].predicate (from, imode))
- {
- from = copy_to_mode_reg (imode, from);
- if (!insn_data[code].operand[1].predicate (from, imode))
- return false;
- }
-
- pattern = GEN_FCN (code) (to, from);
- if (pattern == NULL_RTX)
- return false;
-
- emit_insn (pattern);
- return true;
+ create_fixed_operand (&ops[0], to);
+ create_input_operand (&ops[1], from, mode);
+ return maybe_expand_insn (code, 2, ops);
}
/* Generate code for computing expression EXP,
/* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET,
but TARGET is not a valid memory reference, TEMP will differ
from TARGET although it is really the same location. */
- && !(alt_rtl && rtx_equal_p (alt_rtl, target))
+ && !(alt_rtl
+ && rtx_equal_p (alt_rtl, target)
+ && !side_effects_p (alt_rtl)
+ && !side_effects_p (target))
/* If there's nothing to copy, don't bother. Don't call
expr_size unless necessary, because some front-ends (C++)
expr_size-hook must not be given objects that are not
: BLOCK_OP_NORMAL));
else if (GET_MODE (target) == BLKmode)
store_bit_field (target, INTVAL (expr_size (exp)) * BITS_PER_UNIT,
- 0, GET_MODE (temp), temp);
+ 0, 0, 0, GET_MODE (temp), temp);
else
convert_move (target, temp, unsignedp);
}
return NULL_RTX;
}
\f
+/* Return true if field F of structure TYPE is a flexible array. */
+
+static bool
+flexible_array_member_p (const_tree f, const_tree type)
+{
+ const_tree tf;
+
+ tf = TREE_TYPE (f);
+ return (DECL_CHAIN (f) == NULL
+ && TREE_CODE (tf) == ARRAY_TYPE
+ && TYPE_DOMAIN (tf)
+ && TYPE_MIN_VALUE (TYPE_DOMAIN (tf))
+ && integer_zerop (TYPE_MIN_VALUE (TYPE_DOMAIN (tf)))
+ && !TYPE_MAX_VALUE (TYPE_DOMAIN (tf))
+ && int_size_in_bytes (type) >= 0);
+}
+
+/* If FOR_CTOR_P, return the number of top-level elements that a constructor
+ must have in order for it to completely initialize a value of type TYPE.
+ Return -1 if the number isn't known.
+
+ If !FOR_CTOR_P, return an estimate of the number of scalars in TYPE. */
+
+static HOST_WIDE_INT
+count_type_elements (const_tree type, bool for_ctor_p)
+{
+ switch (TREE_CODE (type))
+ {
+ case ARRAY_TYPE:
+ {
+ tree nelts;
+
+ nelts = array_type_nelts (type);
+ if (nelts && host_integerp (nelts, 1))
+ {
+ unsigned HOST_WIDE_INT n;
+
+ n = tree_low_cst (nelts, 1) + 1;
+ if (n == 0 || for_ctor_p)
+ return n;
+ else
+ return n * count_type_elements (TREE_TYPE (type), false);
+ }
+ return for_ctor_p ? -1 : 1;
+ }
+
+ case RECORD_TYPE:
+ {
+ unsigned HOST_WIDE_INT n;
+ tree f;
+
+ n = 0;
+ for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
+ if (TREE_CODE (f) == FIELD_DECL)
+ {
+ if (!for_ctor_p)
+ n += count_type_elements (TREE_TYPE (f), false);
+ else if (!flexible_array_member_p (f, type))
+ /* Don't count flexible arrays, which are not supposed
+ to be initialized. */
+ n += 1;
+ }
+
+ return n;
+ }
+
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ {
+ tree f;
+ HOST_WIDE_INT n, m;
+
+ gcc_assert (!for_ctor_p);
+ /* Estimate the number of scalars in each field and pick the
+ maximum. Other estimates would do instead; the idea is simply
+ to make sure that the estimate is not sensitive to the ordering
+ of the fields. */
+ n = 1;
+ for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
+ if (TREE_CODE (f) == FIELD_DECL)
+ {
+ m = count_type_elements (TREE_TYPE (f), false);
+ /* If the field doesn't span the whole union, add an extra
+ scalar for the rest. */
+ if (simple_cst_equal (TYPE_SIZE (TREE_TYPE (f)),
+ TYPE_SIZE (type)) != 1)
+ m++;
+ if (n < m)
+ n = m;
+ }
+ return n;
+ }
+
+ case COMPLEX_TYPE:
+ return 2;
+
+ case VECTOR_TYPE:
+ return TYPE_VECTOR_SUBPARTS (type);
+
+ case INTEGER_TYPE:
+ case REAL_TYPE:
+ case FIXED_POINT_TYPE:
+ case ENUMERAL_TYPE:
+ case BOOLEAN_TYPE:
+ case POINTER_TYPE:
+ case OFFSET_TYPE:
+ case REFERENCE_TYPE:
+ return 1;
+
+ case ERROR_MARK:
+ return 0;
+
+ case VOID_TYPE:
+ case METHOD_TYPE:
+ case FUNCTION_TYPE:
+ case LANG_TYPE:
+ default:
+ gcc_unreachable ();
+ }
+}
+
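An editorial example of the FOR_CTOR_P distinction, using a hypothetical type:

/* For  struct s { int a[3]; int b; }:
     count_type_elements (s, true)  == 2   -- a complete constructor
                                             needs two top-level
                                             elements;
     count_type_elements (s, false) == 4   -- the type contains four
                                             scalars in total.  */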
/* Helper for categorize_ctor_elements. Identical interface. */
static bool
categorize_ctor_elements_1 (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
- HOST_WIDE_INT *p_elt_count,
- bool *p_must_clear)
+ HOST_WIDE_INT *p_init_elts, bool *p_complete)
{
unsigned HOST_WIDE_INT idx;
- HOST_WIDE_INT nz_elts, elt_count;
- tree value, purpose;
+ HOST_WIDE_INT nz_elts, init_elts, num_fields;
+ tree value, purpose, elt_type;
/* Whether CTOR is a valid constant initializer, in accordance with what
initializer_constant_valid_p does. If inferred from the constructor
bool const_p = const_from_elts_p ? true : TREE_STATIC (ctor);
nz_elts = 0;
- elt_count = 0;
+ init_elts = 0;
+ num_fields = 0;
+ elt_type = NULL_TREE;
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), idx, purpose, value)
{
mult = (tree_low_cst (hi_index, 1)
- tree_low_cst (lo_index, 1) + 1);
}
+ num_fields += mult;
+ elt_type = TREE_TYPE (value);
switch (TREE_CODE (value))
{
{
HOST_WIDE_INT nz = 0, ic = 0;
- bool const_elt_p
- = categorize_ctor_elements_1 (value, &nz, &ic, p_must_clear);
+ bool const_elt_p = categorize_ctor_elements_1 (value, &nz, &ic,
+ p_complete);
nz_elts += mult * nz;
- elt_count += mult * ic;
+ init_elts += mult * ic;
if (const_from_elts_p && const_p)
const_p = const_elt_p;
case FIXED_CST:
if (!initializer_zerop (value))
nz_elts += mult;
- elt_count += mult;
+ init_elts += mult;
break;
case STRING_CST:
nz_elts += mult * TREE_STRING_LENGTH (value);
- elt_count += mult * TREE_STRING_LENGTH (value);
+ init_elts += mult * TREE_STRING_LENGTH (value);
break;
case COMPLEX_CST:
nz_elts += mult;
if (!initializer_zerop (TREE_IMAGPART (value)))
nz_elts += mult;
- elt_count += mult;
+ init_elts += mult;
break;
case VECTOR_CST:
{
if (!initializer_zerop (TREE_VALUE (v)))
nz_elts += mult;
- elt_count += mult;
+ init_elts += mult;
}
}
break;
default:
{
- HOST_WIDE_INT tc = count_type_elements (TREE_TYPE (value), true);
- if (tc < 1)
- tc = 1;
+ HOST_WIDE_INT tc = count_type_elements (elt_type, false);
nz_elts += mult * tc;
- elt_count += mult * tc;
+ init_elts += mult * tc;
if (const_from_elts_p && const_p)
- const_p = initializer_constant_valid_p (value, TREE_TYPE (value))
+ const_p = initializer_constant_valid_p (value, elt_type)
!= NULL_TREE;
}
break;
}
}
- if (!*p_must_clear
- && (TREE_CODE (TREE_TYPE (ctor)) == UNION_TYPE
- || TREE_CODE (TREE_TYPE (ctor)) == QUAL_UNION_TYPE))
- {
- tree init_sub_type;
- bool clear_this = true;
-
- if (!VEC_empty (constructor_elt, CONSTRUCTOR_ELTS (ctor)))
- {
- /* We don't expect more than one element of the union to be
- initialized. Not sure what we should do otherwise... */
- gcc_assert (VEC_length (constructor_elt, CONSTRUCTOR_ELTS (ctor))
- == 1);
-
- init_sub_type = TREE_TYPE (VEC_index (constructor_elt,
- CONSTRUCTOR_ELTS (ctor),
- 0)->value);
-
- /* ??? We could look at each element of the union, and find the
- largest element. Which would avoid comparing the size of the
- initialized element against any tail padding in the union.
- Doesn't seem worth the effort... */
- if (simple_cst_equal (TYPE_SIZE (TREE_TYPE (ctor)),
- TYPE_SIZE (init_sub_type)) == 1)
- {
- /* And now we have to find out if the element itself is fully
- constructed. E.g. for union { struct { int a, b; } s; } u
- = { .s = { .a = 1 } }. */
- if (elt_count == count_type_elements (init_sub_type, false))
- clear_this = false;
- }
- }
-
- *p_must_clear = clear_this;
- }
+ if (*p_complete && !complete_ctor_at_level_p (TREE_TYPE (ctor),
+ num_fields, elt_type))
+ *p_complete = false;
*p_nz_elts += nz_elts;
- *p_elt_count += elt_count;
+ *p_init_elts += init_elts;
return const_p;
}
and place it in *P_NZ_ELTS;
* how many scalar fields in total are in CTOR,
and place it in *P_INIT_ELTS;
- * if a type is a union, and the initializer from the constructor
- is not the largest element in the union, then set *p_must_clear.
+ * whether the constructor is complete -- in the sense that every
+ meaningful byte is explicitly given a value --
+ and place it in *P_COMPLETE.
Return whether or not CTOR is a valid static constant initializer, the same
as "initializer_constant_valid_p (CTOR, TREE_TYPE (CTOR)) != 0". */
bool
categorize_ctor_elements (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
- HOST_WIDE_INT *p_elt_count,
- bool *p_must_clear)
+ HOST_WIDE_INT *p_init_elts, bool *p_complete)
{
*p_nz_elts = 0;
- *p_elt_count = 0;
- *p_must_clear = false;
+ *p_init_elts = 0;
+ *p_complete = true;
- return
- categorize_ctor_elements_1 (ctor, p_nz_elts, p_elt_count, p_must_clear);
+ return categorize_ctor_elements_1 (ctor, p_nz_elts, p_init_elts, p_complete);
}
-/* Count the number of scalars in TYPE. Return -1 on overflow or
- variable-sized. If ALLOW_FLEXARR is true, don't count flexible
- array member at the end of the structure. */
+/* TYPE is initialized by a constructor with NUM_ELTS elements, the last
+ of which had type LAST_TYPE. Each element was itself a complete
+ initializer, in the sense that every meaningful byte was explicitly
+ given a value. Return true if the same is true for the constructor
+ as a whole. */
-HOST_WIDE_INT
-count_type_elements (const_tree type, bool allow_flexarr)
+bool
+complete_ctor_at_level_p (const_tree type, HOST_WIDE_INT num_elts,
+ const_tree last_type)
{
- const HOST_WIDE_INT max = ~((HOST_WIDE_INT)1 << (HOST_BITS_PER_WIDE_INT-1));
- switch (TREE_CODE (type))
+ if (TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE)
{
- case ARRAY_TYPE:
- {
- tree telts = array_type_nelts (type);
- if (telts && host_integerp (telts, 1))
- {
- HOST_WIDE_INT n = tree_low_cst (telts, 1) + 1;
- HOST_WIDE_INT m = count_type_elements (TREE_TYPE (type), false);
- if (n == 0)
- return 0;
- else if (max / n > m)
- return n * m;
- }
- return -1;
- }
-
- case RECORD_TYPE:
- {
- HOST_WIDE_INT n = 0, t;
- tree f;
-
- for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
- if (TREE_CODE (f) == FIELD_DECL)
- {
- t = count_type_elements (TREE_TYPE (f), false);
- if (t < 0)
- {
- /* Check for structures with flexible array member. */
- tree tf = TREE_TYPE (f);
- if (allow_flexarr
- && DECL_CHAIN (f) == NULL
- && TREE_CODE (tf) == ARRAY_TYPE
- && TYPE_DOMAIN (tf)
- && TYPE_MIN_VALUE (TYPE_DOMAIN (tf))
- && integer_zerop (TYPE_MIN_VALUE (TYPE_DOMAIN (tf)))
- && !TYPE_MAX_VALUE (TYPE_DOMAIN (tf))
- && int_size_in_bytes (type) >= 0)
- break;
-
- return -1;
- }
- n += t;
- }
-
- return n;
- }
-
- case UNION_TYPE:
- case QUAL_UNION_TYPE:
- return -1;
-
- case COMPLEX_TYPE:
- return 2;
-
- case VECTOR_TYPE:
- return TYPE_VECTOR_SUBPARTS (type);
-
- case INTEGER_TYPE:
- case REAL_TYPE:
- case FIXED_POINT_TYPE:
- case ENUMERAL_TYPE:
- case BOOLEAN_TYPE:
- case POINTER_TYPE:
- case OFFSET_TYPE:
- case REFERENCE_TYPE:
- return 1;
+ if (num_elts == 0)
+ return false;
- case ERROR_MARK:
- return 0;
+ gcc_assert (num_elts == 1 && last_type);
- case VOID_TYPE:
- case METHOD_TYPE:
- case FUNCTION_TYPE:
- case LANG_TYPE:
- default:
- gcc_unreachable ();
+ /* ??? We could look at each element of the union, and find the
+ largest element. Which would avoid comparing the size of the
+ initialized element against any tail padding in the union.
+ Doesn't seem worth the effort... */
+ return simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (last_type)) == 1;
}
+
+ return count_type_elements (type, true) == num_elts;
}
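An editorial example, with a hypothetical union, of how the completeness test takes over from the removed must_clear logic:

/* For  union u { int i; char c; }  initialized as  { .c = 0 }:
   the single constructor element has type char, whose size differs
   from the union's, so complete_ctor_at_level_p returns false and
   categorize_ctor_elements reports the constructor as incomplete.
   mostly_zeros_p (below) then returns true, so callers that check it
   should still clear the whole object first -- the same decision the
   removed must_clear code used to make.  */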
/* Return 1 if EXP contains mostly (3/4) zeros. */
mostly_zeros_p (const_tree exp)
{
if (TREE_CODE (exp) == CONSTRUCTOR)
-
{
- HOST_WIDE_INT nz_elts, count, elts;
- bool must_clear;
-
- categorize_ctor_elements (exp, &nz_elts, &count, &must_clear);
- if (must_clear)
- return 1;
+ HOST_WIDE_INT nz_elts, init_elts;
+ bool complete_p;
- elts = count_type_elements (TREE_TYPE (exp), false);
-
- return nz_elts < elts / 4;
+ categorize_ctor_elements (exp, &nz_elts, &init_elts, &complete_p);
+ return !complete_p || nz_elts < init_elts / 4;
}
return initializer_zerop (exp);
all_zeros_p (const_tree exp)
{
if (TREE_CODE (exp) == CONSTRUCTOR)
-
{
- HOST_WIDE_INT nz_elts, count;
- bool must_clear;
+ HOST_WIDE_INT nz_elts, init_elts;
+ bool complete_p;
- categorize_ctor_elements (exp, &nz_elts, &count, &must_clear);
+ categorize_ctor_elements (exp, &nz_elts, &init_elts, &complete_p);
return nz_elts == 0;
}
store_constructor (exp, target, cleared, bitsize / BITS_PER_UNIT);
}
else
- store_field (target, bitsize, bitpos, mode, exp, type, alias_set, false);
+ store_field (target, bitsize, bitpos, 0, 0, mode, exp, type, alias_set,
+ false);
}
/* Store the value of constructor EXP into the rtx TARGET.
int n_elts_here = tree_low_cst
(int_const_binop (TRUNC_DIV_EXPR,
TYPE_SIZE (TREE_TYPE (value)),
- TYPE_SIZE (elttype), 0), 1);
+ TYPE_SIZE (elttype)), 1);
count += n_elts_here;
if (mostly_zeros_p (value))
BITSIZE bits, starting BITPOS bits from the start of TARGET.
If MODE is VOIDmode, it means that we are storing into a bit-field.
+ BITREGION_START is the bit position of the first bit of this region.
+ BITREGION_END is the bit position of the last bit of this region.
+ These two fields are 0 if the C++ memory model does not apply,
+ or if we are not interested in keeping track of bitfield regions.
+
Always return const0_rtx unless we have something particular to
return.
static rtx
store_field (rtx target, HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
+ unsigned HOST_WIDE_INT bitregion_start,
+ unsigned HOST_WIDE_INT bitregion_end,
enum machine_mode mode, tree exp, tree type,
alias_set_type alias_set, bool nontemporal)
{
if (bitsize != (HOST_WIDE_INT) GET_MODE_BITSIZE (GET_MODE (target)))
emit_move_insn (object, target);
- store_field (blk_object, bitsize, bitpos, mode, exp, type, alias_set,
- nontemporal);
+ store_field (blk_object, bitsize, bitpos,
+ bitregion_start, bitregion_end,
+ mode, exp, type, alias_set, nontemporal);
emit_move_insn (target, object);
&& bitsize < (HOST_WIDE_INT) GET_MODE_BITSIZE (GET_MODE (temp))
&& TREE_CODE (TREE_TYPE (exp)) == RECORD_TYPE)
temp = expand_shift (RSHIFT_EXPR, GET_MODE (temp), temp,
- size_int (GET_MODE_BITSIZE (GET_MODE (temp))
- - bitsize),
+ GET_MODE_BITSIZE (GET_MODE (temp)) - bitsize,
NULL_RTX, 1);
/* Unless MODE is VOIDmode or BLKmode, convert TEMP to
}
/* Store the value in the bitfield. */
- store_bit_field (target, bitsize, bitpos, mode, temp);
+ store_bit_field (target, bitsize, bitpos,
+ bitregion_start, bitregion_end,
+ mode, temp);
return const0_rtx;
}
if (to_rtx == target)
to_rtx = copy_rtx (to_rtx);
- MEM_SET_IN_STRUCT_P (to_rtx, 1);
+ if (!MEM_SCALAR_P (to_rtx))
+ MEM_IN_STRUCT_P (to_rtx) = 1;
if (!MEM_KEEP_ALIAS_SET_P (to_rtx) && MEM_ALIAS_SET (to_rtx) != 0)
set_mem_alias_set (to_rtx, alias_set);
#ifdef INSN_SCHEDULING
/* On machines that have insn scheduling, we want all memory references to be
explicit, so we need to deal with such paradoxical SUBREGs. */
- if (GET_CODE (value) == SUBREG && MEM_P (SUBREG_REG (value))
- && (GET_MODE_SIZE (GET_MODE (value))
- > GET_MODE_SIZE (GET_MODE (SUBREG_REG (value)))))
+ if (paradoxical_subreg_p (value) && MEM_P (SUBREG_REG (value)))
value
= simplify_gen_subreg (GET_MODE (value),
force_reg (GET_MODE (SUBREG_REG (value)),
/* If the DECL isn't in memory, then the DECL wasn't properly
marked TREE_ADDRESSABLE, which will be either a front-end
or a tree optimizer bug. */
- gcc_assert (MEM_P (result));
+
+ if (TREE_ADDRESSABLE (exp)
+ && ! MEM_P (result)
+ && ! targetm.calls.allocate_stack_slots_for_args ())
+ {
+ error ("local frame unavailable (naked function?)");
+ return result;
+ }
+ else
+ gcc_assert (MEM_P (result));
result = XEXP (result, 0);
/* ??? Is this needed anymore? */
tmp = convert_memory_address_addr_space (tmode, tmp, as);
if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
- result = gen_rtx_PLUS (tmode, result, tmp);
+ result = simplify_gen_binary (PLUS, tmode, result, tmp);
else
{
subtarget = bitpos ? NULL_RTX : target;
int ignore;
bool reduce_bit_field;
location_t loc = ops->location;
- tree treeop0, treeop1;
+ tree treeop0, treeop1, treeop2;
#define REDUCE_BIT_FIELD(expr) (reduce_bit_field \
? reduce_to_bit_field_precision ((expr), \
target, \
treeop0 = ops->op0;
treeop1 = ops->op1;
+ treeop2 = ops->op2;
/* We should be called only on simple (binary or unary) expressions,
exactly those that are valid in gimple expressions that aren't
/* An operation in what may be a bit-field type needs the
result to be reduced to the precision of the bit-field type,
which is narrower than that of the type's mode. */
- reduce_bit_field = (TREE_CODE (type) == INTEGER_TYPE
+ reduce_bit_field = (INTEGRAL_TYPE_P (type)
&& GET_MODE_PRECISION (mode) > TYPE_PRECISION (type));
if (reduce_bit_field && modifier == EXPAND_STACK_PARM)
(treeop0))
* BITS_PER_UNIT),
(HOST_WIDE_INT) GET_MODE_BITSIZE (mode)),
- 0, TYPE_MODE (valtype), treeop0,
+ 0, 0, 0, TYPE_MODE (valtype), treeop0,
type, 0, false);
}
else if (CONSTANT_P (op0))
{
tree inner_type = TREE_TYPE (treeop0);
- enum machine_mode inner_mode = TYPE_MODE (inner_type);
+ enum machine_mode inner_mode = GET_MODE (op0);
+
+ if (inner_mode == VOIDmode)
+ inner_mode = TYPE_MODE (inner_type);
if (modifier == EXPAND_INITIALIZER)
op0 = simplify_gen_subreg (mode, op0, inner_mode,
if (modifier == EXPAND_STACK_PARM)
target = 0;
if (TREE_CODE (treeop0) == INTEGER_CST
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && GET_MODE_PRECISION (mode) <= HOST_BITS_PER_WIDE_INT
&& TREE_CONSTANT (treeop1))
{
rtx constant_part;
}
else if (TREE_CODE (treeop1) == INTEGER_CST
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && GET_MODE_PRECISION (mode) <= HOST_BITS_PER_WIDE_INT
&& TREE_CONSTANT (treeop0))
{
rtx constant_part;
case WIDEN_MULT_PLUS_EXPR:
case WIDEN_MULT_MINUS_EXPR:
expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
- op2 = expand_normal (ops->op2);
+ op2 = expand_normal (treeop2);
target = expand_widen_pattern_expr (ops, op0, op1, op2,
target, unsignedp);
return target;
if (optab_handler (this_optab, mode) != CODE_FOR_nothing)
{
if (TYPE_UNSIGNED (TREE_TYPE (treeop0)))
- expand_operands (treeop0, treeop1, subtarget, &op0, &op1,
+ expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
EXPAND_NORMAL);
else
- expand_operands (treeop0, treeop1, subtarget, &op1, &op0,
+ expand_operands (treeop0, treeop1, NULL_RTX, &op1, &op0,
EXPAND_NORMAL);
goto binop3;
}
optab other_optab = zextend_p ? smul_widen_optab : umul_widen_optab;
this_optab = zextend_p ? umul_widen_optab : smul_widen_optab;
- if (mode == GET_MODE_2XWIDER_MODE (innermode))
+ if (mode == GET_MODE_2XWIDER_MODE (innermode)
+ && TREE_CODE (treeop0) != INTEGER_CST)
{
if (optab_handler (this_optab, mode) != CODE_FOR_nothing)
{
expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
+ case FMA_EXPR:
+ {
+ optab opt = fma_optab;
+ gimple def0, def2;
+
+ /* If there is no insn for FMA, emit it as a __builtin_fma{,f,l}
+ call. */
+ if (optab_handler (fma_optab, mode) == CODE_FOR_nothing)
+ {
+ tree fn = mathfn_built_in (TREE_TYPE (treeop0), BUILT_IN_FMA);
+ tree call_expr;
+
+ gcc_assert (fn != NULL_TREE);
+ call_expr = build_call_expr (fn, 3, treeop0, treeop1, treeop2);
+ return expand_builtin (call_expr, target, subtarget, mode, false);
+ }
+
+ def0 = get_def_for_expr (treeop0, NEGATE_EXPR);
+ def2 = get_def_for_expr (treeop2, NEGATE_EXPR);
+
+ op0 = op2 = NULL;
+
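+ /* When an operand comes from a NEGATE_EXPR, fold the negation into
+ the fused operation by using the FNMS, FNMA or FMS pattern if the
+ target provides it. */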
+ if (def0 && def2
+ && optab_handler (fnms_optab, mode) != CODE_FOR_nothing)
+ {
+ opt = fnms_optab;
+ op0 = expand_normal (gimple_assign_rhs1 (def0));
+ op2 = expand_normal (gimple_assign_rhs1 (def2));
+ }
+ else if (def0
+ && optab_handler (fnma_optab, mode) != CODE_FOR_nothing)
+ {
+ opt = fnma_optab;
+ op0 = expand_normal (gimple_assign_rhs1 (def0));
+ }
+ else if (def2
+ && optab_handler (fms_optab, mode) != CODE_FOR_nothing)
+ {
+ opt = fms_optab;
+ op2 = expand_normal (gimple_assign_rhs1 (def2));
+ }
+
+ if (op0 == NULL)
+ op0 = expand_expr (treeop0, subtarget, VOIDmode, EXPAND_NORMAL);
+ if (op2 == NULL)
+ op2 = expand_normal (treeop2);
+ op1 = expand_normal (treeop1);
+
+ return expand_ternary_op (TYPE_MODE (type), opt,
+ op0, op1, op2, target, 0);
+ }
+
case MULT_EXPR:
/* If this is a fixed-point operation, then we cannot use the code
below because "expand_mult" doesn't support sat/no-sat fixed-point
VOIDmode, EXPAND_NORMAL);
if (modifier == EXPAND_STACK_PARM)
target = 0;
- temp = expand_unop (mode, one_cmpl_optab, op0, target, 1);
+ /* If the result has to be reduced to bitfield precision, expand the
+ complement as an XOR with the appropriate mask constant instead. */
+ if (reduce_bit_field)
+ temp = expand_binop (mode, xor_optab, op0,
+ immed_double_int_const
+ (double_int_mask (TYPE_PRECISION (type)), mode),
+ target, 1, OPTAB_LIB_WIDEN);
+ else
+ temp = expand_unop (mode, one_cmpl_optab, op0, target, 1);
gcc_assert (temp);
return temp;
and (a bitwise1 b) bitwise2 b (etc)
but that is probably not worth while. */
- /* BIT_AND_EXPR is for bitwise anding. TRUTH_AND_EXPR is for anding two
- boolean values when we want in all cases to compute both of them. In
- general it is fastest to do TRUTH_AND_EXPR by computing both operands
- as actual zero-or-1 values and then bitwise anding. In cases where
- there cannot be any side effects, better code would be made by
- treating TRUTH_AND_EXPR like TRUTH_ANDIF_EXPR; but the question is
- how to recognize those cases. */
-
- case TRUTH_AND_EXPR:
- code = BIT_AND_EXPR;
case BIT_AND_EXPR:
- goto binop;
-
- case TRUTH_OR_EXPR:
- code = BIT_IOR_EXPR;
case BIT_IOR_EXPR:
- goto binop;
-
- case TRUTH_XOR_EXPR:
- code = BIT_XOR_EXPR;
case BIT_XOR_EXPR:
goto binop;
target = 0;
op0 = expand_expr (treeop0, subtarget,
VOIDmode, EXPAND_NORMAL);
- temp = expand_shift (code, mode, op0, treeop1, target,
- unsignedp);
+ temp = expand_variable_shift (code, mode, op0, treeop1, target,
+ unsignedp);
if (code == LSHIFT_EXPR)
temp = REDUCE_BIT_FIELD (temp);
return temp;
op1 = gen_label_rtx ();
jumpifnot_1 (code, treeop0, treeop1, op1, -1);
- emit_move_insn (target, const1_rtx);
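+ /* A signed 1-bit type can only hold 0 and -1, so "true" must be
+ represented as -1 rather than 1. */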
+ if (TYPE_PRECISION (type) == 1 && !TYPE_UNSIGNED (type))
+ emit_move_insn (target, constm1_rtx);
+ else
+ emit_move_insn (target, const1_rtx);
emit_label (op1);
return target;
- case TRUTH_NOT_EXPR:
- if (modifier == EXPAND_STACK_PARM)
- target = 0;
- op0 = expand_expr (treeop0, target,
- VOIDmode, EXPAND_NORMAL);
- /* The parser is careful to generate TRUTH_NOT_EXPR
- only with operands that are always zero or one. */
- temp = expand_binop (mode, xor_optab, op0, const1_rtx,
- target, 1, OPTAB_LIB_WIDEN);
- gcc_assert (temp);
- return temp;
-
case COMPLEX_EXPR:
/* Get the rtx code of the operands. */
op0 = expand_normal (treeop0);
case VEC_UNPACK_LO_EXPR:
{
op0 = expand_normal (treeop0);
- this_optab = optab_for_tree_code (code, type, optab_default);
temp = expand_widen_pattern_expr (ops, op0, NULL_RTX, NULL_RTX,
target, unsignedp);
gcc_assert (temp);
{
op0 = expand_normal (treeop0);
/* The signedness is determined from input operand. */
- this_optab = optab_for_tree_code (code,
- TREE_TYPE (treeop0),
- optab_default);
temp = expand_widen_pattern_expr
(ops, op0, NULL_RTX, NULL_RTX,
target, TYPE_UNSIGNED (TREE_TYPE (treeop0)));
mode = TYPE_MODE (TREE_TYPE (treeop0));
goto binop;
+ case DOT_PROD_EXPR:
+ {
+ tree oprnd0 = treeop0;
+ tree oprnd1 = treeop1;
+ tree oprnd2 = treeop2;
+ rtx op2;
+
+ expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+ op2 = expand_normal (oprnd2);
+ target = expand_widen_pattern_expr (ops, op0, op1, op2,
+ target, unsignedp);
+ return target;
+ }
+
+ case REALIGN_LOAD_EXPR:
+ {
+ tree oprnd0 = treeop0;
+ tree oprnd1 = treeop1;
+ tree oprnd2 = treeop2;
+ rtx op2;
+
+ this_optab = optab_for_tree_code (code, type, optab_default);
+ expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
+ op2 = expand_normal (oprnd2);
+ temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
+ target, unsignedp);
+ gcc_assert (temp);
+ return temp;
+ }
+
default:
gcc_unreachable ();
}
temp = expand_binop (mode, this_optab, op0, op1, target,
unsignedp, OPTAB_LIB_WIDEN);
gcc_assert (temp);
+ /* Bitwise operations do not need bitfield reduction as we expect
+ their operands to be properly truncated already. */
+ if (code == BIT_XOR_EXPR
+ || code == BIT_AND_EXPR
+ || code == BIT_IOR_EXPR)
+ return temp;
return REDUCE_BIT_FIELD (temp);
}
#undef REDUCE_BIT_FIELD
int unsignedp;
enum machine_mode mode;
enum tree_code code = TREE_CODE (exp);
- optab this_optab;
rtx subtarget, original_target;
int ignore;
tree context;
result to be reduced to the precision of the bit-field type,
which is narrower than that of the type's mode. */
reduce_bit_field = (!ignore
- && TREE_CODE (type) == INTEGER_TYPE
+ && INTEGRAL_TYPE_P (type)
&& GET_MODE_PRECISION (mode) > TYPE_PRECISION (type));
/* If we are going to ignore this result, we need only do something
{
temp = expand_expr (exp, NULL_RTX, VOIDmode, modifier);
if (MEM_P (temp))
- temp = copy_to_reg (temp);
+ copy_to_reg (temp);
return const0_rtx;
}
NULL);
g = get_gimple_for_ssa_name (exp);
+ /* For EXPAND_INITIALIZER try harder to get something simpler. */
+ if (g == NULL
+ && modifier == EXPAND_INITIALIZER
+ && !SSA_NAME_IS_DEFAULT_DEF (exp)
+ && (optimize || DECL_IGNORED_P (SSA_NAME_VAR (exp)))
+ && stmt_is_replaceable_p (SSA_NAME_DEF_STMT (exp)))
+ g = SSA_NAME_DEF_STMT (exp);
if (g)
return expand_expr_real (gimple_assign_rhs_to_tree (g), target, tmode,
modifier, NULL);
gcc_assert (decl_rtl);
decl_rtl = copy_rtx (decl_rtl);
/* Record writes to register variables. */
- if (modifier == EXPAND_WRITE && REG_P (decl_rtl)
- && REGNO (decl_rtl) < FIRST_PSEUDO_REGISTER)
- {
- int i = REGNO (decl_rtl);
- int nregs = hard_regno_nregs[i][GET_MODE (decl_rtl)];
- while (nregs)
- {
- SET_HARD_REG_BIT (crtl->asm_clobbers, i);
- i++;
- nregs--;
- }
- }
+ if (modifier == EXPAND_WRITE
+ && REG_P (decl_rtl)
+ && HARD_REGISTER_P (decl_rtl))
+ add_to_hard_reg_set (&crtl->asm_clobbers,
+ GET_MODE (decl_rtl), REGNO (decl_rtl));
/* Ensure variable marked as used even if it doesn't go through
a parser. If it hasn't be used yet, write out an external
if (code == SSA_NAME
&& (g = SSA_NAME_DEF_STMT (ssa_name))
&& gimple_code (g) == GIMPLE_CALL)
- pmode = promote_function_mode (type, mode, &unsignedp,
- TREE_TYPE
- (TREE_TYPE (gimple_call_fn (g))),
- 2);
+ {
+ gcc_assert (!gimple_call_internal_p (g));
+ pmode = promote_function_mode (type, mode, &unsignedp,
+ gimple_call_fntype (g),
+ 2);
+ }
else
pmode = promote_decl_mode (exp, &unsignedp);
gcc_assert (GET_MODE (decl_rtl) == pmode);
{
addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (exp));
struct mem_address addr;
- int icode, align;
+ enum insn_code icode;
+ int align;
get_address_description (exp, &addr);
op0 = addr_for_mem_ref (&addr, as, true);
&& ((icode = optab_handler (movmisalign_optab, mode))
!= CODE_FOR_nothing))
{
- rtx reg, insn;
+ struct expand_operand ops[2];
/* We've already validated the memory, and we're creating a
- new pseudo destination. The predicates really can't fail. */
- reg = gen_reg_rtx (mode);
-
- /* Nor can the insn generator. */
- insn = GEN_FCN (icode) (reg, temp);
- gcc_assert (insn != NULL_RTX);
- emit_insn (insn);
-
- return reg;
+ new pseudo destination. The predicates really can't fail,
+ nor can the generator. */
+ create_output_operand (&ops[0], NULL_RTX, mode);
+ create_fixed_operand (&ops[1], temp);
+ expand_insn (icode, 2, ops);
+ return ops[0].value;
}
return temp;
}
enum machine_mode address_mode;
tree base = TREE_OPERAND (exp, 0);
gimple def_stmt;
- int icode, align;
+ enum insn_code icode;
+ int align;
/* Handle expansion of non-aliased memory with non-BLKmode. That
might end up in a register. */
if (TREE_CODE (base) == ADDR_EXPR)
}
align = MAX (TYPE_ALIGN (TREE_TYPE (exp)),
get_object_alignment (exp, BIGGEST_ALIGNMENT));
- op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_NORMAL);
- op0 = convert_memory_address_addr_space (address_mode, op0, as);
+ op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_SUM);
+ op0 = memory_address_addr_space (address_mode, op0, as);
if (!integer_zerop (TREE_OPERAND (exp, 1)))
{
rtx off
&& ((icode = optab_handler (movmisalign_optab, mode))
!= CODE_FOR_nothing))
{
- rtx reg, insn;
+ struct expand_operand ops[2];
/* We've already validated the memory, and we're creating a
- new pseudo destination. The predicates really can't fail. */
- reg = gen_reg_rtx (mode);
-
- /* Nor can the insn generator. */
- insn = GEN_FCN (icode) (reg, temp);
- emit_insn (insn);
-
- return reg;
+ new pseudo destination. The predicates really can't fail,
+ nor can the generator. */
+ create_output_operand (&ops[0], NULL_RTX, mode);
+ create_fixed_operand (&ops[1], temp);
+ expand_insn (icode, 2, ops);
+ return ops[0].value;
}
return temp;
}
we can't do this optimization. */
&& (! DECL_BIT_FIELD (field)
|| ((GET_MODE_CLASS (DECL_MODE (field)) == MODE_INT)
- && (GET_MODE_BITSIZE (DECL_MODE (field))
+ && (GET_MODE_PRECISION (DECL_MODE (field))
<= HOST_BITS_PER_WIDE_INT))))
{
if (DECL_BIT_FIELD (field)
}
else
{
- tree count
- = build_int_cst (NULL_TREE,
- GET_MODE_BITSIZE (imode) - bitsize);
+ int count = GET_MODE_PRECISION (imode) - bitsize;
op0 = expand_shift (LSHIFT_EXPR, imode, op0, count,
target, 0);
HOST_WIDE_INT bitsize, bitpos;
tree offset;
int volatilep = 0, must_force_mem;
+ bool packedp = false;
tree tem = get_inner_reference (exp, &bitsize, &bitpos, &offset,
&mode1, &unsignedp, &volatilep, true);
rtx orig_op0, memloc;
infinitely recurse. */
gcc_assert (tem != exp);
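+ /* Note whether the field itself or its containing type is packed;
+ this is passed down to extract_bit_field. */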
+ if (TYPE_PACKED (TREE_TYPE (TREE_OPERAND (exp, 0)))
+ || (TREE_CODE (TREE_OPERAND (exp, 1)) == FIELD_DECL
+ && DECL_PACKED (TREE_OPERAND (exp, 1))))
+ packedp = true;
+
/* If TEM's type is a union of variable size, pass TARGET to the inner
computation, since it will need a temporary and TARGET is known
to have to do. This occurs in unchecked conversion in Ada. */
/* If the bitfield is volatile, we want to access it in the
- field's mode, not the computed mode. */
- if (volatilep
- && GET_CODE (op0) == MEM
- && flag_strict_volatile_bitfields > 0)
- op0 = adjust_address (op0, mode1, 0);
+ field's mode, not the computed mode.
+ If a MEM has VOIDmode (external with incomplete type),
+ use BLKmode for it instead. */
+ if (MEM_P (op0))
+ {
+ if (volatilep && flag_strict_volatile_bitfields > 0)
+ op0 = adjust_address (op0, mode1, 0);
+ else if (GET_MODE (op0) == VOIDmode)
+ op0 = adjust_address (op0, BLKmode, 0);
+ }
mode2
= CONSTANT_P (op0) ? TYPE_MODE (TREE_TYPE (tem)) : GET_MODE (op0);
constant and we don't need a memory reference. */
if (CONSTANT_P (op0)
&& mode2 != BLKmode
- && LEGITIMATE_CONSTANT_P (op0)
+ && targetm.legitimate_constant_p (mode2, op0)
&& !must_force_mem)
op0 = force_reg (mode2, op0);
&& modifier != EXPAND_CONST_ADDRESS
&& modifier != EXPAND_INITIALIZER)
/* If the field is volatile, we always want an aligned
- access. */
- || (volatilep && flag_strict_volatile_bitfields > 0)
+ access. Only do this if the access is not already naturally
+ aligned; otherwise "normal" (non-bitfield) volatile fields
+ become non-addressable. */
+ || (volatilep && flag_strict_volatile_bitfields > 0
+ && (bitpos % GET_MODE_ALIGNMENT (mode) != 0))
/* If the field isn't aligned enough to fetch as a memref,
fetch it as a bit field. */
|| (mode1 != BLKmode
if (MEM_P (op0) && REG_P (XEXP (op0, 0)))
mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
- op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp,
+ op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp, packedp,
(modifier == EXPAND_STACK_PARM
? NULL_RTX : target),
ext_mode, ext_mode);
&& GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
&& bitsize < (HOST_WIDE_INT) GET_MODE_BITSIZE (GET_MODE (op0)))
op0 = expand_shift (LSHIFT_EXPR, GET_MODE (op0), op0,
- size_int (GET_MODE_BITSIZE (GET_MODE (op0))
- - bitsize),
- op0, 1);
+ GET_MODE_BITSIZE (GET_MODE (op0))
+ - bitsize, op0, 1);
/* If the result type is BLKmode, store the data into a temporary
of the appropriate type, but with the mode corresponding to the
/* If neither mode is BLKmode, and both modes are the same size
then we can use gen_lowpart. */
else if (mode != BLKmode && GET_MODE (op0) != BLKmode
- && GET_MODE_SIZE (mode) == GET_MODE_SIZE (GET_MODE (op0))
+ && (GET_MODE_PRECISION (mode)
+ == GET_MODE_PRECISION (GET_MODE (op0)))
&& !COMPLEX_MODE_P (GET_MODE (op0)))
{
if (GET_CODE (op0) == SUBREG)
return op0;
- /* Use a compare and a jump for BLKmode comparisons, or for function
- type comparisons is HAVE_canonicalize_funcptr_for_compare. */
-
- /* Although TRUTH_{AND,OR}IF_EXPR aren't present in GIMPLE, they
- are occassionally created by folding during expansion. */
- case TRUTH_ANDIF_EXPR:
- case TRUTH_ORIF_EXPR:
- if (! ignore
- && (target == 0
- || modifier == EXPAND_STACK_PARM
- || ! safe_from_p (target, treeop0, 1)
- || ! safe_from_p (target, treeop1, 1)
- /* Make sure we don't have a hard reg (such as function's return
- value) live across basic blocks, if not optimizing. */
- || (!optimize && REG_P (target)
- && REGNO (target) < FIRST_PSEUDO_REGISTER)))
- target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
-
- if (target)
- emit_move_insn (target, const0_rtx);
-
- op1 = gen_label_rtx ();
- jumpifnot_1 (code, treeop0, treeop1, op1, -1);
-
- if (target)
- emit_move_insn (target, const1_rtx);
-
- emit_label (op1);
- return ignore ? const0_rtx : target;
-
- case STATEMENT_LIST:
- {
- tree_stmt_iterator iter;
-
- gcc_assert (ignore);
-
- for (iter = tsi_start (exp); !tsi_end_p (iter); tsi_next (&iter))
- expand_expr (tsi_stmt (iter), const0_rtx, VOIDmode, modifier);
- }
- return const0_rtx;
-
case COND_EXPR:
/* A COND_EXPR with its type being VOID_TYPE represents a
conditional jump and is handled in
return expand_expr_real (treeop0, original_target, tmode,
modifier, alt_rtl);
- case REALIGN_LOAD_EXPR:
- {
- tree oprnd0 = treeop0;
- tree oprnd1 = treeop1;
- tree oprnd2 = treeop2;
- rtx op2;
-
- this_optab = optab_for_tree_code (code, type, optab_default);
- expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
- op2 = expand_normal (oprnd2);
- temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
- target, unsignedp);
- gcc_assert (temp);
- return temp;
- }
-
- case DOT_PROD_EXPR:
- {
- tree oprnd0 = treeop0;
- tree oprnd1 = treeop1;
- tree oprnd2 = treeop2;
- rtx op2;
-
- expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
- op2 = expand_normal (oprnd2);
- target = expand_widen_pattern_expr (&ops, op0, op1, op2,
- target, unsignedp);
- return target;
- }
-
case COMPOUND_LITERAL_EXPR:
{
/* Initialize the anonymous variable declared in the compound
}
else
{
- tree count = build_int_cst (NULL_TREE,
- GET_MODE_BITSIZE (GET_MODE (exp)) - prec);
- exp = expand_shift (LSHIFT_EXPR, GET_MODE (exp), exp, count, target, 0);
- return expand_shift (RSHIFT_EXPR, GET_MODE (exp), exp, count, target, 0);
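+ /* Extend the value from PREC bits by shifting it to the top of its
+ mode and arithmetically shifting it back down. */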
+ int count = GET_MODE_PRECISION (GET_MODE (exp)) - prec;
+ exp = expand_shift (LSHIFT_EXPR, GET_MODE (exp),
+ exp, count, target, 0);
+ return expand_shift (RSHIFT_EXPR, GET_MODE (exp),
+ exp, count, target, 0);
}
}
\f
if ((code == NE || code == EQ)
&& TREE_CODE (arg0) == BIT_AND_EXPR && integer_zerop (arg1)
- && integer_pow2p (TREE_OPERAND (arg0, 1)))
+ && integer_pow2p (TREE_OPERAND (arg0, 1))
+ && (TYPE_PRECISION (ops->type) != 1 || TYPE_UNSIGNED (ops->type)))
{
tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
return expand_expr (fold_single_bit_test (loc,
/* Try a cstore if possible. */
return emit_store_flag_force (target, code, op0, op1,
- operand_mode, unsignedp, 1);
+ operand_mode, unsignedp,
+ (TYPE_PRECISION (ops->type) == 1
+ && !TYPE_UNSIGNED (ops->type)) ? -1 : 1);
}
\f
rtx table_label ATTRIBUTE_UNUSED, rtx default_label,
rtx fallback_label ATTRIBUTE_UNUSED)
{
+ struct expand_operand ops[5];
enum machine_mode index_mode = SImode;
int index_bits = GET_MODE_BITSIZE (index_mode);
rtx op1, op2, index;
- enum machine_mode op_mode;
if (! HAVE_casesi)
return 0;
do_pending_stack_adjust ();
- op_mode = insn_data[(int) CODE_FOR_casesi].operand[0].mode;
- if (! (*insn_data[(int) CODE_FOR_casesi].operand[0].predicate)
- (index, op_mode))
- index = copy_to_mode_reg (op_mode, index);
-
op1 = expand_normal (minval);
-
- op_mode = insn_data[(int) CODE_FOR_casesi].operand[1].mode;
- op1 = convert_modes (op_mode, TYPE_MODE (TREE_TYPE (minval)),
- op1, TYPE_UNSIGNED (TREE_TYPE (minval)));
- if (! (*insn_data[(int) CODE_FOR_casesi].operand[1].predicate)
- (op1, op_mode))
- op1 = copy_to_mode_reg (op_mode, op1);
-
op2 = expand_normal (range);
- op_mode = insn_data[(int) CODE_FOR_casesi].operand[2].mode;
- op2 = convert_modes (op_mode, TYPE_MODE (TREE_TYPE (range)),
- op2, TYPE_UNSIGNED (TREE_TYPE (range)));
- if (! (*insn_data[(int) CODE_FOR_casesi].operand[2].predicate)
- (op2, op_mode))
- op2 = copy_to_mode_reg (op_mode, op2);
-
- emit_jump_insn (gen_casesi (index, op1, op2,
- table_label, !default_label
- ? fallback_label : default_label));
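+ /* Let the expand_operand machinery convert the bounds to the modes
+ the casesi pattern expects and copy operands into registers where
+ its predicates require it. */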
+ create_input_operand (&ops[0], index, index_mode);
+ create_convert_operand_from_type (&ops[1], op1, TREE_TYPE (minval));
+ create_convert_operand_from_type (&ops[2], op2, TREE_TYPE (range));
+ create_fixed_operand (&ops[3], table_label);
+ create_fixed_operand (&ops[4], (default_label
+ ? default_label
+ : fallback_label));
+ expand_jump_insn (CODE_FOR_casesi, 5, ops);
return 1;
}
return gen_rtx_CONST_VECTOR (mode, v);
}
-
-/* Build a decl for a EH personality function named NAME. */
+/* Build a decl for a personality function given a language prefix. */
tree
-build_personality_function (const char *name)
+build_personality_function (const char *lang)
{
+ const char *unwind_and_version;
tree decl, type;
+ char *name;
+
+ switch (targetm_common.except_unwind_info (&global_options))
+ {
+ case UI_NONE:
+ return NULL;
+ case UI_SJLJ:
+ unwind_and_version = "_sj0";
+ break;
+ case UI_DWARF2:
+ case UI_TARGET:
+ unwind_and_version = "_v0";
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
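+ /* E.g. "__gxx_personality_v0". */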
+ name = ACONCAT (("__", lang, "_personality", unwind_and_version, NULL));
type = build_function_type_list (integer_type_node, integer_type_node,
long_long_unsigned_type_node,