/* Perform initializations for this module. */
void
-gnat_init_stmt_group ()
+gnat_init_stmt_group (void)
{
/* Initialize ourselves. */
init_code_table ();
case N_Identifier:
case N_Expanded_Name:
/* This represents either a subtype range or a static value of
- some kind; Ekind says which. If a static value, fall through
- to the next case. */
+ some kind; Ekind says which. */
if (IN (Ekind (Entity (gnat_choice)), Type_Kind))
{
tree gnu_type = get_unpadded_type (Entity (gnat_choice));
gnu_high = fold (TYPE_MAX_VALUE (gnu_type));
break;
}
+ /* Static values are handled by the next case to which we'll
+ fallthrough. If this is a constant with an address clause
+ attached, we need to get to the initialization expression
+ first, as the GCC tree for the entity might happen to be an
+ INDIRECT_REF otherwise. */
+ else if (Ekind (Entity (gnat_choice)) == E_Constant
+ && Present (Address_Clause (Entity (gnat_choice))))
+ {
+ /* We might have a deferred constant with an address clause
+ on either the incomplete or the full view. While the
+ Address_Clause is always attached to the visible entity,
+ as tested above, the static value is the Expression
+         attached to the declaration of the entity or of its
+ full view if any. */
+
+ Entity_Id gnat_constant = Entity (gnat_choice);
+
+ if (Present (Full_View (gnat_constant)))
+ gnat_constant = Full_View (gnat_constant);
+
+ gnat_choice
+ = Expression (Declaration_Node (gnat_constant));
+ }
/* ... fall through ... */
gnu_subprog_addr,
nreverse (gnu_actual_list));
- /* If we return by passing a target, we emit the call and return the target
- as our result. */
+ /* If we return by passing a target, the result is the target after the
+ call. We must not emit the call directly here because this might be
+ evaluated as part of an expression with conditions to control whether
+ the call should be emitted or not. */
if (TYPE_RETURNS_BY_TARGET_PTR_P (gnu_subprog_type))
{
- add_stmt_with_node (gnu_subprog_call, gnat_node);
- *gnu_result_type_p
+ /* Conceptually, what we need is a COMPOUND_EXPR with the call followed
+ by the target object converted to the proper type. Doing so would
+       potentially be very inefficient, however, as this expression might
+ end up wrapped into an outer SAVE_EXPR later on, which would incur a
+ pointless temporary copy of the whole object.
+
+ What we do instead is build a COMPOUND_EXPR returning the address of
+ the target, and then dereference. Wrapping the COMPOUND_EXPR into a
+ SAVE_EXPR later on then only incurs a pointer copy. */
+
+ tree gnu_result_type
= TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (gnu_subprog_type)));
- return unchecked_convert (*gnu_result_type_p, gnu_target, false);
+
+ /* Build and return
+ (result_type) *[gnu_subprog_call (&gnu_target, ...), &gnu_target] */
+
+ tree gnu_target_address
+ = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_target);
+
+ gnu_result
+ = build2 (COMPOUND_EXPR, TREE_TYPE (gnu_target_address),
+ gnu_subprog_call, gnu_target_address);
+
+ gnu_result
+ = unchecked_convert (gnu_result_type,
+ build_unary_op (INDIRECT_REF, NULL_TREE,
+ gnu_result),
+ false);
+
+ *gnu_result_type_p = gnu_result_type;
+ return gnu_result;
}
/* If it is a function call, the result is the call expression unless
case N_Slice:
{
- tree gnu_type;
- Node_Id gnat_range_node = Discrete_Range (gnat_node);
+ tree gnu_type;
+ Node_Id gnat_range_node = Discrete_Range (gnat_node);
- gnu_result = gnat_to_gnu (Prefix (gnat_node));
- gnu_result_type = get_unpadded_type (Etype (gnat_node));
+ gnu_result = gnat_to_gnu (Prefix (gnat_node));
+ gnu_result_type = get_unpadded_type (Etype (gnat_node));
/* Do any implicit dereferences of the prefix and do any needed
range check. */
- gnu_result = maybe_implicit_deref (gnu_result);
- gnu_result = maybe_unconstrained_array (gnu_result);
- gnu_type = TREE_TYPE (gnu_result);
- if (Do_Range_Check (gnat_range_node))
- {
- /* Get the bounds of the slice. */
+ gnu_result = maybe_implicit_deref (gnu_result);
+ gnu_result = maybe_unconstrained_array (gnu_result);
+ gnu_type = TREE_TYPE (gnu_result);
+ if (Do_Range_Check (gnat_range_node))
+ {
+ /* Get the bounds of the slice. */
tree gnu_index_type
= TYPE_INDEX_TYPE (TYPE_DOMAIN (gnu_result_type));
- tree gnu_min_expr = TYPE_MIN_VALUE (gnu_index_type);
- tree gnu_max_expr = TYPE_MAX_VALUE (gnu_index_type);
- tree gnu_expr_l, gnu_expr_h, gnu_expr_type;
-
- /* Check to see that the minimum slice value is in range */
- gnu_expr_l
- = emit_index_check
- (gnu_result, gnu_min_expr,
- TYPE_MIN_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (gnu_type))),
- TYPE_MAX_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (gnu_type))));
-
- /* Check to see that the maximum slice value is in range */
- gnu_expr_h
- = emit_index_check
- (gnu_result, gnu_max_expr,
- TYPE_MIN_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (gnu_type))),
- TYPE_MAX_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (gnu_type))));
-
- /* Derive a good type to convert everything too */
- gnu_expr_type = get_base_type (TREE_TYPE (gnu_expr_l));
-
- /* Build a compound expression that does the range checks */
- gnu_expr
- = build_binary_op (COMPOUND_EXPR, gnu_expr_type,
- convert (gnu_expr_type, gnu_expr_h),
- convert (gnu_expr_type, gnu_expr_l));
-
- /* Build a conditional expression that returns the range checks
- expression if the slice range is not null (max >= min) or
- returns the min if the slice range is null */
- gnu_expr
- = fold_build3 (COND_EXPR, gnu_expr_type,
- build_binary_op (GE_EXPR, gnu_expr_type,
- convert (gnu_expr_type,
- gnu_max_expr),
- convert (gnu_expr_type,
- gnu_min_expr)),
- gnu_expr, gnu_min_expr);
- }
- else
- gnu_expr = TYPE_MIN_VALUE (TYPE_DOMAIN (gnu_result_type));
+ tree gnu_min_expr = TYPE_MIN_VALUE (gnu_index_type);
+ tree gnu_max_expr = TYPE_MAX_VALUE (gnu_index_type);
+ /* Get the permitted bounds. */
+ tree gnu_base_index_type
+ = TYPE_INDEX_TYPE (TYPE_DOMAIN (gnu_type));
+ tree gnu_base_min_expr = TYPE_MIN_VALUE (gnu_base_index_type);
+ tree gnu_base_max_expr = TYPE_MAX_VALUE (gnu_base_index_type);
+ tree gnu_expr_l, gnu_expr_h, gnu_expr_type;
+
+ /* Check to see that the minimum slice value is in range. */
+ gnu_expr_l = emit_index_check (gnu_result,
+ gnu_min_expr,
+ gnu_base_min_expr,
+ gnu_base_max_expr);
+
+ /* Check to see that the maximum slice value is in range. */
+ gnu_expr_h = emit_index_check (gnu_result,
+ gnu_max_expr,
+ gnu_base_min_expr,
+ gnu_base_max_expr);
+
+ /* Derive a good type to convert everything to. */
+ gnu_expr_type = get_base_type (TREE_TYPE (gnu_expr_l));
+
+ /* Build a compound expression that does the range checks and
+ returns the low bound. */
+ gnu_expr = build_binary_op (COMPOUND_EXPR, gnu_expr_type,
+ convert (gnu_expr_type, gnu_expr_h),
+ convert (gnu_expr_type, gnu_expr_l));
+
+ /* Build a conditional expression that does the range check and
+ returns the low bound if the slice is not empty (max >= min),
+ and returns the naked low bound otherwise (max < min), unless
+ it is non-constant and the high bound is; this prevents VRP
+ from inferring bogus ranges on the unlikely path. */
+ gnu_expr = fold_build3 (COND_EXPR, gnu_expr_type,
+ build_binary_op (GE_EXPR, gnu_expr_type,
+ convert (gnu_expr_type,
+ gnu_max_expr),
+ convert (gnu_expr_type,
+ gnu_min_expr)),
+ gnu_expr,
+ TREE_CODE (gnu_min_expr) != INTEGER_CST
+ && TREE_CODE (gnu_max_expr) == INTEGER_CST
+ ? gnu_max_expr : gnu_min_expr);
+ }
+ else
+ /* Simply return the naked low bound. */
+ gnu_expr = TYPE_MIN_VALUE (TYPE_DOMAIN (gnu_result_type));
- gnu_result = build_binary_op (ARRAY_RANGE_REF, gnu_result_type,
+ gnu_result = build_binary_op (ARRAY_RANGE_REF, gnu_result_type,
gnu_result, gnu_expr);
}
break;
/* This case can apply to a boolean or a modular type.
Fall through for a boolean operand since GNU_CODES is set
up to handle this. */
- if (IN (Ekind (Etype (gnat_node)), Modular_Integer_Kind))
+ if (Is_Modular_Integer_Type (Etype (gnat_node))
+ || (Ekind (Etype (gnat_node)) == E_Private_Type
+ && Is_Modular_Integer_Type (Full_View (Etype (gnat_node)))))
{
gnu_expr = gnat_to_gnu (Right_Opnd (gnat_node));
gnu_result_type = get_unpadded_type (Etype (gnat_node));
/* Start a new statement group chained to the previous group. */
static void
-start_stmt_group ()
+start_stmt_group (void)
{
struct stmt_group *group = stmt_group_free_list;
BLOCK or cleanups were set. */
static tree
-end_stmt_group ()
+end_stmt_group (void)
{
struct stmt_group *group = current_stmt_group;
tree gnu_retval = group->stmt_list;
case COMPONENT_REF:
return (!DECL_BIT_FIELD (TREE_OPERAND (gnu_expr, 1))
&& (!STRICT_ALIGNMENT
- /* If the field was marked as "semantically" addressable
- in create_field_decl, we are guaranteed that it can
- be directly addressed. */
- || !DECL_NONADDRESSABLE_P (TREE_OPERAND (gnu_expr, 1))
- /* Otherwise it can nevertheless be directly addressed
- if it has been sufficiently aligned in the record. */
+ /* Even with DECL_BIT_FIELD cleared, we have to ensure that
+ the field is sufficiently aligned, in case it is subject
+ to a pragma Component_Alignment. But we don't need to
+ check the alignment of the containing record, as it is
+ guaranteed to be not smaller than that of its most
+ aligned field that is not a bit-field. */
|| DECL_ALIGN (TREE_OPERAND (gnu_expr, 1))
>= TYPE_ALIGN (TREE_TYPE (gnu_expr)))
&& addressable_p (TREE_OPERAND (gnu_expr, 0)));
case ADDR_EXPR:
/* A standalone ADDR_EXPR is never an lvalue, and this one can't
- be nested inside an outer INDIRECT_REF, since INDIREC_REF goes
- straight to stabilize_1. */
+ be nested inside an outer INDIRECT_REF, since INDIRECT_REF goes
+ straight to gnat_stabilize_reference_1. */
if (lvalues_only)
goto failure;
break;
case COMPOUND_EXPR:
- result = build2 (COMPOUND_EXPR, type,
- gnat_stabilize_reference_1 (TREE_OPERAND (ref, 0),
- force),
- maybe_stabilize_reference (TREE_OPERAND (ref, 1), force,
- lvalues_only, success));
+ result = gnat_stabilize_reference_1 (ref, force);
+ break;
+
+ case CALL_EXPR:
+ if (lvalues_only)
+ goto failure;
+
+ /* This generates better code than the scheme in protect_multiple_eval
+ because large objects will be returned via invisible reference in
+ most ABIs so the temporary will directly be filled by the callee. */
+ result = gnat_stabilize_reference_1 (ref, force);
break;
case ERROR_MARK: