PR middle-end/20371:
author amylaar <amylaar@138bc75d-0d04-0410-961f-82ee72b054a4>
Wed, 11 May 2005 12:24:43 +0000 (12:24 +0000)
committer amylaar <amylaar@138bc75d-0d04-0410-961f-82ee72b054a4>
Wed, 11 May 2005 12:24:43 +0000 (12:24 +0000)
* tree.h (record_layout_info_s): New member prev_packed.
* stor-layout.c (update_alignment_for_field): Fix comment about
KNOWN_ALIGN.  For MS bitfields, if we start a new run, make sure
we start it properly aligned.
(place_field): At the beginning of a record, pass 0 as KNOWN_ALIGN
to update_alignment_for_field, and recompute it afterwards using
the alignment of the record.
When a packed bitfield precedes an MS bitfield, don't add padding
at the end of the packed bitfield on behalf of the base type of
the packed bit field.
Don't adjust rli->bitpos at the end of an MS bitfield run if we
already adjusted bitpos/offset for an alignment as large or larger
than the bitfield type size.
Take possible record alignment > BIGGEST_ALIGNMENT into account
when calculating actual_align.
Only put packed bit fields into rli->prev_field if they end up
suitably aligned.
Also set rli->remaining_in_alignment when we re-set rli->prev_field.
Update rli->remaining_in_alignment when we have already started a
run of bit fields and we process a packed bit field.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@99574 138bc75d-0d04-0410-961f-82ee72b054a4
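
As an illustration of the anomaly being fixed (a minimal sketch with
hypothetical fields, not the PR's own testcase; it assumes a target
where targetm.ms_bitfield_layout_p is true, e.g. i386 with
-mms-bitfields):

    /* Before this patch, when the char bit field ended the run,
       place_field could pad the packed 8-bit field out on behalf of
       its long long base type, so struct a could end up larger than
       struct b even though both hold the same 16 bits.  */
    struct a { long long f : 8 __attribute__ ((packed)); char c : 8; };
    struct b { short     f : 8 __attribute__ ((packed)); char c : 8; };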

gcc/ChangeLog
gcc/stor-layout.c
gcc/tree.h

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index f18f5b4..145839c 100644
@@ -1,3 +1,27 @@
+2005-05-11  J"orn Rennecke <joern.rennecke@st.com>
+
+       PR middle-end/20371:
+       * tree.h (record_layout_info_s): New member prev_packed.
+       * stor-layout.c (update_alignment_for_field): Fix comment about
+       KNOWN_ALIGN.  For MS bitfields, if we start a new run, make sure
+       we start it properly aligned.
+       (place_field): At the beginning of a record, pass 0 as KNOWN_ALIGN
+       to update_alignment_for_field, and recompute it afterwards using
+       the alignment of the record.
+       When a packed bitfield precedes an MS bitfield, don't add padding
+       at the end of the packed bitfield on behalf of the base type of
+       the packed bit field.
+       Don't adjust rli->bitpos at the end of an MS bitfield run if we
+       already adjusted bitpos/offset for an alignment as large or larger
+       than the bitfield type size.
+       Take possible record alignment > BIGGEST_ALIGNMENT into account
+       when calculating actual_align.
+       Only put packed bit fields into rli->prev_field if they end up
+       suitably aligned.
+       Also set rli->remaining_in_alignment when we re-set rli->prev_field.
+       Update rli->remaining_in_alignment when we have already started a
+       run of bit fields and we process a packed bit field.
+
 2005-05-11  Sebastian Pop  <pop@cri.ensmp.fr>
 
        * tree-data-ref.c (find_data_references_in_loop): Give up when
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index 41d3165..cffb81c 100644
@@ -634,9 +634,9 @@ rli_size_so_far (record_layout_info rli)
 }
 
 /* FIELD is about to be added to RLI->T.  The alignment (in bits) of
-   the next available location is given by KNOWN_ALIGN.  Update the
-   variable alignment fields in RLI, and return the alignment to give
-   the FIELD.  */
+   the next available location within the record is given by KNOWN_ALIGN.
+   Update the variable alignment fields in RLI, and return the alignment
+   to give the FIELD.  */
 
 unsigned int
 update_alignment_for_field (record_layout_info rli, tree field,
@@ -682,6 +682,18 @@ update_alignment_for_field (record_layout_info rli, tree field,
            type_align = MIN (type_align, maximum_field_alignment);
          rli->record_align = MAX (rli->record_align, type_align);
          rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
+         /* If we start a new run, make sure we start it properly aligned.  */
+         if ((!rli->prev_field
+              || integer_zerop (DECL_SIZE (field))
+              || integer_zerop (DECL_SIZE (rli->prev_field))
+              || !host_integerp (DECL_SIZE (rli->prev_field), 0)
+              || !host_integerp (TYPE_SIZE (type), 0)
+              || !simple_cst_equal (TYPE_SIZE (type),
+                                    TYPE_SIZE (TREE_TYPE (rli->prev_field)))
+              || (rli->remaining_in_alignment
+                  < tree_low_cst (DECL_SIZE (field), 0)))
+             && desired_align < type_align)
+           desired_align = type_align;
        }
     }
 #ifdef PCC_BITFIELD_TYPE_MATTERS
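
As a sketch of the run rule the new check enforces (hypothetical
fields; under MS bit-field layout a run is shared only by bit fields
whose base types have the same size):

    struct s
    {
      char c : 4;   /* starts a run in a char-sized allocation unit */
      char d : 4;   /* same type size: continues the run */
      int  i : 4;   /* different type size: starts a new run, which
                       the added code aligns to __alignof__ (int) */
    };
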
@@ -820,7 +832,7 @@ place_field (record_layout_info rli, tree field)
     known_align = (tree_low_cst (rli->bitpos, 1)
                   & - tree_low_cst (rli->bitpos, 1));
   else if (integer_zerop (rli->offset))
-    known_align = BIGGEST_ALIGNMENT;
+    known_align = 0;
   else if (host_integerp (rli->offset, 1))
     known_align = (BITS_PER_UNIT
                   * (tree_low_cst (rli->offset, 1)
@@ -829,6 +841,8 @@ place_field (record_layout_info rli, tree field)
     known_align = rli->offset_align;
 
   desired_align = update_alignment_for_field (rli, field, known_align);
+  if (known_align == 0)
+    known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
 
   if (warn_packed && DECL_PACKED (field))
     {
@@ -1001,18 +1015,30 @@ place_field (record_layout_info rli, tree field)
 
              if (rli->remaining_in_alignment < bitsize)
                {
-                 /* out of bits; bump up to next 'word'.  */
-                 rli->offset = DECL_FIELD_OFFSET (rli->prev_field);
-                 rli->bitpos
-                   = size_binop (PLUS_EXPR, TYPE_SIZE (type),
-                                 DECL_FIELD_BIT_OFFSET (rli->prev_field));
-                 rli->prev_field = field;
-                 rli->remaining_in_alignment
-                   = tree_low_cst (TYPE_SIZE (type), 0);
+                 /* If PREV_FIELD is packed, and we haven't lumped
+                    non-packed bitfields with it, treat this as if PREV_FIELD
+                    was not a bitfield.  This avoids anomalies where a packed
+                    bitfield with long long base type can take up more
+                    space than a same-size bitfield with base type short.  */
+                 if (rli->prev_packed)
+                   rli->prev_field = prev_saved = NULL;
+                 else
+                   {
+                     /* out of bits; bump up to next 'word'.  */
+                     rli->offset = DECL_FIELD_OFFSET (rli->prev_field);
+                     rli->bitpos
+                       = size_binop (PLUS_EXPR, TYPE_SIZE (type),
+                                     DECL_FIELD_BIT_OFFSET (rli->prev_field));
+                     rli->prev_field = field;
+                     rli->remaining_in_alignment
+                       = tree_low_cst (TYPE_SIZE (type), 0) - bitsize;
+                   }
                }
-
-             rli->remaining_in_alignment -= bitsize;
+             else
+               rli->remaining_in_alignment -= bitsize;
            }
+         else if (rli->prev_packed)
+           rli->prev_field = prev_saved = NULL;
          else
            {
              /* End of a run: if leaving a run of bitfields of the same type
@@ -1028,9 +1054,14 @@ place_field (record_layout_info rli, tree field)
                {
                  tree type_size = TYPE_SIZE (TREE_TYPE (rli->prev_field));
 
-                 rli->bitpos
-                   = size_binop (PLUS_EXPR, type_size,
-                                 DECL_FIELD_BIT_OFFSET (rli->prev_field));
+                 /* If the desired alignment is greater than or equal to
+                    TYPE_SIZE, we have already adjusted rli->bitpos and
+                    rli->offset above.  */
+                 if ((unsigned HOST_WIDE_INT) tree_low_cst (type_size, 0)
+                     > desired_align)
+                   rli->bitpos
+                     = size_binop (PLUS_EXPR, type_size,
+                                   DECL_FIELD_BIT_OFFSET (rli->prev_field));
                }
              else
                /* We "use up" size zero fields; the code below should behave
@@ -1044,6 +1075,7 @@ place_field (record_layout_info rli, tree field)
                rli->prev_field = NULL;
            }
 
+         rli->prev_packed = 0;
          normalize_rli (rli);
         }
 
@@ -1116,20 +1148,59 @@ place_field (record_layout_info rli, tree field)
     actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
                    & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
   else if (integer_zerop (DECL_FIELD_OFFSET (field)))
-    actual_align = BIGGEST_ALIGNMENT;
+    actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
   else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
     actual_align = (BITS_PER_UNIT
                   * (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
                      & - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
   else
     actual_align = DECL_OFFSET_ALIGN (field);
+  /* ACTUAL_ALIGN is still the actual alignment *within the record*.
+     Store / extract bit field operations will check the alignment of
+     the record against the mode of bit fields.  */
 
   if (known_align != actual_align)
     layout_decl (field, actual_align);
 
-  /* Only the MS bitfields use this.  */
-  if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE(field))
-      rli->prev_field = field;
+  if (DECL_BIT_FIELD_TYPE (field))
+    {
+      unsigned int type_align = TYPE_ALIGN (type);
+
+      /* Only the MS bitfields use this.  We used to also put any kind of
+        packed bit fields into prev_field, but that makes no sense, because
+        an 8 bit packed bit field shouldn't impose more restriction on
+        following fields than a char field, and the alignment requirements
+        are also not fulfilled.
+        There is no sane value to set rli->remaining_in_alignment to when
+        a packed bitfield in prev_field is unaligned.  */
+      if (maximum_field_alignment != 0)
+       type_align = MIN (type_align, maximum_field_alignment);
+      gcc_assert (rli->prev_field
+                 || actual_align >= type_align || DECL_PACKED (field)
+                 || integer_zerop (DECL_SIZE (field))
+                 || !targetm.ms_bitfield_layout_p (rli->t));
+      if (rli->prev_field == NULL && actual_align >= type_align
+         && !integer_zerop (DECL_SIZE (field)))
+       {
+         rli->prev_field = field;
+         /* rli->remaining_in_alignment has not been set if the bitfield
+            has size zero, or if it is a packed bitfield.  */
+         rli->remaining_in_alignment
+           = (tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 0)
+              - tree_low_cst (DECL_SIZE (field), 0));
+         rli->prev_packed = DECL_PACKED (field);
+
+       }
+      else if (rli->prev_field && DECL_PACKED (field))
+       {
+         HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 0);
+
+         if (rli->remaining_in_alignment < bitsize)
+           rli->prev_field = NULL;
+         else
+           rli->remaining_in_alignment -= bitsize;
+       }
+    }
 
   /* Now add size of this field to the size of the record.  If the size is
      not constant, treat the field as being a multiple of bytes and just
diff --git a/gcc/tree.h b/gcc/tree.h
index 4600d51..44a07af 100644
@@ -3142,6 +3142,9 @@ typedef struct record_layout_info_s
   tree pending_statics;
   /* Bits remaining in the current alignment group */
   int remaining_in_alignment;
+  /* True if prev_field was packed and we haven't found any non-packed
+     fields that we have put in the same alignment group.  */
+  int prev_packed;
   /* True if we've seen a packed field that didn't have normal
      alignment anyway.  */
   int packed_maybe_necessary;
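
As a rough sketch of what the new prev_packed flag tracks
(hypothetical fields; MS bit-field layout assumed): it is set when a
packed bit field that happened to land suitably aligned is recorded
in rli->prev_field, and it stays set until a non-packed bit field is
lumped into the same alignment group.

    struct s
    {
      long long a : 8 __attribute__ ((packed)); /* if suitably aligned,
                                                   recorded in prev_field
                                                   with prev_packed set */
      long long b : 60; /* no room left in a's alignment group: because
                           prev_packed is still set, place_field drops
                           the run instead of padding a out to the full
                           64 bits of its base type */
    };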