2012-01-27 Richard Guenther <rguenther@suse.de>
PR tree-optimization/50444
+ * tree-sra.c (build_ref_for_offset): Properly adjust the
+ MEM_REF type for unaligned accesses.
+
+2012-01-27 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/50444
* expr.c (mem_ref_refers_to_non_mem_p): New function.
(expand_assignment): Use it. Properly handle misaligned
bases when expanding stores to component references.
+2012-01-27 Richard Guenther <rguenther@suse.de>
+
+ PR tree-optimization/50444
+ * gcc.dg/torture/pr50444.c: New testcase.
+
2012-01-27 Tobias Burnus <burnus@net-b.de>
PR fortran/51970
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr50444.c
+/* { dg-require-effective-target sse2_runtime } */
+/* { dg-do run } */
+/* { dg-options "-msse2" } */
+
+/* Reduced runtime testcase for PR tree-optimization/50444: it mixes a
+   16-byte SSE vector type with a same-sized struct of four 32-bit
+   words, exercising the unaligned-access handling fixed in tree-sra.c
+   and expr.c (see the ChangeLog entries in this patch).  */
+typedef long long __m128i __attribute__ ((__vector_size__ (16),
+__may_alias__));
+typedef int __v4si __attribute__ ((__vector_size__ (16)));
+typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+typedef unsigned int uint32_t;
+
+/* 16 bytes viewed as four 32-bit words.  Unlike the vector types
+   above, this struct requires only 4-byte alignment.  */
+typedef struct {
+ uint32_t v[4];
+} a4x32;
+
+/* Bump the first 32-bit word of *x in place and return x.  */
+a4x32* incr(a4x32* x)
+{
+ x->v[0] += 1;
+ return x;
+}
+
+/* Single __m128i member; as a vector_size(16) type it carries 16-byte
+   alignment, in contrast to the 4-byte-aligned a4x32 above.  */
+typedef struct {
+ __m128i m;
+} a1xm128i;
+
+/* XOR the two 128-bit values using the SSE2 pxor intrinsic.  */
+static inline a1xm128i ssefunc( a1xm128i in, a1xm128i k)
+{
+ a1xm128i ret;
+ ret.m = (__m128i)__builtin_ia32_pxor128 ((__v2di)in.m, (__v2di)k.m);
+ return ret;
+}
+
+/* Type-pun c4x32 to a1xm128i via memcpy (the aliasing-safe way),
+   XOR it with k through ssefunc, and pun the result back.  The size
+   check guards the puns; both structs are 16 bytes.  */
+static a4x32 caster( a4x32 c4x32, a1xm128i k)
+{
+ a1xm128i c1x128;
+ if( sizeof(c4x32) != sizeof(c1x128) ) __builtin_abort();
+ __builtin_memcpy(&c1x128, &c4x32, sizeof(c1x128));
+ c1x128 = ssefunc(c1x128, k);
+ __builtin_memcpy(&c4x32, &c1x128, sizeof(c4x32));
+ return c4x32;
+}
+
+/* Counter-based generator state: key, counter c, number of unread
+   words left in the output buffer v, and v itself.
+   NOTE(review): the exact field layout presumably matters for
+   reproducing the misaligned access of PR 50444 -- do not reorder.  */
+typedef struct {
+ a1xm128i key;
+ a4x32 c;
+ __SIZE_TYPE__ elem;
+ a4x32 v;
+} Engine;
+
+/* Zero-initialize the fields method() depends on.  e->v is left
+   uninitialized on purpose: elem == 0 forces method() to write v
+   before its first read.  */
+void ctor(Engine *e)
+{
+ e->elem = 0;
+ e->key.m = (__m128i)(__v4si){ 0, 0, 0, 0 };
+ e->c.v[0] = 0;
+ e->c.v[1] = 0;
+ e->c.v[2] = 0;
+ e->c.v[3] = 0;
+}
+
+/* Return the next 32-bit word from the engine.  When the 4-word
+   buffer v is exhausted (elem == 0), increment the counter c, XOR it
+   with the key via caster() to refill v, then hand out v's words in
+   reverse index order.  */
+uint32_t method( Engine *e)
+{
+ if( e->elem == 0 )
+ {
+ e->v = caster(*incr(&e->c), e->key);
+ e->elem = 4;
+ }
+ return e->v.v[--e->elem];
+}
+
+/* Two identically-constructed engines must yield the same first word;
+   a mismatch means the deterministic computation was miscompiled,
+   so abort (failing the dg-do run test).  */
+int main()
+{
+ Engine e4; ctor(&e4);
+ Engine e5; ctor(&e5);
+ if(method(&e4)!=method(&e5))
+ __builtin_abort ();
+ return 0;
+}
tree prev_base = base;
tree off;
HOST_WIDE_INT base_offset;
+ unsigned HOST_WIDE_INT misalign;
+ unsigned int align;
gcc_checking_assert (offset % BITS_PER_UNIT == 0);
base = build_fold_addr_expr (unshare_expr (base));
}
+ /* If prev_base were always an originally performed access
+ we can extract more optimistic alignment information
+ by looking at the access mode. That would constrain the
+ alignment of base + base_offset which we would need to
+ adjust according to offset.
+ ??? But it is not at all clear that prev_base is an access
+ that was in the IL that way, so be conservative for now. */
+ align = get_pointer_alignment_1 (base, &misalign);
+ misalign += (double_int_sext (tree_to_double_int (off),
+ TYPE_PRECISION (TREE_TYPE (off))).low
+ * BITS_PER_UNIT);
+ misalign = misalign & (align - 1);
+ if (misalign != 0)
+ align = (misalign & -misalign);
+ if (align < TYPE_ALIGN (exp_type))
+ exp_type = build_aligned_type (exp_type, align);
+
return fold_build2_loc (loc, MEM_REF, exp_type, base, off);
}