revert: [multiple changes]
Author:     Richard Biener <rguenther@suse.de>
AuthorDate: Thu, 3 Jan 2013 15:57:15 +0000
Commit:     Richard Biener <rguenth@gcc.gnu.org>
CommitDate: Thu, 3 Jan 2013 15:57:15 +0000
2013-01-03  Richard Biener  <rguenther@suse.de>

        Revert
        2013-01-03  Richard Biener  <rguenther@suse.de>

        PR tree-optimization/55857
        * tree-vect-stmts.c (vectorizable_load): Do not setup
        re-alignment for invariant loads.

        2013-01-02  Richard Biener  <rguenther@suse.de>

        * tree-vect-stmts.c (vectorizable_load): When vectorizing an
        invariant load do not generate a vector load from the scalar
        location.

From-SVN: r194856
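
For context, an "invariant load" is a load whose data reference does not
advance with the loop: DR_STEP (dr) is zero, so every iteration reads the
same scalar location.  A minimal sketch of such a loop (a hypothetical
example, not the PR 55857 testcase):

    extern int x;

    void
    f (int *out, int n)
    {
      int i;
      for (i = 0; i < n; i++)
        out[i] = x;   /* invariant load: &x does not step with i */
    }

The reverted 2013-01-02 change vectorized such loads by loading the scalar
once and broadcasting it with vect_init_vector instead of emitting a vector
load from the scalar location; the 2013-01-03 follow-up then also skipped
the realignment setup for them (PR 55857).  This commit undoes both.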

gcc/ChangeLog
gcc/tree-vect-stmts.c

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index bdbbb71..5e5885e 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,20 @@
 2013-01-03  Richard Biener  <rguenther@suse.de>
 
+       Revert
+       2013-01-03  Richard Biener  <rguenther@suse.de>
+
+       PR tree-optimization/55857
+       * tree-vect-stmts.c (vectorizable_load): Do not setup
+       re-alignment for invariant loads.
+
+       2013-01-02  Richard Biener  <rguenther@suse.de>
+
+       * tree-vect-stmts.c (vectorizable_load): When vectorizing an
+       invariant load do not generate a vector load from the scalar
+       location.
+
+2013-01-03  Richard Biener  <rguenther@suse.de>
+
        * tree-vect-loop.c (vect_analyze_loop_form): Clarify reason
        for not vectorizing.
        * tree-vect-data-refs.c (vect_create_addr_base_for_vector_ref): Do
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index dfbce96..1e8d7ee 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -4927,8 +4927,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
 
   if ((alignment_support_scheme == dr_explicit_realign_optimized
        || alignment_support_scheme == dr_explicit_realign)
-      && !compute_in_loop
-      && !integer_zerop (DR_STEP (dr)))
+      && !compute_in_loop)
     {
       msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                    alignment_support_scheme, NULL_TREE,
@@ -4989,19 +4988,6 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
          /* Record the mapping between SSA_NAMEs and statements.  */
          vect_record_grouped_load_vectors (stmt, dr_chain);
        }
-      /* Handle invariant-load.  */
-      else if (inv_p && !bb_vinfo)
-       {
-         gimple_stmt_iterator gsi2 = *gsi;
-         gcc_assert (!grouped_load && !slp_perm);
-         gsi_next (&gsi2);
-         new_temp = vect_init_vector (stmt, scalar_dest,
-                                      vectype, &gsi2);
-         new_stmt = SSA_NAME_DEF_STMT (new_temp);
-         /* Store vector loads in the corresponding SLP_NODE.  */
-         if (slp)
-           SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
-       }
       else
        {
          for (i = 0; i < vec_num; i++)
@@ -5149,6 +5135,17 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
                    }
                }
 
+             /* 4. Handle invariant-load.  */
+             if (inv_p && !bb_vinfo)
+               {
+                 gimple_stmt_iterator gsi2 = *gsi;
+                 gcc_assert (!grouped_load);
+                 gsi_next (&gsi2);
+                 new_temp = vect_init_vector (stmt, scalar_dest,
+                                              vectype, &gsi2);
+                 new_stmt = SSA_NAME_DEF_STMT (new_temp);
+               }
+
              if (negative)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
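
For background on the realignment guard in the first tree-vect-stmts.c hunk:
dr_explicit_realign emulates a misaligned vector load with two aligned loads
plus a permute selecting the elements actually wanted.  A rough sketch using
GCC's generic vector extensions (illustrative only: realign_load is a
hypothetical helper, not a GCC internal, and the page-crossing caveats the
real scheme handles are ignored here):

    #include <stdint.h>

    typedef int v4si __attribute__ ((vector_size (16)));

    v4si
    realign_load (const int *p)
    {
      uintptr_t addr = (uintptr_t) p;
      const v4si *base = (const v4si *) (addr & ~(uintptr_t) 15);
      v4si lo = base[0];                /* aligned load at or below p */
      v4si hi = base[1];                /* next aligned vector */
      int ofs = (addr & 15) / sizeof (int);
      v4si mask = { ofs, ofs + 1, ofs + 2, ofs + 3 };
      return __builtin_shuffle (lo, hi, mask);  /* the four ints at p */
    }

For an invariant load DR_STEP (dr) is zero, so there is no misaligned vector
access to realign; the reverted 2013-01-03 change relied on that to skip
vect_setup_realignment, and this revert removes the DR_STEP check again.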