Replace some heap vectors with stack vectors.
[platform/upstream/gcc.git] / gcc / tree-vect-loop.c
1 /* Loop Vectorization
2    Copyright (C) 2003-2013 Free Software Foundation, Inc.
3    Contributed by Dorit Naishlos <dorit@il.ibm.com> and
4    Ira Rosen <irar@il.ibm.com>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "basic-block.h"
30 #include "gimple-pretty-print.h"
31 #include "gimple.h"
32 #include "gimple-ssa.h"
33 #include "tree-phinodes.h"
34 #include "ssa-iterators.h"
35 #include "tree-ssanames.h"
36 #include "tree-ssa-loop-ivopts.h"
37 #include "tree-ssa-loop-manip.h"
38 #include "tree-ssa-loop-niter.h"
39 #include "tree-pass.h"
40 #include "cfgloop.h"
41 #include "expr.h"
42 #include "recog.h"
43 #include "optabs.h"
44 #include "params.h"
45 #include "diagnostic-core.h"
46 #include "tree-chrec.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "target.h"
50
51 /* Loop Vectorization Pass.
52
53    This pass tries to vectorize loops.
54
55    For example, the vectorizer transforms the following simple loop:
56
57         short a[N]; short b[N]; short c[N]; int i;
58
59         for (i=0; i<N; i++){
60           a[i] = b[i] + c[i];
61         }
62
63    as if it had been manually vectorized by rewriting the source code into:
64
65         typedef int __attribute__((mode(V8HI))) v8hi;
66         short a[N];  short b[N]; short c[N];   int i;
67         v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
68         v8hi va, vb, vc;
69
70         for (i=0; i<N/8; i++){
71           vb = pb[i];
72           vc = pc[i];
73           va = vb + vc;
74           pa[i] = va;
75         }
76
77         The main entry to this pass is vectorize_loops(), in which
78    the vectorizer applies a set of analyses on a given set of loops,
79    followed by the actual vectorization transformation for the loops that
80    had successfully passed the analysis phase.
81         Throughout this pass we make a distinction between two types of
82    data: scalars (which are represented by SSA_NAMES), and memory references
83    ("data-refs").  These two types of data require different handling both
84    during analysis and transformation. The types of data-refs that the
85    vectorizer currently supports are ARRAY_REFS whose base is an array DECL
86    (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
87    accesses are required to have a simple (consecutive) access pattern.
88
89    Analysis phase:
90    ===============
91         The driver for the analysis phase is vect_analyze_loop().
92    It applies a set of analyses, some of which rely on the scalar evolution
93    analyzer (scev) developed by Sebastian Pop.
94
95         During the analysis phase the vectorizer records some information
96    per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
97    loop, as well as general information about the loop as a whole, which is
98    recorded in a "loop_vec_info" struct attached to each loop.
99
100    Transformation phase:
101    =====================
102         The loop transformation phase scans all the stmts in the loop, and
103    creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
104    the loop that needs to be vectorized.  It inserts the vector code sequence
105    just before the scalar stmt S, and records a pointer to the vector code
106    in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
107    attached to S).  This pointer will be used for the vectorization of following
108    stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
109    otherwise, we rely on dead code elimination for removing it.
110
111         For example, say stmt S1 was vectorized into stmt VS1:
112
113    VS1: vb = px[i];
114    S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
115    S2:  a = b;
116
117    To vectorize stmt S2, the vectorizer first finds the stmt that defines
118    the operand 'b' (S1), and gets the relevant vector def 'vb' from the
119    vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
120    resulting sequence would be:
121
122    VS1: vb = px[i];
123    S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
124    VS2: va = vb;
125    S2:  a = b;          STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
126
127         Operands that are not SSA_NAMEs are data-refs that appear in
128    load/store operations (like 'x[i]' in S1), and are handled differently.
129
130    Target modeling:
131    =================
132         Currently the only target-specific information that is used is the
133    size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
134    Targets that can support different sizes of vectors will, for now, need
135    to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".  More
136    flexibility will be added in the future.
137
138         Since we only vectorize operations whose vector form can be
139    expressed using existing tree codes, to verify that an operation is
140    supported, the vectorizer checks the relevant optab at the relevant
141    machine_mode (e.g., optab_handler (add_optab, V8HImode)).  If
142    the value found is CODE_FOR_nothing, then there's no target support, and
143    we can't vectorize the stmt.
144
145    For additional information on this project see:
146    http://gcc.gnu.org/projects/tree-ssa/vectorization.html
147 */
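
/* Illustrative sketch (not from this file): the optab check described under
   "Target modeling" above is, roughly, what the per-stmt analysis in
   tree-vect-stmts.c does for an addition, assuming VECTYPE is the vector
   type chosen for the stmt:

        optab op = optab_for_tree_code (PLUS_EXPR, vectype, optab_default);
        if (!op
            || optab_handler (op, TYPE_MODE (vectype)) == CODE_FOR_nothing)
          return false;

   CODE_FOR_nothing means there is no target support, so the stmt (and hence
   the loop) cannot be vectorized.  The names above are the ones used
   elsewhere in the vectorizer; the snippet itself is only a sketch.  */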
148
149 static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
150
151 /* Function vect_determine_vectorization_factor
152
153    Determine the vectorization factor (VF).  VF is the number of data elements
154    that are operated upon in parallel in a single iteration of the vectorized
155    loop.  For example, when vectorizing a loop that operates on 4-byte elements,
156    on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
157    elements can fit in a single vector register.
158
159    We currently support vectorization of loops in which all types operated upon
160    are of the same size.  Therefore this function currently sets VF according to
161    the size of the types operated upon, and fails if there are multiple sizes
162    in the loop.
163
164    VF is also the factor by which the loop iterations are strip-mined, e.g.:
165    original loop:
166         for (i=0; i<N; i++){
167           a[i] = b[i] + c[i];
168         }
169
170    vectorized loop:
171         for (i=0; i<N; i+=VF){
172           a[i:VF] = b[i:VF] + c[i:VF];
173         }
174 */
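
/* Illustrative example (assumed, not from this file): with 16-byte vectors,
   the "smallest scalar type" rule used below gives a widening loop such as

        short b[N]; int a[N];
        for (i=0; i<N; i++)
          a[i] = b[i];

   the vector types V8HI (nunits 8) and V4SI (nunits 4).  Both are 16 bytes
   wide, so the per-stmt size check passes, and the VF recorded is the
   largest nunits seen, here 8.  */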
175
176 static bool
177 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
178 {
179   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
180   basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
181   int nbbs = loop->num_nodes;
182   gimple_stmt_iterator si;
183   unsigned int vectorization_factor = 0;
184   tree scalar_type;
185   gimple phi;
186   tree vectype;
187   unsigned int nunits;
188   stmt_vec_info stmt_info;
189   int i;
190   HOST_WIDE_INT dummy;
191   gimple stmt, pattern_stmt = NULL;
192   gimple_seq pattern_def_seq = NULL;
193   gimple_stmt_iterator pattern_def_si = gsi_none ();
194   bool analyze_pattern_stmt = false;
195
196   if (dump_enabled_p ())
197     dump_printf_loc (MSG_NOTE, vect_location,
198                      "=== vect_determine_vectorization_factor ===\n");
199
200   for (i = 0; i < nbbs; i++)
201     {
202       basic_block bb = bbs[i];
203
204       for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
205         {
206           phi = gsi_stmt (si);
207           stmt_info = vinfo_for_stmt (phi);
208           if (dump_enabled_p ())
209             {
210               dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
211               dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
212               dump_printf (MSG_NOTE, "\n");
213             }
214
215           gcc_assert (stmt_info);
216
217           if (STMT_VINFO_RELEVANT_P (stmt_info))
218             {
219               gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
220               scalar_type = TREE_TYPE (PHI_RESULT (phi));
221
222               if (dump_enabled_p ())
223                 {
224                   dump_printf_loc (MSG_NOTE, vect_location,
225                                    "get vectype for scalar type:  ");
226                   dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
227                   dump_printf (MSG_NOTE, "\n");
228                 }
229
230               vectype = get_vectype_for_scalar_type (scalar_type);
231               if (!vectype)
232                 {
233                   if (dump_enabled_p ())
234                     {
235                       dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
236                                        "not vectorized: unsupported "
237                                        "data-type ");
238                       dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
239                                          scalar_type);
240                       dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
241                     }
242                   return false;
243                 }
244               STMT_VINFO_VECTYPE (stmt_info) = vectype;
245
246               if (dump_enabled_p ())
247                 {
248                   dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
249                   dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
250                   dump_printf (MSG_NOTE, "\n");
251                 }
252
253               nunits = TYPE_VECTOR_SUBPARTS (vectype);
254               if (dump_enabled_p ())
255                 dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n",
256                                  nunits);
257
258               if (!vectorization_factor
259                   || (nunits > vectorization_factor))
260                 vectorization_factor = nunits;
261             }
262         }
263
264       for (si = gsi_start_bb (bb); !gsi_end_p (si) || analyze_pattern_stmt;)
265         {
266           tree vf_vectype;
267
268           if (analyze_pattern_stmt)
269             stmt = pattern_stmt;
270           else
271             stmt = gsi_stmt (si);
272
273           stmt_info = vinfo_for_stmt (stmt);
274
275           if (dump_enabled_p ())
276             {
277               dump_printf_loc (MSG_NOTE, vect_location,
278                                "==> examining statement: ");
279               dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
280               dump_printf (MSG_NOTE, "\n");
281             }
282
283           gcc_assert (stmt_info);
284
285           /* Skip stmts which do not need to be vectorized.  */
286           if ((!STMT_VINFO_RELEVANT_P (stmt_info)
287                && !STMT_VINFO_LIVE_P (stmt_info))
288               || gimple_clobber_p (stmt))
289             {
290               if (STMT_VINFO_IN_PATTERN_P (stmt_info)
291                   && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
292                   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
293                       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
294                 {
295                   stmt = pattern_stmt;
296                   stmt_info = vinfo_for_stmt (pattern_stmt);
297                   if (dump_enabled_p ())
298                     {
299                       dump_printf_loc (MSG_NOTE, vect_location,
300                                        "==> examining pattern statement: ");
301                       dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
302                       dump_printf (MSG_NOTE, "\n");
303                     }
304                 }
305               else
306                 {
307                   if (dump_enabled_p ())
308                     dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
309                   gsi_next (&si);
310                   continue;
311                 }
312             }
313           else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
314                    && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
315                    && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
316                        || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
317             analyze_pattern_stmt = true;
318
319           /* If a pattern statement has def stmts, analyze them too.  */
320           if (is_pattern_stmt_p (stmt_info))
321             {
322               if (pattern_def_seq == NULL)
323                 {
324                   pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
325                   pattern_def_si = gsi_start (pattern_def_seq);
326                 }
327               else if (!gsi_end_p (pattern_def_si))
328                 gsi_next (&pattern_def_si);
329               if (pattern_def_seq != NULL)
330                 {
331                   gimple pattern_def_stmt = NULL;
332                   stmt_vec_info pattern_def_stmt_info = NULL;
333
334                   while (!gsi_end_p (pattern_def_si))
335                     {
336                       pattern_def_stmt = gsi_stmt (pattern_def_si);
337                       pattern_def_stmt_info
338                         = vinfo_for_stmt (pattern_def_stmt);
339                       if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
340                           || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
341                         break;
342                       gsi_next (&pattern_def_si);
343                     }
344
345                   if (!gsi_end_p (pattern_def_si))
346                     {
347                       if (dump_enabled_p ())
348                         {
349                           dump_printf_loc (MSG_NOTE, vect_location,
350                                            "==> examining pattern def stmt: ");
351                           dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
352                                             pattern_def_stmt, 0);
353                           dump_printf (MSG_NOTE, "\n");
354                         }
355
356                       stmt = pattern_def_stmt;
357                       stmt_info = pattern_def_stmt_info;
358                     }
359                   else
360                     {
361                       pattern_def_si = gsi_none ();
362                       analyze_pattern_stmt = false;
363                     }
364                 }
365               else
366                 analyze_pattern_stmt = false;
367             }
368
369           if (gimple_get_lhs (stmt) == NULL_TREE)
370             {
371               if (dump_enabled_p ())
372                 {
373                   dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
374                                    "not vectorized: irregular stmt.");
375                   dump_gimple_stmt (MSG_MISSED_OPTIMIZATION,  TDF_SLIM, stmt,
376                                     0);
377                   dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
378                 }
379               return false;
380             }
381
382           if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
383             {
384               if (dump_enabled_p ())
385                 {
386                   dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
387                                    "not vectorized: vector stmt in loop:");
388                   dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
389                   dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
390                 }
391               return false;
392             }
393
394           if (STMT_VINFO_VECTYPE (stmt_info))
395             {
396               /* The only case when a vectype has already been set is for stmts
397                  that contain a dataref, or for "pattern-stmts" (stmts
398                  generated by the vectorizer to represent/replace a certain
399                  idiom).  */
400               gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
401                           || is_pattern_stmt_p (stmt_info)
402                           || !gsi_end_p (pattern_def_si));
403               vectype = STMT_VINFO_VECTYPE (stmt_info);
404             }
405           else
406             {
407               gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
408               scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
409               if (dump_enabled_p ())
410                 {
411                   dump_printf_loc (MSG_NOTE, vect_location,
412                                    "get vectype for scalar type:  ");
413                   dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
414                   dump_printf (MSG_NOTE, "\n");
415                 }
416               vectype = get_vectype_for_scalar_type (scalar_type);
417               if (!vectype)
418                 {
419                   if (dump_enabled_p ())
420                     {
421                       dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
422                                        "not vectorized: unsupported "
423                                        "data-type ");
424                       dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
425                                          scalar_type);
426                       dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
427                     }
428                   return false;
429                 }
430
431               STMT_VINFO_VECTYPE (stmt_info) = vectype;
432
433               if (dump_enabled_p ())
434                 {
435                   dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
436                   dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
437                   dump_printf (MSG_NOTE, "\n");
438                 }
439             }
440
441           /* The vectorization factor is determined by the smallest
442              scalar type (or the largest vector size, but we only
443              support one vector size per loop).  */
444           scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
445                                                        &dummy);
446           if (dump_enabled_p ())
447             {
448               dump_printf_loc (MSG_NOTE, vect_location,
449                                "get vectype for scalar type:  ");
450               dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
451               dump_printf (MSG_NOTE, "\n");
452             }
453           vf_vectype = get_vectype_for_scalar_type (scalar_type);
454           if (!vf_vectype)
455             {
456               if (dump_enabled_p ())
457                 {
458                   dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
459                                    "not vectorized: unsupported data-type ");
460                   dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
461                                      scalar_type);
462                   dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
463                 }
464               return false;
465             }
466
467           if ((GET_MODE_SIZE (TYPE_MODE (vectype))
468                != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
469             {
470               if (dump_enabled_p ())
471                 {
472                   dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
473                                    "not vectorized: different sized vector "
474                                    "types in statement, ");
475                   dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
476                                      vectype);
477                   dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
478                   dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
479                                      vf_vectype);
480                   dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
481                 }
482               return false;
483             }
484
485           if (dump_enabled_p ())
486             {
487               dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
488               dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
489               dump_printf (MSG_NOTE, "\n");
490             }
491
492           nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
493           if (dump_enabled_p ())
494             dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", nunits);
495           if (!vectorization_factor
496               || (nunits > vectorization_factor))
497             vectorization_factor = nunits;
498
499           if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
500             {
501               pattern_def_seq = NULL;
502               gsi_next (&si);
503             }
504         }
505     }
506
507   /* TODO: Analyze cost.  Decide if it is worthwhile to vectorize.  */
508   if (dump_enabled_p ())
509     dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d\n",
510                      vectorization_factor);
511   if (vectorization_factor <= 1)
512     {
513       if (dump_enabled_p ())
514         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
515                          "not vectorized: unsupported data-type\n");
516       return false;
517     }
518   LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
519
520   return true;
521 }
522
523
524 /* Function vect_is_simple_iv_evolution.
525
526    FORNOW: A simple evolution of an induction variable in the loop is
527    considered a polynomial evolution.  */
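
/* For illustration (assumed example): for a simple counter
   "for (i = 0; i < n; i++)" the scalar evolution of i in loop 1 is the
   chrec {0, +, 1}_1, so INIT and STEP below are 0 and 1.  A variable that
   accumulates the counter (s += i) instead evolves as
   {0, +, {0, +, 1}_1}_1; its evolution part is itself a chrec, so it is
   rejected as not "simple".  */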
528
529 static bool
530 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
531                              tree * step)
532 {
533   tree init_expr;
534   tree step_expr;
535   tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
536   basic_block bb;
537
538   /* When there is no evolution in this loop, the evolution function
539      is not "simple".  */
540   if (evolution_part == NULL_TREE)
541     return false;
542
543   /* When the evolution is a polynomial of degree >= 2
544      the evolution function is not "simple".  */
545   if (tree_is_chrec (evolution_part))
546     return false;
547
548   step_expr = evolution_part;
549   init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
550
551   if (dump_enabled_p ())
552     {
553       dump_printf_loc (MSG_NOTE, vect_location, "step: ");
554       dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
555       dump_printf (MSG_NOTE, ",  init: ");
556       dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
557       dump_printf (MSG_NOTE, "\n");
558     }
559
560   *init = init_expr;
561   *step = step_expr;
562
563   if (TREE_CODE (step_expr) != INTEGER_CST
564       && (TREE_CODE (step_expr) != SSA_NAME
565           || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
566               && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
567           || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
568               && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
569                   || !flag_associative_math)))
570       && (TREE_CODE (step_expr) != REAL_CST
571           || !flag_associative_math))
572     {
573       if (dump_enabled_p ())
574         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
575                          "step unknown.\n");
576       return false;
577     }
578
579   return true;
580 }
581
582 /* Function vect_analyze_scalar_cycles_1.
583
584    Examine the cross iteration def-use cycles of scalar variables
585    in LOOP.  LOOP_VINFO represents the loop that is now being
586    considered for vectorization (can be LOOP, or an outer-loop
587    enclosing LOOP).  */
588
589 static void
590 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
591 {
592   basic_block bb = loop->header;
593   tree init, step;
594   stack_vec<gimple, 64> worklist;
595   gimple_stmt_iterator gsi;
596   bool double_reduc;
597
598   if (dump_enabled_p ())
599     dump_printf_loc (MSG_NOTE, vect_location,
600                      "=== vect_analyze_scalar_cycles ===\n");
601
602   /* First - identify all inductions.  Reduction detection assumes that all the
603      inductions have been identified; therefore, this order must not be
604      changed.  */
605   for (gsi = gsi_start_phis  (bb); !gsi_end_p (gsi); gsi_next (&gsi))
606     {
607       gimple phi = gsi_stmt (gsi);
608       tree access_fn = NULL;
609       tree def = PHI_RESULT (phi);
610       stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
611
612       if (dump_enabled_p ())
613         {
614           dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
615           dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
616           dump_printf (MSG_NOTE, "\n");
617         }
618
619       /* Skip virtual phi's.  The data dependences that are associated with
620          virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
621       if (virtual_operand_p (def))
622         continue;
623
624       STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
625
626       /* Analyze the evolution function.  */
627       access_fn = analyze_scalar_evolution (loop, def);
628       if (access_fn)
629         {
630           STRIP_NOPS (access_fn);
631           if (dump_enabled_p ())
632             {
633               dump_printf_loc (MSG_NOTE, vect_location,
634                                "Access function of PHI: ");
635               dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
636               dump_printf (MSG_NOTE, "\n");
637             }
638           STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
639             = evolution_part_in_loop_num (access_fn, loop->num);
640         }
641
642       if (!access_fn
643           || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
644           || (LOOP_VINFO_LOOP (loop_vinfo) != loop
645               && TREE_CODE (step) != INTEGER_CST))
646         {
647           worklist.safe_push (phi);
648           continue;
649         }
650
651       gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
652
653       if (dump_enabled_p ())
654         dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
655       STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
656     }
657
658
659   /* Second - identify all reductions and nested cycles.  */
660   while (worklist.length () > 0)
661     {
662       gimple phi = worklist.pop ();
663       tree def = PHI_RESULT (phi);
664       stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
665       gimple reduc_stmt;
666       bool nested_cycle;
667
668       if (dump_enabled_p ())
669         {
670           dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
671           dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
672           dump_printf (MSG_NOTE, "\n");
673         }
674
675       gcc_assert (!virtual_operand_p (def)
676                   && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
677
678       nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
679       reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
680                                                 &double_reduc);
681       if (reduc_stmt)
682         {
683           if (double_reduc)
684             {
685               if (dump_enabled_p ())
686                 dump_printf_loc (MSG_NOTE, vect_location,
687                                  "Detected double reduction.\n");
688
689               STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
690               STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
691                                                     vect_double_reduction_def;
692             }
693           else
694             {
695               if (nested_cycle)
696                 {
697                   if (dump_enabled_p ())
698                     dump_printf_loc (MSG_NOTE, vect_location,
699                                      "Detected vectorizable nested cycle.\n");
700
701                   STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
702                   STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
703                                                              vect_nested_cycle;
704                 }
705               else
706                 {
707                   if (dump_enabled_p ())
708                     dump_printf_loc (MSG_NOTE, vect_location,
709                                      "Detected reduction.\n");
710
711                   STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
712                   STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
713                                                            vect_reduction_def;
714                   /* Store the reduction cycles for possible vectorization in
715                      loop-aware SLP.  */
716                   LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
717                 }
718             }
719         }
720       else
721         if (dump_enabled_p ())
722           dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
723                            "Unknown def-use cycle pattern.\n");
724     }
725 }
726
727
728 /* Function vect_analyze_scalar_cycles.
729
730    Examine the cross iteration def-use cycles of scalar variables, by
731    analyzing the loop-header PHIs of scalar variables.  Classify each
732    cycle as one of the following: invariant, induction, reduction, unknown.
733    We do that for the loop represented by LOOP_VINFO, and also for its
734    inner-loop, if it exists.
735    Examples for scalar cycles:
736
737    Example1: reduction:
738
739               loop1:
740               for (i=0; i<N; i++)
741                  sum += a[i];
742
743    Example2: induction:
744
745               loop2:
746               for (i=0; i<N; i++)
747                  a[i] = i;  */
748
749 static void
750 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
751 {
752   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
753
754   vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
755
756   /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
757      Reductions in such an inner-loop therefore have different properties than
758      the reductions in the nest that gets vectorized:
759      1. When vectorized, they are executed in the same order as in the original
760         scalar loop, so we can't change the order of computation when
761         vectorizing them.
762      2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
763         current checks are too strict.  */
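
  /* For illustration (assumed example): when the outer i-loop of

        for (i = 0; i < N; i++)
          {
            s = 0;
            for (j = 0; j < M; j++)
              s += a[i][j];
            b[i] = s;
          }

     is vectorized, the inner j-loop still executes sequentially, with each
     vector lane accumulating the sum for a different value of i; the
     additions within each lane therefore keep their original scalar order,
     unlike a reduction in the loop that is actually vectorized.  */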
764
765   if (loop->inner)
766     vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
767 }
768
769 /* Function vect_get_loop_niters.
770
771    Determine how many iterations the loop is executed.
772    If an expression that represents the number of iterations
773    can be constructed, place it in NUMBER_OF_ITERATIONS.
774    Return the loop exit condition.  */
775
776 static gimple
777 vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
778 {
779   tree niters;
780
781   if (dump_enabled_p ())
782     dump_printf_loc (MSG_NOTE, vect_location,
783                      "=== get_loop_niters ===\n");
784   niters = number_of_exit_cond_executions (loop);
785
786   if (niters != NULL_TREE
787       && niters != chrec_dont_know)
788     {
789       *number_of_iterations = niters;
790
791       if (dump_enabled_p ())
792         {
793           dump_printf_loc (MSG_NOTE, vect_location, "==> get_loop_niters:");
794           dump_generic_expr (MSG_NOTE, TDF_SLIM, *number_of_iterations);
795           dump_printf (MSG_NOTE, "\n");
796         }
797     }
798
799   return get_loop_exit_condition (loop);
800 }
801
802
803 /* Function bb_in_loop_p
804
805    Used as predicate for dfs order traversal of the loop bbs.  */
806
807 static bool
808 bb_in_loop_p (const_basic_block bb, const void *data)
809 {
810   const struct loop *const loop = (const struct loop *)data;
811   if (flow_bb_inside_loop_p (loop, bb))
812     return true;
813   return false;
814 }
815
816
817 /* Function new_loop_vec_info.
818
819    Create and initialize a new loop_vec_info struct for LOOP, as well as
820    stmt_vec_info structs for all the stmts in LOOP.  */
821
822 static loop_vec_info
823 new_loop_vec_info (struct loop *loop)
824 {
825   loop_vec_info res;
826   basic_block *bbs;
827   gimple_stmt_iterator si;
828   unsigned int i, nbbs;
829
830   res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
831   LOOP_VINFO_LOOP (res) = loop;
832
833   bbs = get_loop_body (loop);
834
835   /* Create/Update stmt_info for all stmts in the loop.  */
836   for (i = 0; i < loop->num_nodes; i++)
837     {
838       basic_block bb = bbs[i];
839
840       /* BBs in a nested inner-loop will have been already processed (because
841          we will have called vect_analyze_loop_form for any nested inner-loop).
842          Therefore, for stmts in an inner-loop we just want to update the
843          STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
844          loop_info of the outer-loop we are currently considering to vectorize
845          (instead of the loop_info of the inner-loop).
846          For stmts in other BBs we need to create a stmt_info from scratch.  */
847       if (bb->loop_father != loop)
848         {
849           /* Inner-loop bb.  */
850           gcc_assert (loop->inner && bb->loop_father == loop->inner);
851           for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
852             {
853               gimple phi = gsi_stmt (si);
854               stmt_vec_info stmt_info = vinfo_for_stmt (phi);
855               loop_vec_info inner_loop_vinfo =
856                 STMT_VINFO_LOOP_VINFO (stmt_info);
857               gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
858               STMT_VINFO_LOOP_VINFO (stmt_info) = res;
859             }
860           for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
861             {
862               gimple stmt = gsi_stmt (si);
863               stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
864               loop_vec_info inner_loop_vinfo =
865                 STMT_VINFO_LOOP_VINFO (stmt_info);
866               gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
867               STMT_VINFO_LOOP_VINFO (stmt_info) = res;
868             }
869         }
870       else
871         {
872           /* bb in current nest.  */
873           for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
874             {
875               gimple phi = gsi_stmt (si);
876               gimple_set_uid (phi, 0);
877               set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
878             }
879
880           for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
881             {
882               gimple stmt = gsi_stmt (si);
883               gimple_set_uid (stmt, 0);
884               set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
885             }
886         }
887     }
888
889   /* CHECKME: We want to visit all BBs before their successors (except for
890      latch blocks, for which this assertion wouldn't hold).  In the simple
891      case of the loop forms we allow, a dfs order of the BBs would be the same
892      as a reversed postorder traversal, so we are safe.  */
893
894    free (bbs);
895    bbs = XCNEWVEC (basic_block, loop->num_nodes);
896    nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
897                               bbs, loop->num_nodes, loop);
898    gcc_assert (nbbs == loop->num_nodes);
899
900   LOOP_VINFO_BBS (res) = bbs;
901   LOOP_VINFO_NITERS (res) = NULL;
902   LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
903   LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
904   LOOP_VINFO_VECTORIZABLE_P (res) = 0;
905   LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
906   LOOP_VINFO_VECT_FACTOR (res) = 0;
907   LOOP_VINFO_LOOP_NEST (res).create (3);
908   LOOP_VINFO_DATAREFS (res).create (10);
909   LOOP_VINFO_DDRS (res).create (10 * 10);
910   LOOP_VINFO_UNALIGNED_DR (res) = NULL;
911   LOOP_VINFO_MAY_MISALIGN_STMTS (res).create (
912              PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
913   LOOP_VINFO_MAY_ALIAS_DDRS (res).create (
914              PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
915   LOOP_VINFO_GROUPED_STORES (res).create (10);
916   LOOP_VINFO_REDUCTIONS (res).create (10);
917   LOOP_VINFO_REDUCTION_CHAINS (res).create (10);
918   LOOP_VINFO_SLP_INSTANCES (res).create (10);
919   LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
920   LOOP_VINFO_TARGET_COST_DATA (res) = init_cost (loop);
921   LOOP_VINFO_PEELING_FOR_GAPS (res) = false;
922   LOOP_VINFO_OPERANDS_SWAPPED (res) = false;
923
924   return res;
925 }
926
927
928 /* Function destroy_loop_vec_info.
929
930    Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
931    stmts in the loop.  */
932
933 void
934 destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
935 {
936   struct loop *loop;
937   basic_block *bbs;
938   int nbbs;
939   gimple_stmt_iterator si;
940   int j;
941   vec<slp_instance> slp_instances;
942   slp_instance instance;
943   bool swapped;
944
945   if (!loop_vinfo)
946     return;
947
948   loop = LOOP_VINFO_LOOP (loop_vinfo);
949
950   bbs = LOOP_VINFO_BBS (loop_vinfo);
951   nbbs = clean_stmts ? loop->num_nodes : 0;
952   swapped = LOOP_VINFO_OPERANDS_SWAPPED (loop_vinfo);
953
954   for (j = 0; j < nbbs; j++)
955     {
956       basic_block bb = bbs[j];
957       for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
958         free_stmt_vec_info (gsi_stmt (si));
959
960       for (si = gsi_start_bb (bb); !gsi_end_p (si); )
961         {
962           gimple stmt = gsi_stmt (si);
963
964           /* We may have broken canonical form by moving a constant
965              into RHS1 of a commutative op.  Fix such occurrences.  */
966           if (swapped && is_gimple_assign (stmt))
967             {
968               enum tree_code code = gimple_assign_rhs_code (stmt);
969
970               if ((code == PLUS_EXPR
971                    || code == POINTER_PLUS_EXPR
972                    || code == MULT_EXPR)
973                   && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
974                 swap_ssa_operands (stmt,
975                                    gimple_assign_rhs1_ptr (stmt),
976                                    gimple_assign_rhs2_ptr (stmt));
977             }
978
979           /* Free stmt_vec_info.  */
980           free_stmt_vec_info (stmt);
981           gsi_next (&si);
982         }
983     }
984
985   free (LOOP_VINFO_BBS (loop_vinfo));
986   vect_destroy_datarefs (loop_vinfo, NULL);
987   free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
988   LOOP_VINFO_LOOP_NEST (loop_vinfo).release ();
989   LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).release ();
990   LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).release ();
991   slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
992   FOR_EACH_VEC_ELT (slp_instances, j, instance)
993     vect_free_slp_instance (instance);
994
995   LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
996   LOOP_VINFO_GROUPED_STORES (loop_vinfo).release ();
997   LOOP_VINFO_REDUCTIONS (loop_vinfo).release ();
998   LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).release ();
999
1000   if (LOOP_VINFO_PEELING_HTAB (loop_vinfo).is_created ())
1001     LOOP_VINFO_PEELING_HTAB (loop_vinfo).dispose ();
1002
1003   destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
1004
1005   free (loop_vinfo);
1006   loop->aux = NULL;
1007 }
1008
1009
1010 /* Function vect_analyze_loop_1.
1011
1012    Apply a set of analyses on LOOP, and create a loop_vec_info struct
1013    for it. The different analyses will record information in the
1014    loop_vec_info struct.  This is a subset of the analyses applied in
1015    vect_analyze_loop, to be applied on an inner-loop nested in the loop
1016    that is now considered for (outer-loop) vectorization.  */
1017
1018 static loop_vec_info
1019 vect_analyze_loop_1 (struct loop *loop)
1020 {
1021   loop_vec_info loop_vinfo;
1022
1023   if (dump_enabled_p ())
1024     dump_printf_loc (MSG_NOTE, vect_location,
1025                      "===== analyze_loop_nest_1 =====\n");
1026
1027   /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */
1028
1029   loop_vinfo = vect_analyze_loop_form (loop);
1030   if (!loop_vinfo)
1031     {
1032       if (dump_enabled_p ())
1033         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1034                          "bad inner-loop form.\n");
1035       return NULL;
1036     }
1037
1038   return loop_vinfo;
1039 }
1040
1041
1042 /* Function vect_analyze_loop_form.
1043
1044    Verify that certain CFG restrictions hold, including:
1045    - the loop has a pre-header
1046    - the loop has a single entry and exit
1047    - the loop exit condition is simple enough, and the number of iterations
1048      can be analyzed (a countable loop).  */
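
/* For illustration (assumed example): a loop that passes these checks
   typically has the do-while shape produced by earlier loop-header copying,
   e.g.

        if (n > 0)
          {
            i = 0;
            do
              {
                a[i] = b[i];
                i++;
              }
            while (i < n);
          }

   i.e. an if-guard before the loop, a header that holds all the executable
   statements, an empty latch, a single exit, and a trip count (n) that the
   scalar evolution analyzer can compute.  */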
1049
1050 loop_vec_info
1051 vect_analyze_loop_form (struct loop *loop)
1052 {
1053   loop_vec_info loop_vinfo;
1054   gimple loop_cond;
1055   tree number_of_iterations = NULL;
1056   loop_vec_info inner_loop_vinfo = NULL;
1057
1058   if (dump_enabled_p ())
1059     dump_printf_loc (MSG_NOTE, vect_location,
1060                      "=== vect_analyze_loop_form ===\n");
1061
1062   /* Different restrictions apply when we are considering an inner-most loop,
1063      vs. an outer (nested) loop.
1064      (FORNOW. May want to relax some of these restrictions in the future).  */
1065
1066   if (!loop->inner)
1067     {
1068       /* Inner-most loop.  We currently require that the number of BBs is
1069          exactly 2 (the header and latch).  Vectorizable inner-most loops
1070          look like this:
1071
1072                         (pre-header)
1073                            |
1074                           header <--------+
1075                            | |            |
1076                            | +--> latch --+
1077                            |
1078                         (exit-bb)  */
1079
1080       if (loop->num_nodes != 2)
1081         {
1082           if (dump_enabled_p ())
1083             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1084                              "not vectorized: control flow in loop.\n");
1085           return NULL;
1086         }
1087
1088       if (empty_block_p (loop->header))
1089         {
1090           if (dump_enabled_p ())
1091             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1092                              "not vectorized: empty loop.\n");
1093           return NULL;
1094         }
1095     }
1096   else
1097     {
1098       struct loop *innerloop = loop->inner;
1099       edge entryedge;
1100
1101       /* Nested loop. We currently require that the loop is doubly-nested,
1102          contains a single inner loop, and the number of BBs is exactly 5.
1103          Vectorizable outer-loops look like this:
1104
1105                         (pre-header)
1106                            |
1107                           header <---+
1108                            |         |
1109                           inner-loop |
1110                            |         |
1111                           tail ------+
1112                            |
1113                         (exit-bb)
1114
1115          The inner-loop has the properties expected of inner-most loops
1116          as described above.  */
1117
1118       if ((loop->inner)->inner || (loop->inner)->next)
1119         {
1120           if (dump_enabled_p ())
1121             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1122                              "not vectorized: multiple nested loops.\n");
1123           return NULL;
1124         }
1125
1126       /* Analyze the inner-loop.  */
1127       inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
1128       if (!inner_loop_vinfo)
1129         {
1130           if (dump_enabled_p ())
1131             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1132                              "not vectorized: Bad inner loop.\n");
1133           return NULL;
1134         }
1135
1136       if (!expr_invariant_in_loop_p (loop,
1137                                         LOOP_VINFO_NITERS (inner_loop_vinfo)))
1138         {
1139           if (dump_enabled_p ())
1140             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1141                              "not vectorized: inner-loop count not"
1142                              " invariant.\n");
1143           destroy_loop_vec_info (inner_loop_vinfo, true);
1144           return NULL;
1145         }
1146
1147       if (loop->num_nodes != 5)
1148         {
1149           if (dump_enabled_p ())
1150             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1151                              "not vectorized: control flow in loop.\n");
1152           destroy_loop_vec_info (inner_loop_vinfo, true);
1153           return NULL;
1154         }
1155
1156       gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
1157       entryedge = EDGE_PRED (innerloop->header, 0);
1158       if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
1159         entryedge = EDGE_PRED (innerloop->header, 1);
1160
1161       if (entryedge->src != loop->header
1162           || !single_exit (innerloop)
1163           || single_exit (innerloop)->dest !=  EDGE_PRED (loop->latch, 0)->src)
1164         {
1165           if (dump_enabled_p ())
1166             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1167                              "not vectorized: unsupported outerloop form.\n");
1168           destroy_loop_vec_info (inner_loop_vinfo, true);
1169           return NULL;
1170         }
1171
1172       if (dump_enabled_p ())
1173         dump_printf_loc (MSG_NOTE, vect_location,
1174                          "Considering outer-loop vectorization.\n");
1175     }
1176
1177   if (!single_exit (loop)
1178       || EDGE_COUNT (loop->header->preds) != 2)
1179     {
1180       if (dump_enabled_p ())
1181         {
1182           if (!single_exit (loop))
1183             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1184                              "not vectorized: multiple exits.\n");
1185           else if (EDGE_COUNT (loop->header->preds) != 2)
1186             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1187                              "not vectorized: too many incoming edges.\n");
1188         }
1189       if (inner_loop_vinfo)
1190         destroy_loop_vec_info (inner_loop_vinfo, true);
1191       return NULL;
1192     }
1193
1194   /* We assume that the loop exit condition is at the end of the loop, i.e.,
1195      that the loop is represented as a do-while (with a proper if-guard
1196      before the loop if needed), where the loop header contains all the
1197      executable statements, and the latch is empty.  */
1198   if (!empty_block_p (loop->latch)
1199       || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1200     {
1201       if (dump_enabled_p ())
1202         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1203                          "not vectorized: latch block not empty.\n");
1204       if (inner_loop_vinfo)
1205         destroy_loop_vec_info (inner_loop_vinfo, true);
1206       return NULL;
1207     }
1208
1209   /* Make sure there exists a single-predecessor exit bb:  */
1210   if (!single_pred_p (single_exit (loop)->dest))
1211     {
1212       edge e = single_exit (loop);
1213       if (!(e->flags & EDGE_ABNORMAL))
1214         {
1215           split_loop_exit_edge (e);
1216           if (dump_enabled_p ())
1217             dump_printf (MSG_NOTE, "split exit edge.\n");
1218         }
1219       else
1220         {
1221           if (dump_enabled_p ())
1222             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1223                              "not vectorized: abnormal loop exit edge.\n");
1224           if (inner_loop_vinfo)
1225             destroy_loop_vec_info (inner_loop_vinfo, true);
1226           return NULL;
1227         }
1228     }
1229
1230   loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
1231   if (!loop_cond)
1232     {
1233       if (dump_enabled_p ())
1234         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1235                          "not vectorized: complicated exit condition.\n");
1236       if (inner_loop_vinfo)
1237         destroy_loop_vec_info (inner_loop_vinfo, true);
1238       return NULL;
1239     }
1240
1241   if (!number_of_iterations)
1242     {
1243       if (dump_enabled_p ())
1244         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1245                          "not vectorized: number of iterations cannot be "
1246                          "computed.\n");
1247       if (inner_loop_vinfo)
1248         destroy_loop_vec_info (inner_loop_vinfo, true);
1249       return NULL;
1250     }
1251
1252   if (chrec_contains_undetermined (number_of_iterations))
1253     {
1254       if (dump_enabled_p ())
1255             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1256                              "Infinite number of iterations.\n");
1257       if (inner_loop_vinfo)
1258         destroy_loop_vec_info (inner_loop_vinfo, true);
1259       return NULL;
1260     }
1261
1262   if (!NITERS_KNOWN_P (number_of_iterations))
1263     {
1264       if (dump_enabled_p ())
1265         {
1266           dump_printf_loc (MSG_NOTE, vect_location,
1267                            "Symbolic number of iterations is ");
1268           dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
1269           dump_printf (MSG_NOTE, "\n");
1270         }
1271     }
1272   else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
1273     {
1274       if (dump_enabled_p ())
1275         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1276                          "not vectorized: number of iterations = 0.\n");
1277       if (inner_loop_vinfo)
1278         destroy_loop_vec_info (inner_loop_vinfo, true);
1279       return NULL;
1280     }
1281
1282   loop_vinfo = new_loop_vec_info (loop);
1283   LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1284   LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1285
1286   STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;
1287
1288   /* CHECKME: May want to keep it around in the future.  */
1289   if (inner_loop_vinfo)
1290     destroy_loop_vec_info (inner_loop_vinfo, false);
1291
1292   gcc_assert (!loop->aux);
1293   loop->aux = loop_vinfo;
1294   return loop_vinfo;
1295 }
1296
1297
1298 /* Function vect_analyze_loop_operations.
1299
1300    Scan the loop stmts and make sure they are all vectorizable.  */
1301
1302 static bool
1303 vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
1304 {
1305   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1306   basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1307   int nbbs = loop->num_nodes;
1308   gimple_stmt_iterator si;
1309   unsigned int vectorization_factor = 0;
1310   int i;
1311   gimple phi;
1312   stmt_vec_info stmt_info;
1313   bool need_to_vectorize = false;
1314   int min_profitable_iters;
1315   int min_scalar_loop_bound;
1316   unsigned int th;
1317   bool only_slp_in_loop = true, ok;
1318   HOST_WIDE_INT max_niter;
1319   HOST_WIDE_INT estimated_niter;
1320   int min_profitable_estimate;
1321
1322   if (dump_enabled_p ())
1323     dump_printf_loc (MSG_NOTE, vect_location,
1324                      "=== vect_analyze_loop_operations ===\n");
1325
1326   gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
1327   vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1328   if (slp)
1329     {
1330       /* If all the stmts in the loop can be SLPed, we perform only SLP, and
1331          the vectorization factor of the loop is the unrolling factor required by
1332          the SLP instances.  If that unrolling factor is 1, we say that we
1333          perform pure SLP on the loop - cross-iteration parallelism is not
1334          exploited.  */
1335       for (i = 0; i < nbbs; i++)
1336         {
1337           basic_block bb = bbs[i];
1338           for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1339             {
1340               gimple stmt = gsi_stmt (si);
1341               stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1342               gcc_assert (stmt_info);
1343               if ((STMT_VINFO_RELEVANT_P (stmt_info)
1344                    || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1345                   && !PURE_SLP_STMT (stmt_info))
1346                 /* STMT needs both SLP and loop-based vectorization.  */
1347                 only_slp_in_loop = false;
1348             }
1349         }
1350
1351       if (only_slp_in_loop)
1352         vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1353       else
1354         vectorization_factor = least_common_multiple (vectorization_factor,
1355                                 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1356
1357       LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1358       if (dump_enabled_p ())
1359         dump_printf_loc (MSG_NOTE, vect_location,
1360                          "Updating vectorization factor to %d\n",
1361                          vectorization_factor);
1362     }
1363
1364   for (i = 0; i < nbbs; i++)
1365     {
1366       basic_block bb = bbs[i];
1367
1368       for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
1369         {
1370           phi = gsi_stmt (si);
1371           ok = true;
1372
1373           stmt_info = vinfo_for_stmt (phi);
1374           if (dump_enabled_p ())
1375             {
1376               dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
1377               dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
1378               dump_printf (MSG_NOTE, "\n");
1379             }
1380
1381           /* Inner-loop loop-closed exit phi in outer-loop vectorization
1382              (i.e., a phi in the tail of the outer-loop).  */
1383           if (! is_loop_header_bb_p (bb))
1384             {
1385               /* FORNOW: we currently don't support the case that these phis
1386                  are not used in the outer loop (unless it is double reduction,
1387                  i.e., this phi is vect_reduction_def), because this case
1388                  would require us to actually do something here.  */
1389               if ((!STMT_VINFO_RELEVANT_P (stmt_info)
1390                    || STMT_VINFO_LIVE_P (stmt_info))
1391                   && STMT_VINFO_DEF_TYPE (stmt_info)
1392                      != vect_double_reduction_def)
1393                 {
1394                   if (dump_enabled_p ())
1395                     dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1396                                      "Unsupported loop-closed phi in "
1397                                      "outer-loop.\n");
1398                   return false;
1399                 }
1400
1401               /* If PHI is used in the outer loop, we check that its operand
1402                  is defined in the inner loop.  */
1403               if (STMT_VINFO_RELEVANT_P (stmt_info))
1404                 {
1405                   tree phi_op;
1406                   gimple op_def_stmt;
1407
1408                   if (gimple_phi_num_args (phi) != 1)
1409                     return false;
1410
1411                   phi_op = PHI_ARG_DEF (phi, 0);
1412                   if (TREE_CODE (phi_op) != SSA_NAME)
1413                     return false;
1414
1415                   op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
1416                   if (gimple_nop_p (op_def_stmt)
1417                       || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt))
1418                       || !vinfo_for_stmt (op_def_stmt))
1419                     return false;
1420
1421                   if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
1422                         != vect_used_in_outer
1423                       && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
1424                            != vect_used_in_outer_by_reduction)
1425                     return false;
1426                 }
1427
1428               continue;
1429             }
1430
1431           gcc_assert (stmt_info);
1432
1433           if (STMT_VINFO_LIVE_P (stmt_info))
1434             {
1435               /* FORNOW: not yet supported.  */
1436               if (dump_enabled_p ())
1437                 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1438                                  "not vectorized: value used after loop.\n");
1439               return false;
1440             }
1441
1442           if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1443               && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1444             {
1445               /* A scalar-dependence cycle that we don't support.  */
1446               if (dump_enabled_p ())
1447                 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1448                                  "not vectorized: scalar dependence cycle.\n");
1449               return false;
1450             }
1451
1452           if (STMT_VINFO_RELEVANT_P (stmt_info))
1453             {
1454               need_to_vectorize = true;
1455               if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
1456                 ok = vectorizable_induction (phi, NULL, NULL);
1457             }
1458
1459           if (!ok)
1460             {
1461               if (dump_enabled_p ())
1462                 {
1463                   dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1464                                    "not vectorized: relevant phi not "
1465                                    "supported: ");
1466                   dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
1467                   dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
1468                 }
1469               return false;
1470             }
1471         }
1472
1473       for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1474         {
1475           gimple stmt = gsi_stmt (si);
1476           if (!gimple_clobber_p (stmt)
1477               && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
1478             return false;
1479         }
1480     } /* bbs */
1481
1482   /* All operations in the loop are either irrelevant (they deal with loop
1483      control, or are dead), or are only used outside the loop and can be moved
1484      out of the loop (e.g. invariants, inductions).  The loop can be
1485      optimized away by scalar optimizations.  We're better off not
1486      touching this loop.  */
1487   if (!need_to_vectorize)
1488     {
1489       if (dump_enabled_p ())
1490         dump_printf_loc (MSG_NOTE, vect_location,
1491                          "All the computation can be taken out of the loop.\n");
1492       if (dump_enabled_p ())
1493         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1494                          "not vectorized: redundant loop. no profit to "
1495                          "vectorize.\n");
1496       return false;
1497     }
1498
1499   if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
1500     dump_printf_loc (MSG_NOTE, vect_location,
1501                      "vectorization_factor = %d, niters = "
1502                      HOST_WIDE_INT_PRINT_DEC "\n", vectorization_factor,
1503                      LOOP_VINFO_INT_NITERS (loop_vinfo));
1504
1505   if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1506        && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
1507       || ((max_niter = max_stmt_executions_int (loop)) != -1
1508           && (unsigned HOST_WIDE_INT) max_niter < vectorization_factor))
1509     {
1510       if (dump_enabled_p ())
1511         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1512                          "not vectorized: iteration count too small.\n");
1513       if (dump_enabled_p ())
1514         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1515                          "not vectorized: iteration count smaller than "
1516                          "vectorization factor.\n");
1517       return false;
1518     }
1519
1520   /* Analyze cost.  Decide if worth while to vectorize.  */
1521
1522   /* Once VF is set, SLP costs should be updated since the number of created
1523      vector stmts depends on VF.  */
1524   vect_update_slp_costs_according_to_vf (loop_vinfo);
1525
1526   vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
1527                                       &min_profitable_estimate);
1528   LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;
1529
1530   if (min_profitable_iters < 0)
1531     {
1532       if (dump_enabled_p ())
1533         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1534                          "not vectorized: vectorization not profitable.\n");
1535       if (dump_enabled_p ())
1536         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1537                          "not vectorized: vector version will never be "
1538                          "profitable.\n");
1539       return false;
1540     }
1541
1542   min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
1543                             * vectorization_factor) - 1);
1544
1545
1546   /* Use the cost model only if it is more conservative than the
1547      user-specified threshold.  */
1548
1549   th = (unsigned) min_scalar_loop_bound;
1550   if (min_profitable_iters
1551       && (!min_scalar_loop_bound
1552           || min_profitable_iters > min_scalar_loop_bound))
1553     th = (unsigned) min_profitable_iters;
1554
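  /* Illustrative example (hypothetical values): with
     --param min-vect-loop-bound=2 and a vectorization factor of 4,
     min_scalar_loop_bound is 2 * 4 - 1 = 7; if the cost model computed
     min_profitable_iters = 10, the threshold TH used below becomes 10.  */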
1555   if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1556       && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
1557     {
1558       if (dump_enabled_p ())
1559         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1560                          "not vectorized: vectorization not profitable.\n");
1561       if (dump_enabled_p ())
1562         dump_printf_loc (MSG_NOTE, vect_location,
1563                          "not vectorized: iteration count smaller than user "
1564                          "specified loop bound parameter or minimum profitable "
1565                          "iterations (whichever is more conservative).\n");
1566       return false;
1567     }
1568
1569   if ((estimated_niter = estimated_stmt_executions_int (loop)) != -1
1570       && ((unsigned HOST_WIDE_INT) estimated_niter
1571           <= MAX (th, (unsigned)min_profitable_estimate)))
1572     {
1573       if (dump_enabled_p ())
1574         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1575                          "not vectorized: estimated iteration count too "
1576                          "small.\n");
1577       if (dump_enabled_p ())
1578         dump_printf_loc (MSG_NOTE, vect_location,
1579                          "not vectorized: estimated iteration count smaller "
1580                          "than specified loop bound parameter or minimum "
1581                          "profitable iterations (whichever is more "
1582                          "conservative).\n");
1583       return false;
1584     }
1585
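  /* An epilogue loop is needed when we peel for alignment or when the
     iteration count is not known to be a multiple of the vectorization
     factor: tree_ctz gives the number of known trailing zero bits of
     NITERS, so e.g. a factor of 4 needs at least 2 such bits.  */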
1586   if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo)
1587       || ((int) tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
1588           < exact_log2 (vectorization_factor)))
1589     {
1590       if (dump_enabled_p ())
1591         dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required.\n");
1592       if (!vect_can_advance_ivs_p (loop_vinfo))
1593         {
1594           if (dump_enabled_p ())
1595             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1596                              "not vectorized: can't create epilog loop 1.\n");
1597           return false;
1598         }
1599       if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
1600         {
1601           if (dump_enabled_p ())
1602             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1603                              "not vectorized: can't create epilog loop 2.\n");
1604           return false;
1605         }
1606     }
1607
1608   return true;
1609 }
1610
1611
1612 /* Function vect_analyze_loop_2.
1613
1614    Apply a set of analyses on LOOP, and create a loop_vec_info struct
1615    for it.  The different analyses will record information in the
1616    loop_vec_info struct.  */
1617 static bool
1618 vect_analyze_loop_2 (loop_vec_info loop_vinfo)
1619 {
1620   bool ok, slp = false;
1621   int max_vf = MAX_VECTORIZATION_FACTOR;
1622   int min_vf = 2;
1623
1624   /* Find all data references in the loop (which correspond to vdefs/vuses)
1625      and analyze their evolution in the loop.  Also adjust the minimal
1626      vectorization factor according to the loads and stores.
1627
1628      FORNOW: Handle only simple array references whose alignment
1629      can be forced, and aligned pointer-references.  */
1630
1631   ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf);
1632   if (!ok)
1633     {
1634       if (dump_enabled_p ())
1635         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1636                          "bad data references.\n");
1637       return false;
1638     }
1639
1640   /* Analyze the access patterns of the data-refs in the loop (consecutive,
1641      complex, etc.). FORNOW: Only handle consecutive access pattern.  */
1642
1643   ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
1644   if (!ok)
1645     {
1646       if (dump_enabled_p ())
1647         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1648                          "bad data access.\n");
1649       return false;
1650     }
1651
1652   /* Classify all cross-iteration scalar data-flow cycles.
1653      Cross-iteration cycles caused by virtual phis are analyzed separately.  */
1654
1655   vect_analyze_scalar_cycles (loop_vinfo);
1656
1657   vect_pattern_recog (loop_vinfo, NULL);
1658
1659   /* Data-flow analysis to detect stmts that do not need to be vectorized.  */
1660
1661   ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1662   if (!ok)
1663     {
1664       if (dump_enabled_p ())
1665         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1666                          "unexpected pattern.\n");
1667       return false;
1668     }
1669
1670   /* Analyze data dependences between the data-refs in the loop
1671      and adjust the maximum vectorization factor according to
1672      the dependences.
1673      FORNOW: fail at the first data dependence that we encounter.  */
1674
1675   ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
1676   if (!ok
1677       || max_vf < min_vf)
1678     {
1679       if (dump_enabled_p ())
1680         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1681                          "bad data dependence.\n");
1682       return false;
1683     }
1684
1685   ok = vect_determine_vectorization_factor (loop_vinfo);
1686   if (!ok)
1687     {
1688       if (dump_enabled_p ())
1689         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1690                          "can't determine vectorization factor.\n");
1691       return false;
1692     }
1693   if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
1694     {
1695       if (dump_enabled_p ())
1696         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1697                          "bad data dependence.\n");
1698       return false;
1699     }
1700
1701   /* Analyze the alignment of the data-refs in the loop.
1702      Fail if a data reference is found that cannot be vectorized.  */
1703
1704   ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
1705   if (!ok)
1706     {
1707       if (dump_enabled_p ())
1708         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1709                          "bad data alignment.\n");
1710       return false;
1711     }
1712
1713   /* Prune the list of ddrs to be tested at run-time by versioning for alias.
1714      It is important to call pruning after vect_analyze_data_ref_accesses,
1715      since we use grouping information gathered by interleaving analysis.  */
1716   ok = vect_prune_runtime_alias_test_list (loop_vinfo);
1717   if (!ok)
1718     {
1719       if (dump_enabled_p ())
1720         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1721                          "too long list of versioning for alias "
1722                          "run-time tests.\n");
1723       return false;
1724     }
1725
1726   /* This pass will decide on using loop versioning and/or loop peeling in
1727      order to enhance the alignment of data references in the loop.  */
1728
1729   ok = vect_enhance_data_refs_alignment (loop_vinfo);
1730   if (!ok)
1731     {
1732       if (dump_enabled_p ())
1733         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1734                          "bad data alignment.\n");
1735       return false;
1736     }
1737
1738   /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
1739   ok = vect_analyze_slp (loop_vinfo, NULL);
1740   if (ok)
1741     {
1742       /* Decide which possible SLP instances to SLP.  */
1743       slp = vect_make_slp_decision (loop_vinfo);
1744
1745       /* Find stmts that need to be both vectorized and SLPed.  */
1746       vect_detect_hybrid_slp (loop_vinfo);
1747     }
1748   else
1749     return false;
1750
1751   /* Scan all the operations in the loop and make sure they are
1752      vectorizable.  */
1753
1754   ok = vect_analyze_loop_operations (loop_vinfo, slp);
1755   if (!ok)
1756     {
1757       if (dump_enabled_p ())
1758         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1759                          "bad operation or unsupported loop bound.\n");
1760       return false;
1761     }
1762
1763   return true;
1764 }
1765
1766 /* Function vect_analyze_loop.
1767
1768    Apply a set of analyses on LOOP, and create a loop_vec_info struct
1769    for it.  The different analyses will record information in the
1770    loop_vec_info struct.  */
1771 loop_vec_info
1772 vect_analyze_loop (struct loop *loop)
1773 {
1774   loop_vec_info loop_vinfo;
1775   unsigned int vector_sizes;
1776
1777   /* Autodetect first vector size we try.  */
1778   current_vector_size = 0;
1779   vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
1780
1781   if (dump_enabled_p ())
1782     dump_printf_loc (MSG_NOTE, vect_location,
1783                      "===== analyze_loop_nest =====\n");
1784
1785   if (loop_outer (loop)
1786       && loop_vec_info_for_loop (loop_outer (loop))
1787       && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
1788     {
1789       if (dump_enabled_p ())
1790         dump_printf_loc (MSG_NOTE, vect_location,
1791                          "outer-loop already vectorized.\n");
1792       return NULL;
1793     }
1794
1795   while (1)
1796     {
1797       /* Check the CFG characteristics of the loop (nesting, entry/exit).  */
1798       loop_vinfo = vect_analyze_loop_form (loop);
1799       if (!loop_vinfo)
1800         {
1801           if (dump_enabled_p ())
1802             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1803                              "bad loop form.\n");
1804           return NULL;
1805         }
1806
1807       if (vect_analyze_loop_2 (loop_vinfo))
1808         {
1809           LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
1810
1811           return loop_vinfo;
1812         }
1813
1814       destroy_loop_vec_info (loop_vinfo, true);
1815
1816       vector_sizes &= ~current_vector_size;
1817       if (vector_sizes == 0
1818           || current_vector_size == 0)
1819         return NULL;
1820
1821       /* Try the next biggest vector size.  */
1822       current_vector_size = 1 << floor_log2 (vector_sizes);
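      /* Illustrative example (hypothetical target): if the target reports
         both 32-byte and 16-byte vector sizes and the 32-byte attempt just
         failed, only 16 remains in the mask, so the next attempt uses
         16-byte vectors.  */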
1823       if (dump_enabled_p ())
1824         dump_printf_loc (MSG_NOTE, vect_location,
1825                          "***** Re-trying analysis with "
1826                          "vector size %d\n", current_vector_size);
1827     }
1828 }
1829
1830
1831 /* Function reduction_code_for_scalar_code
1832
1833    Input:
1834    CODE - tree_code of a reduction operation.
1835
1836    Output:
1837    REDUC_CODE - the corresponding tree-code to be used to reduce the
1838       vector of partial results into a single scalar result (which
1839       will also reside in a vector) or ERROR_MARK if the operation is
1840       a supported reduction operation, but does not have such a tree-code.
1841
1842    Return FALSE if CODE currently cannot be vectorized as a reduction.  */
1843
1844 static bool
1845 reduction_code_for_scalar_code (enum tree_code code,
1846                                 enum tree_code *reduc_code)
1847 {
1848   switch (code)
1849     {
1850       case MAX_EXPR:
1851         *reduc_code = REDUC_MAX_EXPR;
1852         return true;
1853
1854       case MIN_EXPR:
1855         *reduc_code = REDUC_MIN_EXPR;
1856         return true;
1857
1858       case PLUS_EXPR:
1859         *reduc_code = REDUC_PLUS_EXPR;
1860         return true;
1861
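      /* For the codes below there is no single reduction tree-code; the
         caller is then expected to combine the vector of partial results
         element by element in the loop epilogue (an assumption about the
         caller, not enforced here).  */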
1862       case MULT_EXPR:
1863       case MINUS_EXPR:
1864       case BIT_IOR_EXPR:
1865       case BIT_XOR_EXPR:
1866       case BIT_AND_EXPR:
1867         *reduc_code = ERROR_MARK;
1868         return true;
1869
1870       default:
1871        return false;
1872     }
1873 }
1874
1875
1876 /* Error reporting helper for vect_is_simple_reduction_1 below.  GIMPLE
1877    statement STMT is printed with a message MSG.  */
1878
1879 static void
1880 report_vect_op (int msg_type, gimple stmt, const char *msg)
1881 {
1882   dump_printf_loc (msg_type, vect_location, "%s", msg);
1883   dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
1884   dump_printf (msg_type, "\n");
1885 }
1886
1887
1888 /* Detect SLP reduction of the form:
1889
1890    #a1 = phi <a5, a0>
1891    a2 = operation (a1)
1892    a3 = operation (a2)
1893    a4 = operation (a3)
1894    a5 = operation (a4)
1895
1896    #a = phi <a5>
1897
1898    PHI is the reduction phi node (#a1 = phi <a5, a0> above)
1899    FIRST_STMT is the first reduction stmt in the chain
1900    (a2 = operation (a1)).
1901
1902    Return TRUE if a reduction chain was detected.  */
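/* Illustrative source (hypothetical) that gimplifies into such a chain:

     sum = sum + a[4*i] + a[4*i+1] + a[4*i+2] + a[4*i+3];

   i.e. four chained additions per iteration feeding the reduction phi.  */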
1903
1904 static bool
1905 vect_is_slp_reduction (loop_vec_info loop_info, gimple phi, gimple first_stmt)
1906 {
1907   struct loop *loop = (gimple_bb (phi))->loop_father;
1908   struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
1909   enum tree_code code;
1910   gimple current_stmt = NULL, loop_use_stmt = NULL, first, next_stmt;
1911   stmt_vec_info use_stmt_info, current_stmt_info;
1912   tree lhs;
1913   imm_use_iterator imm_iter;
1914   use_operand_p use_p;
1915   int nloop_uses, size = 0, n_out_of_loop_uses;
1916   bool found = false;
1917
1918   if (loop != vect_loop)
1919     return false;
1920
1921   lhs = PHI_RESULT (phi);
1922   code = gimple_assign_rhs_code (first_stmt);
1923   while (1)
1924     {
1925       nloop_uses = 0;
1926       n_out_of_loop_uses = 0;
1927       FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
1928         {
1929           gimple use_stmt = USE_STMT (use_p);
1930           if (is_gimple_debug (use_stmt))
1931             continue;
1932
1935           /* Check if we got back to the reduction phi.  */
1936           if (use_stmt == phi)
1937             {
1938               loop_use_stmt = use_stmt;
1939               found = true;
1940               break;
1941             }
1942
1943           if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
1944             {
1945               if (vinfo_for_stmt (use_stmt)
1946                   && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
1947                 {
1948                   loop_use_stmt = use_stmt;
1949                   nloop_uses++;
1950                 }
1951             }
1952            else
1953              n_out_of_loop_uses++;
1954
1955            /* There can be either a single use in the loop or two uses in
1956               phi nodes.  */
1957            if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
1958              return false;
1959         }
1960
1961       if (found)
1962         break;
1963
1964       /* We reached a statement with no loop uses.  */
1965       if (nloop_uses == 0)
1966         return false;
1967
1968       /* This is a loop exit phi, and we haven't reached the reduction phi.  */
1969       if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
1970         return false;
1971
1972       if (!is_gimple_assign (loop_use_stmt)
1973           || code != gimple_assign_rhs_code (loop_use_stmt)
1974           || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
1975         return false;
1976
1977       /* Insert USE_STMT into reduction chain.  */
1978       use_stmt_info = vinfo_for_stmt (loop_use_stmt);
1979       if (current_stmt)
1980         {
1981           current_stmt_info = vinfo_for_stmt (current_stmt);
1982           GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
1983           GROUP_FIRST_ELEMENT (use_stmt_info)
1984             = GROUP_FIRST_ELEMENT (current_stmt_info);
1985         }
1986       else
1987         GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;
1988
1989       lhs = gimple_assign_lhs (loop_use_stmt);
1990       current_stmt = loop_use_stmt;
1991       size++;
1992    }
1993
1994   if (!found || loop_use_stmt != phi || size < 2)
1995     return false;
1996
1997   /* Swap the operands, if needed, to make the reduction operand be the second
1998      operand.  */
1999   lhs = PHI_RESULT (phi);
2000   next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
2001   while (next_stmt)
2002     {
2003       if (gimple_assign_rhs2 (next_stmt) == lhs)
2004         {
2005           tree op = gimple_assign_rhs1 (next_stmt);
2006           gimple def_stmt = NULL;
2007
2008           if (TREE_CODE (op) == SSA_NAME)
2009             def_stmt = SSA_NAME_DEF_STMT (op);
2010
2011           /* Check that the other def is either defined in the loop
2012              ("vect_internal_def"), or it's an induction (defined by a
2013              loop-header phi-node).  */
2014           if (def_stmt
2015               && gimple_bb (def_stmt)
2016               && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2017               && (is_gimple_assign (def_stmt)
2018                   || is_gimple_call (def_stmt)
2019                   || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2020                            == vect_induction_def
2021                   || (gimple_code (def_stmt) == GIMPLE_PHI
2022                       && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2023                                   == vect_internal_def
2024                       && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
2025             {
2026               lhs = gimple_assign_lhs (next_stmt);
2027               next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2028               continue;
2029             }
2030
2031           return false;
2032         }
2033       else
2034         {
2035           tree op = gimple_assign_rhs2 (next_stmt);
2036           gimple def_stmt = NULL;
2037
2038           if (TREE_CODE (op) == SSA_NAME)
2039             def_stmt = SSA_NAME_DEF_STMT (op);
2040
2041           /* Check that the other def is either defined in the loop
2042             ("vect_internal_def"), or it's an induction (defined by a
2043             loop-header phi-node).  */
2044           if (def_stmt
2045               && gimple_bb (def_stmt)
2046               && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2047               && (is_gimple_assign (def_stmt)
2048                   || is_gimple_call (def_stmt)
2049                   || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2050                               == vect_induction_def
2051                   || (gimple_code (def_stmt) == GIMPLE_PHI
2052                       && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
2053                                   == vect_internal_def
2054                       && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
2055             {
2056               if (dump_enabled_p ())
2057                 {
2058                   dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
2059                   dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
2060                   dump_printf (MSG_NOTE, "\n");
2061                 }
2062
2063               swap_ssa_operands (next_stmt,
2064                                  gimple_assign_rhs1_ptr (next_stmt),
2065                                  gimple_assign_rhs2_ptr (next_stmt));
2066               update_stmt (next_stmt);
2067
2068               if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
2069                 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2070             }
2071           else
2072             return false;
2073         }
2074
2075       lhs = gimple_assign_lhs (next_stmt);
2076       next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2077     }
2078
2079   /* Save the chain for further analysis in SLP detection.  */
2080   first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
2081   LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
2082   GROUP_SIZE (vinfo_for_stmt (first)) = size;
2083
2084   return true;
2085 }
2086
2087
2088 /* Function vect_is_simple_reduction_1
2089
2090    (1) Detect a cross-iteration def-use cycle that represents a simple
2091    reduction computation.  We look for the following pattern:
2092
2093    loop_header:
2094      a1 = phi < a0, a2 >
2095      a3 = ...
2096      a2 = operation (a3, a1)
2097
2098    or
2099
2100    a3 = ...
2101    loop_header:
2102      a1 = phi < a0, a2 >
2103      a2 = operation (a3, a1)
2104
2105    such that:
2106    1. operation is commutative and associative and it is safe to
2107       change the order of the computation (if CHECK_REDUCTION is true)
2108    2. no uses for a2 in the loop (a2 is used out of the loop)
2109    3. no uses of a1 in the loop besides the reduction operation
2110    4. no uses of a1 outside the loop.
2111
2112    Conditions 1,4 are tested here.
2113    Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
2114
2115    (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
2116    nested cycles, if CHECK_REDUCTION is false.
2117
2118    (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
2119    reductions:
2120
2121      a1 = phi < a0, a2 >
2122      inner loop (def of a3)
2123      a2 = phi < a3 >
2124
2125    If MODIFY is true it also tries to rework the code in-place to enable
2126    detection of more reduction patterns.  For the time being we rewrite
2127    "res -= RHS" into "res += -RHS" when it seems worthwhile.
2128 */
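/* Illustrative source (hypothetical) of a double reduction, the case (3)
   cycle above, seen when vectorizing the outer loop:

     for (i = 0; i < n; i++)
       for (j = 0; j < m; j++)
         sum += a[i][j];  */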
2129
2130 static gimple
2131 vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
2132                             bool check_reduction, bool *double_reduc,
2133                             bool modify)
2134 {
2135   struct loop *loop = (gimple_bb (phi))->loop_father;
2136   struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2137   edge latch_e = loop_latch_edge (loop);
2138   tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
2139   gimple def_stmt, def1 = NULL, def2 = NULL;
2140   enum tree_code orig_code, code;
2141   tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
2142   tree type;
2143   int nloop_uses;
2144   tree name;
2145   imm_use_iterator imm_iter;
2146   use_operand_p use_p;
2147   bool phi_def;
2148
2149   *double_reduc = false;
2150
2151   /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
2152      otherwise, we assume outer loop vectorization.  */
2153   gcc_assert ((check_reduction && loop == vect_loop)
2154               || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));
2155
2156   name = PHI_RESULT (phi);
2157   nloop_uses = 0;
2158   FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2159     {
2160       gimple use_stmt = USE_STMT (use_p);
2161       if (is_gimple_debug (use_stmt))
2162         continue;
2163
2164       if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2165         {
2166           if (dump_enabled_p ())
2167             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2168                              "intermediate value used outside loop.\n");
2169
2170           return NULL;
2171         }
2172
2173       if (vinfo_for_stmt (use_stmt)
2174           && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2175         nloop_uses++;
2176       if (nloop_uses > 1)
2177         {
2178           if (dump_enabled_p ())
2179             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2180                              "reduction used in loop.\n");
2181           return NULL;
2182         }
2183     }
2184
2185   if (TREE_CODE (loop_arg) != SSA_NAME)
2186     {
2187       if (dump_enabled_p ())
2188         {
2189           dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2190                            "reduction: not ssa_name: ");
2191           dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
2192           dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2193         }
2194       return NULL;
2195     }
2196
2197   def_stmt = SSA_NAME_DEF_STMT (loop_arg);
2198   if (!def_stmt)
2199     {
2200       if (dump_enabled_p ())
2201         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2202                          "reduction: no def_stmt.\n");
2203       return NULL;
2204     }
2205
2206   if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
2207     {
2208       if (dump_enabled_p ())
2209         {
2210           dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
2211           dump_printf (MSG_NOTE, "\n");
2212         }
2213       return NULL;
2214     }
2215
2216   if (is_gimple_assign (def_stmt))
2217     {
2218       name = gimple_assign_lhs (def_stmt);
2219       phi_def = false;
2220     }
2221   else
2222     {
2223       name = PHI_RESULT (def_stmt);
2224       phi_def = true;
2225     }
2226
2227   nloop_uses = 0;
2228   FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2229     {
2230       gimple use_stmt = USE_STMT (use_p);
2231       if (is_gimple_debug (use_stmt))
2232         continue;
2233       if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2234           && vinfo_for_stmt (use_stmt)
2235           && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
2236         nloop_uses++;
2237       if (nloop_uses > 1)
2238         {
2239           if (dump_enabled_p ())
2240             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2241                              "reduction used in loop.\n");
2242           return NULL;
2243         }
2244     }
2245
2246   /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2247      defined in the inner loop.  */
2248   if (phi_def)
2249     {
2250       op1 = PHI_ARG_DEF (def_stmt, 0);
2251
2252       if (gimple_phi_num_args (def_stmt) != 1
2253           || TREE_CODE (op1) != SSA_NAME)
2254         {
2255           if (dump_enabled_p ())
2256             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2257                              "unsupported phi node definition.\n");
2258
2259           return NULL;
2260         }
2261
2262       def1 = SSA_NAME_DEF_STMT (op1);
2263       if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
2264           && loop->inner
2265           && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
2266           && is_gimple_assign (def1))
2267         {
2268           if (dump_enabled_p ())
2269             report_vect_op (MSG_NOTE, def_stmt,
2270                             "detected double reduction: ");
2271
2272           *double_reduc = true;
2273           return def_stmt;
2274         }
2275
2276       return NULL;
2277     }
2278
2279   code = orig_code = gimple_assign_rhs_code (def_stmt);
2280
2281   /* We can handle "res -= x[i]", which is non-associative, by
2282      simply rewriting it into "res += -x[i]".  Avoid changing the
2283      gimple instruction during the first simple tests and only do this
2284      if we're allowed to change code at all.  */
2285   if (code == MINUS_EXPR
2286       && modify
2287       && (op1 = gimple_assign_rhs1 (def_stmt))
2288       && TREE_CODE (op1) == SSA_NAME
2289       && SSA_NAME_DEF_STMT (op1) == phi)
2290     code = PLUS_EXPR;
2291
2292   if (check_reduction
2293       && (!commutative_tree_code (code) || !associative_tree_code (code)))
2294     {
2295       if (dump_enabled_p ())
2296         report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2297                         "reduction: not commutative/associative: ");
2298       return NULL;
2299     }
2300
2301   if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
2302     {
2303       if (code != COND_EXPR)
2304         {
2305           if (dump_enabled_p ())
2306             report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2307                             "reduction: not binary operation: ");
2308
2309           return NULL;
2310         }
2311
2312       op3 = gimple_assign_rhs1 (def_stmt);
2313       if (COMPARISON_CLASS_P (op3))
2314         {
2315           op4 = TREE_OPERAND (op3, 1);
2316           op3 = TREE_OPERAND (op3, 0);
2317         }
2318
2319       op1 = gimple_assign_rhs2 (def_stmt);
2320       op2 = gimple_assign_rhs3 (def_stmt);
2321
2322       if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2323         {
2324           if (dump_enabled_p ())
2325             report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2326                             "reduction: uses not ssa_names: ");
2327
2328           return NULL;
2329         }
2330     }
2331   else
2332     {
2333       op1 = gimple_assign_rhs1 (def_stmt);
2334       op2 = gimple_assign_rhs2 (def_stmt);
2335
2336       if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
2337         {
2338           if (dump_enabled_p ())
2339             report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2340                             "reduction: uses not ssa_names: ");
2341
2342           return NULL;
2343         }
2344    }
2345
2346   type = TREE_TYPE (gimple_assign_lhs (def_stmt));
2347   if ((TREE_CODE (op1) == SSA_NAME
2348        && !types_compatible_p (type,TREE_TYPE (op1)))
2349       || (TREE_CODE (op2) == SSA_NAME
2350           && !types_compatible_p (type, TREE_TYPE (op2)))
2351       || (op3 && TREE_CODE (op3) == SSA_NAME
2352           && !types_compatible_p (type, TREE_TYPE (op3)))
2353       || (op4 && TREE_CODE (op4) == SSA_NAME
2354           && !types_compatible_p (type, TREE_TYPE (op4))))
2355     {
2356       if (dump_enabled_p ())
2357         {
2358           dump_printf_loc (MSG_NOTE, vect_location,
2359                            "reduction: multiple types: operation type: ");
2360           dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
2361           dump_printf (MSG_NOTE, ", operands types: ");
2362           dump_generic_expr (MSG_NOTE, TDF_SLIM,
2363                              TREE_TYPE (op1));
2364           dump_printf (MSG_NOTE, ",");
2365           dump_generic_expr (MSG_NOTE, TDF_SLIM,
2366                              TREE_TYPE (op2));
2367           if (op3)
2368             {
2369               dump_printf (MSG_NOTE, ",");
2370               dump_generic_expr (MSG_NOTE, TDF_SLIM,
2371                                  TREE_TYPE (op3));
2372             }
2373
2374           if (op4)
2375             {
2376               dump_printf (MSG_NOTE, ",");
2377               dump_generic_expr (MSG_NOTE, TDF_SLIM,
2378                                  TREE_TYPE (op4));
2379             }
2380           dump_printf (MSG_NOTE, "\n");
2381         }
2382
2383       return NULL;
2384     }
2385
2386   /* Check that it's ok to change the order of the computation.
2387      Generally, when vectorizing a reduction we change the order of the
2388      computation.  This may change the behavior of the program in some
2389      cases, so we need to check that this is ok.  One exception is when
2390      vectorizing an outer-loop: the inner-loop is executed sequentially,
2391      and therefore vectorizing reductions in the inner-loop during
2392      outer-loop vectorization is safe.  */
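  /* Illustrative consequence (hypothetical values): with a vectorization
     factor of 2, a float sum a0 + a1 + a2 + a3 is evaluated as the partial
     sums (a0 + a2) + (a1 + a3), which may round differently; hence the
     -fassociative-math requirement checked below.  */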
2393
2394   /* CHECKME: check for !flag_finite_math_only too?  */
2395   if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
2396       && check_reduction)
2397     {
2398       /* Changing the order of operations changes the semantics.  */
2399       if (dump_enabled_p ())
2400         report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2401                         "reduction: unsafe fp math optimization: ");
2402       return NULL;
2403     }
2404   else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
2405            && check_reduction)
2406     {
2407       /* Changing the order of operations changes the semantics.  */
2408       if (dump_enabled_p ())
2409         report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2410                         "reduction: unsafe int math optimization: ");
2411       return NULL;
2412     }
2413   else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
2414     {
2415       /* Changing the order of operations changes the semantics.  */
2416       if (dump_enabled_p ())
2417         report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2418                         "reduction: unsafe fixed-point math optimization: ");
2419       return NULL;
2420     }
2421
2422   /* If we detected "res -= x[i]" earlier, rewrite it into
2423      "res += -x[i]" now.  If this turns out to be useless reassoc
2424      will clean it up again.  */
2425   if (orig_code == MINUS_EXPR)
2426     {
2427       tree rhs = gimple_assign_rhs2 (def_stmt);
2428       tree negrhs = make_ssa_name (TREE_TYPE (rhs), NULL);
2429       gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
2430                                                          rhs, NULL);
2431       gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
2432       set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt, 
2433                                                           loop_info, NULL));
2434       gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
2435       gimple_assign_set_rhs2 (def_stmt, negrhs);
2436       gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
2437       update_stmt (def_stmt);
2438     }
2439
2440   /* Reduction is safe. We're dealing with one of the following:
2441      1) integer arithmetic and no trapv
2442      2) floating point arithmetic, and special flags permit this optimization
2443      3) nested cycle (i.e., outer loop vectorization).  */
2444   if (TREE_CODE (op1) == SSA_NAME)
2445     def1 = SSA_NAME_DEF_STMT (op1);
2446
2447   if (TREE_CODE (op2) == SSA_NAME)
2448     def2 = SSA_NAME_DEF_STMT (op2);
2449
2450   if (code != COND_EXPR
2451       && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
2452     {
2453       if (dump_enabled_p ())
2454         report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
2455       return NULL;
2456     }
2457
2458   /* Check that one def is the reduction def, defined by PHI,
2459      the other def is either defined in the loop ("vect_internal_def"),
2460      or it's an induction (defined by a loop-header phi-node).  */
2461
2462   if (def2 && def2 == phi
2463       && (code == COND_EXPR
2464           || !def1 || gimple_nop_p (def1)
2465           || !flow_bb_inside_loop_p (loop, gimple_bb (def1))
2466           || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
2467               && (is_gimple_assign (def1)
2468                   || is_gimple_call (def1)
2469                   || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2470                       == vect_induction_def
2471                   || (gimple_code (def1) == GIMPLE_PHI
2472                       && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
2473                           == vect_internal_def
2474                       && !is_loop_header_bb_p (gimple_bb (def1)))))))
2475     {
2476       if (dump_enabled_p ())
2477         report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
2478       return def_stmt;
2479     }
2480
2481   if (def1 && def1 == phi
2482       && (code == COND_EXPR
2483           || !def2 || gimple_nop_p (def2)
2484           || !flow_bb_inside_loop_p (loop, gimple_bb (def2))
2485           || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
2486               && (is_gimple_assign (def2)
2487                   || is_gimple_call (def2)
2488                   || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2489                       == vect_induction_def
2490                   || (gimple_code (def2) == GIMPLE_PHI
2491                       && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
2492                           == vect_internal_def
2493                       && !is_loop_header_bb_p (gimple_bb (def2)))))))
2494     {
2495       if (check_reduction)
2496         {
2497           /* Swap operands (just for simplicity - so that the rest of the code
2498              can assume that the reduction variable is always the last (second)
2499              argument).  */
2500           if (dump_enabled_p ())
2501             report_vect_op (MSG_NOTE, def_stmt,
2502                             "detected reduction: need to swap operands: ");
2503
2504           swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
2505                              gimple_assign_rhs2_ptr (def_stmt));
2506
2507           if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
2508             LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2509         }
2510       else
2511         {
2512           if (dump_enabled_p ())
2513             report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
2514         }
2515
2516       return def_stmt;
2517     }
2518
2519   /* Try to find SLP reduction chain.  */
2520   if (check_reduction && vect_is_slp_reduction (loop_info, phi, def_stmt))
2521     {
2522       if (dump_enabled_p ())
2523         report_vect_op (MSG_NOTE, def_stmt,
2524                         "reduction: detected reduction chain: ");
2525
2526       return def_stmt;
2527     }
2528
2529   if (dump_enabled_p ())
2530     report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
2531                     "reduction: unknown pattern: ");
2532        
2533   return NULL;
2534 }
2535
2536 /* Wrapper around vect_is_simple_reduction_1, that won't modify code
2537    in-place.  Arguments as there.  */
2538
2539 static gimple
2540 vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
2541                           bool check_reduction, bool *double_reduc)
2542 {
2543   return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2544                                      double_reduc, false);
2545 }
2546
2547 /* Wrapper around vect_is_simple_reduction_1, which will modify code
2548    in-place if it enables detection of more reductions.  Arguments
2549    as there.  */
2550
2551 gimple
2552 vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
2553                           bool check_reduction, bool *double_reduc)
2554 {
2555   return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
2556                                      double_reduc, true);
2557 }
2558
2559 /* Calculate the cost of one scalar iteration of the loop.  */
2560 int
2561 vect_get_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
2562 {
2563   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2564   basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
2565   int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
2566   int innerloop_iters, i, stmt_cost;
2567
2568   /* Count statements in scalar loop.  Using this as scalar cost for a single
2569      iteration for now.
2570
2571      TODO: Add outer loop support.
2572
2573      TODO: Consider assigning different costs to different scalar
2574      statements.  */
2575
2576   /* FORNOW.  */
2577   innerloop_iters = 1;
2578   if (loop->inner)
2579     innerloop_iters = 50; /* FIXME */
2580
2581   for (i = 0; i < nbbs; i++)
2582     {
2583       gimple_stmt_iterator si;
2584       basic_block bb = bbs[i];
2585
2586       if (bb->loop_father == loop->inner)
2587         factor = innerloop_iters;
2588       else
2589         factor = 1;
2590
2591       for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2592         {
2593           gimple stmt = gsi_stmt (si);
2594           stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2595
2596           if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
2597             continue;
2598
2599           /* Skip stmts that are not vectorized inside the loop.  */
2600           if (stmt_info
2601               && !STMT_VINFO_RELEVANT_P (stmt_info)
2602               && (!STMT_VINFO_LIVE_P (stmt_info)
2603                   || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
2604               && !STMT_VINFO_IN_PATTERN_P (stmt_info))
2605             continue;
2606
2607           if (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
2608             {
2609               if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
2610                 stmt_cost = vect_get_stmt_cost (scalar_load);
2611               else
2612                 stmt_cost = vect_get_stmt_cost (scalar_store);
2613             }
2614           else
2615             stmt_cost = vect_get_stmt_cost (scalar_stmt);
2616
2617           scalar_single_iter_cost += stmt_cost * factor;
2618         }
2619     }
2620   return scalar_single_iter_cost;
2621 }
2622
2623 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times.  */
2624 int
2625 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
2626                              int *peel_iters_epilogue,
2627                              int scalar_single_iter_cost,
2628                              stmt_vector_for_cost *prologue_cost_vec,
2629                              stmt_vector_for_cost *epilogue_cost_vec)
2630 {
2631   int retval = 0;
2632   int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2633
2634   if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2635     {
2636       *peel_iters_epilogue = vf/2;
2637       if (dump_enabled_p ())
2638         dump_printf_loc (MSG_NOTE, vect_location,
2639                          "cost model: epilogue peel iters set to vf/2 "
2640                          "because loop iterations are unknown.\n");
2641
2642       /* If peeled iterations are known but the number of scalar loop
2643          iterations is unknown, count a taken branch per peeled loop.  */
2644       retval = record_stmt_cost (prologue_cost_vec, 2, cond_branch_taken,
2645                                  NULL, 0, vect_prologue);
2646     }
2647   else
2648     {
2649       int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
2650       peel_iters_prologue = niters < peel_iters_prologue ?
2651                             niters : peel_iters_prologue;
2652       *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
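      /* E.g. (illustrative values): niters = 100, peel_iters_prologue = 3
         and vf = 4 give (100 - 3) % 4 = 1 epilogue iteration.  */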
2653       /* If we need to peel for gaps but the computed epilogue peel count
2654          is zero, we still have to peel VF iterations.  */
2655       if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
2656         *peel_iters_epilogue = vf;
2657     }
2658
2659   if (peel_iters_prologue)
2660     retval += record_stmt_cost (prologue_cost_vec,
2661                                 peel_iters_prologue * scalar_single_iter_cost,
2662                                 scalar_stmt, NULL, 0, vect_prologue);
2663   if (*peel_iters_epilogue)
2664     retval += record_stmt_cost (epilogue_cost_vec,
2665                                 *peel_iters_epilogue * scalar_single_iter_cost,
2666                                 scalar_stmt, NULL, 0, vect_epilogue);
2667   return retval;
2668 }
2669
2670 /* Function vect_estimate_min_profitable_iters
2671
2672    Return the number of iterations required for the vector version of the
2673    loop to be profitable relative to the cost of the scalar version of the
2674    loop.  */
2675
2676 static void
2677 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
2678                                     int *ret_min_profitable_niters,
2679                                     int *ret_min_profitable_estimate)
2680 {
2681   int min_profitable_iters;
2682   int min_profitable_estimate;
2683   int peel_iters_prologue;
2684   int peel_iters_epilogue;
2685   unsigned vec_inside_cost = 0;
2686   int vec_outside_cost = 0;
2687   unsigned vec_prologue_cost = 0;
2688   unsigned vec_epilogue_cost = 0;
2689   int scalar_single_iter_cost = 0;
2690   int scalar_outside_cost = 0;
2691   int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2692   int npeel = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
2693   void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2694
2695   /* Cost model disabled.  */
2696   if (unlimited_cost_model ())
2697     {
2698       dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
2699       *ret_min_profitable_niters = 0;
2700       *ret_min_profitable_estimate = 0;
2701       return;
2702     }
2703
2704   /* Requires loop versioning tests to handle misalignment.  */
2705   if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2706     {
2707       /*  FIXME: Make cost depend on complexity of individual check.  */
2708       unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
2709       (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
2710                             vect_prologue);
2711       dump_printf (MSG_NOTE,
2712                    "cost model: Adding cost of checks for loop "
2713                    "versioning to treat misalignment.\n");
2714     }
2715
2716   /* Requires loop versioning with alias checks.  */
2717   if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2718     {
2719       /*  FIXME: Make cost depend on complexity of individual check.  */
2720       unsigned len = LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).length ();
2721       (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
2722                             vect_prologue);
2723       dump_printf (MSG_NOTE,
2724                    "cost model: Adding cost of checks for loop "
2725                    "versioning aliasing.\n");
2726     }
2727
2728   if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2729       || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2730     (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
2731                           vect_prologue);
2732
2733   /* Count statements in scalar loop.  Using this as scalar cost for a single
2734      iteration for now.
2735
2736      TODO: Add outer loop support.
2737
2738      TODO: Consider assigning different costs to different scalar
2739      statements.  */
2740
2741   scalar_single_iter_cost = vect_get_single_scalar_iteration_cost (loop_vinfo);
2742
2743   /* Add additional cost for the peeled instructions in prologue and epilogue
2744      loop.
2745
2746      FORNOW: If we don't know the value of peel_iters for prologue or epilogue
2747      at compile time, we assume it's vf/2 (the worst case would be vf-1).
2748
2749      TODO: Build an expression that represents peel_iters for prologue and
2750      epilogue to be used in a run-time test.  */
2751
2752   if (npeel < 0)
2753     {
2754       peel_iters_prologue = vf/2;
2755       dump_printf (MSG_NOTE, "cost model: "
2756                    "prologue peel iters set to vf/2.\n");
2757
2758       /* If peeling for alignment is unknown, the loop bound of the main
2759          loop becomes unknown.  */
2760       peel_iters_epilogue = vf/2;
2761       dump_printf (MSG_NOTE, "cost model: "
2762                    "epilogue peel iters set to vf/2 because "
2763                    "peeling for alignment is unknown.\n");
2764
2765       /* If peeled iterations are unknown, count a taken branch and a not taken
2766          branch per peeled loop. Even if scalar loop iterations are known,
2767          vector iterations are not known since peeled prologue iterations are
2768          not known. Hence guards remain the same.  */
2769       (void) add_stmt_cost (target_cost_data, 2, cond_branch_taken,
2770                             NULL, 0, vect_prologue);
2771       (void) add_stmt_cost (target_cost_data, 2, cond_branch_not_taken,
2772                             NULL, 0, vect_prologue);
2773       /* FORNOW: Don't attempt to pass individual scalar instructions to
2774          the model; just assume linear cost for scalar iterations.  */
2775       (void) add_stmt_cost (target_cost_data,
2776                             peel_iters_prologue * scalar_single_iter_cost,
2777                             scalar_stmt, NULL, 0, vect_prologue);
2778       (void) add_stmt_cost (target_cost_data, 
2779                             peel_iters_epilogue * scalar_single_iter_cost,
2780                             scalar_stmt, NULL, 0, vect_epilogue);
2781     }
2782   else
2783     {
2784       stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
2785       stmt_info_for_cost *si;
2786       int j;
2787       void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2788
2789       prologue_cost_vec.create (2);
2790       epilogue_cost_vec.create (2);
2791       peel_iters_prologue = npeel;
2792
2793       (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
2794                                           &peel_iters_epilogue,
2795                                           scalar_single_iter_cost,
2796                                           &prologue_cost_vec,
2797                                           &epilogue_cost_vec);
2798
2799       FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
2800         {
2801           struct _stmt_vec_info *stmt_info
2802             = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
2803           (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
2804                                 si->misalign, vect_prologue);
2805         }
2806
2807       FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
2808         {
2809           struct _stmt_vec_info *stmt_info
2810             = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
2811           (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
2812                                 si->misalign, vect_epilogue);
2813         }
2814
2815       prologue_cost_vec.release ();
2816       epilogue_cost_vec.release ();
2817     }
2818
2819   /* FORNOW: The scalar outside cost is incremented in one of the
2820      following ways:
2821
2822      1. The vectorizer checks for alignment and aliasing and generates
2823      a condition that allows dynamic vectorization.  A cost model
2824      check is ANDED with the versioning condition.  Hence scalar code
2825      path now has the added cost of the versioning check.
2826
2827        if (cost > th & versioning_check)
2828          jmp to vector code
2829
2830      Hence run-time scalar is incremented by not-taken branch cost.
2831
2832      2. The vectorizer then checks if a prologue is required.  If the
2833      cost model check was not done before during versioning, it has to
2834      be done before the prologue check.
2835
2836        if (cost <= th)
2837          prologue = scalar_iters
2838        if (prologue == 0)
2839          jmp to vector code
2840        else
2841          execute prologue
2842        if (prologue == num_iters)
2843          go to exit
2844
2845      Hence the run-time scalar cost is incremented by a taken branch,
2846      plus a not-taken branch, plus a taken branch cost.
2847
2848      3. The vectorizer then checks if an epilogue is required.  If the
2849      cost model check was not done before during prologue check, it
2850      has to be done with the epilogue check.
2851
2852        if (prologue == 0)
2853          jmp to vector code
2854        else
2855          execute prologue
2856        if (prologue == num_iters)
2857          go to exit
2858        vector code:
2859          if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
2860            jmp to epilogue
2861
2862      Hence the run-time scalar cost should be incremented by 2 taken
2863      branches.
2864
2865      TODO: The back end may reorder the BBs differently and reverse
2866      conditions/branch directions.  Change the estimates below to
2867      something more reasonable.  */
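
  /* Summarizing the three cases as they are accounted for below (a rough
     mapping onto the code, not an exact model of the generated control flow):
       - versioning check present:        one not-taken branch;
       - check done at prologue creation: two taken plus one not-taken branch;
       - check done at epilogue creation: two taken branches.  */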
2868
2869   /* If the number of iterations is known and we do not do versioning, we can
2870      decide whether to vectorize at compile time.  Hence the scalar version
2871      does not carry cost model guard costs.  */
2872   if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2873       || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2874       || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2875     {
2876       /* Cost model check occurs at versioning.  */
2877       if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
2878           || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2879         scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
2880       else
2881         {
2882           /* Cost model check occurs at prologue generation.  */
2883           if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2884             scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
2885               + vect_get_stmt_cost (cond_branch_not_taken); 
2886           /* Cost model check occurs at epilogue generation.  */
2887           else
2888             scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken); 
2889         }
2890     }
2891
2892   /* Complete the target-specific cost calculations.  */
2893   finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
2894                &vec_inside_cost, &vec_epilogue_cost);
2895
2896   vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
2897   
2898   /* Calculate number of iterations required to make the vector version
2899      profitable, relative to the loop bodies only.  The following condition
2900      must hold true:
2901      SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
2902      where
2903      SIC = scalar iteration cost, VIC = vector iteration cost,
2904      VOC = vector outside cost, VF = vectorization factor,
2905      PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations
2906      SOC = scalar outside cost for run time cost model check.  */
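
  /* Rearranging the condition above (multiply both sides by VF and collect
     the niters terms) gives the closed form computed below:

       niters > ((VOC - SOC) * VF - VIC * (PL_ITERS + EP_ITERS))
                / (SIC * VF - VIC)

     which is only meaningful when SIC * VF > VIC, i.e. when VF scalar
     iterations cost more than one vector iteration; this is the guard
     tested below.  */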
2907
2908   if ((scalar_single_iter_cost * vf) > (int) vec_inside_cost)
2909     {
2910       if (vec_outside_cost <= 0)
2911         min_profitable_iters = 1;
2912       else
2913         {
2914           min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
2915                                   - vec_inside_cost * peel_iters_prologue
2916                                   - vec_inside_cost * peel_iters_epilogue)
2917                                  / ((scalar_single_iter_cost * vf)
2918                                     - vec_inside_cost);
2919
2920           if ((scalar_single_iter_cost * vf * min_profitable_iters)
2921               <= (((int) vec_inside_cost * min_profitable_iters)
2922                   + (((int) vec_outside_cost - scalar_outside_cost) * vf)))
2923             min_profitable_iters++;
2924         }
2925     }
2926   /* The vector version will never be profitable.  */
2927   else
2928     {
2929       if (dump_enabled_p ())
2930         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2931                          "cost model: the vector iteration cost = %d "
2932                          "divided by the scalar iteration cost = %d "
2933                          "is greater than or equal to the vectorization factor = %d"
2934                          ".\n",
2935                          vec_inside_cost, scalar_single_iter_cost, vf);
2936       *ret_min_profitable_niters = -1;
2937       *ret_min_profitable_estimate = -1;
2938       return;
2939     }
2940
2941   if (dump_enabled_p ())
2942     {
2943       dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2944       dump_printf (MSG_NOTE, "  Vector inside of loop cost: %d\n",
2945                    vec_inside_cost);
2946       dump_printf (MSG_NOTE, "  Vector prologue cost: %d\n",
2947                    vec_prologue_cost);
2948       dump_printf (MSG_NOTE, "  Vector epilogue cost: %d\n",
2949                    vec_epilogue_cost);
2950       dump_printf (MSG_NOTE, "  Scalar iteration cost: %d\n",
2951                    scalar_single_iter_cost);
2952       dump_printf (MSG_NOTE, "  Scalar outside cost: %d\n",
2953                    scalar_outside_cost);
2954       dump_printf (MSG_NOTE, "  Vector outside cost: %d\n",
2955                    vec_outside_cost);
2956       dump_printf (MSG_NOTE, "  prologue iterations: %d\n",
2957                    peel_iters_prologue);
2958       dump_printf (MSG_NOTE, "  epilogue iterations: %d\n",
2959                    peel_iters_epilogue);
2960       dump_printf (MSG_NOTE,
2961                    "  Calculated minimum iters for profitability: %d\n",
2962                    min_profitable_iters);
2963       dump_printf (MSG_NOTE, "\n");
2964     }
2965
2966   min_profitable_iters =
2967         min_profitable_iters < vf ? vf : min_profitable_iters;
2968
2969   /* Because the condition we create is:
2970      if (niters <= min_profitable_iters)
2971        then skip the vectorized loop.  */
2972   min_profitable_iters--;
2973
2974   if (dump_enabled_p ())
2975     dump_printf_loc (MSG_NOTE, vect_location,
2976                      "  Runtime profitability threshold = %d\n",
2977                      min_profitable_iters);
2978
2979   *ret_min_profitable_niters = min_profitable_iters;
2980
2981   /* Calculate number of iterations required to make the vector version
2982      profitable, relative to the loop bodies only.
2983
2984      The non-vectorized variant costs SIC * niters and must win over the vector
2985      variant for the expected loop trip count.  The following condition must hold:
2986      SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC  */
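
  /* The same rearrangement as above, except that SOC now sits on the vector
     side of the inequality, yields the estimate computed below:

       niters > ((VOC + SOC) * VF - VIC * (PL_ITERS + EP_ITERS))
                / (SIC * VF - VIC)  */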
2987
2988   if (vec_outside_cost <= 0)
2989     min_profitable_estimate = 1;
2990   else
2991     {
2992       min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost) * vf
2993                                  - vec_inside_cost * peel_iters_prologue
2994                                  - vec_inside_cost * peel_iters_epilogue)
2995                                  / ((scalar_single_iter_cost * vf)
2996                                    - vec_inside_cost);
2997     }
2998   min_profitable_estimate--;
2999   min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
3000   if (dump_enabled_p ())
3001     dump_printf_loc (MSG_NOTE, vect_location,
3002                      "  Static estimate profitability threshold = %d\n",
3003                      min_profitable_estimate);
3004
3005   *ret_min_profitable_estimate = min_profitable_estimate;
3006 }
3007
3008
3009 /* TODO: There is a close dependency between the vect_model_*_cost and
3010    vectorizable_* functions.  Restructure them to avoid maintenance issues.  */
3011
3012 /* Function vect_model_reduction_cost.
3013
3014    Models cost for a reduction operation, including the vector ops
3015    generated within the strip-mine loop, the initial definition before
3016    the loop, and the epilogue code that must be generated.  */
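
/* As a rough illustration (actual costs are supplied by the target hooks):
   for a plain sum reduction over a 4-element vector, not nested in an outer
   loop and with a reduc_* pattern available, this models NCOPIES vector ops
   in the loop body, one scalar_to_vec in the prologue, and one vector op
   plus one vec_to_scalar extract in the epilogue.  Without the reduction
   pattern, the epilogue instead costs exact_log2 (4) * 2 = 4 shift/op
   statements plus an extract when a whole-vector shift is available, or
   4 + 4 - 1 = 7 statements built from element extracts otherwise.  */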
3017
3018 static bool
3019 vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
3020                            int ncopies)
3021 {
3022   int prologue_cost = 0, epilogue_cost = 0;
3023   enum tree_code code;
3024   optab optab;
3025   tree vectype;
3026   gimple stmt, orig_stmt;
3027   tree reduction_op;
3028   enum machine_mode mode;
3029   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3030   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3031   void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3032
3033   /* Cost of reduction op inside loop.  */
3034   unsigned inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
3035                                         stmt_info, 0, vect_body);
3036   stmt = STMT_VINFO_STMT (stmt_info);
3037
3038   switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3039     {
3040     case GIMPLE_SINGLE_RHS:
3041       gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
3042       reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
3043       break;
3044     case GIMPLE_UNARY_RHS:
3045       reduction_op = gimple_assign_rhs1 (stmt);
3046       break;
3047     case GIMPLE_BINARY_RHS:
3048       reduction_op = gimple_assign_rhs2 (stmt);
3049       break;
3050     case GIMPLE_TERNARY_RHS:
3051       reduction_op = gimple_assign_rhs3 (stmt);
3052       break;
3053     default:
3054       gcc_unreachable ();
3055     }
3056
3057   vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3058   if (!vectype)
3059     {
3060       if (dump_enabled_p ())
3061         {
3062           dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3063                            "unsupported data-type ");
3064           dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
3065                              TREE_TYPE (reduction_op));
3066           dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3067         }
3068       return false;
3069    }
3070
3071   mode = TYPE_MODE (vectype);
3072   orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
3073
3074   if (!orig_stmt)
3075     orig_stmt = STMT_VINFO_STMT (stmt_info);
3076
3077   code = gimple_assign_rhs_code (orig_stmt);
3078
3079   /* Add in cost for initial definition.  */
3080   prologue_cost += add_stmt_cost (target_cost_data, 1, scalar_to_vec,
3081                                   stmt_info, 0, vect_prologue);
3082
3083   /* Determine cost of epilogue code.
3084
3085      We have a reduction operator that will reduce the vector in one statement.
3086      Also requires scalar extract.  */
3087
3088   if (!nested_in_vect_loop_p (loop, orig_stmt))
3089     {
3090       if (reduc_code != ERROR_MARK)
3091         {
3092           epilogue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
3093                                           stmt_info, 0, vect_epilogue);
3094           epilogue_cost += add_stmt_cost (target_cost_data, 1, vec_to_scalar,
3095                                           stmt_info, 0, vect_epilogue);
3096         }
3097       else
3098         {
3099           int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
3100           tree bitsize =
3101             TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
3102           int element_bitsize = tree_low_cst (bitsize, 1);
3103           int nelements = vec_size_in_bits / element_bitsize;
3104
3105           optab = optab_for_tree_code (code, vectype, optab_default);
3106
3107           /* We have a whole vector shift available.  */
3108           if (VECTOR_MODE_P (mode)
3109               && optab_handler (optab, mode) != CODE_FOR_nothing
3110               && optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3111             {
3112               /* Final reduction via vector shifts and the reduction operator.
3113                  Also requires scalar extract.  */
3114               epilogue_cost += add_stmt_cost (target_cost_data,
3115                                               exact_log2 (nelements) * 2,
3116                                               vector_stmt, stmt_info, 0,
3117                                               vect_epilogue);
3118               epilogue_cost += add_stmt_cost (target_cost_data, 1,
3119                                               vec_to_scalar, stmt_info, 0,
3120                                               vect_epilogue);
3121             }     
3122           else
3123             /* Use extracts and reduction op for final reduction.  For N
3124                elements, we have N extracts and N-1 reduction ops.  */
3125             epilogue_cost += add_stmt_cost (target_cost_data, 
3126                                             nelements + nelements - 1,
3127                                             vector_stmt, stmt_info, 0,
3128                                             vect_epilogue);
3129         }
3130     }
3131
3132   if (dump_enabled_p ())
3133     dump_printf (MSG_NOTE, 
3134                  "vect_model_reduction_cost: inside_cost = %d, "
3135                  "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
3136                  prologue_cost, epilogue_cost);
3137
3138   return true;
3139 }
3140
3141
3142 /* Function vect_model_induction_cost.
3143
3144    Models cost for induction operations.  */
3145
3146 static void
3147 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
3148 {
3149   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3150   void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3151   unsigned inside_cost, prologue_cost;
3152
3153   /* loop cost for vec_loop.  */
3154   inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
3155                                stmt_info, 0, vect_body);
3156
3157   /* prologue cost for vec_init and vec_step.  */
3158   prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec,
3159                                  stmt_info, 0, vect_prologue);
3160
3161   if (dump_enabled_p ())
3162     dump_printf_loc (MSG_NOTE, vect_location,
3163                      "vect_model_induction_cost: inside_cost = %d, "
3164                      "prologue_cost = %d .\n", inside_cost, prologue_cost);
3165 }
3166
3167
3168 /* Function get_initial_def_for_induction
3169
3170    Input:
3171    STMT - a stmt that performs an induction operation in the loop.
3172    IV_PHI - the phi node that defines the induction variable in the loop
3173
3174    Output:
3175    Return a vector variable, initialized with the first VF values of
3176    the induction variable.  E.g., for an iv with IV_PHI='X' and
3177    evolution S, for a vector of 4 units, we want to return:
3178    [X, X + S, X + 2*S, X + 3*S].  */
3179
3180 static tree
3181 get_initial_def_for_induction (gimple iv_phi)
3182 {
3183   stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
3184   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
3185   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3186   tree vectype;
3187   int nunits;
3188   edge pe = loop_preheader_edge (loop);
3189   struct loop *iv_loop;
3190   basic_block new_bb;
3191   tree new_vec, vec_init, vec_step, t;
3192   tree access_fn;
3193   tree new_var;
3194   tree new_name;
3195   gimple init_stmt, induction_phi, new_stmt;
3196   tree induc_def, vec_def, vec_dest;
3197   tree init_expr, step_expr;
3198   int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3199   int i;
3200   bool ok;
3201   int ncopies;
3202   tree expr;
3203   stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
3204   bool nested_in_vect_loop = false;
3205   gimple_seq stmts = NULL;
3206   imm_use_iterator imm_iter;
3207   use_operand_p use_p;
3208   gimple exit_phi;
3209   edge latch_e;
3210   tree loop_arg;
3211   gimple_stmt_iterator si;
3212   basic_block bb = gimple_bb (iv_phi);
3213   tree stepvectype;
3214   tree resvectype;
3215
3216   /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop?  */
3217   if (nested_in_vect_loop_p (loop, iv_phi))
3218     {
3219       nested_in_vect_loop = true;
3220       iv_loop = loop->inner;
3221     }
3222   else
3223     iv_loop = loop;
3224   gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);
3225
3226   latch_e = loop_latch_edge (iv_loop);
3227   loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);
3228
3229   access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
3230   gcc_assert (access_fn);
3231   STRIP_NOPS (access_fn);
3232   ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
3233                                     &init_expr, &step_expr);
3234   gcc_assert (ok);
3235   pe = loop_preheader_edge (iv_loop);
3236
3237   vectype = get_vectype_for_scalar_type (TREE_TYPE (init_expr));
3238   resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi)));
3239   gcc_assert (vectype);
3240   nunits = TYPE_VECTOR_SUBPARTS (vectype);
3241   ncopies = vf / nunits;
3242
3243   gcc_assert (phi_info);
3244   gcc_assert (ncopies >= 1);
3245
3246   /* Find the first insertion point in the BB.  */
3247   si = gsi_after_labels (bb);
3248
3249   /* Create the vector that holds the initial_value of the induction.  */
3250   if (nested_in_vect_loop)
3251     {
3252       /* iv_loop is nested in the loop to be vectorized.  init_expr had already
3253          been created during vectorization of previous stmts.  We obtain it
3254          from the STMT_VINFO_VEC_STMT of the defining stmt.  */
3255       tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
3256                                            loop_preheader_edge (iv_loop));
3257       vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
3258       /* If the initial value is not of proper type, convert it.  */
3259       if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
3260         {
3261           new_stmt = gimple_build_assign_with_ops
3262               (VIEW_CONVERT_EXPR,
3263                vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_"),
3264                build1 (VIEW_CONVERT_EXPR, vectype, vec_init), NULL_TREE);
3265           vec_init = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
3266           gimple_assign_set_lhs (new_stmt, vec_init);
3267           new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
3268                                                  new_stmt);
3269           gcc_assert (!new_bb);
3270           set_vinfo_for_stmt (new_stmt,
3271                               new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3272         }
3273     }
3274   else
3275     {
3276       vec<constructor_elt, va_gc> *v;
3277
3278       /* iv_loop is the loop to be vectorized. Create:
3279          vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr)  */
3280       new_var = vect_get_new_vect_var (TREE_TYPE (vectype),
3281                                        vect_scalar_var, "var_");
3282       new_name = force_gimple_operand (fold_convert (TREE_TYPE (vectype),
3283                                                      init_expr),
3284                                        &stmts, false, new_var);
3285       if (stmts)
3286         {
3287           new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3288           gcc_assert (!new_bb);
3289         }
3290
3291       vec_alloc (v, nunits);
3292       bool constant_p = is_gimple_min_invariant (new_name);
3293       CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
3294       for (i = 1; i < nunits; i++)
3295         {
3296           /* Create: new_name_i = new_name + step_expr  */
3297           new_name = fold_build2 (PLUS_EXPR, TREE_TYPE (new_name),
3298                                   new_name, step_expr);
3299           if (!is_gimple_min_invariant (new_name))
3300             {
3301               init_stmt = gimple_build_assign (new_var, new_name);
3302               new_name = make_ssa_name (new_var, init_stmt);
3303               gimple_assign_set_lhs (init_stmt, new_name);
3304               new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
3305               gcc_assert (!new_bb);
3306               if (dump_enabled_p ())
3307                 {
3308                   dump_printf_loc (MSG_NOTE, vect_location,
3309                                    "created new init_stmt: ");
3310                   dump_gimple_stmt (MSG_NOTE, TDF_SLIM, init_stmt, 0);
3311                   dump_printf (MSG_NOTE, "\n");
3312                 }
3313               constant_p = false;
3314             }
3315           CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
3316         }
3317       /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1]  */
3318       if (constant_p)
3319         new_vec = build_vector_from_ctor (vectype, v);
3320       else
3321         new_vec = build_constructor (vectype, v);
3322       vec_init = vect_init_vector (iv_phi, new_vec, vectype, NULL);
3323     }
3324
3325
3326   /* Create the vector that holds the step of the induction.  */
3327   if (nested_in_vect_loop)
3328     /* iv_loop is nested in the loop to be vectorized. Generate:
3329        vec_step = [S, S, S, S]  */
3330     new_name = step_expr;
3331   else
3332     {
3333       /* iv_loop is the loop to be vectorized. Generate:
3334           vec_step = [VF*S, VF*S, VF*S, VF*S]  */
3335       if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
3336         {
3337           expr = build_int_cst (integer_type_node, vf);
3338           expr = fold_convert (TREE_TYPE (step_expr), expr);
3339         }
3340       else
3341         expr = build_int_cst (TREE_TYPE (step_expr), vf);
3342       new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3343                               expr, step_expr);
3344       if (TREE_CODE (step_expr) == SSA_NAME)
3345         new_name = vect_init_vector (iv_phi, new_name,
3346                                      TREE_TYPE (step_expr), NULL);
3347     }
3348
3349   t = unshare_expr (new_name);
3350   gcc_assert (CONSTANT_CLASS_P (new_name)
3351               || TREE_CODE (new_name) == SSA_NAME);
3352   stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
3353   gcc_assert (stepvectype);
3354   new_vec = build_vector_from_val (stepvectype, t);
3355   vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);
3356
3357
3358   /* Create the following def-use cycle:
3359      loop prolog:
3360          vec_init = ...
3361          vec_step = ...
3362      loop:
3363          vec_iv = PHI <vec_init, vec_loop>
3364          ...
3365          STMT
3366          ...
3367          vec_loop = vec_iv + vec_step;  */
3368
3369   /* Create the induction-phi that defines the induction-operand.  */
3370   vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
3371   induction_phi = create_phi_node (vec_dest, iv_loop->header);
3372   set_vinfo_for_stmt (induction_phi,
3373                       new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
3374   induc_def = PHI_RESULT (induction_phi);
3375
3376   /* Create the iv update inside the loop  */
3377   new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3378                                            induc_def, vec_step);
3379   vec_def = make_ssa_name (vec_dest, new_stmt);
3380   gimple_assign_set_lhs (new_stmt, vec_def);
3381   gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3382   set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
3383                                                    NULL));
3384
3385   /* Set the arguments of the phi node:  */
3386   add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
3387   add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
3388                UNKNOWN_LOCATION);
3389
3390
3391   /* In case the vectorization factor (VF) is bigger than the number
3392      of elements that we can fit in a vectype (nunits), we have to generate
3393      more than one vector stmt - i.e., we need to "unroll" the
3394      vector stmt by a factor VF/nunits.  For more details see documentation
3395      in vectorizable_operation.  */
3396
3397   if (ncopies > 1)
3398     {
3399       stmt_vec_info prev_stmt_vinfo;
3400       /* FORNOW. This restriction should be relaxed.  */
3401       gcc_assert (!nested_in_vect_loop);
3402
3403       /* Create the vector that holds the step of the induction.  */
3404       if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
3405         {
3406           expr = build_int_cst (integer_type_node, nunits);
3407           expr = fold_convert (TREE_TYPE (step_expr), expr);
3408         }
3409       else
3410         expr = build_int_cst (TREE_TYPE (step_expr), nunits);
3411       new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
3412                               expr, step_expr);
3413       if (TREE_CODE (step_expr) == SSA_NAME)
3414         new_name = vect_init_vector (iv_phi, new_name,
3415                                      TREE_TYPE (step_expr), NULL);
3416       t = unshare_expr (new_name);
3417       gcc_assert (CONSTANT_CLASS_P (new_name)
3418                   || TREE_CODE (new_name) == SSA_NAME);
3419       new_vec = build_vector_from_val (stepvectype, t);
3420       vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);
3421
3422       vec_def = induc_def;
3423       prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
3424       for (i = 1; i < ncopies; i++)
3425         {
3426           /* vec_i = vec_prev + vec_step  */
3427           new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
3428                                                    vec_def, vec_step);
3429           vec_def = make_ssa_name (vec_dest, new_stmt);
3430           gimple_assign_set_lhs (new_stmt, vec_def);
3431  
3432           gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3433           if (!useless_type_conversion_p (resvectype, vectype))
3434             {
3435               new_stmt = gimple_build_assign_with_ops
3436                   (VIEW_CONVERT_EXPR,
3437                    vect_get_new_vect_var (resvectype, vect_simple_var,
3438                                           "vec_iv_"),
3439                    build1 (VIEW_CONVERT_EXPR, resvectype,
3440                            gimple_assign_lhs (new_stmt)), NULL_TREE);
3441               gimple_assign_set_lhs (new_stmt,
3442                                      make_ssa_name
3443                                        (gimple_assign_lhs (new_stmt), new_stmt));
3444               gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3445             }
3446           set_vinfo_for_stmt (new_stmt,
3447                               new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3448           STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
3449           prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
3450         }
3451     }
3452
3453   if (nested_in_vect_loop)
3454     {
3455       /* Find the loop-closed exit-phi of the induction, and record
3456          the final vector of induction results:  */
3457       exit_phi = NULL;
3458       FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
3459         {
3460           if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
3461             {
3462               exit_phi = USE_STMT (use_p);
3463               break;
3464             }
3465         }
3466       if (exit_phi)
3467         {
3468           stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
3469           /* FORNOW. Currently not supporting the case that an inner-loop induction
3470              is not used in the outer-loop (i.e. only outside the outer-loop).  */
3471           gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
3472                       && !STMT_VINFO_LIVE_P (stmt_vinfo));
3473
3474           STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
3475           if (dump_enabled_p ())
3476             {
3477               dump_printf_loc (MSG_NOTE, vect_location,
3478                                "vector of inductions after inner-loop:");
3479               dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
3480               dump_printf (MSG_NOTE, "\n");
3481             }
3482         }
3483     }
3484
3485
3486   if (dump_enabled_p ())
3487     {
3488       dump_printf_loc (MSG_NOTE, vect_location,
3489                        "transform induction: created def-use cycle: ");
3490       dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
3491       dump_printf (MSG_NOTE, "\n");
3492       dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
3493                         SSA_NAME_DEF_STMT (vec_def), 0);
3494       dump_printf (MSG_NOTE, "\n");
3495     }
3496
3497   STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
3498   if (!useless_type_conversion_p (resvectype, vectype))
3499     {
3500       new_stmt = gimple_build_assign_with_ops
3501          (VIEW_CONVERT_EXPR,
3502           vect_get_new_vect_var (resvectype, vect_simple_var, "vec_iv_"),
3503           build1 (VIEW_CONVERT_EXPR, resvectype, induc_def), NULL_TREE);
3504       induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
3505       gimple_assign_set_lhs (new_stmt, induc_def);
3506       si = gsi_after_labels (bb);
3507       gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
3508       set_vinfo_for_stmt (new_stmt,
3509                           new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
3510       STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_stmt))
3511         = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (induction_phi));
3512     }
3513
3514   return induc_def;
3515 }
3516
3517
3518 /* Function get_initial_def_for_reduction
3519
3520    Input:
3521    STMT - a stmt that performs a reduction operation in the loop.
3522    INIT_VAL - the initial value of the reduction variable
3523
3524    Output:
3525    ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
3526         of the reduction (used for adjusting the epilog - see below).
3527    Return a vector variable, initialized according to the operation that STMT
3528         performs. This vector will be used as the initial value of the
3529         vector of partial results.
3530
3531    Option1 (adjust in epilog): Initialize the vector as follows:
3532      add/bit or/xor:    [0,0,...,0,0]
3533      mult/bit and:      [1,1,...,1,1]
3534      min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
3535    and when necessary (e.g. add/mult case) let the caller know
3536    that it needs to adjust the result by init_val.
3537
3538    Option2: Initialize the vector as follows:
3539      add/bit or/xor:    [init_val,0,0,...,0]
3540      mult/bit and:      [init_val,1,1,...,1]
3541      min/max/cond_expr: [init_val,init_val,...,init_val]
3542    and no adjustments are needed.
3543
3544    For example, for the following code:
3545
3546    s = init_val;
3547    for (i=0;i<n;i++)
3548      s = s + a[i];
3549
3550    STMT is 's = s + a[i]', and the reduction variable is 's'.
3551    For a vector of 4 units, we want to return either [0,0,0,init_val],
3552    or [0,0,0,0] and let the caller know that it needs to adjust
3553    the result at the end by 'init_val'.
3554
3555    FORNOW, we use the 'adjust in epilog' scheme (Option1) if ADJUSTMENT_DEF is
3556    not NULL, because this way the initialization vector is simpler (the same
3557    element in all entries), and Option2 otherwise.
3558
3559    A cost model should help decide between these two schemes.  */
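
/* For instance, with the loop above, INIT_VAL = 5 and a vector of 4 units:
   Option1 returns [0,0,0,0] and sets ADJUSTMENT_DEF to 5, which the caller
   adds back to the reduced result in the epilog; Option2 returns [0,0,0,5]
   and needs no adjustment.  */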
3560
3561 tree
3562 get_initial_def_for_reduction (gimple stmt, tree init_val,
3563                                tree *adjustment_def)
3564 {
3565   stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
3566   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
3567   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3568   tree scalar_type = TREE_TYPE (init_val);
3569   tree vectype = get_vectype_for_scalar_type (scalar_type);
3570   int nunits;
3571   enum tree_code code = gimple_assign_rhs_code (stmt);
3572   tree def_for_init;
3573   tree init_def;
3574   tree *elts;
3575   int i;
3576   bool nested_in_vect_loop = false;
3577   tree init_value;
3578   REAL_VALUE_TYPE real_init_val = dconst0;
3579   int int_init_val = 0;
3580   gimple def_stmt = NULL;
3581
3582   gcc_assert (vectype);
3583   nunits = TYPE_VECTOR_SUBPARTS (vectype);
3584
3585   gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
3586               || SCALAR_FLOAT_TYPE_P (scalar_type));
3587
3588   if (nested_in_vect_loop_p (loop, stmt))
3589     nested_in_vect_loop = true;
3590   else
3591     gcc_assert (loop == (gimple_bb (stmt))->loop_father);
3592
3593   /* In case of double reduction we only create a vector variable to be put
3594      in the reduction phi node.  The actual statement creation is done in
3595      vect_create_epilog_for_reduction.  */
3596   if (adjustment_def && nested_in_vect_loop
3597       && TREE_CODE (init_val) == SSA_NAME
3598       && (def_stmt = SSA_NAME_DEF_STMT (init_val))
3599       && gimple_code (def_stmt) == GIMPLE_PHI
3600       && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3601       && vinfo_for_stmt (def_stmt)
3602       && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
3603           == vect_double_reduction_def)
3604     {
3605       *adjustment_def = NULL;
3606       return vect_create_destination_var (init_val, vectype);
3607     }
3608
3609   if (TREE_CONSTANT (init_val))
3610     {
3611       if (SCALAR_FLOAT_TYPE_P (scalar_type))
3612         init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
3613       else
3614         init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
3615     }
3616   else
3617     init_value = init_val;
3618
3619   switch (code)
3620     {
3621       case WIDEN_SUM_EXPR:
3622       case DOT_PROD_EXPR:
3623       case PLUS_EXPR:
3624       case MINUS_EXPR:
3625       case BIT_IOR_EXPR:
3626       case BIT_XOR_EXPR:
3627       case MULT_EXPR:
3628       case BIT_AND_EXPR:
3629         /* ADJUSTMENT_DEF is NULL when called from
3630            vect_create_epilog_for_reduction to vectorize a double reduction.  */
3631         if (adjustment_def)
3632           {
3633             if (nested_in_vect_loop)
3634               *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
3635                                                               NULL);
3636             else
3637               *adjustment_def = init_val;
3638           }
3639
3640         if (code == MULT_EXPR)
3641           {
3642             real_init_val = dconst1;
3643             int_init_val = 1;
3644           }
3645
3646         if (code == BIT_AND_EXPR)
3647           int_init_val = -1;
3648
3649         if (SCALAR_FLOAT_TYPE_P (scalar_type))
3650           def_for_init = build_real (scalar_type, real_init_val);
3651         else
3652           def_for_init = build_int_cst (scalar_type, int_init_val);
3653
3654         /* Create a vector of '0' or '1', except for the first element.  */
3655         elts = XALLOCAVEC (tree, nunits);
3656         for (i = nunits - 2; i >= 0; --i)
3657           elts[i + 1] = def_for_init;
3658
3659         /* Option1: the first element is '0' or '1' as well.  */
3660         if (adjustment_def)
3661           {
3662             elts[0] = def_for_init;
3663             init_def = build_vector (vectype, elts);
3664             break;
3665           }
3666
3667         /* Option2: the first element is INIT_VAL.  */
3668         elts[0] = init_val;
3669         if (TREE_CONSTANT (init_val))
3670           init_def = build_vector (vectype, elts);
3671         else
3672           {
3673             vec<constructor_elt, va_gc> *v;
3674             vec_alloc (v, nunits);
3675             CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init_val);
3676             for (i = 1; i < nunits; ++i)
3677               CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]);
3678             init_def = build_constructor (vectype, v);
3679           }
3680
3681         break;
3682
3683       case MIN_EXPR:
3684       case MAX_EXPR:
3685       case COND_EXPR:
3686         if (adjustment_def)
3687           {
3688             *adjustment_def = NULL_TREE;
3689             init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
3690             break;
3691           }
3692
3693         init_def = build_vector_from_val (vectype, init_value);
3694         break;
3695
3696       default:
3697         gcc_unreachable ();
3698     }
3699
3700   return init_def;
3701 }
3702
3703
3704 /* Function vect_create_epilog_for_reduction
3705
3706    Create code at the loop-epilog to finalize the result of a reduction
3707    computation. 
3708   
3709    VECT_DEFS is the list of vectors of partial results, i.e., the lhs's of
3710      the vector reduction statements.
3711    STMT is the scalar reduction stmt that is being vectorized.
3712    NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
3713      number of elements that we can fit in a vectype (nunits).  In this case
3714      we have to generate more than one vector stmt - i.e., we need to "unroll"
3715      the vector stmt by a factor VF/nunits.  For more details see documentation
3716      in vectorizable_operation.
3717    REDUC_CODE is the tree-code for the epilog reduction.
3718    REDUCTION_PHIS is a list of the phi-nodes that carry the reduction 
3719      computation.
3720    REDUC_INDEX is the index of the operand in the right hand side of the 
3721      statement that is defined by REDUCTION_PHI.
3722    DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
3723    SLP_NODE is an SLP node containing a group of reduction statements. The 
3724      first one in this group is STMT.
3725
3726    This function:
3727    1. Creates the reduction def-use cycles: sets the arguments for 
3728       REDUCTION_PHIS:
3729       The loop-entry argument is the vectorized initial-value of the reduction.
3730       The loop-latch argument is taken from VECT_DEFS - the vector of partial 
3731       sums.
3732    2. "Reduces" each vector of partial results VECT_DEFS into a single result,
3733       by applying the operation specified by REDUC_CODE if available, or by 
3734       other means (whole-vector shifts or a scalar loop).
3735       The function also creates a new phi node at the loop exit to preserve
3736       loop-closed form, as illustrated below.
3737
3738      The flow at the entry to this function:
3739
3740         loop:
3741           vec_def = phi <null, null>            # REDUCTION_PHI
3742           VECT_DEF = vector_stmt                # vectorized form of STMT
3743           s_loop = scalar_stmt                  # (scalar) STMT
3744         loop_exit:
3745           s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
3746           use <s_out0>
3747           use <s_out0>
3748
3749      The above is transformed by this function into:
3750
3751         loop:
3752           vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
3753           VECT_DEF = vector_stmt                # vectorized form of STMT
3754           s_loop = scalar_stmt                  # (scalar) STMT
3755         loop_exit:
3756           s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
3757           v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
3758           v_out2 = reduce <v_out1>
3759           s_out3 = extract_field <v_out2, 0>
3760           s_out4 = adjust_result <s_out3>
3761           use <s_out4>
3762           use <s_out4>
3763 */
3764
3765 static void
3766 vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt,
3767                                   int ncopies, enum tree_code reduc_code,
3768                                   vec<gimple> reduction_phis,
3769                                   int reduc_index, bool double_reduc, 
3770                                   slp_tree slp_node)
3771 {
3772   stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3773   stmt_vec_info prev_phi_info;
3774   tree vectype;
3775   enum machine_mode mode;
3776   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3777   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
3778   basic_block exit_bb;
3779   tree scalar_dest;
3780   tree scalar_type;
3781   gimple new_phi = NULL, phi;
3782   gimple_stmt_iterator exit_gsi;
3783   tree vec_dest;
3784   tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
3785   gimple epilog_stmt = NULL;
3786   enum tree_code code = gimple_assign_rhs_code (stmt);
3787   gimple exit_phi;
3788   tree bitsize, bitpos;
3789   tree adjustment_def = NULL;
3790   tree vec_initial_def = NULL;
3791   tree reduction_op, expr, def;
3792   tree orig_name, scalar_result;
3793   imm_use_iterator imm_iter, phi_imm_iter;
3794   use_operand_p use_p, phi_use_p;
3795   bool extract_scalar_result = false;
3796   gimple use_stmt, orig_stmt, reduction_phi = NULL;
3797   bool nested_in_vect_loop = false;
3798   vec<gimple> new_phis = vNULL;
3799   vec<gimple> inner_phis = vNULL;
3800   enum vect_def_type dt = vect_unknown_def_type;
3801   int j, i;
3802   vec<tree> scalar_results = vNULL;
3803   unsigned int group_size = 1, k, ratio;
3804   vec<tree> vec_initial_defs = vNULL;
3805   vec<gimple> phis;
3806   bool slp_reduc = false;
3807   tree new_phi_result;
3808   gimple inner_phi = NULL;
3809
3810   if (slp_node)
3811     group_size = SLP_TREE_SCALAR_STMTS (slp_node).length (); 
3812
3813   if (nested_in_vect_loop_p (loop, stmt))
3814     {
3815       outer_loop = loop;
3816       loop = loop->inner;
3817       nested_in_vect_loop = true;
3818       gcc_assert (!slp_node);
3819     }
3820
3821   switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
3822     {
3823     case GIMPLE_SINGLE_RHS:
3824       gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
3825                   == ternary_op);
3826       reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
3827       break;
3828     case GIMPLE_UNARY_RHS:
3829       reduction_op = gimple_assign_rhs1 (stmt);
3830       break;
3831     case GIMPLE_BINARY_RHS:
3832       reduction_op = reduc_index ?
3833                      gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
3834       break;
3835     case GIMPLE_TERNARY_RHS:
3836       reduction_op = gimple_op (stmt, reduc_index + 1);
3837       break;
3838     default:
3839       gcc_unreachable ();
3840     }
3841
3842   vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
3843   gcc_assert (vectype);
3844   mode = TYPE_MODE (vectype);
3845
3846   /* 1. Create the reduction def-use cycle:
3847      Set the arguments of REDUCTION_PHIS, i.e., transform
3848
3849         loop:
3850           vec_def = phi <null, null>            # REDUCTION_PHI
3851           VECT_DEF = vector_stmt                # vectorized form of STMT
3852           ...
3853
3854      into:
3855
3856         loop:
3857           vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
3858           VECT_DEF = vector_stmt                # vectorized form of STMT
3859           ...
3860
3861      (in case of SLP, do it for all the phis). */
3862
3863   /* Get the loop-entry arguments.  */
3864   if (slp_node)
3865     vect_get_vec_defs (reduction_op, NULL_TREE, stmt, &vec_initial_defs,
3866                        NULL, slp_node, reduc_index);
3867   else
3868     {
3869       vec_initial_defs.create (1);
3870      /* For the case of reduction, vect_get_vec_def_for_operand returns
3871         the scalar def before the loop, that defines the initial value
3872         of the reduction variable.  */
3873       vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
3874                                                       &adjustment_def);
3875       vec_initial_defs.quick_push (vec_initial_def);
3876     }
3877
3878   /* Set phi nodes arguments.  */
3879   FOR_EACH_VEC_ELT (reduction_phis, i, phi)
3880     {
3881       tree vec_init_def = vec_initial_defs[i];
3882       tree def = vect_defs[i];
3883       for (j = 0; j < ncopies; j++)
3884         {
3885           /* Set the loop-entry arg of the reduction-phi.  */
3886           add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
3887                        UNKNOWN_LOCATION);
3888
3889           /* Set the loop-latch arg for the reduction-phi.  */
3890           if (j > 0)
3891             def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
3892
3893           add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
3894
3895           if (dump_enabled_p ())
3896             {
3897               dump_printf_loc (MSG_NOTE, vect_location,
3898                                "transform reduction: created def-use cycle: ");
3899               dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
3900               dump_printf (MSG_NOTE, "\n");
3901               dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
3902               dump_printf (MSG_NOTE, "\n");
3903             }
3904
3905           phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3906         }
3907     }
3908
3909   vec_initial_defs.release ();
3910
3911   /* 2. Create epilog code.
3912         The reduction epilog code operates across the elements of the vector
3913         of partial results computed by the vectorized loop.
3914         The reduction epilog code consists of:
3915
3916         step 1: compute the scalar result in a vector (v_out2)
3917         step 2: extract the scalar result (s_out3) from the vector (v_out2)
3918         step 3: adjust the scalar result (s_out3) if needed.
3919
3920         Step 1 can be accomplished using one of the following three schemes:
3921           (scheme 1) using reduc_code, if available.
3922           (scheme 2) using whole-vector shifts, if available.
3923           (scheme 3) using a scalar loop. In this case steps 1+2 above are
3924                      combined.
3925
3926           The overall epilog code looks like this:
3927
3928           s_out0 = phi <s_loop>         # original EXIT_PHI
3929           v_out1 = phi <VECT_DEF>       # NEW_EXIT_PHI
3930           v_out2 = reduce <v_out1>              # step 1
3931           s_out3 = extract_field <v_out2, 0>    # step 2
3932           s_out4 = adjust_result <s_out3>       # step 3
3933
3934           (step 3 is optional, and steps 1 and 2 may be combined).
3935           Lastly, the uses of s_out0 are replaced by s_out4.  */
3936
3937
3938   /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
3939          v_out1 = phi <VECT_DEF> 
3940          Store them in NEW_PHIS.  */
3941
3942   exit_bb = single_exit (loop)->dest;
3943   prev_phi_info = NULL;
3944   new_phis.create (vect_defs.length ());
3945   FOR_EACH_VEC_ELT (vect_defs, i, def)
3946     {
3947       for (j = 0; j < ncopies; j++)
3948         {
3949           tree new_def = copy_ssa_name (def, NULL);
3950           phi = create_phi_node (new_def, exit_bb);
3951           set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
3952           if (j == 0)
3953             new_phis.quick_push (phi);
3954           else
3955             {
3956               def = vect_get_vec_def_for_stmt_copy (dt, def);
3957               STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
3958             }
3959
3960           SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
3961           prev_phi_info = vinfo_for_stmt (phi);
3962         }
3963     }
3964
3965   /* The epilogue is created for the outer-loop, i.e., for the loop being
3966      vectorized.  Create exit phis for the outer loop.  */
3967   if (double_reduc)
3968     {
3969       loop = outer_loop;
3970       exit_bb = single_exit (loop)->dest;
3971       inner_phis.create (vect_defs.length ());
3972       FOR_EACH_VEC_ELT (new_phis, i, phi)
3973         {
3974           tree new_result = copy_ssa_name (PHI_RESULT (phi), NULL);
3975           gimple outer_phi = create_phi_node (new_result, exit_bb);
3976           SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
3977                            PHI_RESULT (phi));
3978           set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
3979                                                             loop_vinfo, NULL));
3980           inner_phis.quick_push (phi);
3981           new_phis[i] = outer_phi;
3982           prev_phi_info = vinfo_for_stmt (outer_phi);
3983           while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
3984             {
3985               phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
3986               new_result = copy_ssa_name (PHI_RESULT (phi), NULL);
3987               outer_phi = create_phi_node (new_result, exit_bb);
3988               SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
3989                                PHI_RESULT (phi));
3990               set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
3991                                                         loop_vinfo, NULL));
3992               STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
3993               prev_phi_info = vinfo_for_stmt (outer_phi);
3994             }
3995         }
3996     }
3997
3998   exit_gsi = gsi_after_labels (exit_bb);
3999
4000   /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
4001          (i.e. when reduc_code is not available) and in the final adjustment
4002          code (if needed).  Also get the original scalar reduction variable as
4003          defined in the loop.  In case STMT is a "pattern-stmt" (i.e. - it
4004          represents a reduction pattern), the tree-code and scalar-def are
4005          taken from the original stmt that the pattern-stmt (STMT) replaces.
4006          Otherwise (it is a regular reduction) - the tree-code and scalar-def
4007          are taken from STMT.  */
4008
4009   orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
4010   if (!orig_stmt)
4011     {
4012       /* Regular reduction  */
4013       orig_stmt = stmt;
4014     }
4015   else
4016     {
4017       /* Reduction pattern  */
4018       stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
4019       gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
4020       gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4021     }
4022
4023   code = gimple_assign_rhs_code (orig_stmt);
4024   /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
4025      partial results are added and not subtracted.  */
4026   if (code == MINUS_EXPR) 
4027     code = PLUS_EXPR;
4028   
4029   scalar_dest = gimple_assign_lhs (orig_stmt);
4030   scalar_type = TREE_TYPE (scalar_dest);
4031   scalar_results.create (group_size); 
4032   new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
4033   bitsize = TYPE_SIZE (scalar_type);
4034
4035   /* In case this is a reduction in an inner-loop while vectorizing an outer
4036      loop - we don't need to extract a single scalar result at the end of the
4037      inner-loop (unless it is double reduction, i.e., the use of reduction is
4038      outside the outer-loop).  The final vector of partial results will be used
4039      in the vectorized outer-loop, or reduced to a scalar result at the end of
4040      the outer-loop.  */
4041   if (nested_in_vect_loop && !double_reduc)
4042     goto vect_finalize_reduction;
4043
4044   /* SLP reduction without reduction chain, e.g.,
4045      # a1 = phi <a2, a0>
4046      # b1 = phi <b2, b0>
4047      a2 = operation (a1)
4048      b2 = operation (b1)  */
4049   slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
4050
4051   /* In case of reduction chain, e.g.,
4052      # a1 = phi <a3, a0>
4053      a2 = operation (a1)
4054      a3 = operation (a2),
4055
4056      we may end up with more than one vector result.  Here we reduce them to
4057      one vector.  */
4058   if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4059     {
4060       tree first_vect = PHI_RESULT (new_phis[0]);
4061       tree tmp;
4062       gimple new_vec_stmt = NULL;
4063
4064       vec_dest = vect_create_destination_var (scalar_dest, vectype);
4065       for (k = 1; k < new_phis.length (); k++)
4066         {
4067           gimple next_phi = new_phis[k];
4068           tree second_vect = PHI_RESULT (next_phi);
4069
4070           tmp = build2 (code, vectype,  first_vect, second_vect);
4071           new_vec_stmt = gimple_build_assign (vec_dest, tmp);
4072           first_vect = make_ssa_name (vec_dest, new_vec_stmt);
4073           gimple_assign_set_lhs (new_vec_stmt, first_vect);
4074           gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4075         }
4076
4077       new_phi_result = first_vect;
4078       if (new_vec_stmt)
4079         {
4080           new_phis.truncate (0);
4081           new_phis.safe_push (new_vec_stmt);
4082         }
4083     }
4084   else
4085     new_phi_result = PHI_RESULT (new_phis[0]);
4086  
4087   /* 2.3 Create the reduction code, using one of the three schemes described
4088          above. In SLP we simply need to extract all the elements from the 
4089          vector (without reducing them), so we use element extracts.  */
4090   if (reduc_code != ERROR_MARK && !slp_reduc)
4091     {
4092       tree tmp;
4093
4094       /*** Case 1:  Create:
4095            v_out2 = reduc_expr <v_out1>  */
4096
4097       if (dump_enabled_p ())
4098         dump_printf_loc (MSG_NOTE, vect_location,
4099                          "Reduce using direct vector reduction.\n");
4100
4101       vec_dest = vect_create_destination_var (scalar_dest, vectype);
4102       tmp = build1 (reduc_code, vectype, new_phi_result);
4103       epilog_stmt = gimple_build_assign (vec_dest, tmp);
4104       new_temp = make_ssa_name (vec_dest, epilog_stmt);
4105       gimple_assign_set_lhs (epilog_stmt, new_temp);
4106       gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4107
4108       extract_scalar_result = true;
4109     }
4110   else
4111     {
4112       enum tree_code shift_code = ERROR_MARK;
4113       bool have_whole_vector_shift = true;
4114       int bit_offset;
4115       int element_bitsize = tree_low_cst (bitsize, 1);
4116       int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
4117       tree vec_temp;
4118
4119       if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
4120         shift_code = VEC_RSHIFT_EXPR;
4121       else
4122         have_whole_vector_shift = false;
4123
4124       /* Regardless of whether we have a whole vector shift, if we're
4125          emulating the operation via tree-vect-generic, we don't want
4126          to use it.  Only the first round of the reduction is likely
4127          to still be profitable via emulation.  */
4128       /* ??? It might be better to emit a reduction tree code here, so that
4129          tree-vect-generic can expand the first round via bit tricks.  */
4130       if (!VECTOR_MODE_P (mode))
4131         have_whole_vector_shift = false;
4132       else
4133         {
4134           optab optab = optab_for_tree_code (code, vectype, optab_default);
4135           if (optab_handler (optab, mode) == CODE_FOR_nothing)
4136             have_whole_vector_shift = false;
4137         }
4138
4139       if (have_whole_vector_shift && !slp_reduc)
4140         {
4141           /*** Case 2: Create:
4142              for (offset = VS/2; offset >= element_size; offset/=2)
4143                 {
4144                   Create:  va' = vec_shift <va, offset>
4145                   Create:  va = vop <va, va'>
4146                 }  */
4147
4148           if (dump_enabled_p ())
4149             dump_printf_loc (MSG_NOTE, vect_location,
4150                              "Reduce using vector shifts\n");
4151
4152           vec_dest = vect_create_destination_var (scalar_dest, vectype);
4153           new_temp = new_phi_result;
4154           for (bit_offset = vec_size_in_bits/2;
4155                bit_offset >= element_bitsize;
4156                bit_offset /= 2)
4157             {
4158               tree bitpos = size_int (bit_offset);
4159
4160               epilog_stmt = gimple_build_assign_with_ops (shift_code,
4161                                                vec_dest, new_temp, bitpos);
4162               new_name = make_ssa_name (vec_dest, epilog_stmt);
4163               gimple_assign_set_lhs (epilog_stmt, new_name);
4164               gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4165
4166               epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
4167                                                           new_name, new_temp);
4168               new_temp = make_ssa_name (vec_dest, epilog_stmt);
4169               gimple_assign_set_lhs (epilog_stmt, new_temp);
4170               gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4171             }
4172
4173           extract_scalar_result = true;
4174         }
4175       else
4176         {
4177           tree rhs;
4178
4179           /*** Case 3: Create:
4180              s = extract_field <v_out2, 0>
4181              for (offset = element_size;
4182                   offset < vector_size;
4183                   offset += element_size;)
4184                {
4185                  Create:  s' = extract_field <v_out2, offset>
4186                  Create:  s = op <s, s'>  // For non SLP cases
4187                }  */
4188
4189           if (dump_enabled_p ())
4190             dump_printf_loc (MSG_NOTE, vect_location,
4191                              "Reduce using scalar code.\n");
4192
4193           vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
4194           FOR_EACH_VEC_ELT (new_phis, i, new_phi)
4195             {
4196               if (gimple_code (new_phi) == GIMPLE_PHI)
4197                 vec_temp = PHI_RESULT (new_phi);
4198               else
4199                 vec_temp = gimple_assign_lhs (new_phi);
4200               rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
4201                             bitsize_zero_node);
4202               epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4203               new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4204               gimple_assign_set_lhs (epilog_stmt, new_temp);
4205               gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4206
4207               /* In SLP we don't need to apply the reduction operation, so we
4208                  just collect s' values in SCALAR_RESULTS.  */
4209               if (slp_reduc)
4210                 scalar_results.safe_push (new_temp);
4211
4212               for (bit_offset = element_bitsize;
4213                    bit_offset < vec_size_in_bits;
4214                    bit_offset += element_bitsize)
4215                 {
4216                   tree bitpos = bitsize_int (bit_offset);
4217                   tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
4218                                      bitsize, bitpos);
4219
4220                   epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4221                   new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
4222                   gimple_assign_set_lhs (epilog_stmt, new_name);
4223                   gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4224
4225                   if (slp_reduc)
4226                     {
4227                       /* In SLP we don't need to apply the reduction operation,
4228                          so we just collect s' values in SCALAR_RESULTS.  */
4229                       new_temp = new_name;
4230                       scalar_results.safe_push (new_name);
4231                     }
4232                   else
4233                     {
4234                       epilog_stmt = gimple_build_assign_with_ops (code,
4235                                           new_scalar_dest, new_name, new_temp);
4236                       new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4237                       gimple_assign_set_lhs (epilog_stmt, new_temp);
4238                       gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4239                     }
4240                 }
4241             }
4242
4243           /* The only case where we need to reduce scalar results in SLP is
4244              unrolling.  If the size of SCALAR_RESULTS is greater than
4245              GROUP_SIZE, we reduce them by combining elements modulo
4246              GROUP_SIZE.  */
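          /* For illustration (assuming GROUP_SIZE = 2 and
             SCALAR_RESULTS = <s0, s1, s2, s3>, i.e. SLP unrolled by two), the
             loop below computes
               scalar_results[0] = s0 op s2;  scalar_results[1] = s1 op s3;
             leaving one combined result per group element.  */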
4247           if (slp_reduc)
4248             {
4249               tree res, first_res, new_res;
4250               gimple new_stmt;
4251             
4252               /* Reduce multiple scalar results in case of SLP unrolling.  */
4253               for (j = group_size; scalar_results.iterate (j, &res);
4254                    j++)
4255                 {
4256                   first_res = scalar_results[j % group_size];
4257                   new_stmt = gimple_build_assign_with_ops (code,
4258                                               new_scalar_dest, first_res, res);
4259                   new_res = make_ssa_name (new_scalar_dest, new_stmt);
4260                   gimple_assign_set_lhs (new_stmt, new_res);
4261                   gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
4262                   scalar_results[j % group_size] = new_res;
4263                 }
4264             }
4265           else
4266             /* Not SLP - we have one scalar to keep in SCALAR_RESULTS.  */
4267             scalar_results.safe_push (new_temp);
4268
4269           extract_scalar_result = false;
4270         }
4271     }
4272
4273   /* 2.4  Extract the final scalar result.  Create:
4274           s_out3 = extract_field <v_out2, bitpos>  */
4275
4276   if (extract_scalar_result)
4277     {
4278       tree rhs;
4279
4280       if (dump_enabled_p ())
4281         dump_printf_loc (MSG_NOTE, vect_location,
4282                          "extract scalar result\n");
4283
4284       if (BYTES_BIG_ENDIAN)
4285         bitpos = size_binop (MULT_EXPR,
4286                              bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
4287                              TYPE_SIZE (scalar_type));
4288       else
4289         bitpos = bitsize_zero_node;
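      /* For illustration: with a V4SI result and a 32-bit scalar type this
         extracts from bit position (4 - 1) * 32 = 96 on big-endian targets
         and from bit position 0 otherwise.  */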
4290
4291       rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
4292       epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
4293       new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
4294       gimple_assign_set_lhs (epilog_stmt, new_temp);
4295       gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4296       scalar_results.safe_push (new_temp);
4297     }
4298   
4299 vect_finalize_reduction:
4300
4301   if (double_reduc)
4302     loop = loop->inner;
4303
4304   /* 2.5 Adjust the final result by the initial value of the reduction
4305          variable. (When such adjustment is not needed, then
4306          'adjustment_def' is zero).  For example, if code is PLUS we create:
4307          new_temp = loop_exit_def + adjustment_def  */
4308
4309   if (adjustment_def)
4310     {
4311       gcc_assert (!slp_reduc);
4312       if (nested_in_vect_loop)
4313         {
4314           new_phi = new_phis[0];
4315           gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
4316           expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
4317           new_dest = vect_create_destination_var (scalar_dest, vectype);
4318         }
4319       else
4320         {
4321           new_temp = scalar_results[0];
4322           gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
4323           expr = build2 (code, scalar_type, new_temp, adjustment_def);
4324           new_dest = vect_create_destination_var (scalar_dest, scalar_type);
4325         }
4326
4327       epilog_stmt = gimple_build_assign (new_dest, expr);
4328       new_temp = make_ssa_name (new_dest, epilog_stmt);
4329       gimple_assign_set_lhs (epilog_stmt, new_temp);
4330       SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
4331       gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
4332       if (nested_in_vect_loop)
4333         {
4334           set_vinfo_for_stmt (epilog_stmt,
4335                               new_stmt_vec_info (epilog_stmt, loop_vinfo,
4336                                                  NULL));
4337           STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
4338                 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
4339
4340           if (!double_reduc)
4341             scalar_results.quick_push (new_temp);
4342           else
4343             scalar_results[0] = new_temp;
4344         }
4345       else
4346         scalar_results[0] = new_temp;
4347
4348       new_phis[0] = epilog_stmt;
4349     }
4350
4351   /* 2.6  Handle the loop-exit phis.  Replace the uses of scalar loop-exit
4352           phis with new adjusted scalar results, i.e., replace use <s_out0>
4353           with use <s_out4>.        
4354
4355      Transform:
4356         loop_exit:
4357           s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
4358           v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
4359           v_out2 = reduce <v_out1>
4360           s_out3 = extract_field <v_out2, 0>
4361           s_out4 = adjust_result <s_out3>
4362           use <s_out0>
4363           use <s_out0>
4364
4365      into:
4366
4367         loop_exit:
4368           s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
4369           v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
4370           v_out2 = reduce <v_out1>
4371           s_out3 = extract_field <v_out2, 0>
4372           s_out4 = adjust_result <s_out3>
4373           use <s_out4>  
4374           use <s_out4> */
4375
4376
4377   /* In an SLP reduction chain we reduce the vector results into one vector if
4378      necessary, hence we set GROUP_SIZE to 1 here.  SCALAR_DEST is the LHS of
4379      the last stmt in the reduction chain, since we are looking for the loop
4380      exit phi node.  */
4381   if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4382     {
4383       scalar_dest = gimple_assign_lhs (
4384                         SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]);
4385       group_size = 1;
4386     }
4387
4388   /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
4389      case GROUP_SIZE is greater than the vectorization factor).  Therefore, we
4390      need to match SCALAR_RESULTS with the corresponding statements.  The first
4391      (GROUP_SIZE / number of new vector stmts) scalar results correspond to
4392      the first vector stmt, etc.
4393      (RATIO is equal to (GROUP_SIZE / number of new vector stmts)).  */
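  /* For illustration (assuming GROUP_SIZE = 4 and two entries in NEW_PHIS),
     RATIO is 2, so scalar results 0-1 are matched with NEW_PHIS[0] and scalar
     results 2-3 with NEW_PHIS[1] in the loop below.  */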
4394   if (group_size > new_phis.length ())
4395     {
4396       ratio = group_size / new_phis.length ();
4397       gcc_assert (!(group_size % new_phis.length ()));
4398     }
4399   else
4400     ratio = 1;
4401
4402   for (k = 0; k < group_size; k++)
4403     {
4404       if (k % ratio == 0)
4405         {
4406           epilog_stmt = new_phis[k / ratio];
4407           reduction_phi = reduction_phis[k / ratio];
4408           if (double_reduc)
4409             inner_phi = inner_phis[k / ratio];
4410         }
4411
4412       if (slp_reduc)
4413         {
4414           gimple current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k];
4415
4416           orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
4417           /* SLP statements can't participate in patterns.  */
4418           gcc_assert (!orig_stmt);
4419           scalar_dest = gimple_assign_lhs (current_stmt);
4420         }
4421
4422       phis.create (3);
4423       /* Find the loop-closed-use at the loop exit of the original scalar
4424          result.  (The reduction result is expected to have two immediate uses -
4425          one at the latch block, and one at the loop exit).  */
4426       FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4427         if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
4428             && !is_gimple_debug (USE_STMT (use_p)))
4429           phis.safe_push (USE_STMT (use_p));
4430
4431       /* While we expect to have found an exit_phi because of loop-closed-ssa
4432          form we can end up without one if the scalar cycle is dead.  */
4433
4434       FOR_EACH_VEC_ELT (phis, i, exit_phi)
4435         {
4436           if (outer_loop)
4437             {
4438               stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
4439               gimple vect_phi;
4440
4441               /* FORNOW. Currently not supporting the case that an inner-loop
4442                  reduction is not used in the outer-loop (but only outside the
4443                  outer-loop), unless it is a double reduction.  */
4444               gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
4445                            && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
4446                           || double_reduc);
4447
4448               STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
4449               if (!double_reduc
4450                   || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
4451                       != vect_double_reduction_def)
4452                 continue;
4453
4454               /* Handle double reduction:
4455
4456                  stmt1: s1 = phi <s0, s2>  - double reduction phi (outer loop)
4457                  stmt2:   s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
4458                  stmt3:   s4 = use (s3)     - (regular) reduc stmt (inner loop)
4459                  stmt4: s2 = phi <s4>      - double reduction stmt (outer loop)
4460
4461                  At that point the regular reduction (stmt2 and stmt3) is
4462                  already vectorized, as well as the exit phi node, stmt4.
4463                  Here we vectorize the phi node of double reduction, stmt1, and
4464                  update all relevant statements.  */
4465
4466               /* Go through all the uses of s2 to find double reduction phi
4467                  node, i.e., stmt1 above.  */
4468               orig_name = PHI_RESULT (exit_phi);
4469               FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4470                 {
4471                   stmt_vec_info use_stmt_vinfo;
4472                   stmt_vec_info new_phi_vinfo;
4473                   tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
4474                   basic_block bb = gimple_bb (use_stmt);
4475                   gimple use;
4476
4477                   /* Check that USE_STMT is really a double reduction phi
4478                      node.  */
4479                   if (gimple_code (use_stmt) != GIMPLE_PHI
4480                       || gimple_phi_num_args (use_stmt) != 2
4481                       || bb->loop_father != outer_loop)
4482                     continue;
4483                   use_stmt_vinfo = vinfo_for_stmt (use_stmt);
4484                   if (!use_stmt_vinfo
4485                       || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
4486                           != vect_double_reduction_def)
4487                     continue;
4488
4489                   /* Create vector phi node for double reduction:
4490                      vs1 = phi <vs0, vs2>
4491                      vs1 was created previously in this function by a call to
4492                        vect_get_vec_def_for_operand and is stored in
4493                        vec_initial_def;
4494                      vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
4495                      vs0 is created here.  */
4496
4497                   /* Create vector phi node.  */
4498                   vect_phi = create_phi_node (vec_initial_def, bb);
4499                   new_phi_vinfo = new_stmt_vec_info (vect_phi,
4500                                     loop_vec_info_for_loop (outer_loop), NULL);
4501                   set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
4502
4503                   /* Create vs0 - initial def of the double reduction phi.  */
4504                   preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
4505                                              loop_preheader_edge (outer_loop));
4506                   init_def = get_initial_def_for_reduction (stmt,
4507                                                           preheader_arg, NULL);
4508                   vect_phi_init = vect_init_vector (use_stmt, init_def,
4509                                                     vectype, NULL);
4510
4511                   /* Update phi node arguments with vs0 and vs2.  */
4512                   add_phi_arg (vect_phi, vect_phi_init,
4513                                loop_preheader_edge (outer_loop),
4514                                UNKNOWN_LOCATION);
4515                   add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
4516                                loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
4517                   if (dump_enabled_p ())
4518                     {
4519                       dump_printf_loc (MSG_NOTE, vect_location,
4520                                        "created double reduction phi node: ");
4521                       dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
4522                       dump_printf (MSG_NOTE, "\n");
4523                     }
4524
4525                   vect_phi_res = PHI_RESULT (vect_phi);
4526
4527                   /* Replace the use, i.e., set the correct vs1 in the regular
4528                      reduction phi node.  FORNOW, NCOPIES is always 1, so the
4529                      loop is redundant.  */
4530                   use = reduction_phi;
4531                   for (j = 0; j < ncopies; j++)
4532                     {
4533                       edge pr_edge = loop_preheader_edge (loop);
4534                       SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
4535                       use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
4536                     }
4537                 }
4538             }
4539         }
4540
4541       phis.release ();
4542       if (nested_in_vect_loop)
4543         {
4544           if (double_reduc)
4545             loop = outer_loop;
4546           else
4547             continue;
4548         }
4549
4550       phis.create (3);
4551       /* Find the loop-closed-use at the loop exit of the original scalar
4552          result.  (The reduction result is expected to have two immediate uses,
4553          one at the latch block, and one at the loop exit).  For double
4554          reductions we are looking for exit phis of the outer loop.  */
4555       FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4556         {
4557           if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
4558             {
4559               if (!is_gimple_debug (USE_STMT (use_p)))
4560                 phis.safe_push (USE_STMT (use_p));
4561             }
4562           else
4563             {
4564               if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
4565                 {
4566                   tree phi_res = PHI_RESULT (USE_STMT (use_p));
4567
4568                   FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
4569                     {
4570                       if (!flow_bb_inside_loop_p (loop,
4571                                              gimple_bb (USE_STMT (phi_use_p)))
4572                           && !is_gimple_debug (USE_STMT (phi_use_p)))
4573                         phis.safe_push (USE_STMT (phi_use_p));
4574                     }
4575                 }
4576             }
4577         }
4578
4579       FOR_EACH_VEC_ELT (phis, i, exit_phi)
4580         {
4581           /* Replace the uses:  */
4582           orig_name = PHI_RESULT (exit_phi);
4583           scalar_result = scalar_results[k];
4584           FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
4585             FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
4586               SET_USE (use_p, scalar_result);
4587         }
4588
4589       phis.release ();
4590     }
4591
4592   scalar_results.release ();
4593   inner_phis.release ();
4594   new_phis.release ();
4595 }
4596
4597
4598 /* Function vectorizable_reduction.
4599
4600    Check if STMT performs a reduction operation that can be vectorized.
4601    If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4602    stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4603    Return FALSE if not a vectorizable STMT, TRUE otherwise.
4604
4605    This function also handles reduction idioms (patterns) that have been
4606    recognized in advance during vect_pattern_recog.  In this case, STMT may be
4607    of this form:
4608      X = pattern_expr (arg0, arg1, ..., X)
4609    and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
4610    sequence that had been detected and replaced by the pattern-stmt (STMT).
4611
4612    In some cases of reduction patterns, the type of the reduction variable X is
4613    different than the type of the other arguments of STMT.
4614    In such cases, the vectype that is used when transforming STMT into a vector
4615    stmt is different than the vectype that is used to determine the
4616    vectorization factor, because it consists of a different number of elements
4617    than the actual number of elements that are being operated upon in parallel.
4618
4619    For example, consider an accumulation of shorts into an int accumulator.
4620    On some targets it's possible to vectorize this pattern operating on 8
4621    shorts at a time (hence, the vectype for purposes of determining the
4622    vectorization factor should be V8HI); on the other hand, the vectype that
4623    is used to create the vector form is actually V4SI (the type of the result).
4624
4625    Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
4626    indicates what is the actual level of parallelism (V8HI in the example), so
4627    that the right vectorization factor would be derived.  This vectype
4628    corresponds to the type of arguments to the reduction stmt, and should *NOT*
4629    be used to create the vectorized stmt.  The right vectype for the vectorized
4630    stmt is obtained from the type of the result X:
4631         get_vectype_for_scalar_type (TREE_TYPE (X))
4632
4633    This means that, contrary to "regular" reductions (or "regular" stmts in
4634    general), the following equation:
4635       STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
4636    does *NOT* necessarily hold for reduction patterns.  */
4637
4638 bool
4639 vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
4640                         gimple *vec_stmt, slp_tree slp_node)
4641 {
4642   tree vec_dest;
4643   tree scalar_dest;
4644   tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
4645   stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4646   tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4647   tree vectype_in = NULL_TREE;
4648   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4649   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4650   enum tree_code code, orig_code, epilog_reduc_code;
4651   enum machine_mode vec_mode;
4652   int op_type;
4653   optab optab, reduc_optab;
4654   tree new_temp = NULL_TREE;
4655   tree def;
4656   gimple def_stmt;
4657   enum vect_def_type dt;
4658   gimple new_phi = NULL;
4659   tree scalar_type;
4660   bool is_simple_use;
4661   gimple orig_stmt;
4662   stmt_vec_info orig_stmt_info;
4663   tree expr = NULL_TREE;
4664   int i;
4665   int ncopies;
4666   int epilog_copies;
4667   stmt_vec_info prev_stmt_info, prev_phi_info;
4668   bool single_defuse_cycle = false;
4669   tree reduc_def = NULL_TREE;
4670   gimple new_stmt = NULL;
4671   int j;
4672   tree ops[3];
4673   bool nested_cycle = false, found_nested_cycle_def = false;
4674   gimple reduc_def_stmt = NULL;
4675   /* The default is that the reduction variable is the last in statement.  */
4676   int reduc_index = 2;
4677   bool double_reduc = false, dummy;
4678   basic_block def_bb;
4679   struct loop * def_stmt_loop, *outer_loop = NULL;
4680   tree def_arg;
4681   gimple def_arg_stmt;
4682   vec<tree> vec_oprnds0 = vNULL;
4683   vec<tree> vec_oprnds1 = vNULL;
4684   vec<tree> vect_defs = vNULL;
4685   vec<gimple> phis = vNULL;
4686   int vec_num;
4687   tree def0, def1, tem, op0, op1 = NULL_TREE;
4688
4689   /* In case of reduction chain we switch to the first stmt in the chain, but
4690      we don't update STMT_INFO, since only the last stmt is marked as reduction
4691      and has reduction properties.  */
4692   if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
4693     stmt = GROUP_FIRST_ELEMENT (stmt_info);
4694
4695   if (nested_in_vect_loop_p (loop, stmt))
4696     {
4697       outer_loop = loop;
4698       loop = loop->inner;
4699       nested_cycle = true;
4700     }
4701
4702   /* 1. Is vectorizable reduction?  */
4703   /* Not supportable if the reduction variable is used in the loop, unless
4704      it's a reduction chain.  */
4705   if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
4706       && !GROUP_FIRST_ELEMENT (stmt_info))
4707     return false;
4708
4709   /* Reductions that are not used even in an enclosing outer-loop,
4710      are expected to be "live" (used out of the loop).  */
4711   if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
4712       && !STMT_VINFO_LIVE_P (stmt_info))
4713     return false;
4714
4715   /* Make sure it was already recognized as a reduction computation.  */
4716   if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
4717       && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
4718     return false;
4719
4720   /* 2. Has this been recognized as a reduction pattern?
4721
4722      Check if STMT represents a pattern that has been recognized
4723      in earlier analysis stages.  For stmts that represent a pattern,
4724      the STMT_VINFO_RELATED_STMT field records the last stmt in
4725      the original sequence that constitutes the pattern.  */
4726
4727   orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
4728   if (orig_stmt)
4729     {
4730       orig_stmt_info = vinfo_for_stmt (orig_stmt);
4731       gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4732       gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
4733     }
4734
4735   /* 3. Check the operands of the operation.  The first operands are defined
4736         inside the loop body. The last operand is the reduction variable,
4737         which is defined by the loop-header-phi.  */
4738
4739   gcc_assert (is_gimple_assign (stmt));
4740
4741   /* Flatten RHS.  */
4742   switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
4743     {
4744     case GIMPLE_SINGLE_RHS:
4745       op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
4746       if (op_type == ternary_op)
4747         {
4748           tree rhs = gimple_assign_rhs1 (stmt);
4749           ops[0] = TREE_OPERAND (rhs, 0);
4750           ops[1] = TREE_OPERAND (rhs, 1);
4751           ops[2] = TREE_OPERAND (rhs, 2);
4752           code = TREE_CODE (rhs);
4753         }
4754       else
4755         return false;
4756       break;
4757
4758     case GIMPLE_BINARY_RHS:
4759       code = gimple_assign_rhs_code (stmt);
4760       op_type = TREE_CODE_LENGTH (code);
4761       gcc_assert (op_type == binary_op);
4762       ops[0] = gimple_assign_rhs1 (stmt);
4763       ops[1] = gimple_assign_rhs2 (stmt);
4764       break;
4765
4766     case GIMPLE_TERNARY_RHS:
4767       code = gimple_assign_rhs_code (stmt);
4768       op_type = TREE_CODE_LENGTH (code);
4769       gcc_assert (op_type == ternary_op);
4770       ops[0] = gimple_assign_rhs1 (stmt);
4771       ops[1] = gimple_assign_rhs2 (stmt);
4772       ops[2] = gimple_assign_rhs3 (stmt);
4773       break;
4774
4775     case GIMPLE_UNARY_RHS:
4776       return false;
4777
4778     default:
4779       gcc_unreachable ();
4780     }
4781
4782   if (code == COND_EXPR && slp_node)
4783     return false;
4784
4785   scalar_dest = gimple_assign_lhs (stmt);
4786   scalar_type = TREE_TYPE (scalar_dest);
4787   if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
4788       && !SCALAR_FLOAT_TYPE_P (scalar_type))
4789     return false;
4790
4791   /* Do not try to vectorize bit-precision reductions.  */
4792   if ((TYPE_PRECISION (scalar_type)
4793        != GET_MODE_PRECISION (TYPE_MODE (scalar_type))))
4794     return false;
4795
4796   /* All uses but the last are expected to be defined in the loop.
4797      The last use is the reduction variable.  In case of nested cycle this
4798      assumption is not true: we use reduc_index to record the index of the
4799      reduction variable.  */
4800   for (i = 0; i < op_type - 1; i++)
4801     {
4802       /* The condition of COND_EXPR is checked in vectorizable_condition().  */
4803       if (i == 0 && code == COND_EXPR)
4804         continue;
4805
4806       is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
4807                                             &def_stmt, &def, &dt, &tem);
4808       if (!vectype_in)
4809         vectype_in = tem;
4810       gcc_assert (is_simple_use);
4811
4812       if (dt != vect_internal_def
4813           && dt != vect_external_def
4814           && dt != vect_constant_def
4815           && dt != vect_induction_def
4816           && !(dt == vect_nested_cycle && nested_cycle))
4817         return false;
4818
4819       if (dt == vect_nested_cycle)
4820         {
4821           found_nested_cycle_def = true;
4822           reduc_def_stmt = def_stmt;
4823           reduc_index = i;
4824         }
4825     }
4826
4827   is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
4828                                         &def_stmt, &def, &dt, &tem);
4829   if (!vectype_in)
4830     vectype_in = tem;
4831   gcc_assert (is_simple_use);
4832   if (!(dt == vect_reduction_def
4833         || dt == vect_nested_cycle
4834         || ((dt == vect_internal_def || dt == vect_external_def
4835              || dt == vect_constant_def || dt == vect_induction_def)
4836             && nested_cycle && found_nested_cycle_def)))
4837     {
4838       /* For pattern recognized stmts, orig_stmt might be a reduction,
4839          but some helper statements for the pattern might not, or
4840          might be COND_EXPRs with reduction uses in the condition.  */
4841       gcc_assert (orig_stmt);
4842       return false;
4843     }
4844   if (!found_nested_cycle_def)
4845     reduc_def_stmt = def_stmt;
4846
4847   gcc_assert (gimple_code (reduc_def_stmt) == GIMPLE_PHI);
4848   if (orig_stmt)
4849     gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
4850                                                        reduc_def_stmt,
4851                                                        !nested_cycle,
4852                                                        &dummy));
4853   else
4854     {
4855       gimple tmp = vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
4856                                              !nested_cycle, &dummy);
4857       /* We changed STMT to be the first stmt in the reduction chain, hence we
4858          check that in this case the first element in the chain is STMT.  */
4859       gcc_assert (stmt == tmp
4860                   || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
4861     }
4862
4863   if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
4864     return false;
4865
4866   if (slp_node || PURE_SLP_STMT (stmt_info))
4867     ncopies = 1;
4868   else
4869     ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4870                / TYPE_VECTOR_SUBPARTS (vectype_in));
4871
4872   gcc_assert (ncopies >= 1);
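  /* For illustration (assuming a vectorization factor of 8 and a V4SI
     VECTYPE_IN, i.e. 4 subparts), ncopies is 8 / 4 = 2: the reduction stmt is
     replicated twice to cover all 8 scalar iterations per vector iteration.  */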
4873
4874   vec_mode = TYPE_MODE (vectype_in);
4875
4876   if (code == COND_EXPR)
4877     {
4878       if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0, NULL))
4879         {
4880           if (dump_enabled_p ())
4881             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4882                              "unsupported condition in reduction\n");
4883
4884             return false;
4885         }
4886     }
4887   else
4888     {
4889       /* 4. Supportable by target?  */
4890
4891       if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
4892           || code == LROTATE_EXPR || code == RROTATE_EXPR)
4893         {
4894           /* Shifts and rotates are only supported by vectorizable_shifts,
4895              not vectorizable_reduction.  */
4896           if (dump_enabled_p ())
4897             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4898                              "unsupported shift or rotation.\n");
4899           return false;
4900         }
4901
4902       /* 4.1. check support for the operation in the loop  */
4903       optab = optab_for_tree_code (code, vectype_in, optab_default);
4904       if (!optab)
4905         {
4906           if (dump_enabled_p ())
4907             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4908                              "no optab.\n");
4909
4910           return false;
4911         }
4912
4913       if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
4914         {
4915           if (dump_enabled_p ())
4916             dump_printf (MSG_NOTE, "op not supported by target.\n");
4917
4918           if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4919               || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4920                   < vect_min_worthwhile_factor (code))
4921             return false;
4922
4923           if (dump_enabled_p ())
4924             dump_printf (MSG_NOTE, "proceeding using word mode.\n");
4925         }
4926
4927       /* Worthwhile without SIMD support?  */
4928       if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
4929           && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
4930              < vect_min_worthwhile_factor (code))
4931         {
4932           if (dump_enabled_p ())
4933             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4934                              "not worthwhile without SIMD support.\n");
4935
4936           return false;
4937         }
4938     }
4939
4940   /* 4.2. Check support for the epilog operation.
4941
4942           If STMT represents a reduction pattern, then the type of the
4943           reduction variable may be different than the type of the rest
4944           of the arguments.  For example, consider the case of accumulation
4945           of shorts into an int accumulator; The original code:
4946                         S1: int_a = (int) short_a;
4947           orig_stmt->   S2: int_acc = plus <int_a ,int_acc>;
4948
4949           was replaced with:
4950                         STMT: int_acc = widen_sum <short_a, int_acc>
4951
4952           This means that:
4953           1. The tree-code that is used to create the vector operation in the
4954              epilog code (that reduces the partial results) is not the
4955              tree-code of STMT, but is rather the tree-code of the original
4956              stmt from the pattern that STMT is replacing.  I.e, in the example
4957              above we want to use 'widen_sum' in the loop, but 'plus' in the
4958              epilog.
4959           2. The type (mode) we use to check available target support
4960              for the vector operation to be created in the *epilog*, is
4961              determined by the type of the reduction variable (in the example
4962              above we'd check this: optab_handler (plus_optab, vect_int_mode)).
4963              However the type (mode) we use to check available target support
4964              for the vector operation to be created *inside the loop*, is
4965              determined by the type of the other arguments to STMT (in the
4966              example we'd check this: optab_handler (widen_sum_optab,
4967              vect_short_mode)).
4968
4969           This is contrary to "regular" reductions, in which the types of all
4970           the arguments are the same as the type of the reduction variable.
4971           For "regular" reductions we can therefore use the same vector type
4972           (and also the same tree-code) when generating the epilog code and
4973           when generating the code inside the loop.  */
4974
4975   if (orig_stmt)
4976     {
4977       /* This is a reduction pattern: get the vectype from the type of the
4978          reduction variable, and get the tree-code from orig_stmt.  */
4979       orig_code = gimple_assign_rhs_code (orig_stmt);
4980       gcc_assert (vectype_out);
4981       vec_mode = TYPE_MODE (vectype_out);
4982     }
4983   else
4984     {
4985       /* Regular reduction: the same vectype and tree-code that are used for
4986          the vector code inside the loop can also be used for the epilog code. */
4987       orig_code = code;
4988     }
4989
4990   if (nested_cycle)
4991     {
4992       def_bb = gimple_bb (reduc_def_stmt);
4993       def_stmt_loop = def_bb->loop_father;
4994       def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4995                                        loop_preheader_edge (def_stmt_loop));
4996       if (TREE_CODE (def_arg) == SSA_NAME
4997           && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
4998           && gimple_code (def_arg_stmt) == GIMPLE_PHI
4999           && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
5000           && vinfo_for_stmt (def_arg_stmt)
5001           && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
5002               == vect_double_reduction_def)
5003         double_reduc = true;
5004     }
5005
5006   epilog_reduc_code = ERROR_MARK;
5007   if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
5008     {
5009       reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
5010                                          optab_default);
5011       if (!reduc_optab)
5012         {
5013           if (dump_enabled_p ())
5014             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5015                              "no optab for reduction.\n");
5016
5017           epilog_reduc_code = ERROR_MARK;
5018         }
5019
5020       if (reduc_optab
5021           && optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
5022         {
5023           if (dump_enabled_p ())
5024             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5025                              "reduc op not supported by target.\n");
5026
5027           epilog_reduc_code = ERROR_MARK;
5028         }
5029     }
5030   else
5031     {
5032       if (!nested_cycle || double_reduc)
5033         {
5034           if (dump_enabled_p ())
5035             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5036                              "no reduc code for scalar code.\n");
5037
5038           return false;
5039         }
5040     }
5041
5042   if (double_reduc && ncopies > 1)
5043     {
5044       if (dump_enabled_p ())
5045         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5046                          "multiple types in double reduction\n");
5047
5048       return false;
5049     }
5050
5051   /* In case of widening multiplication by a constant, we update the type
5052      of the constant to be the type of the other operand.  We check that the
5053      constant fits the type in the pattern recognition pass.  */
5054   if (code == DOT_PROD_EXPR
5055       && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
5056     {
5057       if (TREE_CODE (ops[0]) == INTEGER_CST)
5058         ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
5059       else if (TREE_CODE (ops[1]) == INTEGER_CST)
5060         ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
5061       else
5062         {
5063           if (dump_enabled_p ())
5064             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5065                              "invalid types in dot-prod\n");
5066
5067           return false;
5068         }
5069     }
5070
5071   if (!vec_stmt) /* transformation not required.  */
5072     {
5073       if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
5074         return false;
5075       STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
5076       return true;
5077     }
5078
5079   /** Transform.  **/
5080
5081   if (dump_enabled_p ())
5082     dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
5083
5084   /* FORNOW: Multiple types are not supported for condition.  */
5085   if (code == COND_EXPR)
5086     gcc_assert (ncopies == 1);
5087
5088   /* Create the destination vector  */
5089   vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
5090
5091   /* In case the vectorization factor (VF) is bigger than the number
5092      of elements that we can fit in a vectype (nunits), we have to generate
5093      more than one vector stmt - i.e. we need to "unroll" the
5094      vector stmt by a factor VF/nunits.  For more details see documentation
5095      in vectorizable_operation.  */
5096
5097   /* If the reduction is used in an outer loop we need to generate
5098      VF intermediate results, like so (e.g. for ncopies=2):
5099         r0 = phi (init, r0)
5100         r1 = phi (init, r1)
5101         r0 = x0 + r0;
5102         r1 = x1 + r1;
5103     (i.e. we generate VF results in 2 registers).
5104     In this case we have a separate def-use cycle for each copy, and therefore
5105     for each copy we get the vector def for the reduction variable from the
5106     respective phi node created for this copy.
5107
5108     Otherwise (the reduction is unused in the loop nest), we can combine
5109     together intermediate results, like so (e.g. for ncopies=2):
5110         r = phi (init, r)
5111         r = x0 + r;
5112         r = x1 + r;
5113    (i.e. we generate VF/2 results in a single register).
5114    In this case for each copy we get the vector def for the reduction variable
5115    from the vectorized reduction operation generated in the previous iteration.
5116   */
5117
5118   if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
5119     {
5120       single_defuse_cycle = true;
5121       epilog_copies = 1;
5122     }
5123   else
5124     epilog_copies = ncopies;
5125
5126   prev_stmt_info = NULL;
5127   prev_phi_info = NULL;
5128   if (slp_node)
5129     {
5130       vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5131       gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out) 
5132                   == TYPE_VECTOR_SUBPARTS (vectype_in));
5133     }
5134   else
5135     {
5136       vec_num = 1;
5137       vec_oprnds0.create (1);
5138       if (op_type == ternary_op)
5139         vec_oprnds1.create (1);
5140     }
5141
5142   phis.create (vec_num);
5143   vect_defs.create (vec_num);
5144   if (!slp_node)
5145     vect_defs.quick_push (NULL_TREE);
5146
5147   for (j = 0; j < ncopies; j++)
5148     {
5149       if (j == 0 || !single_defuse_cycle)
5150         {
5151           for (i = 0; i < vec_num; i++)
5152             {
5153               /* Create the reduction-phi that defines the reduction
5154                  operand.  */
5155               new_phi = create_phi_node (vec_dest, loop->header);
5156               set_vinfo_for_stmt (new_phi,
5157                                   new_stmt_vec_info (new_phi, loop_vinfo,
5158                                                      NULL));
5159                if (j == 0 || slp_node)
5160                  phis.quick_push (new_phi);
5161             }
5162         }
5163
5164       if (code == COND_EXPR)
5165         {
5166           gcc_assert (!slp_node);
5167           vectorizable_condition (stmt, gsi, vec_stmt, 
5168                                   PHI_RESULT (phis[0]), 
5169                                   reduc_index, NULL);
5170           /* Multiple types are not supported for condition.  */
5171           break;
5172         }
5173
5174       /* Handle uses.  */
5175       if (j == 0)
5176         {
5177           op0 = ops[!reduc_index];
5178           if (op_type == ternary_op)
5179             {
5180               if (reduc_index == 0)
5181                 op1 = ops[2];
5182               else
5183                 op1 = ops[1];
5184             }
5185
5186           if (slp_node)
5187             vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5188                                slp_node, -1);
5189           else
5190             {
5191               loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
5192                                                             stmt, NULL);
5193               vec_oprnds0.quick_push (loop_vec_def0);
5194               if (op_type == ternary_op)
5195                {
5196                  loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt,
5197                                                                NULL);
5198                  vec_oprnds1.quick_push (loop_vec_def1);
5199                }
5200             }
5201         }
5202       else
5203         {
5204           if (!slp_node)
5205             {
5206               enum vect_def_type dt;
5207               gimple dummy_stmt;
5208               tree dummy;
5209
5210               vect_is_simple_use (ops[!reduc_index], stmt, loop_vinfo, NULL,
5211                                   &dummy_stmt, &dummy, &dt);
5212               loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt,
5213                                                               loop_vec_def0);
5214               vec_oprnds0[0] = loop_vec_def0;
5215               if (op_type == ternary_op)
5216                 {
5217                   vect_is_simple_use (op1, stmt, loop_vinfo, NULL, &dummy_stmt,
5218                                       &dummy, &dt);
5219                   loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
5220                                                                 loop_vec_def1);
5221                   vec_oprnds1[0] = loop_vec_def1;
5222                 }
5223             }
5224
5225           if (single_defuse_cycle)
5226             reduc_def = gimple_assign_lhs (new_stmt);
5227
5228           STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
5229         }
5230
5231       FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
5232         {
5233           if (slp_node)
5234             reduc_def = PHI_RESULT (phis[i]);
5235           else
5236             {
5237               if (!single_defuse_cycle || j == 0)
5238                 reduc_def = PHI_RESULT (new_phi);
5239             }
5240
5241           def1 = ((op_type == ternary_op)
5242                   ? vec_oprnds1[i] : NULL);
5243           if (op_type == binary_op)
5244             {
5245               if (reduc_index == 0)
5246                 expr = build2 (code, vectype_out, reduc_def, def0);
5247               else
5248                 expr = build2 (code, vectype_out, def0, reduc_def);
5249             }
5250           else
5251             {
5252               if (reduc_index == 0)
5253                 expr = build3 (code, vectype_out, reduc_def, def0, def1);
5254               else
5255                 {
5256                   if (reduc_index == 1)
5257                     expr = build3 (code, vectype_out, def0, reduc_def, def1);
5258                   else
5259                     expr = build3 (code, vectype_out, def0, def1, reduc_def);
5260                 }
5261             }
5262
5263           new_stmt = gimple_build_assign (vec_dest, expr);
5264           new_temp = make_ssa_name (vec_dest, new_stmt);
5265           gimple_assign_set_lhs (new_stmt, new_temp);
5266           vect_finish_stmt_generation (stmt, new_stmt, gsi);
5267
5268           if (slp_node)
5269             {
5270               SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5271               vect_defs.quick_push (new_temp);
5272             }
5273           else
5274             vect_defs[0] = new_temp;
5275         }
5276
5277       if (slp_node)
5278         continue;
5279
5280       if (j == 0)
5281         STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5282       else
5283         STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5284
5285       prev_stmt_info = vinfo_for_stmt (new_stmt);
5286       prev_phi_info = vinfo_for_stmt (new_phi);
5287     }
5288
5289   /* Finalize the reduction-phi (set its arguments) and create the
5290      epilog reduction code.  */
5291   if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
5292     {
5293       new_temp = gimple_assign_lhs (*vec_stmt);
5294       vect_defs[0] = new_temp;
5295     }
5296
5297   vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
5298                                     epilog_reduc_code, phis, reduc_index,
5299                                     double_reduc, slp_node);
5300
5301   phis.release ();
5302   vect_defs.release ();
5303   vec_oprnds0.release ();
5304   vec_oprnds1.release ();
5305
5306   return true;
5307 }
5308
5309 /* Function vect_min_worthwhile_factor.
5310
5311    For a loop where we could vectorize the operation indicated by CODE,
5312    return the minimum vectorization factor that makes it worthwhile
5313    to use generic vectors.  */
5314 int
5315 vect_min_worthwhile_factor (enum tree_code code)
5316 {
5317   switch (code)
5318     {
5319     case PLUS_EXPR:
5320     case MINUS_EXPR:
5321     case NEGATE_EXPR:
5322       return 4;
5323
5324     case BIT_AND_EXPR:
5325     case BIT_IOR_EXPR:
5326     case BIT_XOR_EXPR:
5327     case BIT_NOT_EXPR:
5328       return 2;
5329
5330     default:
5331       return INT_MAX;
5332     }
5333 }
5334
5335
5336 /* Function vectorizable_induction
5337
5338    Check if PHI performs an induction computation that can be vectorized.
5339    If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
5340    phi to replace it, put it in VEC_STMT, and add it to the same basic block.
5341    Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
5342
5343 bool
5344 vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
5345                         gimple *vec_stmt)
5346 {
5347   stmt_vec_info stmt_info = vinfo_for_stmt (phi);
5348   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5349   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5350   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5351   int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5352   int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5353   tree vec_def;
5354
5355   gcc_assert (ncopies >= 1);
5356   /* FORNOW. These restrictions should be relaxed.  */
5357   if (nested_in_vect_loop_p (loop, phi))
5358     {
5359       imm_use_iterator imm_iter;
5360       use_operand_p use_p;
5361       gimple exit_phi;
5362       edge latch_e;
5363       tree loop_arg;
5364
5365       if (ncopies > 1)
5366         {
5367           if (dump_enabled_p ())
5368             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5369                              "multiple types in nested loop.\n");
5370           return false;
5371         }
5372
5373       exit_phi = NULL;
5374       latch_e = loop_latch_edge (loop->inner);
5375       loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
5376       FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
5377         {
5378           if (!flow_bb_inside_loop_p (loop->inner,
5379                                       gimple_bb (USE_STMT (use_p))))
5380             {
5381               exit_phi = USE_STMT (use_p);
5382               break;
5383             }
5384         }
5385       if (exit_phi)
5386         {
5387           stmt_vec_info exit_phi_vinfo  = vinfo_for_stmt (exit_phi);
5388           if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
5389                 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
5390             {
5391               if (dump_enabled_p ())
5392                 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5393                                  "inner-loop induction only used outside "
5394                                  "of the outer vectorized loop.\n");
5395               return false;
5396             }
5397         }
5398     }
5399
5400   if (!STMT_VINFO_RELEVANT_P (stmt_info))
5401     return false;
5402
5403   /* FORNOW: SLP not supported.  */
5404   if (STMT_SLP_TYPE (stmt_info))
5405     return false;
5406
5407   gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
5408
5409   if (gimple_code (phi) != GIMPLE_PHI)
5410     return false;
5411
5412   if (!vec_stmt) /* transformation not required.  */
5413     {
5414       STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
5415       if (dump_enabled_p ())
5416         dump_printf_loc (MSG_NOTE, vect_location,
5417                          "=== vectorizable_induction ===\n");
5418       vect_model_induction_cost (stmt_info, ncopies);
5419       return true;
5420     }
5421
5422   /** Transform.  **/
5423
5424   if (dump_enabled_p ())
5425     dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
5426
5427   vec_def = get_initial_def_for_induction (phi);
5428   *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
5429   return true;
5430 }
5431
5432 /* Function vectorizable_live_operation.
5433
5434    STMT computes a value that is used outside the loop.  Check if
5435    it can be supported.  */
5436
5437 bool
5438 vectorizable_live_operation (gimple stmt,
5439                              gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
5440                              gimple *vec_stmt)
5441 {
5442   stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5443   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5444   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5445   int i;
5446   int op_type;
5447   tree op;
5448   tree def;
5449   gimple def_stmt;
5450   enum vect_def_type dt;
5451   enum tree_code code;
5452   enum gimple_rhs_class rhs_class;
5453
5454   gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
5455
5456   if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
5457     return false;
5458
5459   if (!is_gimple_assign (stmt))
5460     {
5461       if (gimple_call_internal_p (stmt)
5462           && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
5463           && gimple_call_lhs (stmt)
5464           && loop->simduid
5465           && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
5466           && loop->simduid
5467              == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
5468         {
5469           edge e = single_exit (loop);
5470           basic_block merge_bb = e->dest;
5471           imm_use_iterator imm_iter;
5472           use_operand_p use_p;
5473           tree lhs = gimple_call_lhs (stmt);
5474
5475           FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
5476             {
5477               gimple use_stmt = USE_STMT (use_p);
5478               if (gimple_code (use_stmt) == GIMPLE_PHI
5479                   || gimple_bb (use_stmt) == merge_bb)
5480                 {
5481                   if (vec_stmt)
5482                     {
5483                       tree vfm1
5484                         = build_int_cst (unsigned_type_node,
5485                                          loop_vinfo->vectorization_factor - 1);
5486                       SET_PHI_ARG_DEF (use_stmt, e->dest_idx, vfm1);
5487                     }
5488                   return true;
5489                 }
5490             }
5491         }
5492
5493       return false;
5494     }
5495
5496   if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5497     return false;
5498
5499   /* FORNOW. CHECKME. */
5500   if (nested_in_vect_loop_p (loop, stmt))
5501     return false;
5502
5503   code = gimple_assign_rhs_code (stmt);
5504   op_type = TREE_CODE_LENGTH (code);
5505   rhs_class = get_gimple_rhs_class (code);
5506   gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
5507   gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);
5508
5509   /* FORNOW: support only if all uses are invariant.  This means
5510      that the scalar operations can remain in place, unvectorized.
5511      The original last scalar value that they compute will be used.  */
5512
5513   for (i = 0; i < op_type; i++)
5514     {
5515       if (rhs_class == GIMPLE_SINGLE_RHS)
5516         op = TREE_OPERAND (gimple_op (stmt, 1), i);
5517       else
5518         op = gimple_op (stmt, i + 1);
5519       if (op
5520           && !vect_is_simple_use (op, stmt, loop_vinfo, NULL, &def_stmt, &def,
5521                                   &dt))
5522         {
5523           if (dump_enabled_p ())
5524             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5525                              "use not simple.\n");
5526           return false;
5527         }
5528
5529       if (dt != vect_external_def && dt != vect_constant_def)
5530         return false;
5531     }
5532
5533   /* No transformation is required for the cases we currently support.  */
5534   return true;
5535 }
5536
5537 /* Kill any debug uses outside LOOP of SSA names defined in STMT.  */
5538
5539 static void
5540 vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
5541 {
5542   ssa_op_iter op_iter;
5543   imm_use_iterator imm_iter;
5544   def_operand_p def_p;
5545   gimple ustmt;
5546
5547   FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
5548     {
5549       FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
5550         {
5551           basic_block bb;
5552
5553           if (!is_gimple_debug (ustmt))
5554             continue;
5555
5556           bb = gimple_bb (ustmt);
5557
5558           if (!flow_bb_inside_loop_p (loop, bb))
5559             {
5560               if (gimple_debug_bind_p (ustmt))
5561                 {
5562                   if (dump_enabled_p ())
5563                     dump_printf_loc (MSG_NOTE, vect_location,
5564                                      "killing debug use\n");
5565
5566                   gimple_debug_bind_reset_value (ustmt);
5567                   update_stmt (ustmt);
5568                 }
5569               else
5570                 gcc_unreachable ();
5571             }
5572         }
5573     }
5574 }
5575
5576 /* Function vect_transform_loop.
5577
5578    The analysis phase has determined that the loop is vectorizable.
5579    Vectorize the loop - create vectorized stmts to replace the scalar
5580    stmts in the loop, and update the loop exit condition.  */
5581
5582 void
5583 vect_transform_loop (loop_vec_info loop_vinfo)
5584 {
5585   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5586   basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
5587   int nbbs = loop->num_nodes;
5588   gimple_stmt_iterator si;
5589   int i;
5590   tree ratio = NULL;
5591   int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5592   bool grouped_store;
5593   bool slp_scheduled = false;
5594   unsigned int nunits;
5595   gimple stmt, pattern_stmt;
5596   gimple_seq pattern_def_seq = NULL;
5597   gimple_stmt_iterator pattern_def_si = gsi_none ();
5598   bool transform_pattern_stmt = false;
5599   bool check_profitability = false;
5600   int th;
5601   /* Record the number of iterations before we start tampering with the profile.  */
5602   gcov_type expected_iterations = expected_loop_iterations_unbounded (loop);
5603
5604   if (dump_enabled_p ())
5605     dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");
5606
5607   /* If the profile is imprecise, we have a chance to fix it up.  */
5608   if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
5609     expected_iterations = LOOP_VINFO_INT_NITERS (loop_vinfo);
5610
5611   /* Use the more conservative vectorization threshold.  If the number
5612      of iterations is constant, assume the cost check has been performed
5613      by our caller.  If the threshold makes all loops profitable that
5614      run at least the vectorization factor number of times, checking
5615      is pointless, too.  */
5616   th = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
5617          * LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 1);
5618   th = MAX (th, LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo));
5619   if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1
5620       && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
5621     {
5622       if (dump_enabled_p ())
5623         dump_printf_loc (MSG_NOTE, vect_location,
5624                          "Profitability threshold is %d loop iterations.\n",
5625                          th);
5626       check_profitability = true;
5627     }
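  /* Worked example with illustrative numbers (not taken from the sources):
     with a min-vect-loop-bound parameter of 0 and VF == 4, the first term
     gives 0 * 4 - 1 == -1; if the cost model's minimum profitable iteration
     count is 10, TH becomes MAX (-1, 10) == 10.  Since 10 >= VF - 1 and the
     iteration count is not known at compile time, the runtime profitability
     check against 10 iterations is kept.  */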
5628
5629   /* Version the loop first, if required, so the profitability check
5630      comes first.  */
5631
5632   if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
5633       || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
5634     {
5635       vect_loop_versioning (loop_vinfo, th, check_profitability);
5636       check_profitability = false;
5637     }
5638
5639   /* Peel the loop if there are data refs with unknown alignment.
5640      Only one store data ref with unknown alignment is allowed.  */
5641
5642   if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
5643     {
5644       vect_do_peeling_for_alignment (loop_vinfo, th, check_profitability);
5645       check_profitability = false;
5646     }
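  /* Whichever of the versioning or alignment-peeling transformations above
     ran first also emitted the runtime cost check (CHECK_PROFITABILITY was
     passed down and then cleared), so the check is generated at most once.  */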
5647
5648   /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
5649      compile-time constant), or it is a constant that is not divisible by the
5650      vectorization factor, then an epilog loop needs to be created.
5651      We therefore duplicate the loop: the original loop will be vectorized,
5652      and will compute the first (n/VF) iterations.  The second copy of the loop
5653      will remain scalar and will compute the remaining (n%VF) iterations.
5654      (VF is the vectorization factor).  */
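  /* For example (illustrative values only): with VF == 4 and n == 10, the
     vectorized loop executes n/VF == 2 iterations covering 8 elements, and
     the scalar epilog loop executes the remaining n%VF == 2 iterations.  */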
5655
5656   if ((int) tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
5657       < exact_log2 (vectorization_factor)
5658       || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
5659     vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
5660                                     th, check_profitability);
5661   else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
5662     ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
5663                 LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
5664   else
5665     {
5666       tree ni_name, ratio_mult_vf;
5667       vect_generate_tmps_on_preheader (loop_vinfo, &ni_name, &ratio_mult_vf,
5668                                        &ratio, NULL);
5669     }
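  /* In each of the three branches above RATIO ends up holding the iteration
     count of the vectorized loop, i.e. roughly niters / VF (illustratively,
     10 / 4 == 2); it is consumed by slpeel_make_loop_iterate_ntimes further
     below.  */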
5670
5671   /* 1) Make sure the loop header has exactly two entries
5672      2) Make sure we have a preheader basic block.  */
5673
5674   gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
5675
5676   split_edge (loop_preheader_edge (loop));
5677
5678   /* FORNOW: the vectorizer supports only loops whose body consists
5679      of one basic block (header + empty latch).  When the vectorizer
5680      supports more involved loop forms, the order in which the BBs are
5681      traversed will need to be reconsidered.  */
5682
5683   for (i = 0; i < nbbs; i++)
5684     {
5685       basic_block bb = bbs[i];
5686       stmt_vec_info stmt_info;
5687       gimple phi;
5688
5689       for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5690         {
5691           phi = gsi_stmt (si);
5692           if (dump_enabled_p ())
5693             {
5694               dump_printf_loc (MSG_NOTE, vect_location,
5695                                "------>vectorizing phi: ");
5696               dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
5697               dump_printf (MSG_NOTE, "\n");
5698             }
5699           stmt_info = vinfo_for_stmt (phi);
5700           if (!stmt_info)
5701             continue;
5702
5703           if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
5704             vect_loop_kill_debug_uses (loop, phi);
5705
5706           if (!STMT_VINFO_RELEVANT_P (stmt_info)
5707               && !STMT_VINFO_LIVE_P (stmt_info))
5708             continue;
5709
5710           if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
5711                 != (unsigned HOST_WIDE_INT) vectorization_factor)
5712               && dump_enabled_p ())
5713             dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
5714
5715           if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
5716             {
5717               if (dump_enabled_p ())
5718                 dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
5719               vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
5720             }
5721         }
5722
5723       pattern_stmt = NULL;
5724       for (si = gsi_start_bb (bb); !gsi_end_p (si) || transform_pattern_stmt;)
5725         {
5726           bool is_store;
5727
5728           if (transform_pattern_stmt)
5729             stmt = pattern_stmt;
5730           else
5731             {
5732               stmt = gsi_stmt (si);
5733               /* During vectorization remove existing clobber stmts.  */
5734               if (gimple_clobber_p (stmt))
5735                 {
5736                   unlink_stmt_vdef (stmt);
5737                   gsi_remove (&si, true);
5738                   release_defs (stmt);
5739                   continue;
5740                 }
5741             }
5742
5743           if (dump_enabled_p ())
5744             {
5745               dump_printf_loc (MSG_NOTE, vect_location,
5746                                "------>vectorizing statement: ");
5747               dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
5748               dump_printf (MSG_NOTE, "\n");
5749             }
5750
5751           stmt_info = vinfo_for_stmt (stmt);
5752
5753           /* Vector stmts created in the outer-loop during vectorization of
5754              stmts in an inner-loop may not have a stmt_info, and do not
5755              need to be vectorized.  */
5756           if (!stmt_info)
5757             {
5758               gsi_next (&si);
5759               continue;
5760             }
5761
5762           if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
5763             vect_loop_kill_debug_uses (loop, stmt);
5764
5765           if (!STMT_VINFO_RELEVANT_P (stmt_info)
5766               && !STMT_VINFO_LIVE_P (stmt_info))
5767             {
5768               if (STMT_VINFO_IN_PATTERN_P (stmt_info)
5769                   && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
5770                   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
5771                       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
5772                 {
5773                   stmt = pattern_stmt;
5774                   stmt_info = vinfo_for_stmt (stmt);
5775                 }
5776               else
5777                 {
5778                   gsi_next (&si);
5779                   continue;
5780                 }
5781             }
5782           else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
5783                    && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
5784                    && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
5785                        || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
5786             transform_pattern_stmt = true;
5787
5788           /* If pattern statement has def stmts, vectorize them too.  */
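          /* The def sequence is walked via PATTERN_DEF_SI: each relevant or
             live def stmt gets its own pass through this loop (STMT is
             temporarily redirected to it), and only after the sequence is
             exhausted is the pattern stmt itself transformed; SI is not
             advanced until then.  */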
5789           if (is_pattern_stmt_p (stmt_info))
5790             {
5791               if (pattern_def_seq == NULL)
5792                 {
5793                   pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
5794                   pattern_def_si = gsi_start (pattern_def_seq);
5795                 }
5796               else if (!gsi_end_p (pattern_def_si))
5797                 gsi_next (&pattern_def_si);
5798               if (pattern_def_seq != NULL)
5799                 {
5800                   gimple pattern_def_stmt = NULL;
5801                   stmt_vec_info pattern_def_stmt_info = NULL;
5802
5803                   while (!gsi_end_p (pattern_def_si))
5804                     {
5805                       pattern_def_stmt = gsi_stmt (pattern_def_si);
5806                       pattern_def_stmt_info
5807                         = vinfo_for_stmt (pattern_def_stmt);
5808                       if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
5809                           || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
5810                         break;
5811                       gsi_next (&pattern_def_si);
5812                     }
5813
5814                   if (!gsi_end_p (pattern_def_si))
5815                     {
5816                       if (dump_enabled_p ())
5817                         {
5818                           dump_printf_loc (MSG_NOTE, vect_location,
5819                                            "==> vectorizing pattern def "
5820                                            "stmt: ");
5821                           dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
5822                                             pattern_def_stmt, 0);
5823                           dump_printf (MSG_NOTE, "\n");
5824                         }
5825
5826                       stmt = pattern_def_stmt;
5827                       stmt_info = pattern_def_stmt_info;
5828                     }
5829                   else
5830                     {
5831                       pattern_def_si = gsi_none ();
5832                       transform_pattern_stmt = false;
5833                     }
5834                 }
5835               else
5836                 transform_pattern_stmt = false;
5837             }
5838
5839           gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
5840           nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (
5841                                                STMT_VINFO_VECTYPE (stmt_info));
5842           if (!STMT_SLP_TYPE (stmt_info)
5843               && nunits != (unsigned int) vectorization_factor
5844               && dump_enabled_p ())
5845             /* For SLP, VF is set according to the unrolling factor, and not
5846                to the vector size, hence for SLP this print is not valid.  */
5847             dump_printf_loc (MSG_NOTE, vect_location,
5848                              "multiple-types.\n");
5849
5850           /* SLP. Schedule all the SLP instances when the first SLP stmt is
5851              reached.  */
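          /* Pure SLP stmts are handled entirely by vect_schedule_slp and
             are skipped below; hybrid SLP stmts additionally go through
             the regular one-stmt-at-a-time transformation.  */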
5852           if (STMT_SLP_TYPE (stmt_info))
5853             {
5854               if (!slp_scheduled)
5855                 {
5856                   slp_scheduled = true;
5857
5858                   if (dump_enabled_p ())
5859                     dump_printf_loc (MSG_NOTE, vect_location,
5860                                      "=== scheduling SLP instances ===\n");
5861
5862                   vect_schedule_slp (loop_vinfo, NULL);
5863                 }
5864
5865               /* Hybrid SLP stmts must be vectorized in addition to SLP.  */
5866               if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
5867                 {
5868                   if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
5869                     {
5870                       pattern_def_seq = NULL;
5871                       gsi_next (&si);
5872                     }
5873                   continue;
5874                 }
5875             }
5876
5877           /* -------- vectorize statement ------------ */
5878           if (dump_enabled_p ())
5879             dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
5880
5881           grouped_store = false;
5882           is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
5883           if (is_store)
5884             {
5885               if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5886                 {
5887                   /* Interleaving. If IS_STORE is TRUE, the vectorization of the
5888                      interleaving chain was completed - free all the stores in
5889                      the chain.  */
5890                   gsi_next (&si);
5891                   vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
5892                   continue;
5893                 }
5894               else
5895                 {
5896                   /* Free the attached stmt_vec_info and remove the stmt.  */
5897                   gimple store = gsi_stmt (si);
5898                   free_stmt_vec_info (store);
5899                   unlink_stmt_vdef (store);
5900                   gsi_remove (&si, true);
5901                   release_defs (store);
5902                   continue;
5903                 }
5904             }
5905
5906           if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
5907             {
5908               pattern_def_seq = NULL;
5909               gsi_next (&si);
5910             }
5911         }                       /* stmts in BB */
5912     }                           /* BBs in loop */
5913
5914   slpeel_make_loop_iterate_ntimes (loop, ratio);
5915
5916   /* Reduce loop iterations by the vectorization factor.  */
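  /* Illustrative numbers (not from the sources): with an upper bound of 103
     scalar iterations and VF == 4, the vectorized loop's bound becomes
     103 / 4 == 25 (floor division), and one less than that when peeling
     for gaps is in effect.  */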
5917   scale_loop_profile (loop, GCOV_COMPUTE_SCALE (1, vectorization_factor),
5918                       expected_iterations / vectorization_factor);
5919   loop->nb_iterations_upper_bound
5920     = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi (vectorization_factor),
5921                                             FLOOR_DIV_EXPR);
5922   if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
5923       && loop->nb_iterations_upper_bound != double_int_zero)
5924     loop->nb_iterations_upper_bound = loop->nb_iterations_upper_bound - double_int_one;
5925   if (loop->any_estimate)
5926     {
5927       loop->nb_iterations_estimate
5928         = loop->nb_iterations_estimate.udiv (double_int::from_uhwi (vectorization_factor),
5929                                              FLOOR_DIV_EXPR);
5930        if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
5931            && loop->nb_iterations_estimate != double_int_zero)
5932          loop->nb_iterations_estimate = loop->nb_iterations_estimate - double_int_one;
5933     }
5934
5935   if (dump_enabled_p ())
5936     {
5937       dump_printf_loc (MSG_NOTE, vect_location,
5938                        "LOOP VECTORIZED\n");
5939       if (loop->inner)
5940         dump_printf_loc (MSG_NOTE, vect_location,
5941                          "OUTER LOOP VECTORIZED\n");
5942       dump_printf (MSG_NOTE, "\n");
5943     }
5944 }