/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "target.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "hashtab.h"
#include "rtl.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "recog.h"              /* FIXME: for insn_data */
#include "insn-codes.h"
#include "optabs.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
#include "gimple-walk.h"

/* Extract the location of the basic block in the source code.
   Return the location of the first statement that has one, and
   UNKNOWN_LOCATION if BB is NULL or no statement has a location.  */

source_location
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOCATION;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOCATION)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOCATION;
}


/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}


/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  SLP_INSTANCE_BODY_COST_VEC (instance).release ();
  free (instance);
}


/* Create an SLP node for SCALAR_STMTS.  */
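/* For example, for a group rooted at a COND_EXPR statement such as
     x_1 = a_2 < b_3 ? c_4 : d_5;
   gimple_num_ops is 4, so NOPS below becomes (4 - 1) + 1 == 4 children
   slots: two for the comparison operands and two for the selected
   values.  (Illustrative example only; the names are made up.)  */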

static slp_tree
vect_create_new_slp_node (vec<gimple> scalar_stmts)
{
  slp_tree node;
  gimple stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;
  SLP_TREE_TWO_OPERATORS (node) = false;

  return node;
}


/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnd_info->second_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}


/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}


/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */
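/* For example, for the chain of loads a[0], a[1], a[3] (a gap at a[2],
   so GROUP_GAP of the a[3] element is 2), the returned places are
   0, 1 and 3 respectively.  (Illustrative example only.)  */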

static int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
  gimple next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
        return result;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
      if (next_stmt)
        result += GROUP_GAP (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}


/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  If there was a fatal error
   return -1, if the error could be corrected by swapping operands of the
   operation return 1, if everything is ok return 0.  */
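/* For example, for the group
     x_1 = a_2 + 1;
     y_3 = 2 + b_4;
   operand 0 of the second stmt is a constant while operand 0 of the
   first stmt is an internal def, so the operands of the commutative
   PLUS_EXPR of the second stmt are swapped in place (see the tail of
   this function) and 0 is returned.  (Illustrative example only; the
   names are made up.)  */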

static int
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                             gimple stmt, unsigned stmt_num,
                             vec<slp_oprnd_info> *oprnds_info)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  struct loop *loop = NULL;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int first_op_idx = 1;
  bool commutative = false;
  bool first_op_cond = false;
  bool first = stmt_num == 0;
  bool second = stmt_num == 1;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
      first_op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      enum tree_code code = gimple_assign_rhs_code (stmt);
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        {
          first_op_cond = true;
          commutative = true;
          number_of_oprnds++;
        }
      else
        commutative = commutative_tree_code (code);
    }
  else
    return -1;

  bool swapped = false;
  for (i = 0; i < number_of_oprnds; i++)
    {
again:
      if (first_op_cond)
        {
          if (i == 0 || i == 1)
            oprnd = TREE_OPERAND (gimple_op (stmt, first_op_idx),
                                  swapped ? !i : i);
          else
            oprnd = gimple_op (stmt, first_op_idx + i - 1);
        }
      else
        oprnd = gimple_op (stmt, first_op_idx + (swapped ? !i : i));

      oprnd_info = (*oprnds_info)[i];

      if (!vect_is_simple_use (oprnd, NULL, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt)
          || (!def_stmt && dt != vect_constant_def))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: can't find def for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return -1;
        }

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
          && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
              || (!loop && gimple_bb (def_stmt) == BB_VINFO_BB (bb_vinfo)
                  && gimple_code (def_stmt) != GIMPLE_PHI))
          && vinfo_for_stmt (def_stmt)
          && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
        {
          pattern = true;
          if (!first && !oprnd_info->first_pattern
              /* Allow different pattern state for the defs of the
                 first stmt in reduction chains.  */
              && (oprnd_info->first_dt != vect_reduction_def
                  || (!second && !oprnd_info->second_pattern)))
            {
              if (i == 0
                  && !swapped
                  && commutative)
                {
                  swapped = true;
                  goto again;
                }

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: some of the stmts"
                                   " are in a pattern, and others are not ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              return 1;
            }

          def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
          dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

          if (dt == vect_unknown_def_type)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Unsupported pattern.\n");
              return -1;
            }

          switch (gimple_code (def_stmt))
            {
              case GIMPLE_PHI:
                def = gimple_phi_result (def_stmt);
                break;

              case GIMPLE_ASSIGN:
                def = gimple_assign_lhs (def_stmt);
                break;

              default:
                if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "unsupported defining stmt:\n");
                return -1;
            }
        }

      if (second)
        oprnd_info->second_pattern = pattern;

      if (first)
        {
          oprnd_info->first_dt = dt;
          oprnd_info->first_pattern = pattern;
          oprnd_info->first_op_type = TREE_TYPE (oprnd);
        }
      else
        {
          /* Not first stmt of the group, check that the def-stmt/s match
             the def-stmt/s of the first stmt.  Allow different definition
             types for reduction chains: the first stmt must be a
             vect_reduction_def (a phi node), and the rest
             vect_internal_def.  */
          if (((oprnd_info->first_dt != dt
                && !(oprnd_info->first_dt == vect_reduction_def
                     && dt == vect_internal_def)
                && !((oprnd_info->first_dt == vect_external_def
                      || oprnd_info->first_dt == vect_constant_def)
                     && (dt == vect_external_def
                         || dt == vect_constant_def)))
               || !types_compatible_p (oprnd_info->first_op_type,
                                       TREE_TYPE (oprnd))))
            {
              /* Try swapping operands if we got a mismatch.  */
              if (i == 0
                  && !swapped
                  && commutative)
                {
                  swapped = true;
                  goto again;
                }

              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Build SLP failed: different types\n");

              return 1;
            }
        }

      /* Check the types of the definitions.  */
      switch (dt)
        {
        case vect_constant_def:
        case vect_external_def:
        case vect_reduction_def:
          break;

        case vect_internal_def:
          oprnd_info->def_stmts.quick_push (def_stmt);
          break;

        default:
          /* FORNOW: Not supported.  */
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: illegal type of def ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return -1;
        }
    }

  /* Swap operands.  */
  if (swapped)
    {
      if (first_op_cond)
        {
          tree cond = gimple_assign_rhs1 (stmt);
          swap_ssa_operands (stmt, &TREE_OPERAND (cond, 0),
                             &TREE_OPERAND (cond, 1));
          TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
        }
      else
        swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                           gimple_assign_rhs2_ptr (stmt));
    }

  return 0;
}


/* Verify that the scalar stmts STMTS are isomorphic and do not require
   a data permutation or an unsupported kind of operation.  Return true
   if so; otherwise return false and indicate in *MATCHES which stmts
   are not isomorphic to the first one.  If MATCHES[0] is false then
   this indicates that the comparison could not be carried out or that
   the stmts will never be vectorized by SLP.  */
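
/* For example, the two stmts
     a_1 = x_2 + y_3;
     b_4 = p_5 + q_6;
   are isomorphic, while a { PLUS_EXPR, MINUS_EXPR } pair is still
   accepted as a "two operators" node (see *TWO_OPERATORS below): both
   operations are computed and the results blended with a permute.
   (Illustrative example only; the names are made up.)  */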

static bool
vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       vec<gimple> stmts, unsigned int group_size,
                       unsigned nops, unsigned int *max_nunits,
                       unsigned int vectorization_factor, bool *matches,
                       bool *two_operators)
{
  unsigned int i;
  gimple first_stmt = stmts[0], stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK;
  enum tree_code alt_stmt_code = ERROR_MARK;
  enum tree_code rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  tree lhs;
  bool need_same_oprnds = false;
  tree vectype, scalar_type, first_op1 = NULL_TREE;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  machine_mode vec_mode;
  struct data_reference *first_dr;
  HOST_WIDE_INT dummy;
  gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
  tree cond;

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      matches[i] = false;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unvectorizable statement ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: not GIMPLE_ASSIGN nor "
                               "GIMPLE_CALL ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      if (is_gimple_assign (stmt)
          && gimple_assign_rhs_code (stmt) == COND_EXPR
          && (cond = gimple_assign_rhs1 (stmt))
          && !COMPARISON_CLASS_P (cond))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: condition is not "
                               "comparison ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* If populating the vector type requires unrolling then fail
         before adjusting *max_nunits for basic-block vectorization.  */
      if (bb_vinfo
          && TYPE_VECTOR_SUBPARTS (vectype) > group_size)
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unrolling required "
                           "in basic block SLP\n");
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
        {
          *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
          if (bb_vinfo)
            vectorization_factor = *max_nunits;
        }

      if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
        {
          rhs_code = CALL_EXPR;
          if (gimple_call_internal_p (call_stmt)
              || gimple_call_tail_p (call_stmt)
              || gimple_call_noreturn_p (call_stmt)
              || !gimple_call_nothrow_p (call_stmt)
              || gimple_call_chain (call_stmt))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported call type ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    call_stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }
        }
      else
        rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
        {
          first_stmt_code = rhs_code;

          /* Shift arguments should be equal in all the packed stmts for a
             vector shift with scalar shift operand.  */
          if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
              || rhs_code == LROTATE_EXPR
              || rhs_code == RROTATE_EXPR)
            {
              vec_mode = TYPE_MODE (vectype);

              /* First see if we have a vector/vector shift.  */
              optab = optab_for_tree_code (rhs_code, vectype,
                                           optab_vector);

              if (!optab
                  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
                {
                  /* No vector/vector shift, try for a vector/scalar shift.  */
                  optab = optab_for_tree_code (rhs_code, vectype,
                                               optab_scalar);

                  if (!optab)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: no optab.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  icode = (int) optab_handler (optab, vec_mode);
                  if (icode == CODE_FOR_nothing)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: "
                                         "op not supported by target.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  optab_op2_mode = insn_data[icode].operand[2].mode;
                  if (!VECTOR_MODE_P (optab_op2_mode))
                    {
                      need_same_oprnds = true;
                      first_op1 = gimple_assign_rhs2 (stmt);
                    }
                }
            }
          else if (rhs_code == WIDEN_LSHIFT_EXPR)
            {
              need_same_oprnds = true;
              first_op1 = gimple_assign_rhs2 (stmt);
            }
        }
      else
        {
          if (first_stmt_code != rhs_code
              && alt_stmt_code == ERROR_MARK)
            alt_stmt_code = rhs_code;
          if (first_stmt_code != rhs_code
              && (first_stmt_code != IMAGPART_EXPR
                  || rhs_code != REALPART_EXPR)
              && (first_stmt_code != REALPART_EXPR
                  || rhs_code != IMAGPART_EXPR)
              /* Handle mismatches in plus/minus by computing both
                 and merging the results.  */
              && !((first_stmt_code == PLUS_EXPR
                    || first_stmt_code == MINUS_EXPR)
                   && (alt_stmt_code == PLUS_EXPR
                       || alt_stmt_code == MINUS_EXPR)
                   && rhs_code == alt_stmt_code)
              && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
                   && (first_stmt_code == ARRAY_REF
                       || first_stmt_code == BIT_FIELD_REF
                       || first_stmt_code == INDIRECT_REF
                       || first_stmt_code == COMPONENT_REF
                       || first_stmt_code == MEM_REF)))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different operation "
                                   "in stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "original stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    first_stmt, 0);
                }
              /* Mismatch.  */
              continue;
            }

          if (need_same_oprnds
              && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different shift "
                                   "arguments in ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Mismatch.  */
              continue;
            }

          if (rhs_code == CALL_EXPR)
            {
              gimple first_stmt = stmts[0];
              if (gimple_call_num_args (stmt) != nops
                  || !operand_equal_p (gimple_call_fn (first_stmt),
                                       gimple_call_fn (stmt), 0)
                  || gimple_call_fntype (first_stmt)
                     != gimple_call_fntype (stmt))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different calls in ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
        {
          if (REFERENCE_CLASS_P (lhs))
            {
              /* Store.  */
              ;
            }
          else
            {
              /* Load.  */
              unsigned unrolling_factor
                = least_common_multiple
                    (*max_nunits, group_size) / group_size;
              /* FORNOW: Check that there is no gap between the loads
                 and no gap between the groups when we need to load
                 multiple groups at once.
                 ???  We should enhance this to only disallow gaps
                 inside vectors.  */
              if ((unrolling_factor > 1
                   && ((GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                        && GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
                       /* If the group is split up then GROUP_GAP
                          isn't correct here, nor is GROUP_FIRST_ELEMENT.  */
                       || GROUP_SIZE (vinfo_for_stmt (stmt)) > group_size))
                  || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
                      && GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: grouped "
                                       "loads have gaps ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              /* Check that the size of interleaved loads group is not
                 greater than the SLP group size.  */
              unsigned ncopies
                = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
              if (loop_vinfo
                  && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                  && ((GROUP_SIZE (vinfo_for_stmt (stmt))
                       - GROUP_GAP (vinfo_for_stmt (stmt)))
                      > ncopies * group_size))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: the number "
                                       "of interleaved loads is greater than "
                                       "the SLP group size ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              old_first_load = first_load;
              first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
              if (prev_first_load)
                {
                  /* Check that there are no loads from different interleaving
                     chains in the same node.  */
                  if (prev_first_load != first_load)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: different "
                                           "interleaving chains in one node ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Mismatch.  */
                      continue;
                    }
                }
              else
                prev_first_load = first_load;

              /* In some cases a group of loads is just the same load
                 repeated N times.  Only analyze its cost once.  */
              if (first_load == stmt && old_first_load != first_load)
                {
                  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
                  if (vect_supportable_dr_alignment (first_dr, false)
                      == dr_unaligned_unsupported)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: unsupported "
                                           "unaligned load ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                }
            }
        } /* Grouped access.  */
      else
        {
          if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
            {
              /* Not grouped load.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: not grouped load ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              /* FORNOW: Not grouped loads are not supported.  */
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          /* Not memory operation.  */
          if (TREE_CODE_CLASS (rhs_code) != tcc_binary
              && TREE_CODE_CLASS (rhs_code) != tcc_unary
              && TREE_CODE_CLASS (rhs_code) != tcc_expression
              && rhs_code != CALL_EXPR)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: operation");
                  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          if (rhs_code == COND_EXPR)
            {
              tree cond_expr = gimple_assign_rhs1 (stmt);

              if (i == 0)
                first_cond_code = TREE_CODE (cond_expr);
              else if (first_cond_code != TREE_CODE (cond_expr))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different"
                                       " operation");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      matches[i] = true;
    }

  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      return false;

  /* If we allowed a two-operation SLP node verify the target can cope
     with the permute we are going to use.  */
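  /* For example, for GROUP_SIZE == 2 and a V4SI vector type, a
     { PLUS_EXPR, MINUS_EXPR } group yields the mask { 0, 5, 2, 7 },
     selecting the even elements from the PLUS result (elements 0-3)
     and the odd elements from the MINUS result (elements 4-7).
     (Illustrative example only.)  */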
  if (alt_stmt_code != ERROR_MARK
      && TREE_CODE_CLASS (alt_stmt_code) != tcc_reference)
    {
      unsigned char *sel
        = XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (vectype));
      for (i = 0; i < TYPE_VECTOR_SUBPARTS (vectype); ++i)
        {
          sel[i] = i;
          if (gimple_assign_rhs_code (stmts[i % group_size]) == alt_stmt_code)
            sel[i] += TYPE_VECTOR_SUBPARTS (vectype);
        }
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
        {
          for (i = 0; i < group_size; ++i)
            if (gimple_assign_rhs_code (stmts[i]) == alt_stmt_code)
              {
                matches[i] = false;
                if (dump_enabled_p ())
                  {
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "Build SLP failed: different operation "
                                     "in stmt ");
                    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                      stmts[i], 0);
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "original stmt ");
                    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                      first_stmt, 0);
                  }
              }
          return false;
        }
      *two_operators = true;
    }

  return true;
}

/* Recursively build an SLP tree starting from NODE.
   Return false if the def-stmts are not isomorphic, require a data
   permutation or are of unsupported kinds of operation; in that case
   MATCHES records which stmts of the group caused the mismatch.
   Otherwise return true.  */

static bool
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                     slp_tree *node, unsigned int group_size,
                     unsigned int *max_nunits,
                     vec<slp_tree> *loads,
                     unsigned int vectorization_factor,
                     bool *matches, unsigned *npermutes, unsigned *tree_size,
                     unsigned max_tree_size)
{
  unsigned nops, i, this_tree_size = 0;
  gimple stmt;

  matches[0] = false;

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return false;

  bool two_operators = false;
  if (!vect_build_slp_tree_1 (loop_vinfo, bb_vinfo,
                              SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
                              max_nunits, vectorization_factor, matches,
                              &two_operators))
    return false;
  SLP_TREE_TWO_OPERATORS (*node) = two_operators;

  /* If the SLP node is a load, terminate the recursion.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
      && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
    {
      loads->safe_push (*node);
      return true;
    }

  /* Get at the operands, verifying they are compatible.  */
  vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
  slp_oprnd_info oprnd_info;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
    {
      switch (vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo,
                                           stmt, i, &oprnds_info))
        {
        case 0:
          break;
        case -1:
          matches[0] = false;
          vect_free_oprnd_info (oprnds_info);
          return false;
        case 1:
          matches[i] = false;
          break;
        }
    }
  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      {
        vect_free_oprnd_info (oprnds_info);
        return false;
      }

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      slp_tree child;
      unsigned old_nloads = loads->length ();
      unsigned old_max_nunits = *max_nunits;

      if (oprnd_info->first_dt != vect_internal_def)
        continue;

      if (++this_tree_size > max_tree_size)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
      if (!child)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                               group_size, max_nunits, loads,
                               vectorization_factor, matches,
                               npermutes, &this_tree_size, max_tree_size))
        {
          oprnd_info->def_stmts = vNULL;
          SLP_TREE_CHILDREN (*node).quick_push (child);
          continue;
        }

      /* If the SLP build failed fatally and we analyze a basic-block
         simply treat nodes we fail to build as externally defined
         (and thus build vectors from the scalar defs).
         The cost model will reject outright expensive cases.
         ???  This doesn't treat cases where permutation ultimately
         fails (or we don't try permutation below).  Ideally we'd
         even compute a permutation that will end up with the maximum
         SLP tree size...  */
      if (bb_vinfo
          && !matches[0]
          /* ???  Rejecting patterns this way doesn't work.  We'd have to
             do extra work to cancel the pattern so the uses see the
             scalar version.  */
          && !is_pattern_stmt_p (vinfo_for_stmt (stmt)))
        {
          unsigned int j;
          slp_tree grandchild;

          /* Roll back.  */
          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
            vect_free_slp_tree (grandchild);
          SLP_TREE_CHILDREN (child).truncate (0);

          dump_printf_loc (MSG_NOTE, vect_location,
                           "Building vector operands from scalars\n");
          oprnd_info->def_stmts = vNULL;
          vect_free_slp_tree (child);
          SLP_TREE_CHILDREN (*node).quick_push (NULL);
          continue;
        }

      /* If the SLP build for operand zero failed and operand zero
         and one can be commutated try that for the scalar stmts
         that failed the match.  */
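      /* For example, if the group is
           a_1 = x_2 + y_3;
           b_4 = z_5 + x_6;
         and only the second stmt failed to match, the def-stmts of its
         two operands are swapped below and the build of the child is
         retried.  (Illustrative example only; the names are made up.)  */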
      if (i == 0
          /* A first scalar stmt mismatch signals a fatal mismatch.  */
          && matches[0]
          /* ???  For COND_EXPRs we can swap the comparison operands
             as well as the arms under some constraints.  */
          && nops == 2
          && oprnds_info[1]->first_dt == vect_internal_def
          && is_gimple_assign (stmt)
          && commutative_tree_code (gimple_assign_rhs_code (stmt))
          && !SLP_TREE_TWO_OPERATORS (*node)
          /* Do so only if the number of unsuccessful permutes was no more
             than a cut-off, as re-trying the recursive match on
             possibly each level of the tree would expose exponential
             behavior.  */
          && *npermutes < 4)
        {
          unsigned int j;
          slp_tree grandchild;

          /* Roll back.  */
          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
            vect_free_slp_tree (grandchild);
          SLP_TREE_CHILDREN (child).truncate (0);

          /* Swap mismatched definition stmts.  */
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Re-trying with swapped operands of stmts ");
          for (j = 0; j < group_size; ++j)
            if (!matches[j])
              {
                gimple tem = oprnds_info[0]->def_stmts[j];
                oprnds_info[0]->def_stmts[j] = oprnds_info[1]->def_stmts[j];
                oprnds_info[1]->def_stmts[j] = tem;
                dump_printf (MSG_NOTE, "%d ", j);
              }
          dump_printf (MSG_NOTE, "\n");
          /* And try again with scratch 'matches' ... */
          bool *tem = XALLOCAVEC (bool, group_size);
          if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                                   group_size, max_nunits, loads,
                                   vectorization_factor,
                                   tem, npermutes, &this_tree_size,
                                   max_tree_size))
            {
              /* ... so if successful we can apply the operand swapping
                 to the GIMPLE IL.  This is necessary because for example
                 vect_get_slp_defs uses operand indexes and thus expects
                 canonical operand order.  */
              for (j = 0; j < group_size; ++j)
                if (!matches[j])
                  {
                    gimple stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
                    swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                                       gimple_assign_rhs2_ptr (stmt));
                  }
              oprnd_info->def_stmts = vNULL;
              SLP_TREE_CHILDREN (*node).quick_push (child);
              continue;
            }

          ++*npermutes;
        }

      oprnd_info->def_stmts = vNULL;
      vect_free_slp_tree (child);
      vect_free_oprnd_info (oprnds_info);
      return false;
    }

  if (tree_size)
    *tree_size += this_tree_size;

  vect_free_oprnd_info (oprnds_info);
  return true;
}

/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, slp_tree node)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  dump_printf (dump_kind, "node ");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf (dump_kind, "\n\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  dump_printf (dump_kind, "\n");

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, child);
}


/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);
}


/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple stmt;
  stmt_vec_info stmt_info;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);
}


/* Rearrange the statements of NODE according to PERMUTATION.  */
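/* For example, with GROUP_SIZE == 3 and PERMUTATION == { 2, 0, 1 },
   the stmt at index 0 moves to index 2, index 1 to index 0 and
   index 2 to index 1 (stmt I is placed at PERMUTATION[I]).
   (Illustrative example only.)  */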

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          vec<unsigned> permutation)
{
  gimple stmt;
  vec<gimple> tmp_stmts;
  unsigned int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}


/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */
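/* For example, in basic-block SLP the loads b[1], b[2] form a subchain
   of the interleaving chain starting at b[0] and need no permutation,
   while the loads b[1], b[0] require one.  (Illustrative example
   only.)  */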

static bool
vect_supported_load_permutation_p (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j, k, next;
  sbitmap load_index;
  slp_tree node;
  gimple stmt, load, next_load, first_load;
  struct data_reference *dr;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        if (node->load_permutation.exists ())
          FOR_EACH_VEC_ELT (node->load_permutation, j, next)
            dump_printf (MSG_NOTE, "%d ", next);
        else
          for (k = 0; k < group_size; ++k)
            dump_printf (MSG_NOTE, "%d ", k);
      dump_printf (MSG_NOTE, "\n");
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */
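  /* For example, for a reduction summing a[0] + a[1], loading the group
     in the order { a[1], a[0] } is fine: the whole instance is simply
     rearranged to that order and no permutation code is generated.
     (Illustrative example only.)  */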

  /* Check that all the load nodes are of the same size.  */
  /* ???  Can't we assert this? */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
      return false;

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      slp_tree load;
      unsigned int lidx;

      /* Compare all the permutation sequences to the first one.  We know
         that at least one load is permuted.  */
      node = SLP_INSTANCE_LOADS (slp_instn)[0];
      if (!node->load_permutation.exists ())
        return false;
      for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
        {
          if (!load->load_permutation.exists ())
            return false;
          FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
            if (lidx != node->load_permutation[j])
              return false;
        }

      /* Check that the loads in the first sequence are different and there
         are no gaps between them.  */
      load_index = sbitmap_alloc (group_size);
      bitmap_clear (load_index);
      FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
        {
          if (bitmap_bit_p (load_index, lidx))
            {
              sbitmap_free (load_index);
              return false;
            }
          bitmap_set_bit (load_index, lidx);
        }
      for (i = 0; i < group_size; i++)
        if (!bitmap_bit_p (load_index, i))
          {
            sbitmap_free (load_index);
            return false;
          }
      sbitmap_free (load_index);

      /* This permutation is valid for reduction.  Since the order of the
         statements in the nodes is not important unless they are memory
         accesses, we can rearrange the statements in all the nodes
         according to the order of the loads.  */
      vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
                                node->load_permutation);

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
  if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
    {
      /* Check whether the loads in an instance form a subchain and thus
         no permutation is necessary.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          if (!SLP_TREE_LOAD_PERMUTATION (node).exists ())
            continue;
          bool subchain_p = true;
          next_load = NULL;
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
            {
              if (j != 0 && next_load != load)
                {
                  subchain_p = false;
                  break;
                }
              next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
            }
          if (subchain_p)
            SLP_TREE_LOAD_PERMUTATION (node).release ();
          else
            {
              /* Verify the permutation can be generated.  */
              vec<tree> tem;
              if (!vect_transform_slp_perm_load (node, tem, NULL,
                                                 1, slp_instn, true))
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                   vect_location,
                                   "unsupported load permutation\n");
                  return false;
                }
            }
        }

      /* Check that the alignment of the first load in every subchain, i.e.,
         the first statement in every load node, is supported.
         ???  This belongs in alignment checking.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          first_load = SLP_TREE_SCALAR_STMTS (node)[0];
          if (first_load != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
            {
              dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
              if (vect_supportable_dr_alignment (dr, false)
                  == dr_unaligned_unsupported)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                       vect_location,
                                       "unsupported unaligned load ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        first_load, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
            }
        }

      return true;
    }

1482   /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
1483      GROUP_SIZE and where each sequence of the same drs is of GROUP_SIZE
1484      length as well (unless it is a reduction).  */
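  /* For example, with GROUP_SIZE 2 this accepts two load nodes whose
     permutations are {0, 0} and {1, 1}, i.e. each load node reads one
     distinct element of the group.  */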
1485   if (SLP_INSTANCE_LOADS (slp_instn).length () != group_size)
1486     return false;
1487   FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1488     if (!node->load_permutation.exists ())
1489       return false;
1490
1491   load_index = sbitmap_alloc (group_size);
1492   bitmap_clear (load_index);
1493   FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1494     {
1495       unsigned int lidx = node->load_permutation[0];
1496       if (bitmap_bit_p (load_index, lidx))
1497         {
1498           sbitmap_free (load_index);
1499           return false;
1500         }
1501       bitmap_set_bit (load_index, lidx);
1502       FOR_EACH_VEC_ELT (node->load_permutation, j, k)
1503         if (k != lidx)
1504           {
1505             sbitmap_free (load_index);
1506             return false;
1507           }
1508     }
1509   for (i = 0; i < group_size; i++)
1510     if (!bitmap_bit_p (load_index, i))
1511       {
1512         sbitmap_free (load_index);
1513         return false;
1514       }
1515   sbitmap_free (load_index);
1516
1517   FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
1518     if (node->load_permutation.exists ()
1519         && !vect_transform_slp_perm_load
1520               (node, vNULL, NULL,
1521                SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
1522       return false;
1523   return true;
1524 }
1525
1526
1527 /* Find the last scalar stmt in the SLP tree NODE.  */
1528
1529 static gimple
1530 vect_find_last_scalar_stmt_in_slp (slp_tree node)
1531 {
1532   gimple last = NULL, stmt;
1533
1534   for (int i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &stmt); i++)
1535     {
1536       stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
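      /* A pattern stmt does not have a meaningful position in the block;
         use the original stmt it replaces for the position check.  */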
1537       if (is_pattern_stmt_p (stmt_vinfo))
1538         last = get_later_stmt (STMT_VINFO_RELATED_STMT (stmt_vinfo), last);
1539       else
1540         last = get_later_stmt (stmt, last);
1541     }
1542
1543   return last;
1544 }
1545
1546 /* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */
1547
1548 static void
1549 vect_analyze_slp_cost_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
1550                          slp_instance instance, slp_tree node,
1551                          stmt_vector_for_cost *prologue_cost_vec,
1552                          unsigned ncopies_for_cost)
1553 {
1554   stmt_vector_for_cost *body_cost_vec = &SLP_INSTANCE_BODY_COST_VEC (instance);
1555
1556   unsigned i;
1557   slp_tree child;
1558   gimple stmt, s;
1559   stmt_vec_info stmt_info;
1560   tree lhs;
1561   unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
1562
1563   /* Recurse down the SLP tree.  */
1564   FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
1565     if (child)
1566       vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
1567                                instance, child, prologue_cost_vec,
1568                                ncopies_for_cost);
1569
1570   /* Look at the first scalar stmt to determine the cost.  */
1571   stmt = SLP_TREE_SCALAR_STMTS (node)[0];
1572   stmt_info = vinfo_for_stmt (stmt);
1573   if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1574     {
1575       if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
1576         vect_model_store_cost (stmt_info, ncopies_for_cost, false,
1577                                vect_uninitialized_def,
1578                                node, prologue_cost_vec, body_cost_vec);
1579       else
1580         {
1581           int i;
1582           gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
1583           vect_model_load_cost (stmt_info, ncopies_for_cost, false,
1584                                 node, prologue_cost_vec, body_cost_vec);
1585           /* If the load is permuted, record the cost for the permutation.
1586              ???  Loads from multiple chains are let through here only
1587              for a single special case involving complex numbers where
1588              in the end no permutation is necessary.  */
1589           FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, s)
1590             if ((STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo_for_stmt (s))
1591                  == STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info))
1592                 && vect_get_place_in_interleaving_chain
1593                      (s, STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info)) != i)
1594               {
1595                 record_stmt_cost (body_cost_vec, group_size, vec_perm,
1596                                   stmt_info, 0, vect_body);
1597                 break;
1598               }
1599         }
1600     }
1601   else
1602     {
1603       record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
1604                         stmt_info, 0, vect_body);
1605       if (SLP_TREE_TWO_OPERATORS (node))
1606         {
1607           record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
1608                             stmt_info, 0, vect_body);
1609           record_stmt_cost (body_cost_vec, ncopies_for_cost, vec_perm,
1610                             stmt_info, 0, vect_body);
1611         }
1612     }
1613
1614   /* Scan operands and account for prologue cost of constants/externals.
1615      ???  This over-estimates cost for multiple uses and should be
1616      re-engineered.  */
1617   lhs = gimple_get_lhs (stmt);
1618   for (i = 0; i < gimple_num_ops (stmt); ++i)
1619     {
1620       tree def, op = gimple_op (stmt, i);
1621       gimple def_stmt;
1622       enum vect_def_type dt;
1623       if (!op || op == lhs)
1624         continue;
1625       if (vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo,
1626                               &def_stmt, &def, &dt))
1627         {
1628           /* Without looking at the actual initializer a vector of
1629              constants can be implemented as a load from the constant pool.
1630              ???  We need to pass down stmt_info for a vector type
1631              even if it points to the wrong stmt.  */
1632           if (dt == vect_constant_def)
1633             record_stmt_cost (prologue_cost_vec, 1, vector_load,
1634                               stmt_info, 0, vect_prologue);
1635           else if (dt == vect_external_def)
1636             record_stmt_cost (prologue_cost_vec, 1, vec_construct,
1637                               stmt_info, 0, vect_prologue);
1638         }
1639     }
1640 }
1641
1642 /* Compute the cost for the SLP instance INSTANCE.  */
1643
1644 static void
1645 vect_analyze_slp_cost (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
1646                        slp_instance instance, unsigned nunits)
1647 {
1648   stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
1649   unsigned ncopies_for_cost;
1650   stmt_info_for_cost *si;
1651   unsigned i;
1652
1653   /* Calculate the number of vector stmts to create based on the unrolling
1654      factor: the number of vector stmts needed for the group is
1655      LCM (GROUP_SIZE, NUNITS) / NUNITS.  */
1656   unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
1657   ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;
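  /* For example, NUNITS 4 and GROUP_SIZE 6 give
     least_common_multiple (4, 6) / 4 = 3 copies to cost.  */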
1658
1659   prologue_cost_vec.create (10);
1660   body_cost_vec.create (10);
1661   SLP_INSTANCE_BODY_COST_VEC (instance) = body_cost_vec;
1662   vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
1663                            instance, SLP_INSTANCE_TREE (instance),
1664                            &prologue_cost_vec, ncopies_for_cost);
1665
1666   /* Record the prologue costs, which were delayed until we were
1667      sure that SLP was successful.  Unlike the body costs, we know
1668      the final values now regardless of the loop vectorization factor.  */
1669   void *data = (loop_vinfo ? LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
1670                 : BB_VINFO_TARGET_COST_DATA (bb_vinfo));
1671   FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
1672     {
1673       struct _stmt_vec_info *stmt_info
1674         = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
1675       (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
1676                             si->misalign, vect_prologue);
1677     }
1678
1679   prologue_cost_vec.release ();
1680 }
1681
1682 /* Analyze an SLP instance starting from a group of grouped stores.  Call
1683    vect_build_slp_tree to build a tree of packed stmts if possible.
1684    Return FALSE if it's impossible to SLP any stmt in the loop.  */
1685
1686 static bool
1687 vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
1688                            gimple stmt, unsigned max_tree_size)
1689 {
1690   slp_instance new_instance;
1691   slp_tree node;
1692   unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
1693   unsigned int unrolling_factor = 1, nunits;
1694   tree vectype, scalar_type = NULL_TREE;
1695   gimple next;
1696   unsigned int vectorization_factor = 0;
1697   int i;
1698   unsigned int max_nunits = 0;
1699   vec<slp_tree> loads;
1700   struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
1701   vec<gimple> scalar_stmts;
1702
1703   if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
1704     {
1705       if (dr)
1706         {
1707           scalar_type = TREE_TYPE (DR_REF (dr));
1708           vectype = get_vectype_for_scalar_type (scalar_type);
1709         }
1710       else
1711         {
1712           gcc_assert (loop_vinfo);
1713           vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1714         }
1715
1716       group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
1717     }
1718   else
1719     {
1720       gcc_assert (loop_vinfo);
1721       vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1722       group_size = LOOP_VINFO_REDUCTIONS (loop_vinfo).length ();
1723     }
1724
1725   if (!vectype)
1726     {
1727       if (dump_enabled_p ())
1728         {
1729           dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1730                            "Build SLP failed: unsupported data-type ");
1731           dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
1732           dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
1733         }
1734
1735       return false;
1736     }
1737
1738   nunits = TYPE_VECTOR_SUBPARTS (vectype);
1739   if (loop_vinfo)
1740     vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1741   else
1742     vectorization_factor = nunits;
1743
1744   /* Calculate the unrolling factor.  */
1745   unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
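  /* For example, GROUP_SIZE 3 with NUNITS 4 gives an unrolling factor of
     least_common_multiple (4, 3) / 3 = 4.  */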
1746   if (unrolling_factor != 1 && !loop_vinfo)
1747     {
1748       if (dump_enabled_p ())
1749         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1750                          "Build SLP failed: unrolling required in basic"
1751                          " block SLP\n");
1752
1753       return false;
1754     }
1755
1756   /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
1757   scalar_stmts.create (group_size);
1758   next = stmt;
1759   if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
1760     {
1761       /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
1762       while (next)
1763         {
1764           if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
1765               && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
1766             scalar_stmts.safe_push (
1767                   STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
1768           else
1769             scalar_stmts.safe_push (next);
1770           next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
1771         }
1772     }
1773   else
1774     {
1775       /* Collect reduction statements.  */
1776       vec<gimple> reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
1777       for (i = 0; reductions.iterate (i, &next); i++)
1778         scalar_stmts.safe_push (next);
1779     }
1780
1781   node = vect_create_new_slp_node (scalar_stmts);
1782
1783   loads.create (group_size);
1784
1785   /* Build the tree for the SLP instance.  */
1786   bool *matches = XALLOCAVEC (bool, group_size);
1787   unsigned npermutes = 0;
1788   if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
1789                            &max_nunits, &loads,
1790                            vectorization_factor, matches, &npermutes, NULL,
1791                            max_tree_size))
1792     {
1793       /* Calculate the unrolling factor based on the smallest type.  */
1794       if (max_nunits > nunits)
1795         unrolling_factor = least_common_multiple (max_nunits, group_size)
1796                            / group_size;
1797
1798       if (unrolling_factor != 1 && !loop_vinfo)
1799         {
1800           if (dump_enabled_p ())
1801             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1802                              "Build SLP failed: unrolling required in basic"
1803                              " block SLP\n");
1804           vect_free_slp_tree (node);
1805           loads.release ();
1806           return false;
1807         }
1808
1809       /* Create a new SLP instance.  */
1810       new_instance = XNEW (struct _slp_instance);
1811       SLP_INSTANCE_TREE (new_instance) = node;
1812       SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
1813       SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
1814       SLP_INSTANCE_BODY_COST_VEC (new_instance) = vNULL;
1815       SLP_INSTANCE_LOADS (new_instance) = loads;
1816
1817       /* Compute the load permutation.  */
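      /* E.g. if a load node reads a[2], a[3], a[0], a[1] from a chain
         starting at a[0], the recorded permutation is {2, 3, 0, 1}.  */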
1818       slp_tree load_node;
1819       bool loads_permuted = false;
1820       FOR_EACH_VEC_ELT (loads, i, load_node)
1821         {
1822           vec<unsigned> load_permutation;
1823           int j;
1824           gimple load, first_stmt;
1825           bool this_load_permuted = false;
1826           load_permutation.create (group_size);
1827           first_stmt = GROUP_FIRST_ELEMENT
1828               (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
1829           FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
1830             {
1831               int load_place
1832                 = vect_get_place_in_interleaving_chain (load, first_stmt);
1833               gcc_assert (load_place != -1);
1834               if (load_place != j)
1835                 this_load_permuted = true;
1836               load_permutation.safe_push (load_place);
1837             }
1838           if (!this_load_permuted)
1839             {
1840               load_permutation.release ();
1841               continue;
1842             }
1843           SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
1844           loads_permuted = true;
1845         }
1846
1847       if (loads_permuted)
1848         {
1849           if (!vect_supported_load_permutation_p (new_instance))
1850             {
1851               if (dump_enabled_p ())
1852                 {
1853                   dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1854                                    "Build SLP failed: unsupported load "
1855                                    "permutation ");
1856                   dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
1857                   dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
1858                 }
1859               vect_free_slp_instance (new_instance);
1860               return false;
1861             }
1862         }
1863
1864
1865       if (loop_vinfo)
1866         {
1867           /* Compute the costs of this SLP instance.  Delay this for BB
1868              vectorization as we don't have vector types computed yet.  */
1869           vect_analyze_slp_cost (loop_vinfo, bb_vinfo,
1870                                  new_instance, TYPE_VECTOR_SUBPARTS (vectype));
1871           LOOP_VINFO_SLP_INSTANCES (loop_vinfo).safe_push (new_instance);
1872         }
1873       else
1874         BB_VINFO_SLP_INSTANCES (bb_vinfo).safe_push (new_instance);
1875
1876       if (dump_enabled_p ())
1877         vect_print_slp_tree (MSG_NOTE, node);
1878
1879       return true;
1880     }
1881
1882   /* Failed to SLP.  */
1883   /* Free the allocated memory.  */
1884   vect_free_slp_tree (node);
1885   loads.release ();
1886
1887   return false;
1888 }
1889
1890
1891 /* Check if there are stmts in the loop that can be vectorized using SLP.
1892    Build SLP trees of packed scalar stmts if SLP is possible.  */
1893
1894 bool
1895 vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
1896                   unsigned max_tree_size)
1897 {
1898   unsigned int i;
1899   vec<gimple> grouped_stores;
1900   vec<gimple> reductions = vNULL;
1901   vec<gimple> reduc_chains = vNULL;
1902   gimple first_element;
1903   bool ok = false;
1904
1905   if (dump_enabled_p ())
1906     dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");
1907
1908   if (loop_vinfo)
1909     {
1910       grouped_stores = LOOP_VINFO_GROUPED_STORES (loop_vinfo);
1911       reduc_chains = LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo);
1912       reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
1913     }
1914   else
1915     grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);
1916
1917   /* Find SLP sequences starting from groups of grouped stores.  */
1918   FOR_EACH_VEC_ELT (grouped_stores, i, first_element)
1919     if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
1920                                    max_tree_size))
1921       ok = true;
1922
1923   if (reduc_chains.length () > 0)
1924     {
1925       /* Find SLP sequences starting from reduction chains.  */
1926       FOR_EACH_VEC_ELT (reduc_chains, i, first_element)
1927         if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
1928                                        max_tree_size))
1929           ok = true;
1930         else
1931           return false;
1932
1933       /* Don't try to vectorize SLP reductions if reduction chain was
1934          detected.  */
1935       return ok;
1936     }
1937
1938   /* Find SLP sequences starting from groups of reductions.  */
1939   if (reductions.length () > 1
1940       && vect_analyze_slp_instance (loop_vinfo, bb_vinfo, reductions[0],
1941                                     max_tree_size))
1942     ok = true;
1943
1944   return true;
1945 }
1946
1947
1948 /* For each possible SLP instance decide whether to SLP it and calculate the
1949    overall unrolling factor needed to SLP the loop.  Return TRUE if decided
1950    to SLP at least one instance.  */
1951
1952 bool
1953 vect_make_slp_decision (loop_vec_info loop_vinfo)
1954 {
1955   unsigned int i, unrolling_factor = 1;
1956   vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
1957   slp_instance instance;
1958   int decided_to_slp = 0;
1959
1960   if (dump_enabled_p ())
1961     dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
1962                      "\n");
1963
1964   FOR_EACH_VEC_ELT (slp_instances, i, instance)
1965     {
1966       /* FORNOW: SLP if you can.  */
1967       if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
1968         unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);
1969
1970       /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts.  Later we
1971          call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
1972          loop-based vectorization.  Such stmts will be marked as HYBRID.  */
1973       vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
1974       decided_to_slp++;
1975     }
1976
1977   LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;
1978
1979   if (decided_to_slp && dump_enabled_p ())
1980     dump_printf_loc (MSG_NOTE, vect_location,
1981                      "Decided to SLP %d instances. Unrolling factor %d\n",
1982                      decided_to_slp, unrolling_factor);
1983
1984   return (decided_to_slp > 0);
1985 }
1986
1987
1988 /* Find stmts that must be both vectorized and SLPed (since they feed stmts that
1989    can't be SLPed) in the tree rooted at NODE.  Mark such stmts as HYBRID.  */
1990
1991 static void
1992 vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
1993 {
1994   gimple stmt = SLP_TREE_SCALAR_STMTS (node)[i];
1995   imm_use_iterator imm_iter;
1996   gimple use_stmt;
1997   stmt_vec_info use_vinfo, stmt_vinfo = vinfo_for_stmt (stmt);
1998   slp_tree child;
1999   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2000   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2001   int j;
2002
2003   /* Propagate hybrid down the SLP tree.  */
2004   if (stype == hybrid)
2005     ;
2006   else if (HYBRID_SLP_STMT (stmt_vinfo))
2007     stype = hybrid;
2008   else
2009     {
2010       /* Check if a pure SLP stmt has uses in non-SLP stmts.  */
2011       gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo));
2012       if (TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
2013         FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
2014           if (gimple_bb (use_stmt)
2015               && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2016               && (use_vinfo = vinfo_for_stmt (use_stmt))
2017               && !STMT_SLP_TYPE (use_vinfo)
2018               && (STMT_VINFO_RELEVANT (use_vinfo)
2019                   || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo))
2020                   || (STMT_VINFO_IN_PATTERN_P (use_vinfo)
2021                       && STMT_VINFO_RELATED_STMT (use_vinfo)
2022                       && !STMT_SLP_TYPE (vinfo_for_stmt
2023                             (STMT_VINFO_RELATED_STMT (use_vinfo)))))
2024               && !(gimple_code (use_stmt) == GIMPLE_PHI
2025                    && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
2026             stype = hybrid;
2027     }
2028
2029   if (stype == hybrid)
2030     STMT_SLP_TYPE (stmt_vinfo) = hybrid;
2031
2032   FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
2033     if (child)
2034       vect_detect_hybrid_slp_stmts (child, i, stype);
2035 }
2036
2037 /* Helpers for vect_detect_hybrid_slp walking pattern stmt uses.  */
2038
2039 static tree
2040 vect_detect_hybrid_slp_1 (tree *tp, int *, void *data)
2041 {
2042   walk_stmt_info *wi = (walk_stmt_info *)data;
2043   struct loop *loopp = (struct loop *)wi->info;
2044
2045   if (wi->is_lhs)
2046     return NULL_TREE;
2047
2048   if (TREE_CODE (*tp) == SSA_NAME
2049       && !SSA_NAME_IS_DEFAULT_DEF (*tp))
2050     {
2051       gimple def_stmt = SSA_NAME_DEF_STMT (*tp);
2052       if (flow_bb_inside_loop_p (loopp, gimple_bb (def_stmt))
2053           && PURE_SLP_STMT (vinfo_for_stmt (def_stmt)))
2054         STMT_SLP_TYPE (vinfo_for_stmt (def_stmt)) = hybrid;
2055     }
2056
2057   return NULL_TREE;
2058 }
2059
2060 static tree
2061 vect_detect_hybrid_slp_2 (gimple_stmt_iterator *gsi, bool *handled,
2062                           walk_stmt_info *)
2063 {
2064   /* If the stmt is in an SLP instance then this isn't a reason
2065      to mark use definitions in other SLP instances as hybrid.  */
2066   if (STMT_SLP_TYPE (vinfo_for_stmt (gsi_stmt (*gsi))) != loop_vect)
2067     *handled = true;
2068   return NULL_TREE;
2069 }
2070
2071 /* Find stmts that must be both vectorized and SLPed.  */
2072
2073 void
2074 vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
2075 {
2076   unsigned int i;
2077   vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2078   slp_instance instance;
2079
2080   if (dump_enabled_p ())
2081     dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
2082                      "\n");
2083
2084   /* First walk all pattern stmts in the loop and mark defs of their uses
2085      as hybrid, because immediate uses in pattern stmts are not recorded.  */
2086   for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2087     {
2088       basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2089       for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
2090            gsi_next (&gsi))
2091         {
2092           gimple stmt = gsi_stmt (gsi);
2093           stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2094           if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2095             {
2096               walk_stmt_info wi;
2097               memset (&wi, 0, sizeof (wi));
2098               wi.info = LOOP_VINFO_LOOP (loop_vinfo);
2099               gimple_stmt_iterator gsi2
2100                 = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
2101               walk_gimple_stmt (&gsi2, vect_detect_hybrid_slp_2,
2102                                 vect_detect_hybrid_slp_1, &wi);
2103               walk_gimple_seq (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
2104                                vect_detect_hybrid_slp_2,
2105                                vect_detect_hybrid_slp_1, &wi);
2106             }
2107         }
2108     }
2109
2110   /* Then walk the SLP instance trees marking stmts with uses in
2111      non-SLP stmts as hybrid, also propagating hybrid down the
2112      SLP tree, collecting the above info on-the-fly.  */
2113   FOR_EACH_VEC_ELT (slp_instances, i, instance)
2114     {
2115       for (unsigned i = 0; i < SLP_INSTANCE_GROUP_SIZE (instance); ++i)
2116         vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance),
2117                                       i, pure_slp);
2118     }
2119 }
2120
2121
2122 /* Create and initialize a new bb_vec_info struct for BB, as well as
2123    stmt_vec_info structs for all the stmts in it.  */
2124
2125 static bb_vec_info
2126 new_bb_vec_info (basic_block bb)
2127 {
2128   bb_vec_info res = NULL;
2129   gimple_stmt_iterator gsi;
2130
2131   res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
2132   BB_VINFO_BB (res) = bb;
2133
2134   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2135     {
2136       gimple stmt = gsi_stmt (gsi);
2137       gimple_set_uid (stmt, 0);
2138       set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
2139     }
2140
2141   BB_VINFO_GROUPED_STORES (res).create (10);
2142   BB_VINFO_SLP_INSTANCES (res).create (2);
2143   BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);
2144
2145   bb->aux = res;
2146   return res;
2147 }
2148
2149
2150 /* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
2151    stmts in the basic block.  */
2152
2153 static void
2154 destroy_bb_vec_info (bb_vec_info bb_vinfo)
2155 {
2156   vec<slp_instance> slp_instances;
2157   slp_instance instance;
2158   basic_block bb;
2159   gimple_stmt_iterator si;
2160   unsigned i;
2161
2162   if (!bb_vinfo)
2163     return;
2164
2165   bb = BB_VINFO_BB (bb_vinfo);
2166
2167   for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2168     {
2169       gimple stmt = gsi_stmt (si);
2170       stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2171
2172       if (stmt_info)
2173         /* Free stmt_vec_info.  */
2174         free_stmt_vec_info (stmt);
2175     }
2176
2177   vect_destroy_datarefs (NULL, bb_vinfo);
2178   free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
2179   BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
2180   slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2181   FOR_EACH_VEC_ELT (slp_instances, i, instance)
2182     vect_free_slp_instance (instance);
2183   BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
2184   destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
2185   free (bb_vinfo);
2186   bb->aux = NULL;
2187 }
2188
2189
2190 /* Analyze statements contained in SLP tree node after recursively analyzing
2191    the subtree. Return TRUE if the operations are supported.  */
2192
2193 static bool
2194 vect_slp_analyze_node_operations (slp_tree node)
2195 {
2196   bool dummy;
2197   int i;
2198   gimple stmt;
2199   slp_tree child;
2200
2201   if (!node)
2202     return true;
2203
2204   FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2205     if (!vect_slp_analyze_node_operations (child))
2206       return false;
2207
2208   FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2209     {
2210       stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2211       gcc_assert (stmt_info);
2212       gcc_assert (STMT_SLP_TYPE (stmt_info) != loop_vect);
2213
2214       if (!vect_analyze_stmt (stmt, &dummy, node))
2215         return false;
2216     }
2217
2218   return true;
2219 }
2220
2221
2222 /* Analyze statements in SLP instances of the basic block.  Return TRUE if the
2223    operations are supported. */
2224
2225 bool
2226 vect_slp_analyze_operations (vec<slp_instance> slp_instances)
2227 {
2228   slp_instance instance;
2229   int i;
2230
2231   if (dump_enabled_p ())
2232     dump_printf_loc (MSG_NOTE, vect_location,
2233                      "=== vect_slp_analyze_operations ===\n");
2234
2235   for (i = 0; slp_instances.iterate (i, &instance); )
2236     {
2237       if (!vect_slp_analyze_node_operations (SLP_INSTANCE_TREE (instance)))
2238         {
2239           dump_printf_loc (MSG_NOTE, vect_location,
2240                            "removing SLP instance operations starting from: ");
2241           dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
2242                             SLP_TREE_SCALAR_STMTS
2243                               (SLP_INSTANCE_TREE (instance))[0], 0);
2244           vect_free_slp_instance (instance);
2245           slp_instances.ordered_remove (i);
2246         }
2247       else
2248         i++;
2249     }
2250
2251   if (!slp_instances.length ())
2252     return false;
2253
2254   return true;
2255 }
2256
2257
2258 /* Compute the scalar cost of the SLP node NODE and its children
2259    and return it.  Do not account defs that are marked in LIFE;
2260    update LIFE according to uses of NODE.  */
2261
2262 static unsigned
2263 vect_bb_slp_scalar_cost (basic_block bb,
2264                          slp_tree node, vec<bool, va_heap> *life)
2265 {
2266   unsigned scalar_cost = 0;
2267   unsigned i;
2268   gimple stmt;
2269   slp_tree child;
2270
2271   FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2272     {
2273       unsigned stmt_cost;
2274       ssa_op_iter op_iter;
2275       def_operand_p def_p;
2276       stmt_vec_info stmt_info;
2277
2278       if ((*life)[i])
2279         continue;
2280
2281       /* If there is a non-vectorized use of the defs then the scalar
2282          stmt is kept live, in which case we do not account it (or any
2283          required defs in the SLP children) in the scalar cost.  This
2284          way we make the vectorization more costly when compared to
2285          the scalar cost.  */
2286       FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
2287         {
2288           imm_use_iterator use_iter;
2289           gimple use_stmt;
2290           FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
2291             if (!is_gimple_debug (use_stmt)
2292                 && (gimple_code (use_stmt) == GIMPLE_PHI
2293                     || gimple_bb (use_stmt) != bb
2294                     || !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (use_stmt))))
2295               {
2296                 (*life)[i] = true;
2297                 BREAK_FROM_IMM_USE_STMT (use_iter);
2298               }
2299         }
2300       if ((*life)[i])
2301         continue;
2302
2303       stmt_info = vinfo_for_stmt (stmt);
2304       if (STMT_VINFO_DATA_REF (stmt_info))
2305         {
2306           if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
2307             stmt_cost = vect_get_stmt_cost (scalar_load);
2308           else
2309             stmt_cost = vect_get_stmt_cost (scalar_store);
2310         }
2311       else
2312         stmt_cost = vect_get_stmt_cost (scalar_stmt);
2313
2314       scalar_cost += stmt_cost;
2315     }
2316
2317   FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2318     if (child)
2319       scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);
2320
2321   return scalar_cost;
2322 }
2323
2324 /* Check if vectorization of the basic block is profitable.  */
2325
2326 static bool
2327 vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
2328 {
2329   vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2330   slp_instance instance;
2331   int i, j;
2332   unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
2333   unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
2334   void *target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
2335   stmt_vec_info stmt_info = NULL;
2336   stmt_vector_for_cost body_cost_vec;
2337   stmt_info_for_cost *ci;
2338
2339   /* Calculate vector costs.  */
2340   FOR_EACH_VEC_ELT (slp_instances, i, instance)
2341     {
2342       body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2343
2344       FOR_EACH_VEC_ELT (body_cost_vec, j, ci)
2345         {
2346           stmt_info = ci->stmt ? vinfo_for_stmt (ci->stmt) : NULL;
2347           (void) add_stmt_cost (target_cost_data, ci->count, ci->kind,
2348                                 stmt_info, ci->misalign, vect_body);
2349         }
2350     }
2351
2352   /* Calculate scalar cost.  */
2353   FOR_EACH_VEC_ELT (slp_instances, i, instance)
2354     {
2355       auto_vec<bool, 20> life;
2356       life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
2357       scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
2358                                               SLP_INSTANCE_TREE (instance),
2359                                               &life);
2360     }
2361
2362   /* Complete the target-specific cost calculation.  */
2363   finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
2364                &vec_inside_cost, &vec_epilogue_cost);
2365
2366   vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
2367
2368   if (dump_enabled_p ())
2369     {
2370       dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2371       dump_printf (MSG_NOTE, "  Vector inside of basic block cost: %d\n",
2372                    vec_inside_cost);
2373       dump_printf (MSG_NOTE, "  Vector prologue cost: %d\n", vec_prologue_cost);
2374       dump_printf (MSG_NOTE, "  Vector epilogue cost: %d\n", vec_epilogue_cost);
2375       dump_printf (MSG_NOTE, "  Scalar cost of basic block: %d\n", scalar_cost);
2376     }
2377
2378   /* Vectorization is profitable if its cost is less than the cost of the
2379      scalar version.  */
2380   if (vec_outside_cost + vec_inside_cost >= scalar_cost)
2381     return false;
2382
2383   return true;
2384 }
2385
2386 /* Check if the basic block can be vectorized.  */
2387
2388 static bb_vec_info
2389 vect_slp_analyze_bb_1 (basic_block bb)
2390 {
2391   bb_vec_info bb_vinfo;
2392   vec<slp_instance> slp_instances;
2393   slp_instance instance;
2394   int i;
2395   int min_vf = 2;
2396   unsigned n_stmts = 0;
2397
2398   bb_vinfo = new_bb_vec_info (bb);
2399   if (!bb_vinfo)
2400     return NULL;
2401
2402   if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf, &n_stmts))
2403     {
2404       if (dump_enabled_p ())
2405         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2406                          "not vectorized: unhandled data-ref in basic "
2407                          "block.\n");
2408
2409       destroy_bb_vec_info (bb_vinfo);
2410       return NULL;
2411     }
2412
2413   if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
2414     {
2415       if (dump_enabled_p ())
2416         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2417                          "not vectorized: not enough data-refs in "
2418                          "basic block.\n");
2419
2420       destroy_bb_vec_info (bb_vinfo);
2421       return NULL;
2422     }
2423
2424   if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
2425     {
2426       if (dump_enabled_p ())
2427         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2428                          "not vectorized: unhandled data access in "
2429                          "basic block.\n");
2430
2431       destroy_bb_vec_info (bb_vinfo);
2432       return NULL;
2433     }
2434
2435   vect_pattern_recog (NULL, bb_vinfo);
2436
2437   if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
2438     {
2439       if (dump_enabled_p ())
2440         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2441                          "not vectorized: bad data alignment in basic "
2442                          "block.\n");
2443
2444       destroy_bb_vec_info (bb_vinfo);
2445       return NULL;
2446     }
2447
2448   /* Check the SLP opportunities in the basic block, analyze and build SLP
2449      trees.  */
2450   if (!vect_analyze_slp (NULL, bb_vinfo, n_stmts))
2451     {
2452       if (dump_enabled_p ())
2453         {
2454           dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2455                            "Failed to SLP the basic block.\n");
2456           dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, 
2457                            "not vectorized: failed to find SLP opportunities "
2458                            "in basic block.\n");
2459         }
2460
2461       destroy_bb_vec_info (bb_vinfo);
2462       return NULL;
2463     }
2464
2465   slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2466
2467   /* Mark all the statements that we want to vectorize as pure SLP and
2468      relevant.  */
2469   FOR_EACH_VEC_ELT (slp_instances, i, instance)
2470     {
2471       vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
2472       vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
2473     }
2474
2475   /* Mark all the statements that we do not want to vectorize.  */
2476   for (gimple_stmt_iterator gsi = gsi_start_bb (BB_VINFO_BB (bb_vinfo));
2477        !gsi_end_p (gsi); gsi_next (&gsi))
2478     {
2479       stmt_vec_info vinfo = vinfo_for_stmt (gsi_stmt (gsi));
2480       if (STMT_SLP_TYPE (vinfo) != pure_slp)
2481         STMT_VINFO_VECTORIZABLE (vinfo) = false;
2482     }
2483
2484   /* Analyze dependences.  At this point all stmts not participating in
2485      vectorization have to be marked.  Dependence analysis assumes
2486      that we either vectorize all SLP instances or none at all.  */
2487   if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
2488     {
2489       if (dump_enabled_p ())
2490         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2491                          "not vectorized: unhandled data dependence "
2492                          "in basic block.\n");
2493
2494       destroy_bb_vec_info (bb_vinfo);
2495       return NULL;
2496     }
2497
2498   if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
2499     {
2500       if (dump_enabled_p ())
2501         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2502                          "not vectorized: unsupported alignment in basic "
2503                          "block.\n");
2504       destroy_bb_vec_info (bb_vinfo);
2505       return NULL;
2506     }
2507
2508   if (!vect_slp_analyze_operations (BB_VINFO_SLP_INSTANCES (bb_vinfo)))
2509     {
2510       if (dump_enabled_p ())
2511         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2512                          "not vectorized: bad operation in basic block.\n");
2513
2514       destroy_bb_vec_info (bb_vinfo);
2515       return NULL;
2516     }
2517
2518   /* Compute the costs of the SLP instances.  */
2519   FOR_EACH_VEC_ELT (slp_instances, i, instance)
2520     {
2521       gimple stmt = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
2522       tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
2523       vect_analyze_slp_cost (NULL, bb_vinfo,
2524                              instance, TYPE_VECTOR_SUBPARTS (vectype));
2525     }
2526
2527   /* Cost model: check if the vectorization is worthwhile.  */
2528   if (!unlimited_cost_model (NULL)
2529       && !vect_bb_vectorization_profitable_p (bb_vinfo))
2530     {
2531       if (dump_enabled_p ())
2532         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2533                          "not vectorized: vectorization is not "
2534                          "profitable.\n");
2535
2536       destroy_bb_vec_info (bb_vinfo);
2537       return NULL;
2538     }
2539
2540   if (dump_enabled_p ())
2541     dump_printf_loc (MSG_NOTE, vect_location,
2542                      "Basic block will be vectorized using SLP\n");
2543
2544   return bb_vinfo;
2545 }
2546
2547
2548 bb_vec_info
2549 vect_slp_analyze_bb (basic_block bb)
2550 {
2551   bb_vec_info bb_vinfo;
2552   int insns = 0;
2553   gimple_stmt_iterator gsi;
2554   unsigned int vector_sizes;
2555
2556   if (dump_enabled_p ())
2557     dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
2558
2559   for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2560     {
2561       gimple stmt = gsi_stmt (gsi);
2562       if (!is_gimple_debug (stmt)
2563           && !gimple_nop_p (stmt)
2564           && gimple_code (stmt) != GIMPLE_LABEL)
2565         insns++;
2566     }
2567
2568   if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
2569     {
2570       if (dump_enabled_p ())
2571         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2572                          "not vectorized: too many instructions in "
2573                          "basic block.\n");
2574
2575       return NULL;
2576     }
2577
2578   /* Autodetect the first vector size we try.  */
2579   current_vector_size = 0;
2580   vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2581
2582   while (1)
2583     {
2584       bb_vinfo = vect_slp_analyze_bb_1 (bb);
2585       if (bb_vinfo)
2586         return bb_vinfo;
2587
2588       destroy_bb_vec_info (bb_vinfo);
2589
2590       vector_sizes &= ~current_vector_size;
2591       if (vector_sizes == 0
2592           || current_vector_size == 0)
2593         return NULL;
2594
2595       /* Try the next biggest vector size.  */
2596       current_vector_size = 1 << floor_log2 (vector_sizes);
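      /* For example, if the target supports sizes 16 and 8 and the try
         with 16 failed, the remaining mask is 8 and we retry with 8.  */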
2597       if (dump_enabled_p ())
2598         dump_printf_loc (MSG_NOTE, vect_location,
2599                          "***** Re-trying analysis with "
2600                          "vector size %d\n", current_vector_size);
2601     }
2602 }
2603
2604
2605 /* SLP costs are calculated according to the SLP instance unrolling factor (i.e.,
2606    the number of created vector stmts depends on the unrolling factor).
2607    However, the actual number of vector stmts for every SLP node depends on
2608    VF which is set later in vect_analyze_operations ().  Hence, SLP costs
2609    should be updated.  In this function we assume that the inside costs
2610    calculated in vect_model_xxx_cost are linear in ncopies.  */
2611
2612 void
2613 vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
2614 {
2615   unsigned int i, j, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2616   vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2617   slp_instance instance;
2618   stmt_vector_for_cost body_cost_vec;
2619   stmt_info_for_cost *si;
2620   void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2621
2622   if (dump_enabled_p ())
2623     dump_printf_loc (MSG_NOTE, vect_location,
2624                      "=== vect_update_slp_costs_according_to_vf ===\n");
2625
2626   FOR_EACH_VEC_ELT (slp_instances, i, instance)
2627     {
2628       /* We assume that costs are linear in ncopies.  */
2629       int ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (instance);
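      /* For example, with VF 8 and an instance unrolling factor of 2, each
         recorded stmt count is scaled by ncopies = 8 / 2 = 4.  */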
2630
2631       /* Record the instance's instructions in the target cost model.
2632          This was delayed until here because the count of instructions
2633          isn't known beforehand.  */
2634       body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2635
2636       FOR_EACH_VEC_ELT (body_cost_vec, j, si)
2637         (void) add_stmt_cost (data, si->count * ncopies, si->kind,
2638                               vinfo_for_stmt (si->stmt), si->misalign,
2639                               vect_body);
2640     }
2641 }
2642
2643
2644 /* For constant and loop invariant defs of SLP_NODE this function returns
2645    (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
2646    OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
2647    scalar stmts.  NUMBER_OF_VECTORS is the number of vector defs to create.
2648    REDUC_INDEX is the index of the reduction operand in the statements, unless
2649    it is -1.  */
2650
2651 static void
2652 vect_get_constant_vectors (tree op, slp_tree slp_node,
2653                            vec<tree> *vec_oprnds,
2654                            unsigned int op_num, unsigned int number_of_vectors,
2655                            int reduc_index)
2656 {
2657   vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2658   gimple stmt = stmts[0];
2659   stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2660   unsigned nunits;
2661   tree vec_cst;
2662   tree *elts;
2663   unsigned j, number_of_places_left_in_vector;
2664   tree vector_type;
2665   tree vop;
2666   int group_size = stmts.length ();
2667   unsigned int vec_num, i;
2668   unsigned number_of_copies = 1;
2669   vec<tree> voprnds;
2670   voprnds.create (number_of_vectors);
2671   bool constant_p, is_store;
2672   tree neutral_op = NULL;
2673   enum tree_code code = gimple_expr_code (stmt);
2674   gimple def_stmt;
2675   struct loop *loop;
2676   gimple_seq ctor_seq = NULL;
2677
2678   vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
2679   nunits = TYPE_VECTOR_SUBPARTS (vector_type);
2680
2681   if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
2682       && reduc_index != -1)
2683     {
2684       op_num = reduc_index;
2685       op = gimple_op (stmt, op_num + 1);
2686       /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
2687          we need either neutral operands or the original operands.  See
2688          get_initial_def_for_reduction() for details.  */
2689       switch (code)
2690         {
2691           case WIDEN_SUM_EXPR:
2692           case DOT_PROD_EXPR:
2693           case SAD_EXPR:
2694           case PLUS_EXPR:
2695           case MINUS_EXPR:
2696           case BIT_IOR_EXPR:
2697           case BIT_XOR_EXPR:
2698              if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2699                neutral_op = build_real (TREE_TYPE (op), dconst0);
2700              else
2701                neutral_op = build_int_cst (TREE_TYPE (op), 0);
2702
2703              break;
2704
2705           case MULT_EXPR:
2706              if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2707                neutral_op = build_real (TREE_TYPE (op), dconst1);
2708              else
2709                neutral_op = build_int_cst (TREE_TYPE (op), 1);
2710
2711              break;
2712
2713           case BIT_AND_EXPR:
2714             neutral_op = build_int_cst (TREE_TYPE (op), -1);
2715             break;
2716
2717           /* For MIN/MAX we don't have an easy neutral operand but
2718              the initial values can be used fine here.  Only for
2719              a reduction chain do we have to force a neutral element.  */
2720           case MAX_EXPR:
2721           case MIN_EXPR:
2722             if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
2723               neutral_op = NULL;
2724             else
2725               {
2726                 def_stmt = SSA_NAME_DEF_STMT (op);
2727                 loop = (gimple_bb (stmt))->loop_father;
2728                 neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2729                                                     loop_preheader_edge (loop));
2730               }
2731             break;
2732
2733           default:
2734             gcc_assert (!GROUP_FIRST_ELEMENT (stmt_vinfo));
2735             neutral_op = NULL;
2736         }
2737     }
2738
2739   if (STMT_VINFO_DATA_REF (stmt_vinfo))
2740     {
2741       is_store = true;
2742       op = gimple_assign_rhs1 (stmt);
2743     }
2744   else
2745     is_store = false;
2746
2747   gcc_assert (op);
2748
2749   if (CONSTANT_CLASS_P (op))
2750     constant_p = true;
2751   else
2752     constant_p = false;
2753
2754   /* NUMBER_OF_COPIES is the number of times we need to use the same values in
2755      created vectors. It is greater than 1 if unrolling is performed.
2756
2757      For example, we have two scalar operands, s1 and s2 (e.g., group of
2758      strided accesses of size two), while NUNITS is four (i.e., four scalars
2759      of this type can be packed in a vector).  The output vector will contain
2760      two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
2761      will be 2).
2762
2763      If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
2764      containing the operands.
2765
2766      For example, NUNITS is four as before, and the group size is 8
2767      (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
2768      {s5, s6, s7, s8}.  */
2769
2770   number_of_copies = least_common_multiple (nunits, group_size) / group_size;
2771
2772   number_of_places_left_in_vector = nunits;
2773   elts = XALLOCAVEC (tree, nunits);
2774   bool place_after_defs = false;
2775   for (j = 0; j < number_of_copies; j++)
2776     {
2777       for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
2778         {
2779           if (is_store)
2780             op = gimple_assign_rhs1 (stmt);
2781           else
2782             {
2783               switch (code)
2784                 {
2785                   case COND_EXPR:
2786                     if (op_num == 0 || op_num == 1)
2787                       {
2788                         tree cond = gimple_assign_rhs1 (stmt);
2789                         op = TREE_OPERAND (cond, op_num);
2790                       }
2791                     else
2792                       {
2793                         if (op_num == 2)
2794                           op = gimple_assign_rhs2 (stmt);
2795                         else
2796                           op = gimple_assign_rhs3 (stmt);
2797                       }
2798                     break;
2799
2800                   case CALL_EXPR:
2801                     op = gimple_call_arg (stmt, op_num);
2802                     break;
2803
2804                   case LSHIFT_EXPR:
2805                   case RSHIFT_EXPR:
2806                   case LROTATE_EXPR:
2807                   case RROTATE_EXPR:
2808                     op = gimple_op (stmt, op_num + 1);
2809                     /* Unlike the other binary operators, shifts/rotates take
2810                        an int shift count instead of one of the same type as
2811                        the lhs, so make sure the scalar is the right type if
2812                        we are dealing with vectors of
2813                        long long/long/short/char.  */
2814                     if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
2815                       op = fold_convert (TREE_TYPE (vector_type), op);
2816                     break;
2817
2818                   default:
2819                     op = gimple_op (stmt, op_num + 1);
2820                     break;
2821                 }
2822             }
2823
2824           if (reduc_index != -1)
2825             {
2826               loop = (gimple_bb (stmt))->loop_father;
2827               def_stmt = SSA_NAME_DEF_STMT (op);
2828
2829               gcc_assert (loop);
2830
2831               /* Get the def before the loop.  In a reduction chain we have only
2832                  one initial value.  */
2833               if ((j != (number_of_copies - 1)
2834                    || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
2835                        && i != 0))
2836                   && neutral_op)
2837                 op = neutral_op;
2838               else
2839                 op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2840                                             loop_preheader_edge (loop));
2841             }
2842
2843           /* Create 'vect_ = {op0,op1,...,opn}'.  */
2844           number_of_places_left_in_vector--;
2845           tree orig_op = op;
2846           if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
2847             {
2848               if (CONSTANT_CLASS_P (op))
2849                 {
2850                   op = fold_unary (VIEW_CONVERT_EXPR,
2851                                    TREE_TYPE (vector_type), op);
2852                   gcc_assert (op && CONSTANT_CLASS_P (op));
2853                 }
2854               else
2855                 {
2856                   tree new_temp = make_ssa_name (TREE_TYPE (vector_type));
2857                   gimple init_stmt;
2858                   op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type), op);
2859                   init_stmt
2860                     = gimple_build_assign (new_temp, VIEW_CONVERT_EXPR, op);
2861                   gimple_seq_add_stmt (&ctor_seq, init_stmt);
2862                   op = new_temp;
2863                 }
2864             }
2865           elts[number_of_places_left_in_vector] = op;
2866           if (!CONSTANT_CLASS_P (op))
2867             constant_p = false;
2868           if (TREE_CODE (orig_op) == SSA_NAME
2869               && !SSA_NAME_IS_DEFAULT_DEF (orig_op)
2870               && STMT_VINFO_BB_VINFO (stmt_vinfo)
2871               && (STMT_VINFO_BB_VINFO (stmt_vinfo)->bb
2872                   == gimple_bb (SSA_NAME_DEF_STMT (orig_op))))
2873             place_after_defs = true;
2874
2875           if (number_of_places_left_in_vector == 0)
2876             {
2877               number_of_places_left_in_vector = nunits;
2878
2879               if (constant_p)
2880                 vec_cst = build_vector (vector_type, elts);
2881               else
2882                 {
2883                   vec<constructor_elt, va_gc> *v;
2884                   unsigned k;
2885                   vec_alloc (v, nunits);
2886                   for (k = 0; k < nunits; ++k)
2887                     CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
2888                   vec_cst = build_constructor (vector_type, v);
2889                 }
2890               tree init;
2891               gimple_stmt_iterator gsi;
2892               if (place_after_defs)
2893                 {
2894                   gsi = gsi_for_stmt
2895                           (vect_find_last_scalar_stmt_in_slp (slp_node));
2896                   init = vect_init_vector (stmt, vec_cst, vector_type, &gsi);
2897                 }
2898               else
2899                 init = vect_init_vector (stmt, vec_cst, vector_type, NULL);
2900               if (ctor_seq != NULL)
2901                 {
2902                   gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
2903                   gsi_insert_seq_before_without_update (&gsi, ctor_seq,
2904                                                         GSI_SAME_STMT);
2905                   ctor_seq = NULL;
2906                 }
2907               voprnds.quick_push (init);
2908               place_after_defs = false;
2909             }
2910         }
2911     }
2912
2913   /* Since the vectors are created in reverse order, we have to invert
2914      them.  */
2915   vec_num = voprnds.length ();
2916   for (j = vec_num; j != 0; j--)
2917     {
2918       vop = voprnds[j - 1];
2919       vec_oprnds->quick_push (vop);
2920     }
2921
2922   voprnds.release ();
2923
2924   /* If VF is greater than the unrolling factor needed for the SLP group
2925      of stmts, the NUMBER_OF_VECTORS to be created is greater than
2926      NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
2927      to replicate the vectors.  */
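  /* For example, if two vectors were created but four are needed, the two
     are pushed again, giving {v1, v2, v1, v2}; for reductions a vector of
     the neutral element is pushed instead.  */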
2928   while (number_of_vectors > vec_oprnds->length ())
2929     {
2930       tree neutral_vec = NULL;
2931
2932       if (neutral_op)
2933         {
2934           if (!neutral_vec)
2935             neutral_vec = build_vector_from_val (vector_type, neutral_op);
2936
2937           vec_oprnds->quick_push (neutral_vec);
2938         }
2939       else
2940         {
2941           for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
2942             vec_oprnds->quick_push (vop);
2943         }
2944     }
2945 }
2946
2947
2948 /* Get the vectorized definitions from SLP_NODE, which contains the
2949    corresponding vectorized def-stmts.  */
2950
2951 static void
2952 vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
2953 {
2954   tree vec_oprnd;
2955   gimple vec_def_stmt;
2956   unsigned int i;
2957
2958   gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
2959
2960   FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
2961     {
2962       gcc_assert (vec_def_stmt);
2963       vec_oprnd = gimple_get_lhs (vec_def_stmt);
2964       vec_oprnds->quick_push (vec_oprnd);
2965     }
2966 }
2967
2968
2969 /* Get vectorized definitions for SLP_NODE.
2970    If the scalar definitions are loop invariants or constants, collect them and
2971    call vect_get_constant_vectors() to create vector stmts.
2972    Otherwise, the def-stmts must already be vectorized, with the vectorized
2973    stmts stored in the corresponding child of SLP_NODE; we call
2974    vect_get_slp_vect_defs () to retrieve them.  */
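/* A hypothetical caller (simplified, names invented for illustration)
   fetching the defs of a two-operand node outside of a reduction:

     vec<tree> ops = vNULL;
     vec<vec<tree> > vec_defs = vNULL;
     ops.create (2);
     vec_defs.create (2);
     ops.quick_push (op0);
     ops.quick_push (op1);
     vect_get_slp_defs (ops, slp_node, &vec_defs, -1);

   Afterwards vec_defs[0] holds the vectorized defs for op0 and vec_defs[1]
   those for op1.  */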
2975
2976 void
2977 vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
2978                    vec<vec<tree> > *vec_oprnds, int reduc_index)
2979 {
2980   gimple first_stmt;
2981   int number_of_vects = 0, i;
2982   unsigned int child_index = 0;
2983   HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
2984   slp_tree child = NULL;
2985   vec<tree> vec_defs;
2986   tree oprnd;
2987   bool vectorized_defs;
2988
2989   first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
2990   FOR_EACH_VEC_ELT (ops, i, oprnd)
2991     {
2992       /* For each operand we check if it has vectorized definitions in a child
2993          node or we need to create them (for invariants and constants).  We
2994          check if the LHS of the first stmt of the next child matches OPRND.
2995          If it does, we found the correct child.  Otherwise, we call
2996          vect_get_constant_vectors () and do not advance CHILD_INDEX, so
2997          that this child node is checked again for the next operand.  */
2998       vectorized_defs = false;
2999       if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
3000         {
3001           child = SLP_TREE_CHILDREN (slp_node)[child_index];
3002
3003           /* We have to check both pattern and original def, if available.  */
3004           if (child)
3005             {
3006               gimple first_def = SLP_TREE_SCALAR_STMTS (child)[0];
3007               gimple related
3008                 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));
3009
3010               if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
3011                   || (related
3012                       && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
3013                 {
3014                   /* The number of vector defs is determined by the number of
3015                      vector statements in the node from which we get those
3016                      statements.  */
3017                   number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
3018                   vectorized_defs = true;
3019                   child_index++;
3020                 }
3021             }
3022           else
3023             child_index++;
3024         }
3025
3026       if (!vectorized_defs)
3027         {
3028           if (i == 0)
3029             {
3030               number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3031               /* Number of vector stmts was calculated according to LHS in
3032                  vect_schedule_slp_instance (), fix it by replacing LHS with
3033                  RHS, if necessary.  See vect_get_smallest_scalar_type () for
3034                  details.  */
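              /* E.g. (types invented): for shorts on the LHS computed from
                 int operands, rhs_size_unit == 4 and lhs_size_unit == 2, so
                 twice as many vectors are needed for the operands.  */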
3035               vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
3036                                              &rhs_size_unit);
3037               if (rhs_size_unit != lhs_size_unit)
3038                 {
3039                   number_of_vects *= rhs_size_unit;
3040                   number_of_vects /= lhs_size_unit;
3041                 }
3042             }
3043         }
3044
3045       /* Allocate memory for vectorized defs.  */
3046       vec_defs = vNULL;
3047       vec_defs.create (number_of_vects);
3048
3049       /* For reduction defs we call vect_get_constant_vectors (), since we are
3050          looking for initial loop invariant values.  */
3051       if (vectorized_defs && reduc_index == -1)
3052         /* The defs are already vectorized.  */
3053         vect_get_slp_vect_defs (child, &vec_defs);
3054       else
3055         /* Build vectors from scalar defs.  */
3056         vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
3057                                    number_of_vects, reduc_index);
3058
3059       vec_oprnds->quick_push (vec_defs);
3060
3061       /* For reductions, we only need initial values.  */
3062       if (reduc_index != -1)
3063         return;
3064     }
3065 }
3066
3067
3068 /* Create NCOPIES permutation statements using the vector mask MASK and
3069    two input vectors placed in DR_CHAIN at FIRST_VEC_INDX and
3070    SECOND_VEC_INDX for the first copy, shifting by STRIDE elements of
3071    DR_CHAIN for every copy.
3072    (STRIDE is the number of vectorized stmts for NODE divided by the number of
3073    copies).
3074    VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE, where
3075    the created stmts must be inserted.  */
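/* For illustration (numbers invented): if NODE has six vector stmts and
   NCOPIES == 3, STRIDE is 2; copy 0 permutes DR_CHAIN[FIRST_VEC_INDX] and
   DR_CHAIN[SECOND_VEC_INDX], copy 1 the entries two further on, and copy I
   is stored at SLP_TREE_VEC_STMTS (NODE)[2 * I + VECT_STMTS_COUNTER].  */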
3076
3077 static inline void
3078 vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
3079                            tree mask, int first_vec_indx, int second_vec_indx,
3080                            gimple_stmt_iterator *gsi, slp_tree node,
3081                            tree vectype, vec<tree> dr_chain,
3082                            int ncopies, int vect_stmts_counter)
3083 {
3084   tree perm_dest;
3085   gimple perm_stmt = NULL;
3086   stmt_vec_info next_stmt_info;
3087   int i, stride;
3088   tree first_vec, second_vec, data_ref;
3089
3090   stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
3091
3092   /* Initialize the vect stmts of NODE to properly insert the generated
3093      stmts later.  */
3094   for (i = SLP_TREE_VEC_STMTS (node).length ();
3095        i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
3096     SLP_TREE_VEC_STMTS (node).quick_push (NULL);
3097
3098   perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3099   for (i = 0; i < ncopies; i++)
3100     {
3101       first_vec = dr_chain[first_vec_indx];
3102       second_vec = dr_chain[second_vec_indx];
3103
3104       /* Generate the permute statement.  */
3105       perm_stmt = gimple_build_assign (perm_dest, VEC_PERM_EXPR,
3106                                        first_vec, second_vec, mask);
3107       data_ref = make_ssa_name (perm_dest, perm_stmt);
3108       gimple_set_lhs (perm_stmt, data_ref);
3109       vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3110
3111       /* Store the vector statement in NODE.  */
3112       SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;
3113
3114       first_vec_indx += stride;
3115       second_vec_indx += stride;
3116     }
3117
3118   /* Mark the scalar stmt as vectorized.  */
3119   next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
3120   STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
3121 }
3122
3123
3124 /* Given FIRST_MASK_ELEMENT - the mask element in element representation,
3125    return in CURRENT_MASK_ELEMENT its equivalent in target specific
3126    representation.  Check that the mask is valid and return FALSE if not.
3127    Return TRUE in NEED_NEXT_VECTOR if the permutation requires moving to
3128    the next vector, i.e., the current first vector is not needed.  */
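/* Worked example (values invented): with MASK_NUNITS == 4 and
   *NUMBER_OF_MASK_FIXES == 1, FIRST_MASK_ELEMENT == 9 and M == 0 give
   *CURRENT_MASK_ELEMENT == 9 >= 2 * 4, so the loop below rebases it to 5
   (element 1 of the new second vector), rewrites the already emitted mask
   elements, and NEED_NEXT_VECTOR is set on return.  */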
3129
3130 static bool
3131 vect_get_mask_element (gimple stmt, int first_mask_element, int m,
3132                        int mask_nunits, bool only_one_vec, int index,
3133                        unsigned char *mask, int *current_mask_element,
3134                        bool *need_next_vector, int *number_of_mask_fixes,
3135                        bool *mask_fixed, bool *needs_first_vector)
3136 {
3137   int i;
3138
3139   /* Convert to target specific representation.  */
3140   *current_mask_element = first_mask_element + m;
3141   /* Adjust the value in case it's a mask for the second and third vectors.  */
3142   *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);
3143
3144   if (*current_mask_element < 0)
3145     {
3146       if (dump_enabled_p ())
3147         {
3148           dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3149                            "permutation requires past vector ");
3150           dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3151           dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3152         }
3153       return false;
3154     }
3155
3156   if (*current_mask_element < mask_nunits)
3157     *needs_first_vector = true;
3158
3159   /* We have only one input vector to permute but the mask accesses values in
3160      the next vector as well.  */
3161   if (only_one_vec && *current_mask_element >= mask_nunits)
3162     {
3163       if (dump_enabled_p ())
3164         {
3165           dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3166                            "permutation requires at least two vectors ");
3167           dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3168           dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3169         }
3170
3171       return false;
3172     }
3173
3174   /* The mask requires the next vector.  */
3175   while (*current_mask_element >= mask_nunits * 2)
3176     {
3177       if (*needs_first_vector || *mask_fixed)
3178         {
3179           /* We either need the first vector too or have already moved to the
3180              next vector. In both cases, this permutation needs three
3181              vectors.  */
3182           if (dump_enabled_p ())
3183             {
3184               dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3185                                "permutation requires at "
3186                                "least three vectors ");
3187               dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3188               dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3189             }
3190
3191           return false;
3192         }
3193
3194       /* We move to the next vector, dropping the first one and working with
3195          the second and the third - we need to adjust the values of the mask
3196          accordingly.  */
3197       *current_mask_element -= mask_nunits * *number_of_mask_fixes;
3198
3199       for (i = 0; i < index; i++)
3200         mask[i] -= mask_nunits * *number_of_mask_fixes;
3201
3202       (*number_of_mask_fixes)++;
3203       *mask_fixed = true;
3204     }
3205
3206   *need_next_vector = *mask_fixed;
3207
3208   /* This was the last element of this mask. Start a new one.  */
3209   if (index == mask_nunits - 1)
3210     {
3211       *number_of_mask_fixes = 1;
3212       *mask_fixed = false;
3213       *needs_first_vector = false;
3214     }
3215
3216   return true;
3217 }
3218
3219
3220 /* Generate vector permute statements from a list of loads in DR_CHAIN.
3221    If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
3222    permute statements for the SLP node NODE of the SLP instance
3223    SLP_NODE_INSTANCE.  */
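/* During analysis this is expected to be called with ANALYZE_ONLY set (and
   typically an empty DR_CHAIN and NULL GSI), in which case only the
   can_vec_perm_p checks below run and no statements are generated.  */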
3224
3225 bool
3226 vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
3227                               gimple_stmt_iterator *gsi, int vf,
3228                               slp_instance slp_node_instance, bool analyze_only)
3229 {
3230   gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3231   stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3232   tree mask_element_type = NULL_TREE, mask_type;
3233   int i, j, k, nunits, vec_index = 0, scalar_index;
3234   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3235   gimple next_scalar_stmt;
3236   int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
3237   int first_mask_element;
3238   int index, unroll_factor, current_mask_element, ncopies;
3239   unsigned char *mask;
3240   bool only_one_vec = false, need_next_vector = false;
3241   int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
3242   int number_of_mask_fixes = 1;
3243   bool mask_fixed = false;
3244   bool needs_first_vector = false;
3245   machine_mode mode;
3246
3247   mode = TYPE_MODE (vectype);
3248
3249   if (!can_vec_perm_p (mode, false, NULL))
3250     {
3251       if (dump_enabled_p ())
3252         {
3253           dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3254                            "no vect permute for ");
3255           dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3256           dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3257         }
3258       return false;
3259     }
3260
3261   /* The generic VEC_PERM_EXPR code always uses an integral type of the
3262      same size as the vector element being permuted.  */
3263   mask_element_type = lang_hooks.types.type_for_mode
3264                 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
3265   mask_type = get_vectype_for_scalar_type (mask_element_type);
3266   nunits = TYPE_VECTOR_SUBPARTS (vectype);
3267   mask = XALLOCAVEC (unsigned char, nunits);
3268   unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3269
3270   /* The number of vector stmts to generate, based solely on the
3271      SLP_NODE_INSTANCE unrolling factor.  */
3272   orig_vec_stmts_num
3273     = group_size * SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
3274   if (orig_vec_stmts_num == 1)
3275     only_one_vec = true;
3276
3277   /* The number of copies is determined by the final vectorization factor
3278      relative to the SLP_NODE_INSTANCE unrolling factor.  */
3279   ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3280
3281   if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
3282     return false;
3283
3284   /* Generate permutation masks for every NODE.  The number of masks for
3285      each NODE is equal to GROUP_SIZE.
3286      E.g., we have a group of three nodes with three loads from the same
3287      location in each node, and the vector size is 4.  I.e., we have an
3288      a0b0c0a1b1c1... sequence and we need to create the following vectors:
3289      for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
3290      for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
3291      ...
3292
3293      The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
3294      The last mask is illegal since we assume two operands for permute
3295      operation, and the mask element values can't be outside that range.
3296      Hence, the last mask must be converted into {2,5,5,5}.
3297      For the first two permutations we need the first and the second input
3298      vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
3299      we need the second and the third vectors: {b1,c1,a2,b2} and
3300      {c2,a3,b3,c3}.  */
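  /* In the loops below the scalar mask element for load K of unrolled
     iteration J is SLP_TREE_LOAD_PERMUTATION (node)[K] + J * GROUP_SIZE.
     For the a's above (permutation index 0, GROUP_SIZE == 3, NUNITS == 4)
     this produces 0,0,0,3,3,3,6,6,6,9,9,9, which chunks into {0,0,0,3}
     {3,3,6,6} {6,9,9,9}; vect_get_mask_element then rebases the last mask
     to {2,5,5,5} as described above.  */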
3301
3302   {
3303       scalar_index = 0;
3304       index = 0;
3305       vect_stmts_counter = 0;
3306       vec_index = 0;
3307       first_vec_index = vec_index++;
3308       if (only_one_vec)
3309         second_vec_index = first_vec_index;
3310       else
3311         second_vec_index = vec_index++;
3312
3313       for (j = 0; j < unroll_factor; j++)
3314         {
3315           for (k = 0; k < group_size; k++)
3316             {
3317               i = SLP_TREE_LOAD_PERMUTATION (node)[k];
3318               first_mask_element = i + j * group_size;
3319               if (!vect_get_mask_element (stmt, first_mask_element, 0,
3320                                           nunits, only_one_vec, index,
3321                                           mask, &current_mask_element,
3322                                           &need_next_vector,
3323                                           &number_of_mask_fixes, &mask_fixed,
3324                                           &needs_first_vector))
3325                 return false;
3326               gcc_assert (current_mask_element >= 0
3327                           && current_mask_element < 2 * nunits);
3328               mask[index++] = current_mask_element;
3329
3330               if (index == nunits)
3331                 {
3332                   index = 0;
3333                   if (!can_vec_perm_p (mode, false, mask))
3334                     {
3335                       if (dump_enabled_p ())
3336                         {
3337                           dump_printf_loc (MSG_MISSED_OPTIMIZATION,
3338                                            vect_location,
3339                                            "unsupported vect permute { ");
3340                           for (i = 0; i < nunits; ++i)
3341                             dump_printf (MSG_MISSED_OPTIMIZATION, "%d ",
3342                                          mask[i]);
3343                           dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
3344                         }
3345                       return false;
3346                     }
3347
3348                   if (!analyze_only)
3349                     {
3350                       int l;
3351                       tree mask_vec, *mask_elts;
3352                       mask_elts = XALLOCAVEC (tree, nunits);
3353                       for (l = 0; l < nunits; ++l)
3354                         mask_elts[l] = build_int_cst (mask_element_type,
3355                                                       mask[l]);
3356                       mask_vec = build_vector (mask_type, mask_elts);
3357
3358                       if (need_next_vector)
3359                         {
3360                           first_vec_index = second_vec_index;
3361                           second_vec_index = vec_index;
3362                         }
3363
3364                       next_scalar_stmt
3365                           = SLP_TREE_SCALAR_STMTS (node)[scalar_index++];
3366
3367                       vect_create_mask_and_perm (stmt, next_scalar_stmt,
3368                                mask_vec, first_vec_index, second_vec_index,
3369                                gsi, node, vectype, dr_chain,
3370                                ncopies, vect_stmts_counter++);
3371                     }
3372                 }
3373             }
3374         }
3375     }
3376
3377   return true;
3378 }
3379
3380
3381
3382 /* Vectorize SLP instance tree in postorder.  */
3383
3384 static bool
3385 vect_schedule_slp_instance (slp_tree node, slp_instance instance,
3386                             unsigned int vectorization_factor)
3387 {
3388   gimple stmt;
3389   bool grouped_store, is_store;
3390   gimple_stmt_iterator si;
3391   stmt_vec_info stmt_info;
3392   unsigned int vec_stmts_size, nunits, group_size;
3393   tree vectype;
3394   int i;
3395   slp_tree child;
3396
3397   if (!node)
3398     return false;
3399
3400   FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3401     vect_schedule_slp_instance (child, instance, vectorization_factor);
3402
3403   stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3404   stmt_info = vinfo_for_stmt (stmt);
3405
3406   /* VECTYPE is the type of the destination.  */
3407   vectype = STMT_VINFO_VECTYPE (stmt_info);
3408   nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
3409   group_size = SLP_INSTANCE_GROUP_SIZE (instance);
3410
3411   /* For each SLP instance calculate the number of vector stmts to be created
3412      for the scalar stmts in each node of the SLP tree.  The number of vector
3413      elements in one vector iteration is the number of scalar elements in
3414      one scalar iteration (GROUP_SIZE) multiplied by VF divided by the vector
3415      size.  */
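  /* For example (numbers invented): VECTORIZATION_FACTOR == 4,
     GROUP_SIZE == 2 and NUNITS == 4 give (4 * 2) / 4 == 2 vector stmts
     per node.  */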
3416   vec_stmts_size = (vectorization_factor * group_size) / nunits;
3417
3418   if (!SLP_TREE_VEC_STMTS (node).exists ())
3419     {
3420       SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
3421       SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
3422     }
3423
3424   if (dump_enabled_p ())
3425     {
3426       dump_printf_loc (MSG_NOTE, vect_location,
3427                        "------>vectorizing SLP node starting from: ");
3428       dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3429       dump_printf (MSG_NOTE, "\n");
3430     }
3431
3432   /* Vectorized stmts go before the last scalar stmt, which is where
3433      all uses are ready.  */
3434   si = gsi_for_stmt (vect_find_last_scalar_stmt_in_slp (node));
3435
3436   /* Mark the first element of the reduction chain as reduction to properly
3437      transform the node.  In the analysis phase only the last element of the
3438      chain is marked as reduction.  */
3439   if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
3440       && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
3441     {
3442       STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
3443       STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3444     }
3445
3446   /* Handle two-operation SLP nodes by vectorizing the group with
3447      both operations and then performing a merge.  */
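  /* A hypothetical instance: for the scalar group {a+b, a-b, c+d, c-d} the
     node is vectorized once with PLUS and once with MINUS, and the
     VEC_PERM_EXPR built below picks lanes 0 and 2 from the PLUS result and
     lanes 1 and 3 from the MINUS result, an addsub-style merge.  */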
3448   if (SLP_TREE_TWO_OPERATORS (node))
3449     {
3450       enum tree_code code0 = gimple_assign_rhs_code (stmt);
3451       enum tree_code ocode;
3452       gimple ostmt;
3453       unsigned char *mask = XALLOCAVEC (unsigned char, group_size);
3454       bool allsame = true;
3455       FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, ostmt)
3456         if (gimple_assign_rhs_code (ostmt) != code0)
3457           {
3458             mask[i] = 1;
3459             allsame = false;
3460             ocode = gimple_assign_rhs_code (ostmt);
3461           }
3462         else
3463           mask[i] = 0;
3464       if (!allsame)
3465         {
3466           vec<gimple> v0;
3467           vec<gimple> v1;
3468           unsigned j;
3469           tree tmask = NULL_TREE;
3470           vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3471           v0 = SLP_TREE_VEC_STMTS (node).copy ();
3472           SLP_TREE_VEC_STMTS (node).truncate (0);
3473           gimple_assign_set_rhs_code (stmt, ocode);
3474           vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3475           gimple_assign_set_rhs_code (stmt, code0);
3476           v1 = SLP_TREE_VEC_STMTS (node).copy ();
3477           SLP_TREE_VEC_STMTS (node).truncate (0);
3478           tree meltype = build_nonstandard_integer_type
3479               (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))), 1);
3480           tree mvectype = get_same_sized_vectype (meltype, vectype);
3481           unsigned k = 0, l;
3482           for (j = 0; j < v0.length (); ++j)
3483             {
3484               tree *melts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (vectype));
3485               for (l = 0; l < TYPE_VECTOR_SUBPARTS (vectype); ++l)
3486                 {
3487                   if (k >= group_size)
3488                     k = 0;
3489                   melts[l] = build_int_cst
3490                       (meltype, mask[k++] * TYPE_VECTOR_SUBPARTS (vectype) + l);
3491                 }
3492               tmask = build_vector (mvectype, melts);
3493
3494               /* ???  Not all targets support a VEC_PERM_EXPR with a
3495                  constant mask that would translate to a vec_merge RTX
3496                  (with their vec_perm_const_ok).  We can either not
3497                  vectorize in that case or let veclower do its job.
3498                  Unfortunately that isn't too great and at least for
3499                  plus/minus we'd eventually like to match targets
3500                  vector addsub instructions.  */
3501               gimple vstmt;
3502               vstmt = gimple_build_assign (make_ssa_name (vectype),
3503                                            VEC_PERM_EXPR,
3504                                            gimple_assign_lhs (v0[j]),
3505                                            gimple_assign_lhs (v1[j]), tmask);
3506               vect_finish_stmt_generation (stmt, vstmt, &si);
3507               SLP_TREE_VEC_STMTS (node).quick_push (vstmt);
3508             }
3509           v0.release ();
3510           v1.release ();
3511           return false;
3512         }
3513     }
3514   is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3515   return is_store;
3516 }
3517
3518 /* Replace the scalar calls in SLP node NODE with assignments setting their
3519    lhs to zero.  For loop vectorization this is done in vectorizable_call,
3520    but for SLP it needs to be deferred until the end of vect_schedule_slp,
3521    because multiple SLP instances may refer to the same scalar stmt.  */
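/* E.g. (hypothetical stmt): a fully SLP-vectorized call "x_1 = sqrtf (a_2);"
   is rewritten to "x_1 = 0.0f;" so that the dead scalar definition can be
   cleaned up afterwards.  */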
3522
3523 static void
3524 vect_remove_slp_scalar_calls (slp_tree node)
3525 {
3526   gimple stmt, new_stmt;
3527   gimple_stmt_iterator gsi;
3528   int i;
3529   slp_tree child;
3530   tree lhs;
3531   stmt_vec_info stmt_info;
3532
3533   if (!node)
3534     return;
3535
3536   FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3537     vect_remove_slp_scalar_calls (child);
3538
3539   FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
3540     {
3541       if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
3542         continue;
3543       stmt_info = vinfo_for_stmt (stmt);
3544       if (stmt_info == NULL
3545           || is_pattern_stmt_p (stmt_info)
3546           || !PURE_SLP_STMT (stmt_info))
3547         continue;
3548       lhs = gimple_call_lhs (stmt);
3549       new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3550       set_vinfo_for_stmt (new_stmt, stmt_info);
3551       set_vinfo_for_stmt (stmt, NULL);
3552       STMT_VINFO_STMT (stmt_info) = new_stmt;
3553       gsi = gsi_for_stmt (stmt);
3554       gsi_replace (&gsi, new_stmt, false);
3555       SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
3556     }
3557 }
3558
3559 /* Generate vector code for all SLP instances in the loop/basic block.  */
3560
3561 bool
3562 vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
3563 {
3564   vec<slp_instance> slp_instances;
3565   slp_instance instance;
3566   unsigned int i, vf;
3567   bool is_store = false;
3568
3569   if (loop_vinfo)
3570     {
3571       slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
3572       vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3573     }
3574   else
3575     {
3576       slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
3577       vf = 1;
3578     }
3579
3580   FOR_EACH_VEC_ELT (slp_instances, i, instance)
3581     {
3582       /* Schedule the tree of INSTANCE.  */
3583       is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
3584                                              instance, vf);
3585       if (dump_enabled_p ())
3586         dump_printf_loc (MSG_NOTE, vect_location,
3587                          "vectorizing stmts using SLP.\n");
3588     }
3589
3590   FOR_EACH_VEC_ELT (slp_instances, i, instance)
3591     {
3592       slp_tree root = SLP_INSTANCE_TREE (instance);
3593       gimple store;
3594       unsigned int j;
3595       gimple_stmt_iterator gsi;
3596
3597       /* Remove scalar call stmts.  Do not do this for basic-block
3598          vectorization as not all uses may be vectorized.
3599          ???  Why should this be necessary?  DCE should be able to
3600          remove the stmts itself.
3601          ???  For BB vectorization we can as well remove scalar
3602          stmts starting from the SLP tree root if they have no
3603          uses.  */
3604       if (loop_vinfo)
3605         vect_remove_slp_scalar_calls (root);
3606
3607       for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
3608                   && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
3609         {
3610           if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
3611             break;
3612
3613           if (is_pattern_stmt_p (vinfo_for_stmt (store)))
3614             store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
3615           /* Free the attached stmt_vec_info and remove the stmt.  */
3616           gsi = gsi_for_stmt (store);
3617           unlink_stmt_vdef (store);
3618           gsi_remove (&gsi, true);
3619           release_defs (store);
3620           free_stmt_vec_info (store);
3621         }
3622     }
3623
3624   return is_store;
3625 }
3626
3627
3628 /* Vectorize the basic block.  */
3629
3630 void
3631 vect_slp_transform_bb (basic_block bb)
3632 {
3633   bb_vec_info bb_vinfo = vec_info_for_bb (bb);
3634   gimple_stmt_iterator si;
3635
3636   gcc_assert (bb_vinfo);
3637
3638   if (dump_enabled_p ())
3639     dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");
3640
3641   for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
3642     {
3643       gimple stmt = gsi_stmt (si);
3644       stmt_vec_info stmt_info;
3645
3646       if (dump_enabled_p ())
3647         {
3648           dump_printf_loc (MSG_NOTE, vect_location,
3649                            "------>SLPing statement: ");
3650           dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3651           dump_printf (MSG_NOTE, "\n");
3652         }
3653
3654       stmt_info = vinfo_for_stmt (stmt);
3655       gcc_assert (stmt_info);
3656
3657       /* Schedule all the SLP instances when the first SLP stmt is reached.  */
3658       if (STMT_SLP_TYPE (stmt_info))
3659         {
3660           vect_schedule_slp (NULL, bb_vinfo);
3661           break;
3662         }
3663     }
3664
3665   if (dump_enabled_p ())
3666     dump_printf_loc (MSG_NOTE, vect_location,
3667                      "BASIC BLOCK VECTORIZED\n");
3668
3669   destroy_bb_vec_info (bb_vinfo);
3670 }