/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2017 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "cgraph.h"
#include "dumpfile.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "builtins.h"
#include "internal-fn.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Says whether a statement is a load, a store of a vectorized statement
   result, or a store of an invariant value.  */
enum vec_load_store_type {
  VLS_LOAD,
  VLS_STORE,
  VLS_STORE_INVARIANT
};
/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if ((kind == vector_load || kind == unaligned_load)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_gather_load;
  if ((kind == vector_store || kind == unaligned_store)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_scatter_store;
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}
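/* As an illustration, a typical call from the cost-model helpers below
   accumulates body costs for later processing:

     unsigned inside_cost
       = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
			   stmt_info, 0, vect_body);

   whereas passing a NULL BODY_COST_VEC hands the cost straight to the
   target via its add_stmt_cost hook.  */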
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, tree alias_ptr_type)
{
  tree mem_ref;

  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
		    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
	 pattern that can potentially be vectorized.  Don't mark the stmt
	 as relevant/live because it's not going to be vectorized.
	 Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "last stmt in pattern. don't mark"
			 " relevant/live.\n");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}
/* Function is_simple_and_all_uses_invariant

   Return true if STMT is simple and all uses of it are invariant.  */

bool
is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
{
  tree op;
  gimple *def_stmt;
  ssa_op_iter iter;

  if (!is_gimple_assign (stmt))
    return false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
    {
      enum vect_def_type dt = vect_uninitialized_def;

      if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (dt != vect_external_def && dt != vect_constant_def)
	return false;
    }
  return true;
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  if (*live_p && *relevant == vect_unused_in_scope
      && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vec_stmt_relevant_p: stmt live but not relevant.\n");
      *relevant = vect_used_only_live;
    }

  return (*live_p || *relevant);
}
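/* As an illustration, in

     for (i = 0; i < n; i++)
       sum += a[i];
     use (sum);

   the final value of SUM is used after the loop through the loop-closed
   exit PHI, so *LIVE_P is set for the stmt computing it, whereas a store
   such as a[i] = x has a vdef and is marked vect_used_in_scope directly.  */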
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it's of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
     STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
     STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array
     indexing), which does not need to be directly vectorized, then the
     liveness/relevance of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt,
     we skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant"
     will be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */
static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
	     enum vect_relevant relevant, vec<gimple *> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	case vect_used_only_live:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* We are also not interested in uses on loop PHI backedges that are
     inductions.  Otherwise we'll needlessly vectorize the IV increment
     and cause hybrid SLP for SLP inductions.  Unless the PHI is live
     of course.  */
  else if (gimple_code (stmt) == GIMPLE_PHI
	   && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
	   && ! STMT_VINFO_LIVE_P (stmt_vinfo)
	   && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
	       == use))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "induction value on backedge.\n");
      return true;
    }

  vect_mark_relevant (worklist, def_stmt, relevant, false);
  return true;
}
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant according to the relevance property
	 of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);

      /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
	 propagated as is to the DEF_STMTs of its USEs.

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the relevance to vect_used_by_reduction.
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
	{
	case vect_reduction_def:
	  gcc_assert (relevant != vect_unused_in_scope);
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }
	  break;

	case vect_nested_cycle:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_outer_by_reduction
	      && relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");
	      return false;
	    }
	  break;

	case vect_double_reduction_def:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");
	      return false;
	    }
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  gather_scatter_info gs_info;
	  if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
	    gcc_unreachable ();
	  if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			int ndts,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Cost the "broadcast" of a scalar operand in to a vector operand.
     Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
     cost model.  */
  for (i = 0; i < ndts; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       vect_memory_access_type memory_access_type,
		       enum vect_def_type dt, slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped stores update all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses high and low interleave or shuffle operations for each
	 needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    /* N scalar stores plus extracting the elements.  */
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     scalar_store, stmt_info, 0, vect_body);
  else
    vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, one access has
   the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      vect_memory_access_type memory_access_type,
		      slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  unsigned int inside_cost = 0, prologue_cost = 0;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  /* Grouped loads read all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (dr, ncopies, first_stmt_p,
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");
	break;
      }

    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  /* We abuse this function to push something to an SSA name with
     initial value VAL.  */
  if (! useless_type_conversion_p (type, TREE_TYPE (val)))
    {
      gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
      if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  /* Scalar boolean value should be transformed into
	     all zeros or all ones value before building a vector.  */
	  if (VECTOR_BOOLEAN_TYPE_P (type))
	    {
	      tree true_val = build_all_ones_cst (TREE_TYPE (type));
	      tree false_val = build_zero_cst (TREE_TYPE (type));

	      if (CONSTANT_CLASS_P (val))
		val = integer_zerop (val) ? false_val : true_val;
	      else
		{
		  new_temp = make_ssa_name (TREE_TYPE (type));
		  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
						   val, true_val, false_val);
		  vect_init_vector_1 (stmt, init_stmt, gsi);
		  val = new_temp;
		}
	    }
	  else if (CONSTANT_CLASS_P (val))
	    val = fold_convert (TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
		init_stmt = gimple_build_assign (new_temp,
						 fold_build1 (VIEW_CONVERT_EXPR,
							      TREE_TYPE (type),
							      val));
	      else
		init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}
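/* As an illustration, for an invariant integer operand 5 broadcast into
   a V4SI vector with GSI == NULL, this emits something like

     cst_1 = { 5, 5, 5, 5 };

   in the loop preheader and returns the SSA name cst_1.  */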
/* Function vect_get_vec_def_for_operand_1.

   For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
   DT that will be used in the vectorized stmt.  */

tree
vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  stmt_vec_info def_stmt_info = NULL;

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      /* Code should use vect_get_vec_def_for_operand.  */
      gcc_unreachable ();

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* operand is defined by a loop header phi.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */

tree
vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
{
  gimple *def_stmt;
  enum vect_def_type dt;
  bool is_simple_use;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (def_stmt && dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
    }

  if (dt == vect_constant_def || dt == vect_external_def)
    {
      tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
      tree vector_type;

      if (vectype)
	vector_type = vectype;
      else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
	       && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
	vector_type = build_same_sized_truth_vector_type (stmt_vectype);
      else
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

      gcc_assert (vector_type);
      return vect_init_vector (stmt, op, vector_type, NULL);
    }
  else
    return vect_get_vec_def_for_operand_1 (def_stmt, dt);
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
	In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
	To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

	To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

	For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
	vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
	vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
	vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */

static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
				tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
	{
	  tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
	  tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
	  if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
					      OPTIMIZE_FOR_SPEED))
	    return ifn;
	}
    }
  return IFN_LAST;
}
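/* As an illustration, a call to sqrtf typically arrives here as
   CFN_BUILT_IN_SQRTF; associated_internal_fn maps it to IFN_SQRT,
   which is returned when direct_internal_fn_supported_p reports that
   the target implements it for the requested vector types.  */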
static tree permute_vec_elements (tree, tree, tree, gimple *,
				  gimple_stmt_iterator *);
/* STMT is a non-strided load or store, meaning that it accesses
   elements with a known constant step.  Return -1 if that step
   is negative, 0 if it is zero, and 1 if it is greater than zero.  */

static int
compare_step_with_zero (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  return tree_int_cst_compare (vect_dr_behavior (dr)->step,
			       size_zero_node);
}
/* If the target supports a permute mask that reverses the elements in
   a vector of type VECTYPE, return that mask, otherwise return null.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  vec_perm_builder sel (nunits, nunits, 1);
  for (i = 0; i < nunits; ++i)
    sel.quick_push (nunits - 1 - i);

  vec_perm_indices indices (sel, 1, nunits);
  if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, indices);
}
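/* For V4SI, for example, the selector built above is { 3, 2, 1, 0 },
   a full reversal of the four elements.  */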
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is part of a grouped load
   or store.

   For stores, the statements in the group are all consecutive
   and there is no gap at the end.  For loads, the statements in the
   group might not be consecutive; there can be gaps between statements
   as well as at the end.  */

static bool
get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
			   vec_load_store_type vls_type,
			   vect_memory_access_type *memory_access_type)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
  bool single_element_p = (stmt == first_stmt
			   && !GROUP_NEXT_ELEMENT (stmt_info));
  unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
  unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* True if the vectorized statements would access beyond the last
     statement in the group.  */
  bool overrun_p = false;

  /* True if we can cope with such overrun by peeling for gaps, so that
     there is at least one final scalar iteration after the vector loop.  */
  bool can_overrun_p = (vls_type == VLS_LOAD && loop_vinfo && !loop->inner);

  /* There can only be a gap at the end of the group if the stride is
     known at compile time.  */
  gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);

  /* Stores can't yet have gaps.  */
  gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);

  if (slp)
    {
      if (STMT_VINFO_STRIDED_P (stmt_info))
	{
	  /* Try to use consecutive accesses of GROUP_SIZE elements,
	     separated by the stride, until we have a complete vector.
	     Fall back to scalar accesses if that isn't possible.  */
	  if (nunits % group_size == 0)
	    *memory_access_type = VMAT_STRIDED_SLP;
	  else
	    *memory_access_type = VMAT_ELEMENTWISE;
	}
      else
	{
	  overrun_p = loop_vinfo && gap != 0;
	  if (overrun_p && vls_type != VLS_LOAD)
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Grouped store with gaps requires"
			       " non-consecutive accesses\n");
	      return false;
	    }
	  /* An overrun is fine if the trailing elements are smaller
	     than the alignment boundary B.  Every vector access will
	     be a multiple of B and so we are guaranteed to access a
	     non-gap element in the same B-sized block.  */
	  if (overrun_p
	      && gap < (vect_known_alignment_in_bytes (first_dr)
			/ vect_get_scalar_dr_size (first_dr)))
	    overrun_p = false;
	  if (overrun_p && !can_overrun_p)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "Peeling for outer loop is not supported\n");
	      return false;
	    }
	  *memory_access_type = VMAT_CONTIGUOUS;
	}
    }
  else
    {
      /* We can always handle this case using elementwise accesses,
	 but see if something more efficient is available.  */
      *memory_access_type = VMAT_ELEMENTWISE;

      /* If there is a gap at the end of the group then these optimizations
	 would access excess elements in the last iteration.  */
      bool would_overrun_p = (gap != 0);
      /* An overrun is fine if the trailing elements are smaller than the
	 alignment boundary B.  Every vector access will be a multiple of B
	 and so we are guaranteed to access a non-gap element in the
	 same B-sized block.  */
      if (would_overrun_p
	  && gap < (vect_known_alignment_in_bytes (first_dr)
		    / vect_get_scalar_dr_size (first_dr)))
	would_overrun_p = false;

      if (!STMT_VINFO_STRIDED_P (stmt_info)
	  && (can_overrun_p || !would_overrun_p)
	  && compare_step_with_zero (stmt) > 0)
	{
	  /* First try using LOAD/STORE_LANES.  */
	  if (vls_type == VLS_LOAD
	      ? vect_load_lanes_supported (vectype, group_size)
	      : vect_store_lanes_supported (vectype, group_size))
	    {
	      *memory_access_type = VMAT_LOAD_STORE_LANES;
	      overrun_p = would_overrun_p;
	    }

	  /* If that fails, try using permuting loads.  */
	  if (*memory_access_type == VMAT_ELEMENTWISE
	      && (vls_type == VLS_LOAD
		  ? vect_grouped_load_supported (vectype, single_element_p,
						 group_size)
		  : vect_grouped_store_supported (vectype, group_size)))
	    {
	      *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
	      overrun_p = would_overrun_p;
	    }
	}
    }

  if (vls_type != VLS_LOAD && first_stmt == stmt)
    {
      /* STMT is the leader of the group.  Check the operands of all the
	 stmts of the group.  */
      gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
      while (next_stmt)
	{
	  gcc_assert (gimple_assign_single_p (next_stmt));
	  tree op = gimple_assign_rhs1 (next_stmt);
	  gimple *def_stmt;
	  enum vect_def_type dt;
	  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "use not simple.\n");
	      return false;
	    }
	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	}
    }

  if (overrun_p)
    {
      gcc_assert (can_overrun_p);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Data access with gaps requires scalar "
			 "epilogue loop\n");
      LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
    }

  return true;
}
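/* Worked example of the overrun checks above: with 4-byte elements, a
   known alignment of 16 bytes and GAP == 1, the gap is smaller than
   16 / 4 == 4 elements, so every vector access is guaranteed to hit a
   non-gap element within its 16-byte block and the overrun needs no
   peeling.  */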
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is a load or store that
   accesses consecutive elements with a negative step.  */

static vect_memory_access_type
get_negative_load_store_type (gimple *stmt, tree vectype,
			      vec_load_store_type vls_type,
			      unsigned int ncopies)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  dr_alignment_support alignment_support_scheme;

  if (ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types with negative step.\n");
      return VMAT_ELEMENTWISE;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  if (alignment_support_scheme != dr_aligned
      && alignment_support_scheme != dr_unaligned_supported)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "negative step but alignment required.\n");
      return VMAT_ELEMENTWISE;
    }

  if (vls_type == VLS_STORE_INVARIANT)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "negative step with invariant source;"
			 " no permute needed.\n");
      return VMAT_CONTIGUOUS_DOWN;
    }

  if (!perm_mask_for_reverse (vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "negative step and reversing not supported.\n");
      return VMAT_ELEMENTWISE;
    }

  return VMAT_CONTIGUOUS_REVERSE;
}
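/* As an illustration, a load from a[n - i] over int elements has step
   -4; given a supported reverse permute mask it is classified as
   VMAT_CONTIGUOUS_REVERSE, i.e. a contiguous access followed by a
   reversing permute.  */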
/* Analyze load or store statement STMT of type VLS_TYPE.  Return true
   if there is a memory access type that the vectorized form can use,
   storing it in *MEMORY_ACCESS_TYPE if so.  If we decide to use gathers
   or scatters, fill in GS_INFO accordingly.

   SLP says whether we're performing SLP rather than loop vectorization.
   VECTYPE is the vector type that the vectorized statements will use.
   NCOPIES is the number of vector statements that will be needed.  */

static bool
get_load_store_type (gimple *stmt, tree vectype, bool slp,
		     vec_load_store_type vls_type, unsigned int ncopies,
		     vect_memory_access_type *memory_access_type,
		     gather_scatter_info *gs_info)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      *memory_access_type = VMAT_GATHER_SCATTER;
      gimple *def_stmt;
      if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
	gcc_unreachable ();
      else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
				    &gs_info->offset_dt,
				    &gs_info->offset_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "%s index use not simple.\n",
			     vls_type == VLS_LOAD ? "gather" : "scatter");
	  return false;
	}
    }
  else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (!get_group_load_store_type (stmt, vectype, slp, vls_type,
				      memory_access_type))
	return false;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gcc_assert (!slp);
      *memory_access_type = VMAT_ELEMENTWISE;
    }
  else
    {
      int cmp = compare_step_with_zero (stmt);
      if (cmp < 0)
	*memory_access_type = get_negative_load_store_type
	  (stmt, vectype, vls_type, ncopies);
      else if (cmp == 0)
	{
	  gcc_assert (vls_type == VLS_LOAD);
	  *memory_access_type = VMAT_INVARIANT;
	}
      else
	*memory_access_type = VMAT_CONTIGUOUS;
    }

  /* FIXME: At the moment the cost model seems to underestimate the
     cost of using elementwise accesses.  This check preserves the
     traditional behavior until that can be fixed.  */
  if (*memory_access_type == VMAT_ELEMENTWISE
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not falling back to elementwise accesses\n");
      return false;
    }

  return true;
}
2016 /* Function vectorizable_mask_load_store.
2018 Check if STMT performs a conditional load or store that can be vectorized.
2019 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2020 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2021 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
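/* Illustrative example (assumed source loop, not from this file): a
   conditional store such as

     for (i = 0; i < n; i++)
       if (c[i])
         a[i] = b[i];

   is if-converted into a scalar IFN_MASK_STORE call, which this function
   replaces with vector IFN_MASK_STORE calls operating on VECTYPE.  */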
2024 vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
2025 gimple **vec_stmt, slp_tree slp_node)
2027 tree vec_dest = NULL;
2028 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2029 stmt_vec_info prev_stmt_info;
2030 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2031 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2032 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
2033 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2034 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2035 tree rhs_vectype = NULL_TREE;
2040 tree dataref_ptr = NULL_TREE;
2042 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2046 gather_scatter_info gs_info;
2047 vec_load_store_type vls_type;
2050 enum vect_def_type dt;
2052 if (slp_node != NULL)
2055 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2056 gcc_assert (ncopies >= 1);
2058 mask = gimple_call_arg (stmt, 2);
2060 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
2063 /* FORNOW. This restriction should be relaxed. */
2064 if (nested_in_vect_loop && ncopies > 1)
2066 if (dump_enabled_p ())
2067 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2068 "multiple types in nested loop.");
2072 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2075 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2079 if (!STMT_VINFO_DATA_REF (stmt_info))
2082 elem_type = TREE_TYPE (vectype);
2084 if (TREE_CODE (mask) != SSA_NAME)
2087 if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
2091 mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
2093 if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype)
2094 || TYPE_VECTOR_SUBPARTS (mask_vectype) != TYPE_VECTOR_SUBPARTS (vectype))
2097 if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
2099 tree rhs = gimple_call_arg (stmt, 3);
2100 if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
2102 if (dt == vect_constant_def || dt == vect_external_def)
2103 vls_type = VLS_STORE_INVARIANT;
2105 vls_type = VLS_STORE;
2108 vls_type = VLS_LOAD;
2110 vect_memory_access_type memory_access_type;
2111 if (!get_load_store_type (stmt, vectype, false, vls_type, ncopies,
2112 &memory_access_type, &gs_info))
2115 if (memory_access_type == VMAT_GATHER_SCATTER)
2117 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
2119 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
2120 if (TREE_CODE (masktype) == INTEGER_TYPE)
2122 if (dump_enabled_p ())
2123 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2124 "masked gather with integer mask not supported.");
2128 else if (memory_access_type != VMAT_CONTIGUOUS)
2130 if (dump_enabled_p ())
2131 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2132 "unsupported access type for masked %s.\n",
2133 vls_type == VLS_LOAD ? "load" : "store");
2136 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2137 || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
2138 TYPE_MODE (mask_vectype),
2139 vls_type == VLS_LOAD)
2141 && !useless_type_conversion_p (vectype, rhs_vectype)))
2144 if (!vec_stmt) /* transformation not required. */
2146 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
2147 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2148 if (vls_type == VLS_LOAD)
2149 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
2152 vect_model_store_cost (stmt_info, ncopies, memory_access_type,
2153 dt, NULL, NULL, NULL);
2156 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
2160 if (memory_access_type == VMAT_GATHER_SCATTER)
2162 tree vec_oprnd0 = NULL_TREE, op;
2163 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
2164 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
2165 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
2166 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
2167 tree mask_perm_mask = NULL_TREE;
2168 edge pe = loop_preheader_edge (loop);
2171 enum { NARROW, NONE, WIDEN } modifier;
2172 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
2174 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
2175 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2176 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2177 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2178 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2179 scaletype = TREE_VALUE (arglist);
2180 gcc_checking_assert (types_compatible_p (srctype, rettype)
2181 && types_compatible_p (srctype, masktype));
2183 if (nunits == gather_off_nunits)
2185 else if (nunits == gather_off_nunits / 2)
2189 vec_perm_builder sel (gather_off_nunits, gather_off_nunits, 1);
2190 for (i = 0; i < gather_off_nunits; ++i)
2191 sel.quick_push (i | nunits);
2193 vec_perm_indices indices (sel, 1, gather_off_nunits);
2194 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
2197 else if (nunits == gather_off_nunits * 2)
2201 vec_perm_builder sel (nunits, nunits, 1);
2202 sel.quick_grow (nunits);
2203 for (i = 0; i < nunits; ++i)
2204 sel[i] = i < gather_off_nunits
2205 ? i : i + nunits - gather_off_nunits;
2206 vec_perm_indices indices (sel, 2, nunits);
2207 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
2211 for (i = 0; i < nunits; ++i)
2212 sel[i] = i | gather_off_nunits;
2213 indices.new_vector (sel, 2, gather_off_nunits);
2214 mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices);
2219 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2221 ptr = fold_convert (ptrtype, gs_info.base);
2222 if (!is_gimple_min_invariant (ptr))
2224 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
2225 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2226 gcc_assert (!new_bb);
2229 scale = build_int_cst (scaletype, gs_info.scale);
2231 prev_stmt_info = NULL;
2232 for (j = 0; j < ncopies; ++j)
2234 if (modifier == WIDEN && (j & 1))
2235 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
2236 perm_mask, stmt, gsi);
2239 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
2242 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);
2244 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
2246 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
2247 == TYPE_VECTOR_SUBPARTS (idxtype));
2248 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
2249 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
2251 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2252 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2256 if (mask_perm_mask && (j & 1))
2257 mask_op = permute_vec_elements (mask_op, mask_op,
2258 mask_perm_mask, stmt, gsi);
2262 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2265 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2266 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2270 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2272 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
2273 == TYPE_VECTOR_SUBPARTS (masktype));
2274 var = vect_get_new_ssa_name (masktype, vect_simple_var);
2275 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2277 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
2278 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2284 = gimple_build_call (gs_info.decl, 5, mask_op, ptr, op, mask_op,
2287 if (!useless_type_conversion_p (vectype, rettype))
2289 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
2290 == TYPE_VECTOR_SUBPARTS (rettype));
2291 op = vect_get_new_ssa_name (rettype, vect_simple_var);
2292 gimple_call_set_lhs (new_stmt, op);
2293 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2294 var = make_ssa_name (vec_dest);
2295 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2296 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2300 var = make_ssa_name (vec_dest, new_stmt);
2301 gimple_call_set_lhs (new_stmt, var);
2304 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2306 if (modifier == NARROW)
2313 var = permute_vec_elements (prev_res, var,
2314 perm_mask, stmt, gsi);
2315 new_stmt = SSA_NAME_DEF_STMT (var);
2318 if (prev_stmt_info == NULL)
2319 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2321 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2322 prev_stmt_info = vinfo_for_stmt (new_stmt);
2325 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2327 if (STMT_VINFO_RELATED_STMT (stmt_info))
2329 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2330 stmt_info = vinfo_for_stmt (stmt);
2332 tree lhs = gimple_call_lhs (stmt);
2333 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2334 set_vinfo_for_stmt (new_stmt, stmt_info);
2335 set_vinfo_for_stmt (stmt, NULL);
2336 STMT_VINFO_STMT (stmt_info) = new_stmt;
2337 gsi_replace (gsi, new_stmt, true);
2340 else if (vls_type != VLS_LOAD)
2342 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
2343 prev_stmt_info = NULL;
2344 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
2345 for (i = 0; i < ncopies; i++)
2347 unsigned align, misalign;
2351 tree rhs = gimple_call_arg (stmt, 3);
2352 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
2353 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
2355 /* We should have caught mismatched types earlier. */
2356 gcc_assert (useless_type_conversion_p (vectype,
2357 TREE_TYPE (vec_rhs)));
2358 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2359 NULL_TREE, &dummy, gsi,
2360 &ptr_incr, false, &inv_p);
2361 gcc_assert (!inv_p);
2365 vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
2366 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2367 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2368 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2369 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2370 TYPE_SIZE_UNIT (vectype));
2373 align = DR_TARGET_ALIGNMENT (dr);
2374 if (aligned_access_p (dr))
2376 else if (DR_MISALIGNMENT (dr) == -1)
2378 align = TYPE_ALIGN_UNIT (elem_type);
2382 misalign = DR_MISALIGNMENT (dr);
2383 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2385 tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
2386 misalign ? least_bit_hwi (misalign) : align);
2388 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2389 ptr, vec_mask, vec_rhs);
2390 gimple_call_set_nothrow (call, true);
2392 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2394 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2396 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2397 prev_stmt_info = vinfo_for_stmt (new_stmt);
2402 tree vec_mask = NULL_TREE;
2403 prev_stmt_info = NULL;
2404 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2405 for (i = 0; i < ncopies; i++)
2407 unsigned align, misalign;
2411 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
2413 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2414 NULL_TREE, &dummy, gsi,
2415 &ptr_incr, false, &inv_p);
2416 gcc_assert (!inv_p);
2420 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2421 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2422 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2423 TYPE_SIZE_UNIT (vectype));
2426 align = DR_TARGET_ALIGNMENT (dr);
2427 if (aligned_access_p (dr))
2429 else if (DR_MISALIGNMENT (dr) == -1)
2431 align = TYPE_ALIGN_UNIT (elem_type);
2435 misalign = DR_MISALIGNMENT (dr);
2436 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2438 tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
2439 misalign ? least_bit_hwi (misalign) : align);
2441 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2443 gimple_call_set_lhs (call, make_ssa_name (vec_dest));
2444 gimple_call_set_nothrow (call, true);
2445 vect_finish_stmt_generation (stmt, call, gsi);
2447 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = call;
2449 STMT_VINFO_RELATED_STMT (prev_stmt_info) = call;
2450 prev_stmt_info = vinfo_for_stmt (call);
2454 if (vls_type == VLS_LOAD)
2456 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2458 if (STMT_VINFO_RELATED_STMT (stmt_info))
2460 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2461 stmt_info = vinfo_for_stmt (stmt);
2463 tree lhs = gimple_call_lhs (stmt);
2464 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2465 set_vinfo_for_stmt (new_stmt, stmt_info);
2466 set_vinfo_for_stmt (stmt, NULL);
2467 STMT_VINFO_STMT (stmt_info) = new_stmt;
2468 gsi_replace (gsi, new_stmt, true);
2474 /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
2477 vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
2478 gimple **vec_stmt, slp_tree slp_node,
2479 tree vectype_in, enum vect_def_type *dt)
2482 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2483 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2484 unsigned ncopies, nunits;
2486 op = gimple_call_arg (stmt, 0);
2487 vectype = STMT_VINFO_VECTYPE (stmt_info);
2488 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2490 /* Multiple types in SLP are handled by creating the appropriate number of
2491 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2496 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2498 gcc_assert (ncopies >= 1);
2500 tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
2504 unsigned int num_bytes = TYPE_VECTOR_SUBPARTS (char_vectype);
2505 unsigned word_bytes = num_bytes / nunits;
2507 vec_perm_builder elts (num_bytes, num_bytes, 1);
2508 for (unsigned i = 0; i < nunits; ++i)
2509 for (unsigned j = 0; j < word_bytes; ++j)
2510 elts.quick_push ((i + 1) * word_bytes - j - 1);
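/* For example, for bswap32 on a V4SI vector viewed as 16 chars
   (nunits == 4, word_bytes == 4) this builds the byte permutation
   { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 }.  */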
2512 vec_perm_indices indices (elts, 1, num_bytes);
2513 if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices))
2518 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2519 if (dump_enabled_p ())
2520 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ==="
2522 if (! PURE_SLP_STMT (stmt_info))
2524 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2525 1, vector_stmt, stmt_info, 0, vect_prologue);
2526 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2527 ncopies, vec_perm, stmt_info, 0, vect_body);
2532 tree_vector_builder telts (char_vectype, num_bytes, 1);
2533 for (unsigned i = 0; i < num_bytes; ++i)
2534 telts.quick_push (build_int_cst (char_type_node, elts[i]));
2535 tree bswap_vconst = telts.build ();
2538 vec<tree> vec_oprnds = vNULL;
2539 gimple *new_stmt = NULL;
2540 stmt_vec_info prev_stmt_info = NULL;
2541 for (unsigned j = 0; j < ncopies; j++)
2545 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2547 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2549 /* Arguments are ready. Create the new vector stmt. */
2552 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
2554 tree tem = make_ssa_name (char_vectype);
2555 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2556 char_vectype, vop));
2557 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2558 tree tem2 = make_ssa_name (char_vectype);
2559 new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
2560 tem, tem, bswap_vconst);
2561 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2562 tem = make_ssa_name (vectype);
2563 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2565 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2567 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2574 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2576 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2578 prev_stmt_info = vinfo_for_stmt (new_stmt);
2581 vec_oprnds.release ();
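/* Sketch of the GIMPLE emitted above for a bswap32 (the SSA names are
   hypothetical):

     tem_1 = VIEW_CONVERT_EXPR<vector(16) char> (vop_2);
     tem_3 = VEC_PERM_EXPR <tem_1, tem_1, { 3, 2, 1, 0, ... }>;
     res_4 = VIEW_CONVERT_EXPR<vector(4) int> (tem_3);  */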
2585 /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
2586 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
2587 in a single step. On success, store the binary pack code in
2591 simple_integer_narrowing (tree vectype_out, tree vectype_in,
2592 tree_code *convert_code)
2594 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
2595 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
2599 int multi_step_cvt = 0;
2600 auto_vec <tree, 8> interm_types;
2601 if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
2602 &code, &multi_step_cvt,
2607 *convert_code = code;
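/* For example, narrowing V4SI inputs to a V8HI output is a single-step
   narrowing: CODE becomes VEC_PACK_TRUNC_EXPR, packing two V4SI vectors
   into one V8HI vector.  Multi-step narrowings are rejected above.  */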
2611 /* Function vectorizable_call.
2613 Check if GS performs a function call that can be vectorized.
2614 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2615 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2616 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2619 vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
2626 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2627 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2628 tree vectype_out, vectype_in;
2631 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2632 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2633 vec_info *vinfo = stmt_info->vinfo;
2634 tree fndecl, new_temp, rhs_type;
2636 enum vect_def_type dt[3]
2637 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2639 gimple *new_stmt = NULL;
2641 vec<tree> vargs = vNULL;
2642 enum { NARROW, NONE, WIDEN } modifier;
2646 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2649 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2653 /* Is GS a vectorizable call? */
2654 stmt = dyn_cast <gcall *> (gs);
2658 if (gimple_call_internal_p (stmt)
2659 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2660 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2661 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2664 if (gimple_call_lhs (stmt) == NULL_TREE
2665 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2668 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2670 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2672 /* Process function arguments. */
2673 rhs_type = NULL_TREE;
2674 vectype_in = NULL_TREE;
2675 nargs = gimple_call_num_args (stmt);
2677 /* Bail out if the function has more than three arguments; we do not have
2678 interesting builtin functions to vectorize with more than two arguments
2679 except for fma. A call with no arguments is not handled either. */
2680 if (nargs == 0 || nargs > 3)
2683 /* Ignore the argument of IFN_GOMP_SIMD_LANE; it is magic. */
2684 if (gimple_call_internal_p (stmt)
2685 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2688 rhs_type = unsigned_type_node;
2691 for (i = 0; i < nargs; i++)
2695 op = gimple_call_arg (stmt, i);
2697 /* We can only handle calls with arguments of the same type. */
2699 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2701 if (dump_enabled_p ())
2702 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2703 "argument types differ.\n");
2707 rhs_type = TREE_TYPE (op);
2709 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
2711 if (dump_enabled_p ())
2712 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2713 "use not simple.\n");
2718 vectype_in = opvectype;
2720 && opvectype != vectype_in)
2722 if (dump_enabled_p ())
2723 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2724 "argument vector types differ.\n");
2728 /* If all arguments are external or constant defs, use a vector type with
2729 the same size as the output vector type. */
2731 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2733 gcc_assert (vectype_in);
2736 if (dump_enabled_p ())
2738 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2739 "no vectype for scalar type ");
2740 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2741 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2748 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2749 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2750 if (nunits_in == nunits_out / 2)
2752 else if (nunits_out == nunits_in)
2754 else if (nunits_out == nunits_in / 2)
2759 /* We only handle functions that do not read or clobber memory. */
2760 if (gimple_vuse (stmt))
2762 if (dump_enabled_p ())
2763 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2764 "function reads from or writes to memory.\n");
2768 /* For now, we only vectorize functions if a target-specific builtin
2769 is available. TODO -- in some cases, it might be profitable to
2770 insert the calls for pieces of the vector, in order to be able
2771 to vectorize other operations in the loop. */
2773 internal_fn ifn = IFN_LAST;
2774 combined_fn cfn = gimple_call_combined_fn (stmt);
2775 tree callee = gimple_call_fndecl (stmt);
2777 /* First try using an internal function. */
2778 tree_code convert_code = ERROR_MARK;
2780 && (modifier == NONE
2781 || (modifier == NARROW
2782 && simple_integer_narrowing (vectype_out, vectype_in,
2784 ifn = vectorizable_internal_function (cfn, callee, vectype_out,
2787 /* If that fails, try asking for a target-specific built-in function. */
2788 if (ifn == IFN_LAST)
2790 if (cfn != CFN_LAST)
2791 fndecl = targetm.vectorize.builtin_vectorized_function
2792 (cfn, vectype_out, vectype_in);
2794 fndecl = targetm.vectorize.builtin_md_vectorized_function
2795 (callee, vectype_out, vectype_in);
2798 if (ifn == IFN_LAST && !fndecl)
2800 if (cfn == CFN_GOMP_SIMD_LANE
2803 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2804 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2805 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2806 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2808 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2809 { 0, 1, 2, ... vf - 1 } vector. */
2810 gcc_assert (nargs == 0);
2812 else if (modifier == NONE
2813 && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
2814 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
2815 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
2816 return vectorizable_bswap (stmt, gsi, vec_stmt, slp_node,
2820 if (dump_enabled_p ())
2821 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2822 "function is not vectorizable.\n");
2829 else if (modifier == NARROW && ifn == IFN_LAST)
2830 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
2832 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
2834 /* Sanity check: make sure that at least one copy of the vectorized stmt
2835 needs to be generated. */
2836 gcc_assert (ncopies >= 1);
2838 if (!vec_stmt) /* transformation not required. */
2840 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2841 if (dump_enabled_p ())
2842 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2844 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
2845 if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
2846 add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
2847 vec_promote_demote, stmt_info, 0, vect_body);
2854 if (dump_enabled_p ())
2855 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2858 scalar_dest = gimple_call_lhs (stmt);
2859 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2861 prev_stmt_info = NULL;
2862 if (modifier == NONE || ifn != IFN_LAST)
2864 tree prev_res = NULL_TREE;
2865 for (j = 0; j < ncopies; ++j)
2867 /* Build argument list for the vectorized call. */
2869 vargs.create (nargs);
2875 auto_vec<vec<tree> > vec_defs (nargs);
2876 vec<tree> vec_oprnds0;
2878 for (i = 0; i < nargs; i++)
2879 vargs.quick_push (gimple_call_arg (stmt, i));
2880 vect_get_slp_defs (vargs, slp_node, &vec_defs);
2881 vec_oprnds0 = vec_defs[0];
2883 /* Arguments are ready. Create the new vector stmt. */
2884 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2887 for (k = 0; k < nargs; k++)
2889 vec<tree> vec_oprndsk = vec_defs[k];
2890 vargs[k] = vec_oprndsk[i];
2892 if (modifier == NARROW)
2894 tree half_res = make_ssa_name (vectype_in);
2896 = gimple_build_call_internal_vec (ifn, vargs);
2897 gimple_call_set_lhs (call, half_res);
2898 gimple_call_set_nothrow (call, true);
2900 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2903 prev_res = half_res;
2906 new_temp = make_ssa_name (vec_dest);
2907 new_stmt = gimple_build_assign (new_temp, convert_code,
2908 prev_res, half_res);
2913 if (ifn != IFN_LAST)
2914 call = gimple_build_call_internal_vec (ifn, vargs);
2916 call = gimple_build_call_vec (fndecl, vargs);
2917 new_temp = make_ssa_name (vec_dest, call);
2918 gimple_call_set_lhs (call, new_temp);
2919 gimple_call_set_nothrow (call, true);
2922 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2923 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2926 for (i = 0; i < nargs; i++)
2928 vec<tree> vec_oprndsi = vec_defs[i];
2929 vec_oprndsi.release ();
2934 for (i = 0; i < nargs; i++)
2936 op = gimple_call_arg (stmt, i);
2939 = vect_get_vec_def_for_operand (op, stmt);
2942 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2944 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2947 vargs.quick_push (vec_oprnd0);
2950 if (gimple_call_internal_p (stmt)
2951 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2953 tree_vector_builder v (vectype_out, 1, 3);
2954 for (int k = 0; k < 3; ++k)
2955 v.quick_push (build_int_cst (unsigned_type_node,
2956 j * nunits_out + k));
2957 tree cst = v.build ();
2959 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
2960 gimple *init_stmt = gimple_build_assign (new_var, cst);
2961 vect_init_vector_1 (stmt, init_stmt, NULL);
2962 new_temp = make_ssa_name (vec_dest);
2963 new_stmt = gimple_build_assign (new_temp, new_var);
2965 else if (modifier == NARROW)
2967 tree half_res = make_ssa_name (vectype_in);
2968 gcall *call = gimple_build_call_internal_vec (ifn, vargs);
2969 gimple_call_set_lhs (call, half_res);
2970 gimple_call_set_nothrow (call, true);
2972 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2975 prev_res = half_res;
2978 new_temp = make_ssa_name (vec_dest);
2979 new_stmt = gimple_build_assign (new_temp, convert_code,
2980 prev_res, half_res);
2985 if (ifn != IFN_LAST)
2986 call = gimple_build_call_internal_vec (ifn, vargs);
2988 call = gimple_build_call_vec (fndecl, vargs);
2989 new_temp = make_ssa_name (vec_dest, call);
2990 gimple_call_set_lhs (call, new_temp);
2991 gimple_call_set_nothrow (call, true);
2994 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2996 if (j == (modifier == NARROW ? 1 : 0))
2997 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2999 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3001 prev_stmt_info = vinfo_for_stmt (new_stmt);
3004 else if (modifier == NARROW)
3006 for (j = 0; j < ncopies; ++j)
3008 /* Build argument list for the vectorized call. */
3010 vargs.create (nargs * 2);
3016 auto_vec<vec<tree> > vec_defs (nargs);
3017 vec<tree> vec_oprnds0;
3019 for (i = 0; i < nargs; i++)
3020 vargs.quick_push (gimple_call_arg (stmt, i));
3021 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3022 vec_oprnds0 = vec_defs[0];
3024 /* Arguments are ready. Create the new vector stmt. */
3025 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
3029 for (k = 0; k < nargs; k++)
3031 vec<tree> vec_oprndsk = vec_defs[k];
3032 vargs.quick_push (vec_oprndsk[i]);
3033 vargs.quick_push (vec_oprndsk[i + 1]);
3036 if (ifn != IFN_LAST)
3037 call = gimple_build_call_internal_vec (ifn, vargs);
3039 call = gimple_build_call_vec (fndecl, vargs);
3040 new_temp = make_ssa_name (vec_dest, call);
3041 gimple_call_set_lhs (call, new_temp);
3042 gimple_call_set_nothrow (call, true);
3044 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3045 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3048 for (i = 0; i < nargs; i++)
3050 vec<tree> vec_oprndsi = vec_defs[i];
3051 vec_oprndsi.release ();
3056 for (i = 0; i < nargs; i++)
3058 op = gimple_call_arg (stmt, i);
3062 = vect_get_vec_def_for_operand (op, stmt);
3064 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3068 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
3070 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
3072 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3075 vargs.quick_push (vec_oprnd0);
3076 vargs.quick_push (vec_oprnd1);
3079 new_stmt = gimple_build_call_vec (fndecl, vargs);
3080 new_temp = make_ssa_name (vec_dest, new_stmt);
3081 gimple_call_set_lhs (new_stmt, new_temp);
3082 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3085 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3087 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3089 prev_stmt_info = vinfo_for_stmt (new_stmt);
3092 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3095 /* No current target implements this case. */
3100 /* The call in STMT might prevent it from being removed in DCE.
3101 However, we cannot remove it here, because of the way the SSA name
3102 it defines is mapped to the new definition. So just replace the
3103 rhs of the statement with something harmless. */
3108 type = TREE_TYPE (scalar_dest);
3109 if (is_pattern_stmt_p (stmt_info))
3110 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3112 lhs = gimple_call_lhs (stmt);
3114 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3115 set_vinfo_for_stmt (new_stmt, stmt_info);
3116 set_vinfo_for_stmt (stmt, NULL);
3117 STMT_VINFO_STMT (stmt_info) = new_stmt;
3118 gsi_replace (gsi, new_stmt, false);
3124 struct simd_call_arg_info
3128 HOST_WIDE_INT linear_step;
3129 enum vect_def_type dt;
3131 bool simd_lane_linear;
3134 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
3135 is linear within simd lane (but not within whole loop), note it in
3139 vect_simd_lane_linear (tree op, struct loop *loop,
3140 struct simd_call_arg_info *arginfo)
3142 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
3144 if (!is_gimple_assign (def_stmt)
3145 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
3146 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
3149 tree base = gimple_assign_rhs1 (def_stmt);
3150 HOST_WIDE_INT linear_step = 0;
3151 tree v = gimple_assign_rhs2 (def_stmt);
3152 while (TREE_CODE (v) == SSA_NAME)
3155 def_stmt = SSA_NAME_DEF_STMT (v);
3156 if (is_gimple_assign (def_stmt))
3157 switch (gimple_assign_rhs_code (def_stmt))
3160 t = gimple_assign_rhs2 (def_stmt);
3161 if (linear_step || TREE_CODE (t) != INTEGER_CST)
3163 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
3164 v = gimple_assign_rhs1 (def_stmt);
3167 t = gimple_assign_rhs2 (def_stmt);
3168 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
3170 linear_step = tree_to_shwi (t);
3171 v = gimple_assign_rhs1 (def_stmt);
3174 t = gimple_assign_rhs1 (def_stmt);
3175 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
3176 || (TYPE_PRECISION (TREE_TYPE (v))
3177 < TYPE_PRECISION (TREE_TYPE (t))))
3186 else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
3188 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
3189 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
3194 arginfo->linear_step = linear_step;
3196 arginfo->simd_lane_linear = true;
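/* Illustrative example (assumed source, not from this file): inside a
   '#pragma omp simd' body, an address such as '&priv[lane]', where LANE
   is derived from IFN_GOMP_SIMD_LANE, decomposes into an invariant base
   plus a constant multiple of the lane number, so the walk above records
   it as linear within the simd lane.  */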
3202 /* Function vectorizable_simd_clone_call.
3204 Check if STMT performs a function call that can be vectorized
3205 by calling a simd clone of the function.
3206 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3207 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3208 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
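/* Illustrative example (assumed declarations, not from this file): given

     #pragma omp declare simd
     int foo (int x);

   a call 'y = foo (a[i])' in a vectorizable loop can be replaced by a call
   to a simd clone of foo that takes and returns vectors (e.g. V4SI for
   simdlen 4); this function selects the best available clone and emits
   the vectorized calls.  */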
3211 vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
3212 gimple **vec_stmt, slp_tree slp_node)
3217 tree vec_oprnd0 = NULL_TREE;
3218 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
3220 unsigned int nunits;
3221 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3222 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3223 vec_info *vinfo = stmt_info->vinfo;
3224 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
3225 tree fndecl, new_temp;
3227 gimple *new_stmt = NULL;
3229 auto_vec<simd_call_arg_info> arginfo;
3230 vec<tree> vargs = vNULL;
3232 tree lhs, rtype, ratype;
3233 vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;
3235 /* Is STMT a vectorizable call? */
3236 if (!is_gimple_call (stmt))
3239 fndecl = gimple_call_fndecl (stmt);
3240 if (fndecl == NULL_TREE)
3243 struct cgraph_node *node = cgraph_node::get (fndecl);
3244 if (node == NULL || node->simd_clones == NULL)
3247 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3250 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3254 if (gimple_call_lhs (stmt)
3255 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3258 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3260 vectype = STMT_VINFO_VECTYPE (stmt_info);
3262 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
3269 /* Process function arguments. */
3270 nargs = gimple_call_num_args (stmt);
3272 /* Bail out if the function has zero arguments. */
3276 arginfo.reserve (nargs, true);
3278 for (i = 0; i < nargs; i++)
3280 simd_call_arg_info thisarginfo;
3283 thisarginfo.linear_step = 0;
3284 thisarginfo.align = 0;
3285 thisarginfo.op = NULL_TREE;
3286 thisarginfo.simd_lane_linear = false;
3288 op = gimple_call_arg (stmt, i);
3289 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
3290 &thisarginfo.vectype)
3291 || thisarginfo.dt == vect_uninitialized_def)
3293 if (dump_enabled_p ())
3294 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3295 "use not simple.\n");
3299 if (thisarginfo.dt == vect_constant_def
3300 || thisarginfo.dt == vect_external_def)
3301 gcc_assert (thisarginfo.vectype == NULL_TREE);
3303 gcc_assert (thisarginfo.vectype != NULL_TREE);
3305 /* For linear arguments, the analysis phase should have saved
3306 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3307 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
3308 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
3310 gcc_assert (vec_stmt);
3311 thisarginfo.linear_step
3312 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
3314 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
3315 thisarginfo.simd_lane_linear
3316 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
3317 == boolean_true_node);
3318 /* If the loop has been peeled for alignment, we need to adjust it. */
3319 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
3320 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
3321 if (n1 != n2 && !thisarginfo.simd_lane_linear)
3323 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
3324 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
3325 tree opt = TREE_TYPE (thisarginfo.op);
3326 bias = fold_convert (TREE_TYPE (step), bias);
3327 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
3329 = fold_build2 (POINTER_TYPE_P (opt)
3330 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
3331 thisarginfo.op, bias);
3335 && thisarginfo.dt != vect_constant_def
3336 && thisarginfo.dt != vect_external_def
3338 && TREE_CODE (op) == SSA_NAME
3339 && simple_iv (loop, loop_containing_stmt (stmt), op,
3341 && tree_fits_shwi_p (iv.step))
3343 thisarginfo.linear_step = tree_to_shwi (iv.step);
3344 thisarginfo.op = iv.base;
3346 else if ((thisarginfo.dt == vect_constant_def
3347 || thisarginfo.dt == vect_external_def)
3348 && POINTER_TYPE_P (TREE_TYPE (op)))
3349 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
3350 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3352 if (POINTER_TYPE_P (TREE_TYPE (op))
3353 && !thisarginfo.linear_step
3355 && thisarginfo.dt != vect_constant_def
3356 && thisarginfo.dt != vect_external_def
3359 && TREE_CODE (op) == SSA_NAME)
3360 vect_simd_lane_linear (op, loop, &thisarginfo);
3362 arginfo.quick_push (thisarginfo);
3365 unsigned int badness = 0;
3366 struct cgraph_node *bestn = NULL;
3367 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
3368 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
3370 for (struct cgraph_node *n = node->simd_clones; n != NULL;
3371 n = n->simdclone->next_clone)
3373 unsigned int this_badness = 0;
3374 if (n->simdclone->simdlen
3375 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3376 || n->simdclone->nargs != nargs)
3378 if (n->simdclone->simdlen
3379 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
3380 this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
3381 - exact_log2 (n->simdclone->simdlen)) * 1024;
3382 if (n->simdclone->inbranch)
3383 this_badness += 2048;
3384 int target_badness = targetm.simd_clone.usable (n);
3385 if (target_badness < 0)
3387 this_badness += target_badness * 512;
3388 /* FORNOW: Have to add code to add the mask argument. */
3389 if (n->simdclone->inbranch)
3391 for (i = 0; i < nargs; i++)
3393 switch (n->simdclone->args[i].arg_type)
3395 case SIMD_CLONE_ARG_TYPE_VECTOR:
3396 if (!useless_type_conversion_p
3397 (n->simdclone->args[i].orig_type,
3398 TREE_TYPE (gimple_call_arg (stmt, i))))
3400 else if (arginfo[i].dt == vect_constant_def
3401 || arginfo[i].dt == vect_external_def
3402 || arginfo[i].linear_step)
3405 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3406 if (arginfo[i].dt != vect_constant_def
3407 && arginfo[i].dt != vect_external_def)
3410 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3411 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3412 if (arginfo[i].dt == vect_constant_def
3413 || arginfo[i].dt == vect_external_def
3414 || (arginfo[i].linear_step
3415 != n->simdclone->args[i].linear_step))
3418 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3419 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3420 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3421 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3422 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3423 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3427 case SIMD_CLONE_ARG_TYPE_MASK:
3430 if (i == (size_t) -1)
3432 if (n->simdclone->args[i].alignment > arginfo[i].align)
3437 if (arginfo[i].align)
3438 this_badness += (exact_log2 (arginfo[i].align)
3439 - exact_log2 (n->simdclone->args[i].alignment));
3441 if (i == (size_t) -1)
3443 if (bestn == NULL || this_badness < badness)
3446 badness = this_badness;
3453 for (i = 0; i < nargs; i++)
3454 if ((arginfo[i].dt == vect_constant_def
3455 || arginfo[i].dt == vect_external_def)
3456 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
3459 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
3461 if (arginfo[i].vectype == NULL
3462 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
3463 > bestn->simdclone->simdlen))
3467 fndecl = bestn->decl;
3468 nunits = bestn->simdclone->simdlen;
3469 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3471 /* If the function isn't const, only allow it in simd loops where the user
3472 has asserted that at least nunits consecutive iterations can be
3473 performed using SIMD instructions. */
3474 if ((loop == NULL || (unsigned) loop->safelen < nunits)
3475 && gimple_vuse (stmt))
3478 /* Sanity check: make sure that at least one copy of the vectorized stmt
3479 needs to be generated. */
3480 gcc_assert (ncopies >= 1);
3482 if (!vec_stmt) /* transformation not required. */
3484 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
3485 for (i = 0; i < nargs; i++)
3486 if ((bestn->simdclone->args[i].arg_type
3487 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
3488 || (bestn->simdclone->args[i].arg_type
3489 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
3491 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
3493 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
3494 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
3495 ? size_type_node : TREE_TYPE (arginfo[i].op);
3496 tree ls = build_int_cst (lst, arginfo[i].linear_step);
3497 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
3498 tree sll = arginfo[i].simd_lane_linear
3499 ? boolean_true_node : boolean_false_node;
3500 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
3502 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
3503 if (dump_enabled_p ())
3504 dump_printf_loc (MSG_NOTE, vect_location,
3505 "=== vectorizable_simd_clone_call ===\n");
3506 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
3512 if (dump_enabled_p ())
3513 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3516 scalar_dest = gimple_call_lhs (stmt);
3517 vec_dest = NULL_TREE;
3522 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3523 rtype = TREE_TYPE (TREE_TYPE (fndecl));
3524 if (TREE_CODE (rtype) == ARRAY_TYPE)
3527 rtype = TREE_TYPE (ratype);
3531 prev_stmt_info = NULL;
3532 for (j = 0; j < ncopies; ++j)
3534 /* Build argument list for the vectorized call. */
3536 vargs.create (nargs);
3540 for (i = 0; i < nargs; i++)
3542 unsigned int k, l, m, o;
3544 op = gimple_call_arg (stmt, i);
3545 switch (bestn->simdclone->args[i].arg_type)
3547 case SIMD_CLONE_ARG_TYPE_VECTOR:
3548 atype = bestn->simdclone->args[i].vector_type;
3549 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
3550 for (m = j * o; m < (j + 1) * o; m++)
3552 if (TYPE_VECTOR_SUBPARTS (atype)
3553 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
3555 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
3556 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
3557 / TYPE_VECTOR_SUBPARTS (atype));
3558 gcc_assert ((k & (k - 1)) == 0);
3561 = vect_get_vec_def_for_operand (op, stmt);
3564 vec_oprnd0 = arginfo[i].op;
3565 if ((m & (k - 1)) == 0)
3567 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3570 arginfo[i].op = vec_oprnd0;
3572 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
3574 bitsize_int ((m & (k - 1)) * prec));
3576 = gimple_build_assign (make_ssa_name (atype),
3578 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3579 vargs.safe_push (gimple_assign_lhs (new_stmt));
3583 k = (TYPE_VECTOR_SUBPARTS (atype)
3584 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
3585 gcc_assert ((k & (k - 1)) == 0);
3586 vec<constructor_elt, va_gc> *ctor_elts;
3588 vec_alloc (ctor_elts, k);
3591 for (l = 0; l < k; l++)
3593 if (m == 0 && l == 0)
3595 = vect_get_vec_def_for_operand (op, stmt);
3598 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3600 arginfo[i].op = vec_oprnd0;
3603 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3607 vargs.safe_push (vec_oprnd0);
3610 vec_oprnd0 = build_constructor (atype, ctor_elts);
3612 = gimple_build_assign (make_ssa_name (atype),
3614 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3615 vargs.safe_push (gimple_assign_lhs (new_stmt));
3620 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3621 vargs.safe_push (op);
3623 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3624 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3629 = force_gimple_operand (arginfo[i].op, &stmts, true,
3634 edge pe = loop_preheader_edge (loop);
3635 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3636 gcc_assert (!new_bb);
3638 if (arginfo[i].simd_lane_linear)
3640 vargs.safe_push (arginfo[i].op);
3643 tree phi_res = copy_ssa_name (op);
3644 gphi *new_phi = create_phi_node (phi_res, loop->header);
3645 set_vinfo_for_stmt (new_phi,
3646 new_stmt_vec_info (new_phi, loop_vinfo));
3647 add_phi_arg (new_phi, arginfo[i].op,
3648 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3650 = POINTER_TYPE_P (TREE_TYPE (op))
3651 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3652 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3653 ? sizetype : TREE_TYPE (op);
3655 = wi::mul (bestn->simdclone->args[i].linear_step,
3657 tree tcst = wide_int_to_tree (type, cst);
3658 tree phi_arg = copy_ssa_name (op);
3660 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3661 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3662 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3663 set_vinfo_for_stmt (new_stmt,
3664 new_stmt_vec_info (new_stmt, loop_vinfo));
3665 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3667 arginfo[i].op = phi_res;
3668 vargs.safe_push (phi_res);
3673 = POINTER_TYPE_P (TREE_TYPE (op))
3674 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3675 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3676 ? sizetype : TREE_TYPE (op);
3678 = wi::mul (bestn->simdclone->args[i].linear_step,
3680 tree tcst = wide_int_to_tree (type, cst);
3681 new_temp = make_ssa_name (TREE_TYPE (op));
3682 new_stmt = gimple_build_assign (new_temp, code,
3683 arginfo[i].op, tcst);
3684 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3685 vargs.safe_push (new_temp);
3688 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3689 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3690 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3691 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3692 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3693 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3699 new_stmt = gimple_build_call_vec (fndecl, vargs);
3702 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3704 new_temp = create_tmp_var (ratype);
3705 else if (TYPE_VECTOR_SUBPARTS (vectype)
3706 == TYPE_VECTOR_SUBPARTS (rtype))
3707 new_temp = make_ssa_name (vec_dest, new_stmt);
3709 new_temp = make_ssa_name (rtype, new_stmt);
3710 gimple_call_set_lhs (new_stmt, new_temp);
3712 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3716 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3719 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3720 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3721 gcc_assert ((k & (k - 1)) == 0);
3722 for (l = 0; l < k; l++)
3727 t = build_fold_addr_expr (new_temp);
3728 t = build2 (MEM_REF, vectype, t,
3729 build_int_cst (TREE_TYPE (t),
3730 l * prec / BITS_PER_UNIT));
3733 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3734 bitsize_int (prec), bitsize_int (l * prec));
3736 = gimple_build_assign (make_ssa_name (vectype), t);
3737 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3738 if (j == 0 && l == 0)
3739 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3741 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3743 prev_stmt_info = vinfo_for_stmt (new_stmt);
3748 tree clobber = build_constructor (ratype, NULL);
3749 TREE_THIS_VOLATILE (clobber) = 1;
3750 new_stmt = gimple_build_assign (new_temp, clobber);
3751 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3755 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3757 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3758 / TYPE_VECTOR_SUBPARTS (rtype));
3759 gcc_assert ((k & (k - 1)) == 0);
3760 if ((j & (k - 1)) == 0)
3761 vec_alloc (ret_ctor_elts, k);
3764 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3765 for (m = 0; m < o; m++)
3767 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3768 size_int (m), NULL_TREE, NULL_TREE);
3770 = gimple_build_assign (make_ssa_name (rtype), tem);
3771 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3772 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3773 gimple_assign_lhs (new_stmt));
3775 tree clobber = build_constructor (ratype, NULL);
3776 TREE_THIS_VOLATILE (clobber) = 1;
3777 new_stmt = gimple_build_assign (new_temp, clobber);
3778 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3781 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3782 if ((j & (k - 1)) != k - 1)
3784 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3786 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3787 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3789 if ((unsigned) j == k - 1)
3790 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3792 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3794 prev_stmt_info = vinfo_for_stmt (new_stmt);
3799 tree t = build_fold_addr_expr (new_temp);
3800 t = build2 (MEM_REF, vectype, t,
3801 build_int_cst (TREE_TYPE (t), 0));
3803 = gimple_build_assign (make_ssa_name (vec_dest), t);
3804 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3805 tree clobber = build_constructor (ratype, NULL);
3806 TREE_THIS_VOLATILE (clobber) = 1;
3807 vect_finish_stmt_generation (stmt,
3808 gimple_build_assign (new_temp,
3814 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3816 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3818 prev_stmt_info = vinfo_for_stmt (new_stmt);
3823 /* The call in STMT might prevent it from being removed in DCE.
3824 However, we cannot remove it here, because of the way the SSA name
3825 it defines is mapped to the new definition. So just replace the
3826 rhs of the statement with something harmless. */
3833 type = TREE_TYPE (scalar_dest);
3834 if (is_pattern_stmt_p (stmt_info))
3835 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3837 lhs = gimple_call_lhs (stmt);
3838 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3841 new_stmt = gimple_build_nop ();
3842 set_vinfo_for_stmt (new_stmt, stmt_info);
3843 set_vinfo_for_stmt (stmt, NULL);
3844 STMT_VINFO_STMT (stmt_info) = new_stmt;
3845 gsi_replace (gsi, new_stmt, true);
3846 unlink_stmt_vdef (stmt);
3852 /* Function vect_gen_widened_results_half
3854 Create a vector stmt whose code, number of operands, and result
3855 variable are CODE, OP_TYPE, and VEC_DEST, and whose operands are
3856 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
3857 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3858 needs to be created (DECL is a function-decl of a target-builtin).
3859 STMT is the original scalar stmt that we are vectorizing. */
3862 vect_gen_widened_results_half (enum tree_code code,
3864 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3865 tree vec_dest, gimple_stmt_iterator *gsi,
3871 /* Generate half of the widened result: */
3872 if (code == CALL_EXPR)
3874 /* Target specific support */
3875 if (op_type == binary_op)
3876 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3878 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3879 new_temp = make_ssa_name (vec_dest, new_stmt);
3880 gimple_call_set_lhs (new_stmt, new_temp);
3884 /* Generic support */
3885 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3886 if (op_type != binary_op)
3888 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3889 new_temp = make_ssa_name (vec_dest, new_stmt);
3890 gimple_assign_set_lhs (new_stmt, new_temp);
3892 vect_finish_stmt_generation (stmt, new_stmt, gsi);
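/* For example, a widening multiply of V8HI operands producing V4SI
   results is emitted as two halves (e.g. VEC_WIDEN_MULT_LO_EXPR and
   VEC_WIDEN_MULT_HI_EXPR), each generated by a separate call to the
   function above.  */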
3898 /* Get vectorized definitions for loop-based vectorization. For the first
3899 operand we call vect_get_vec_def_for_operand() (with OPRND containing the
3900 scalar operand), and for the rest we get a copy with
3901 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3902 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3903 The vectors are collected into VEC_OPRNDS. */
3906 vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
3907 vec<tree> *vec_oprnds, int multi_step_cvt)
3911 /* Get first vector operand. */
3912 /* All the vector operands except the very first one (that is, the scalar oprnd)
3914 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3915 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
3917 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3919 vec_oprnds->quick_push (vec_oprnd);
3921 /* Get second vector operand. */
3922 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3923 vec_oprnds->quick_push (vec_oprnd);
3927 /* For conversion in multiple steps, continue to get operands
3930 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3934 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3935 For multi-step conversions, store the resulting vectors and call the function
3939 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3940 int multi_step_cvt, gimple *stmt,
3942 gimple_stmt_iterator *gsi,
3943 slp_tree slp_node, enum tree_code code,
3944 stmt_vec_info *prev_stmt_info)
3947 tree vop0, vop1, new_tmp, vec_dest;
3949 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3951 vec_dest = vec_dsts.pop ();
3953 for (i = 0; i < vec_oprnds->length (); i += 2)
3955 /* Create demotion operation. */
3956 vop0 = (*vec_oprnds)[i];
3957 vop1 = (*vec_oprnds)[i + 1];
3958 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3959 new_tmp = make_ssa_name (vec_dest, new_stmt);
3960 gimple_assign_set_lhs (new_stmt, new_tmp);
3961 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3964 /* Store the resulting vector for next recursive call. */
3965 (*vec_oprnds)[i/2] = new_tmp;
3968 /* This is the last step of the conversion sequence. Store the
3969 vectors in SLP_NODE or in the vector info of the scalar statement
3970 (or in the STMT_VINFO_RELATED_STMT chain). */
3972 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3975 if (!*prev_stmt_info)
3976 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3978 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3980 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3985 /* For multi-step demotion operations we first generate demotion operations
3986 from the source type to the intermediate types, and then combine the
3987 results (stored in VEC_OPRNDS) in a demotion operation to the destination
3991 /* At each level of recursion we have half of the operands we had at the
3993 vec_oprnds->truncate ((i+1)/2);
3994 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3995 stmt, vec_dsts, gsi, slp_node,
3996 VEC_PACK_TRUNC_EXPR,
4000 vec_dsts.quick_push (vec_dest);
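/* For example, demoting V4SI operands to a V16QI result takes two steps:
   pairs of V4SI vectors are packed into V8HI vectors with
   VEC_PACK_TRUNC_EXPR, then pairs of those into V16QI; each recursion
   level halves the operand list.  */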
4004 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
4005 and VEC_OPRNDS1 (for binary operations). For multi-step conversions, store
4006 the resulting vectors and call the function recursively. */
4009 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
4010 vec<tree> *vec_oprnds1,
4011 gimple *stmt, tree vec_dest,
4012 gimple_stmt_iterator *gsi,
4013 enum tree_code code1,
4014 enum tree_code code2, tree decl1,
4015 tree decl2, int op_type)
4018 tree vop0, vop1, new_tmp1, new_tmp2;
4019 gimple *new_stmt1, *new_stmt2;
4020 vec<tree> vec_tmp = vNULL;
4022 vec_tmp.create (vec_oprnds0->length () * 2);
4023 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
4025 if (op_type == binary_op)
4026 vop1 = (*vec_oprnds1)[i];
4030 /* Generate the two halves of promotion operation. */
4031 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
4032 op_type, vec_dest, gsi, stmt);
4033 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
4034 op_type, vec_dest, gsi, stmt);
4035 if (is_gimple_call (new_stmt1))
4037 new_tmp1 = gimple_call_lhs (new_stmt1);
4038 new_tmp2 = gimple_call_lhs (new_stmt2);
4042 new_tmp1 = gimple_assign_lhs (new_stmt1);
4043 new_tmp2 = gimple_assign_lhs (new_stmt2);
4046 /* Store the results for the next step. */
4047 vec_tmp.quick_push (new_tmp1);
4048 vec_tmp.quick_push (new_tmp2);
4051 vec_oprnds0->release ();
4052 *vec_oprnds0 = vec_tmp;
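/* For example, promoting V8HI operands to V4SI results doubles the number
   of vectors: each V8HI input yields a low and a high half (e.g. via
   VEC_UNPACK_LO_EXPR and VEC_UNPACK_HI_EXPR), pushed in order for the
   next step.  */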
4056 /* Check if STMT performs a conversion operation that can be vectorized.
4057 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4058 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4059 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
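/* Illustrative example (assumed source loop, not from this file):

     short *a; int *b;
     for (i = 0; i < n; i++)
       b[i] = a[i];

   is a widening conversion (modifier == WIDEN): each V8HI vector of A
   yields two V4SI vectors of B.  The reverse assignment would be a
   narrowing conversion (modifier == NARROW).  */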
4062 vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
4063 gimple **vec_stmt, slp_tree slp_node)
4067 tree op0, op1 = NULL_TREE;
4068 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
4069 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4070 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4071 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
4072 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
4073 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
4076 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4078 gimple *new_stmt = NULL;
4079 stmt_vec_info prev_stmt_info;
4082 tree vectype_out, vectype_in;
4084 tree lhs_type, rhs_type;
4085 enum { NARROW, NONE, WIDEN } modifier;
4086 vec<tree> vec_oprnds0 = vNULL;
4087 vec<tree> vec_oprnds1 = vNULL;
4089 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4090 vec_info *vinfo = stmt_info->vinfo;
4091 int multi_step_cvt = 0;
4092 vec<tree> interm_types = vNULL;
4093 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
4095 unsigned short fltsz;
4097 /* Is STMT a vectorizable conversion? */
4099 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4102 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4106 if (!is_gimple_assign (stmt))
4109 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4112 code = gimple_assign_rhs_code (stmt);
4113 if (!CONVERT_EXPR_CODE_P (code)
4114 && code != FIX_TRUNC_EXPR
4115 && code != FLOAT_EXPR
4116 && code != WIDEN_MULT_EXPR
4117 && code != WIDEN_LSHIFT_EXPR)
4120 op_type = TREE_CODE_LENGTH (code);
4122 /* Check types of lhs and rhs. */
4123 scalar_dest = gimple_assign_lhs (stmt);
4124 lhs_type = TREE_TYPE (scalar_dest);
4125 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4127 op0 = gimple_assign_rhs1 (stmt);
4128 rhs_type = TREE_TYPE (op0);
4130 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4131 && !((INTEGRAL_TYPE_P (lhs_type)
4132 && INTEGRAL_TYPE_P (rhs_type))
4133 || (SCALAR_FLOAT_TYPE_P (lhs_type)
4134 && SCALAR_FLOAT_TYPE_P (rhs_type))))
4137 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4138 && ((INTEGRAL_TYPE_P (lhs_type)
4139 && !type_has_mode_precision_p (lhs_type))
4140 || (INTEGRAL_TYPE_P (rhs_type)
4141 && !type_has_mode_precision_p (rhs_type))))
4143 if (dump_enabled_p ())
4144 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4145 "type conversion to/from bit-precision unsupported."
4150 /* Check the operands of the operation. */
4151 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
4153 if (dump_enabled_p ())
4154 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4155 "use not simple.\n");
4158 if (op_type == binary_op)
4162 op1 = gimple_assign_rhs2 (stmt);
4163 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
/* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of OP1.  */
4166 if (CONSTANT_CLASS_P (op0))
4167 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
4169 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
4173 if (dump_enabled_p ())
4174 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4175 "use not simple.\n");
/* If op0 is an external or constant def, use a vector type of
   the same size as the output vector type.  */
4183 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
4185 gcc_assert (vectype_in);
4188 if (dump_enabled_p ())
4190 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4191 "no vectype for scalar type ");
4192 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4193 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4199 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
4200 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
4202 if (dump_enabled_p ())
4204 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4205 "can't convert between boolean and non "
4207 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4208 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4214 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
4215 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4216 if (nunits_in < nunits_out)
4218 else if (nunits_out == nunits_in)
4223 /* Multiple types in SLP are handled by creating the appropriate number of
vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
case of SLP.  */
4228 else if (modifier == NARROW)
4229 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
4231 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
4233 /* Sanity check: make sure that at least one copy of the vectorized stmt
4234 needs to be generated. */
4235 gcc_assert (ncopies >= 1);
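/* A worked example of the NCOPIES computation (hypothetical numbers):
   with VF == 8 and 128-bit vectors, a short -> int widening has
   vectype_in V8HI and vectype_out V4SI, so modifier == WIDEN and
   ncopies = 8 / 8 = 1, the single copy producing two V4SI results.  */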
4237 bool found_mode = false;
4238 scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
4239 scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
4240 opt_scalar_mode rhs_mode_iter;
4242 /* Supportable by target? */
4246 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4248 if (supportable_convert_operation (code, vectype_out, vectype_in,
4253 if (dump_enabled_p ())
4254 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4255 "conversion not supported by target.\n");
4259 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
4260 &code1, &code2, &multi_step_cvt,
/* Binary widening operations can only be supported directly by the
   architecture.  */
4265 gcc_assert (!(multi_step_cvt && op_type == binary_op));
4269 if (code != FLOAT_EXPR
4270 || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
4273 fltsz = GET_MODE_SIZE (lhs_mode);
4274 FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
4276 rhs_mode = rhs_mode_iter.require ();
4277 if (GET_MODE_SIZE (rhs_mode) > fltsz)
cvt_type
  = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4282 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4283 if (cvt_type == NULL_TREE)
4286 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4288 if (!supportable_convert_operation (code, vectype_out,
4289 cvt_type, &decl1, &codecvt1))
4292 else if (!supportable_widening_operation (code, stmt, vectype_out,
4293 cvt_type, &codecvt1,
4294 &codecvt2, &multi_step_cvt,
4298 gcc_assert (multi_step_cvt == 0);
4300 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
4301 vectype_in, &code1, &code2,
4302 &multi_step_cvt, &interm_types))
4312 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4313 codecvt2 = ERROR_MARK;
4317 interm_types.safe_push (cvt_type);
4318 cvt_type = NULL_TREE;
4323 gcc_assert (op_type == unary_op);
4324 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
4325 &code1, &multi_step_cvt,
4329 if (code != FIX_TRUNC_EXPR
4330 || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
cvt_type
  = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4335 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4336 if (cvt_type == NULL_TREE)
4338 if (!supportable_convert_operation (code, cvt_type, vectype_in,
4341 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
4342 &code1, &multi_step_cvt,
4351 if (!vec_stmt) /* transformation not required. */
4353 if (dump_enabled_p ())
4354 dump_printf_loc (MSG_NOTE, vect_location,
4355 "=== vectorizable_conversion ===\n");
4356 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
4358 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
4359 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
4361 else if (modifier == NARROW)
4363 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
4364 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4368 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
4369 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4371 interm_types.release ();
4376 if (dump_enabled_p ())
4377 dump_printf_loc (MSG_NOTE, vect_location,
4378 "transform conversion. ncopies = %d.\n", ncopies);
4380 if (op_type == binary_op)
4382 if (CONSTANT_CLASS_P (op0))
4383 op0 = fold_convert (TREE_TYPE (op1), op0);
4384 else if (CONSTANT_CLASS_P (op1))
4385 op1 = fold_convert (TREE_TYPE (op0), op1);
4388 /* In case of multi-step conversion, we first generate conversion operations
to the intermediate types, and then from those types to the final one.
We create vector destinations for the intermediate types (TYPES) received
4391 from supportable_*_operation, and store them in the correct order
4392 for future use in vect_create_vectorized_*_stmts (). */
4393 auto_vec<tree> vec_dsts (multi_step_cvt + 1);
4394 vec_dest = vect_create_destination_var (scalar_dest,
4395 (cvt_type && modifier == WIDEN)
4396 ? cvt_type : vectype_out);
4397 vec_dsts.quick_push (vec_dest);
4401 for (i = interm_types.length () - 1;
4402 interm_types.iterate (i, &intermediate_type); i--)
vec_dest = vect_create_destination_var (scalar_dest,
                                        intermediate_type);
4406 vec_dsts.quick_push (vec_dest);
4411 vec_dest = vect_create_destination_var (scalar_dest,
modifier == WIDEN
? vectype_out : cvt_type);
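/* E.g. (illustrative) for an int -> char NARROW conversion with a short
   intermediate type, VEC_DSTS now holds { char_dest, short_dest }:
   vect_create_vectorized_demotion_stmts pops the short destination for
   the first VEC_PACK_TRUNC_EXPR step and the char destination for the
   last one.  */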
4417 if (modifier == WIDEN)
4419 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
4420 if (op_type == binary_op)
4421 vec_oprnds1.create (1);
4423 else if (modifier == NARROW)
4424 vec_oprnds0.create (
4425 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
4427 else if (code == WIDEN_LSHIFT_EXPR)
4428 vec_oprnds1.create (slp_node->vec_stmts_size);
4431 prev_stmt_info = NULL;
4435 for (j = 0; j < ncopies; j++)
4438 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
4440 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
4442 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
/* Arguments are ready; create the new vector stmt.  */
4445 if (code1 == CALL_EXPR)
4447 new_stmt = gimple_build_call (decl1, 1, vop0);
4448 new_temp = make_ssa_name (vec_dest, new_stmt);
4449 gimple_call_set_lhs (new_stmt, new_temp);
4453 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
4454 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
4455 new_temp = make_ssa_name (vec_dest, new_stmt);
4456 gimple_assign_set_lhs (new_stmt, new_temp);
4459 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4461 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4464 if (!prev_stmt_info)
4465 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4467 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4468 prev_stmt_info = vinfo_for_stmt (new_stmt);
4475 /* In case the vectorization factor (VF) is bigger than the number
4476 of elements that we can fit in a vectype (nunits), we have to
generate more than one vector stmt - i.e., we need to "unroll"
4478 the vector stmt by a factor VF/nunits. */
4479 for (j = 0; j < ncopies; j++)
4486 if (code == WIDEN_LSHIFT_EXPR)
4491 /* Store vec_oprnd1 for every vector stmt to be created
4492 for SLP_NODE. We check during the analysis that all
4493 the shift arguments are the same. */
4494 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4495 vec_oprnds1.quick_push (vec_oprnd1);
4497 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4501 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
4502 &vec_oprnds1, slp_node);
4506 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
4507 vec_oprnds0.quick_push (vec_oprnd0);
4508 if (op_type == binary_op)
4510 if (code == WIDEN_LSHIFT_EXPR)
4513 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
4514 vec_oprnds1.quick_push (vec_oprnd1);
4520 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
4521 vec_oprnds0.truncate (0);
4522 vec_oprnds0.quick_push (vec_oprnd0);
4523 if (op_type == binary_op)
4525 if (code == WIDEN_LSHIFT_EXPR)
4528 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
4530 vec_oprnds1.truncate (0);
4531 vec_oprnds1.quick_push (vec_oprnd1);
4535 /* Arguments are ready. Create the new vector stmts. */
4536 for (i = multi_step_cvt; i >= 0; i--)
4538 tree this_dest = vec_dsts[i];
4539 enum tree_code c1 = code1, c2 = code2;
4540 if (i == 0 && codecvt2 != ERROR_MARK)
4545 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
4547 stmt, this_dest, gsi,
4548 c1, c2, decl1, decl2,
4552 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4556 if (codecvt1 == CALL_EXPR)
4558 new_stmt = gimple_build_call (decl1, 1, vop0);
4559 new_temp = make_ssa_name (vec_dest, new_stmt);
4560 gimple_call_set_lhs (new_stmt, new_temp);
4564 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4565 new_temp = make_ssa_name (vec_dest);
4566 new_stmt = gimple_build_assign (new_temp, codecvt1,
4570 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4573 new_stmt = SSA_NAME_DEF_STMT (vop0);
4576 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4579 if (!prev_stmt_info)
4580 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4582 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4583 prev_stmt_info = vinfo_for_stmt (new_stmt);
4588 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4592 /* In case the vectorization factor (VF) is bigger than the number
4593 of elements that we can fit in a vectype (nunits), we have to
generate more than one vector stmt - i.e., we need to "unroll"
4595 the vector stmt by a factor VF/nunits. */
4596 for (j = 0; j < ncopies; j++)
4600 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4604 vec_oprnds0.truncate (0);
4605 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4606 vect_pow2 (multi_step_cvt) - 1);
4609 /* Arguments are ready. Create the new vector stmts. */
4611 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4613 if (codecvt1 == CALL_EXPR)
4615 new_stmt = gimple_build_call (decl1, 1, vop0);
4616 new_temp = make_ssa_name (vec_dest, new_stmt);
4617 gimple_call_set_lhs (new_stmt, new_temp);
4621 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4622 new_temp = make_ssa_name (vec_dest);
4623 new_stmt = gimple_build_assign (new_temp, codecvt1,
4627 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4628 vec_oprnds0[i] = new_temp;
4631 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4632 stmt, vec_dsts, gsi,
4637 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4641 vec_oprnds0.release ();
4642 vec_oprnds1.release ();
4643 interm_types.release ();
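/* Usage sketch for the WIDEN-with-CVT_TYPE path above (hypothetical
   types): vectorizing  f = (float) s  for short S with 128-bit vectors
   first unpacks each V8HI into two V4SI (code1/code2) and then converts
   every V4SI to V4SF (codecvt1 == FLOAT_EXPR).  */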
4649 /* Function vectorizable_assignment.
4651 Check if STMT performs an assignment (copy) that can be vectorized.
4652 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4653 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4654 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4657 vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
4658 gimple **vec_stmt, slp_tree slp_node)
4663 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4664 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4667 enum vect_def_type dt[1] = {vect_unknown_def_type};
4671 vec<tree> vec_oprnds = vNULL;
4673 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4674 vec_info *vinfo = stmt_info->vinfo;
4675 gimple *new_stmt = NULL;
4676 stmt_vec_info prev_stmt_info = NULL;
4677 enum tree_code code;
4680 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4683 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4687 /* Is vectorizable assignment? */
4688 if (!is_gimple_assign (stmt))
4691 scalar_dest = gimple_assign_lhs (stmt);
4692 if (TREE_CODE (scalar_dest) != SSA_NAME)
4695 code = gimple_assign_rhs_code (stmt);
4696 if (gimple_assign_single_p (stmt)
4697 || code == PAREN_EXPR
4698 || CONVERT_EXPR_CODE_P (code))
4699 op = gimple_assign_rhs1 (stmt);
4703 if (code == VIEW_CONVERT_EXPR)
4704 op = TREE_OPERAND (op, 0);
4706 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4707 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4709 /* Multiple types in SLP are handled by creating the appropriate number of
vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
case of SLP.  */
4715 ncopies = vect_get_num_copies (loop_vinfo, vectype);
4717 gcc_assert (ncopies >= 1);
4719 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
4721 if (dump_enabled_p ())
4722 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4723 "use not simple.\n");
4727 /* We can handle NOP_EXPR conversions that do not change the number
4728 of elements or the vector size. */
4729 if ((CONVERT_EXPR_CODE_P (code)
4730 || code == VIEW_CONVERT_EXPR)
&& (!vectype_in
    || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4733 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4734 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4737 /* We do not handle bit-precision changes. */
4738 if ((CONVERT_EXPR_CODE_P (code)
4739 || code == VIEW_CONVERT_EXPR)
4740 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4741 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
4742 || !type_has_mode_precision_p (TREE_TYPE (op)))
4743 /* But a conversion that does not change the bit-pattern is ok. */
4744 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4745 > TYPE_PRECISION (TREE_TYPE (op)))
4746 && TYPE_UNSIGNED (TREE_TYPE (op)))
4747 /* Conversion between boolean types of different sizes is
a simple assignment in case their vectypes are same
boolean vectors.  */
4750 && (!VECTOR_BOOLEAN_TYPE_P (vectype)
4751 || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
4753 if (dump_enabled_p ())
4754 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4755 "type conversion to/from bit-precision "
4760 if (!vec_stmt) /* transformation not required. */
4762 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4763 if (dump_enabled_p ())
4764 dump_printf_loc (MSG_NOTE, vect_location,
4765 "=== vectorizable_assignment ===\n");
4766 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
4771 if (dump_enabled_p ())
4772 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4775 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4778 for (j = 0; j < ncopies; j++)
4782 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
4784 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
/* Arguments are ready.  Create the new vector stmt.  */
4787 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4789 if (CONVERT_EXPR_CODE_P (code)
4790 || code == VIEW_CONVERT_EXPR)
4791 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4792 new_stmt = gimple_build_assign (vec_dest, vop);
4793 new_temp = make_ssa_name (vec_dest, new_stmt);
4794 gimple_assign_set_lhs (new_stmt, new_temp);
4795 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4797 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4804 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4806 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4808 prev_stmt_info = vinfo_for_stmt (new_stmt);
4811 vec_oprnds.release ();
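/* E.g. (illustrative)  u = (unsigned int) i  for int I changes neither
   the element count nor the vector size, so it is vectorized above as a
   plain copy through a VIEW_CONVERT_EXPR of each vector operand.  */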
4816 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4817 either as shift by a scalar or by a vector. */
4820 vect_supportable_shift (enum tree_code code, tree scalar_type)
4823 machine_mode vec_mode;
4828 vectype = get_vectype_for_scalar_type (scalar_type);
4832 optab = optab_for_tree_code (code, vectype, optab_scalar);
4834 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4836 optab = optab_for_tree_code (code, vectype, optab_vector);
4838 || (optab_handler (optab, TYPE_MODE (vectype))
4839 == CODE_FOR_nothing))
4843 vec_mode = TYPE_MODE (vectype);
4844 icode = (int) optab_handler (optab, vec_mode);
if (icode == CODE_FOR_nothing)
  return false;

return true;
}
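/* Usage sketch (illustrative caller): pattern recognition can test

     if (vect_supportable_shift (RSHIFT_EXPR, itype))
       ...

   before rewriting, say, a division by a power of two into shifts.  */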
4852 /* Function vectorizable_shift.
4854 Check if STMT performs a shift operation that can be vectorized.
4855 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4856 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4857 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4860 vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
4861 gimple **vec_stmt, slp_tree slp_node)
4865 tree op0, op1 = NULL;
4866 tree vec_oprnd1 = NULL_TREE;
4867 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4869 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4870 enum tree_code code;
4871 machine_mode vec_mode;
4875 machine_mode optab_op2_mode;
4877 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4879 gimple *new_stmt = NULL;
4880 stmt_vec_info prev_stmt_info;
4887 vec<tree> vec_oprnds0 = vNULL;
4888 vec<tree> vec_oprnds1 = vNULL;
4891 bool scalar_shift_arg = true;
4892 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4893 vec_info *vinfo = stmt_info->vinfo;
4895 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4898 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4902 /* Is STMT a vectorizable binary/unary operation? */
4903 if (!is_gimple_assign (stmt))
4906 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4909 code = gimple_assign_rhs_code (stmt);
4911 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4912 || code == RROTATE_EXPR))
4915 scalar_dest = gimple_assign_lhs (stmt);
4916 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4917 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
4919 if (dump_enabled_p ())
4920 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4921 "bit-precision shifts not supported.\n");
4925 op0 = gimple_assign_rhs1 (stmt);
4926 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4928 if (dump_enabled_p ())
4929 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4930 "use not simple.\n");
4933 /* If op0 is an external or constant def use a vector type with
4934 the same size as the output vector type. */
4936 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4938 gcc_assert (vectype);
4941 if (dump_enabled_p ())
4942 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4943 "no vectype for scalar type\n");
4947 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4948 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4949 if (nunits_out != nunits_in)
4952 op1 = gimple_assign_rhs2 (stmt);
4953 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
4955 if (dump_enabled_p ())
4956 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4957 "use not simple.\n");
4961 /* Multiple types in SLP are handled by creating the appropriate number of
vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
case of SLP.  */
4967 ncopies = vect_get_num_copies (loop_vinfo, vectype);
4969 gcc_assert (ncopies >= 1);
/* Determine whether the shift amount is a vector or a scalar.  If the
   shift/rotate amount is a vector, use the vector/vector shift optabs.  */
4974 if ((dt[1] == vect_internal_def
4975 || dt[1] == vect_induction_def)
4977 scalar_shift_arg = false;
4978 else if (dt[1] == vect_constant_def
4979 || dt[1] == vect_external_def
4980 || dt[1] == vect_internal_def)
4982 /* In SLP, need to check whether the shift count is the same,
in loops if it is a constant or invariant, it is always
a scalar shift.  */
4987 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4990 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4991 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4992 scalar_shift_arg = false;
4995 /* If the shift amount is computed by a pattern stmt we cannot
use the scalar amount directly thus give up and use a vector
shift.  */
4998 if (dt[1] == vect_internal_def)
5000 gimple *def = SSA_NAME_DEF_STMT (op1);
5001 if (is_pattern_stmt_p (vinfo_for_stmt (def)))
5002 scalar_shift_arg = false;
5007 if (dump_enabled_p ())
5008 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5009 "operand mode requires invariant argument.\n");
5013 /* Vector shifted by vector. */
5014 if (!scalar_shift_arg)
5016 optab = optab_for_tree_code (code, vectype, optab_vector);
5017 if (dump_enabled_p ())
5018 dump_printf_loc (MSG_NOTE, vect_location,
5019 "vector/vector shift/rotate found.\n");
5022 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
5023 if (op1_vectype == NULL_TREE
5024 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
5026 if (dump_enabled_p ())
5027 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5028 "unusable type for last operand in"
5029 " vector/vector shift/rotate.\n");
5033 /* See if the machine has a vector shifted by scalar insn and if not
5034 then see if it has a vector shifted by vector insn. */
5037 optab = optab_for_tree_code (code, vectype, optab_scalar);
5039 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
5041 if (dump_enabled_p ())
5042 dump_printf_loc (MSG_NOTE, vect_location,
5043 "vector/scalar shift/rotate found.\n");
5047 optab = optab_for_tree_code (code, vectype, optab_vector);
5049 && (optab_handler (optab, TYPE_MODE (vectype))
5050 != CODE_FOR_nothing))
5052 scalar_shift_arg = false;
5054 if (dump_enabled_p ())
5055 dump_printf_loc (MSG_NOTE, vect_location,
5056 "vector/vector shift/rotate found.\n");
5058 /* Unlike the other binary operators, shifts/rotates have
5059 the rhs being int, instead of the same type as the lhs,
5060 so make sure the scalar is the right type if we are
5061 dealing with vectors of long long/long/short/char. */
5062 if (dt[1] == vect_constant_def)
5063 op1 = fold_convert (TREE_TYPE (vectype), op1);
5064 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
5068 && TYPE_MODE (TREE_TYPE (vectype))
5069 != TYPE_MODE (TREE_TYPE (op1)))
5071 if (dump_enabled_p ())
5072 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5073 "unusable type for last operand in"
5074 " vector/vector shift/rotate.\n");
5077 if (vec_stmt && !slp_node)
5079 op1 = fold_convert (TREE_TYPE (vectype), op1);
5080 op1 = vect_init_vector (stmt, op1,
5081 TREE_TYPE (vectype), NULL);
5088 /* Supportable by target? */
5091 if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                 "no optab.\n");
return false;
5096 vec_mode = TYPE_MODE (vectype);
5097 icode = (int) optab_handler (optab, vec_mode);
5098 if (icode == CODE_FOR_nothing)
5100 if (dump_enabled_p ())
5101 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5102 "op not supported by target.\n");
5103 /* Check only during analysis. */
if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
    || (!vec_stmt
        && !vect_worthwhile_without_simd_p (vinfo, code)))
  return false;
5108 if (dump_enabled_p ())
5109 dump_printf_loc (MSG_NOTE, vect_location,
5110 "proceeding using word mode.\n");
5113 /* Worthwhile without SIMD support? Check only during analysis. */
5115 && !VECTOR_MODE_P (TYPE_MODE (vectype))
5116 && !vect_worthwhile_without_simd_p (vinfo, code))
5118 if (dump_enabled_p ())
5119 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5120 "not worthwhile without SIMD support.\n");
5124 if (!vec_stmt) /* transformation not required. */
5126 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
5127 if (dump_enabled_p ())
5128 dump_printf_loc (MSG_NOTE, vect_location,
5129 "=== vectorizable_shift ===\n");
5130 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5136 if (dump_enabled_p ())
5137 dump_printf_loc (MSG_NOTE, vect_location,
5138 "transform binary/unary operation.\n");
5141 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5143 prev_stmt_info = NULL;
5144 for (j = 0; j < ncopies; j++)
5149 if (scalar_shift_arg)
5151 /* Vector shl and shr insn patterns can be defined with scalar
5152 operand 2 (shift operand). In this case, use constant or loop
invariant op1 directly, without extending it to vector mode
first.  */
5155 optab_op2_mode = insn_data[icode].operand[2].mode;
5156 if (!VECTOR_MODE_P (optab_op2_mode))
5158 if (dump_enabled_p ())
5159 dump_printf_loc (MSG_NOTE, vect_location,
5160 "operand 1 using scalar mode.\n");
5162 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
5163 vec_oprnds1.quick_push (vec_oprnd1);
5166 /* Store vec_oprnd1 for every vector stmt to be created
5167 for SLP_NODE. We check during the analysis that all
5168 the shift arguments are the same.
5169 TODO: Allow different constants for different vector
5170 stmts generated for an SLP instance. */
5171 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5172 vec_oprnds1.quick_push (vec_oprnd1);
5177 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
5178 (a special case for certain kind of vector shifts); otherwise,
5179 operand 1 should be of a vector type (the usual case). */
5181 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5184 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5188 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5190 /* Arguments are ready. Create the new vector stmt. */
5191 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5193 vop1 = vec_oprnds1[i];
5194 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
5195 new_temp = make_ssa_name (vec_dest, new_stmt);
5196 gimple_assign_set_lhs (new_stmt, new_temp);
5197 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5199 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5206 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5208 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5209 prev_stmt_info = vinfo_for_stmt (new_stmt);
5212 vec_oprnds0.release ();
5213 vec_oprnds1.release ();
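/* Illustrative contrast of the two forms handled above (not from the
   original source):

     a[i] = b[i] << 3;      invariant count; with a vector/scalar optab
                            the scalar 3 is kept as operand 2
     a[i] = b[i] << c[i];   variable count; requires the vector/vector
                            optab and a vector of counts  */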
5219 /* Function vectorizable_operation.
Check if STMT performs a binary, unary or ternary operation that can
be vectorized.
5223 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5224 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5225 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5228 vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
5229 gimple **vec_stmt, slp_tree slp_node)
5233 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
5234 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5236 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5237 enum tree_code code, orig_code;
5238 machine_mode vec_mode;
5242 bool target_support_p;
5244 enum vect_def_type dt[3]
5245 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
5247 gimple *new_stmt = NULL;
5248 stmt_vec_info prev_stmt_info;
5254 vec<tree> vec_oprnds0 = vNULL;
5255 vec<tree> vec_oprnds1 = vNULL;
5256 vec<tree> vec_oprnds2 = vNULL;
5257 tree vop0, vop1, vop2;
5258 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5259 vec_info *vinfo = stmt_info->vinfo;
5261 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5264 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5268 /* Is STMT a vectorizable binary/unary operation? */
5269 if (!is_gimple_assign (stmt))
5272 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5275 orig_code = code = gimple_assign_rhs_code (stmt);
5277 /* For pointer addition and subtraction, we should use the normal
5278 plus and minus for the vector operation. */
5279 if (code == POINTER_PLUS_EXPR)
5281 if (code == POINTER_DIFF_EXPR)
/* Support only unary, binary and ternary operations.  */
5285 op_type = TREE_CODE_LENGTH (code);
5286 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
5288 if (dump_enabled_p ())
5289 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5290 "num. args = %d (not unary/binary/ternary op).\n",
5295 scalar_dest = gimple_assign_lhs (stmt);
5296 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
/* Most operations cannot handle bit-precision types without extra
   truncations.  */
5300 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
5301 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
/* Exceptions are bitwise binary operations.  */
5303 && code != BIT_IOR_EXPR
5304 && code != BIT_XOR_EXPR
5305 && code != BIT_AND_EXPR)
5307 if (dump_enabled_p ())
5308 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5309 "bit-precision arithmetic not supported.\n");
5313 op0 = gimple_assign_rhs1 (stmt);
5314 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
5316 if (dump_enabled_p ())
5317 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5318 "use not simple.\n");
5321 /* If op0 is an external or constant def use a vector type with
5322 the same size as the output vector type. */
5325 /* For boolean type we cannot determine vectype by
5326 invariant value (don't know whether it is a vector
5327 of booleans or vector of integers). We use output
vectype because operations on boolean don't change
type.  */
5330 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
5332 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
5334 if (dump_enabled_p ())
5335 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5336 "not supported operation on bool value.\n");
5339 vectype = vectype_out;
5342 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5345 gcc_assert (vectype);
5348 if (dump_enabled_p ())
5350 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5351 "no vectype for scalar type ");
5352 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
5354 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
5360 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5361 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5362 if (nunits_out != nunits_in)
5365 if (op_type == binary_op || op_type == ternary_op)
5367 op1 = gimple_assign_rhs2 (stmt);
5368 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
5370 if (dump_enabled_p ())
5371 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5372 "use not simple.\n");
5376 if (op_type == ternary_op)
5378 op2 = gimple_assign_rhs3 (stmt);
5379 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
5381 if (dump_enabled_p ())
5382 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5383 "use not simple.\n");
5388 /* Multiple types in SLP are handled by creating the appropriate number of
vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
case of SLP.  */
5394 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5396 gcc_assert (ncopies >= 1);
5398 /* Shifts are handled in vectorizable_shift (). */
5399 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5400 || code == RROTATE_EXPR)
5403 /* Supportable by target? */
5405 vec_mode = TYPE_MODE (vectype);
5406 if (code == MULT_HIGHPART_EXPR)
5407 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
5410 optab = optab_for_tree_code (code, vectype, optab_default);
5413 if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                 "no optab.\n");
return false;
5418 target_support_p = (optab_handler (optab, vec_mode)
5419 != CODE_FOR_nothing);
5422 if (!target_support_p)
5424 if (dump_enabled_p ())
5425 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5426 "op not supported by target.\n");
5427 /* Check only during analysis. */
5428 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
5429 || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
5431 if (dump_enabled_p ())
5432 dump_printf_loc (MSG_NOTE, vect_location,
5433 "proceeding using word mode.\n");
5436 /* Worthwhile without SIMD support? Check only during analysis. */
5437 if (!VECTOR_MODE_P (vec_mode)
5439 && !vect_worthwhile_without_simd_p (vinfo, code))
5441 if (dump_enabled_p ())
5442 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5443 "not worthwhile without SIMD support.\n");
5447 if (!vec_stmt) /* transformation not required. */
5449 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
5450 if (dump_enabled_p ())
5451 dump_printf_loc (MSG_NOTE, vect_location,
5452 "=== vectorizable_operation ===\n");
5453 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5459 if (dump_enabled_p ())
5460 dump_printf_loc (MSG_NOTE, vect_location,
5461 "transform binary/unary operation.\n");
5464 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5466 /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
5467 vectors with unsigned elements, but the result is signed. So, we
5468 need to compute the MINUS_EXPR into vectype temporary and
5469 VIEW_CONVERT_EXPR it into the final vectype_out result. */
5470 tree vec_cvt_dest = NULL_TREE;
5471 if (orig_code == POINTER_DIFF_EXPR)
5472 vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);
5474 /* In case the vectorization factor (VF) is bigger than the number
5475 of elements that we can fit in a vectype (nunits), we have to generate
more than one vector stmt - i.e., we need to "unroll" the
5477 vector stmt by a factor VF/nunits. In doing so, we record a pointer
5478 from one copy of the vector stmt to the next, in the field
5479 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
5480 stages to find the correct vector defs to be used when vectorizing
5481 stmts that use the defs of the current stmt. The example below
5482 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
5483 we need to create 4 vectorized stmts):
before vectorization:
                               RELATED_STMT    VEC_STMT
S1:     x = memref      -       -
S2:     z = x + 1       -       -

step 1: vectorize stmt S1 (done in vectorizable_load. See more details
        there):
                               RELATED_STMT    VEC_STMT
5493 VS1_0: vx0 = memref0 VS1_1 -
5494 VS1_1: vx1 = memref1 VS1_2 -
5495 VS1_2: vx2 = memref2 VS1_3 -
5496 VS1_3: vx3 = memref3 - -
5497 S1: x = load - VS1_0
S2:     z = x + 1       -       -

step2: vectorize stmt S2 (done here):
5501 To vectorize stmt S2 we first need to find the relevant vector
5502 def for the first operand 'x'. This is, as usual, obtained from
5503 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
5504 that defines 'x' (S1). This way we find the stmt VS1_0, and the
5505 relevant vector def 'vx0'. Having found 'vx0' we can generate
5506 the vector stmt VS2_0, and as usual, record it in the
5507 STMT_VINFO_VEC_STMT of stmt S2.
5508 When creating the second copy (VS2_1), we obtain the relevant vector
5509 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
5510 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
5511 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
5512 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
5513 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
5514 chain of stmts and pointers:
5515 RELATED_STMT VEC_STMT
5516 VS1_0: vx0 = memref0 VS1_1 -
5517 VS1_1: vx1 = memref1 VS1_2 -
5518 VS1_2: vx2 = memref2 VS1_3 -
5519 VS1_3: vx3 = memref3 - -
5520 S1: x = load - VS1_0
5521 VS2_0: vz0 = vx0 + v1 VS2_1 -
5522 VS2_1: vz1 = vx1 + v1 VS2_2 -
5523 VS2_2: vz2 = vx2 + v1 VS2_3 -
5524 VS2_3: vz3 = vx3 + v1 - -
5525 S2: z = x + 1 - VS2_0 */
5527 prev_stmt_info = NULL;
5528 for (j = 0; j < ncopies; j++)
5533 if (op_type == binary_op || op_type == ternary_op)
5534 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5537 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5539 if (op_type == ternary_op)
5540 vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL,
5545 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5546 if (op_type == ternary_op)
5548 tree vec_oprnd = vec_oprnds2.pop ();
5549 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
5554 /* Arguments are ready. Create the new vector stmt. */
5555 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5557 vop1 = ((op_type == binary_op || op_type == ternary_op)
5558 ? vec_oprnds1[i] : NULL_TREE);
5559 vop2 = ((op_type == ternary_op)
5560 ? vec_oprnds2[i] : NULL_TREE);
5561 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
5562 new_temp = make_ssa_name (vec_dest, new_stmt);
5563 gimple_assign_set_lhs (new_stmt, new_temp);
5564 vect_finish_stmt_generation (stmt, new_stmt, gsi);
if (vec_cvt_dest)
  {
    new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
5568 new_stmt = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
5570 new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
5571 gimple_assign_set_lhs (new_stmt, new_temp);
5572 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5575 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5582 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5584 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5585 prev_stmt_info = vinfo_for_stmt (new_stmt);
5588 vec_oprnds0.release ();
5589 vec_oprnds1.release ();
5590 vec_oprnds2.release ();
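/* Sketch of the POINTER_DIFF_EXPR handling above: the subtraction is
   done on vectors of unsigned elements and the signed result is then
   produced by a view-conversion, roughly

     tmp  = MINUS_EXPR <vp0, vp1>;                    unsigned VECTYPE
     vres = VIEW_CONVERT_EXPR <vectype_out> (tmp);  */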
5595 /* A helper function to ensure data reference DR's base alignment. */
5598 ensure_base_align (struct data_reference *dr)
5603 if (DR_VECT_AUX (dr)->base_misaligned)
5605 tree base_decl = DR_VECT_AUX (dr)->base_decl;
5607 unsigned int align_base_to = DR_TARGET_ALIGNMENT (dr) * BITS_PER_UNIT;
5609 if (decl_in_symtab_p (base_decl))
5610 symtab_node::get (base_decl)->increase_alignment (align_base_to);
5613 SET_DECL_ALIGN (base_decl, align_base_to);
5614 DECL_USER_ALIGN (base_decl) = 1;
5616 DR_VECT_AUX (dr)->base_misaligned = false;
5621 /* Function get_group_alias_ptr_type.
5623 Return the alias type for the group starting at FIRST_STMT. */
5626 get_group_alias_ptr_type (gimple *first_stmt)
5628 struct data_reference *first_dr, *next_dr;
5631 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5632 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
5635 next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
5636 if (get_alias_set (DR_REF (first_dr))
5637 != get_alias_set (DR_REF (next_dr)))
5639 if (dump_enabled_p ())
5640 dump_printf_loc (MSG_NOTE, vect_location,
5641 "conflicting alias set types.\n");
5642 return ptr_type_node;
5644 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5646 return reference_alias_ptr_type (DR_REF (first_dr));
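/* E.g. (illustrative) if an interleaving group stores both p->i (int)
   and p->f (float), the members have different alias sets, so the
   function falls back to ptr_type_node (alias set zero) for the vector
   references covering the whole group.  */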
5650 /* Function vectorizable_store.
Check if STMT defines a non scalar data-ref (array/pointer/structure) that
can be vectorized.
5654 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5655 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5656 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5659 vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
5665 tree vec_oprnd = NULL_TREE;
5666 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5667 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5669 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5670 struct loop *loop = NULL;
5671 machine_mode vec_mode;
5673 enum dr_alignment_support alignment_support_scheme;
5675 enum vect_def_type dt;
5676 stmt_vec_info prev_stmt_info = NULL;
5677 tree dataref_ptr = NULL_TREE;
5678 tree dataref_offset = NULL_TREE;
5679 gimple *ptr_incr = NULL;
5682 gimple *next_stmt, *first_stmt;
5684 unsigned int group_size, i;
5685 vec<tree> oprnds = vNULL;
5686 vec<tree> result_chain = vNULL;
5688 tree offset = NULL_TREE;
5689 vec<tree> vec_oprnds = vNULL;
5690 bool slp = (slp_node != NULL);
5691 unsigned int vec_num;
5692 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5693 vec_info *vinfo = stmt_info->vinfo;
5695 gather_scatter_info gs_info;
5696 enum vect_def_type scatter_src_dt = vect_unknown_def_type;
5699 vec_load_store_type vls_type;
5702 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5705 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5709 /* Is vectorizable store? */
5711 if (!is_gimple_assign (stmt))
5714 scalar_dest = gimple_assign_lhs (stmt);
5715 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5716 && is_pattern_stmt_p (stmt_info))
5717 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5718 if (TREE_CODE (scalar_dest) != ARRAY_REF
5719 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5720 && TREE_CODE (scalar_dest) != INDIRECT_REF
5721 && TREE_CODE (scalar_dest) != COMPONENT_REF
5722 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5723 && TREE_CODE (scalar_dest) != REALPART_EXPR
5724 && TREE_CODE (scalar_dest) != MEM_REF)
5727 /* Cannot have hybrid store SLP -- that would mean storing to the
5728 same location twice. */
5729 gcc_assert (slp == PURE_SLP_STMT (stmt_info));
5731 gcc_assert (gimple_assign_single_p (stmt));
5733 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
5734 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5738 loop = LOOP_VINFO_LOOP (loop_vinfo);
5739 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5744 /* Multiple types in SLP are handled by creating the appropriate number of
vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
case of SLP.  */
5750 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5752 gcc_assert (ncopies >= 1);
5754 /* FORNOW. This restriction should be relaxed. */
5755 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5757 if (dump_enabled_p ())
5758 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5759 "multiple types in nested loop.\n");
5763 op = gimple_assign_rhs1 (stmt);
5765 /* In the case this is a store from a constant make sure
5766 native_encode_expr can handle it. */
5767 if (CONSTANT_CLASS_P (op) && native_encode_expr (op, NULL, 64) == 0)
5770 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype))
5772 if (dump_enabled_p ())
5773 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5774 "use not simple.\n");
5778 if (dt == vect_constant_def || dt == vect_external_def)
5779 vls_type = VLS_STORE_INVARIANT;
5781 vls_type = VLS_STORE;
5783 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
5786 elem_type = TREE_TYPE (vectype);
5787 vec_mode = TYPE_MODE (vectype);
/* FORNOW.  In some cases we can vectorize even if the data-type is not
   supported (e.g. array initialization with 0).  */
5791 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5794 if (!STMT_VINFO_DATA_REF (stmt_info))
5797 vect_memory_access_type memory_access_type;
5798 if (!get_load_store_type (stmt, vectype, slp, vls_type, ncopies,
5799 &memory_access_type, &gs_info))
5802 if (!vec_stmt) /* transformation not required. */
5804 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
5805 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5806 /* The SLP costs are calculated during SLP analysis. */
5807 if (!PURE_SLP_STMT (stmt_info))
5808 vect_model_store_cost (stmt_info, ncopies, memory_access_type, dt,
5812 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
5816 ensure_base_align (dr);
5818 if (memory_access_type == VMAT_GATHER_SCATTER)
5820 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
5821 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
5822 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5823 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
5824 edge pe = loop_preheader_edge (loop);
5827 enum { NARROW, NONE, WIDEN } modifier;
5828 int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
5830 if (nunits == (unsigned int) scatter_off_nunits)
5832 else if (nunits == (unsigned int) scatter_off_nunits / 2)
5836 vec_perm_builder sel (scatter_off_nunits, scatter_off_nunits, 1);
5837 for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
5838 sel.quick_push (i | nunits);
5840 vec_perm_indices indices (sel, 1, scatter_off_nunits);
5841 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
5843 gcc_assert (perm_mask != NULL_TREE);
5845 else if (nunits == (unsigned int) scatter_off_nunits * 2)
5849 vec_perm_builder sel (nunits, nunits, 1);
5850 for (i = 0; i < (unsigned int) nunits; ++i)
5851 sel.quick_push (i | scatter_off_nunits);
5853 vec_perm_indices indices (sel, 2, nunits);
5854 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
5855 gcc_assert (perm_mask != NULL_TREE);
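/* E.g. (hypothetical widths) in the NARROW case with nunits == 8 and
   scatter_off_nunits == 4, the selector built above is
   { 4, 5, 6, 7, 12, 13, 14, 15 }; applied to the duplicated source
   vector it extracts the high half, which the odd-numbered copies
   scatter using a fresh offset vector.  */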
5861 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
5862 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5863 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5864 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5865 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5866 scaletype = TREE_VALUE (arglist);
5868 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
5869 && TREE_CODE (rettype) == VOID_TYPE);
5871 ptr = fold_convert (ptrtype, gs_info.base);
5872 if (!is_gimple_min_invariant (ptr))
5874 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5875 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5876 gcc_assert (!new_bb);
5879 /* Currently we support only unconditional scatter stores,
5880 so mask should be all ones. */
5881 mask = build_int_cst (masktype, -1);
5882 mask = vect_init_vector (stmt, mask, masktype, NULL);
5884 scale = build_int_cst (scaletype, gs_info.scale);
5886 prev_stmt_info = NULL;
5887 for (j = 0; j < ncopies; ++j)
5892 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
5894 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
5896 else if (modifier != NONE && (j & 1))
5898 if (modifier == WIDEN)
5901 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5902 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
5905 else if (modifier == NARROW)
5907 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
5910 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
5919 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5921 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
5925 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
5927 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
5928 == TYPE_VECTOR_SUBPARTS (srctype));
5929 var = vect_get_new_ssa_name (srctype, vect_simple_var);
5930 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
5931 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
5932 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5936 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5938 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
5939 == TYPE_VECTOR_SUBPARTS (idxtype));
5940 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
5941 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5942 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
5943 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5948 = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);
5950 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5952 if (prev_stmt_info == NULL)
5953 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5955 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5956 prev_stmt_info = vinfo_for_stmt (new_stmt);
5961 grouped_store = STMT_VINFO_GROUPED_ACCESS (stmt_info);
5964 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5965 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5966 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5968 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5971 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5973 /* We vectorize all the stmts of the interleaving group when we
5974 reach the last stmt in the group. */
5975 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5976 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5985 grouped_store = false;
/* VEC_NUM is the number of vect stmts to be created for this
   group.  */
5988 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5989 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
5990 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
5991 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5992 op = gimple_assign_rhs1 (first_stmt);
/* VEC_NUM is the number of vect stmts to be created for this
   group.  */
5997 vec_num = group_size;
5999 ref_type = get_group_alias_ptr_type (first_stmt);
6005 group_size = vec_num = 1;
6006 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
6009 if (dump_enabled_p ())
6010 dump_printf_loc (MSG_NOTE, vect_location,
6011 "transform store. ncopies = %d\n", ncopies);
6013 if (memory_access_type == VMAT_ELEMENTWISE
6014 || memory_access_type == VMAT_STRIDED_SLP)
6016 gimple_stmt_iterator incr_gsi;
6022 gimple_seq stmts = NULL;
6023 tree stride_base, stride_step, alias_off;
6027 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
6030 = fold_build_pointer_plus
6031 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
6032 size_binop (PLUS_EXPR,
6033 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
6034 convert_to_ptrofftype (DR_INIT (first_dr))));
6035 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
6037 /* For a store with loop-invariant (but other than power-of-2)
6038 stride (i.e. not a grouped access) like so:
     for (i = 0; i < n; i += stride)
       array[i] = ...;

   we generate a new induction variable and new stores from
   the components of the (vectorized) rhs:

     for (j = 0; ; j += VF*stride)
       vectemp = ...;
       tmp1 = vectemp[0];
       array[j] = tmp1;
       tmp2 = vectemp[1];
       array[j + stride] = tmp2;
       ...
6055 unsigned nstores = nunits;
6057 tree ltype = elem_type;
6058 tree lvectype = vectype;
6061 if (group_size < nunits
6062 && nunits % group_size == 0)
6064 nstores = nunits / group_size;
6066 ltype = build_vector_type (elem_type, group_size);
/* First check if the vec_extract optab doesn't support extraction
   of the vector elts directly.  */
6071 scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
6073 if (!mode_for_vector (elmode, group_size).exists (&vmode)
6074 || !VECTOR_MODE_P (vmode)
6075 || (convert_optab_handler (vec_extract_optab,
6076 TYPE_MODE (vectype), vmode)
6077 == CODE_FOR_nothing))
6079 /* Try to avoid emitting an extract of vector elements
6080 by performing the extracts using an integer type of the
6081 same size, extracting from a vector of those and then
re-interpreting it as the original vector type if
supported.  */
unsigned lsize
  = group_size * GET_MODE_BITSIZE (elmode);
6086 elmode = int_mode_for_size (lsize, 0).require ();
6087 /* If we can't construct such a vector fall back to
6088 element extracts from the original vector type and
6089 element size stores. */
6090 if (mode_for_vector (elmode,
6091 nunits / group_size).exists (&vmode)
6092 && VECTOR_MODE_P (vmode)
6093 && (convert_optab_handler (vec_extract_optab,
6095 != CODE_FOR_nothing))
6097 nstores = nunits / group_size;
6099 ltype = build_nonstandard_integer_type (lsize, 1);
6100 lvectype = build_vector_type (ltype, nstores);
6102 /* Else fall back to vector extraction anyway.
6103 Fewer stores are more important than avoiding spilling
6104 of the vector we extract from. Compared to the
6105 construction case in vectorizable_load no store-forwarding
6106 issue exists here for reasonable archs. */
6109 else if (group_size >= nunits
6110 && group_size % nunits == 0)
6117 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
6118 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6121 ivstep = stride_step;
6122 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6123 build_int_cst (TREE_TYPE (ivstep), vf));
6125 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6127 create_iv (stride_base, ivstep, NULL,
6128 loop, &incr_gsi, insert_after,
6130 incr = gsi_stmt (incr_gsi);
6131 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
6133 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
6135 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6137 prev_stmt_info = NULL;
6138 alias_off = build_int_cst (ref_type, 0);
6139 next_stmt = first_stmt;
6140 for (g = 0; g < group_size; g++)
6142 running_off = offvar;
6145 tree size = TYPE_SIZE_UNIT (ltype);
6146 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
6148 tree newoff = copy_ssa_name (running_off, NULL);
6149 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6151 vect_finish_stmt_generation (stmt, incr, gsi);
6152 running_off = newoff;
6154 unsigned int group_el = 0;
6155 unsigned HOST_WIDE_INT
6156 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
6157 for (j = 0; j < ncopies; j++)
6159 /* We've set op and dt above, from gimple_assign_rhs1(stmt),
6160 and first_stmt == stmt. */
6165 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
6167 vec_oprnd = vec_oprnds[0];
6171 gcc_assert (gimple_assign_single_p (next_stmt));
6172 op = gimple_assign_rhs1 (next_stmt);
6173 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6179 vec_oprnd = vec_oprnds[j];
6182 vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
6183 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
6186 /* Pun the vector to extract from if necessary. */
6187 if (lvectype != vectype)
6189 tree tem = make_ssa_name (lvectype);
6191 = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
6192 lvectype, vec_oprnd));
6193 vect_finish_stmt_generation (stmt, pun, gsi);
6196 for (i = 0; i < nstores; i++)
6198 tree newref, newoff;
6199 gimple *incr, *assign;
6200 tree size = TYPE_SIZE (ltype);
6201 /* Extract the i'th component. */
6202 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
6203 bitsize_int (i), size);
6204 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
6207 elem = force_gimple_operand_gsi (gsi, elem, true,
6211 tree this_off = build_int_cst (TREE_TYPE (alias_off),
6213 newref = build2 (MEM_REF, ltype,
6214 running_off, this_off);
6216 /* And store it to *running_off. */
6217 assign = gimple_build_assign (newref, elem);
6218 vect_finish_stmt_generation (stmt, assign, gsi);
6222 || group_el == group_size)
6224 newoff = copy_ssa_name (running_off, NULL);
6225 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6226 running_off, stride_step);
6227 vect_finish_stmt_generation (stmt, incr, gsi);
6229 running_off = newoff;
6232 if (g == group_size - 1
6235 if (j == 0 && i == 0)
6236 STMT_VINFO_VEC_STMT (stmt_info)
6237 = *vec_stmt = assign;
6239 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
6240 prev_stmt_info = vinfo_for_stmt (assign);
6244 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6249 vec_oprnds.release ();
6253 auto_vec<tree> dr_chain (group_size);
6254 oprnds.create (group_size);
6256 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6257 gcc_assert (alignment_support_scheme);
/* Targets with store-lane instructions must not require explicit
   realignment.  */
6260 gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
6261 || alignment_support_scheme == dr_aligned
6262 || alignment_support_scheme == dr_unaligned_supported);
6264 if (memory_access_type == VMAT_CONTIGUOUS_DOWN
6265 || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6266 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6268 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6269 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6271 aggr_type = vectype;
6273 /* In case the vectorization factor (VF) is bigger than the number
6274 of elements that we can fit in a vectype (nunits), we have to generate
6275 more than one vector stmt - i.e., we need to "unroll" the
6276 vector stmt by a factor VF/nunits. For more details see documentation in
6277 vect_get_vec_def_for_copy_stmt. */
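/* For example (illustrative numbers only): with four elements per vector
   and VF == 8, VF/nunits == 2, so two copies of each vector store are
   generated per scalar store.  */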
6279 /* In case of interleaving (non-unit grouped access):

6281 S1:  &base + 2 = x2
6282 S2:  &base = x0
6283 S3:  &base + 1 = x1
6284 S4:  &base + 3 = x3

6286 We create vectorized stores starting from base address (the access of the
6287 first stmt in the chain (S2 in the above example), when the last store stmt
6288 of the chain (S4) is reached:

6290 VS1: &base = vx2
6291 VS2: &base + vec_size*1 = vx0
6292 VS3: &base + vec_size*2 = vx1
6293 VS4: &base + vec_size*3 = vx3
6295 Then permutation statements are generated:
6297 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
6298 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
6301 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6302 (the order of the data-refs in the output of vect_permute_store_chain
6303 corresponds to the order of scalar stmts in the interleaving chain - see
6304 the documentation of vect_permute_store_chain()).
6306 In case of both multiple types and interleaving, the above vector stores and
6307 permutation stmts are created for every copy. The result vector stmts are
6308 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
6309 STMT_VINFO_RELATED_STMT for the next copies.
6312 prev_stmt_info = NULL;
6313 for (j = 0; j < ncopies; j++)
6320 /* Get vectorized arguments for SLP_NODE. */
6321 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
6322 NULL, slp_node);
6324 vec_oprnd = vec_oprnds[0];
6328 /* For interleaved stores we collect vectorized defs for all the
6329 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
6330 used as an input to vect_permute_store_chain(), and OPRNDS as
6331 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
6333 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6334 OPRNDS are of size 1. */
6335 next_stmt = first_stmt;
6336 for (i = 0; i < group_size; i++)
6338 /* Since gaps are not supported for interleaved stores,
6339 GROUP_SIZE is the exact number of stmts in the chain.
6340 Therefore, NEXT_STMT can't be NULL_TREE. In case that
6341 there is no interleaving, GROUP_SIZE is 1, and only one
6342 iteration of the loop will be executed. */
6343 gcc_assert (next_stmt
6344 && gimple_assign_single_p (next_stmt));
6345 op = gimple_assign_rhs1 (next_stmt);
6347 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6348 dr_chain.quick_push (vec_oprnd);
6349 oprnds.quick_push (vec_oprnd);
6350 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6354 /* We should have caught mismatched types earlier.  */
6355 gcc_assert (useless_type_conversion_p (vectype,
6356 TREE_TYPE (vec_oprnd)));
6357 bool simd_lane_access_p
6358 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6359 if (simd_lane_access_p
6360 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6361 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6362 && integer_zerop (DR_OFFSET (first_dr))
6363 && integer_zerop (DR_INIT (first_dr))
6364 && alias_sets_conflict_p (get_alias_set (aggr_type),
6365 get_alias_set (TREE_TYPE (ref_type))))
6367 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6368 dataref_offset = build_int_cst (ref_type, 0);
6369 inv_p = false;
6371 else
6372 dataref_ptr
6373 = vect_create_data_ref_ptr (first_stmt, aggr_type,
6374 simd_lane_access_p ? loop : NULL,
6375 offset, &dummy, gsi, &ptr_incr,
6376 simd_lane_access_p, &inv_p);
6377 gcc_assert (bb_vinfo || !inv_p);
6381 /* For interleaved stores we created vectorized defs for all the
6382 defs stored in OPRNDS in the previous iteration (previous copy).
6383 DR_CHAIN is then used as an input to vect_permute_store_chain(),
6384 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
6386 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6387 OPRNDS are of size 1. */
6388 for (i = 0; i < group_size; i++)
6390 op = oprnds[i];
6391 vect_is_simple_use (op, vinfo, &def_stmt, &dt);
6392 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
6393 dr_chain[i] = vec_oprnd;
6394 oprnds[i] = vec_oprnd;
6396 if (dataref_offset)
6397 dataref_offset
6398 = int_const_binop (PLUS_EXPR, dataref_offset,
6399 TYPE_SIZE_UNIT (aggr_type));
6400 else
6401 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6402 TYPE_SIZE_UNIT (aggr_type));
6405 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6409 /* Combine all the vectors into an array. */
6410 vec_array = create_vector_array (vectype, vec_num);
6411 for (i = 0; i < vec_num; i++)
6413 vec_oprnd = dr_chain[i];
6414 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
6417 /* Emit:
6418 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
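/* Where the target provides them, IFN_STORE_LANES expands to an
   interleaving store-multiple instruction (e.g. ST2/ST3/ST4 on
   AArch64), which interleaves the elements of the vectors in
   VEC_ARRAY while storing them.  */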
6419 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
6420 gcall *call = gimple_build_call_internal (IFN_STORE_LANES, 1,
6422 gimple_call_set_lhs (call, data_ref);
6423 gimple_call_set_nothrow (call, true);
6425 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6433 result_chain.create (group_size);
6435 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
6439 next_stmt = first_stmt;
6440 for (i = 0; i < vec_num; i++)
6442 unsigned align, misalign;
6445 /* Bump the vector pointer. */
6446 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6450 vec_oprnd = vec_oprnds[i];
6451 else if (grouped_store)
6452 /* For grouped stores vectorized defs are interleaved in
6453 vect_permute_store_chain(). */
6454 vec_oprnd = result_chain[i];
6456 data_ref = fold_build2 (MEM_REF, vectype,
6457 dataref_ptr,
6458 dataref_offset
6459 ? dataref_offset
6460 : build_int_cst (ref_type, 0));
6461 align = DR_TARGET_ALIGNMENT (first_dr);
6462 if (aligned_access_p (first_dr))
6464 else if (DR_MISALIGNMENT (first_dr) == -1)
6466 align = dr_alignment (vect_dr_behavior (first_dr));
6468 TREE_TYPE (data_ref)
6469 = build_aligned_type (TREE_TYPE (data_ref),
6470 align * BITS_PER_UNIT);
6474 TREE_TYPE (data_ref)
6475 = build_aligned_type (TREE_TYPE (data_ref),
6476 TYPE_ALIGN (elem_type));
6477 misalign = DR_MISALIGNMENT (first_dr);
6479 if (dataref_offset == NULL_TREE
6480 && TREE_CODE (dataref_ptr) == SSA_NAME)
6481 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
6484 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6486 tree perm_mask = perm_mask_for_reverse (vectype);
6487 tree perm_dest
6488 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
6489 vectype);
6490 tree new_temp = make_ssa_name (perm_dest);
6492 /* Generate the permute statement. */
6493 gimple *perm_stmt
6494 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
6495 vec_oprnd, perm_mask);
6496 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6498 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
6499 vec_oprnd = new_temp;
6502 /* Arguments are ready. Create the new vector stmt. */
6503 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
6504 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6509 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6516 if (j == 0)
6517 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6518 else
6519 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6520 prev_stmt_info = vinfo_for_stmt (new_stmt);
6525 result_chain.release ();
6526 vec_oprnds.release ();
6528 return true;
6531 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
6532 VECTOR_CST mask. No checks are made that the target platform supports the
6533 mask, so callers may wish to test can_vec_perm_const_p separately, or use
6534 vect_gen_perm_mask_checked. */
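/* For example, for a four-element vector type, SEL = { 3, 2, 1, 0 }
   produces a mask that reverses a vector when used in a VEC_PERM_EXPR
   whose two inputs are the same vector.  */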
6536 tree
6537 vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
6539 tree mask_elt_type, mask_type;
6541 unsigned int nunits = sel.length ();
6542 gcc_checking_assert (nunits == TYPE_VECTOR_SUBPARTS (vectype));
6544 mask_elt_type = lang_hooks.types.type_for_mode
6545 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))).require (), 1);
6546 mask_type = get_vectype_for_scalar_type (mask_elt_type);
6548 tree_vector_builder mask_elts (mask_type, nunits, 1);
6549 for (unsigned int i = 0; i < nunits; ++i)
6550 mask_elts.quick_push (build_int_cst (mask_elt_type, sel[i]));
6551 return mask_elts.build ();
6554 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p,
6555 i.e. that the target supports the pattern _for arbitrary input vectors_. */
6557 tree
6558 vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
6560 gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
6561 return vect_gen_perm_mask_any (vectype, sel);
6564 /* Given vector variables X and Y that were generated for the scalar
6565 STMT, generate instructions to permute the vector elements of X and Y
6566 using permutation mask MASK_VEC, insert them at *GSI and return the
6567 permuted vector variable. */
6569 static tree
6570 permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
6571 gimple_stmt_iterator *gsi)
6573 tree vectype = TREE_TYPE (x);
6574 tree perm_dest, data_ref;
6577 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
6578 data_ref = make_ssa_name (perm_dest);
6580 /* Generate the permute statement. */
6581 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
6582 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6587 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
6588 inserting them on the loop's preheader edge.  Returns true if we
6589 were successful in doing so (and thus STMT can then be moved),
6590 otherwise returns false.  */
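/* For example, if STMT loads *p_2 and p_2 is defined inside LOOP by a
   statement whose own operands are all defined outside of LOOP, that
   definition can be moved to the preheader, after which STMT itself
   becomes hoistable.  */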
6592 static bool
6593 hoist_defs_of_uses (gimple *stmt, struct loop *loop)
6599 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6601 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6602 if (!gimple_nop_p (def_stmt)
6603 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6605 /* Make sure we don't need to recurse.  While we could do
6606 so in simple cases, when there are more complex use webs
6607 we don't have an easy way to preserve stmt order to fulfil
6608 dependencies within them.  */
6611 if (gimple_code (def_stmt) == GIMPLE_PHI)
6613 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
6615 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
6616 if (!gimple_nop_p (def_stmt2)
6617 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
6627 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6629 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6630 if (!gimple_nop_p (def_stmt)
6631 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6633 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
6634 gsi_remove (&gsi, false);
6635 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
6642 /* vectorizable_load.
6644 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
6645 can be vectorized.
6646 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6647 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
6648 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6650 static bool
6651 vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
6652 slp_tree slp_node, slp_instance slp_node_instance)
6655 tree vec_dest = NULL;
6656 tree data_ref = NULL;
6657 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6658 stmt_vec_info prev_stmt_info;
6659 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6660 struct loop *loop = NULL;
6661 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
6662 bool nested_in_vect_loop = false;
6663 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
6667 gimple *new_stmt = NULL;
6669 enum dr_alignment_support alignment_support_scheme;
6670 tree dataref_ptr = NULL_TREE;
6671 tree dataref_offset = NULL_TREE;
6672 gimple *ptr_incr = NULL;
6674 int i, j, group_size, group_gap_adj;
6675 tree msq = NULL_TREE, lsq;
6676 tree offset = NULL_TREE;
6677 tree byte_offset = NULL_TREE;
6678 tree realignment_token = NULL_TREE;
6680 vec<tree> dr_chain = vNULL;
6681 bool grouped_load = false;
6683 gimple *first_stmt_for_drptr = NULL;
6685 bool compute_in_loop = false;
6686 struct loop *at_loop;
6688 bool slp = (slp_node != NULL);
6689 bool slp_perm = false;
6690 enum tree_code code;
6691 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6694 gather_scatter_info gs_info;
6695 vec_info *vinfo = stmt_info->vinfo;
6698 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6699 return false;

6701 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6702 && ! vec_stmt)
6703 return false;
6705 /* Is vectorizable load? */
6706 if (!is_gimple_assign (stmt))
6709 scalar_dest = gimple_assign_lhs (stmt);
6710 if (TREE_CODE (scalar_dest) != SSA_NAME)
6713 code = gimple_assign_rhs_code (stmt);
6714 if (code != ARRAY_REF
6715 && code != BIT_FIELD_REF
6716 && code != INDIRECT_REF
6717 && code != COMPONENT_REF
6718 && code != IMAGPART_EXPR
6719 && code != REALPART_EXPR
6721 && TREE_CODE_CLASS (code) != tcc_declaration)
6724 if (!STMT_VINFO_DATA_REF (stmt_info))
6727 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6728 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6732 loop = LOOP_VINFO_LOOP (loop_vinfo);
6733 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6734 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6739 /* Multiple types in SLP are handled by creating the appropriate number of
6740 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6745 ncopies = vect_get_num_copies (loop_vinfo, vectype);
6747 gcc_assert (ncopies >= 1);
6749 /* FORNOW. This restriction should be relaxed. */
6750 if (nested_in_vect_loop && ncopies > 1)
6752 if (dump_enabled_p ())
6753 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6754 "multiple types in nested loop.\n");
6755 return false;
6758 /* Invalidate assumptions made by dependence analysis when vectorization
6759 on the unrolled body effectively re-orders stmts. */
6760 if (loop_vinfo
6761 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6762 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6763 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6765 if (dump_enabled_p ())
6766 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6767 "cannot perform implicit CSE when unrolling "
6768 "with negative dependence distance\n");
6769 return false;
6772 elem_type = TREE_TYPE (vectype);
6773 mode = TYPE_MODE (vectype);
6775 /* FORNOW. In some cases can vectorize even if data-type not supported
6776 (e.g. - data copies). */
6777 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
6779 if (dump_enabled_p ())
6780 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6781 "Aligned load, but unsupported type.\n");
6782 return false;
6785 /* Check if the load is a part of an interleaving chain. */
6786 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6788 grouped_load = true;
6790 gcc_assert (!nested_in_vect_loop);
6791 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
6793 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6794 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6796 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6799 /* Invalidate assumptions made by dependence analysis when vectorization
6800 on the unrolled body effectively re-orders stmts. */
6801 if (!PURE_SLP_STMT (stmt_info)
6802 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6803 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6804 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6806 if (dump_enabled_p ())
6807 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6808 "cannot perform implicit CSE when performing "
6809 "group loads with negative dependence distance\n");
6810 return false;
6813 /* Similarly when the stmt is a load that is both part of a SLP
6814 instance and a loop vectorized stmt via the same-dr mechanism
6815 we have to give up. */
6816 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6817 && (STMT_SLP_TYPE (stmt_info)
6818 != STMT_SLP_TYPE (vinfo_for_stmt
6819 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6821 if (dump_enabled_p ())
6822 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6823 "conflicting SLP types for CSEd load\n");
6824 return false;
6828 vect_memory_access_type memory_access_type;
6829 if (!get_load_store_type (stmt, vectype, slp, VLS_LOAD, ncopies,
6830 &memory_access_type, &gs_info))
6833 if (!vec_stmt) /* transformation not required. */
6836 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
6837 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6838 /* The SLP costs are calculated during SLP analysis. */
6839 if (!PURE_SLP_STMT (stmt_info))
6840 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
6846 gcc_assert (memory_access_type
6847 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
6849 if (dump_enabled_p ())
6850 dump_printf_loc (MSG_NOTE, vect_location,
6851 "transform load. ncopies = %d\n", ncopies);
6855 ensure_base_align (dr);
6857 if (memory_access_type == VMAT_GATHER_SCATTER)
6859 tree vec_oprnd0 = NULL_TREE, op;
6860 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
6861 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6862 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
6863 edge pe = loop_preheader_edge (loop);
6866 enum { NARROW, NONE, WIDEN } modifier;
6867 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
6869 if (nunits == gather_off_nunits)
6870 modifier = NONE;
6871 else if (nunits == gather_off_nunits / 2)
6873 modifier = WIDEN;
6875 vec_perm_builder sel (gather_off_nunits, gather_off_nunits, 1);
6876 for (i = 0; i < gather_off_nunits; ++i)
6877 sel.quick_push (i | nunits);
6879 vec_perm_indices indices (sel, 1, gather_off_nunits);
6880 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
6881 indices);
6883 else if (nunits == gather_off_nunits * 2)
6885 modifier = NARROW;
6887 vec_perm_builder sel (nunits, nunits, 1);
6888 for (i = 0; i < nunits; ++i)
6889 sel.quick_push (i < gather_off_nunits
6890 ? i : i + nunits - gather_off_nunits);
6892 vec_perm_indices indices (sel, 2, nunits);
6893 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
6894 ncopies *= 2;
6896 else
6897 gcc_unreachable ();
6899 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
6900 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6901 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6902 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6903 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6904 scaletype = TREE_VALUE (arglist);
6905 gcc_checking_assert (types_compatible_p (srctype, rettype));
6907 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6909 ptr = fold_convert (ptrtype, gs_info.base);
6910 if (!is_gimple_min_invariant (ptr))
6912 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6913 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6914 gcc_assert (!new_bb);
6917 /* Currently we support only unconditional gather loads,
6918 so mask should be all ones. */
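/* Whatever the mask type (integer, vector boolean or floating point),
   the cases below materialize an all-ones bit pattern of that type,
   enabling every lane of the gather.  */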
6919 if (TREE_CODE (masktype) == INTEGER_TYPE)
6920 mask = build_int_cst (masktype, -1);
6921 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6923 mask = build_int_cst (TREE_TYPE (masktype), -1);
6924 mask = build_vector_from_val (masktype, mask);
6925 mask = vect_init_vector (stmt, mask, masktype, NULL);
6927 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6929 REAL_VALUE_TYPE r;
6930 long tmp[6];
6931 for (j = 0; j < 6; ++j)
6932 tmp[j] = -1;
6933 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6934 mask = build_real (TREE_TYPE (masktype), r);
6935 mask = build_vector_from_val (masktype, mask);
6936 mask = vect_init_vector (stmt, mask, masktype, NULL);
6941 scale = build_int_cst (scaletype, gs_info.scale);
6943 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6944 merge = build_int_cst (TREE_TYPE (rettype), 0);
6945 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6947 REAL_VALUE_TYPE r;
6948 long tmp[6];
6949 for (j = 0; j < 6; ++j)
6950 tmp[j] = 0;
6951 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6952 merge = build_real (TREE_TYPE (rettype), r);
6956 merge = build_vector_from_val (rettype, merge);
6957 merge = vect_init_vector (stmt, merge, rettype, NULL);
6959 prev_stmt_info = NULL;
6960 for (j = 0; j < ncopies; ++j)
6962 if (modifier == WIDEN && (j & 1))
6963 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6964 perm_mask, stmt, gsi);
6965 else if (j == 0)
6966 op = vec_oprnd0
6967 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
6968 else
6969 op = vec_oprnd0
6970 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);
6972 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6974 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
6975 == TYPE_VECTOR_SUBPARTS (idxtype));
6976 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6977 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6978 new_stmt
6979 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6980 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6981 op = var;
6984 new_stmt
6985 = gimple_build_call (gs_info.decl, 5, merge, ptr, op, mask, scale);
6987 if (!useless_type_conversion_p (vectype, rettype))
6989 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
6990 == TYPE_VECTOR_SUBPARTS (rettype));
6991 op = vect_get_new_ssa_name (rettype, vect_simple_var);
6992 gimple_call_set_lhs (new_stmt, op);
6993 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6994 var = make_ssa_name (vec_dest);
6995 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
6996 new_stmt
6997 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6999 else
7001 var = make_ssa_name (vec_dest, new_stmt);
7002 gimple_call_set_lhs (new_stmt, var);
7005 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7007 if (modifier == NARROW)
7014 var = permute_vec_elements (prev_res, var,
7015 perm_mask, stmt, gsi);
7016 new_stmt = SSA_NAME_DEF_STMT (var);
7019 if (prev_stmt_info == NULL)
7020 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7021 else
7022 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7023 prev_stmt_info = vinfo_for_stmt (new_stmt);
7028 if (memory_access_type == VMAT_ELEMENTWISE
7029 || memory_access_type == VMAT_STRIDED_SLP)
7031 gimple_stmt_iterator incr_gsi;
7037 vec<constructor_elt, va_gc> *v = NULL;
7038 gimple_seq stmts = NULL;
7039 tree stride_base, stride_step, alias_off;
7041 gcc_assert (!nested_in_vect_loop);
7043 if (slp && grouped_load)
7045 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7046 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7047 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7048 ref_type = get_group_alias_ptr_type (first_stmt);
7050 else
7052 first_stmt = stmt;
7053 first_dr = dr;
7054 group_size = 1;
7055 ref_type = reference_alias_ptr_type (DR_REF (first_dr));

7058 stride_base
7059 = fold_build_pointer_plus
7060 (DR_BASE_ADDRESS (first_dr),
7061 size_binop (PLUS_EXPR,
7062 convert_to_ptrofftype (DR_OFFSET (first_dr)),
7063 convert_to_ptrofftype (DR_INIT (first_dr))));
7064 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
7066 /* For a load with loop-invariant (but other than power-of-2)
7067 stride (i.e. not a grouped access) like so:

7069 for (i = 0; i < n; i += stride)
7070 ... = array[i];

7072 we generate a new induction variable and new accesses to
7073 form a new vector (or vectors, depending on ncopies):

7075 for (j = 0; ; j += VF*stride)
7076 tmp1 = array[j];
7077 tmp2 = array[j + stride];
7078 ...
7079 vectemp = {tmp1, tmp2, ...}
7080 ...  */
7082 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
7083 build_int_cst (TREE_TYPE (stride_step), vf));
7085 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
7087 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
7088 loop, &incr_gsi, insert_after,
7090 incr = gsi_stmt (incr_gsi);
7091 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
7093 stride_step = force_gimple_operand (unshare_expr (stride_step),
7094 &stmts, true, NULL_TREE);
7096 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
7098 prev_stmt_info = NULL;
7099 running_off = offvar;
7100 alias_off = build_int_cst (ref_type, 0);
7101 int nloads = nunits;
7102 int lnel = 1;
7103 tree ltype = TREE_TYPE (vectype);
7104 tree lvectype = vectype;
7105 auto_vec<tree> dr_chain;
7106 if (memory_access_type == VMAT_STRIDED_SLP)
7108 if (group_size < nunits)
7110 /* First check if vec_init optab supports construction from
7111 vector elts directly. */
7112 scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
7114 if (mode_for_vector (elmode, group_size).exists (&vmode)
7115 && VECTOR_MODE_P (vmode)
7116 && (convert_optab_handler (vec_init_optab,
7117 TYPE_MODE (vectype), vmode)
7118 != CODE_FOR_nothing))
7120 nloads = nunits / group_size;
7121 lnel = group_size;
7122 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
7126 /* Otherwise avoid emitting a constructor of vector elements
7127 by performing the loads using an integer type of the same
7128 size, constructing a vector of those and then
7129 re-interpreting it as the original vector type.
7130 This avoids a huge runtime penalty due to the general
7131 inability to perform store forwarding from smaller stores
7132 to a larger load. */
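/* For example (one illustrative case): an SLP group of two ints
   loaded into a four-int vector gives lsize == 64, so each load is
   done as a 64-bit integer, nloads == 2, and the two-element integer
   vector built from the loads is view-converted back to the four-int
   vector type.  */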
7133 unsigned lsize
7134 = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
7135 elmode = int_mode_for_size (lsize, 0).require ();
7136 /* If we can't construct such a vector, fall back to
7137 element loads of the original vector type.  */
7138 if (mode_for_vector (elmode,
7139 nunits / group_size).exists (&vmode)
7140 && VECTOR_MODE_P (vmode)
7141 && (convert_optab_handler (vec_init_optab, vmode, elmode)
7142 != CODE_FOR_nothing))
7144 nloads = nunits / group_size;
7145 lnel = group_size;
7146 ltype = build_nonstandard_integer_type (lsize, 1);
7147 lvectype = build_vector_type (ltype, nloads);
7157 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
7161 /* For SLP permutation support we need to load the whole group,
7162 not only the number of vector stmts the permutation result
7163 fits in.  */
7164 if (slp_perm)
7166 ncopies = (group_size * vf + nunits - 1) / nunits;
7167 dr_chain.create (ncopies);
7169 else
7170 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7173 unsigned HOST_WIDE_INT
7174 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
7175 for (j = 0; j < ncopies; j++)
7177 if (nloads > 1)
7178 vec_alloc (v, nloads);
7179 for (i = 0; i < nloads; i++)
7181 tree this_off = build_int_cst (TREE_TYPE (alias_off),
7183 new_stmt = gimple_build_assign (make_ssa_name (ltype),
7184 build2 (MEM_REF, ltype,
7185 running_off, this_off));
7186 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7187 if (nloads > 1)
7188 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
7189 gimple_assign_lhs (new_stmt));
7191 group_el += lnel;
7192 if (! slp
7193 || group_el == group_size)
7195 tree newoff = copy_ssa_name (running_off);
7196 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
7197 running_off, stride_step);
7198 vect_finish_stmt_generation (stmt, incr, gsi);
7200 running_off = newoff;
7206 tree vec_inv = build_constructor (lvectype, v);
7207 new_temp = vect_init_vector (stmt, vec_inv, lvectype, gsi);
7208 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7209 if (lvectype != vectype)
7211 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7213 build1 (VIEW_CONVERT_EXPR,
7214 vectype, new_temp));
7215 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7222 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
7224 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7228 if (j == 0)
7229 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7230 else
7231 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7232 prev_stmt_info = vinfo_for_stmt (new_stmt);
7238 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7239 slp_node_instance, false, &n_perms);
7241 return true;
7244 if (grouped_load)
7246 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7247 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7248 /* For SLP vectorization we directly vectorize a subchain
7249 without permutation. */
7250 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7251 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7252 /* For BB vectorization always use the first stmt to base
7253 the data ref pointer on. */
7255 first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7257 /* Check if the chain of loads is already vectorized. */
7258 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
7259 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
7260 ??? But we can only do so if there is exactly one
7261 as we have no way to get at the rest. Leave the CSE
7263 ??? With the group load eventually participating
7264 in multiple different permutations (having multiple
7265 slp nodes which refer to the same group) the CSE
7266 is even wrong code. See PR56270. */
7269 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7272 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7275 /* VEC_NUM is the number of vect stmts to be created for this group. */
7278 grouped_load = false;
7279 /* For SLP permutation support we need to load the whole group,
7280 not only the number of vector stmts the permutation result
7281 fits in.  */
7282 if (slp_perm)
7284 vec_num = (group_size * vf + nunits - 1) / nunits;
7285 group_gap_adj = vf * group_size - nunits * vec_num;
7287 else
7289 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7291 = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
7295 vec_num = group_size;
7297 ref_type = get_group_alias_ptr_type (first_stmt);
7303 group_size = vec_num = 1;
7305 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
7308 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
7309 gcc_assert (alignment_support_scheme);
7310 /* Targets with load-lane instructions must not require explicit
7311 realignment.  */
7312 gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
7313 || alignment_support_scheme == dr_aligned
7314 || alignment_support_scheme == dr_unaligned_supported);
7316 /* In case the vectorization factor (VF) is bigger than the number
7317 of elements that we can fit in a vectype (nunits), we have to generate
7318 more than one vector stmt - i.e., we need to "unroll" the
7319 vector stmt by a factor VF/nunits. In doing so, we record a pointer
7320 from one copy of the vector stmt to the next, in the field
7321 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
7322 stages to find the correct vector defs to be used when vectorizing
7323 stmts that use the defs of the current stmt. The example below
7324 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
7325 need to create 4 vectorized stmts):
7327 before vectorization:
7328 RELATED_STMT VEC_STMT
7332 step 1: vectorize stmt S1:
7333 We first create the vector stmt VS1_0, and, as usual, record a
7334 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
7335 Next, we create the vector stmt VS1_1, and record a pointer to
7336 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
7337 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
7339 RELATED_STMT VEC_STMT
7340 VS1_0: vx0 = memref0 VS1_1 -
7341 VS1_1: vx1 = memref1 VS1_2 -
7342 VS1_2: vx2 = memref2 VS1_3 -
7343 VS1_3: vx3 = memref3 - -
7344 S1: x = load - VS1_0
7347 See in documentation in vect_get_vec_def_for_stmt_copy for how the
7348 information we recorded in RELATED_STMT field is used to vectorize
7351 /* In case of interleaving (non-unit grouped access):

7353 S1:  x2 = &base + 2
7354 S2:  x0 = &base
7355 S3:  x1 = &base + 1
7356 S4:  x3 = &base + 3

7358 Vectorized loads are created in the order of memory accesses
7359 starting from the access of the first stmt of the chain:

7361 VS1: vx0 = &base
7362 VS2: vx1 = &base + vec_size*1
7363 VS3: vx3 = &base + vec_size*2
7364 VS4: vx4 = &base + vec_size*3
7366 Then permutation statements are generated:
7368 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
7369 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
7372 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
7373 (the order of the data-refs in the output of vect_permute_load_chain
7374 corresponds to the order of scalar stmts in the interleaving chain - see
7375 the documentation of vect_permute_load_chain()).
7376 The generation of permutation stmts and recording them in
7377 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
7379 In case of both multiple types and interleaving, the vector loads and
7380 permutation stmts above are created for every copy. The result vector
7381 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
7382 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
7384 /* If the data reference is aligned (dr_aligned) or potentially unaligned
7385 on a target that supports unaligned accesses (dr_unaligned_supported)
7386 we generate the following code:

p = initial_addr;
indx = 0;
loop {
7390 p = p + indx * vectype_size;
vec_dest = *(p);
indx = indx + 1;
}

7395 Otherwise, the data reference is potentially unaligned on a target that
7396 does not support unaligned accesses (dr_explicit_realign_optimized) -
7397 then generate the following code, in which the data in each iteration is
7398 obtained by two vector loads, one from the previous iteration, and one
7399 from the current iteration:

7401 msq_init = *(floor(p1))
7402 p2 = initial_addr + VS - 1;
7403 realignment_token = call target_builtin;
indx = 0;
loop {
7406 p2 = p2 + indx * vectype_size
lsq = *(floor(p2))
7408 vec_dest = realign_load (msq, lsq, realignment_token)
indx = indx + 1;
msq = lsq;
}   */
7413 /* If the misalignment remains the same throughout the execution of the
7414 loop, we can create the init_addr and permutation mask at the loop
7415 preheader. Otherwise, it needs to be created inside the loop.
7416 This can only occur when vectorizing memory accesses in the inner-loop
7417 nested within an outer-loop that is being vectorized. */
7419 if (nested_in_vect_loop
7420 && (DR_STEP_ALIGNMENT (dr) % GET_MODE_SIZE (TYPE_MODE (vectype))) != 0)
7422 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
7423 compute_in_loop = true;
7426 if ((alignment_support_scheme == dr_explicit_realign_optimized
7427 || alignment_support_scheme == dr_explicit_realign)
7428 && !compute_in_loop)
7430 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
7431 alignment_support_scheme, NULL_TREE,
7433 if (alignment_support_scheme == dr_explicit_realign_optimized)
7435 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
7436 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
7443 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7444 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
7446 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7447 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
7449 aggr_type = vectype;
7451 prev_stmt_info = NULL;
7453 for (j = 0; j < ncopies; j++)
7455 /* 1. Create the vector or array pointer update chain. */
7458 bool simd_lane_access_p
7459 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
7460 if (simd_lane_access_p
7461 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
7462 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
7463 && integer_zerop (DR_OFFSET (first_dr))
7464 && integer_zerop (DR_INIT (first_dr))
7465 && alias_sets_conflict_p (get_alias_set (aggr_type),
7466 get_alias_set (TREE_TYPE (ref_type)))
7467 && (alignment_support_scheme == dr_aligned
7468 || alignment_support_scheme == dr_unaligned_supported))
7470 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
7471 dataref_offset = build_int_cst (ref_type, 0);
7474 else if (first_stmt_for_drptr
7475 && first_stmt != first_stmt_for_drptr)
7477 dataref_ptr
7478 = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
7479 at_loop, offset, &dummy, gsi,
7480 &ptr_incr, simd_lane_access_p,
7481 &inv_p, byte_offset);
7482 /* Adjust the pointer by the difference to first_stmt. */
7483 data_reference_p ptrdr
7484 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
7485 tree diff = fold_convert (sizetype,
7486 size_binop (MINUS_EXPR,
7489 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7490 stmt, diff);
7492 else
7493 dataref_ptr
7494 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
7495 offset, &dummy, gsi, &ptr_incr,
7496 simd_lane_access_p, &inv_p,
7499 else if (dataref_offset)
7500 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
7501 TYPE_SIZE_UNIT (aggr_type));
7503 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
7504 TYPE_SIZE_UNIT (aggr_type));
7506 if (grouped_load || slp_perm)
7507 dr_chain.create (vec_num);
7509 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7513 vec_array = create_vector_array (vectype, vec_num);
7515 /* Emit:
7516 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
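/* Where the target provides them, IFN_LOAD_LANES expands to a
   de-interleaving load-multiple instruction (e.g. LD2/LD3/LD4 on
   AArch64), which distributes the loaded elements across the vectors
   of VEC_ARRAY.  */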
7517 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
7518 gcall *call = gimple_build_call_internal (IFN_LOAD_LANES, 1,
7520 gimple_call_set_lhs (call, vec_array);
7521 gimple_call_set_nothrow (call, true);
7523 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7525 /* Extract each vector into an SSA_NAME. */
7526 for (i = 0; i < vec_num; i++)
7528 new_temp = read_vector_array (stmt, gsi, scalar_dest,
7530 dr_chain.quick_push (new_temp);
7533 /* Record the mapping between SSA_NAMEs and statements. */
7534 vect_record_grouped_load_vectors (stmt, dr_chain);
7538 for (i = 0; i < vec_num; i++)
7541 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7544 /* 2. Create the vector-load in the loop. */
7545 switch (alignment_support_scheme)
7547 case dr_aligned:
7548 case dr_unaligned_supported:
7550 unsigned int align, misalign;
7552 data_ref
7553 = fold_build2 (MEM_REF, vectype, dataref_ptr,
7554 dataref_offset
7555 ? dataref_offset
7556 : build_int_cst (ref_type, 0));
7557 align = DR_TARGET_ALIGNMENT (dr);
7558 if (alignment_support_scheme == dr_aligned)
7560 gcc_assert (aligned_access_p (first_dr));
7563 else if (DR_MISALIGNMENT (first_dr) == -1)
7565 align = dr_alignment (vect_dr_behavior (first_dr));
7567 TREE_TYPE (data_ref)
7568 = build_aligned_type (TREE_TYPE (data_ref),
7569 align * BITS_PER_UNIT);
7573 TREE_TYPE (data_ref)
7574 = build_aligned_type (TREE_TYPE (data_ref),
7575 TYPE_ALIGN (elem_type));
7576 misalign = DR_MISALIGNMENT (first_dr);
7578 if (dataref_offset == NULL_TREE
7579 && TREE_CODE (dataref_ptr) == SSA_NAME)
7580 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
7584 case dr_explicit_realign:
7588 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
7590 if (compute_in_loop)
7591 msq = vect_setup_realignment (first_stmt, gsi,
7593 dr_explicit_realign,
7596 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7597 ptr = copy_ssa_name (dataref_ptr);
7599 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
7600 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
7601 new_stmt = gimple_build_assign
7602 (ptr, BIT_AND_EXPR, dataref_ptr,
7604 (TREE_TYPE (dataref_ptr),
7605 -(HOST_WIDE_INT) align));
7606 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7608 = build2 (MEM_REF, vectype, ptr,
7609 build_int_cst (ref_type, 0));
7610 vec_dest = vect_create_destination_var (scalar_dest,
7612 new_stmt = gimple_build_assign (vec_dest, data_ref);
7613 new_temp = make_ssa_name (vec_dest, new_stmt);
7614 gimple_assign_set_lhs (new_stmt, new_temp);
7615 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
7616 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
7617 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7620 bump = size_binop (MULT_EXPR, vs,
7621 TYPE_SIZE_UNIT (elem_type));
7622 bump = size_binop (MINUS_EXPR, bump, size_one_node);
7623 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
7624 new_stmt = gimple_build_assign
7625 (NULL_TREE, BIT_AND_EXPR, ptr,
7627 (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
7628 ptr = copy_ssa_name (ptr, new_stmt);
7629 gimple_assign_set_lhs (new_stmt, ptr);
7630 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7632 = build2 (MEM_REF, vectype, ptr,
7633 build_int_cst (ref_type, 0));
7636 case dr_explicit_realign_optimized:
7638 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7639 new_temp = copy_ssa_name (dataref_ptr);
7641 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
7642 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
7643 new_stmt = gimple_build_assign
7644 (new_temp, BIT_AND_EXPR, dataref_ptr,
7645 build_int_cst (TREE_TYPE (dataref_ptr),
7646 -(HOST_WIDE_INT) align));
7647 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7649 = build2 (MEM_REF, vectype, new_temp,
7650 build_int_cst (ref_type, 0));
7656 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7657 new_stmt = gimple_build_assign (vec_dest, data_ref);
7658 new_temp = make_ssa_name (vec_dest, new_stmt);
7659 gimple_assign_set_lhs (new_stmt, new_temp);
7660 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7662 /* 3. Handle explicit realignment if necessary/supported.
7664 vec_dest = realign_load (msq, lsq, realignment_token) */
7665 if (alignment_support_scheme == dr_explicit_realign_optimized
7666 || alignment_support_scheme == dr_explicit_realign)
7668 lsq = gimple_assign_lhs (new_stmt);
7669 if (!realignment_token)
7670 realignment_token = dataref_ptr;
7671 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7672 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
7673 msq, lsq, realignment_token);
7674 new_temp = make_ssa_name (vec_dest, new_stmt);
7675 gimple_assign_set_lhs (new_stmt, new_temp);
7676 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7678 if (alignment_support_scheme == dr_explicit_realign_optimized)
7681 if (i == vec_num - 1 && j == ncopies - 1)
7682 add_phi_arg (phi, lsq,
7683 loop_latch_edge (containing_loop),
7689 /* 4. Handle invariant-load. */
7690 if (inv_p && !bb_vinfo)
7692 gcc_assert (!grouped_load);
7693 /* If we have versioned for aliasing or the loop doesn't
7694 have any data dependencies that would preclude this,
7695 then we are sure this is a loop invariant load and
7696 thus we can insert it on the preheader edge. */
7697 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7698 && !nested_in_vect_loop
7699 && hoist_defs_of_uses (stmt, loop))
7701 if (dump_enabled_p ())
7703 dump_printf_loc (MSG_NOTE, vect_location,
7704 "hoisting out of the vectorized "
7706 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7708 tree tem = copy_ssa_name (scalar_dest);
7709 gsi_insert_on_edge_immediate
7710 (loop_preheader_edge (loop),
7711 gimple_build_assign (tem,
7713 (gimple_assign_rhs1 (stmt))));
7714 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
7715 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7716 set_vinfo_for_stmt (new_stmt,
7717 new_stmt_vec_info (new_stmt, vinfo));
7721 gimple_stmt_iterator gsi2 = *gsi;
7723 new_temp = vect_init_vector (stmt, scalar_dest,
7725 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7729 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7731 tree perm_mask = perm_mask_for_reverse (vectype);
7732 new_temp = permute_vec_elements (new_temp, new_temp,
7733 perm_mask, stmt, gsi);
7734 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7737 /* Collect vector loads and later create their permutation in
7738 vect_transform_grouped_load (). */
7739 if (grouped_load || slp_perm)
7740 dr_chain.quick_push (new_temp);
7742 /* Store vector loads in the corresponding SLP_NODE. */
7743 if (slp && !slp_perm)
7744 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7746 /* With SLP permutation we load the gaps as well; without it
7747 we need to skip the gaps after we manage to fully load
7748 all elements.  group_gap_adj is GROUP_SIZE here.  */
7749 group_elt += nunits;
7750 if (group_gap_adj != 0 && ! slp_perm
7751 && group_elt == group_size - group_gap_adj)
7753 wide_int bump_val = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
7755 tree bump = wide_int_to_tree (sizetype, bump_val);
7756 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7761 /* Bump the vector pointer to account for a gap or for excess
7762 elements loaded for a permuted SLP load. */
7763 if (group_gap_adj != 0 && slp_perm)
7765 wide_int bump_val = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
7767 tree bump = wide_int_to_tree (sizetype, bump_val);
7768 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7773 if (slp && !slp_perm)
7774 continue;

7776 if (slp_perm)
7778 unsigned n_perms;
7779 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7780 slp_node_instance, false,
7783 dr_chain.release ();
7784 return false;
else
if (grouped_load)
7791 if (memory_access_type != VMAT_LOAD_STORE_LANES)
7792 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
7793 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
else
7797 if (j == 0)
7798 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7799 else
7800 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7801 prev_stmt_info = vinfo_for_stmt (new_stmt);
7804 dr_chain.release ();
7810 /* Function vect_is_simple_cond.
7812 Input:
7813 LOOP - the loop that is being vectorized.
7814 COND - Condition that is checked for simple use.
7816 Output:
7817 *COMP_VECTYPE - the vector type for the comparison.
7818 *DTS - The def types for the arguments of the comparison
7820 Returns whether a COND can be vectorized.  Checks whether
7821 condition operands are supportable using vect_is_simple_use.  */
7823 static bool
7824 vect_is_simple_cond (tree cond, vec_info *vinfo,
7825 tree *comp_vectype, enum vect_def_type *dts,
7829 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7832 if (TREE_CODE (cond) == SSA_NAME
7833 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
7835 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
7836 if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
7837 &dts[0], comp_vectype)
7839 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
7844 if (!COMPARISON_CLASS_P (cond))
7845 return false;
7847 lhs = TREE_OPERAND (cond, 0);
7848 rhs = TREE_OPERAND (cond, 1);
7850 if (TREE_CODE (lhs) == SSA_NAME)
7852 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
7853 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dts[0], &vectype1))
7856 else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
7857 || TREE_CODE (lhs) == FIXED_CST)
7858 dts[0] = vect_constant_def;
7862 if (TREE_CODE (rhs) == SSA_NAME)
7864 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
7865 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dts[1], &vectype2))
7868 else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
7869 || TREE_CODE (rhs) == FIXED_CST)
7870 dts[1] = vect_constant_def;
7874 if (vectype1 && vectype2
7875 && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
7878 *comp_vectype = vectype1 ? vectype1 : vectype2;
7879 /* Invariant comparison. */
7880 if (! *comp_vectype)
7882 tree scalar_type = TREE_TYPE (lhs);
7883 /* If we can widen the comparison to match vectype do so. */
7884 if (INTEGRAL_TYPE_P (scalar_type)
7885 && tree_int_cst_lt (TYPE_SIZE (scalar_type),
7886 TYPE_SIZE (TREE_TYPE (vectype))))
7887 scalar_type = build_nonstandard_integer_type
7888 (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
7889 TYPE_UNSIGNED (scalar_type));
7890 *comp_vectype = get_vectype_for_scalar_type (scalar_type);
7896 /* vectorizable_condition.
7898 Check if STMT is a conditional modify expression that can be vectorized.
7899 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7900 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
7903 When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
7904 to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
7905 else clause if it is 2).
7907 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7909 bool
7910 vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
7911 gimple **vec_stmt, tree reduc_def, int reduc_index,
7914 tree scalar_dest = NULL_TREE;
7915 tree vec_dest = NULL_TREE;
7916 tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
7917 tree then_clause, else_clause;
7918 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7919 tree comp_vectype = NULL_TREE;
7920 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7921 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
7924 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7925 enum vect_def_type dts[4]
7926 = {vect_unknown_def_type, vect_unknown_def_type,
7927 vect_unknown_def_type, vect_unknown_def_type};
7930 enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
7931 stmt_vec_info prev_stmt_info = NULL;
7933 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7934 vec<tree> vec_oprnds0 = vNULL;
7935 vec<tree> vec_oprnds1 = vNULL;
7936 vec<tree> vec_oprnds2 = vNULL;
7937 vec<tree> vec_oprnds3 = vNULL;
7939 bool masked = false;
7941 if (reduc_index && STMT_SLP_TYPE (stmt_info))
7942 return false;
7944 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
7946 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7947 return false;

7949 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7950 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7951 && reduc_def))
7952 return false;
7954 /* FORNOW: not yet supported. */
7955 if (STMT_VINFO_LIVE_P (stmt_info))
7957 if (dump_enabled_p ())
7958 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7959 "value used after loop.\n");
7960 return false;
7964 /* Is vectorizable conditional operation? */
7965 if (!is_gimple_assign (stmt))
7968 code = gimple_assign_rhs_code (stmt);
7970 if (code != COND_EXPR)
7973 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7974 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7979 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7981 gcc_assert (ncopies >= 1);
7982 if (reduc_index && ncopies > 1)
7983 return false; /* FORNOW */
7985 cond_expr = gimple_assign_rhs1 (stmt);
7986 then_clause = gimple_assign_rhs2 (stmt);
7987 else_clause = gimple_assign_rhs3 (stmt);
7989 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
7990 &comp_vectype, &dts[0], vectype)
7991 || !comp_vectype)
7992 return false;
7995 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dts[2],
7996 &vectype1))
7997 return false;
7998 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dts[3],
7999 &vectype2))
8000 return false;
8002 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
8005 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
8008 masked = !COMPARISON_CLASS_P (cond_expr);
8009 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
8011 if (vec_cmp_type == NULL_TREE)
8012 return false;
8014 cond_code = TREE_CODE (cond_expr);
8017 cond_expr0 = TREE_OPERAND (cond_expr, 0);
8018 cond_expr1 = TREE_OPERAND (cond_expr, 1);
8021 if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
8023 /* Boolean values may have another representation in vectors
8024 and therefore we prefer bit operations over comparison for
8025 them (which also works for scalar masks). We store opcodes
8026 to use in bitop1 and bitop2. Statement is vectorized as
8027 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
8028 depending on bitop1 and bitop2 arity. */
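/* For example, on boolean operands a GT_EXPR a > b becomes a & ~b:
   bitop1 == BIT_NOT_EXPR is applied to the second operand and
   bitop2 == BIT_AND_EXPR combines the result with the first.  */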
switch (cond_code)
case GT_EXPR:
8032 bitop1 = BIT_NOT_EXPR;
8033 bitop2 = BIT_AND_EXPR;
break;
case GE_EXPR:
8036 bitop1 = BIT_NOT_EXPR;
8037 bitop2 = BIT_IOR_EXPR;
break;
case LT_EXPR:
8040 bitop1 = BIT_NOT_EXPR;
8041 bitop2 = BIT_AND_EXPR;
8042 std::swap (cond_expr0, cond_expr1);
break;
case LE_EXPR:
8045 bitop1 = BIT_NOT_EXPR;
8046 bitop2 = BIT_IOR_EXPR;
8047 std::swap (cond_expr0, cond_expr1);
break;
case NE_EXPR:
8050 bitop1 = BIT_XOR_EXPR;
break;
case EQ_EXPR:
8053 bitop1 = BIT_XOR_EXPR;
8054 bitop2 = BIT_NOT_EXPR;
break;
default:
return false;
8059 cond_code = SSA_NAME;
if (!vec_stmt)
8064 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
8065 if (bitop1 != NOP_EXPR)
8067 machine_mode mode = TYPE_MODE (comp_vectype);
8070 optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
8071 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8074 if (bitop2 != NOP_EXPR)
8076 optab = optab_for_tree_code (bitop2, comp_vectype,
8078 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8082 if (expand_vec_cond_expr_p (vectype, comp_vectype,
8083 cond_code))
8085 vect_model_simple_cost (stmt_info, ncopies, dts, ndts, NULL, NULL);
8086 return true;
8088 return false;

/* Transform.  */

if (!slp_node)
8095 vec_oprnds0.create (1);
8096 vec_oprnds1.create (1);
8097 vec_oprnds2.create (1);
8098 vec_oprnds3.create (1);
8102 scalar_dest = gimple_assign_lhs (stmt);
8103 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8105 /* Handle cond expr. */
8106 for (j = 0; j < ncopies; j++)
8108 gassign *new_stmt = NULL;
8113 auto_vec<tree, 4> ops;
8114 auto_vec<vec<tree>, 4> vec_defs;
8117 ops.safe_push (cond_expr);
8120 ops.safe_push (cond_expr0);
8121 ops.safe_push (cond_expr1);
8123 ops.safe_push (then_clause);
8124 ops.safe_push (else_clause);
8125 vect_get_slp_defs (ops, slp_node, &vec_defs);
8126 vec_oprnds3 = vec_defs.pop ();
8127 vec_oprnds2 = vec_defs.pop ();
8129 vec_oprnds1 = vec_defs.pop ();
8130 vec_oprnds0 = vec_defs.pop ();
else
if (masked)
vec_cond_lhs
8138 = vect_get_vec_def_for_operand (cond_expr, stmt,
8139 comp_vectype);
8140 vect_is_simple_use (cond_expr, stmt_info->vinfo,
8141 &gtemp, &dts[0]);
else
vec_cond_lhs
8146 = vect_get_vec_def_for_operand (cond_expr0,
8147 stmt, comp_vectype);
8148 vect_is_simple_use (cond_expr0, loop_vinfo, &gtemp, &dts[0]);
vec_cond_rhs
8151 = vect_get_vec_def_for_operand (cond_expr1,
8152 stmt, comp_vectype);
8153 vect_is_simple_use (cond_expr1, loop_vinfo, &gtemp, &dts[1]);
8155 if (reduc_index == 1)
8156 vec_then_clause = reduc_def;
8159 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
8161 vect_is_simple_use (then_clause, loop_vinfo,
8162 &gtemp, &dts[2]);
8164 if (reduc_index == 2)
8165 vec_else_clause = reduc_def;
8168 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
8170 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
else
vec_cond_lhs
8177 = vect_get_vec_def_for_stmt_copy (dts[0],
8178 vec_oprnds0.pop ());
if (!masked)
vec_cond_rhs
8181 = vect_get_vec_def_for_stmt_copy (dts[1],
8182 vec_oprnds1.pop ());
8184 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
8185 vec_oprnds2.pop ());
8186 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
8187 vec_oprnds3.pop ());
8192 vec_oprnds0.quick_push (vec_cond_lhs);
8194 vec_oprnds1.quick_push (vec_cond_rhs);
8195 vec_oprnds2.quick_push (vec_then_clause);
8196 vec_oprnds3.quick_push (vec_else_clause);
8199 /* Arguments are ready. Create the new vector stmt. */
8200 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
8202 vec_then_clause = vec_oprnds2[i];
8203 vec_else_clause = vec_oprnds3[i];
8206 vec_compare = vec_cond_lhs;
8209 vec_cond_rhs = vec_oprnds1[i];
8210 if (bitop1 == NOP_EXPR)
8211 vec_compare = build2 (cond_code, vec_cmp_type,
8212 vec_cond_lhs, vec_cond_rhs);
8215 new_temp = make_ssa_name (vec_cmp_type);
8216 if (bitop1 == BIT_NOT_EXPR)
8217 new_stmt = gimple_build_assign (new_temp, bitop1,
8221 = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
8223 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8224 if (bitop2 == NOP_EXPR)
8225 vec_compare = new_temp;
8226 else if (bitop2 == BIT_NOT_EXPR)
8228 /* Instead of doing ~x ? y : z do x ? z : y. */
8229 vec_compare = new_temp;
8230 std::swap (vec_then_clause, vec_else_clause);
8234 vec_compare = make_ssa_name (vec_cmp_type);
8235 new_stmt
8236 = gimple_build_assign (vec_compare, bitop2,
8237 vec_cond_lhs, new_temp);
8238 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8242 new_temp = make_ssa_name (vec_dest);
8243 new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
8244 vec_compare, vec_then_clause,
8246 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8248 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
if (slp_node)
continue;

if (j == 0)
8255 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8256 else
8257 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8259 prev_stmt_info = vinfo_for_stmt (new_stmt);
8262 vec_oprnds0.release ();
8263 vec_oprnds1.release ();
8264 vec_oprnds2.release ();
8265 vec_oprnds3.release ();
8270 /* vectorizable_comparison.
8272 Check if STMT is a comparison expression that can be vectorized.
8273 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
8274 comparison, put it in VEC_STMT, and insert it at GSI.
8276 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
8278 static bool
8279 vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
8280 gimple **vec_stmt, tree reduc_def,
8283 tree lhs, rhs1, rhs2;
8284 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8285 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8286 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8287 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
8289 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
8290 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
8294 enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
8295 stmt_vec_info prev_stmt_info = NULL;
8297 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8298 vec<tree> vec_oprnds0 = vNULL;
8299 vec<tree> vec_oprnds1 = vNULL;
8304 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8305 return false;
8307 if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
8308 return false;
8310 mask_type = vectype;
8311 nunits = TYPE_VECTOR_SUBPARTS (vectype);
8316 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8318 gcc_assert (ncopies >= 1);
8319 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8320 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8321 && reduc_def))
8322 return false;
8324 if (STMT_VINFO_LIVE_P (stmt_info))
8326 if (dump_enabled_p ())
8327 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8328 "value used after loop.\n");
8329 return false;
8332 if (!is_gimple_assign (stmt))
8335 code = gimple_assign_rhs_code (stmt);
8337 if (TREE_CODE_CLASS (code) != tcc_comparison)
8338 return false;
8340 rhs1 = gimple_assign_rhs1 (stmt);
8341 rhs2 = gimple_assign_rhs2 (stmt);
8343 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
8344 &dts[0], &vectype1))
8347 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
8348 &dts[1], &vectype2))
8351 if (vectype1 && vectype2
8352 && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
8355 vectype = vectype1 ? vectype1 : vectype2;
8357 /* Invariant comparison. */
8360 vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
8361 if (TYPE_VECTOR_SUBPARTS (vectype) != nunits)
8364 else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
8367 /* Can't compare mask and non-mask types. */
8368 if (vectype1 && vectype2
8369 && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
8372 /* Boolean values may have another representation in vectors
8373 and therefore we prefer bit operations over comparison for
8374 them (which also works for scalar masks). We store opcodes
8375 to use in bitop1 and bitop2. Statement is vectorized as
8376 BITOP2 (rhs1 BITOP1 rhs2) or
8377 rhs1 BITOP2 (BITOP1 rhs2)
8378 depending on bitop1 and bitop2 arity. */
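/* For example, a == b on masks becomes ~(a ^ b) (bitop1 == BIT_XOR_EXPR,
   bitop2 == BIT_NOT_EXPR), while a != b needs only the XOR.  */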
8379 if (VECTOR_BOOLEAN_TYPE_P (vectype))
8381 if (code == GT_EXPR)
8383 bitop1 = BIT_NOT_EXPR;
8384 bitop2 = BIT_AND_EXPR;
8386 else if (code == GE_EXPR)
8388 bitop1 = BIT_NOT_EXPR;
8389 bitop2 = BIT_IOR_EXPR;
8391 else if (code == LT_EXPR)
8393 bitop1 = BIT_NOT_EXPR;
8394 bitop2 = BIT_AND_EXPR;
8395 std::swap (rhs1, rhs2);
8396 std::swap (dts[0], dts[1]);
8398 else if (code == LE_EXPR)
8400 bitop1 = BIT_NOT_EXPR;
8401 bitop2 = BIT_IOR_EXPR;
8402 std::swap (rhs1, rhs2);
8403 std::swap (dts[0], dts[1]);
8407 bitop1 = BIT_XOR_EXPR;
8408 if (code == EQ_EXPR)
8409 bitop2 = BIT_NOT_EXPR;
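  /* For example, with boolean operands a and b, the scalar comparison
     a < b is true exactly when a is false and b is true, so after the
     operand swap above it is emitted as b & ~a (rhs1 BITOP2 (BITOP1
     rhs2)); similarly a == b becomes ~(a ^ b), and a != b a plain
     a ^ b.  */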
  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
      vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
			      dts, ndts, NULL, NULL);
      if (bitop1 == NOP_EXPR)
	return expand_vec_cmp_expr_p (vectype, mask_type, code);
      else
	{
	  machine_mode mode = TYPE_MODE (vectype);
	  optab optab;

	  optab = optab_for_tree_code (bitop1, vectype, optab_default);
	  if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
	    return false;

	  if (bitop2 != NOP_EXPR)
	    {
	      optab = optab_for_tree_code (bitop2, vectype, optab_default);
	      if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
		return false;
	    }
	  return true;
	}
    }

  /* Transform.  */
  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
    }

  /* Handle def.  */
  lhs = gimple_assign_lhs (stmt);
  mask = vect_create_destination_var (lhs, mask_type);

  /* Handle cmp expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
	{
	  if (slp_node)
	    {
	      auto_vec<tree, 2> ops;
	      auto_vec<vec<tree>, 2> vec_defs;

	      ops.safe_push (rhs1);
	      ops.safe_push (rhs2);
	      vect_get_slp_defs (ops, slp_node, &vec_defs);
	      vec_oprnds1 = vec_defs.pop ();
	      vec_oprnds0 = vec_defs.pop ();
	    }
	  else
	    {
	      vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
	      vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
	    }
	}
      else
	{
	  vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
						     vec_oprnds0.pop ());
	  vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
						     vec_oprnds1.pop ());
	}

      if (!slp_node)
	{
	  vec_oprnds0.quick_push (vec_rhs1);
	  vec_oprnds1.quick_push (vec_rhs2);
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
	{
	  vec_rhs2 = vec_oprnds1[i];

	  new_temp = make_ssa_name (mask);
	  if (bitop1 == NOP_EXPR)
	    {
	      new_stmt = gimple_build_assign (new_temp, code,
					      vec_rhs1, vec_rhs2);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	    }
	  else
	    {
	      if (bitop1 == BIT_NOT_EXPR)
		new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
	      else
		new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
						vec_rhs2);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (bitop2 != NOP_EXPR)
		{
		  tree res = make_ssa_name (mask);
		  if (bitop2 == BIT_NOT_EXPR)
		    new_stmt = gimple_build_assign (res, bitop2, new_temp);
		  else
		    new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
						    new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	    }
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
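/* For example (illustrative only), a loop body containing

     mask[i] = a[i] < b[i];

   is handled above by emitting one vector comparison per vector copy,
   producing a boolean vector whose representation (an integer bitmask
   or a vector of 0/-1 elements) is chosen by the target.  */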
/* If SLP_NODE is nonnull, return true if vectorizable_live_operation
   can handle all live statements in the node.  Otherwise return true
   if STMT is not live or if vectorizable_live_operation can handle it.
   GSI and VEC_STMT are as for vectorizable_live_operation.  */

static bool
can_vectorize_live_stmts (gimple *stmt, gimple_stmt_iterator *gsi,
			  slp_tree slp_node, gimple **vec_stmt)
{
  if (slp_node)
    {
      gimple *slp_stmt;
      unsigned int i;
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
	{
	  stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
	  if (STMT_VINFO_LIVE_P (slp_stmt_info)
	      && !vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
					       vec_stmt))
	    return false;
	}
    }
  else if (STMT_VINFO_LIVE_P (vinfo_for_stmt (stmt))
	   && !vectorizable_live_operation (stmt, gsi, slp_node, -1, vec_stmt))
    return false;

  return true;
}
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
		   slp_instance node_instance)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  gimple *pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: stmt has volatile operands\n");

      return false;
    }
  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     A pattern statement needs to be analyzed instead of the original
     statement if the original statement is not relevant.  Otherwise, we
     analyze both statements.  In basic blocks we are called from some SLP
     instance traversal, so don't analyze pattern stmts instead; the pattern
     stmts are already part of the SLP instance.  */
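  /* For example (illustrative only), in

       for (i = 0; i < n; i++)
	 a[i] = b[i] + 1;

     the increment i = i + 1 and the exit test i < n are loop-control
     statements with no vector counterpart; only the load, the add and
     the store are relevant here.  */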
  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && pattern_stmt
	  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
	{
	  /* Analyze PATTERN_STMT instead of the original stmt.  */
	  stmt = pattern_stmt;
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "==> examining pattern statement: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

	  return true;
	}
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	   && node == NULL
	   && pattern_stmt
	   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "==> examining pattern statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node,
			      node_instance))
	return false;
    }

  if (is_pattern_stmt_p (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *pattern_def_stmt = gsi_stmt (si);
	  if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
	    {
	      /* Analyze the def stmt of STMT if it is a pattern stmt.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "==> examining pattern def statement: ");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
		}

	      if (!vect_analyze_stmt (pattern_def_stmt,
				      need_to_vectorize, node, node_instance))
		return false;
	    }
	}
    }

  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo
		  && (relevance == vect_used_in_outer
		      || relevance == vect_used_in_outer_by_reduction
		      || relevance == vect_used_by_reduction
		      || relevance == vect_unused_in_scope
		      || relevance == vect_used_only_live));
      break;

    case vect_induction_def:
      gcc_assert (!bb_vinfo);
      break;

    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
		  || (is_gimple_call (stmt)
		      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;
    }

  if (PURE_SLP_STMT (stmt_info) && !node)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "handled only by SLP analysis\n");
      return true;
    }

  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
	  || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	  || vectorizable_conversion (stmt, NULL, NULL, node)
	  || vectorizable_shift (stmt, NULL, NULL, node)
	  || vectorizable_operation (stmt, NULL, NULL, node)
	  || vectorizable_assignment (stmt, NULL, NULL, node)
	  || vectorizable_load (stmt, NULL, NULL, node, NULL)
	  || vectorizable_call (stmt, NULL, NULL, node)
	  || vectorizable_store (stmt, NULL, NULL, node)
	  || vectorizable_reduction (stmt, NULL, NULL, node, node_instance)
	  || vectorizable_induction (stmt, NULL, NULL, node)
	  || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
	  || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
  else
    {
      if (bb_vinfo)
	ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	      || vectorizable_conversion (stmt, NULL, NULL, node)
	      || vectorizable_shift (stmt, NULL, NULL, node)
	      || vectorizable_operation (stmt, NULL, NULL, node)
	      || vectorizable_assignment (stmt, NULL, NULL, node)
	      || vectorizable_load (stmt, NULL, NULL, node, NULL)
	      || vectorizable_call (stmt, NULL, NULL, node)
	      || vectorizable_store (stmt, NULL, NULL, node)
	      || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
	      || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
    }

  if (!ok)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: relevant stmt not ");
	  dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }

  if (bb_vinfo)
    return true;

  /* Stmts that are (also) "live" (i.e. used outside the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && !can_vectorize_live_stmts (stmt, NULL, node, NULL))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: live stmt not supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at GSI.  */
bool
vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
		     bool *grouped_store, slp_tree slp_node,
		     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple *vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;

  gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
  gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
				slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
	{
	  /* In case of interleaving, the whole chain is vectorized when the
	     last store in the chain is reached.  Store stmts before the last
	     one are skipped, and their vec_stmt_info shouldn't be freed
	     meanwhile.  */
	  *grouped_store = true;
	  if (STMT_VINFO_VEC_STMT (stmt_info))
	    is_store = true;
	}
      else
	is_store = true;
      break;
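      /* For example (illustrative only), for the interleaved pair
	 a[2*i] = x; a[2*i+1] = y; nothing is emitted when the first
	 store is reached; once the last store of the chain is seen, the
	 whole group is vectorized at once and *GROUPED_STORE tells the
	 caller to remove the remaining scalar stores of the group.  */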
    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case comparison_vec_info_type:
      done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
	is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node,
				     slp_node_instance);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "stmt not supported.\n");
	  gcc_unreachable ();
	}
    }

  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
		&& STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
				  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
	  || STMT_VINFO_RELEVANT (stmt_info)
	     == vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
				 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple *exit_phi;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
	 (to be used when vectorizing outer-loop stmts that use the DEF of
	 STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
	scalar_dest = PHI_RESULT (stmt);
      else
	scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
	{
	  if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
	    {
	      exit_phi = USE_STMT (use_p);
	      STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
	    }
	}
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = can_vectorize_live_stmts (stmt, gsi, slp_node, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}
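/* Illustrative sketch only (simplified; not the actual GCC driver):
   a transform-phase caller typically invokes vect_transform_stmt on each
   statement and removes the scalar stores once a grouped chain has been
   emitted:

     bool grouped_store = false;
     bool is_store = vect_transform_stmt (stmt, &gsi, &grouped_store,
					  slp_node, slp_instance);
     if (is_store && grouped_store)
       vect_remove_stores (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
*/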
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
	next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
  STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  STMT_VINFO_NUM_SLP_USES (res) = 0;

  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
/* Create the global vector for stmt_vec_info structs.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}


/* Free the global vector of stmt_vec_info structs.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free the pattern's stmt_vec_info and the def stmts'
     stmt_vec_info too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
	= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
	{
	  gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
	  gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
	  gimple_set_bb (patt_stmt, NULL);
	  tree lhs = gimple_get_lhs (patt_stmt);
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    release_ssa_name (lhs);
	  if (seq)
	    {
	      gimple_stmt_iterator si;
	      for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
		{
		  gimple *seq_stmt = gsi_stmt (si);
		  gimple_set_bb (seq_stmt, NULL);
		  lhs = gimple_get_lhs (seq_stmt);
		  if (lhs && TREE_CODE (lhs) == SSA_NAME)
		    release_ssa_name (lhs);
		  free_stmt_vec_info (seq_stmt);
		}
	    }
	  free_stmt_vec_info (patt_stmt);
	}
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  tree orig_scalar_type = scalar_type;
  scalar_mode inner_mode;
  machine_mode simd_mode;
  int nunits;
  tree vectype;

  if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
      && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
    return NULL_TREE;

  unsigned int nbytes = GET_MODE_SIZE (inner_mode);

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
	  || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
						  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
	   && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
						  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     look up a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else if (!mode_for_vector (inner_mode, size / nbytes).exists (&simd_mode))
    return NULL_TREE;
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  /* NOTE: nunits == 1 is allowed to support single element vector types.  */
  if (nunits < 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  /* Re-attach the address-space qualifier if we canonicalized the scalar
     type.  */
  if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
    return build_qualified_type
	     (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));

  return vectype;
}
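/* For example (illustrative only), with SCALAR_TYPE int (4 bytes on
   typical targets) and SIZE 16, nunits is 16 / 4 = 4, so the result is
   a 4-element integer vector type, provided the target supports a
   suitable vector mode.  */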
unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
						  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}
/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of the specified SCALAR_TYPE as supported by the target.  */

tree
get_mask_type_for_scalar_type (tree scalar_type)
{
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  if (!vectype)
    return NULL;

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
				  current_vector_size);
}
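/* For example (illustrative only), comparing two vectors of ints
   elementwise yields a boolean vector with the same number of elements;
   whether it is an integer bitmask or a vector of 0/-1 values depends
   on the target.  */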
/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of the same size as
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
	   (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
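/* For example (illustrative only), for a 16-byte VECTOR_TYPE and
   SCALAR_TYPE short (2 bytes), the result is an 8-element vector of
   shorts: both types occupy the same number of bytes, which is what
   mixed-width operations such as widening multiplies rely on.  */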
/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.
   Output:
   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt)
{
  *def_stmt = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not ssa-name.\n");
      return false;
    }

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
    }

  if (! vect_stmt_in_region_p (vinfo, *def_stmt))
    *dt = vect_external_def;
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
      switch (*dt)
	{
	case vect_uninitialized_def:
	  dump_printf (MSG_NOTE, "uninitialized\n");
	  break;
	case vect_constant_def:
	  dump_printf (MSG_NOTE, "constant\n");
	  break;
	case vect_external_def:
	  dump_printf (MSG_NOTE, "external\n");
	  break;
	case vect_internal_def:
	  dump_printf (MSG_NOTE, "internal\n");
	  break;
	case vect_induction_def:
	  dump_printf (MSG_NOTE, "induction\n");
	  break;
	case vect_reduction_def:
	  dump_printf (MSG_NOTE, "reduction\n");
	  break;
	case vect_double_reduction_def:
	  dump_printf (MSG_NOTE, "double reduction\n");
	  break;
	case vect_nested_cycle:
	  dump_printf (MSG_NOTE, "nested cycle\n");
	  break;
	case vect_unknown_def_type:
	  dump_printf (MSG_NOTE, "unknown\n");
	  break;
	}
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Unsupported pattern.\n");
      return false;
    }

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      break;
    default:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unsupported defining stmt:\n");
      return false;
    }

  return true;
}
/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
   is responsible for computing the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && !STMT_VINFO_RELEVANT (stmt_info)
	  && !STMT_VINFO_LIVE_P (stmt_info))
	stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
	   || *dt == vect_constant_def
	   || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */

bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
				tree vectype_out, tree vectype_in,
				enum tree_code *code1, enum tree_code *code2,
				int *multi_step_cvt,
				vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
	 two vectors (because the widened results do not fit into one vector).
	 The generated vector results would normally be expected to be
	 generated in the same order as in the original scalar computation,
	 i.e. if 8 results are generated in each vector iteration, they are
	 to be organized as follows:
		vect1: [res1,res2,res3,res4],
		vect2: [res5,res6,res7,res8].

	 However, in the special case that the result of the widening
	 operation is used in a reduction computation only, the order doesn't
	 matter (because when vectorizing a reduction we change the order of
	 the computation).  Some targets can take advantage of this and
	 generate more efficient code.  For example, targets like Altivec,
	 that support widen_mult using a sequence of {mult_even,mult_odd}
	 generate the following vectors:
		vect1: [res1,res3,res5,res7],
		vect2: [res2,res4,res6,res8].

	 When vectorizing outer-loops, we execute the inner-loop sequentially
	 (each vectorized inner-loop iteration contributes to VF outer-loop
	 iterations in parallel).  We therefore don't allow changing the
	 order of the computation in the inner-loop during outer-loop
	 vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
	 widen and then contract again, e.g. (short)((int)x * y >> 8).
	 Normally, pack_trunc performs an even/odd permute, whereas the
	 repack from an even/odd expansion would be an interleave, which
	 would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
	 on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
	 are properly set up for the caller.  If we fail, we'll continue with
	 a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
	  && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
	  && !nested_in_vect_loop_p (vect_loop, stmt)
	  && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
					     stmt, vectype_out, vectype_in,
					     code1, code2, multi_step_cvt,
					     interm_types))
	{
	  /* Elements in a vector with vect_used_by_reduction property cannot
	     be reordered if the use chain with this property does not have
	     the same operation.  One such example is s += a * b, where
	     elements in a and b cannot be reordered.  Here we check if the
	     vector defined by STMT is only directly used in the reduction
	     statement.  */
	  tree lhs = gimple_assign_lhs (stmt);
	  use_operand_p dummy;
	  gimple *use_stmt;
	  stmt_vec_info use_stmt_info = NULL;
	  if (single_imm_use (lhs, &dummy, &use_stmt)
	      && (use_stmt_info = vinfo_for_stmt (use_stmt))
	      && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
	    return true;
	}
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case DOT_PROD_EXPR:
      c1 = DOT_PROD_EXPR;
      c2 = DOT_PROD_EXPR;
      break;

    case SAD_EXPR:
      c1 = SAD_EXPR;
      c2 = SAD_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
	 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
	 computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from the output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add an additional check on the number of elements.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
	    || (TYPE_VECTOR_SUBPARTS (vectype) / 2
		== TYPE_VECTOR_SUBPARTS (wide_vectype)));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
	{
	  intermediate_type
	    = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) / 2,
				       current_vector_size);
	  if (intermediate_mode != TYPE_MODE (intermediate_type))
	    return false;
	}
      else
	intermediate_type
	  = lang_hooks.types.type_for_mode (intermediate_mode,
					    TYPE_UNSIGNED (prev_type));

      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
	  || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode2].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (optab3, intermediate_mode))
	      == CODE_FOR_nothing)
	  || ((icode2 = optab_handler (optab4, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
	  && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
	return (!VECTOR_BOOLEAN_TYPE_P (vectype)
		|| (TYPE_VECTOR_SUBPARTS (intermediate_type) / 2
		    == TYPE_VECTOR_SUBPARTS (wide_vectype)));

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}
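/* For example (illustrative only), widening a V8HI of shorts to ints
   with 16-byte vectors produces two V4SI results via the
   VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR pair, while char->int needs an
   intermediate short step: *MULTI_STEP_CVT becomes 1 and the short
   vector type is recorded in *INTERM_TYPES.  */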
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */

bool
supportable_narrowing_operation (enum tree_code code,
				 tree vectype_out, tree vectype_in,
				 enum tree_code *code1, int *multi_step_cvt,
				 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
	 tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from the output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add an additional check on the number of elements.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
	    || (TYPE_VECTOR_SUBPARTS (vectype) * 2
		== TYPE_VECTOR_SUBPARTS (narrow_vectype)));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  prev_type = vectype;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
	= lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
	= optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
	  && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
	  && insn_data[icode1].operand[0].mode
	     == insn_data[icode2].operand[0].mode)
	{
	  uns = false;
	  optab1 = interm_optab;
	  icode1 = icode2;
	}
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the narrowing sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
	{
	  intermediate_type
	    = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) * 2,
				       current_vector_size);
	  if (intermediate_mode != TYPE_MODE (intermediate_type))
	    return false;
	}
      else
	intermediate_type
	  = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
	= optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
			       optab_default);
      if (!interm_optab
	  || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (interm_optab, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
	return (!VECTOR_BOOLEAN_TYPE_P (vectype)
		|| (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2
		    == TYPE_VECTOR_SUBPARTS (narrow_vectype)));

      prev_mode = intermediate_mode;
      prev_type = intermediate_type;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}
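/* For example (illustrative only), narrowing int to char packs four
   V4SI inputs into one V16QI result in two steps (int->short->char):
   *MULTI_STEP_CVT becomes 1 and the intermediate short vector type is
   recorded in *INTERM_TYPES.  */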