/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "util/Utils.h"
#include "ir/InternalType.h"
#include "ir/Shape.h"
#include "ir/operation/AvgPool2D.h"
#include "ir/operation/MaxPool2D.h"
#include "util/ShapeInference.h"
#include "util/logging.h"

#include <algorithm>
#include <cassert>
#include <cmath>
#include <limits>
#include <stdexcept>
#include <string>
#include <type_traits>

namespace onert
{
namespace shape_inference
{
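// Integer ceiling division, e.g. ceil_div(7, 2) == 4 and ceil_div(6, 2) == 3.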
template <typename T, typename U>
typename std::enable_if<std::is_integral<T>::value && std::is_integral<U>::value,
                        typename std::common_type<T, U>::type>::type
ceil_div(T dividend, U divisor)
{
  assert(dividend > 0 && divisor > 0 && "this implementation is for positive numbers only");
  return (dividend + divisor - 1) / divisor;
}
// Calculate the result of broadcasting two shapes
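// e.g. broadcasting [3, 1, 5] with [4, 1] gives [3, 4, 5]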
ir::Shape broadcastShapes(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape)
{
  ir::Shape out_shape;
  auto max_rank = std::max(lhs_shape.rank(), rhs_shape.rank());

  for (int idx = 0; idx < max_rank; ++idx)
  {
    // Go over operand dimensions from right to left
    int lhs_idx = lhs_shape.rank() - idx - 1;
    int rhs_idx = rhs_shape.rank() - idx - 1;

    int32_t lhs_dim = lhs_idx >= 0 ? lhs_shape.dim(lhs_idx) : 1;
    int32_t rhs_dim = rhs_idx >= 0 ? rhs_shape.dim(rhs_idx) : 1;

    if (lhs_dim != 1 && rhs_dim != 1 && lhs_dim != rhs_dim)
      throw std::runtime_error("Incompatible shapes for broadcast");

    out_shape.prepend(std::max(lhs_dim, rhs_dim));
  }

  return out_shape;
}
// Calculate output height and width of a convolution-like operation
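// e.g. a 5x5 input with a 3x3 kernel and stride 1 gives 5x5 output for SAME padding
// and 3x3 output for VALID padding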
std::pair<int, int> calcConvLikeHeightAndWidth(const int in_h, const int in_w, const int ker_h,
                                               const int ker_w, const ir::Padding pad,
                                               const ir::Stride stride)
{
  int32_t out_h = 0, out_w = 0;

  switch (pad.type)
  {
    case ir::PaddingType::SAME:
      out_h = ceil_div(in_h, stride.vertical);
      out_w = ceil_div(in_w, stride.horizontal);
      break;
    case ir::PaddingType::VALID:
      out_h = ceil_div(in_h - ker_h + 1, stride.vertical);
      out_w = ceil_div(in_w - ker_w + 1, stride.horizontal);
      break;
    case ir::PaddingType::EXPLICIT:
      out_h = (in_h + pad.param.top + pad.param.bottom - ker_h) / stride.vertical + 1;
      out_w = (in_w + pad.param.left + pad.param.right - ker_w) / stride.horizontal + 1;
      break;
    default:
      assert(false);
  }

  return {out_h, out_w};
}
ir::Shape inferEltwiseShape(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape)
{
  return broadcastShapes(lhs_shape, rhs_shape);
}
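// ArgMax drops the reduced axis, e.g. input [2, 3, 4] with axis == 1 -> [2, 4]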
ir::Shape inferArgMaxShape(const ir::Shape &input_shape, int axis, int rank)
{
  ir::Shape out_shape;
  for (int idx = 0; idx < rank; ++idx)
    if (idx != axis)
      out_shape.append(input_shape.dim(idx));
  return out_shape;
}
ir::Shape inferAvgPoolShape(const ir::Shape &in_shape, const ir::operation::AvgPool2D::Param &param,
                            const ir::Layout layout)
{
  assert(layout == ir::Layout::NHWC);
  auto ifm_shape = in_shape.asFeature(layout);
  const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, param.kh, param.kw,
                                                  param.padding, param.stride);
  // Pooling doesn't change the number of channels or the batch size
  return ir::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, ifm_shape.C};
}
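// Reduce example: axes {1} on input [2, 3, 4] -> [2, 4] without keep_dims, [2, 1, 4] with keep_dims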
ir::Shape inferReduceShape(const ir::Shape &input_shape, const std::vector<int> &axes,
                           bool keep_dims)
{
  int num_axis = axes.size();
  int input_num_dims = input_shape.rank();
  if (input_num_dims == 0)
  {
    ir::Shape out_shape(0);
    return out_shape;
  }
  if (keep_dims)
  {
    // Reduced axes are kept with dimension 1
    ir::Shape out_shape;
    for (int idx = 0; idx < input_num_dims; ++idx)
    {
      bool is_axis = false;
      for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
        if (axes[axis_idx] == idx || axes[axis_idx] + input_num_dims == idx)
          is_axis = true;
      out_shape.append(is_axis ? 1 : input_shape.dim(idx));
    }
    return out_shape;
  }

  // Calculates size of reducing axis (duplicate axes are counted only once).
  int num_reduce_axis = num_axis;
  for (int i = 0; i < num_axis; ++i)
  {
    int current = axes[i];
    if (current < 0)
      current += input_num_dims;
    assert(0 <= current && current < input_num_dims);
    for (int j = 0; j < i; ++j)
    {
      int previous = axes[j];
      if (previous < 0)
        previous += input_num_dims;
      if (current == previous)
      {
        --num_reduce_axis;
        break;
      }
    }
  }
  UNUSED_RELEASE(num_reduce_axis); // not used further here

  // Determines output dimensions: reduced axes are dropped.
  ir::Shape out_shape;
  int num_skip_axis = 0;
  for (int idx = 0; idx < input_num_dims; ++idx)
  {
    bool is_axis = false;
    for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx)
    {
      if (axes[axis_idx] == idx || axes[axis_idx] + input_num_dims == idx)
      {
        ++num_skip_axis;
        is_axis = true;
        break;
      }
    }
    if (!is_axis)
      out_shape.append(input_shape.dim(idx));
  }
  UNUSED_RELEASE(num_skip_axis); // not used further here
  return out_shape;
}
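// e.g. lhs [2, 3, 4] x rhs [2, 4, 5] with adj_x == adj_y == false -> [2, 3, 5]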
ir::Shape inferBatchMatMulShape(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape,
                                const ir::operation::BatchMatMul::Param &param)
{
  bool adj_x = param.adj_x;
  bool adj_y = param.adj_y;
  ir::Shape output_shape;

  int output_rank = std::max(lhs_shape.rank(), rhs_shape.rank());

  // Extend lhs and rhs shape
  ir::Shape extended_lhs_shape(lhs_shape);
  ir::Shape extended_rhs_shape(rhs_shape);
  extended_lhs_shape.extendRank(output_rank);
  extended_rhs_shape.extendRank(output_rank);

  // Broadcast the batch dimensions
  for (int i = 0; i < output_rank - 2; i++)
  {
    const int lhs_dim = extended_lhs_shape.dim(i);
    const int rhs_dim = extended_rhs_shape.dim(i);
    int broadcast_dim = lhs_dim;
    if (lhs_dim != rhs_dim)
    {
      if (lhs_dim == 1)
      {
        broadcast_dim = rhs_dim;
      }
      else if (rhs_dim != 1)
      {
        throw std::runtime_error{"BatchMatMul shape inference: invalid broadcasting input shape"};
      }
    }
    output_shape.append(broadcast_dim);
  }

  // Fill in the matmul dimensions.
  int lhs_rows_index = adj_x ? output_rank - 1 : output_rank - 2;
  int rhs_cols_index = adj_y ? output_rank - 2 : output_rank - 1;

  output_shape.append(extended_lhs_shape.dim(lhs_rows_index));
  output_shape.append(extended_rhs_shape.dim(rhs_cols_index));

  return output_shape;
}
ir::Shape inferBroadcastToShape(const ir::Shape wshape, const int32_t *shape_buffer)
{
  const int num_elements = wshape.num_elements();

  assert(num_elements != 0);
  assert(shape_buffer);

  ir::Shape new_shape(num_elements);

  for (int i = 0; i < num_elements; ++i)
  {
    assert(shape_buffer[i] != 0); // It shouldn't be 0.
    new_shape.dim(i) = shape_buffer[i];
  }

  return new_shape;
}
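// e.g. concatenating [2, 3] and [2, 5] along axis 1 -> [2, 8]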
ir::Shape inferConcatShape(const Shapes &in_shapes, const ir::operation::Concat::Param &param)
{
  const int32_t concat_axis = param.axis >= 0 ? param.axis : in_shapes[0].rank() + param.axis;
  const auto &first_in_shape = in_shapes[0];

  // Check that all shapes are equal except for the concat axis dimension
  for (const auto &in_shape : in_shapes)
  {
    if (in_shape.rank() != first_in_shape.rank())
      throw std::runtime_error("Rank of all input tensors should be the same");

    for (int64_t dim_idx = 0; dim_idx < in_shape.rank(); ++dim_idx)
      if (!(dim_idx == concat_axis || in_shape.dim(dim_idx) == first_in_shape.dim(dim_idx)))
        throw std::runtime_error("All tensors should have the same dimensions "
                                 "except on the concat axis");
  }

  // Calculate output shape
  ir::Shape out_shape(first_in_shape);
  out_shape.dim(concat_axis) = 0;
  for (const auto &in_shape : in_shapes)
    out_shape.dim(concat_axis) += in_shape.dim(concat_axis);
  return out_shape;
}
ir::Shape inferConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape,
                           const ir::operation::Conv2D::Param &param, ir::Layout layout)
{
  auto ifm_shape = in_shape.asFeature(layout);

  // Kernel format is [depth_out, kernel_height, kernel_width, depth_in]
  auto kf_shape = ker_shape.asFeature(layout);
  assert(ifm_shape.C == kf_shape.C);

  const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, kf_shape.H, kf_shape.W,
                                                  param.padding, param.stride);

  return ir::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, kf_shape.N};
}
ir::Shape inferDepthwiseConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape,
                                    const ir::operation::DepthwiseConv2D::Param &param,
                                    const ir::Layout layout)
{
  assert(layout == ir::Layout::NHWC);
  auto ifm_shape = in_shape.asFeature(layout);

  // Kernel format is [1, kernel_height, kernel_width, depth_out]
  auto kf_shape = ker_shape.asFeature(layout);
  assert(kf_shape.C == static_cast<int32_t>(ifm_shape.C * param.multiplier));
  assert(kf_shape.N == 1);

  const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, kf_shape.H, kf_shape.W,
                                                  param.padding, param.stride);

  return ir::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, kf_shape.C};
}
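// e.g. expanding [2, 3] at axis 1 -> [2, 1, 3]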
ir::Shape inferExpandDimsShape(const ir::Shape &in_shape, int32_t axis)
{
  ir::Shape out_shape(in_shape.rank() + 1);

  axis = ((axis >= 0) ? axis : /* when axis < 0 */ (out_shape.rank() + axis));
  if (!(0 <= axis && axis <= in_shape.rank()))
    throw std::runtime_error("ExpandDims: axis is out of range");

  for (int x = 0, out_x = 0; out_x < out_shape.rank(); ++out_x)
  {
    if (out_x == axis)
      out_shape.dim(out_x) = 1;
    else
      out_shape.dim(out_x) = in_shape.dim(x++);
  }
  return out_shape;
}
ir::Shape inferFillShape(const ir::Shape &in_shape, const int32_t *buffer)
{
  // in_shape is the shape of the 1-D shape tensor; its dim(0) is the output rank
  ir::Shape out_shape(in_shape.dim(0));
  for (int out_x = 0; out_x < out_shape.rank(); ++out_x)
    out_shape.dim(out_x) = buffer[out_x];
  return out_shape;
}
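// FullyConnected output is [batch, num_units], e.g. input [2, 3, 4] with weights [5, 12] -> [2, 5]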
ir::Shape inferFullyConnectedShape(const ir::Shape &in_shape, const ir::Shape &ker_shape)
{
  assert(in_shape.rank() >= 2);
  assert(ker_shape.rank() == 2);

  const auto input_size_with_batch = in_shape.num_elements();
  const auto num_units = ker_shape.dim(0);
  const auto input_size = ker_shape.dim(1);
  const auto batch_size = input_size_with_batch / input_size;
  assert(input_size_with_batch % input_size == 0);

  return {ir::Shape({static_cast<int32_t>(batch_size), num_units})};
}
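// Gather replaces the gathered axis by the whole indices shape,
// e.g. params [3, 4, 5] with indices [2, 6] on axis 1 -> [3, 2, 6, 5]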
ir::Shape inferGatherShape(const ir::Shape &input_shape, const ir::Shape &indices_shape, int axis,
                           int rank)
{
  ir::Shape out_shape;
  const int indices_rank = indices_shape.rank();
  for (int idx = 0; idx < rank; ++idx)
  {
    if (idx == axis)
      for (int indices_idx = 0; indices_idx < indices_rank; indices_idx++)
        out_shape.append(indices_shape.dim(indices_idx));
    else
      out_shape.append(input_shape.dim(idx));
  }
  return out_shape;
}
ir::Shape inferMaxPoolShape(const ir::Shape &in_shape, const ir::operation::MaxPool2D::Param &param,
                            const ir::Layout layout)
{
  assert(layout == ir::Layout::NHWC);
  auto ifm_shape = in_shape.asFeature(layout);
  const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, param.kh, param.kw,
                                                  param.padding, param.stride);
  // Pooling doesn't change the number of channels or the batch size
  return ir::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, ifm_shape.C};
}
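// e.g. OneHot of indices [2, 3] with depth 4 and axis -1 -> [2, 3, 4]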
ir::Shape inferOnehotShape(const ir::Shape &input_shape, const int depth, int axis)
{
  const auto rank = input_shape.rank() + 1;
  ir::Shape newShape(rank);
  axis = (axis == -1) ? (rank - 1) : axis;
  for (int i = 0; i < rank; ++i)
  {
    if (i < axis)
      newShape.dim(i) = input_shape.dim(i);
    else if (i == axis)
      newShape.dim(i) = depth;
    else
      newShape.dim(i) = input_shape.dim(i - 1);
  }
  return newShape;
}
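// e.g. packing 3 tensors of shape [2, 4] along axis 1 (output rank 3) -> [2, 3, 4]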
ir::Shape inferPackShape(const ir::Shape &input_shape, int axis, int rank, int num)
{
  ir::Shape out_shape;
  int in_idx = 0;
  for (int out_idx = 0; out_idx < rank; ++out_idx)
  {
    if (out_idx == axis)
      out_shape.append(num); // the new axis holds the number of packed tensors
    else
      out_shape.append(input_shape.dim(in_idx++));
  }
  return out_shape;
}
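// e.g. input [2, 3] with pad_buf {1, 1, 0, 2} -> [4, 5]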
ir::Shape inferPadShape(const ir::Shape &in_shape, const int32_t *pad_buf, const size_t num_pads)
{
  assert(num_pads % 2 == 0);
  const int32_t rank = num_pads / 2;
  ir::Shape ret(rank);

  for (int32_t i = 0; i < rank; ++i)
  {
    const auto before_padding = pad_buf[i * 2];
    const auto after_padding = pad_buf[i * 2 + 1];
    ret.dim(i) = in_shape.dim(i) + before_padding + after_padding;
  }

  return ret;
}
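// e.g. inferRangeShape(0, 10, 3) -> [4] (the values would be 0, 3, 6, 9)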
template <typename T> ir::Shape inferRangeShape(T start_val, T limit_val, T delta_val)
{
  ir::Shape out_shape(static_cast<int>(1));

  out_shape.dim(0) =
      (std::is_integral<T>::value
           ? ((std::abs(start_val - limit_val) + std::abs(delta_val) - 1) / std::abs(delta_val))
           : std::ceil(std::abs((start_val - limit_val) / delta_val)));
  return out_shape;
}

// template instantiation
template ir::Shape inferRangeShape(int start_val, int limit_val, int delta_val);
template ir::Shape inferRangeShape(float start_val, float limit_val, float delta_val);
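// e.g. 24 input elements reshaped with shape_buf {2, -1, 4} -> [2, 3, 4]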
ir::Shape inferReshapeShape(const int32_t *shape_buf, const int32_t shape_num_elements,
                            const size_t total_num_elements)
{
  ir::Shape ret(shape_num_elements);
  int32_t flatten_dim = ir::Shape::UNSPECIFIED_DIM;
  for (int32_t i = 0; i < shape_num_elements; ++i)
  {
    if (shape_buf[i] < 0)
    {
      if (flatten_dim != ir::Shape::UNSPECIFIED_DIM)
        throw std::runtime_error("Reshape: 2nd param has special dim(for flatten) more than once");
      flatten_dim = i;
      ret.dim(i) = 1;
    }
    else
    {
      ret.dim(i) = shape_buf[i];
    }
  }
  if (flatten_dim != ir::Shape::UNSPECIFIED_DIM)
    ret.dim(flatten_dim) = total_num_elements / ret.num_elements();

  // Check that the inferred shape covers exactly the input elements
  if (total_num_elements != static_cast<size_t>(ret.num_elements()))
    throw std::runtime_error("Reshape: 2nd param is not compatible with the shape of input");

  return ret;
}
ir::Shape inferSelectShape(const ir::Shape &input_cond_shape, const ir::Shape &input_true_shape,
                           const ir::Shape &input_false_shape)
{
  auto haveSameShapes = [](const ir::Shape &input_cond_shape, const ir::Shape &input_true_shape,
                           const ir::Shape &input_false_shape) {
    if ((input_cond_shape.rank() != input_true_shape.rank()) ||
        input_cond_shape.rank() != input_false_shape.rank())
      return false;

    int rank = input_cond_shape.rank();
    for (int i = 0; i < rank; ++i)
    {
      if (input_cond_shape.dim(i) != input_true_shape.dim(i) ||
          input_cond_shape.dim(i) != input_false_shape.dim(i))
        return false;
    }
    return true;
  };

  auto calculateShape = [](const ir::Shape &input_cond_shape, const ir::Shape &input_true_shape,
                           const ir::Shape &input_false_shape, ir::Shape &new_shape) {
    ir::Shape cond_shape = input_cond_shape;
    ir::Shape true_shape = input_true_shape;
    ir::Shape false_shape = input_false_shape;
    int most_rank =
        (cond_shape.rank() >= true_shape.rank()) && (cond_shape.rank() >= false_shape.rank())
            ? cond_shape.rank()
            : (false_shape.rank() >= true_shape.rank() ? false_shape.rank() : true_shape.rank());

    ir::Shape calculate_shape(most_rank);

    cond_shape.extendRank(most_rank);
    true_shape.extendRank(most_rank);
    false_shape.extendRank(most_rank);

    for (int i = 0; i < most_rank; ++i)
    {
      calculate_shape.dim(i) =
          (cond_shape.dim(i) >= true_shape.dim(i)) && (cond_shape.dim(i) >= false_shape.dim(i))
              ? cond_shape.dim(i)
              : (false_shape.dim(i) >= true_shape.dim(i) ? false_shape.dim(i) : true_shape.dim(i));

      if ((cond_shape.dim(i) != calculate_shape.dim(i) && cond_shape.dim(i) != 1) ||
          (true_shape.dim(i) != calculate_shape.dim(i) && true_shape.dim(i) != 1) ||
          (false_shape.dim(i) != calculate_shape.dim(i) && false_shape.dim(i) != 1))
        return false;
    }

    new_shape = calculate_shape;
    return true;
  };

  bool havesame = haveSameShapes(input_cond_shape, input_true_shape, input_false_shape);
  if (havesame)
    return input_cond_shape;

  ir::Shape new_shape;
  bool possible = calculateShape(input_cond_shape, input_true_shape, input_false_shape, new_shape);
  if (!possible)
    throw std::runtime_error("Broadcasting is not possible.");

  return new_shape;
}
ir::Shape inferSliceShape(const ir::Shape &input_shape, const int32_t *begins, const int32_t *sizes)
{
  const uint32_t rank = input_shape.rank();
  ir::Shape out_shape(rank);

  for (uint32_t idx = 0; idx < rank; ++idx)
  {
    const auto input_dim = input_shape.dim(idx);

    // begin is zero-based
    auto begin = begins[idx];
    if (begin < 0)
      throw std::runtime_error("shape inference Slice: Invalid begin.");

    // size of -1 means "to the end of the axis"
    auto size = sizes[idx];
    if (size < -1)
      throw std::runtime_error("shape inference Slice: Invalid size.");

    if (size == -1)
      size = input_dim - begin;
    else if (input_dim < begin + size)
      throw std::runtime_error("shape inference Slice: Invalid begin and size.");

    out_shape.dim(idx) = size;
  }

  return out_shape;
}
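// e.g. input [1, 4, 4, 3] with block_shape {2, 2} and zero padding -> [4, 2, 2, 3]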
ir::Shape inferSpaceToBatchNDShape(const ir::Shape &input_shape, const ir::Shape &block_shape_shape,
                                   const ir::Shape &padding_shape, const int32_t *block_shape_data,
                                   const int32_t *padding_data)
{
  const uint32_t rank = input_shape.rank();
  ir::Shape out_shape(rank);

  // Currently, only 4D NHWC input/output op_contexts are supported.
  // The 4D array needs to have exactly 2 spatial dimensions.
  // TODO(nupurgarg): Support arbitrary dimension in SpaceToBatchND.
  const int32_t kInputDimensionNum = 4;
  const int32_t kBlockSizeDimensionNum = 1;
  const int32_t kSpatialDimensionNum = 2;

  UNUSED_RELEASE(kInputDimensionNum);
  UNUSED_RELEASE(kBlockSizeDimensionNum);
  UNUSED_RELEASE(block_shape_shape);
  UNUSED_RELEASE(padding_shape);

  assert(block_shape_shape.rank() == kBlockSizeDimensionNum);
  assert(block_shape_shape.dim(0) == kSpatialDimensionNum);
  assert(padding_shape.dim(0) == kSpatialDimensionNum);
  assert(padding_shape.dim(1) == 2); // fixed, meaning left/right padding for each element
  assert(padding_shape.rank() == 2); // fixed, meaning dimension(dim 0) and padding length(dim 1)

  // Ensures the input height and width (with padding) is a multiple of block
  // shape height and width.
  for (int dim = 0; dim < kSpatialDimensionNum; ++dim)
  {
    int final_dim_size =
        (input_shape.dim(dim + 1) + padding_data[dim * 2] + padding_data[dim * 2 + 1]);

    assert(final_dim_size % block_shape_data[dim] == 0);

    out_shape.dim(dim + 1) = final_dim_size / block_shape_data[dim];
  }

  const int output_batch_size = input_shape.dim(0) * block_shape_data[0] * block_shape_data[1];
  const int output_channel_size = input_shape.dim(3);

  out_shape.dim(0) = output_batch_size;
  out_shape.dim(3) = output_channel_size;

  return out_shape;
}
ir::Shape inferSplitShape(const ir::Shape input_shape, int axis_value, int num_splits)
{
  ir::Shape newShape(input_shape);

  assert(axis_value >= 0);
  assert(axis_value < input_shape.rank());

  const int input_size = input_shape.dim(axis_value);
  assert(input_size % num_splits == 0);
  const int slice_size = input_size / num_splits;

  newShape.dim(axis_value) = slice_size;
  return newShape;
}
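// e.g. squeezing [1, 3, 1, 4] with dims {0, 2}, or with no dims given, -> [3, 4]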
ir::Shape inferSqueezeShape(const ir::Shape &in_shape, const ir::operation::Squeeze::Param &param)
{
  const int ndims = param.ndim;
  const int *squeeze_dims = param.dims;
  bool should_squeeze[8] = {false};
  int num_squeezed_dims = 0;
  int shape_rank = in_shape.rank();
  if (ndims == 0)
  {
    // If squeeze dimensions are not given, squeeze every dimension of size 1.
    for (int idx = 0; idx < shape_rank; ++idx)
    {
      if (in_shape.dim(idx) == 1)
      {
        should_squeeze[idx] = true;
        ++num_squeezed_dims;
      }
    }
  }
  else
  {
    // Squeeze only the dimensions listed in the squeeze params.
    for (int idx = 0; idx < ndims; ++idx)
    {
      int current = squeeze_dims[idx];
      if (current < 0)
        current += shape_rank;

      if (!(current >= 0 && current < shape_rank && in_shape.dim(current) == 1))
        throw std::runtime_error(
            "The following conditions must be met: 0 <= dim < input rank, input dim == 1");

      if (!should_squeeze[current])
        ++num_squeezed_dims;
      should_squeeze[current] = true;
    }
  }

  // Set output shape.
  ir::Shape out_shape(shape_rank - num_squeezed_dims);
  for (int in_idx = 0, out_idx = 0; in_idx < shape_rank; ++in_idx)
  {
    if (!should_squeeze[in_idx])
      out_shape.dim(out_idx++) = in_shape.dim(in_idx);
  }
  return out_shape;
}
// Helper for StridedSlice
template <typename T>
StridedSliceParams buildStridedSliceParams(const T *begin, const T *end, const T *strides,
                                           const uint32_t begin_mask, const uint32_t end_mask,
                                           const uint32_t shrink_axis_mask, const uint8_t rank)
{
  StridedSliceParams op_params;
  op_params.start_indices_count = rank;
  op_params.stop_indices_count = rank;
  op_params.strides_count = rank;

  for (int i = 0; i < op_params.strides_count; ++i)
  {
    op_params.start_indices[i] = begin[i];
    op_params.stop_indices[i] = end[i];
    op_params.strides[i] = strides[i];
    assert(op_params.strides[i] != 0);
  }

  op_params.begin_mask = begin_mask;
  op_params.ellipsis_mask = 0; // NYI
  op_params.end_mask = end_mask;
  op_params.new_axis_mask = 0; // NYI
  op_params.shrink_axis_mask = shrink_axis_mask;

  assert(sizeof(op_params.begin_mask) * 4 >= rank);

  return op_params;
}
// template instantiation
template StridedSliceParams
buildStridedSliceParams(const uint32_t *begin, const uint32_t *end, const uint32_t *strides,
                        const uint32_t begin_mask, const uint32_t end_mask,
                        const uint32_t shrink_axis_mask, const uint8_t rank);
int Clamp(const int v, const int lo, const int hi)
{
  assert(!(hi < lo));
  return v < lo ? lo : (hi < v ? hi : v);
}
// Return the "real" start index for iteration along the given axis, clamped
// into [0, axis_size - 1] so it can be used to index directly into the data.
int StartForAxis(const StridedSliceParams &params, const ir::Shape &input_shape, int axis)
{
  const auto begin_mask = params.begin_mask;
  const auto *start_indices = params.start_indices;
  const auto *strides = params.strides;
  // Begin with the specified index.
  int start = start_indices[axis];

  // begin_mask override
  if (begin_mask & 1 << axis)
  {
    if (strides[axis] > 0)
    {
      // Forward iteration - use the first element. These values will get
      // clamped below (Note: We could have set them to 0 and axis_size-1, but
      // use lowest() and max() to maintain symmetry with StopForAxis())
      start = std::numeric_limits<int>::lowest();
    }
    else
    {
      // Backward iteration - use the last element.
      start = std::numeric_limits<int>::max();
    }
  }

  // Handle negative indices
  int axis_size = input_shape.dim(axis);
  if (start < 0)
    start += axis_size;

  // Clamping
  start = Clamp(start, 0, axis_size - 1);

  return start;
}
// Return the "real" index for the end of iteration along that axis. This is an
// "end" in the traditional C sense, in that it points to one past the last
// element. i.e. if you were iterating through all elements of a 1D array of
// size 4, this function would return 4 as the stop, because it is one past the
// "real" indices of 0, 1, 2 & 3.
int StopForAxis(const StridedSliceParams &params, const ir::Shape &input_shape, int axis,
                int start_for_axis)
{
  const auto end_mask = params.end_mask;
  const auto shrink_axis_mask = params.shrink_axis_mask;
  const auto *stop_indices = params.stop_indices;
  const auto *strides = params.strides;

  // Begin with the specified index
  const bool shrink_axis = shrink_axis_mask & (1 << axis);
  int stop = stop_indices[axis];

  // When shrinking an axis, the end position does not matter (and can be
  // incorrect when negative indexing is used, see Issue #19260). Always use
  // start_for_axis + 1 to generate a length 1 slice, since start_for_axis has
  // already been adjusted for negative indices.
  if (shrink_axis)
    stop = start_for_axis + 1;

  // end_mask override
  if (end_mask & (1 << axis))
  {
    if (strides[axis] > 0)
    {
      // Forward iteration - use the last element. These values will get
      // clamped below
      stop = std::numeric_limits<int>::max();
    }
    else
    {
      // Backward iteration - use the first element.
      stop = std::numeric_limits<int>::lowest();
    }
  }

  // Handle negative indices
  const int axis_size = input_shape.dim(axis);
  if (stop < 0)
    stop += axis_size;

  // Clamping
  // Because the end index points one past the last element, we need slightly
  // different clamping ranges depending on the direction.
  if (strides[axis] > 0)
  {
    // Forward iteration
    stop = Clamp(stop, 0, axis_size);
  }
  else
  {
    // Backward iteration
    stop = Clamp(stop, -1, axis_size - 1);
  }

  return stop;
}
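// e.g. input [4, 4] with begin {1, 0}, end {3, 4}, strides {1, 2} and no masks -> [2, 2]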
ir::Shape inferStridedSliceShape(const ir::Shape &input_shape, const StridedSliceParams &op_params,
                                 uint32_t rank)
{
  ir::Shape out_shape;
  for (uint32_t idx = 0; idx < rank; ++idx)
  {
    int32_t stride = op_params.strides[idx];
    int32_t begin = StartForAxis(op_params, input_shape, idx);
    int32_t end = StopForAxis(op_params, input_shape, idx, begin);

    // When shrinking an axis, the end position does not matter (and can be
    // incorrect when negative indexing is used, see Issue #19260). Always use
    // begin + 1 to generate a length 1 slice, since begin has
    // already been adjusted for negative indices by StartForAxis.
    const bool shrink_axis = op_params.shrink_axis_mask & (1 << idx);
    if (shrink_axis)
      end = begin + 1;

    int32_t dim_shape = std::ceil((end - begin) / static_cast<float>(stride));
    dim_shape = dim_shape < 0 ? 0 : dim_shape;
    if (!shrink_axis)
      out_shape.append(dim_shape);
  }
  return out_shape;
}
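// e.g. tiling [2, 3] with multiplier {2, 2} -> [4, 6]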
ir::Shape inferTileShape(const ir::Shape &in_shape, const int32_t *multiplier)
{
  // assert(in_shape.rank() == multiplier.rank());
  ir::Shape new_Shape(in_shape.rank());

  for (int i = 0; i < in_shape.rank(); ++i)
  {
    assert(multiplier[i]); // multiplier[i] should not be 0.
    new_Shape.dim(i) = in_shape.dim(i) * multiplier[i];
  }
  return new_Shape;
}
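// e.g. transposing [2, 3, 4] with perm {2, 0, 1} -> [4, 2, 3]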
ir::Shape inferTransposeShape(const ir::Shape &in_shape, const std::vector<int> &perm)
{
  if (static_cast<int>(perm.size()) > in_shape.rank())
  {
    throw std::runtime_error("inferTransposeShape failed, bad rank size: " +
                             std::to_string(static_cast<int>(perm.size())));
  }
  ir::Shape out_shape(static_cast<int>(perm.size()));
  for (int idx = 0; idx < static_cast<int>(perm.size()); idx++)
  {
    if (perm[idx] < 0 || perm[idx] >= static_cast<int>(perm.size()))
    {
      throw std::runtime_error("inferTransposeShape failed, bad perm value: " +
                               std::to_string(perm[idx]));
    }
    out_shape.dim(idx) = in_shape.dim(perm[idx]);
  }
  return out_shape;
}
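// e.g. unpacking a [3, 2, 4] tensor along axis 1 -> each of the 2 outputs is [3, 4]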
ir::Shape inferUnpackShape(const ir::Shape &input_shape, int axis, int rank)
{
  ir::Shape out_shape;
  for (int out_idx = 0; out_idx < rank; out_idx++)
    if (out_idx != axis)
      out_shape.append(input_shape.dim(out_idx));
  return out_shape;
}

} // namespace shape_inference
} // namespace onert