1 //*****************************************************************************
2 // Copyright 2017-2020 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 //*****************************************************************************
19 #include "ngraph/evaluator.hpp"
20 #include "ngraph/op/concat.hpp"
21 #include "ngraph/op/convert.hpp"
22 #include "ngraph/op/min.hpp"
23 #include "ngraph/op/minimum.hpp"
24 #include "ngraph/op/squeeze.hpp"
25 #include "ngraph/op/unsqueeze.hpp"
26 #include "ngraph/runtime/host_tensor.hpp"
27 #include "ngraph/shape.hpp"
28 #include "ngraph/type/element_type_traits.hpp"
29 #include "ngraph/util.hpp"
30 #include "ngraph/validation_util.hpp"
32 NGRAPH_SUPPRESS_DEPRECATED_START
35 using namespace ngraph;
// Computes the default convolution window strides: a vector of 1s, one entry
// per spatial axis. The spatial rank is taken from whichever of the two shapes
// has a static rank of at least 2 (total rank minus batch and channel axes).
// NOTE(review): some lines (braces, the declaration/default of `rank`) are
// elided in this view — confirm against the full file.
Strides ngraph::conv_default_strides(const Node* /* node */,
                                     const PartialShape& data_batch_shape,
                                     const PartialShape& filters_shape)
    // Prefer the data batch shape when its rank is statically known...
    if (data_batch_shape.rank().is_static() && data_batch_shape.rank().get_length() >= 2)
        rank = data_batch_shape.rank().get_length() - 2;
    // ...otherwise fall back to the filters shape.
    else if (filters_shape.rank().is_static() && filters_shape.rank().get_length() >= 2)
        rank = filters_shape.rank().get_length() - 2;
    return Strides(rank, 1);
// Computes the default convolution padding: a vector of 0s, one entry per
// spatial axis. Spatial rank is derived exactly as in conv_default_strides.
// NOTE(review): some lines (braces, the declaration/default of `rank`) are
// elided in this view — confirm against the full file.
CoordinateDiff ngraph::conv_default_padding(const Node* /* node */,
                                            const PartialShape& data_batch_shape,
                                            const PartialShape& filters_shape)
    // Prefer the data batch shape when its rank is statically known...
    if (data_batch_shape.rank().is_static() && data_batch_shape.rank().get_length() >= 2)
        rank = data_batch_shape.rank().get_length() - 2;
    // ...otherwise fall back to the filters shape.
    else if (filters_shape.rank().is_static() && filters_shape.rank().get_length() >= 2)
        rank = filters_shape.rank().get_length() - 2;
    return CoordinateDiff(rank, 0);
// Infers the output shape of a windowed reduction operation, where the data may be dilated and/or
// padded, and the reduction window may be strided and/or dilated.
//
// TODO(amprocte): The messages here would be a bit friendlier if we didn't say "after
// padding/after dilation" for cases where there is actually no padding/dilation.
//
// NOTE(review): a number of interior lines (error-message arguments, braces,
// and the trailing parameter after is_window_all_in_padding_allowed) are
// elided in this view; comments below describe only the visible logic.
PartialShape ngraph::infer_windowed_reduction_output_shape(const Node* node,
                                                           const PartialShape& data_shape,
                                                           const Strides& data_dilation,
                                                           const CoordinateDiff& data_padding_below,
                                                           const CoordinateDiff& data_padding_above,
                                                           const PartialShape& window_shape,
                                                           const Strides& window_strides,
                                                           const Strides& window_dilation,
                                                           bool is_window_all_in_padding_allowed,
    // All per-axis attribute vectors must agree on the number of spatial axes;
    // merging them into one PartialShape rank detects any mismatch at once.
    PartialShape data_shape_merged{PartialShape::dynamic()};
    NODE_VALIDATION_CHECK(node,
                          data_shape_merged.merge_rank(data_shape.rank()) &&
                              data_shape_merged.merge_rank(data_dilation.size()) &&
                              data_shape_merged.merge_rank(data_padding_below.size()) &&
                              data_shape_merged.merge_rank(data_padding_above.size()) &&
                              data_shape_merged.merge_rank(window_shape.rank()) &&
                              data_shape_merged.merge_rank(window_strides.size()) &&
                              data_shape_merged.merge_rank(window_dilation.size()),
                          "Ranks for data shape (",
                          "), data dilation (",
                          "), padding below (",
                          "), padding above (",
                          "), window strides (",
                          "), and window dilation (",
    // Start from a fully dynamic shape of the merged rank; each axis below is
    // refined to a concrete dimension when enough information is static.
    PartialShape output_shape = PartialShape::dynamic(data_shape_merged.rank());
    if (output_shape.rank().is_static())
        for (size_t i = 0; i < output_shape.rank().get_length(); i++)
            // A zero dilation or stride on any axis is meaningless — reject.
            NODE_VALIDATION_CHECK(node,
                                  data_dilation[i] > 0,
                                  ") has zero dimension at axis ",
            NODE_VALIDATION_CHECK(node,
                                  window_strides[i] > 0,
                                  ") has zero dimension at axis ",
            NODE_VALIDATION_CHECK(node,
                                  window_dilation[i] > 0,
                                  ") has zero dimension at axis ",
            bool data_dim_static = data_shape.rank().is_static() && data_shape[i].is_static();
            bool window_dim_static = window_shape.rank().is_static() && window_shape[i].is_static();
            // Effective data extent on this axis after dilation and padding;
            // -1 is a sentinel for "not computable" (dynamic dimension).
            ptrdiff_t data_padded_dilated_dim = -1;
            data_padded_dilated_dim =
                (static_cast<int64_t>(data_dilation[i]) * (data_shape[i].get_length() - 1)) +
                1 + data_padding_below[i] + data_padding_above[i];
            NODE_VALIDATION_CHECK(
                data_padded_dilated_dim > 0,
                "Data shape after padding and dilation has dimension less than 1 (dim: ",
                data_padded_dilated_dim,
            // Effective window extent on this axis after dilation.
            ptrdiff_t window_dilated_dim = -1;
            if (window_dim_static)
                static_cast<int64_t>(window_dilation[i]) * (window_shape[i].get_length() - 1) +
                NODE_VALIDATION_CHECK(node,
                                      window_dilated_dim > 0,
                                      "Window after dilation has dimension less than 1 (dim: ",
                // A window that can land entirely inside the padding region
                // is rejected unless the caller explicitly allows it.
                NODE_VALIDATION_CHECK(
                    is_window_all_in_padding_allowed ||
                        (window_dilated_dim > data_padding_below[i] &&
                         window_dilated_dim > data_padding_above[i]),
                    "Window after dilation is sometimes entirely in the padding area for axis ",
                    " (dilated window dimension: ",
                    ", padding below dimension: ",
                    data_padding_below[i],
                    ", padding above dimension: ",
                    data_padding_above[i],
                    ") and this is not ",
            if (data_dim_static && window_dim_static)
                NODE_VALIDATION_CHECK(node,
                                      window_dilated_dim <= data_padded_dilated_dim,
                                      "Window after dilation has dimension (dim: ",
                                      ") larger than the data shape after padding (dim: ",
                                      data_padded_dilated_dim,
                // Ceil branch: round the quotient up so a partial window at
                // the trailing edge still yields an output element.
                output_shape[i] = ceil_div(static_cast<size_t>(data_padded_dilated_dim) -
                                               static_cast<size_t>(window_dilated_dim),
                // Floor branch: the standard (data - window) / stride + 1.
                output_shape[i] = ((static_cast<size_t>(data_padded_dilated_dim) -
                                    static_cast<size_t>(window_dilated_dim)) /
// Infers the output batch shape and element type for convolution fprop.
//
// NOTE(review): several interior lines (error-message arguments, braces, and
// some call arguments) are elided in this view.
PartialShape ngraph::infer_convolution_forward(const Node* node,
                                               const PartialShape& data_batch_shape,
                                               const Strides& data_dilation,
                                               const CoordinateDiff& data_padding_below,
                                               const CoordinateDiff& data_padding_above,
                                               const PartialShape& filters_shape,
                                               const Strides& filter_strides,
                                               const Strides& filter_dilation)
    // Data batch and filters must have the same (possibly dynamic) rank.
    Rank data_batch_filters_rank{Rank::dynamic()};
    NODE_VALIDATION_CHECK(
        Rank::merge(data_batch_filters_rank, data_batch_shape.rank(), filters_shape.rank()),
        "Data batch and filters rank do not match (data batch shape: ",
    // Rank must cover batch axis + channel axis + at least one spatial axis.
    NODE_VALIDATION_CHECK(node,
                          data_batch_filters_rank.is_dynamic() ||
                              data_batch_filters_rank.get_length() >= 3,
                          "Data batch and filters must have rank of at least 3 (one batch axis, ",
                          "one input-channel axis, and at least one spatial dimension) ",
                          "(data batch shape: ",
    // All per-spatial-axis attribute vectors must agree on the spatial rank
    // (total rank minus the batch and channel axes).
    Rank spatial_rank{Rank::dynamic()};
    NODE_VALIDATION_CHECK(node,
                          Rank::merge(spatial_rank, spatial_rank, data_batch_filters_rank - 2) &&
                              Rank::merge(spatial_rank, spatial_rank, data_dilation.size()) &&
                              Rank::merge(spatial_rank, spatial_rank, data_padding_below.size()) &&
                              Rank::merge(spatial_rank, spatial_rank, data_padding_above.size()) &&
                              Rank::merge(spatial_rank, spatial_rank, filter_strides.size()) &&
                              Rank::merge(spatial_rank, spatial_rank, filter_dilation.size()),
                          "Ranks for data item shape/filters shape (data batch has shape ",
                          ", so data item rank is ",
                          (data_batch_shape.rank() - 2),
                          " and filters have shape ",
                          ", so filters spatial rank is ",
                          (filters_shape.rank() - 2),
                          "), data dilation (",
                          "), padding below (",
                          "), padding above (",
                          "), filter strides (",
                          "), and filter dilation (",
    // Layout: data batch is [N, C_in, spatial...]; filters are
    // [C_out, C_in, spatial...]. Dynamic rank yields dynamic dimensions.
    Dimension batch_size =
        (data_batch_shape.rank().is_static() ? data_batch_shape[0] : Dimension::dynamic());
    Dimension data_channel_count =
        (data_batch_shape.rank().is_static() ? data_batch_shape[1] : Dimension::dynamic());
    PartialShape data_spatial_shape(PartialShape::dynamic(spatial_rank));
    Dimension filter_output_channel_count =
        (filters_shape.rank().is_static() ? filters_shape[0] : Dimension::dynamic());
    Dimension filter_input_channel_count =
        (filters_shape.rank().is_static() ? filters_shape[1] : Dimension::dynamic());
    PartialShape filter_spatial_shape(PartialShape::dynamic(spatial_rank));

    // Note: spatial_rank is definitely static at this point.

    // Extract the spatial dimensions (axes 2 and up) from each shape.
    for (size_t i = 0; i < spatial_rank.get_length(); i++)
        if (data_batch_shape.rank().is_static())
            data_spatial_shape[i] = data_batch_shape[i + 2];
        if (filters_shape.rank().is_static())
            filter_spatial_shape[i] = filters_shape[i + 2];
    NODE_VALIDATION_CHECK(
        node, batch_size.is_dynamic() || batch_size.get_length() > 0, "Batch size is zero.");

    // The data's channel count must match the filters' input channel count.
    Dimension merged_channel_count;
    NODE_VALIDATION_CHECK(
        Dimension::merge(merged_channel_count, data_channel_count, filter_input_channel_count),
        "Data batch channel count (",
        ") does not match filter input ",
        filter_input_channel_count,
    NODE_VALIDATION_CHECK(node,
                          merged_channel_count.is_dynamic() ||
                              merged_channel_count.get_length() > 0,
                          "Data batch channel count and/or filter input channel count is zero.");
    NODE_VALIDATION_CHECK(node,
                          filter_output_channel_count.is_dynamic() ||
                              filter_output_channel_count.get_length() > 0,
                          "Filter output channel count is zero.");

    // Spatial output dims come from the shared windowed-reduction helper,
    // treating the filter's spatial shape as the reduction window.
    PartialShape data_output_shape = infer_windowed_reduction_output_shape(node,
                                                                           filter_spatial_shape,
    // Assemble the final [N, C_out, spatial...] shape.
    PartialShape batch_output_shape(PartialShape::dynamic(spatial_rank + 2));
    batch_output_shape[0] = batch_size;
    batch_output_shape[1] = filter_output_channel_count;
    for (size_t i = 0; i < spatial_rank.get_length(); i++)
        batch_output_shape[i + 2] = data_output_shape[i];

    return batch_output_shape;
// Infers the output batch shape and element type for batched pooling fprop.
//
// NOTE(review): some interior lines (error-message arguments, braces, and
// call arguments) are elided in this view.
PartialShape ngraph::infer_batched_pooling_forward(const Node* node,
                                                   const PartialShape& data_batch_shape,
                                                   const CoordinateDiff& data_padding_below,
                                                   const CoordinateDiff& data_padding_above,
                                                   const PartialShape& window_shape,
                                                   const Strides& window_strides,
                                                   bool is_window_all_in_padding_allowed,
    // Need at least [N, C, spatial...].
    NODE_VALIDATION_CHECK(node,
                          data_batch_shape.rank().is_dynamic() ||
                              data_batch_shape.rank().get_length() >= 3,
                          "Data batch must have rank of at least 3 (one batch axis, ",
                          "one input-channel axis, and at least one spatial dimension) ",
                          "(data batch shape: ",
    // All per-spatial-axis attribute vectors must agree on the spatial rank.
    PartialShape data_spatial_shape{PartialShape::dynamic()};
    NODE_VALIDATION_CHECK(node,
                          data_spatial_shape.merge_rank(data_batch_shape.rank() - 2) &&
                              data_spatial_shape.merge_rank(data_padding_below.size()) &&
                              data_spatial_shape.merge_rank(data_padding_above.size()) &&
                              data_spatial_shape.merge_rank(window_shape.rank()) &&
                              data_spatial_shape.merge_rank(window_strides.size()),
                          "Ranks for data item shape (data batch has shape ",
                          ", so data item rank is ",
                          (data_batch_shape.rank() - 2),
                          "), padding below (",
                          "), padding above (",
                          "), and window strides (",
    Dimension batch_size{Dimension::dynamic()};
    Dimension channel_count{Dimension::dynamic()};
    PartialShape data_output_spatial_shape{PartialShape::dynamic(data_spatial_shape.rank())};

    // With a static rank, pull out N, C, and the spatial dims (axes 2+).
    if (data_batch_shape.rank().is_static())
        batch_size = data_batch_shape[0];
        channel_count = data_batch_shape[1];
        for (size_t i = 0; i < data_spatial_shape.rank().get_length(); i++)
            data_spatial_shape[i] = data_batch_shape[i + 2];
    NODE_VALIDATION_CHECK(
        node, batch_size.is_dynamic() || batch_size.get_length() > 0, "Batch size is zero.");
    NODE_VALIDATION_CHECK(node,
                          channel_count.is_dynamic() || channel_count.get_length() > 0,
                          "Channel count is zero.");

    // For pooling ops we don't need dilation, so we fill in the identity value (all 1).
    Strides data_dilation(data_spatial_shape.rank().get_length(), 1);
    Strides window_dilation(data_spatial_shape.rank().get_length(), 1);

    // Spatial output dims come from the shared windowed-reduction helper.
    data_output_spatial_shape =
        infer_windowed_reduction_output_shape(node,
                                              is_window_all_in_padding_allowed,
    // Assemble the final [N, C, spatial...] shape.
    PartialShape data_batch_output_shape{
        PartialShape::dynamic(data_output_spatial_shape.rank() + 2)};
    data_batch_output_shape[0] = batch_size;
    data_batch_output_shape[1] = channel_count;
    for (size_t i = 0; i < data_spatial_shape.rank().get_length(); i++)
        data_batch_output_shape[i + 2] = data_output_spatial_shape[i];

    return data_batch_output_shape;
// Bundles the element type, shape, and human-readable name of one
// channel-shaped batch-norm input (gamma/beta/mean/variance), for use in
// infer_batch_norm_forward_helper's validation and error messages.
struct ChannelShapedInputSpec
    element::Type m_element_type; // element type of this input
    PartialShape m_shape;         // expected to be a rank-1 channel vector
    std::string m_input_name;     // name used in error messages, e.g. "gamma"
// Shared implementation behind the infer_batch_norm_forward overloads.
// Validates that all channel-shaped inputs agree in element type and are
// rank-1 vectors whose length matches the input's channel dimension (axis 1),
// then returns {result element type, batch result shape, channel shape}.
// NOTE(review): some interior lines (braces, error-message arguments, and the
// `node` parameter line) are elided in this view.
static std::tuple<element::Type, PartialShape, PartialShape> infer_batch_norm_forward_helper(
    element::Type input_element_type,
    const PartialShape& input_shape,
    const std::vector<ChannelShapedInputSpec>& channel_shaped_inputs)
    // Built up a slash-separated string naming all the channel-shaped inputs, for use in error
    // messages.
    std::stringstream ss;
    for (auto& inp : channel_shaped_inputs)
        ss << inp.m_input_name;
    std::string channel_input_names = ss.str();

    // Infer output element type.
    element::Type et_result{input_element_type};
    for (auto& inp : channel_shaped_inputs)
        NODE_VALIDATION_CHECK(node,
                              element::Type::merge(et_result, et_result, inp.m_element_type),
                              "Input element types do not match.");

    // Extract channel dimension from input shape.
    Dimension channel_dim{Dimension::dynamic()};
    NODE_VALIDATION_CHECK(node,
                          input_shape.is_dynamic() || input_shape.rank().get_length() >= 2,
                          "Input argument must have rank of at least 2 (input argument shape: ",
    if (input_shape.rank().is_static())
        channel_dim = input_shape[1];

    // Infer gamma/beta/mu/sigma shape, which must be consistent with a vector of size
    // channel_dim: merge all the channel-shaped inputs into one shape...
    PartialShape channel_shape{PartialShape::dynamic()};
    for (auto& inp : channel_shaped_inputs)
        NODE_VALIDATION_CHECK(node,
                              PartialShape::merge_into(channel_shape, inp.m_shape),
    // ...which must be rank 1...
    NODE_VALIDATION_CHECK(node,
                          channel_shape.merge_rank(1),
                          ") does not have rank 1.");
    // ...and whose single dimension must match the input's channel count.
    NODE_VALIDATION_CHECK(node,
                          Dimension::merge(channel_dim, channel_dim, channel_shape[0]),
                          "Input channel dimension (",
                          ") does not match shape for ",
    NODE_VALIDATION_CHECK(node,
                          channel_dim.is_dynamic() || channel_dim.get_length() >= 1,
                          "Channel count must be at least 1.");

    // Batch result shape is same as the input shape, except we may possibly have inferred more
    // information from the channel count via gamma/beta/etc.
    PartialShape batch_result_shape{input_shape};
    if (batch_result_shape.rank().is_static())
        batch_result_shape[1] = channel_dim;
    return std::make_tuple(et_result, batch_result_shape, PartialShape{channel_dim});
// Training-style batch-norm shape/type inference: validates gamma, beta,
// mean, and variance against the input and returns {result element type,
// batch result shape, channel shape}. Delegates to the shared helper.
std::tuple<element::Type, PartialShape, PartialShape>
    ngraph::infer_batch_norm_forward(const Node* node,
                                     element::Type input_element_type,
                                     element::Type gamma_element_type,
                                     element::Type beta_element_type,
                                     element::Type mean_element_type,
                                     element::Type variance_element_type,
                                     const PartialShape& input_shape,
                                     const PartialShape& gamma_shape,
                                     const PartialShape& beta_shape,
                                     const PartialShape& mean_shape,
                                     const PartialShape& variance_shape)
    // All four auxiliary inputs are channel-shaped and validated uniformly.
    return infer_batch_norm_forward_helper(node,
                                           {{gamma_element_type, gamma_shape, "gamma"},
                                            {beta_element_type, beta_shape, "beta"},
                                            {mean_element_type, mean_shape, "mean"},
                                            {variance_element_type, variance_shape, "variance"}});
// Batch-norm shape/type inference variant with only gamma and beta (no
// mean/variance inputs). Delegates to the shared helper.
std::tuple<element::Type, PartialShape, PartialShape>
    ngraph::infer_batch_norm_forward(const Node* node,
                                     element::Type input_element_type,
                                     element::Type gamma_element_type,
                                     element::Type beta_element_type,
                                     const PartialShape& input_shape,
                                     const PartialShape& gamma_shape,
                                     const PartialShape& beta_shape)
    return infer_batch_norm_forward_helper(
        {{gamma_element_type, gamma_shape, "gamma"}, {beta_element_type, beta_shape, "beta"}});
// Computes SAME_UPPER/SAME_LOWER auto-padding for a fully static image shape
// by delegating to try_apply_auto_padding (which cannot fail in this case).
// Padding for each spatial axis is appended to padding_above/padding_below.
void ngraph::infer_auto_padding(const Shape& image_shape,
                                const Shape& filter_shape,
                                const Strides& filter_strides,
                                const Strides& filter_dilations,
                                const op::PadType pad_type,
                                CoordinateDiff& padding_above,
                                CoordinateDiff& padding_below)
    const auto image_dims = std::vector<Dimension>(std::begin(image_shape), std::end(image_shape));
    // because image_shape is fully known the result of try_apply_auto_padding is ignored
    try_apply_auto_padding(image_dims,
// Computes SAME_UPPER/SAME_LOWER auto-padding for the spatial axes of
// image_shape. Returns false when the rank or any spatial dimension is
// dynamic (nothing can be computed); on success, per-axis padding is appended
// to padding_below/padding_above and true is returned.
// NOTE(review): a few lines (braces, early `return false;` statements, the
// final `return true;`) are elided in this view.
bool ngraph::try_apply_auto_padding(const PartialShape& image_shape,
                                    const Shape& filter_shape,
                                    const Strides& filter_strides,
                                    const Strides& filter_dilations,
                                    const op::PadType pad_type,
                                    CoordinateDiff& padding_above,
                                    CoordinateDiff& padding_below)
    // Only the two "SAME" modes are meaningful here.
    NGRAPH_CHECK(pad_type == op::PadType::SAME_UPPER || pad_type == op::PadType::SAME_LOWER);

    if (image_shape.rank().is_dynamic())
    const auto image_dims = static_cast<std::vector<Dimension>>(image_shape);
    // Spatial dims start at index 2 (after the batch and channel axes).
    const bool are_spatial_dims_static =
        std::all_of(std::begin(image_dims) + 2, std::end(image_dims), [](const Dimension& dim) {
            return dim.is_static();
    if (!are_spatial_dims_static)
    for (size_t i = 0; i < static_cast<size_t>(filter_shape.size()); i++)
        int64_t image_size = static_cast<int64_t>(image_dims[i + 2].get_length());
        // Effective filter extent on this axis after dilation.
        int64_t filter_size = (static_cast<int64_t>(filter_shape[i]) - 1) * filter_dilations[i] + 1;
        int64_t filter_stride = static_cast<int64_t>(filter_strides[i]);
        // "SAME" semantics: output size is ceil(image_size / stride).
        auto output_size = (image_size + filter_stride - 1) / filter_stride;
        auto padding_needed =
            std::max(int64_t(0), (output_size - 1) * filter_stride + filter_size - image_size);
        // Split the total padding; for odd totals the extra unit goes to the
        // side dictated by the pad type.
        auto padding_lhs = padding_needed / 2;
        auto padding_rhs = padding_needed - padding_lhs;
        padding_below.push_back(pad_type == op::PadType::SAME_UPPER ? padding_lhs : padding_rhs);
        padding_above.push_back(pad_type == op::PadType::SAME_UPPER ? padding_rhs : padding_lhs);
// Infers the result shape of a strided-slice-style operation over
// input_shape, following TensorFlow/numpy semantics for begin/end/strides and
// the begin/end/new-axis/shrink-axis/ellipsis masks.
//
// NOTE(review): some interior lines (braces, a few statements such as the
// declaration of `stride` and the final return) are elided in this view;
// comments describe only the visible logic.
PartialShape ngraph::infer_slice_shape(const Node* node,
                                       const PartialShape& input_shape,
                                       const std::vector<int64_t>& begin,
                                       const std::vector<int64_t>& end,
                                       const std::vector<int64_t>& strides,
                                       const AxisSet& begin_mask,
                                       const AxisSet& end_mask,
                                       const AxisSet& new_axis_mask,
                                       const AxisSet& shrink_axis_mask,
                                       const AxisSet& ellipsis_mask)
    // begin/end/strides must agree pairwise in length (when non-empty).
    if (begin.size() && end.size())
        NODE_VALIDATION_CHECK(node,
                              begin.size() == end.size(),
                              "Lower bounds and Upper bounds needs to have same number of values");
    if (begin.size() && strides.size())
        NODE_VALIDATION_CHECK(node,
                              begin.size() == strides.size(),
                              "Lower bounds and strides needs to have same number of values");
    if (end.size() && strides.size())
        NODE_VALIDATION_CHECK(node,
                              end.size() == strides.size(),
                              "Upper bounds and strides needs to have same number of values");

    NODE_VALIDATION_CHECK(node, ellipsis_mask.size() <= 1, "At most one ellipsis is allowed.");

    // With a dynamic input rank nothing further can be inferred.
    if (input_shape.rank().is_dynamic())
        return PartialShape::dynamic();

    NODE_VALIDATION_CHECK(node,
                          input_shape.rank().get_length() + new_axis_mask.size() >= begin.size(),
                          "Input rank plus number of new axis has to be at least the size of Lower "
                          "and Upper bounds vector.");

    std::vector<Dimension> dim;

    size_t input_shape_idx = 0;
    for (size_t axis = 0; axis < begin.size(); ++axis)
        // add all dimensions hidden under the ellipsis mask if ellipsis mask is set
        if (ellipsis_mask.count(axis))
            // only one bit in ellipsis mask is allowed
            int num_new_axis_after_ellipses = 0;
            int num_input_axis_before_ellipses = 0;
            for (size_t i = 0; i < axis; ++i)
                if (!new_axis_mask.count(i))
                    num_input_axis_before_ellipses++;
            for (size_t i = axis + 1; i < begin.size(); ++i)
                if (new_axis_mask.count(i))
                    num_new_axis_after_ellipses++;
            // Input axes covered by the ellipsis are copied through verbatim.
            int64_t num_input_axis_after_ellipses =
                (begin.size() - axis - num_new_axis_after_ellipses -
                 1); // -1 because it's a position of ellipses
            int64_t num_of_hidden_dims = input_shape.rank().get_length() -
                                         num_input_axis_after_ellipses -
                                         num_input_axis_before_ellipses;
            for (int64_t i = 0; i < num_of_hidden_dims; ++i)
                dim.emplace_back(input_shape[input_shape_idx]);
        // add new single dimension if new_axis_mask is set
        if (new_axis_mask.count(axis))
        // skip this dimension if shrink_axis_mask is set
        else if (shrink_axis_mask.count(axis))
        // calculating dimension (begin, end, begin_mask, end_mask, stride)

        // check dynamic dimension
        if (input_shape[input_shape_idx].is_dynamic())
            dim.emplace_back(Dimension::dynamic());
        int64_t lb = begin[axis];
        int64_t ub = end[axis];

        // set default value for stride or use given value
        if (strides.size() > axis)
            stride = strides[axis];
        NODE_VALIDATION_CHECK(node, stride != 0, "Stride must be non-zero");

        // convert negative indexes to positive
        // take max for this case: if abs(lb) > input_shape[input_shape_idx],then after
        // conversion it would still be negative,
        // so according to tensorflow and numpy we just get 0
        lb = std::max(input_shape[input_shape_idx].get_length() + lb, int64_t(0));
        // for a negative ub the floor sentinel depends on the stride sign:
        // -1 means "one before the first element" when slicing backwards
        ub = std::max(input_shape[input_shape_idx].get_length() + ub,
                      stride > 0 ? int64_t(0) : int64_t(-1));

        // apply restrictions when begin or end values more than max possible values.
        lb = std::min(input_shape[input_shape_idx].get_length(), lb);
        ub = std::min(input_shape[input_shape_idx].get_length(), ub);

        int64_t dimension = 0;
        // Negative-stride path: masked begin/end select the extreme indices.
        if (begin_mask.count(axis))
            lb = input_shape[input_shape_idx].get_length() - 1;
        if (end_mask.count(axis))
        lb = std::min(lb, input_shape[input_shape_idx].get_length() - 1);
        lb -= 1; // we always get 1st element, so we need decrease range
        dimension = (ub - lb) / stride + 1;
        // Positive-stride path.
        if (begin_mask.count(axis))
        if (end_mask.count(axis))
            ub = input_shape[input_shape_idx].get_length();
        lb += 1; // we always get 1st element, so we need decrease range
        dimension = (ub - lb) / stride + 1;
        dim.emplace_back(dimension);

    // get remaining values: input axes not addressed by begin/end are
    // passed through unchanged.
    for (; input_shape_idx < input_shape.rank().get_length(); ++input_shape_idx)
        dim.emplace_back(input_shape[input_shape_idx]);
// Normalizes each axis in `axes` against `tensor_rank` (resolving negative
// axes) and returns the normalized values as size_t. Validation/error
// reporting is delegated to normalize_axis, using node_description for
// messages.
std::vector<size_t> ngraph::normalize_axes(const std::string& node_description,
                                           const std::vector<int64_t>& axes,
                                           const Rank& tensor_rank)
    std::vector<size_t> new_axes;
    for (const auto& axis : axes)
        new_axes.push_back(normalize_axis(node_description, axis, tensor_rank));
// Convenience overload: normalizes `axis` against `tensor_rank`, using the
// node's description string for error messages.
int64_t ngraph::normalize_axis(const Node* node, std::int64_t axis, const Rank& tensor_rank)
    return normalize_axis(node->description(), axis, tensor_rank);
// Normalizes a possibly-negative `axis` against `tensor_rank`, which must be
// static: a negative axis cannot be resolved against a dynamic rank.
// NOTE(review): the `axis` parameter line and some braces are elided in this
// view.
int64_t ngraph::normalize_axis(const std::string& node_description,
                               const Rank& tensor_rank)
    // Handling negative axis requires static tensor rank
    NGRAPH_CHECK(tensor_rank.is_static(),
                 " Rank must be static in order to normalize negative axis=",
    if (tensor_rank.is_dynamic())
    // Delegate to the range-checked overload; for rank 0 the only accepted
    // axis range collapses to [?, 0].
    const auto tensor_rank_value = tensor_rank.get_length();
    return normalize_axis(node_description,
                          tensor_rank_value ? (tensor_rank_value - 1) : 0);
// Convenience overload with an explicit accepted axis range; uses the node's
// description string for error messages.
// NOTE(review): the `axis` parameter line is elided in this view.
int64_t ngraph::normalize_axis(const Node* node,
                               std::uint64_t tensor_rank,
                               std::int64_t axis_range_min,
                               std::int64_t axis_range_max)
    return ngraph::normalize_axis(
        node->description(), axis, tensor_rank, axis_range_min, axis_range_max);
// Validates that `axis` lies within [axis_range_min, axis_range_max] and maps
// a negative axis to its non-negative equivalent by adding the tensor rank.
// NOTE(review): the `axis` parameter line, error-message arguments, and the
// `if (axis < 0)`-style guard around the adjustment are elided in this view.
int64_t ngraph::normalize_axis(const std::string& node_description,
                               std::uint64_t tensor_rank,
                               std::int64_t axis_range_min,
                               std::int64_t axis_range_max)
    // Accepted range of value for axis is [axis_range_min, axis_range_max].
    NGRAPH_CHECK(((axis >= axis_range_min) && (axis <= axis_range_max)),
                 " out of the tensor rank range [",
    // A negative axis counts from the back: -1 is the last axis.
        axis = axis + tensor_rank;
    return int64_t(axis);
// Computes SAME_UPPER/SAME_LOWER padding for ConvolutionBackpropData so that
// the op produces `output_shape`. All Shape/Strides arguments here hold
// spatial dimensions only; results are written to pads_begin/pads_end.
// NOTE(review): the trailing term of the total_padding expression (presumably
// output_padding[i]) and some braces are elided in this view.
void ngraph::opset1::infer_conv_backprop_auto_padding(const Shape& input_data_shape,
                                                      const Shape& filters_shape,
                                                      const Shape& output_shape,
                                                      const Strides& strides,
                                                      const Strides& dilations,
                                                      const op::PadType auto_pad_type,
                                                      const CoordinateDiff& output_padding,
                                                      CoordinateDiff& pads_begin,
                                                      CoordinateDiff& pads_end)
    // Only the two "SAME" modes are meaningful here.
    NGRAPH_CHECK(auto_pad_type == op::PadType::SAME_UPPER ||
                 auto_pad_type == op::PadType::SAME_LOWER);

    // Every per-axis vector must cover the same number of spatial dims.
    size_t num_spatial_dims = input_data_shape.size();
    NGRAPH_CHECK(filters_shape.size() == num_spatial_dims && strides.size() == num_spatial_dims &&
                 dilations.size() == num_spatial_dims && pads_begin.size() == num_spatial_dims &&
                 pads_end.size() == num_spatial_dims && output_padding.size() == num_spatial_dims);

    pads_begin = CoordinateDiff(num_spatial_dims);
    pads_end = CoordinateDiff(num_spatial_dims);

    for (uint64_t i = 0; i < num_spatial_dims; ++i)
        // Total padding required on this axis to hit output_shape[i].
        int total_padding = strides[i] * (input_data_shape[i] - 1) +
                            dilations[i] * (filters_shape[i] - 1) + 1 - output_shape[i] +
        // Split total padding between begin and end; which side receives the
        // extra unit (for odd totals) depends on the pad type.
        if (auto_pad_type != op::PadType::SAME_UPPER)
            pads_begin[i] = total_padding / 2;
            pads_end[i] = total_padding - pads_begin[i];
            pads_end[i] = total_padding / 2;
            pads_begin[i] = total_padding - pads_end[i];
/// \brief Scalar variant describes value of an Output, for use in max shape determination
///
/// For tensor values, we use the maximum value in the tensor
    /// \brief No information known about the output
    /// \brief uint64_t associated with the output
    MaxValue(uint64_t value)
    /// \brief Per-slice maxima along an axis; overall value is their maximum
    MaxValue(const vector<uint64_t>& slices, int64_t slice_axis)
        , m_slice_axis(slice_axis)
        m_value = *max_element(m_slices.begin(), m_slices.end());
    // Overall upper bound; uint64_t max means "unknown/unbounded".
    uint64_t m_value{numeric_limits<uint64_t>::max()};
    // Per-slice maxima (populated by the concat handler).
    vector<uint64_t> m_slices;
    // Axis the slices lie along, or -1 when no slice information is kept.
    int64_t m_slice_axis{-1};
// Evaluator handler for Constant nodes: for integral constants the MaxValue
// is the maximum element; other element types keep the default "unknown"
// MaxValue. NOTE(review): the loop bodies accumulating into max_val and some
// braces are elided in this view.
vector<MaxValue> exec_constant(Node* node, vector<MaxValue>& inputs)
    auto result = MaxValue();
    auto op = as_type<op::Constant>(node);
    auto element_type = op->get_output_element_type(0);
    if (element_type.is_integral())
        uint64_t max_val = 0;
        if (element_type.is_signed())
            // Signed values are widened to int64_t before comparison.
            for (auto elt : op->cast_vector<int64_t>())
            // Unsigned values compare directly as uint64_t.
            for (auto elt : op->cast_vector<uint64_t>())
        result = MaxValue(max_val);
// Evaluator handler for Minimum: the result's upper bound is the minimum of
// the two inputs' bounds, clamped to the largest value representable in the
// output element type.
vector<MaxValue> exec_minimum(Node* node, vector<MaxValue>& inputs)
    uint64_t min_value = numeric_limits<uint64_t>::max();
    // Seed the bound with the output type's maximum so the result can never
    // claim a value the type cannot hold.
    switch (node->get_output_element_type(0))
    case element::Type_t::i8: min_value = numeric_limits<int8_t>::max(); break;
    case element::Type_t::i16: min_value = numeric_limits<int16_t>::max(); break;
    case element::Type_t::i32: min_value = numeric_limits<int32_t>::max(); break;
    case element::Type_t::i64: min_value = numeric_limits<int64_t>::max(); break;
    case element::Type_t::u8: min_value = numeric_limits<uint8_t>::max(); break;
    case element::Type_t::u16: min_value = numeric_limits<uint16_t>::max(); break;
    case element::Type_t::u32: min_value = numeric_limits<uint32_t>::max(); break;
    case element::Type_t::u64: min_value = numeric_limits<uint64_t>::max(); break;
    // Tighten with both operands' bounds: min(a, b) <= min(bound_a, bound_b).
    min_value = min(min_value, inputs.at(0).m_value);
    min_value = min(min_value, inputs.at(1).m_value);
    return {MaxValue(min_value)};
// Evaluator handler for Concat: records each input's bound as a per-slice
// maximum along the concatenation axis (later consumed by exec_reduce_min).
vector<MaxValue> exec_concat(Node* node, vector<MaxValue>& inputs)
    auto op = as_type<op::v0::Concat>(node);
    vector<uint64_t> slice_maxen;
    for (auto input : inputs)
        slice_maxen.push_back(input.m_value);
    auto axis = op->get_concatenation_axis();
    return {MaxValue(slice_maxen, axis)};
// Evaluator handler for ReduceMin: when the reduction axis is a constant,
// integral, shape-{1} tensor naming exactly the slice axis of a MaxValue that
// carries per-slice maxima, the reduced bound is the minimum of those slice
// maxima. Otherwise the overall bound is propagated unchanged (still valid,
// just looser).
vector<MaxValue> exec_reduce_min(Node* node, vector<MaxValue>& inputs)
    auto data = inputs.at(0);
    if (data.m_slice_axis >= 0 && data.m_slices.size() > 1)
        // The axes input (input 1) must be a constant to refine the bound.
        if (auto indices_const = as_type<op::v0::Constant>(node->get_input_node_ptr(1)))
            if (indices_const->get_output_element_type(0).is_integral())
                auto indices_shape = indices_const->get_output_shape(0);
                if (indices_shape == Shape{1})
                    auto indices = indices_const->cast_vector<int64_t>();
                    auto axis = indices.at(0);
                    if (axis == data.m_slice_axis)
                        // Reducing along the slice axis: the min over slices
                        // bounds the reduced value.
                        MaxValue(*min_element(data.m_slices.begin(), data.m_slices.end()))};
    // Fallback: the overall maximum remains a correct upper bound.
    return {MaxValue(data.m_value)};
1086 vector<MaxValue> exec_nop(Node* node, vector<MaxValue>& inputs) { return {inputs.at(0)}; }
// Computes an upper bound for `value` by symbolically evaluating its producer
// graph with MaxValue semantics (constants, min, concat, reduce-min, and a
// few pass-through ops). Returns {found, bound}: `found` is false when no
// finite bound could be determined (bound left at uint64_t max).
pair<bool, uint64_t> ngraph::maximum_value(const Output<Node>& value)
    // Handler table is built once; it is immutable after initialization.
    static Evaluator<MaxValue>::op_handler_map handlers = {
        {op::v0::Concat::type_info, exec_concat},
        {op::v0::Constant::type_info, exec_constant},
        {op::v0::Convert::type_info, exec_nop},
        {op::v0::Minimum::type_info, exec_minimum},
        {op::v1::Minimum::type_info, exec_minimum},
        {op::v1::ReduceMin::type_info, exec_reduce_min},
        {op::v0::Squeeze::type_info, exec_nop},
        {op::v0::Unsqueeze::type_info, exec_nop}};
    Evaluator<MaxValue>::value_map value_map;
    Evaluator<MaxValue> evaluator(handlers, value_map);
    auto val = evaluator.evaluate(value);
    // uint64_t max is the "unknown" sentinel; anything smaller is a real bound.
    return pair<bool, uint64_t>(val.m_value < numeric_limits<uint64_t>::max(), val.m_value);
// Evaluates `outputs` by walking their producer graphs with a single
// universal handler: each node is evaluated on host tensors, storing results
// either in caller-provided tensors (from output_tensor_map) or in freshly
// allocated scratch tensors. A node whose evaluate() fails aborts via
// NGRAPH_CHECK. NOTE(review): some braces and an else line are elided in this
// view.
void ngraph::evaluate_nodes(std::map<RawNodeOutput, HostTensorPtr>& value_map,
                            std::map<RawNodeOutput, HostTensorPtr>& output_tensor_map,
                            const OutputVector& outputs)
    Evaluator<HostTensorPtr> evaluator({}, value_map);
    // (sic: "univeral" is the Evaluator API's spelling of this method.)
    evaluator.set_univeral_handler(
        [&output_tensor_map](Node* node,
                             const HostTensorVector& input_tensors) -> HostTensorVector {
            HostTensorVector output_tensors;
            for (auto v : node->outputs())
                // Use the caller's tensor when one was registered for this
                // output; otherwise allocate a scratch host tensor.
                auto it = output_tensor_map.find(v);
                if (it == output_tensor_map.end())
                    auto c = make_shared<HostTensor>(v);
                    output_tensors.push_back(c);
                    output_tensors.push_back(it->second);
            if (node->evaluate(output_tensors, input_tensors))
                return output_tensors;
            NGRAPH_CHECK(false, "Evaluation failed on ", node);
    // Kick off evaluation from each requested output.
    for (auto value : outputs)
        evaluator.evaluate(value);