1 // Copyright (C) 2018-2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
7 #include "ie_built_in_impl.hpp"
13 namespace InferenceEngine {
14 namespace ShapeInfer {
17 *@brief Implementation of shape inference for the Squeeze layer
19 class SqueezeShapeProp : public BuiltInShapeInferImpl {
// NOTE(review): this view of the file is elided — the original lines declaring
// data_dims/idx_dims/outShape, 'public:', the LayerParams 'lp', 'bool found',
// 'break;', the 'default:' case and several closing braces are not visible here.
// The comments below describe only the statements that are visible.
21 explicit SqueezeShapeProp(const std::string& type) : BuiltInShapeInferImpl(type) {}
// Computes the Squeeze output shape: copies the data input's dims while
// dropping every dimension named by the second ("indices_to_squeeze") input.
23 void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
24 const std::map<std::string, std::string>& params,
25 const std::map<std::string, Blob::Ptr>& blobs,
26 std::vector<SizeVector>& outShapes) override {
// Build a transient layer object from the raw params and validate the inputs.
28 SqueezeLayer layer(lp);
29 layer.params = params;
31 validate(&layer, inBlobs, params, blobs);
// Input slots: 0 = data tensor, 1 = vector of axes to squeeze.
33 const size_t SQUEEZE_DATA = 0;
34 const size_t SQUEEZE_INDEXES = 1;
// The axes input must be (at most) 1-D.
39 idx_dims = inBlobs[SQUEEZE_INDEXES]->getTensorDesc().getDims();
40 if (idx_dims.size() > 1)
41 THROW_IE_EXCEPTION << " Index vector should be 1 dimension";
// Only I32 and FP32 axis indices are accepted.
43 if (inBlobs[SQUEEZE_INDEXES]->getTensorDesc().getPrecision() != Precision::I32 &&
44 inBlobs[SQUEEZE_INDEXES]->getTensorDesc().getPrecision() != Precision::FP32)
45 THROW_IE_EXCEPTION << " Incorrect 'indices_to_squeeze' input precision. Only FP32 and I32 are supported!";
47 data_dims = inBlobs[SQUEEZE_DATA]->getTensorDesc().getDims();
// The axes vector must not be longer than the data rank, except for the
// special 1-D-data / single-index case which is allowed through.
49 if (data_dims.size() <= idx_dims[0] && !(data_dims.size() == 1 && idx_dims[0] == 1))
50 THROW_IE_EXCEPTION << " Incompatible number of data dimensions and indexes vector length!";
// Two near-identical branches differing only in how the axis values are read.
52 switch (inBlobs[SQUEEZE_INDEXES]->precision()) {
53 case Precision::FP32: {
// Raw pointer to the axis values, offset by the blob's padding.
54 float* idx_data = inBlobs[SQUEEZE_INDEXES]->cbuffer().as<float*>() +
55 inBlobs[SQUEEZE_INDEXES]->getTensorDesc().getBlockingDesc().getOffsetPadding();
// First pass: validate every requested axis.
56 for (size_t i = 0; i < idx_dims[0]; i++) {
57 float axis = idx_data[i];
// presumably guarded by an elided 'if (axis < 0)' — normalizes negative axes;
// TODO confirm against the full file.
59 axis += data_dims.size();
// NOTE(review): '>' lets axis == data_dims.size() through, and the next
// visible line then reads data_dims[axis] out of range — looks like an
// off-by-one ('>=' intended); confirm against the non-elided file.
61 if (axis > data_dims.size()) {
62 THROW_IE_EXCEPTION << "Index to squeeze exceeds data tensor dimension";
// NOTE(review): 'axis' is a float here, so data_dims[axis] indexes via
// truncation — non-integral axis values are silently truncated.
63 } else if (data_dims[axis] != 1) {
64 THROW_IE_EXCEPTION << "Index to squeeze of data tensor dimension is not 1";
// Second pass: keep every data dim whose index is not listed in the axes input.
67 for (size_t j = 0; j < data_dims.size(); j++) {
69 for (size_t i = 0; i < inBlobs[SQUEEZE_INDEXES]->size(); i++) {
70 int32_t axis = idx_data[i];
// presumably the same elided negative-axis guard as above — TODO confirm.
72 axis += data_dims.size();
73 if (j == static_cast<size_t>(axis)) found = true;
75 if (!found) outShape.push_back(data_dims[j]);
79 case Precision::I32: {
// Same algorithm as the FP32 branch, reading axes as int32.
80 int32_t* idx_data = inBlobs[SQUEEZE_INDEXES]->cbuffer().as<int32_t*>() +
81 inBlobs[SQUEEZE_INDEXES]->getTensorDesc().getBlockingDesc().getOffsetPadding();
// First pass: validate every requested axis.
82 for (size_t i = 0; i < idx_dims[0]; i++) {
83 int32_t axis = idx_data[i];
// presumably guarded by an elided 'if (axis < 0)' — TODO confirm.
85 axis += data_dims.size();
// NOTE(review): same suspected off-by-one as the FP32 branch ('>' vs '>=').
87 if (axis > data_dims.size()) {
88 THROW_IE_EXCEPTION << "Index to squeeze exceeds data tensor dimension";
89 } else if (data_dims[axis] != 1) {
90 THROW_IE_EXCEPTION << "Index to squeeze of data tensor dimension is not 1";
// Second pass: copy every dim not named by the axes input.
93 for (size_t j = 0; j < data_dims.size(); j++) {
95 for (size_t i = 0; i < inBlobs[SQUEEZE_INDEXES]->size(); i++) {
96 int32_t axis = idx_data[i];
// presumably the same elided negative-axis guard — TODO confirm.
98 axis += data_dims.size();
99 if (j == static_cast<size_t>(axis)) found = true;
101 if (!found) outShape.push_back(data_dims[j]);
// Tail of an elided 'default:' THROW for unsupported axis precisions.
107 << "Incorrect 'indices_to_squeeze' input precision. Only FP32 and I32 are supported!";
// Single output: the squeezed shape.
109 outShapes.push_back(outShape);
113 } // namespace ShapeInfer
114 } // namespace InferenceEngine