// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <description_buffer.hpp>
#include "ie_built_in_impl.hpp"
#include <map>
#include <memory>
#include <string>
#include <vector>

namespace InferenceEngine {
namespace ShapeInfer {

/**
 * @brief Implementation of shape inference for RNN sequence layers
 */
class RNNShapeProp : public BuiltInShapeInferImpl {
public:
    explicit RNNShapeProp(const std::string& type) : BuiltInShapeInferImpl(type) {}

    void inferShapesImpl(const std::vector<Blob::CPtr>& inBlobs,
                         const std::map<std::string, std::string>& params,
                         const std::map<std::string, Blob::Ptr>& blobs,
                         std::vector<SizeVector>& outShapes) override {
        // Build a temporary RNNSequenceLayer so the shared validator can
        // parse and check the layer parameters (hidden_size and friends).
        LayerParams lp{};
        RNNSequenceLayer rnn(lp);
        rnn.params = params;
        rnn.type = _type;
        rnn.precision = Precision::FP32;  // FIXME: No ability to discover current precision. Assume FP32
        validate(&rnn, inBlobs, params, blobs);

        int state_size = rnn.hidden_size;

        // Output 0 repeats the input data blob dims, with the innermost
        // (feature) dimension replaced by the hidden state size.
        auto data_dims = inShapes[0];
        data_dims[2] = static_cast<size_t>(state_size);
        outShapes.push_back(data_dims);

        // Remaining outputs are state blobs; they keep the shapes of the
        // corresponding state inputs.
        for (size_t i = 1; i < inShapes.size(); i++) {
            outShapes.push_back(inShapes[i]);
        }
    }
};

}  // namespace ShapeInfer
}  // namespace InferenceEngine
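
// Usage sketch (illustrative only, not part of the build). It assumes the
// base-class entry point BuiltInShapeInferImpl::inferShapes() fills the
// protected `inShapes` member from `inBlobs` before delegating to
// inferShapesImpl(); the layer type string, shapes, and hidden_size value
// below are hypothetical:
//
//     // LSTM sequence: data [N=2, T=10, D=64], hidden/cell states [N=2, S=128],
//     // params["hidden_size"] == "128"
//     RNNShapeProp prop("LSTMSequence");
//     std::vector<SizeVector> outShapes;
//     ResponseDesc resp;
//     prop.inferShapes(inBlobs, params, blobs, outShapes, &resp);
//     // outShapes[0] == {2, 10, 128}  // data dims, dim 2 replaced by hidden_size
//     // outShapes[1] == {2, 128}      // state shapes pass through unchanged
//     // outShapes[2] == {2, 128}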