[platform/upstream/dldt.git] inference-engine/src/mkldnn_plugin/nodes/mkldnn_deconv_node.h
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <ie_common.h>
#include <mkldnn_node.h>
#include <memory>
#include <string>
#include <vector>

namespace MKLDNNPlugin {

class MKLDNNDeconvolutionNode : public MKLDNNNode {
public:
    MKLDNNDeconvolutionNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng);
    ~MKLDNNDeconvolutionNode() override = default;

    void getSupportedDescriptors() override;
    void createDescriptor(const std::vector<InferenceEngine::TensorDesc>& inputDesc,
                          const std::vector<InferenceEngine::TensorDesc>& outputDesc) override;
    void createPrimitive() override;
    void execute(mkldnn::stream strm) override;
    bool created() const override;
    bool canBeInPlace() const override {
        return false;
    }

    MKLDNNMemoryDesc getSrcMemDesc(mkldnn::primitive_desc_iterator &primitive_desc_it, size_t idx) override;
    MKLDNNMemoryDesc getDstMemDesc(mkldnn::primitive_desc_iterator &primitive_desc_it, size_t idx) override;

private:
    bool withBiases;                 // layer provides a bias blob
    bool withGroups;                 // grouped deconvolution
    bool isDW;                       // depthwise deconvolution
    std::vector<int> stride;
    std::vector<int> paddingL;
    std::vector<int> dilation;
    std::vector<int> paddingR;
    MKLDNNDims weightsDims;
    static Register<MKLDNNDeconvolutionNode> reg;
    InferenceEngine::Blob::Ptr biases;
    // Forward convolution descriptors are kept only as hints for the
    // backward-data descriptors, which actually implement the deconvolution.
    std::vector<std::shared_ptr<mkldnn::convolution_forward::desc>> descs_fwd;
    std::vector<std::shared_ptr<mkldnn::convolution_backward_data::desc>> descs_bwd;
};

}  // namespace MKLDNNPlugin
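The paired descs_fwd / descs_bwd members reflect how a deconvolution is expressed in MKL-DNN: a forward convolution descriptor is built only to act as a hint, while the convolution_backward_data descriptor is the one that gets executed, with the deconvolution output playing the role of diff_src and its input the role of diff_dst. Below is a minimal standalone sketch of that mapping, assuming the mkldnn 0.x C++ API this version of dldt builds against; the shapes, strides, and padding are hypothetical placeholders, and this is not the plugin's actual implementation.

// sketch.cpp - illustrative only; assumes the mkldnn 0.x C++ API
#include <mkldnn.hpp>

int main() {
    using namespace mkldnn;
    engine eng(engine::cpu, 0);

    // Hypothetical 2D deconvolution: input 1x32x32x32 -> output 1x16x64x64,
    // 4x4 kernel, stride 2, padding 1.
    memory::dims deconv_out = {1, 16, 64, 64};   // plays the role of conv "src" / diff_src
    memory::dims weights    = {32, 16, 4, 4};    // conv weights (oihw)
    memory::dims deconv_in  = {1, 32, 32, 32};   // plays the role of conv "dst" / diff_dst
    memory::dims strides = {2, 2}, pad_l = {1, 1}, pad_r = {1, 1};

    auto out_md = memory::desc(deconv_out, memory::data_type::f32, memory::format::any);
    auto wgt_md = memory::desc(weights,    memory::data_type::f32, memory::format::any);
    auto in_md  = memory::desc(deconv_in,  memory::data_type::f32, memory::format::any);

    // Forward convolution descriptor: used only as a hint for the backward-data
    // primitive descriptor (mirrors descs_fwd in the node).
    convolution_forward::desc fwd_desc(prop_kind::forward_training,
            algorithm::convolution_direct, out_md, wgt_md, in_md,
            strides, pad_l, pad_r, padding_kind::zero);
    convolution_forward::primitive_desc fwd_pd(fwd_desc, eng);

    // Backward-data descriptor: its diff_src is the deconvolution output and
    // its diff_dst is the deconvolution input (mirrors descs_bwd).
    convolution_backward_data::desc bwd_desc(algorithm::convolution_direct,
            out_md, wgt_md, in_md, strides, pad_l, pad_r, padding_kind::zero);
    convolution_backward_data::primitive_desc bwd_pd(bwd_desc, eng, fwd_pd);

    (void)bwd_pd;  // in the node, a primitive built from such a descriptor is what execute() runs
    return 0;
}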