1 // Copyright (C) 2018 Intel Corporation
3 // SPDX-License-Identifier: Apache-2.0
#pragma once

#include <vector>

#include <mkldnn_node.h>
14 namespace MKLDNNPlugin {
16 class MKLDNNRNN : public MKLDNNNode {
18 MKLDNNRNN(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng);
19 ~MKLDNNRNN() override = default;
21 void getSupportedDescriptors() override;
22 void createPrimitive() override;
23 bool created() const override;
25 void createDescriptor(const std::vector<InferenceEngine::TensorDesc>& inputDesc,
26 const std::vector<InferenceEngine::TensorDesc>& outputDesc) override;
28 void execute(mkldnn::stream strm) override;
31 static Register<MKLDNNRNN> reg;
33 InferenceEngine::CellType cellr_type = InferenceEngine::CellType::LSTM;
34 /** Native order if [batch, seq, data], other case is [seq, batch, data] */
35 bool nativeOrder = true;
36 bool swap_state = false;
42 const size_t num_gates = 4;
44 MKLDNNMemoryDesc in_data_d;
45 MKLDNNMemoryDesc out_data_d;
47 MKLDNNMemoryDesc in_state_d;
48 MKLDNNMemoryDesc out_state_d;
50 MKLDNNMemoryDesc w_data_d;
51 MKLDNNMemoryDesc w_state_d;
52 MKLDNNMemoryDesc w_bias_d;
54 std::vector<mkldnn::reorder> exec_before;
55 std::vector<mkldnn::reorder> exec_after;
58 } // namespace MKLDNNPlugin