// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>
#include <gmock/gmock-spec-builders.h>
#include "mkldnn_plugin/mkldnn_graph.h"

#include "test_graph.hpp"

#include <mock_error_listener.hpp>
#include <mkldnn_plugin/mkldnn_extension_mngr.h>
#include "tests_common.hpp"

using namespace ::testing;
using namespace std;
using namespace mkldnn;

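// Shared fixture for the MKLDNN graph Reorder tests; SetUp simply delegates
// to TestsCommon.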
class MKLDNNGraphReorderTests: public TestsCommon {
protected:
    virtual void SetUp() {
        TestsCommon::SetUp();
    }
};

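// A Reorder node constructed on its own, without other layers to attach its
// input and output edges to, cannot build primitive descriptors:
// getSupportedDescriptors() is expected to throw.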
TEST_F(MKLDNNGraphReorderTests, cannotCreatePrimitiveDescriptorsWithoutOtherLayers) {
    std::shared_ptr<MKLDNNPlugin::MKLDNNNode> node;
    mkldnn::engine eng(mkldnn::engine::kind::cpu, 0);

    InferenceEngine::CNNLayerPtr layer(new InferenceEngine::CNNLayer({"TestReorder", "Reorder", InferenceEngine::Precision::FP32}));
    node.reset(MKLDNNPlugin::MKLDNNNode::CreateNode(layer, eng, {}));
    ASSERT_EQ(MKLDNNPlugin::Type::Reorder, node->getType());

    ASSERT_THROW(node->getSupportedDescriptors(), InferenceEngine::details::InferenceEngineException);
}

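// Builds a two-layer network (Input -> 1x1 Convolution). The plugin is
// expected to insert two Reorder nodes around the convolution: one from the
// plain NCHW input layout to the convolution's blocked layout, and one back
// to NCHW for the network output. Each Reorder must expose exactly one
// primitive descriptor of impl type `reorder` with a single input and a
// single output config.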
TEST_F(MKLDNNGraphReorderTests, CreateReorder) {
    std::string model = R"V0G0N(
<Net Name="Convolution_Only" version="2" precision="FP32" batch="1">
    <layers>
        <layer name="in1" type="Input" precision="FP32" id="0">
            <output>
                <port id="0">
                    <dim>1</dim>
                    <dim>9</dim>
                    <dim>16</dim>
                    <dim>32</dim>
                </port>
            </output>
        </layer>
        <layer name="conv1" id="1" type="Convolution" precision="FP32">
            <convolution stride-x="1" stride-y="1"
                         pad-x="0"    pad-y="0"
                         kernel-x="1" kernel-y="1"
                         output="17"   group="1"/>

            <weights offset="0" size="612" />
            <biases offset="612" size="68" />

            <input>
                <port id="1">
                    <dim>1</dim>
                    <dim>9</dim>
                    <dim>16</dim>
                    <dim>32</dim>
                </port>
            </input>
            <output>
                <port id="2">
                    <dim>1</dim>
                    <dim>17</dim>
                    <dim>16</dim>
                    <dim>32</dim>
                </port>
            </output>
        </layer>
    </layers>
    <edges>
        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
    </edges>
</Net>
)V0G0N";

    InferenceEngine::CNNNetReader net_reader;
    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));

    // Weight blob: 1x1 kernel, 17 output and 9 input channels, group 1, i.e.
    // 153 weight floats (612 bytes) plus 17 bias floats (68 bytes), matching
    // the offsets declared in the model above.
    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(
            InferenceEngine::Precision::U8, InferenceEngine::C,
            {(1 * 1 * 17 * 9 / 1 + 17) * sizeof(float)});
    weights->allocate();
    fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);

    net_reader.SetWeights(weights_ptr);

    MKLDNNGraphTestClass graph;
    graph.CreateGraph(net_reader.getNetwork());

    auto& nodes = graph.getNodes();
    for (size_t i = 0; i < nodes.size(); i++) {
        if (nodes[i]->getType() == MKLDNNPlugin::Reorder) {
            ASSERT_EQ(1, nodes[i]->getSupportedPrimitiveDescriptors().size());
            ASSERT_EQ(MKLDNNPlugin::impl_desc_type::reorder,
                      nodes[i]->getSupportedPrimitiveDescriptors()[0].getImplementationType());
            ASSERT_EQ(1, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().inConfs.size());
            if (i == 1) {
                // Reorder in front of the convolution: plain NCHW in, blocked layout out.
                ASSERT_EQ(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().inConfs[0].desc.getLayout());
                ASSERT_NE(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().outConfs[0].desc.getLayout());
            } else {
                // Reorder behind the convolution: blocked layout in, plain NCHW out.
                ASSERT_NE(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().inConfs[0].desc.getLayout());
                ASSERT_EQ(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().outConfs[0].desc.getLayout());
            }
            ASSERT_EQ(1, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().outConfs.size());
        }
    }
}

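// Builds Input -> Reshape -> Reshape -> ScaleShift and additionally marks
// reshape1 as a network output. Reshape runs in place, so serving that
// intermediate output requires an in-place Reorder. With the input filled
// with 1 and the ScaleShift weights and biases all set to 2, the reshape1
// output must stay 1 and the final output must be 1 * 2 + 2 = 4.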
TEST_F(MKLDNNGraphReorderTests, CreateInPlaceReorder) {
    std::string model = R"V0G0N(
<Net Name="InPlaceReorder_Only" version="2" precision="FP32" batch="1">
    <layers>
        <layer name="in1" type="Input" precision="FP32" id="0">
            <output>
                <port id="0">
                    <dim>1</dim>
                    <dim>9</dim>
                    <dim>16</dim>
                    <dim>32</dim>
                </port>
            </output>
        </layer>
        <layer name="reshape1" id="1" type="Reshape" precision="FP32">
            <input>
                <port id="1">
                    <dim>1</dim>
                    <dim>9</dim>
                    <dim>16</dim>
                    <dim>32</dim>
                </port>
            </input>
            <output>
                <port id="2">
                    <dim>32</dim>
                    <dim>144</dim>
                </port>
            </output>
        </layer>
        <layer name="reshape2" id="2" type="Reshape" precision="FP32">
            <data axis="0" num_axes="-1" dim="1, 4608"/>
            <input>
                <port id="1">
                    <dim>32</dim>
                    <dim>144</dim>
                </port>
            </input>
            <output>
                <port id="2">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>48</dim>
                    <dim>32</dim>
                </port>
            </output>
        </layer>
        <layer name="scaleshift" id="3" type="ScaleShift" precision="FP32">
            <input>
                <port id="1">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>48</dim>
                    <dim>32</dim>
                </port>
            </input>
            <output>
                <port id="2">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>48</dim>
                    <dim>32</dim>
                </port>
            </output>
            <blobs>
                <weights offset="0" size="12"/>
                <biases offset="12" size="12"/>
            </blobs>
        </layer>
    </layers>
    <edges>
        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
        <edge from-layer="1" from-port="2" to-layer="2" to-port="1"/>
        <edge from-layer="2" from-port="2" to-layer="3" to-port="1"/>
    </edges>
</Net>
)V0G0N";

    InferenceEngine::CNNNetReader net_reader;
    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));

    // 24 bytes = 3 scale floats + 3 shift floats for the ScaleShift layer,
    // all set to 2.
    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {24});
    weights->allocate();
    float *data = weights->buffer().as<float *>();
    size_t dataSize = weights->byteSize() / sizeof(float);
    for (size_t i = 0; i < dataSize; i++) {
        data[i] = 2;
    }
    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);

    net_reader.SetWeights(weights_ptr);
    // Requesting reshape1 as an extra output is what forces the in-place Reorder.
    net_reader.getNetwork().addOutput("reshape1");

    MKLDNNGraphTestClass graph;
    graph.CreateGraph(net_reader.getNetwork());

    InferenceEngine::SizeVector dims_src = {1, 9, 16, 32};

    InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
    src->allocate();
    // Fill the input with 1 so the expected output values are easy to compute.
    data = src->buffer().as<float *>();
    dataSize = src->size();
    for (size_t i = 0; i < dataSize; i++) {
        data[i] = 1;
    }

    auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());

    if (srcPtr == nullptr)
        FAIL() << "Cannot cast blob to TBlob<float>.";

    InferenceEngine::BlobMap srcs;
    srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));

    InferenceEngine::OutputsDataMap out;
    out = net_reader.getNetwork().getOutputsInfo();
    InferenceEngine::BlobMap outputBlobs;

    // Allocate a blob for each of the two network outputs (reshape1 and scaleshift).
    auto it = out.begin();
    std::pair<std::string, InferenceEngine::DataPtr> item = *it;

    InferenceEngine::TBlob<float>::Ptr output1;
    output1 = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
    output1->allocate();
    outputBlobs[item.first] = output1;

    item = *(++it);

    InferenceEngine::TBlob<float>::Ptr output2;
    output2 = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
    output2->allocate();
    outputBlobs[item.first] = output2;

    graph.Infer(srcs, outputBlobs);

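    // reshape1 only rearranges data, so its output must still be all 1s;
    // ScaleShift then computes 1 * 2 + 2 = 4 for every element.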
    data = output1->data();
    for (size_t i = 0; i < output1->size(); i++) {
        ASSERT_EQ(data[i], 1);
    }
    data = output2->data();
    for (size_t i = 0; i < output2->size(); i++) {
        ASSERT_EQ(data[i], 4);
    }
}