// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

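// These tests exercise the MKLDNN plugin's Reorder node: descriptor creation on
// an isolated node, automatic Reorder insertion around a convolution, and an
// in-place Reorder between Reshape layers.
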
#include <gtest/gtest.h>
#include <gmock/gmock-spec-builders.h>
#include "mkldnn_plugin/mkldnn_graph.h"
#include "mock_mkldnn_primitive.hpp"

#include "test_graph.hpp"

#include <mock_mkldnn_extension.hpp>
#include <mkldnn/mkldnn_extension_ptr.hpp>
#include <mock_error_listener.hpp>
#include <mkldnn_plugin/mkldnn_extension_mngr.h>
#include "tests_common.hpp"

using namespace ::testing;
using namespace std;
using namespace mkldnn;

class MKLDNNGraphReorderTests: public TestsCommon {
protected:
    void SetUp() override {
        TestsCommon::SetUp();
    }
};

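// A Reorder node constructed from a bare CNNLayer has no input or output edges,
// so there is nothing to derive memory formats from; getSupportedDescriptors()
// is expected to throw.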
TEST_F(MKLDNNGraphReorderTests, cannotCreatePrimitiveDescriptorsWithoutOtherLayers) {
    std::shared_ptr<MKLDNNPlugin::MKLDNNNode> node;
    mkldnn::engine eng(mkldnn::engine::kind::cpu, 0);

    InferenceEngine::CNNLayerPtr layer(new InferenceEngine::CNNLayer({"TestReorder", "Reorder", InferenceEngine::Precision::FP32}));
    node.reset(MKLDNNPlugin::MKLDNNNode::CreateNode(layer, eng, {}));
    ASSERT_EQ(MKLDNNPlugin::Type::Reorder, node->getType());

    ASSERT_THROW(node->getSupportedDescriptors(), InferenceEngine::details::InferenceEngineException);
}

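// Builds an Input -> Convolution graph. The MKLDNN convolution prefers a blocked
// memory layout, so the plugin inserts Reorder nodes on either side of it:
// NCHW -> blocked before the convolution and blocked -> NCHW after it. Each
// Reorder must expose exactly one primitive descriptor of type `reorder`.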
TEST_F(MKLDNNGraphReorderTests, CreateReorder) {
    std::string model = R"V0G0N(
<Net Name="Convolution_Only" version="2" precision="FP32" batch="1">
    <layers>
        <layer name="in1" type="Input" precision="FP32" id="0">
            <output>
                <port id="0">
                    <dim>1</dim>
                    <dim>9</dim>
                    <dim>16</dim>
                    <dim>32</dim>
                </port>
            </output>
        </layer>
        <layer name="conv1" id="1" type="Convolution" precision="FP32">
            <convolution stride-x="1" stride-y="1"
                         pad-x="0"    pad-y="0"
                         kernel-x="1" kernel-y="1"
                         output="17"  group="1"/>

            <weights offset="0" size="612" />
            <biases offset="612" size="68" />

            <input>
                <port id="1">
                    <dim>1</dim>
                    <dim>9</dim>
                    <dim>16</dim>
                    <dim>32</dim>
                </port>
            </input>
            <output>
                <port id="2">
                    <dim>1</dim>
                    <dim>17</dim>
                    <dim>16</dim>
                    <dim>32</dim>
                </port>
            </output>
        </layer>
    </layers>
    <edges>
        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
    </edges>
</Net>
)V0G0N";

    InferenceEngine::CNNNetReader net_reader;
    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));

    // 1x1 kernel * 17 output channels * 9 input channels weights plus 17 biases,
    // stored as floats: 612 + 68 bytes, matching the offsets in the IR above.
    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(
            InferenceEngine::Precision::U8, InferenceEngine::C, {(1 * 1 * 17 * 9 / 1 + 17) * sizeof(float)});
    weights->allocate();
    fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);

    net_reader.SetWeights(weights_ptr);

    MKLDNNGraphTestClass graph;
    graph.CreateGraph(net_reader.getNetwork());

    auto& nodes = graph.getNodes();
    for (size_t i = 0; i < nodes.size(); i++) {
        if (nodes[i]->getType() == MKLDNNPlugin::Reorder) {
            ASSERT_EQ(1, nodes[i]->getSupportedPrimitiveDescriptors().size());
            ASSERT_EQ(MKLDNNPlugin::impl_desc_type::reorder,
                      nodes[i]->getSupportedPrimitiveDescriptors()[0].getImplementationType());
            ASSERT_EQ(1, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().inConfs.size());
            if (i == 1) {
                // The Reorder in front of the convolution consumes NCHW and produces a blocked layout.
                ASSERT_EQ(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().inConfs[0].desc.getLayout());
                ASSERT_NE(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().outConfs[0].desc.getLayout());
            } else {
                // The Reorder after the convolution converts the blocked layout back to NCHW.
                ASSERT_NE(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().inConfs[0].desc.getLayout());
                ASSERT_EQ(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().outConfs[0].desc.getLayout());
            }
            ASSERT_EQ(1, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().outConfs.size());
        }
    }
}

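// Builds Input -> Reshape -> Reshape -> ScaleShift and also marks the first
// Reshape as a network output. Reshape runs in place, so the extra output
// forces an in-place Reorder. With the input filled with 1 and ScaleShift
// weights/biases all set to 2, the first output must still read 1 while the
// ScaleShift output reads 2 * 1 + 2 = 4.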
TEST_F(MKLDNNGraphReorderTests, CreateInPlaceReorder) {
    std::string model = R"V0G0N(
<Net Name="InPlaceReorder_Only" version="2" precision="FP32" batch="1">
    <layers>
        <layer name="in1" type="Input" precision="FP32" id="0">
            <output>
                <port id="0">
                    <dim>1</dim>
                    <dim>9</dim>
                    <dim>16</dim>
                    <dim>32</dim>
                </port>
            </output>
        </layer>
        <layer name="reshape1" id="1" type="Reshape" precision="FP32">
            <input>
                <port id="1">
                    <dim>1</dim>
                    <dim>9</dim>
                    <dim>16</dim>
                    <dim>32</dim>
                </port>
            </input>
            <output>
                <port id="2">
                    <dim>32</dim>
                    <dim>144</dim>
                </port>
            </output>
        </layer>
        <layer name="reshape2" id="2" type="Reshape" precision="FP32">
            <data axis="0" num_axes="-1" dim="1, 4608"/>
            <input>
                <port id="1">
                    <dim>32</dim>
                    <dim>144</dim>
                </port>
            </input>
            <output>
                <port id="2">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>48</dim>
                    <dim>32</dim>
                </port>
            </output>
        </layer>
        <layer name="scaleshift" id="3" type="ScaleShift" precision="FP32">
            <input>
                <port id="1">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>48</dim>
                    <dim>32</dim>
                </port>
            </input>
            <output>
                <port id="2">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>48</dim>
                    <dim>32</dim>
                </port>
            </output>
            <blobs>
                <weights offset="0" size="12"/>
                <biases offset="12" size="12"/>
            </blobs>
        </layer>
    </layers>
    <edges>
        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
        <edge from-layer="1" from-port="2" to-layer="2" to-port="1"/>
        <edge from-layer="2" from-port="2" to-layer="3" to-port="1"/>
    </edges>
</Net>
)V0G0N";

    InferenceEngine::CNNNetReader net_reader;
    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));

    // 3 scale values + 3 shift values for the ScaleShift layer, all set to 2.
    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {24});
    weights->allocate();
    float *data = weights->buffer().as<float *>();
    size_t dataSize = weights->byteSize() / sizeof(float);
    for (size_t i = 0; i < dataSize; i++) {
        data[i] = 2;
    }
    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);

    net_reader.SetWeights(weights_ptr);
    // Expose the in-place Reshape output as a second network output.
    net_reader.getNetwork().addOutput("reshape1");

    MKLDNNGraphTestClass graph;
    graph.CreateGraph(net_reader.getNetwork());

    InferenceEngine::SizeVector dims_src = {1, 9, 16, 32};

    InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
    src->allocate();
    data = src->buffer().as<float *>();
    dataSize = src->size();
    for (size_t i = 0; i < dataSize; i++) {
        data[i] = 1;
    }

    auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());

    if (srcPtr == nullptr)
        FAIL() << "Cannot cast blob to TBlob<float>.";

    InferenceEngine::BlobMap srcs;
    srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));

    InferenceEngine::OutputsDataMap out;
    out = net_reader.getNetwork().getOutputsInfo();
    InferenceEngine::BlobMap outputBlobs;

    auto it = out.begin();
    std::pair<std::string, InferenceEngine::DataPtr> item = *it;

    InferenceEngine::TBlob<float>::Ptr output1;
    output1 = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
    output1->allocate();
    outputBlobs[item.first] = output1;

    item = *(++it);

    InferenceEngine::TBlob<float>::Ptr output2;
    output2 = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
    output2->allocate();
    outputBlobs[item.first] = output2;

    graph.Infer(srcs, outputBlobs);

    // The first output (reshape1) must pass the input through unchanged.
    data = output1->data();
    for (size_t i = 0; i < output1->size(); i++) {
        ASSERT_EQ(data[i], 1);
    }
    // The ScaleShift output must be 2 * 1 + 2 = 4 for every element.
    data = output2->data();
    for (size_t i = 0; i < output2->size(); i++) {
        ASSERT_EQ(data[i], 4);
    }
}