// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <mkldnn_plugin/nodes/list.hpp>
#include <mkldnn_plugin/mkldnn_graph.h>
#include <mkldnn_plugin/mkldnn_memory.h>
#include <mkldnn_plugin/mkldnn_extension_utils.h>
#include <mkldnn_plugin/mkldnn_graph_optimizer.h>
#include <mkldnn_plugin/nodes/mkldnn_input_node.h>
#include <functional>

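// Deterministic "garbage" fill value for index x: used to pre-fill buffers so
// that elements left untouched by inference can be detected afterwards.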
#define GARB_VAL(x) ((x + 100.0f + sin(x)) / (x + 150.f))

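// Test wrapper around MKLDNNGraph that exposes protected internals (node list,
// graph construction, input push / output pull) to the unit tests.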
class MKLDNNGraphTestClass: public MKLDNNPlugin::MKLDNNGraph {
private:
    MKLDNNPlugin::MKLDNNExtensionManager::Ptr extensionManager = std::make_shared<MKLDNNPlugin::MKLDNNExtensionManager>();

public:
    enum class CheckDynBatchType {
        Both,
        Parent,
        Child
    };
    MKLDNNGraphTestClass(): MKLDNNPlugin::MKLDNNGraph() {
        auto defaultExtensions = std::make_shared<InferenceEngine::Extensions::Cpu::MKLDNNExtensions<mkldnn::impl::cpu::cpu_isa_t::isa_any>>();
        extensionManager->AddExtension(defaultExtensions);
    }
    virtual ~MKLDNNGraphTestClass() = default;

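    // Decodes an impl_desc_type bit mask into a human-readable string such as
    // "jit_avx2_1x1"; falls back to "unknown" or "undef" when nothing matches.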
    static std::string getStrPrimitiveDescriptorType(MKLDNNPlugin::impl_desc_type type) {
        std::string str_type;

        auto add_type = [&](const std::string &t) {
            if (!str_type.empty() && t[0] != '_')
                str_type += "_";
            str_type += t;
        };

#define SEARCH_TYPE(_type)                                                                      \
    if ((type & MKLDNNPlugin::impl_desc_type::_type) == MKLDNNPlugin::impl_desc_type::_type)    \
        add_type(#_type)

        SEARCH_TYPE(undef);
        SEARCH_TYPE(reorder);
        SEARCH_TYPE(jit);
        SEARCH_TYPE(gemm);
        SEARCH_TYPE(ref);

        SEARCH_TYPE(avx512);
        SEARCH_TYPE(avx2);
        SEARCH_TYPE(sse42);
        SEARCH_TYPE(blas);
        SEARCH_TYPE(any);

        SEARCH_TYPE(winograd);
        SEARCH_TYPE(_dw);
        SEARCH_TYPE(_1x1);

        if (type == MKLDNNPlugin::impl_desc_type::unknown)
            str_type = "unknown";
        else if (str_type.empty())
            str_type = "undef";
        return str_type;
    }

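    // Copies an input blob into the graph's input memory for the given batch
    // and applies mean-image subtraction when one is configured for 'name'.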
    void PushInputData(const std::string& name, const InferenceEngine::Blob::Ptr &in, int batch) {
        if (!IsReady()) THROW_IE_EXCEPTION << "Wrong state. Topology not ready.";

        auto input = inputNodes.find(name);
        if (input != inputNodes.end()) {
            MKLDNNPlugin::MKLDNNDims outDims;
            if (input->second->getChildEdgeAt(0)->getDims().ndims() == 0)
                outDims = MKLDNNPlugin::MKLDNNDims(InferenceEngine::SizeVector(1, 1));
            else
                outDims = input->second->getChildEdgeAt(0)->getDims();
            if (batch < 1)
                batch = outDims[0];

            const void *ext_data_ptr = in->cbuffer();
            void *inter_data_ptr = input->second->getChildEdgeAt(0)->getMemory().GetData();

            if (ext_data_ptr != inter_data_ptr)
                input->second->getChildEdgeAt(0)->getMemory().SetData(MKLDNNPlugin::MKLDNNExtensionUtils::IEPrecisionToDataType(in->getTensorDesc().getPrecision()),
                                                                      MKLDNNPlugin::MKLDNNMemory::GetPlainFormat(outDims), ext_data_ptr, in->byteSize() / outDims[0] * batch, false);

            // todo: make sure 'name' exists in this map...
            if (_meanImages.find(name) != _meanImages.end()) {
                if (in->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32) {
                    _meanImages[name].Subtract(outDims, reinterpret_cast<float *>(inter_data_ptr), in->getTensorDesc().getLayout());
                } else {
                    THROW_IE_EXCEPTION << "Mean image of type " << in->getTensorDesc().getPrecision().name() << " is unsupported";
                }
            }
        } else {
            THROW_IE_EXCEPTION << "Input blob for infer '" << name << "' doesn't correspond to input in network";
        }
    }

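    // Validates that every input blob is a correctly typed, allocated TBlob,
    // pushes the inputs, runs inference for the (possibly reduced) batch, and
    // pulls the results into 'result'.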
    void Infer(const InferenceEngine::BlobMap& inputs, InferenceEngine::BlobMap& result, int batch = -1) {
        try {
            // need to retain converted blobs until infer finish
            std::vector<InferenceEngine::Blob::Ptr> convertedInputs;
            for (const auto &input : inputs) {
                switch (input.second->getTensorDesc().getPrecision()) {
                    case InferenceEngine::Precision::FP32: {
                        InferenceEngine::TBlob<float> *in_f = nullptr;
                        in_f = dynamic_cast<InferenceEngine::TBlob<float> *>(input.second.get());
                        if (in_f == nullptr) {
                            FAIL() << "Input data precision not supported. Expected float.";
                        }

                        if (in_f->readOnly() == nullptr) {
                            THROW_IE_EXCEPTION << "Input data was not allocated.";
                        }
                    }
                    break;
                    case InferenceEngine::Precision::I32: {
                        InferenceEngine::TBlob<int32_t> *in_f = nullptr;
                        in_f = dynamic_cast<InferenceEngine::TBlob<int32_t> *>(input.second.get());
                        if (in_f == nullptr) {
                            FAIL() << "Input data precision not supported. Expected int32_t.";
                        }

                        if (in_f->readOnly() == nullptr) {
                            THROW_IE_EXCEPTION << "Input data was not allocated.";
                        }
                    }
                    break;
                    case InferenceEngine::Precision::U16: {
                        InferenceEngine::TBlob<uint16_t> *in_f = nullptr;
                        in_f = dynamic_cast<InferenceEngine::TBlob<uint16_t> *>(input.second.get());
                        if (in_f == nullptr) {
                            FAIL() << "Input data precision not supported. Expected uint16_t.";
                        }

                        if (in_f->readOnly() == nullptr) {
                            THROW_IE_EXCEPTION << "Input data was not allocated.";
                        }
                    }
                    break;
                    case InferenceEngine::Precision::I16: {
                        InferenceEngine::TBlob<int16_t> *in_f = nullptr;
                        in_f = dynamic_cast<InferenceEngine::TBlob<int16_t> *>(input.second.get());
                        if (in_f == nullptr) {
                            FAIL() << "Input data precision not supported. Expected int16_t.";
                        }

                        if (in_f->readOnly() == nullptr) {
                            THROW_IE_EXCEPTION << "Input data was not allocated.";
                        }
                    }
                    break;
                    case InferenceEngine::Precision::U8: {
                        InferenceEngine::TBlob<uint8_t> *in_f = nullptr;
                        in_f = dynamic_cast<InferenceEngine::TBlob<uint8_t> *>(input.second.get());
                        if (in_f == nullptr) {
                            FAIL() << "Input data precision not supported. Expected uint8_t.";
                        }

                        if (in_f->readOnly() == nullptr) {
                            THROW_IE_EXCEPTION << "Input data was not allocated.";
                        }
                    }
                    break;
                    case InferenceEngine::Precision::I8: {
                        InferenceEngine::TBlob<int8_t> *in_f = nullptr;
                        in_f = dynamic_cast<InferenceEngine::TBlob<int8_t> *>(input.second.get());
                        if (in_f == nullptr) {
                            FAIL() << "Input data precision not supported. Expected int8_t.";
                        }

                        if (in_f->readOnly() == nullptr) {
                            THROW_IE_EXCEPTION << "Input data was not allocated.";
                        }
                    }
                    break;
                    default:
                        THROW_IE_EXCEPTION << "Unsupported input precision " << input.second->getTensorDesc().getPrecision();
                }

                PushInputData(input.first, input.second, batch);
            }
            MKLDNNPlugin::MKLDNNGraph::Infer(batch);
        } catch (const std::exception &e) {
            FAIL() << e.what();
        }

        PullOutputData(result);
    }

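    // Exposes the otherwise protected node list so tests can inspect it.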
    std::vector<MKLDNNPlugin::MKLDNNNodePtr>& getNodes() {
        return graphNodes;
    }

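    // Builds the graph from a network, either with a caller-supplied extension
    // manager or with the default CPU extensions registered in the constructor.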
    void CreateGraph(InferenceEngine::ICNNNetwork &network, const MKLDNNPlugin::MKLDNNExtensionManager::Ptr& extMgr) {
        MKLDNNGraph::CreateGraph(network, extMgr);
    }

    void CreateGraph(InferenceEngine::ICNNNetwork &network) {
        CreateGraph(network, extensionManager);
    }

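    // Dynamic-batch check: pre-fills the input/output buffers of every node
    // matched by 'comp' with GARB_VAL garbage, runs inference with a reduced
    // batch, then asserts that the first 'batch' items were overwritten while
    // the tail (batch..MB) still holds the garbage, i.e. was never computed.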
    void checkDynBatch(InferenceEngine::BlobMap& srcs, InferenceEngine::BlobMap& outputBlobs, int batch, size_t MB,
                       const std::function<bool (const MKLDNNPlugin::MKLDNNNodePtr&)>& comp, CheckDynBatchType type = CheckDynBatchType::Both) {
        for (auto &node : getNodes()) {
            if (comp(node)) {
                auto inputBlob = node->getParentEdgeAt(0)->getBlob();
                auto *data = inputBlob->buffer().as<float *>();
                size_t dataSize = inputBlob->getTensorDesc().getBlockingDesc().getStrides()[0] * MB;
                for (size_t j = 0; j < dataSize; j++) {
                    data[j] = GARB_VAL(j);
                }

                auto outputBlob = node->getChildEdgeAt(0)->getBlob();
                data = outputBlob->buffer().as<float *>();
                dataSize = outputBlob->getTensorDesc().getBlockingDesc().getStrides()[0] * MB;
                for (size_t j = 0; j < dataSize; j++) {
                    data[j] = GARB_VAL(j);
                }
            }
        }

        Infer(srcs, outputBlobs, batch);

        for (auto &node : getNodes()) {
            if (comp(node)) {
                auto inputBlob = node->getParentEdgeAt(0)->getBlob();
                auto *data = inputBlob->buffer().as<float *>();
                auto inputNoBatchSize = inputBlob->getTensorDesc().getBlockingDesc().getStrides()[0];
                for (size_t i = 0; i < static_cast<size_t>(batch); i++) {
                    for (size_t j = 0; j < inputNoBatchSize; j++) {
                        ASSERT_NE(data[i*inputNoBatchSize + j], GARB_VAL(i*inputNoBatchSize + j));
                    }
                }

                if (type == CheckDynBatchType::Both || type == CheckDynBatchType::Parent) {
                    for (size_t i = static_cast<size_t>(batch); i < MB; i++) {
                        for (size_t j = 0; j < inputNoBatchSize; j++) {
                            ASSERT_NEAR(data[i * inputNoBatchSize + j],
                                        GARB_VAL(i * inputNoBatchSize + j), 0.001f);
                        }
                    }
                }

                auto outputBlob = node->getChildEdgeAt(0)->getBlob();
                data = outputBlob->buffer().as<float *>();
                auto outputNoBatchSize = outputBlob->getTensorDesc().getBlockingDesc().getStrides()[0];
                for (size_t i = 0; i < static_cast<size_t>(batch); i++) {
                    for (size_t j = 0; j < outputNoBatchSize; j++) {
                        ASSERT_NE(data[i*outputNoBatchSize + j], GARB_VAL(i*outputNoBatchSize + j));
                    }
                }
                if (type == CheckDynBatchType::Both || type == CheckDynBatchType::Child) {
                    for (size_t i = static_cast<size_t>(batch); i < MB; i++) {
                        for (size_t j = 0; j < outputNoBatchSize; j++) {
                            ASSERT_NEAR(data[i * outputNoBatchSize + j],
                                        GARB_VAL(i * outputNoBatchSize + j), 0.001f);
                        }
                    }
                }
            }
        }
    }
};
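
// Usage sketch (hypothetical, for illustration only): a typical test parses an
// IR model string, builds the graph, and runs a dynamic-batch check. The
// 'model' string and the Convolution filter below are assumptions, not part of
// this header.
//
//   MKLDNNGraphTestClass graph;
//   InferenceEngine::CNNNetReader reader;
//   reader.ReadNetwork(model.data(), model.length());
//   graph.CreateGraph(reader.getNetwork());
//
//   InferenceEngine::BlobMap srcs;   // fill with allocated TBlob<float> inputs
//   InferenceEngine::BlobMap outs;   // pre-allocated output blobs
//   graph.checkDynBatch(srcs, outs, /*batch=*/1, /*MB=*/2,
//                       [](const MKLDNNPlugin::MKLDNNNodePtr &node) {
//                           return node->getType() == MKLDNNPlugin::Convolution;
//                       });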