inference-engine/tests/unit/engines/mkldnn/graph/test_graph.hpp
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <mkldnn_plugin/mkldnn_graph.h>
#include <mkldnn_plugin/mkldnn_memory.h>
#include <mkldnn_plugin/mkldnn_extension_utils.h>
#include <mkldnn_plugin/mkldnn_graph_optimizer.h>
#include <mkldnn_plugin/nodes/mkldnn_input_node.h>

#include <gtest/gtest.h>

#include <cmath>
#include <functional>
#include <string>
#include <vector>

// Deterministic "garbage" pattern used to poison buffers before inference so
// that untouched elements can be detected afterwards.
#define GARB_VAL(x) ((x + 100.0f + sin(x)) / (x + 150.f))

class MKLDNNGraphTestClass : public MKLDNNPlugin::MKLDNNGraph {
public:
    // Which side of a matched node checkDynBatch() validates: the parent
    // (input) edge, the child (output) edge, or both.
    enum class CheckDynBatchType {
        Both,
        Parent,
        Child
    };
    MKLDNNGraphTestClass(): MKLDNNPlugin::MKLDNNGraph() {}
    virtual ~MKLDNNGraphTestClass() = default;

    static std::string getStrPrimitiveDescriptorType(MKLDNNPlugin::impl_desc_type type) {
        std::string str_type;

        // Append a token, separating it with '_' unless the token already
        // starts with one (e.g. "_dw", "_1x1").
        auto add_type = [&](const std::string &t) {
            if (!str_type.empty() && t[0] != '_')
                str_type += "_";
            str_type += t;
        };

#define SEARCH_TYPE(_type)                                                                      \
    if ((type & MKLDNNPlugin::impl_desc_type::_type) == MKLDNNPlugin::impl_desc_type::_type)    \
        add_type(#_type)

        SEARCH_TYPE(undef);
        SEARCH_TYPE(reorder);
        SEARCH_TYPE(jit);
        SEARCH_TYPE(gemm);
        SEARCH_TYPE(ref);

        SEARCH_TYPE(avx512);
        SEARCH_TYPE(avx2);
        SEARCH_TYPE(sse42);
        SEARCH_TYPE(blas);
        SEARCH_TYPE(any);

        SEARCH_TYPE(winograd);
        SEARCH_TYPE(_dw);
        SEARCH_TYPE(_1x1);

#undef SEARCH_TYPE

        if (type == MKLDNNPlugin::impl_desc_type::unknown)
            str_type = "unknown";
        else if (str_type.empty())
            str_type = "undef";
        return str_type;
    }

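    // Example (illustrative only): impl_desc_type values compose as bit
    // masks, so a combined descriptor such as jit_avx2 (assumed here to be
    // defined as jit | avx2, which is what the bit tests above imply)
    // stringifies by concatenation:
    //
    //     getStrPrimitiveDescriptorType(MKLDNNPlugin::impl_desc_type::jit_avx2);
    //     // -> "jit_avx2"
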
    void PushInputData(const std::string& name, const InferenceEngine::Blob::Ptr &in, int batch) {
        if (!IsReady()) THROW_IE_EXCEPTION << "Wrong state. Topology not ready.";

        auto input = inputNodes.find(name);
        if (input != inputNodes.end()) {
            MKLDNNPlugin::MKLDNNDims outDims = input->second->getChildEdgeAt(0)->getDims();
            if (batch < 1)
                batch = outDims[0];

            const void *ext_data_ptr = in->cbuffer();
            void *inter_data_ptr = input->second->getChildEdgeAt(0)->getMemory().GetData();

            // Copy external data into the graph's input memory, scaling the
            // byte size from the blob's full batch down to the requested one.
            if (ext_data_ptr != inter_data_ptr)
                input->second->getChildEdgeAt(0)->getMemory().SetData(
                        MKLDNNPlugin::MKLDNNExtensionUtils::IEPrecisionToDataType(in->getTensorDesc().getPrecision()),
                        MKLDNNPlugin::MKLDNNMemory::GetPlainFormat(outDims), ext_data_ptr,
                        in->byteSize() / outDims[0] * batch, false);

            // Apply the mean image, if one is registered for this input.
            if (_meanImages.find(name) != _meanImages.end()) {
                if (in->getTensorDesc().getPrecision() == InferenceEngine::Precision::FP32) {
                    _meanImages[name].Subtract(outDims, reinterpret_cast<float *>(inter_data_ptr));
                } else {
                    THROW_IE_EXCEPTION << "Mean image of type " << in->getTensorDesc().getPrecision().name() << " is unsupported";
                }
            }
        } else {
            THROW_IE_EXCEPTION << "Input blob for infer '" << name << "' doesn't correspond to input in network";
        }
    }

    void Infer(const InferenceEngine::BlobMap& inputs, InferenceEngine::BlobMap& result, int batch = -1) {
        for (auto &it : result) {
            InferenceEngine::TBlob<float> *out = dynamic_cast<InferenceEngine::TBlob<float> *>(it.second.get());
            if (out == nullptr) {
                FAIL() << "Output data precision not supported. Expected float.";
            }
        }

        try {
            for (const auto &input : inputs) {
                InferenceEngine::TBlob<float> *in_f = nullptr;
                switch (input.second->precision()) {
                    case InferenceEngine::Precision::FP32:
                        in_f = dynamic_cast<InferenceEngine::TBlob<float> *>(input.second.get());
                        break;
                    default:
                        THROW_IE_EXCEPTION << "Unsupported input precision " << input.second->precision();
                }

                if (in_f == nullptr) {
                    FAIL() << "Input data precision not supported. Expected float.";
                }

                if (in_f->readOnly() == nullptr) {
                    THROW_IE_EXCEPTION << "Input data was not allocated.";
                }

                PushInputData(input.first, input.second, batch);
            }
            MKLDNNPlugin::MKLDNNGraph::Infer(batch);
        } catch (const std::exception &e) {
            FAIL() << e.what();
        }

        PullOutputData(result);
    }

    std::vector<MKLDNNPlugin::MKLDNNNodePtr>& getNodes() {
        return graphNodes;
    }

    void CreateGraph(InferenceEngine::ICNNNetwork &network, const MKLDNNPlugin::MKLDNNExtensionManager::Ptr& extMgr) {
        MKLDNNGraph::CreateGraph(network, extMgr);
    }

    void CreateGraph(InferenceEngine::ICNNNetwork &network) {
        MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr;
        CreateGraph(network, extMgr);
    }

    void checkDynBatch(InferenceEngine::BlobMap& srcs, InferenceEngine::BlobMap& outputBlobs, int batch, size_t MB,
                       const std::function<bool (const MKLDNNPlugin::MKLDNNNodePtr&)>& comp, CheckDynBatchType type = CheckDynBatchType::Both) {
        // Poison the input and output buffers of every matched node with a
        // known pattern, so that after inference we can tell which elements
        // were actually written.
        for (auto &node : getNodes()) {
            if (comp(node)) {
                auto inputBlob = node->getParentEdgeAt(0)->getBlob();
                auto *data = inputBlob->buffer().as<float *>();
                size_t dataSize = inputBlob->getTensorDesc().getBlockingDesc().getStrides()[0] * MB;
                for (size_t j = 0; j < dataSize; j++) {
                    data[j] = GARB_VAL(j);
                }

                auto outputBlob = node->getChildEdgeAt(0)->getBlob();
                data = outputBlob->buffer().as<float *>();
                dataSize = outputBlob->getTensorDesc().getBlockingDesc().getStrides()[0] * MB;
                for (size_t j = 0; j < dataSize; j++) {
                    data[j] = GARB_VAL(j);
                }
            }
        }

        Infer(srcs, outputBlobs, batch);

        // Elements within the requested batch must have been overwritten;
        // elements beyond it must still hold the poison pattern.
        for (auto &node : getNodes()) {
            if (comp(node)) {
                auto inputBlob = node->getParentEdgeAt(0)->getBlob();
                auto *data = inputBlob->buffer().as<float *>();
                auto inputNoBatchSize = inputBlob->getTensorDesc().getBlockingDesc().getStrides()[0];
                for (size_t i = 0; i < static_cast<size_t>(batch); i++) {
                    for (size_t j = 0; j < inputNoBatchSize; j++) {
                        ASSERT_NE(data[i*inputNoBatchSize + j], GARB_VAL(i*inputNoBatchSize + j));
                    }
                }

                if (type == CheckDynBatchType::Both || type == CheckDynBatchType::Parent) {
                    for (size_t i = static_cast<size_t>(batch); i < MB; i++) {
                        for (size_t j = 0; j < inputNoBatchSize; j++) {
                            ASSERT_NEAR(data[i * inputNoBatchSize + j],
                                        GARB_VAL(i * inputNoBatchSize + j), 0.001f);
                        }
                    }
                }

                auto outputBlob = node->getChildEdgeAt(0)->getBlob();
                data = outputBlob->buffer().as<float *>();
                auto outputNoBatchSize = outputBlob->getTensorDesc().getBlockingDesc().getStrides()[0];
                for (size_t i = 0; i < static_cast<size_t>(batch); i++) {
                    for (size_t j = 0; j < outputNoBatchSize; j++) {
                        ASSERT_NE(data[i*outputNoBatchSize + j], GARB_VAL(i*outputNoBatchSize + j));
                    }
                }
                if (type == CheckDynBatchType::Both || type == CheckDynBatchType::Child) {
                    for (size_t i = static_cast<size_t>(batch); i < MB; i++) {
                        for (size_t j = 0; j < outputNoBatchSize; j++) {
                            ASSERT_NEAR(data[i * outputNoBatchSize + j],
                                        GARB_VAL(i * outputNoBatchSize + j), 0.001f);
                        }
                    }
                }
            }
        }
    }
};
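
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of this header): a typical unit
// test builds the graph from an ICNNNetwork, runs inference on float blobs,
// and optionally checks dynamic-batch behaviour for a class of nodes. The
// network and blob setup below is hypothetical; real tests load an IR model
// and fill the blobs with reference data.
//
//     MKLDNNGraphTestClass graph;
//     graph.CreateGraph(network);                 // network: ICNNNetwork
//
//     InferenceEngine::BlobMap srcs;              // input name  -> float blob
//     InferenceEngine::BlobMap outputBlobs;       // output name -> float blob
//     graph.Infer(srcs, outputBlobs);             // full batch
//
//     // Re-run with batch = 2 out of MB = 4 and verify that only the first
//     // two batch slices of convolution inputs/outputs are written:
//     graph.checkDynBatch(srcs, outputBlobs, 2, 4,
//             [](const MKLDNNPlugin::MKLDNNNodePtr& node) {
//                 return node->getType() == MKLDNNPlugin::Convolution;
//             });
// ---------------------------------------------------------------------------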