// Copyright (C) 2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <gtest/gtest.h>
#include <gmock/gmock-spec-builders.h>
#include "mkldnn_plugin/mkldnn_graph.h"

#include "test_graph.hpp"

#include "single_layer_common.hpp"
#include <mkldnn_plugin/mkldnn_extension_utils.h>
#include <extension/ext_list.hpp>
#include "tests_common.hpp"


using namespace ::testing;
using namespace std;
using namespace mkldnn;

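// One Fill test case: the value/output precision ("I32" or "FP32"), the
// requested output shape, and the constant the output should be filled with.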
struct fill_test_params {
    std::string                 precision;
    InferenceEngine::SizeVector out_shape;
    float                       value;

    std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
};

class MKLDNNCPUExtFillTests : public TestsCommon, public WithParamInterface<fill_test_params> {
    std::string model_t = R"V0G0N(
<net Name="Fill_net" version="2" precision="_IIDXP_" batch="1">
    <layers>
        <layer name="dims" type="Input" precision="I32" id="1">
            <output>
                <port id="1">
                    <dim>_DIM_SIZE_</dim>
                </port>
            </output>
        </layer>
        <layer name="value" type="Input" precision="_IIDXP_" id="2">
            <output>
                <port id="2">
                    <dim>1</dim>
                </port>
            </output>
        </layer>
        <layer name="output" id="3" type="Fill" precision="_IIDXP_">
            <data/>
            <input>
                <port id="1">
                    <dim>_DIM_SIZE_</dim>
                </port>
                <port id="2">
                    <dim>1</dim>
                </port>
            </input>
            <output>
                <port id="3">
                    _OUT_
                </port>
            </output>
        </layer>
    </layers>
    <edges>
        <edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
        <edge from-layer="2" from-port="2" to-layer="3" to-port="2"/>
    </edges>
</net>
)V0G0N";
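    // getModel() below substitutes the placeholders; e.g. for
    // fill_test_params{"FP32", {2, 3, 6}, -1.f} it sets _IIDXP_ = "FP32",
    // _DIM_SIZE_ = 3 and expands _OUT_ to <dim>2</dim><dim>3</dim><dim>6</dim>,
    // i.e. an IR with a 3-element I32 "dims" input and a scalar "value" input
    // feeding the "Fill" layer.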

    std::string getModel(fill_test_params p) {
        std::string model = model_t;
        std::string out_shape;

        REPLACE_WITH_STR(model, "_IIDXP_", p.precision);
        for (size_t i = 0; i < p.out_shape.size(); i++) {
            out_shape += "<dim>";
            out_shape += std::to_string(p.out_shape[i]) + "</dim>\n";
        }
        REPLACE_WITH_STR(model, "_OUT_", out_shape);
        REPLACE_WITH_NUM(model, "_DIM_SIZE_", p.out_shape.size());

        return model;
    }

protected:
    virtual void TearDown() {
    }

    virtual void SetUp() {
        try {
            TestsCommon::SetUp();
            fill_test_params p = ::testing::WithParamInterface<fill_test_params>::GetParam();
            std::string model = getModel(p);

            InferenceEngine::CNNNetReader net_reader;
            ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));

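            // Fill is implemented in the CPU extensions library, so load it and
            // register it with the extension manager. The no-op deleter keeps
            // the shared_ptr from freeing the stack-allocated extension object.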
            InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
            MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
            extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));

            MKLDNNGraphTestClass graph;
            graph.CreateGraph(net_reader.getNetwork(), extMgr);

            // Output Data
            InferenceEngine::OutputsDataMap out;
            out = net_reader.getNetwork().getOutputsInfo();
            InferenceEngine::BlobMap outputBlobs;

            // Input Data
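            // The first input ("dims") carries the target output shape as an
            // I32 vector with one element per dimension.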
            InferenceEngine::Blob::Ptr dims;
            InferenceEngine::SizeVector vector_dim(1, p.out_shape.size());
            dims = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, vector_dim, InferenceEngine::TensorDesc::getLayoutByDims(vector_dim) });
            dims->allocate();
            for (size_t i = 0; i < p.out_shape.size(); i++) {
                static_cast<int32_t*>(dims->buffer())[i] = static_cast<int32_t>(p.out_shape[i]);
            }
            auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(dims.get());
            if (srcPtr == nullptr)
                FAIL() << "Cannot cast blob to TBlob<int32_t>.";

            InferenceEngine::BlobMap srcs;
            InferenceEngine::Blob::Ptr value_scalar;
            InferenceEngine::SizeVector value_scalar_dim(1, 1);
            std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
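            // The "value" input and the output blob use the test precision, so
            // each branch below builds matching blobs and a reference to check against.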
            if (p.precision == "I32") {
                value_scalar = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, value_scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(value_scalar_dim) });
                value_scalar->allocate();
                static_cast<int32_t*>(value_scalar->buffer())[0] = static_cast<int32_t>(p.value);
                auto * value_scalarPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(value_scalar.get());
                if (value_scalarPtr == nullptr)
                    FAIL() << "Cannot cast blob to TBlob<int32_t>.";

                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("dims", dims));
                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("value", value_scalar));

                // Output Blob
                InferenceEngine::TBlob<int32_t>::Ptr output;
                output = InferenceEngine::make_shared_blob<int32_t>(item.second->getTensorDesc());
                output->allocate();
                outputBlobs[item.first] = output;

                // Output Reference
                InferenceEngine::TBlob<int32_t> dst_ref(item.second->getTensorDesc());
                dst_ref.allocate();
                std::fill_n(static_cast<int32_t*>(dst_ref.data()), dst_ref.size(), static_cast<int32_t>(p.value));

                // Infer
                graph.Infer(srcs, outputBlobs);
                for (size_t i = 0; i < dst_ref.size(); i++) {
                    if (dst_ref.data()[i] != (*output).data()[i])
                        FAIL() << "Output value differs from the reference at index " << i;
                }
            } else if (p.precision == "FP32") {
                value_scalar = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, value_scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(value_scalar_dim) });
                value_scalar->allocate();
                static_cast<float*>(value_scalar->buffer())[0] = p.value;
                auto * value_scalarPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(value_scalar.get());
                if (value_scalarPtr == nullptr)
                    FAIL() << "Cannot cast blob to TBlob<float>.";

                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("dims", dims));
                srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("value", value_scalar));

                // Output Blob
                InferenceEngine::TBlob<float>::Ptr output;
                output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
                output->allocate();
                outputBlobs[item.first] = output;

                // Output Reference
                InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
                dst_ref.allocate();
                std::fill_n(static_cast<float*>(dst_ref.data()), dst_ref.size(), p.value);

                // Infer
                graph.Infer(srcs, outputBlobs);
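                // compare() is the shared helper from the common test headers;
                // it checks the blobs element-wise within a small float tolerance.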
                compare(*output, dst_ref);
            } else {
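                // Precisions other than I32 and FP32 are not exercised here.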
                return;
            }
        } catch (const InferenceEngine::details::InferenceEngineException &e) {
            FAIL() << e.what();
        }
    }
};

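// The TEST_P body is empty on purpose: graph construction, inference and the
// result check all run in SetUp() for every parameter set.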
TEST_P(MKLDNNCPUExtFillTests, TestsFill) {}

INSTANTIATE_TEST_CASE_P(
    TestsFill, MKLDNNCPUExtFillTests,
            ::testing::Values(
// Params: precision, out_shape, value
                fill_test_params{ "I32", { 1 }, 1.f },
                fill_test_params{ "I32", { 1, 3, 1 }, 1.f },
                fill_test_params{ "I32", { 2, 3, 6 }, -1.f },
                fill_test_params{ "FP32", { 2, 3, 6 }, -1.f },
                fill_test_params{ "FP32", { 1 }, 1.f },
                fill_test_params{ "FP32", { 1, 3, 1, 2 }, .5f },
                fill_test_params{ "FP32", { 4, 3, 2, 5, 4, 2 }, .25f }
            ));