1 // Copyright (C) 2018-2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
5 #include <gtest/gtest.h>
6 #include <gmock/gmock-spec-builders.h>
7 #include "mkldnn_plugin/mkldnn_graph.h"
9 #include "test_graph.hpp"
11 #include <mkldnn_plugin/mkldnn_extension_utils.h>
12 #include "tests_common.hpp"
15 using namespace ::testing;
17 using namespace mkldnn;
// Parameter bundle for the Input/Output primitive-descriptor tests below.
// NOTE(review): fields on elided lines (e.g. num_prim_desc, referenced by the
// fixture) are not visible in this view.
20 struct input_test_params {
// Implementation type every selected primitive descriptor must report.
23 MKLDNNPlugin::impl_desc_type selectedType;
// Per-descriptor verification callbacks; the fixture picks one by node kind.
25 std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
// Fixture: builds the "InputsOnly" IR (three Input layers, each feeding a
// Power layer), creates an MKLDNN graph from it, and checks that every
// Input/Output node exposes the expected supported primitive descriptors
// (count, layouts, in/out config sizes) and a valid selected descriptor.
// getModel() (elided below) expands model_t for the current parameters.
28 class MKLDNNGraphInputTests: public TestsCommon,
29 public WithParamInterface<input_test_params> {
// IR v2 network description; raw string content is parsed by CNNNetReader at
// runtime — do not edit as code. (Interior of the XML is elided in this view.)
30 std::string model_t = R"V0G0N(
31 <net name="InputsOnly" version="2" precision="FP32" batch="1">
33 <layer name="in1" type="Input" precision="FP32" id="1">
43 <layer name="in2" type="Input" precision="FP32" id="2">
53 <layer name="in3" type="Input" precision="FP32" id="3">
61 <layer name="power1" id="4" type="Power" precision="FP32">
62 <power_data power="1" scale="1" shift="1"/>
80 <layer name="power2" id="5" type="Power" precision="FP32">
81 <power_data power="1" scale="1" shift="1"/>
99 <layer name="power3" id="6" type="Power" precision="FP32">
100 <power_data power="1" scale="1" shift="1"/>
116 <edge from-layer="1" from-port="1" to-layer="4" to-port="4"/>
117 <edge from-layer="2" from-port="2" to-layer="5" to-port="6"/>
118 <edge from-layer="3" from-port="3" to-layer="6" to-port="8"/>
123 std::string getModel(input_test_params p) {
128 virtual void TearDown() {
// Whole test body lives in SetUp(); the TEST_P below is an empty shell.
131 virtual void SetUp() {
133 TestsCommon::SetUp();
134 input_test_params p = ::testing::WithParamInterface<input_test_params>::GetParam();
135 std::string model = getModel(p);
// Parse the generated IR; a parse failure aborts the test immediately.
137 InferenceEngine::CNNNetReader net_reader;
138 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
140 MKLDNNGraphTestClass graph;
141 graph.CreateGraph(net_reader.getNetwork());
// Inspect only the Input/Output nodes the graph compiler produced.
143 auto& nodes = graph.getNodes();
144 for (int i = 0; i < nodes.size(); i++) {
145 if (nodes[i]->getType() == MKLDNNPlugin::Input || nodes[i]->getType() == MKLDNNPlugin::Output) {
146 ASSERT_EQ(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size());
// comp-callback index: 0 for Input nodes, 2 for Output nodes; the elided
// branches below appear to bump it for the 2D "in3"/"out_power3" nodes —
// TODO(review) confirm against the full file.
147 size_t count = (nodes[i]->getType() == MKLDNNPlugin::Input) ? 0 : 2;
148 if (nodes[i]->getName() == "in3") {
151 if (nodes[i]->getName() == "out_power3") {
// Run the selected checker over every supported descriptor of this node.
154 for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) {
155 p.comp.at(count)(nodes[i]->getSupportedPrimitiveDescriptors().at(j));
// A descriptor must have been selected and match the expected impl type.
157 ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor());
158 ASSERT_EQ(p.selectedType, nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType());
161 } catch (const InferenceEngine::details::InferenceEngineException &e) {
167 TEST_P(MKLDNNGraphInputTests, TestsInput) {}
// Single parameter set: each node is expected to expose exactly one
// primitive descriptor of impl type `unknown`. The four lambdas verify,
// in order: 4D input, 2D input, 4D output, 2D output configurations.
170 INSTANTIATE_TEST_CASE_P(
171 TestsInput, MKLDNNGraphInputTests,
173 input_test_params{1, MKLDNNPlugin::impl_desc_type::unknown, {
// Checker 0: NCHW Input node — no input configs, one NCHW output config.
174 [](MKLDNNPlugin::PrimitiveDescInfo impl) {
175 ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
176 ASSERT_EQ(0, impl.getConfig().inConfs.size());
177 ASSERT_EQ(1, impl.getConfig().outConfs.size());
178 ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().outConfs.at(0).desc.getLayout());
// Checker 1: 2D Input node — no input configs, one NC output config.
180 [](MKLDNNPlugin::PrimitiveDescInfo impl) {
181 ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
182 ASSERT_EQ(0, impl.getConfig().inConfs.size());
183 ASSERT_EQ(1, impl.getConfig().outConfs.size());
184 ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().outConfs.at(0).desc.getLayout());
// Checker 2: NCHW Output node — one NCHW input config, no output configs.
186 [](MKLDNNPlugin::PrimitiveDescInfo impl) {
187 ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
188 ASSERT_EQ(1, impl.getConfig().inConfs.size());
189 ASSERT_EQ(0, impl.getConfig().outConfs.size());
190 ASSERT_EQ(InferenceEngine::Layout::NCHW, impl.getConfig().inConfs.at(0).desc.getLayout());
// Checker 3: 2D Output node — one NC input config, no output configs.
192 [](MKLDNNPlugin::PrimitiveDescInfo impl) {
193 ASSERT_EQ(MKLDNNPlugin::impl_desc_type::unknown, impl.getImplementationType());
194 ASSERT_EQ(1, impl.getConfig().inConfs.size());
195 ASSERT_EQ(0, impl.getConfig().outConfs.size());
196 ASSERT_EQ(InferenceEngine::Layout::NC, impl.getConfig().inConfs.at(0).desc.getLayout());
// Fixture: a network that concatenates a runtime Input layer with a Const
// layer whose data comes from the weights blob. After inference the output
// is checked element-wise: along the concat axis each slice must match the
// corresponding source blob (src1 for the Input, src2 for the Const).
201 class MKLDNNGraphConstInputTests: public TestsCommon {
// IR v2 description (interior elided): in1 is 1x3x2x2, in2 (Const) is
// 1x3x1x2 stored at weights offset 48, concatenated on axis 2.
202 std::string model_t = R"V0G0N(
203 <net name="ConcatOnly" version="2" precision="FP32" batch="1">
205 <layer name="in1" type="Input" precision="FP32" id="1">
215 <custom offset="0" size="48"/>
218 <layer name="in2" type="Const" precision="FP32" id="2">
228 <custom offset="48" size="24"/>
231 <layer name="con" id="3" type="Concat" precision="FP32">
232 <concat_data axis="2"/>
258 <edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
259 <edge from-layer="2" from-port="2" to-layer="3" to-port="2"/>
265 virtual void TearDown() {
// Whole test body lives in SetUp(); the TEST_F below is an empty shell.
268 virtual void SetUp() {
270 TestsCommon::SetUp();
271 std::string model = model_t;
273 InferenceEngine::CNNNetReader net_reader;
274 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
// 72 bytes = 12 floats (in1 reference) + 6 floats (in2 Const data).
// NOTE(review): raw `new` is only wrapped into a smart pointer further
// down (line 299); an ASSERT failing before that point would leak it.
276 InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {72});
278 float * data = weights->buffer();
// NOTE(review): leftover debug print; consider removing.
280 std::cout << weights->size() << std::endl;
282 InferenceEngine::SizeVector dims_src1 = {1, 3, 2, 2};
283 InferenceEngine::SizeVector dims_src2 = {1, 3, 1, 2};
// src1 mirrors the Input layer's data; the elided loop body presumably
// fills both the weights buffer and src1 in lockstep — TODO confirm.
284 InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src1);
286 float *srcData = src1->buffer();
287 for (size_t i = 0; i < 12; i++, data++, srcData++) {
// src2 mirrors the Const layer's data (next 6 floats of the weights blob).
292 InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src2);
294 srcData = src2->buffer();
295 for (size_t i = 0; i < 6; i++, data++, srcData++) {
// Hand ownership of the weights blob to the reader via shared pointer.
299 InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
301 net_reader.SetWeights(weights_ptr);
303 MKLDNNGraphTestClass graph;
304 graph.CreateGraph(net_reader.getNetwork());
305 auto& nodes = graph.getNodes();
306 ASSERT_LE(3, nodes.size());
// srcs stays empty here; input blobs are apparently supplied on elided
// lines (or the Input is fed from defaults) — TODO confirm.
308 InferenceEngine::BlobMap srcs;
310 InferenceEngine::OutputsDataMap out;
311 out = net_reader.getNetwork().getOutputsInfo();
312 InferenceEngine::BlobMap outputBlobs;
// Allocate an output blob matching the (single) network output.
314 std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
316 InferenceEngine::TBlob<float>::Ptr output;
317 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
319 outputBlobs[item.first] = output;
321 graph.Infer(srcs, outputBlobs);
// Element-wise verification of the concat result against both sources.
324 float *src1_ptr = src1->buffer();
325 size_t src1_size = src1->size();
326 float *src2_ptr = src2->buffer();
327 size_t src2_size = src2->size();
328 float *dst_ptr = output->buffer();
329 size_t dst_size = output->size();
// len1/len2: number of elements per concat slice contributed by each
// source (product of dims from the concat axis onward); `cycles` is set
// on an elided line — TODO confirm its value (outer-dim product).
331 int len1 = 1, len2 = 1, cycles;
332 for (int dim = 2; dim < output->dims().size(); dim++) {
333 len1 *= src1->dims()[dim];
334 len2 *= src2->dims()[dim];
// Walk the output: for each cycle, first len1 values come from src1,
// the next len2 values from src2 (concat interleaving).
338 int index1 = 0, index2 = 0, index = 0;
339 for (int cycle = 0; cycle < cycles; cycle ++) {
340 for (int i1 = 0; i1 < len1; i1++) {
341 if (src1_ptr[index1] != dst_ptr[index])
343 FAIL() << "index: " << index << " src: " << src1_ptr[index1] << ", dst: " << dst_ptr[index];
347 for (int i2 = 0; i2 < len2; i2++) {
348 if (src2_ptr[index2] != dst_ptr[index])
350 FAIL() << "index: " << index << " src: " << src2_ptr[index2] << ", dst: " << dst_ptr[index];
355 } catch (const InferenceEngine::details::InferenceEngineException &e) {
361 TEST_F(MKLDNNGraphConstInputTests, TestsConstInput) {}
// Parameter bundle for the input-layout tests: feed the network an input in
// `layout` and compare the inference result against `reference`.
364 struct input_layout_test_params {
// Memory layout (NCHW / NHWC) of the source blob handed to Infer().
365 InferenceEngine::Layout layout;
// Expected output values (12 floats for the 1x3x2x2 network below).
366 std::vector<float> reference;
367 MKLDNNPlugin::impl_desc_type selectedType;
// Optional per-descriptor checks; unused by the visible fixture code.
368 std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
// Fixture: runs a tiny Input -> Power(+1) network (with a mean pre-process
// section) on a 1x3x2x2 source blob created in the parameterized layout
// (NCHW or NHWC) and compares the raw output bytes against the expected
// reference values.
// FIX(review): the memcmp below previously passed p.reference.size() (the
// float ELEMENT count) as the BYTE count, so only the first
// reference.size()/sizeof(float) floats (3 of 12) were actually compared.
// Multiply by sizeof(float) to compare the full buffer.
371 class MKLDNNGraphInputLayoutTest : public TestsCommon, public WithParamInterface<input_layout_test_params> {
// IR v2 description (interior elided); parsed at runtime — do not edit.
372 std::string model_t = R"V0G0N(
373 <net name="InputLayers" version="2" batch="1">
375 <layer name="input" type="Input" precision="FP32" id="0">
385 <layer name="power1" id="1" type="Power" precision="FP32">
386 <power_data power="1" scale="1" shift="1"/>
406 <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
408 <pre-process reference-layer-name="input" mean-precision="FP32">
423 virtual void TearDown() {
// Whole test body lives in SetUp(); the TEST_P below is an empty shell.
426 virtual void SetUp() {
428 TestsCommon::SetUp();
429 input_layout_test_params p = ::testing::WithParamInterface<input_layout_test_params>::GetParam();
430 std::string model = model_t;
432 InferenceEngine::CNNNetReader net_reader;
433 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
435 MKLDNNGraphTestClass graph;
436 graph.CreateGraph(net_reader.getNetwork());
// Source blob in the layout under test, filled with a deterministic
// debug pattern so results are exactly comparable.
438 InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, { 1, 3, 2, 2 }, p.layout);
439 InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>(desc);
441 fill_data_dbgval(src->buffer(), src->size());
442 InferenceEngine::BlobMap srcs;
443 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("input", src));
// Allocate an output blob matching the (single) network output.
445 InferenceEngine::OutputsDataMap out = net_reader.getNetwork().getOutputsInfo();
446 std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
447 InferenceEngine::TBlob<float>::Ptr output;
448 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
450 InferenceEngine::BlobMap outputBlobs;
451 outputBlobs[item.first] = output;
453 graph.Infer(srcs, outputBlobs);
// Byte-exact comparison is valid here: inputs are exact small values and
// Power with power=1/scale=1/shift=1 is exact in FP32.
455 if (memcmp((*output).data(), &p.reference[0], p.reference.size() * sizeof(float)) != 0)
456 FAIL() << "Wrong result with compare reference!";
458 catch (const InferenceEngine::details::InferenceEngineException &e) {
464 TEST_P(MKLDNNGraphInputLayoutTest, TestsLayoutInput) {}
466 INSTANTIATE_TEST_CASE_P(
467 TestsLayoutInput, MKLDNNGraphInputLayoutTest,
469 input_layout_test_params{ InferenceEngine::NCHW, { 0,1,2,3,3,4,5,6,6,7,8,9 }, MKLDNNPlugin::impl_desc_type::unknown },
470 input_layout_test_params{ InferenceEngine::NHWC, { 0,0,0,3,3,3,6,6,6,9,9,9 }, MKLDNNPlugin::impl_desc_type::unknown }