1 // Copyright (C) 2018 Intel Corporation
3 // SPDX-License-Identifier: Apache-2.0
6 #include <gtest/gtest.h>
7 #include <gmock/gmock-spec-builders.h>
8 #include "mkldnn_plugin/mkldnn_graph.h"
9 #include "mock_mkldnn_primitive.hpp"
11 #include "test_graph.hpp"
13 #include "single_layer_common.hpp"
14 #include <mkldnn_plugin/mkldnn_extension_utils.h>
15 #include <unordered_set>
16 #include <inference_engine/cnn_network_impl.hpp>
17 #include "tests_common.hpp"
19 using namespace ::testing;
21 using namespace mkldnn;
// Parameters for one single-Concat graph test case (consumed via
// WithParamInterface<concat_test_params>).
// NOTE(review): members referenced later (in1, in2, axis, num_prim_desc) are
// not visible here — the embedded line numbers skip; confirm against the
// full file.
30 struct concat_test_params {
// Implementation type the selected primitive descriptor must report.
39 MKLDNNPlugin::impl_desc_type selectedType;
// Optional validation callbacks, invoked index-matched against the Concat
// node's supported primitive descriptors in SetUp().
41 std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
// Fixture for single-Concat graph tests: instantiates the two-input
// "ConcatOnly" IR below, builds an MKLDNN graph from it and validates the
// Concat node and its output in SetUp().
// NOTE(review): the XML template and parts of the class body are elided in
// this extract (embedded line numbers are non-contiguous).
44 class MKLDNNGraphConcatTests: public TestsCommon,
45 public WithParamInterface<concat_test_params> {
// IR template; the _IN*_/_IC*_/_IH*_/_IW*_, _O*_ and _AXIS_ placeholders are
// substituted by getModel() below.
46 std::string model_t = R"V0G0N(
47 <net name="ConcatOnly" version="2" precision="FP32" batch="1">
49 <layer name="in1" type="Input" precision="FP32" id="1">
59 <layer name="in2" type="Input" precision="FP32" id="2">
69 <layer name="con" id="3" type="Concat" precision="FP32">
70 <concat_data axis="_AXIS_"/>
96 <edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
97 <edge from-layer="2" from-port="2" to-layer="3" to-port="2"/>
// Fill the template with the test parameters and return the concrete IR.
102 std::string getModel(concat_test_params p) {
103 std::string model = model_t;
104 REPLACE_WITH_NUM(model, "_IN1_", p.in1.n);
105 REPLACE_WITH_NUM(model, "_IC1_", p.in1.c);
106 REPLACE_WITH_NUM(model, "_IW1_", p.in1.w);
107 REPLACE_WITH_NUM(model, "_IH1_", p.in1.h);
109 REPLACE_WITH_NUM(model, "_IN2_", p.in2.n);
110 REPLACE_WITH_NUM(model, "_IC2_", p.in2.c);
111 REPLACE_WITH_NUM(model, "_IW2_", p.in2.w);
112 REPLACE_WITH_NUM(model, "_IH2_", p.in2.h);
// Output shape: the concat axis gets in1+in2 extent, every other axis
// copies input 1's extent.
114 REPLACE_WITH_NUM(model, "_ON_", p.axis == 0 ? p.in1.n + p.in2.n : p.in1.n);
115 REPLACE_WITH_NUM(model, "_OC_", p.axis == 1 ? p.in1.c + p.in2.c : p.in1.c);
116 REPLACE_WITH_NUM(model, "_OH_", p.axis == 2 ? p.in1.h + p.in2.h : p.in1.h);
117 REPLACE_WITH_NUM(model, "_OW_", p.axis == 3 ? p.in1.w + p.in2.w : p.in1.w);
119 REPLACE_WITH_NUM(model, "_AXIS_", p.axis);
124 virtual void TearDown() {
// Builds the graph, validates the Concat node's primitive descriptors,
// runs inference and checks the output against both source blobs.
127 virtual void SetUp() {
129 TestsCommon::SetUp();
130 concat_test_params p = ::testing::WithParamInterface<concat_test_params>::GetParam();
131 std::string model = getModel(p);
133 InferenceEngine::CNNNetReader net_reader;
134 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
136 MKLDNNGraphTestClass graph;
137 graph.CreateGraph(net_reader.getNetwork());
// Locate the Concat node and verify descriptor count, the per-descriptor
// callbacks, and the implementation type that was selected.
138 auto& nodes = graph.getNodes();
139 for (int i = 0; i < nodes.size(); i++) {
140 if (nodes[i]->getType() == MKLDNNPlugin::Concatenation) {
141 ASSERT_EQ(p.num_prim_desc, nodes[i]->getSupportedPrimitiveDescriptors().size());
142 for (size_t j = 0; j < p.num_prim_desc && j < p.comp.size(); j++) {
143 p.comp.at(j)(nodes[i]->getSupportedPrimitiveDescriptors().at(j));
145 ASSERT_NE(nullptr, nodes[i]->getSelectedPrimitiveDescriptor());
146 ASSERT_EQ(p.selectedType, nodes[i]->getSelectedPrimitiveDescriptor()->getImplementationType());
// At minimum: two inputs + the concat node.
149 ASSERT_LE(3, nodes.size());
151 InferenceEngine::SizeVector dims_src1 = {p.in1.n, p.in1.c, p.in1.h, p.in1.w};
152 InferenceEngine::SizeVector dims_src2 = {p.in2.n, p.in2.c, p.in2.h, p.in2.w};
154 InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src1);
157 fill_data(src1->buffer(), src1->size());
158 InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src2);
160 fill_data(src2->buffer(), src2->size());
161 InferenceEngine::BlobMap srcs;
162 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src1));
163 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in2", src2));
165 InferenceEngine::OutputsDataMap out;
166 out = net_reader.getNetwork().getOutputsInfo();
167 InferenceEngine::BlobMap outputBlobs;
// Single-output network: only the first entry is used.
169 std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
171 InferenceEngine::TBlob<float>::Ptr output;
172 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
174 outputBlobs[item.first] = output;
176 graph.Infer(srcs, outputBlobs);
179 float *src1_ptr = src1->buffer();
180 size_t src1_size = src1->size();
181 float *src2_ptr = src2->buffer();
182 size_t src2_size = src2->size();
183 float *dst_ptr = output->buffer();
184 size_t dst_size = output->size();
// len1/len2 = product of each input's dims from the concat axis onward,
// i.e. the contiguous chunk each input contributes per "cycle".
// NOTE(review): the assignment of `cycles` is elided from this extract —
// presumably the product of dims before the axis; verify in the full file.
186 int len1 = 1, len2 = 1, cycles;
187 for (int dim = p.axis; dim < output->dims().size(); dim++) {
188 len1 *= src1->dims()[dim];
189 len2 *= src2->dims()[dim];
// Reference check: output interleaves len1 elements of src1 then len2
// elements of src2, repeated `cycles` times.
194 int index1 = 0, index2 = 0, index = 0;
195 for (int cycle = 0; cycle < cycles; cycle ++) {
196 for (int i1 = 0; i1 < len1; i1++) {
197 if (src1_ptr[index1] != dst_ptr[index])
199 FAIL() << "index: " << index << " src: " << src1_ptr[index1] << ", dst: " << dst_ptr[index];
203 for (int i2 = 0; i2 < len2; i2++) {
204 if (src2_ptr[index2] != dst_ptr[index])
206 FAIL() << "index: " << index << " src: " << src2_ptr[index2] << ", dst: " << dst_ptr[index];
211 } catch (const InferenceEngine::details::InferenceEngineException &e) {
// All checks live in SetUp(); the parameterized test body is empty.
217 TEST_P(MKLDNNGraphConcatTests, TestsConcat) {}
// Fixture for dynamic-batch Concat tests: same "ConcatOnly" IR as above,
// but SetUp() enables KEY_DYN_BATCH_ENABLED and exercises batch resizing.
// NOTE(review): template and class body are partially elided in this
// extract (embedded line numbers skip).
219 class MKLDNNGraphDynBatchConcatTests: public TestsCommon, public WithParamInterface<concat_test_params> {
220 std::string model_t = R"V0G0N(
221 <net name="ConcatOnly" version="2" precision="FP32" batch="1">
223 <layer name="in1" type="Input" precision="FP32" id="1">
233 <layer name="in2" type="Input" precision="FP32" id="2">
243 <layer name="con" id="3" type="Concat" precision="FP32">
244 <concat_data axis="_AXIS_"/>
270 <edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
271 <edge from-layer="2" from-port="2" to-layer="3" to-port="2"/>
// Identical placeholder substitution as MKLDNNGraphConcatTests::getModel.
276 std::string getModel(concat_test_params p) {
277 std::string model = model_t;
278 REPLACE_WITH_NUM(model, "_IN1_", p.in1.n);
279 REPLACE_WITH_NUM(model, "_IC1_", p.in1.c);
280 REPLACE_WITH_NUM(model, "_IW1_", p.in1.w);
281 REPLACE_WITH_NUM(model, "_IH1_", p.in1.h);
283 REPLACE_WITH_NUM(model, "_IN2_", p.in2.n);
284 REPLACE_WITH_NUM(model, "_IC2_", p.in2.c);
285 REPLACE_WITH_NUM(model, "_IW2_", p.in2.w);
286 REPLACE_WITH_NUM(model, "_IH2_", p.in2.h);
// Output shape: concat axis sums the inputs, other axes copy input 1.
288 REPLACE_WITH_NUM(model, "_ON_", p.axis == 0 ? p.in1.n + p.in2.n : p.in1.n);
289 REPLACE_WITH_NUM(model, "_OC_", p.axis == 1 ? p.in1.c + p.in2.c : p.in1.c);
290 REPLACE_WITH_NUM(model, "_OH_", p.axis == 2 ? p.in1.h + p.in2.h : p.in1.h);
291 REPLACE_WITH_NUM(model, "_OW_", p.axis == 3 ? p.in1.w + p.in2.w : p.in1.w);
293 REPLACE_WITH_NUM(model, "_AXIS_", p.axis);
298 virtual void TearDown() {
// Reshapes the network to batch MB, builds the graph with dynamic batch
// enabled, then validates inference at full and reduced batch sizes.
// NOTE(review): `MB` is not declared in the visible lines — presumably a
// member/local set from the test params on an elided line; verify.
301 virtual void SetUp() {
303 TestsCommon::SetUp();
304 concat_test_params p = ::testing::WithParamInterface<concat_test_params>::GetParam();
305 std::string model = getModel(p);
310 InferenceEngine::CNNNetReader net_reader;
311 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
312 InferenceEngine::CNNNetwork network = net_reader.getNetwork();
// setBatchSizeReshape lives on the impl class, so downcast first.
313 auto implNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(&((InferenceEngine::ICNNNetwork&)network));
314 ASSERT_NE(nullptr, implNet) << "Failed to cast ICNNNetwork to CNNNetworkImpl";
315 InferenceEngine::ResponseDesc resp;
316 InferenceEngine::StatusCode sts = implNet->setBatchSizeReshape(MB, &resp);
317 ASSERT_EQ((int)InferenceEngine::StatusCode::OK, sts) << resp.msg;
319 MKLDNNGraphTestClass graph;
// Dynamic batch must be enabled before the graph is created.
320 graph.setProperty({{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES}});
321 graph.CreateGraph(net_reader.getNetwork());
323 InferenceEngine::SizeVector dims_src1 = {MB, p.in1.c, p.in1.h, p.in1.w};
324 InferenceEngine::SizeVector dims_src2 = {MB, p.in2.c, p.in2.h, p.in2.w};
326 InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src1);
329 fill_data(src1->buffer(), src1->size());
330 InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src2);
332 fill_data(src2->buffer(), src2->size());
333 InferenceEngine::BlobMap srcs;
334 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src1));
335 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in2", src2));
337 InferenceEngine::OutputsDataMap out;
338 out = net_reader.getNetwork().getOutputsInfo();
339 InferenceEngine::BlobMap outputBlobs;
341 std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
343 InferenceEngine::TBlob<float>::Ptr output;
344 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
346 outputBlobs[item.first] = output;
// Predicate used by checkDynBatch to find the node under test.
349 auto checkConcat = [](const MKLDNNPlugin::MKLDNNNodePtr& node) {
350 return node->getType() == MKLDNNPlugin::Concatenation;
// For "unknown" impl types only the node's children are checked.
353 MKLDNNGraphTestClass::CheckDynBatchType checkType = MKLDNNGraphTestClass::CheckDynBatchType::Both;
354 if (p.selectedType == MKLDNNPlugin::impl_desc_type::unknown)
355 checkType = MKLDNNGraphTestClass::CheckDynBatchType::Child;
// Exercise both the full batch (MB) and the reduced batch (1).
357 graph.checkDynBatch(srcs, outputBlobs, MB, MB, checkConcat, checkType);
358 graph.checkDynBatch(srcs, outputBlobs, 1, MB, checkConcat, checkType);
359 } catch (const InferenceEngine::details::InferenceEngineException &e) {
365 TEST_P(MKLDNNGraphDynBatchConcatTests, TestsDynBatchConcat) {}
// Parameter sets for the dynamic-batch concat tests.
// NOTE(review): most of each concat_test_params initializer (input dims,
// axis) is elided in this extract — only the trailing fields (descriptor
// count(s) and expected impl type) are visible; verify against full file.
368 INSTANTIATE_TEST_CASE_P(
369         TestsDynBatchConcat, MKLDNNGraphDynBatchConcatTests,
374                         2, 1, MKLDNNPlugin::impl_desc_type::ref
379                         1, 2, MKLDNNPlugin::impl_desc_type::unknown
384                         1, 2, MKLDNNPlugin::impl_desc_type::unknown
389                         3, 1, MKLDNNPlugin::impl_desc_type::ref
394                         1, 4, MKLDNNPlugin::impl_desc_type::unknown
399                         1, 2, MKLDNNPlugin::impl_desc_type::unknown
// Describes one Concat layer inside the two-concat topology.
// NOTE(review): its fields (name, axis, input selectors) are elided from
// this extract — inferred from usage in getModel(); verify.
402 struct concat_param {
// Parameters for the two-concat graph tests: three inputs plus the two
// concat configurations (which input feeds which port, axis, layer name).
409 struct two_concat_test_params {
414     concat_param concat1;
415     concat_param concat2;
// Fixture for graphs with two Concat layers fed from three inputs in
// configurable wirings. Edges and per-port input dims are patched into the
// template via the _FLxy_/_FPxy_/_FS*_ and _CIxy*_ placeholder families.
// NOTE(review): the XML template body is largely elided in this extract.
418 class MKLDNNGraphTwoConcatTests: public TestsCommon,
419         public WithParamInterface<two_concat_test_params> {
420     std::string model_t = R"V0G0N(
421 <net name="TwoConcatsDiffFwd" version="2" precision="FP32" batch="1">
423         <layer name="in1" type="Input" precision="FP32" id="1">
433         <layer name="in2" type="Input" precision="FP32" id="2">
443         <layer name="in3" type="Input" precision="FP32" id="3">
453         <layer name="_CONCAT1_NAME_" id="4" type="Concat" precision="FP32">
454             <concat_data axis="_CONCAT1_AXIS_"/>
478         <layer name="_CONCAT2_NAME_" id="5" type="Concat" precision="FP32">
479             <concat_data axis="_CONCAT2_AXIS_"/>
505         <edge from-layer="1" from-port="1" to-layer="_FL11_" to-port="_FP11_"/>
506         <edge from-layer="2" from-port="1" to-layer="_FL21_" to-port="_FP21_"/>
507         <edge from-layer="3" from-port="1" to-layer="_FL31_" to-port="_FP31_"/>
508         <edge from-layer="_FSL_" from-port="_FSP_" to-layer="_FSLTL_" to-port="_FSLTP_"/>
// Rewire one template edge: connect input layer f_l (port f_p) to layer
// t_l (port t_p), and substitute that destination port's input dims.
// If the per-input placeholders (_FLxy_/_FPxy_) were already consumed by an
// earlier call, fall back to the single spare-edge placeholders (_FS*_).
512 void changeEdgeToLayer(std::string& model, int f_l, int f_p, int t_l, int t_p, dim4 dims) {
513     std::string TL = "_FL" + std::to_string(f_l) + std::to_string(f_p) + "_";
514     std::string TP = "_FP" + std::to_string(f_l) + std::to_string(f_p) + "_";
515     if (!FIND_STR(model, TL) || !FIND_STR(model, TP)) {
// Neither the specific nor the spare placeholders remain: the requested
// wiring cannot be expressed by this template.
516         if (!FIND_STR(model, "_FSL_") || !FIND_STR(model, "_FSP_") ||
517             !FIND_STR(model, "_FSLTL_") || !FIND_STR(model, "_FSLTP_")) {
518             THROW_IE_EXCEPTION << "Incorrect configuration!";
// Spare edge: both endpoints are substituted (source and destination).
520         REPLACE_WITH_NUM(model, "_FSL_", f_l);
521         REPLACE_WITH_NUM(model, "_FSP_", f_p);
522         REPLACE_WITH_NUM(model, "_FSLTL_", t_l);
523         REPLACE_WITH_NUM(model, "_FSLTP_", t_p);
525         REPLACE_WITH_NUM(model, TL, t_l);
526         REPLACE_WITH_NUM(model, TP, t_p);
// Fill the destination port's input dims: placeholders _CI<t_l><t_p>{N,C,H,W}_.
529     std::string CI = "_CI" + std::to_string(t_l) + std::to_string(t_p);
530     REPLACE_WITH_NUM(model, CI + "N_", dims.n);
531     REPLACE_WITH_NUM(model, CI + "C_", dims.c);
532     REPLACE_WITH_NUM(model, CI + "H_", dims.h);
533     REPLACE_WITH_NUM(model, CI + "W_", dims.w);
// Build the two-concat IR: substitute the three input shapes, wire each
// concat port to the input selected by p.concat{1,2}.input{1,2}, then fill
// in output shapes, axes and layer names.
// NOTE(review): the switch case labels/breaks and the declarations of
// concat11/concat12/concat21/concat22 are elided from this extract.
537 std::string getModel(two_concat_test_params p) {
538     std::string model = model_t;
539     REPLACE_WITH_NUM(model, "_IN1_", p.in1.n);
540     REPLACE_WITH_NUM(model, "_IC1_", p.in1.c);
541     REPLACE_WITH_NUM(model, "_IW1_", p.in1.w);
542     REPLACE_WITH_NUM(model, "_IH1_", p.in1.h);
544     REPLACE_WITH_NUM(model, "_IN2_", p.in2.n);
545     REPLACE_WITH_NUM(model, "_IC2_", p.in2.c);
546     REPLACE_WITH_NUM(model, "_IW2_", p.in2.w);
547     REPLACE_WITH_NUM(model, "_IH2_", p.in2.h);
549     REPLACE_WITH_NUM(model, "_IN3_", p.in3.n);
550     REPLACE_WITH_NUM(model, "_IC3_", p.in3.c);
551     REPLACE_WITH_NUM(model, "_IW3_", p.in3.w);
552     REPLACE_WITH_NUM(model, "_IH3_", p.in3.h);
// Wire concat1 port 1 to the selected input (layer id 4).
555     switch (p.concat1.input1) {
557             changeEdgeToLayer(model, 2, 1, 4, 1, p.in2);
561             changeEdgeToLayer(model, 3, 1, 4, 1, p.in3);
565             changeEdgeToLayer(model, 1, 1, 4, 1, p.in1);
// Wire concat1 port 2.
570     switch (p.concat1.input2) {
572             changeEdgeToLayer(model, 2, 1, 4, 2, p.in2);
576             changeEdgeToLayer(model, 3, 1, 4, 2, p.in3);
580             changeEdgeToLayer(model, 1, 1, 4, 2, p.in1);
// Wire concat2 port 1 (layer id 5).
585     switch (p.concat2.input1) {
587             changeEdgeToLayer(model, 2, 1, 5, 1, p.in2);
591             changeEdgeToLayer(model, 3, 1, 5, 1, p.in3);
595             changeEdgeToLayer(model, 1, 1, 5, 1, p.in1);
// Wire concat2 port 2.
600     switch (p.concat2.input2) {
602             changeEdgeToLayer(model, 2, 1, 5, 2, p.in2);
606             changeEdgeToLayer(model, 3, 1, 5, 2, p.in3);
610             changeEdgeToLayer(model, 1, 1, 5, 2, p.in1);
// NOTE(review): for concat1 the non-axis dims below read concat21.* — that
// looks like a copy-paste from the concat2 lines; expected concat11.* to
// mirror the concat2 block. Harmless only if concat11 == concat21 in every
// instantiated parameter set — verify and fix in the full file.
614     REPLACE_WITH_NUM(model, "_CON1_", p.concat1.axis == 0 ? concat11.n + concat12.n : concat21.n);
615     REPLACE_WITH_NUM(model, "_COC1_", p.concat1.axis == 1 ? concat11.c + concat12.c : concat21.c);
616     REPLACE_WITH_NUM(model, "_COH1_", p.concat1.axis == 2 ? concat11.h + concat12.h : concat21.h);
617     REPLACE_WITH_NUM(model, "_COW1_", p.concat1.axis == 3 ? concat11.w + concat12.w : concat21.w);
618     REPLACE_WITH_NUM(model, "_CONCAT1_AXIS_", p.concat1.axis);
619     REPLACE_WITH_STR(model, "_CONCAT1_NAME_", p.concat1.name);
// Concat2 output: axis dim sums its two inputs, others copy input 1.
621     REPLACE_WITH_NUM(model, "_CON2_", p.concat2.axis == 0 ? concat21.n + concat22.n : concat21.n);
622     REPLACE_WITH_NUM(model, "_COC2_", p.concat2.axis == 1 ? concat21.c + concat22.c : concat21.c);
623     REPLACE_WITH_NUM(model, "_COH2_", p.concat2.axis == 2 ? concat21.h + concat22.h : concat21.h);
624     REPLACE_WITH_NUM(model, "_COW2_", p.concat2.axis == 3 ? concat21.w + concat22.w : concat21.w);
625     REPLACE_WITH_NUM(model, "_CONCAT2_AXIS_", p.concat2.axis);
626     REPLACE_WITH_STR(model, "_CONCAT2_NAME_", p.concat2.name);
631     virtual void TearDown() {
// Builds the two-concat graph, infers with three filled inputs and checks
// each concat's output blob against its two configured source blobs.
634     virtual void SetUp() {
636             TestsCommon::SetUp();
637             two_concat_test_params p = ::testing::WithParamInterface<two_concat_test_params>::GetParam();
638             std::string model = getModel(p);
640             InferenceEngine::CNNNetReader net_reader;
641             ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
643             MKLDNNGraphTestClass graph;
644             graph.CreateGraph(net_reader.getNetwork());
646             InferenceEngine::SizeVector dims_src1 = {p.in1.n, p.in1.c, p.in1.h, p.in1.w};
647             InferenceEngine::SizeVector dims_src2 = {p.in2.n, p.in2.c, p.in2.h, p.in2.w};
648             InferenceEngine::SizeVector dims_src3 = {p.in3.n, p.in3.c, p.in3.h, p.in3.w};
650             InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src1);
652             fill_data(src1->buffer(), src1->size());
654             InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src2);
656             fill_data(src2->buffer(), src2->size());
658             InferenceEngine::Blob::Ptr src3 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src3);
660             fill_data(src3->buffer(), src3->size());
662             InferenceEngine::BlobMap srcs;
663             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src1));
664             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in2", src2));
665             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in3", src3));
667             InferenceEngine::OutputsDataMap out;
668             out = net_reader.getNetwork().getOutputsInfo();
669             InferenceEngine::BlobMap outputBlobs;
// Both concat layers are network outputs: allocate a blob for each.
671             for (auto it = out.begin(); it != out.end(); it++) {
672                 std::pair<std::string, InferenceEngine::DataPtr> item = *it;
673                 InferenceEngine::TBlob<float>::Ptr output;
674                 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
676                 outputBlobs[item.first] = output;
679             graph.Infer(srcs, outputBlobs);
// Verify each concat independently, resolving which source blobs feed it.
681             for (auto concat : {p.concat1, p.concat2}) {
// NOTE(review): src1_c/src2_c are dereferenced below (dims()) but no
// visible line assigns them — presumably set inside the elided switch
// branches alongside src*_ptr/src*_size; verify in the full file.
686                 InferenceEngine::Blob::Ptr src1_c;
687                 InferenceEngine::Blob::Ptr src2_c;
689                 switch (concat.input1) {
691                         src1_ptr = src2->buffer();
692                         src1_size = src2->size();
696                         src1_ptr = src3->buffer();
697                         src1_size = src3->size();
701                         src1_ptr = src1->buffer();
702                         src1_size = src1->size();
706                 switch (concat.input2) {
708                         src2_ptr = src2->buffer();
709                         src2_size = src2->size();
713                         src2_ptr = src3->buffer();
714                         src2_size = src3->size();
718                         src2_ptr = src1->buffer();
719                         src2_size = src1->size();
723                 float *dst_ptr = outputBlobs[concat.name]->buffer();
724                 size_t dst_size = outputBlobs[concat.name]->size();
// len1/len2: contiguous chunk sizes each input contributes per cycle
// (product of dims from the concat axis onward).
726                 int len1 = 1, len2 = 1, cycles;
727                 for (int dim = concat.axis; dim < outputBlobs[concat.name]->dims().size(); dim++) {
728                     len1 *= src1_c->dims()[dim];
729                     len2 *= src2_c->dims()[dim];
// NOTE(review): cycles = concat.axis (not the product of the leading
// dims) — correct only while those dims are all 1; verify intent.
731                 cycles = concat.axis;
733                 int index1 = 0, index2 = 0, index = 0;
734                 for (int cycle = 0; cycle < cycles; cycle ++) {
735                     for (int i1 = 0; i1 < len1; i1++) {
736                         if (src1_ptr[index1] != dst_ptr[index])
738                             FAIL() << concat.name << " index: " << index << " src: "
739                                    << src1_ptr[index1] << ", dst: " << dst_ptr[index];
743                     for (int i2 = 0; i2 < len2; i2++) {
744                         if (src2_ptr[index2] != dst_ptr[index])
746                             FAIL() << concat.name << " index: " << index << " src: "
747                                    << src2_ptr[index2] << ", dst: " << dst_ptr[index];
753         } catch (const InferenceEngine::details::InferenceEngineException &e) {
// All checks live in SetUp(); the parameterized test body is empty.
759 TEST_P(MKLDNNGraphTwoConcatTests, TestsTwoConcat) {}
// Parameter sets for the two-concat tests.
// NOTE(review): input shapes and the concat2 descriptors are elided from
// this extract — only the concat1 {name, axis, input selectors} lines are
// visible; verify against the full file.
761 INSTANTIATE_TEST_CASE_P(
762         TestsTwoConcat, MKLDNNGraphTwoConcatTests,
764                 two_concat_test_params {
768                     {"concat1", 0, 0, 1},
771                 two_concat_test_params {
775                     {"concat1", 1, 0, 1},
778                 two_concat_test_params {
782                     {"concat1", 1, 0, 1},
785                 two_concat_test_params {
789                     {"concat1", 0, 0, 1},
792                 two_concat_test_params {
796                     {"concat1", 1, 0, 1},
799                 two_concat_test_params {
803                     {"concat1", 1, 0, 1},
// Non-parameterized fixture: in1 (1x3x2x2) fans out to ReLU, Power and
// directly into concat port 2, while in2 (1x2x2x2) feeds concat port 1 —
// checks that a network Input can be a direct (second) Concat input.
// NOTE(review): the XML template body is largely elided in this extract.
808 class MKLDNNGraphTwoInputInConcatTests: public TestsCommon {
809     std::string model_t = R"V0G0N(
810 <net name="TwoConcatsDiffFwd" version="2" precision="FP32" batch="1">
812         <layer name="in1" type="Input" precision="FP32" id="1">
822         <layer name="in2" type="Input" precision="FP32" id="2">
832         <layer name="norm" id="3" type="ReLU" precision="FP32">
850         <layer name="power" id="4" type="Power" precision="FP32">
851             <power_data power="-1" scale="-1" shift="0"/>
869         <layer name="o_concat" id="5" type="Concat" precision="FP32">
870             <concat_data axis="1"/>
896         <edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
897         <edge from-layer="1" from-port="1" to-layer="5" to-port="2"/>
898         <edge from-layer="1" from-port="1" to-layer="4" to-port="1"/>
899         <edge from-layer="2" from-port="1" to-layer="5" to-port="1"/>
905     virtual void TearDown() {
// Builds the fixed graph above, infers, and checks "o_concat": its first
// chunk must equal in2 (port 1) and its second chunk in1 (port 2).
908     virtual void SetUp() {
910             TestsCommon::SetUp();
911             std::string model = model_t;
913             InferenceEngine::CNNNetReader net_reader;
914             ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
916             MKLDNNGraphTestClass graph;
917             graph.CreateGraph(net_reader.getNetwork());
919             InferenceEngine::SizeVector dims_src1 = {1, 3, 2, 2};
920             InferenceEngine::SizeVector dims_src2 = {1, 2, 2, 2};
922             InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src1);
// Deterministic ramp 1..N for in1 (easier to trace than random fill).
924             float *src1_data = src1->buffer();
925             for (size_t i = 0; i < src1->size(); i++) {
926                 src1_data[i] = i + 1;
929             InferenceEngine::Blob::Ptr src2 = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src2);
931             fill_data(src2->buffer(), src2->size());
933             InferenceEngine::BlobMap srcs;
934             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src1));
935             srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in2", src2));
937             InferenceEngine::OutputsDataMap out;
938             out = net_reader.getNetwork().getOutputsInfo();
939             InferenceEngine::BlobMap outputBlobs;
941             for (auto it = out.begin(); it != out.end(); it++) {
942                 std::pair<std::string, InferenceEngine::DataPtr> item = *it;
943                 InferenceEngine::TBlob<float>::Ptr output;
944                 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
946                 outputBlobs[item.first] = output;
949             graph.Infer(srcs, outputBlobs);
// Deliberate swap: concat port 1 is fed by in2 (src2), port 2 by in1
// (src1), so "src1_ptr" (first chunk reference) aliases src2's data.
951             float *src1_ptr = src2->buffer();
952             size_t src1_size = src2->size();
953             float *src2_ptr = src1->buffer();
954             size_t src2_size = src1->size();
956             float *dst_ptr = outputBlobs["o_concat"]->buffer();
957             size_t dst_size = outputBlobs["o_concat"]->size();
// Chunk lengths from the concat axis (1, per the template) onward.
// NOTE(review): the assignment of `cycles` is elided from this extract.
959             int len1 = 1, len2 = 1, cycles;
960             for (int dim = 1; dim < outputBlobs["o_concat"]->dims().size(); dim++) {
961                 len1 *= src2->dims()[dim];
962                 len2 *= src1->dims()[dim];
966             int index1 = 0, index2 = 0, index = 0;
967             for (int cycle = 0; cycle < cycles; cycle ++) {
968                 for (int i1 = 0; i1 < len1; i1++) {
969                     if (src1_ptr[index1] != dst_ptr[index])
971                         FAIL() << "concat index: " << index << " src: "
972                                << src1_ptr[index1] << ", dst: " << dst_ptr[index];
976                 for (int i2 = 0; i2 < len2; i2++) {
977                     if (src2_ptr[index2] != dst_ptr[index])
979                         FAIL() << "concat index: " << index << " src: "
980                                << src2_ptr[index2] << ", dst: " << dst_ptr[index];
985         } catch (const InferenceEngine::details::InferenceEngineException &e) {
// All checks live in SetUp(); the test body is empty.
991 TEST_F(MKLDNNGraphTwoInputInConcatTests, TestSecondInputToConcat) {}
// Negative-path fixture: same "ConcatOnly" IR, but the parameter sets carry
// input shapes that are invalid for the given axis; SetUp() expects graph
// creation to throw.
// NOTE(review): template and class body partially elided in this extract.
993 class MKLDNNGraphIncorrectConcatTests: public TestsCommon,
994                               public WithParamInterface<concat_test_params> {
995     std::string model_t = R"V0G0N(
996 <net name="ConcatOnly" version="2" precision="FP32" batch="1">
998         <layer name="in1" type="Input" precision="FP32" id="1">
1008         <layer name="in2" type="Input" precision="FP32" id="2">
1018         <layer name="con" id="3" type="Concat" precision="FP32">
1019             <concat_data axis="_AXIS_"/>
1045         <edge from-layer="1" from-port="1" to-layer="3" to-port="1"/>
1046         <edge from-layer="2" from-port="2" to-layer="3" to-port="2"/>
// Identical placeholder substitution as MKLDNNGraphConcatTests::getModel.
1051     std::string getModel(concat_test_params p) {
1052         std::string model = model_t;
1053         REPLACE_WITH_NUM(model, "_IN1_", p.in1.n);
1054         REPLACE_WITH_NUM(model, "_IC1_", p.in1.c);
1055         REPLACE_WITH_NUM(model, "_IW1_", p.in1.w);
1056         REPLACE_WITH_NUM(model, "_IH1_", p.in1.h);
1058         REPLACE_WITH_NUM(model, "_IN2_", p.in2.n);
1059         REPLACE_WITH_NUM(model, "_IC2_", p.in2.c);
1060         REPLACE_WITH_NUM(model, "_IW2_", p.in2.w);
1061         REPLACE_WITH_NUM(model, "_IH2_", p.in2.h);
// Output shape: concat axis sums the inputs, other axes copy input 1.
1063         REPLACE_WITH_NUM(model, "_ON_", p.axis == 0 ? p.in1.n + p.in2.n : p.in1.n);
1064         REPLACE_WITH_NUM(model, "_OC_", p.axis == 1 ? p.in1.c + p.in2.c : p.in1.c);
1065         REPLACE_WITH_NUM(model, "_OH_", p.axis == 2 ? p.in1.h + p.in2.h : p.in1.h);
1066         REPLACE_WITH_NUM(model, "_OW_", p.axis == 3 ? p.in1.w + p.in2.w : p.in1.w);
1068         REPLACE_WITH_NUM(model, "_AXIS_", p.axis);
1073     virtual void TearDown() {
// Negative test: the (deliberately inconsistent) IR must make graph
// creation throw an InferenceEngineException.
1076     virtual void SetUp() {
1078             TestsCommon::SetUp();
1079             concat_test_params p = ::testing::WithParamInterface<concat_test_params>::GetParam();
1080             std::string model = getModel(p);
1082             InferenceEngine::CNNNetReader net_reader;
// The IR itself still parses; the failure is expected at graph build time.
1083             ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
1085             MKLDNNGraphTestClass graph;
1086             ASSERT_THROW(graph.CreateGraph(net_reader.getNetwork()), InferenceEngine::details::InferenceEngineException);
1087         } catch (const InferenceEngine::details::InferenceEngineException &e) {
// All checks live in SetUp(); the parameterized test body is empty.
1093 TEST_P(MKLDNNGraphIncorrectConcatTests, TestsIncorrectConcat) {}
// Invalid-shape parameter sets for the negative concat tests.
// NOTE(review): the initializer bodies are elided from this extract —
// verify the offending dims/axis values against the full file.
1096 INSTANTIATE_TEST_CASE_P(
1097         TestsIncorrectConcat, MKLDNNGraphIncorrectConcatTests,
1099                 concat_test_params {
1104                 concat_test_params {