// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include <gtest/gtest.h>
#include <gmock/gmock-spec-builders.h>
#include "mkldnn_plugin/mkldnn_graph.h"

#include "test_graph.hpp"

#include <mock_error_listener.hpp>
#include <mkldnn_plugin/mkldnn_extension_mngr.h>
#include "tests_common.hpp"
using namespace ::testing;
using namespace mkldnn;
class MKLDNNGraphReorderTests: public TestsCommon {
protected:
    virtual void SetUp() {
        TestsCommon::SetUp();
    }
};
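
// A Reorder node created without any surrounding layers has no input or output
// edges to derive memory descriptors from, so querying its supported descriptors
// is expected to throw.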
TEST_F(MKLDNNGraphReorderTests, cannotCreatePrimitiveDescriptorsWithoutOtherLayers) {
    std::shared_ptr<MKLDNNPlugin::MKLDNNNode> node;
    mkldnn::engine eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0));

    InferenceEngine::CNNLayerPtr layer(new InferenceEngine::CNNLayer({"TestReorder", "Reorder", InferenceEngine::Precision::FP32}));
    node.reset(MKLDNNPlugin::MKLDNNNode::CreateNode(layer, eng, {}));
    ASSERT_EQ(MKLDNNPlugin::Type::Reorder, node->getType());

    ASSERT_THROW(node->getSupportedDescriptors(), InferenceEngine::details::InferenceEngineException);
}
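
// A Convolution-only network: the graph compiler has to insert Reorder nodes
// that convert between the plain NCHW layout of the network input/output and
// the layout selected for the convolution primitive.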
TEST_F(MKLDNNGraphReorderTests, CreateReorder) {
    std::string model = R"V0G0N(
<Net Name="Convolution_Only" version="2" precision="FP32" batch="1">
        <layer name="in1" type="Input" precision="FP32" id="0">
        <layer name="conv1" id="1" type="Convolution" precision="FP32">
            <convolution stride-x="1" stride-y="1"
                         kernel-x="1" kernel-y="1"
                         output="17" group="1"/>
            <weights offset="0" size="612" />
            <biases offset="612" size="68" />
        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
</Net>
)V0G0N";
    InferenceEngine::CNNNetReader net_reader;
    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
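
    // The blob must hold the 1x1 convolution weights (17 output x 9 input channels)
    // plus 17 biases in FP32, i.e. the 612 + 68 bytes referenced by the IR offsets above.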
    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C,
                                                                                   {(1 * 1 * 17 * 9 / 1 + 17) * sizeof(float)});
    weights->allocate();
    fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);

    net_reader.SetWeights(weights_ptr);
    MKLDNNGraphTestClass graph;
    graph.CreateGraph(net_reader.getNetwork());

    auto& nodes = graph.getNodes();
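    // Each Reorder inserted by the graph must expose exactly one primitive
    // descriptor of type 'reorder': one direction converts NCHW into the
    // convolution's layout, the other converts back to NCHW.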
    for (int i = 0; i < nodes.size(); i++) {
        if (nodes[i]->getType() == MKLDNNPlugin::Reorder) {
            ASSERT_EQ(1, nodes[i]->getSupportedPrimitiveDescriptors().size());
            ASSERT_EQ(MKLDNNPlugin::impl_desc_type::reorder,
                      nodes[i]->getSupportedPrimitiveDescriptors()[0].getImplementationType());
            ASSERT_EQ(1, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().inConfs.size());
            if (i == 1) {  // assumption: node index 1 is the Reorder feeding the convolution
                ASSERT_EQ(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().inConfs[0].desc.getLayout());
                ASSERT_NE(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().outConfs[0].desc.getLayout());
            } else {       // the Reorder after the convolution converts back to NCHW
                ASSERT_NE(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().inConfs[0].desc.getLayout());
                ASSERT_EQ(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().outConfs[0].desc.getLayout());
            }
            ASSERT_EQ(1, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().outConfs.size());
        }
    }
}
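
// A Reshape -> Reshape -> ScaleShift chain where the first reshape is also
// registered as a network output, exercising the in-place reorder path: the
// graph has to return both the intermediate reshape result and the ScaleShift result.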
TEST_F(MKLDNNGraphReorderTests, CreateInPlaceReorder) {
    std::string model = R"V0G0N(
<Net Name="InPlaceReorder_Only" version="2" precision="FP32" batch="1">
        <layer name="in1" type="Input" precision="FP32" id="0">
        <layer name="reshape1" id="1" type="Reshape" precision="FP32">
        <layer name="reshape2" id="2" type="Reshape" precision="FP32">
            <data axis="0" num_axes="-1" dim="1, 4608"/>
        <layer name="scaleshift" id="3" type="ScaleShift" precision="FP32">
            <weights offset="0" size="12"/>
            <biases offset="12" size="12"/>
        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
        <edge from-layer="1" from-port="2" to-layer="2" to-port="1"/>
        <edge from-layer="2" from-port="2" to-layer="3" to-port="1"/>
</Net>
)V0G0N";
    InferenceEngine::CNNNetReader net_reader;
    ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));

    InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {24});
    weights->allocate();

    float *data = weights->buffer().as<float *>();
    size_t dataSize = weights->byteSize() / sizeof(float);
    for (size_t i = 0; i < dataSize; i++) {
        data[i] = 2;  // assumption: scale and bias are both 2, so ScaleShift yields 2 * 1 + 2 = 4 as asserted below
    }
    InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);

    net_reader.SetWeights(weights_ptr);
    net_reader.getNetwork().addOutput("reshape1");
    MKLDNNGraphTestClass graph;
    graph.CreateGraph(net_reader.getNetwork());

    InferenceEngine::SizeVector dims_src = {1, 9, 16, 32};

    InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
    src->allocate();
    data = src->buffer().as<float *>();
    dataSize = src->size();
    for (size_t i = 0; i < dataSize; i++) {
        data[i] = 1;  // assumption: the input is filled with ones, matching the reshape-output check below
    }

    auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());

    if (srcPtr == nullptr)
        FAIL() << "Cannot cast blob to TBlob<float>.";
    InferenceEngine::BlobMap srcs;
    srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));

    InferenceEngine::OutputsDataMap out;
    out = net_reader.getNetwork().getOutputsInfo();
    InferenceEngine::BlobMap outputBlobs;

    auto it = out.begin();
    std::pair<std::string, InferenceEngine::DataPtr> item = *it;
    InferenceEngine::TBlob<float>::Ptr output1;
    output1 = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
    output1->allocate();
    outputBlobs[item.first] = output1;

    it++;
    item = *it;

    InferenceEngine::TBlob<float>::Ptr output2;
    output2 = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
    output2->allocate();
    outputBlobs[item.first] = output2;
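
    // "reshape1" only forwards the input data, so output1 should stay at 1.0f;
    // the ScaleShift output should be scale * 1 + bias = 4.0f, given the
    // reconstructed fill values above (input = 1, scale = bias = 2).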
    graph.Infer(srcs, outputBlobs);

    data = output1->data();
    for (size_t i = 0; i < output1->size(); i++) {
        ASSERT_EQ(data[i], 1);
    }

    data = output2->data();
    for (size_t i = 0; i < output2->size(); i++) {
        ASSERT_EQ(data[i], 4);
    }
}