1 // Copyright (C) 2018 Intel Corporation
3 // SPDX-License-Identifier: Apache-2.0
6 #include <gtest/gtest.h>
7 #include <gmock/gmock-spec-builders.h>
8 #include "mkldnn_plugin/mkldnn_graph.h"
9 #include "mock_mkldnn_primitive.hpp"
11 #include "test_graph.hpp"
13 #include <mock_mkldnn_extension.hpp>
14 #include <mkldnn/mkldnn_extension_ptr.hpp>
15 #include <mock_error_listener.hpp>
16 #include <mkldnn_plugin/mkldnn_extension_mngr.h>
17 #include "tests_common.hpp"
19 using namespace ::testing;
21 using namespace mkldnn;
// Test fixture for MKLDNN-graph Reorder-node tests; inherits the common test
// helpers (e.g. fill_data used below) from TestsCommon.
// NOTE(review): this view is truncated — SetUp()'s body and the class's
// closing brace are not visible here, so no claims are made about what
// SetUp() initializes.
23 class MKLDNNGraphReorderTests: public TestsCommon {
25     virtual void SetUp() {
30 TEST_F(MKLDNNGraphReorderTests, cannotCreatePrimitiveDescriprorsWithoutOtherLayers) {
31 std::shared_ptr<MKLDNNPlugin::MKLDNNNode> node;
32 mkldnn::engine eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0));
34 InferenceEngine::CNNLayerPtr layer(new InferenceEngine::CNNLayer({"TestReorder", "Reorder", InferenceEngine::Precision::FP32}));
35 node.reset(MKLDNNPlugin::MKLDNNNode::CreateNode(layer, eng, {}));
36 ASSERT_EQ(MKLDNNPlugin::Type::Reorder, node->getType());
38 ASSERT_THROW(node->getSupportedDescriptors(), InferenceEngine::details::InferenceEngineException);
// Builds a minimal Input -> Convolution network from an inline XML model and
// checks that the graph inserts a Reorder node around the convolution: the
// Reorder must expose exactly one primitive descriptor of implementation type
// `reorder`, with one input and one output config, converting between NCHW
// and a non-NCHW (blocked) layout in either direction.
// NOTE(review): this chunk is elided — parts of the XML model, the raw-string
// terminator, loop/closing braces are not visible here. Code left untouched;
// comments are confined to this header so none can land inside the raw
// string literal opened below.
41 TEST_F(MKLDNNGraphReorderTests, CreateReorder) {
42 std::string model = R"V0G0N(
43 <Net Name="Convolution_Only" version="2" precision="FP32" batch="1">
45 <layer name="in1" type="Input" precision="FP32" id="0">
55 <layer name="conv1" id="1" type="Convolution" precision="FP32">
56 <convolution stride-x="1" stride-y="1"
58 kernel-x="1" kernel-y="1"
59 output="17" group="1"/>
61 <weights offset="0" size="612" />
62 <biases offset="612" size="68" />
83 <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
88 InferenceEngine::CNNNetReader net_reader;
89 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
91 InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C,
92 {(1 * 1 * 17 * 9 / 1 + 17)
95 fill_data((float *) weights->buffer(), weights->size() / sizeof(float));
96 InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
98 net_reader.SetWeights(weights_ptr);
100 MKLDNNGraphTestClass graph;
101 graph.CreateGraph(net_reader.getNetwork());
103 auto& nodes = graph.getNodes();
104 for (int i = 0; i < nodes.size(); i++) {
105 if (nodes[i]->getType() == MKLDNNPlugin::Reorder) {
106 ASSERT_EQ(1, nodes[i]->getSupportedPrimitiveDescriptors().size());
107 ASSERT_EQ(MKLDNNPlugin::impl_desc_type::reorder,
108 nodes[i]->getSupportedPrimitiveDescriptors()[0].getImplementationType());
109 ASSERT_EQ(1, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().inConfs.size());
111 ASSERT_EQ(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().inConfs[0].desc.getLayout());
112 ASSERT_NE(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().outConfs[0].desc.getLayout());
114 ASSERT_NE(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().inConfs[0].desc.getLayout());
115 ASSERT_EQ(InferenceEngine::Layout::NCHW, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().outConfs[0].desc.getLayout());
117 ASSERT_EQ(1, nodes[i]->getSupportedPrimitiveDescriptors()[0].getConfig().outConfs.size());
// Builds an Input -> Reshape -> Reshape -> ScaleShift network, registers
// "reshape1" as an extra network output, fills the input with constant data,
// runs inference, and checks both output blobs element-wise: the reshape1
// output is expected to hold 1s and the final output 4s — exercising that an
// in-place Reorder inserted for the extra output does not corrupt either
// buffer.
// NOTE(review): this chunk is elided — parts of the XML model, the raw-string
// terminator, the weight/input fill-loop bodies (which presumably write the
// constants asserted below — confirm against the full file), and closing
// braces are not visible. Code left untouched; comments are confined to this
// header so none can land inside the raw string literal opened below.
122 TEST_F(MKLDNNGraphReorderTests, CreateInPlaceReorder) {
123 std::string model = R"V0G0N(
124 <Net Name="InPlaceReorder_Only" version="2" precision="FP32" batch="1">
126 <layer name="in1" type="Input" precision="FP32" id="0">
136 <layer name="reshape1" id="1" type="Reshape" precision="FP32">
152 <layer name="reshape2" id="2" type="Reshape" precision="FP32">
153 <data axis="0" num_axes="-1" dim="1, 4608"/>
169 <layer name="scaleshift" id="3" type="ScaleShift" precision="FP32">
187 <weights offset="0" size="12"/>
188 <biases offset="12" size="12"/>
193 <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
194 <edge from-layer="1" from-port="2" to-layer="2" to-port="1"/>
195 <edge from-layer="2" from-port="2" to-layer="3" to-port="1"/>
200 InferenceEngine::CNNNetReader net_reader;
201 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
203 InferenceEngine::TBlob<uint8_t> *weights = new InferenceEngine::TBlob<uint8_t>(InferenceEngine::Precision::U8, InferenceEngine::C, {24});
205 float *data = weights->buffer().as<float *>();
206 size_t dataSize = weights->byteSize() / sizeof(float);
207 for (size_t i = 0; i < dataSize; i++) {
210 InferenceEngine::TBlob<uint8_t>::Ptr weights_ptr = InferenceEngine::TBlob<uint8_t>::Ptr(weights);
212 net_reader.SetWeights(weights_ptr);
213 net_reader.getNetwork().addOutput("reshape1");
215 MKLDNNGraphTestClass graph;
216 graph.CreateGraph(net_reader.getNetwork());
218 InferenceEngine::SizeVector dims_src = {1, 9, 16, 32};
220 InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float, const InferenceEngine::SizeVector>(InferenceEngine::Precision::FP32, InferenceEngine::NCHW, dims_src);
222 data = src->buffer().as<float *>();
223 dataSize = src->size();
224 for (size_t i = 0; i < dataSize; i++) {
228 auto * srcPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(src.get());
230 if (srcPtr == nullptr)
231 FAIL() << "Cannot cast blob to TBlob<float>.";
233 InferenceEngine::BlobMap srcs;
234 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("in1", src));
236 InferenceEngine::OutputsDataMap out;
237 out = net_reader.getNetwork().getOutputsInfo();
238 InferenceEngine::BlobMap outputBlobs;
240 auto it = out.begin();
241 std::pair<std::string, InferenceEngine::DataPtr> item = *it;
243 InferenceEngine::TBlob<float>::Ptr output1;
244 output1 = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
246 outputBlobs[item.first] = output1;
250 InferenceEngine::TBlob<float>::Ptr output2;
251 output2 = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
253 outputBlobs[item.first] = output2;
255 graph.Infer(srcs, outputBlobs);
257 data = output1->data();
258 for (size_t i = 0; i < output1->size(); i++) {
259 ASSERT_EQ(data[i], 1);
261 data = output2->data();
262 for (size_t i = 0; i < output2->size(); i++) {
263 ASSERT_EQ(data[i], 4);