1 // Copyright (C) 2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
5 #include <gtest/gtest.h>
6 #include <gmock/gmock-spec-builders.h>
7 #include "mkldnn_plugin/mkldnn_graph.h"
9 #include "test_graph.hpp"
11 #include "single_layer_common.hpp"
12 #include <mkldnn_plugin/mkldnn_extension_utils.h>
13 #include <extension/ext_list.hpp>
14 #include "tests_common.hpp"
17 using namespace ::testing;
19 using namespace mkldnn;
// Parameters for one Range-layer test case.
// NOTE(review): only part of the struct is visible in this fragment; the
// numeric fields `start`, `limit`, `delta` referenced later as p.start /
// p.limit / p.delta are presumably declared in the elided lines — confirm
// against the complete file.
21 struct range_test_params {
// IR precision string substituted into the model template ("I32" or "FP32").
22 std::string precision;
// Expected shape of the tensor produced by the Range layer.
26 InferenceEngine::SizeVector out_shape;
// Optional per-test callbacks run against the selected primitive descriptor.
28 std::vector<std::function<void(MKLDNNPlugin::PrimitiveDescInfo)>> comp;
// Reference implementation of the Range layer: fills `dst` with the
// arithmetic sequence start, start+delta, start+2*delta, ... (limit is
// exclusive), then the caller compares the plugin output against it.
// NOTE(review): the function signature line (name and the start/limit/delta
// parameters) is elided from this fragment — confirm parameter types there.
31 template <typename data_t>
36 InferenceEngine::TBlob<data_t> &dst
38 data_t* dst_data = dst.data();
// Element count. NOTE(review): the canonical Range size is
// ceil((limit - start) / delta); floor(|...|) agrees with it only when the
// ratio divides evenly, which holds for every parameter set instantiated
// below — confirm intent if non-integral ratios are ever added.
39 size_t work_amount_dst = std::floor(std::abs((limit - start) / delta));
// Guard: the declared out_shape must match the computed element count.
40 if (work_amount_dst != dst.size())
41 FAIL() << "Range indexes exceeds data tensor dimension";
// Accumulate in data_t so the I32 path reproduces the plugin's integer
// truncation behavior step-by-step.
43 data_t dst_value = static_cast<data_t>(start);
44 for (size_t iwork = 0; iwork < work_amount_dst; ++iwork, dst_value += static_cast<data_t>(delta)) {
45 dst_data[iwork] = dst_value;
// Parameterized functional test for the CPU-extension "Range" layer:
// builds a tiny IR with three scalar inputs (start/limit/delta), runs it
// through an MKLDNN graph with the cpu_extension library loaded, and
// compares the produced tensor against ref_range().
49 class MKLDNNCPUExtRangeTests : public TestsCommon, public WithParamInterface<range_test_params> {
// IR template; the _IIDXP_ and _OUT_ placeholders are patched in getModel().
// NOTE(review): the "output" layer below carries id="2" — the same id as the
// "limit" input — and the edges' from-port numbers (1,2,3) look unusual; in
// comparable tests the Range layer has a unique id (e.g. 4) and inputs feed
// from port 0/1. Only a fragment of the XML is visible here (port blocks and
// the raw-string terminator are elided) — verify against the full file.
50 std::string model_t = R"V0G0N(
51 <net Name="Range_net" version="2" precision="_IIDXP_" batch="1">
53 <layer name="start" type="Input" precision="_IIDXP_" id="1">
60 <layer name="limit" type="Input" precision="_IIDXP_" id="2">
67 <layer name="delta" type="Input" precision="_IIDXP_" id="3">
74 <layer name="output" id="2" type="Range" precision="_IIDXP_">
95 <edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
96 <edge from-layer="2" from-port="2" to-layer="2" to-port="2"/>
97 <edge from-layer="3" from-port="3" to-layer="2" to-port="3"/>
// Substitutes the test's precision and output shape into the IR template
// and returns the finished model string.
102 std::string getModel(range_test_params p) {
103 std::string model = model_t;
104 std::string out_shape;
// Replace every _IIDXP_ occurrence with "I32" or "FP32".
106 REPLACE_WITH_STR(model, "_IIDXP_", p.precision);
// Build the <dim>...</dim> list for the Range output port.
107 for (size_t i = 0; i < p.out_shape.size(); i++) {
108 out_shape += "<dim>";
109 out_shape += std::to_string(p.out_shape[i]) + "</dim>\n";
111 REPLACE_WITH_STR(model, "_OUT_", out_shape);
117 virtual void TearDown() {
// The entire test body lives in SetUp(): build the graph, run inference,
// and validate the output (TEST_P itself is empty).
120 virtual void SetUp() {
122 TestsCommon::SetUp();
123 range_test_params p = ::testing::WithParamInterface<range_test_params>::GetParam();
124 std::string model = getModel(p);
// Parse the generated IR from the in-memory string.
126 InferenceEngine::CNNNetReader net_reader;
127 ASSERT_NO_THROW(net_reader.ReadNetwork(model.data(), model.length()));
// Load the cpu_extension shared library so the "Range" kernel resolves.
// The no-op deleter is deliberate: cpuExt is stack-owned, the IExtensionPtr
// must not delete it.
129 InferenceEngine::Extension cpuExt(make_so_name("cpu_extension"));
130 MKLDNNPlugin::MKLDNNExtensionManager::Ptr extMgr(new MKLDNNPlugin::MKLDNNExtensionManager());
131 extMgr->AddExtension(InferenceEngine::IExtensionPtr(&cpuExt, [](InferenceEngine::IExtension*){}));
133 MKLDNNGraphTestClass graph;
134 graph.CreateGraph(net_reader.getNetwork(), extMgr);
// Collect the network's single output ("output" layer).
137 InferenceEngine::OutputsDataMap out;
138 out = net_reader.getNetwork().getOutputsInfo();
139 InferenceEngine::BlobMap outputBlobs;
// One-element (scalar) blobs feeding the three Range inputs.
142 InferenceEngine::Blob::Ptr start_scalar;
143 InferenceEngine::Blob::Ptr limit_scalar;
144 InferenceEngine::Blob::Ptr delta_scalar;
145 std::pair<std::string, InferenceEngine::DataPtr> item = *out.begin();
146 InferenceEngine::SizeVector scalar_dim(1, 1);
147 InferenceEngine::BlobMap srcs;
148 InferenceEngine::SizeVector out_dims;
// I32 path: the float test parameters are truncated to int32 and the
// result is compared element-for-element (exact integer equality).
149 if (p.precision == "I32") {
150 start_scalar = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
151 start_scalar->allocate();
152 static_cast<int32_t*>(start_scalar->buffer())[0] = static_cast<int32_t>(p.start);
153 auto * start_scalarPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(start_scalar.get());
154 if (start_scalarPtr == nullptr)
155 FAIL() << "Cannot cast blob to TBlob<int32_t>.";
157 limit_scalar = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
158 limit_scalar->allocate();
159 static_cast<int32_t*>(limit_scalar->buffer())[0] = static_cast<int32_t>(p.limit);
160 auto * limit_scalarPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(limit_scalar.get());
161 if (limit_scalarPtr == nullptr)
162 FAIL() << "Cannot cast blob to TBlob<int32_t>.";
164 delta_scalar = InferenceEngine::make_shared_blob<int32_t>({ InferenceEngine::Precision::I32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
165 delta_scalar->allocate();
166 static_cast<int32_t*>(delta_scalar->buffer())[0] = static_cast<int32_t>(p.delta);
167 auto * delta_scalarPtr = dynamic_cast<InferenceEngine::TBlob<int32_t>*>(delta_scalar.get());
168 if (delta_scalarPtr == nullptr)
169 FAIL() << "Cannot cast blob to TBlob<int32_t>.";
// Input names must match the <layer name="..."> entries in the IR.
171 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("start", start_scalar));
172 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("limit", limit_scalar));
173 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("delta", delta_scalar));
// Output blob shaped from the network's own tensor descriptor.
176 InferenceEngine::TBlob<int32_t>::Ptr output;
177 output = InferenceEngine::make_shared_blob<int32_t>(item.second->getTensorDesc());
179 outputBlobs[item.first] = output;
// Reference result computed on the host for comparison.
182 InferenceEngine::TBlob<int32_t> dst_ref(item.second->getTensorDesc());
184 ref_range(p.start, p.limit, p.delta, dst_ref);
// Infer and compare exactly — integers admit no tolerance.
// NOTE(review): `int i` vs size_t dst_ref.size() is a signed/unsigned
// mix; harmless at these sizes but worth normalizing.
187 graph.Infer(srcs, outputBlobs);
188 for (int i = 0; i < dst_ref.size(); i++) {
189 if (dst_ref.data()[i] != (*output).data()[i])
190 FAIL() << "The difference between res_ptr[i] and ref_ptr[i]";
// FP32 path: same structure, but float blobs and a tolerance-based
// compare() at the end instead of exact equality.
192 } else if (p.precision == "FP32") {
193 start_scalar = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
194 start_scalar->allocate();
195 static_cast<float*>(start_scalar->buffer())[0] = p.start;
196 auto * start_scalarPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(start_scalar.get());
197 if (start_scalarPtr == nullptr)
198 FAIL() << "Cannot cast blob to TBlob<float>.";
200 limit_scalar = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
201 limit_scalar->allocate();
202 static_cast<float*>(limit_scalar->buffer())[0] = p.limit;
203 auto * limit_scalarPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(limit_scalar.get());
204 if (limit_scalarPtr == nullptr)
205 FAIL() << "Cannot cast blob to TBlob<float>.";
207 delta_scalar = InferenceEngine::make_shared_blob<float>({ InferenceEngine::Precision::FP32, scalar_dim, InferenceEngine::TensorDesc::getLayoutByDims(scalar_dim) });
208 delta_scalar->allocate();
209 static_cast<float*>(delta_scalar->buffer())[0] = p.delta;
210 auto * delta_scalarPtr = dynamic_cast<InferenceEngine::TBlob<float>*>(delta_scalar.get());
211 if (delta_scalarPtr == nullptr)
212 FAIL() << "Cannot cast blob to TBlob<float>.";
214 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("start", start_scalar));
215 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("limit", limit_scalar));
216 srcs.insert(std::pair<std::string, InferenceEngine::Blob::Ptr>("delta", delta_scalar));
219 InferenceEngine::Blob::Ptr output;
220 output = InferenceEngine::make_shared_blob<float>(item.second->getTensorDesc());
222 outputBlobs[item.first] = output;
225 InferenceEngine::TBlob<float> dst_ref(item.second->getTensorDesc());
227 ref_range(p.start, p.limit, p.delta, dst_ref);
230 graph.Infer(srcs, outputBlobs);
// compare() (single_layer_common.hpp) checks float closeness.
231 compare(*output, dst_ref);
// Unexpected engine exceptions fail the test (handler body elided here).
235 } catch (const InferenceEngine::details::InferenceEngineException &e) {
// Parameterized test body is intentionally empty: all of the work (graph
// construction, inference, comparison against ref_range) happens in the
// fixture's SetUp().
241 TEST_P(MKLDNNCPUExtRangeTests, TestsRange) {}
// Test-case table. Every (limit - start) / delta ratio below divides
// evenly, so ref_range's floor-based element count matches the declared
// out_shape exactly (e.g. I32: (18-3)/3 = 5; FP32: (-1-3)/-.5 = 8).
// NOTE(review): the `::testing::Values(` opener and the closing `));`
// fall outside the visible fragment of this file.
243 INSTANTIATE_TEST_CASE_P(
244 TestsRange, MKLDNNCPUExtRangeTests,
246 // Params: precision, start, limit, delta, out_shape
247 range_test_params{ "I32", 3.f, 18.f, 3.f, { 5 } },
248 range_test_params{ "I32", 3.f, 1.f, -1.f, { 2 } },
249 range_test_params{ "I32", 3.f, -3.f, -1.f, { 6 } },
250 range_test_params{ "I32", 0.f, 5.f, 1.f, { 5 } },
251 range_test_params{"FP32", 3.f, 18.f, 3.f, { 5 } },
252 range_test_params{"FP32", 3.f, 1.f, -.5f, { 4 } },
253 range_test_params{"FP32", 3.f, -1.f, -.5f, { 8 } },
254 range_test_params{"FP32", 0.f, 5.f, 1.f, { 5 } }