// Copyright (C) 2019-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <transformations/convert_batch_to_space.hpp>
#include <transformations/convert_space_to_batch.hpp>

#include "layer_test_utils.hpp"
namespace LayerTestsUtils {

LayerTestsCommon::LayerTestsCommon() : threshold(1e-2f) {
    core = PluginCache::get().ie(targetDevice);
}
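
// Run() drives the common test flow: apply the plugin configuration, compile
// the nGraph function on the target device, infer on generated inputs, and
// validate the results against a reference implementation.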
void LayerTestsCommon::Run() {
    SKIP_IF_CURRENT_TEST_IS_DISABLED()

    ConfigurePlugin();
    LoadNetwork();
    Infer();
    Validate();
}

LayerTestsCommon::~LayerTestsCommon() {
    if (!configuration.empty()) {
        PluginCache::get().reset();
    }
}
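
// Default input generation: a blob filled with pseudo-random data matching the
// input TensorDesc. Tests that need constrained operand ranges can override
// this method in their fixture.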
InferenceEngine::Blob::Ptr LayerTestsCommon::GenerateInput(const InferenceEngine::InputInfo &info) const {
    return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
}
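
// Compares a reference buffer (raw bytes produced by CalculateRefs) against an
// actual output blob, dispatching on the blob precision.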
void LayerTestsCommon::Compare(const std::vector<std::uint8_t> &expected, const InferenceEngine::Blob::Ptr &actual) {
    ASSERT_EQ(expected.size(), actual->byteSize());
    const auto &expectedBuffer = expected.data();

    auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(actual);
    IE_ASSERT(memory);
    const auto lockedMemory = memory->wmap();
    const auto actualBuffer = lockedMemory.as<const std::uint8_t *>();

    const auto &precision = actual->getTensorDesc().getPrecision();
    auto bufferSize = actual->size();
    // With dynamic batching the network is inferred on only half of the batch
    // (see Infer() below), so compare just the portion that was computed.
    if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED)) {
        auto batchSize = actual->getTensorDesc().getDims()[0];
        auto halfBatchSize = batchSize > 1 ? batchSize / 2 : 1;
        bufferSize = (actual->size() * halfBatchSize / batchSize);
    }
    const auto &size = bufferSize;
    switch (precision) {
        case InferenceEngine::Precision::FP32:
            Compare(reinterpret_cast<const float *>(expectedBuffer), reinterpret_cast<const float *>(actualBuffer),
                    size, threshold);
            break;
        case InferenceEngine::Precision::I32:
            Compare(reinterpret_cast<const std::int32_t *>(expectedBuffer),
                    reinterpret_cast<const std::int32_t *>(actualBuffer), size, 0);
            break;
        default:
            FAIL() << "Comparator for " << precision << " precision isn't supported";
    }
}
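
// The pointer-based Compare<T> used in both switches above performs an
// element-wise check. A minimal sketch of the idea (illustrative only, not the
// exact implementation from the accompanying header):
//
//   template <class T>
//   static void Compare(const T *expected, const T *actual, std::size_t size, T threshold) {
//       for (std::size_t i = 0; i < size; ++i) {
//           const auto diff = std::abs(expected[i] - actual[i]);
//           ASSERT_LE(diff, threshold) << "mismatch at index " << i;
//       }
//   }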

void LayerTestsCommon::Compare(const InferenceEngine::Blob::Ptr &expected, const InferenceEngine::Blob::Ptr &actual) {
    auto get_raw_buffer = [] (const InferenceEngine::Blob::Ptr &blob) {
        auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
        IE_ASSERT(memory);
        const auto lockedMemory = memory->wmap();
        return lockedMemory.as<const std::uint8_t *>();
    };
    const auto expectedBuffer = get_raw_buffer(expected);
    const auto actualBuffer = get_raw_buffer(actual);

    const auto &precision = actual->getTensorDesc().getPrecision();
    const auto &size = actual->size();
    switch (precision) {
        case InferenceEngine::Precision::FP32:
            Compare(reinterpret_cast<const float *>(expectedBuffer), reinterpret_cast<const float *>(actualBuffer),
                    size, threshold);
            break;
        case InferenceEngine::Precision::I32:
            Compare(reinterpret_cast<const std::int32_t *>(expectedBuffer),
                    reinterpret_cast<const std::int32_t *>(actualBuffer), size, 0);
            break;
        default:
            FAIL() << "Comparator for " << precision << " precision isn't supported";
    }
}
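
// Forwards the test's key/value options (e.g. KEY_DYN_BATCH_ENABLED) to the
// plugin before the network is loaded; an empty map keeps the plugin defaults.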
void LayerTestsCommon::ConfigurePlugin() {
    if (!configuration.empty()) {
        core->SetConfig(configuration, targetDevice);
    }
}
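
// Applies the test's optional I/O overrides; a layout of ANY or a precision of
// UNSPECIFIED keeps the corresponding network default.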
void LayerTestsCommon::ConfigureNetwork() const {
    for (const auto &in : cnnNetwork.getInputsInfo()) {
        if (inLayout != InferenceEngine::Layout::ANY) {
            in.second->setLayout(inLayout);
        }
        if (inPrc != InferenceEngine::Precision::UNSPECIFIED) {
            in.second->setPrecision(inPrc);
        }
    }

    for (const auto &out : cnnNetwork.getOutputsInfo()) {
        if (outLayout != InferenceEngine::Layout::ANY) {
            out.second->setLayout(outLayout);
        }
        if (outPrc != InferenceEngine::Precision::UNSPECIFIED) {
            out.second->setPrecision(outPrc);
        }
    }
}

void LayerTestsCommon::LoadNetwork() {
    cnnNetwork = InferenceEngine::CNNNetwork{function};
    ConfigureNetwork();
    executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice);
}
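
// Creates the request, binds a generated blob to every input, and runs one
// synchronous inference; with dynamic batching enabled only half of the batch
// is inferred, which is what the byte-level Compare above accounts for.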
void LayerTestsCommon::Infer() {
    inferRequest = executableNetwork.CreateInferRequest();
    inputs.clear();

    for (const auto &input : executableNetwork.GetInputsInfo()) {
        const auto &info = input.second;
        auto blob = GenerateInput(*info);
        inferRequest.SetBlob(info->name(), blob);
        inputs.push_back(blob);
    }
    // Infer on half of the batch when the configuration value of
    // KEY_DYN_BATCH_ENABLED is YES.
    const auto dynBatch = configuration.find(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED);
    if (dynBatch != configuration.end() && dynBatch->second == InferenceEngine::PluginConfigParams::YES) {
        auto batchSize = executableNetwork.GetInputsInfo().begin()->second->getTensorDesc().getDims()[0] / 2;
        inferRequest.SetBatch(batchSize);
    }
    inferRequest.Infer();
}
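
// Computes reference outputs for the generated inputs. The backend is chosen
// via SetRefMode(): INTERPRETER evaluates the nGraph function directly,
// CONSTANT_FOLDING folds the function to constants, and
// INTERPRETER_TRANSFORMATIONS lowers the function with a pass pipeline first.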
std::vector<std::vector<std::uint8_t>> LayerTestsCommon::CalculateRefs() {
    // The nGraph interpreter does not support f16, and IE converts f16 to f32
    // on load, so apply the same conversion before computing references.
    ngraph::pass::ConvertPrecision<ngraph::element::Type_t::f16, ngraph::element::Type_t::f32>().run_on_function(function);
    function->validate_nodes_and_infer_types();

    // Snapshot every input blob into a raw byte vector for the reference backend.
    auto referenceInputs = std::vector<std::vector<std::uint8_t>>(inputs.size());
    for (std::size_t i = 0; i < inputs.size(); ++i) {
        const auto& input = inputs[i];
        const auto& inputSize = input->byteSize();

        auto& referenceInput = referenceInputs[i];
        referenceInput.resize(inputSize);

        auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(input);
        IE_ASSERT(memory);
        const auto lockedMemory = memory->wmap();
        const auto buffer = lockedMemory.as<const std::uint8_t*>();
        std::copy(buffer, buffer + inputSize, referenceInput.data());
    }

    auto ieOutPrc = outPrc;
    if (outPrc == InferenceEngine::Precision::UNSPECIFIED) {
        // No explicit output precision was requested: mirror what the device produced.
        const auto &actualOutputs = GetOutputs();
        ieOutPrc = actualOutputs[0]->getTensorDesc().getPrecision();
    }

    const auto &convertType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(ieOutPrc);
    std::vector<std::vector<std::uint8_t>> expectedOutputs;
    switch (refMode) {
        case INTERPRETER: {
            expectedOutputs = ngraph::helpers::interpreterFunction(function, referenceInputs, convertType);
            break;
        }
        case CONSTANT_FOLDING: {
            const auto &foldedFunc = ngraph::helpers::foldFunction(function, referenceInputs);
            expectedOutputs = ngraph::helpers::getConstData(foldedFunc, convertType);
            break;
        }
        case IE: {
            // TODO: reference inference on a device with other options and an
            // nGraph function has to be implemented here.
            break;
        }
        case INTERPRETER_TRANSFORMATIONS: {
            auto cloned_function = ngraph::clone_function(*function);

            // TODO: add the ability to configure the required transformations
            // for each test separately.
            ngraph::pass::Manager m;
            m.register_pass<ngraph::pass::ConvertSpaceToBatch>();
            m.register_pass<ngraph::pass::ConvertBatchToSpace>();
            m.run_passes(cloned_function);
            expectedOutputs = ngraph::helpers::interpreterFunction(cloned_function, referenceInputs, convertType);
            break;
        }
    }

    return expectedOutputs;
}
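
// One blob per network output, in GetOutputsInfo() iteration order; the
// multi-output Compare below pairs these with the references by index.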
std::vector<InferenceEngine::Blob::Ptr> LayerTestsCommon::GetOutputs() {
    auto outputs = std::vector<InferenceEngine::Blob::Ptr>{};
    for (const auto &output : executableNetwork.GetOutputsInfo()) {
        const auto &name = output.first;
        outputs.push_back(inferRequest.GetBlob(name));
    }
    return outputs;
}

void LayerTestsCommon::Compare(const std::vector<std::vector<std::uint8_t>>& expectedOutputs, const std::vector<InferenceEngine::Blob::Ptr>& actualOutputs) {
    for (std::size_t outputIndex = 0; outputIndex < expectedOutputs.size(); ++outputIndex) {
        const auto& expected = expectedOutputs[outputIndex];
        const auto& actual = actualOutputs[outputIndex];
        Compare(expected, actual);
    }
}
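
// Validation tolerates an empty reference vector: CalculateRefs() returns no
// data for modes that are not implemented yet (see the IE case above), and
// such runs are skipped rather than failed.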
void LayerTestsCommon::Validate() {
    auto expectedOutputs = CalculateRefs();
    const auto& actualOutputs = GetOutputs();

    if (expectedOutputs.empty()) {
        return;
    }

    IE_ASSERT(actualOutputs.size() == expectedOutputs.size())
        << "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE has " << actualOutputs.size();

    Compare(expectedOutputs, actualOutputs);
}
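
// Typical use from a parameterized test (illustrative sketch; MyLayerTest is a
// hypothetical fixture derived from LayerTestsCommon, not part of this file):
//
//   TEST_P(MyLayerTest, CompareWithRefs) {
//       SetRefMode(INTERPRETER_TRANSFORMATIONS);  // optional override of the default mode
//       Run();
//   }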
void LayerTestsCommon::SetRefMode(RefMode mode) {
    refMode = mode;
}

}  // namespace LayerTestsUtils