// Copyright (C) 2019-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "layer_test_utils.hpp"

namespace LayerTestsUtils {

LayerTestsCommon::LayerTestsCommon() : threshold(1e-2f) {
    core = PluginCache::get().ie(targetDevice);
}

void LayerTestsCommon::Run() {
    SKIP_IF_CURRENT_TEST_IS_DISABLED()

    ConfigurePlugin();
    LoadNetwork();
    Infer();
    Validate();
}

LayerTestsCommon::~LayerTestsCommon() {
    if (!configuration.empty()) {
        PluginCache::get().reset();
    }
}

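// The plugin cache is reset only when a per-test configuration was applied, presumably so that
// SetConfig() calls made for this test do not leak into other tests sharing the cached Core.
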
InferenceEngine::Blob::Ptr LayerTestsCommon::GenerateInput(const InferenceEngine::InputInfo &info) const {
    return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
}

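// GenerateInput is the default input-generation hook: it creates a blob matching the input
// TensorDesc and fills it with generated data. Tests that need a constrained value range would
// typically override it in their fixture. A minimal sketch, assuming the method is virtual and
// that createAndFillBlob accepts range/start_from arguments (hypothetical derived class):
//
//     InferenceEngine::Blob::Ptr MyLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const {
//         // hypothetical override: fill with values from [0, 10)
//         return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), 10, 0);
//     }
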
void LayerTestsCommon::Compare(const std::vector<std::uint8_t> &expected, const InferenceEngine::Blob::Ptr &actual) {
    ASSERT_EQ(expected.size(), actual->byteSize());
    const auto &expectedBuffer = expected.data();

    auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(actual);
    IE_ASSERT(memory);
    const auto lockedMemory = memory->rmap();
    const auto actualBuffer = lockedMemory.as<const std::uint8_t *>();

    const auto &precision = actual->getTensorDesc().getPrecision();
    auto bufferSize = actual->size();
    // With dynamic batch only part of the batch is inferred, so compare only that part of the buffer
    if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED)) {
        auto batchSize = actual->getTensorDesc().getDims()[0];
        auto halfBatchSize = batchSize > 1 ? batchSize / 2 : 1;
        bufferSize = (actual->size() * halfBatchSize / batchSize);
    }
    const auto &size = bufferSize;
    switch (precision) {
        case InferenceEngine::Precision::FP32:
            Compare(reinterpret_cast<const float *>(expectedBuffer), reinterpret_cast<const float *>(actualBuffer),
                    size, threshold);
            break;
        case InferenceEngine::Precision::I32:
            Compare(reinterpret_cast<const std::int32_t *>(expectedBuffer),
                    reinterpret_cast<const std::int32_t *>(actualBuffer), size, 0);
            break;
        default:
            FAIL() << "Comparator for " << precision << " precision isn't supported";
    }
}

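// The typed Compare<T>(expected, actual, size, threshold) overload dispatched to above is declared
// in the header. A minimal sketch of such an element-wise comparator, assuming a combined
// absolute/relative tolerance check (the in-tree implementation may differ):
//
//     template <class T>
//     static void Compare(const T *expected, const T *actual, std::size_t size, T threshold) {
//         for (std::size_t i = 0; i < size; ++i) {
//             const auto diff = std::abs(expected[i] - actual[i]);
//             const auto max = std::max(std::abs(expected[i]), std::abs(actual[i]));
//             ASSERT_TRUE(diff <= threshold || (max != 0 && diff / max <= threshold))
//                     << "Values differ at index " << i << ": expected " << expected[i]
//                     << ", actual " << actual[i] << ", threshold " << threshold;
//         }
//     }
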
void LayerTestsCommon::ConfigurePlugin() {
    if (!configuration.empty()) {
        core->SetConfig(configuration, targetDevice);
    }
}

void LayerTestsCommon::ConfigureNetwork() const {
    for (const auto &in : cnnNetwork.getInputsInfo()) {
        if (inLayout != InferenceEngine::Layout::ANY) {
            in.second->setLayout(inLayout);
        }
        if (inPrc != InferenceEngine::Precision::UNSPECIFIED) {
            in.second->setPrecision(inPrc);
        }
    }

    for (const auto &out : cnnNetwork.getOutputsInfo()) {
        if (outLayout != InferenceEngine::Layout::ANY) {
            out.second->setLayout(outLayout);
        }
        if (outPrc != InferenceEngine::Precision::UNSPECIFIED) {
            out.second->setPrecision(outPrc);
        }
    }
}

void LayerTestsCommon::LoadNetwork() {
    cnnNetwork = InferenceEngine::CNNNetwork{function};
    ConfigureNetwork();
    executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice);
}

void LayerTestsCommon::Infer() {
    inferRequest = executableNetwork.CreateInferRequest();
    inputs.clear();

    for (const auto &input : cnnNetwork.getInputsInfo()) {
        const auto &info = input.second;
        auto blob = GenerateInput(*info);
        inferRequest.SetBlob(info->name(), blob);
        inputs.push_back(blob);
    }
    if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED) &&
        configuration.at(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED) == InferenceEngine::PluginConfigParams::YES) {
        auto batchSize = cnnNetwork.getInputsInfo().begin()->second->getTensorDesc().getDims()[0] / 2;
        inferRequest.SetBatch(batchSize);
    }
    inferRequest.Infer();
}

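// Note: with dynamic batching enabled, SetBatch(batchSize) limits inference to the first half of
// the batch, which is why Compare() above shrinks the compared buffer by the same ratio.
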
std::vector<std::vector<std::uint8_t>> LayerTestsCommon::CalculateRefs() {
    // nGraph interpreter does not support f16
    // IE converts f16 to f32
    ngraph::pass::ConvertPrecision<ngraph::element::Type_t::f16, ngraph::element::Type_t::f32>().run_on_function(function);
    function->validate_nodes_and_infer_types();
    auto referenceInputs = std::vector<std::vector<std::uint8_t>>(inputs.size());
    for (std::size_t i = 0; i < inputs.size(); ++i) {
        const auto &input = inputs[i];
        const auto &inputSize = input->byteSize();

        auto &referenceInput = referenceInputs[i];
        referenceInput.resize(inputSize);

        auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(input);
        IE_ASSERT(memory);
        const auto lockedMemory = memory->rmap();
        const auto buffer = lockedMemory.as<const std::uint8_t *>();
        std::copy(buffer, buffer + inputSize, referenceInput.data());
    }

    const auto &actualOutputs = GetOutputs();
    const auto &convertType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(actualOutputs[0]->getTensorDesc().getPrecision());
    std::vector<std::vector<std::uint8_t>> expectedOutputs;
    switch (refMode) {
        case INTERPRETER: {
            expectedOutputs = ngraph::helpers::interpreterFunction(function, referenceInputs, convertType);
            break;
        }
        case CONSTANT_FOLDING: {
            const auto &foldedFunc = ngraph::helpers::foldFunction(function, referenceInputs);
            expectedOutputs = ngraph::helpers::getConstData(foldedFunc, convertType);
            break;
        }
        case IE: {
            // reference inference on device with other options and nGraph function has to be implemented here
            break;
        }
    }

    return expectedOutputs;
}

std::vector<InferenceEngine::Blob::Ptr> LayerTestsCommon::GetOutputs() {
    auto outputs = std::vector<InferenceEngine::Blob::Ptr>{};
    for (const auto &output : cnnNetwork.getOutputsInfo()) {
        const auto &name = output.first;
        outputs.push_back(inferRequest.GetBlob(name));
    }
    return outputs;
}

void LayerTestsCommon::Compare(const std::vector<std::vector<std::uint8_t>> &expectedOutputs, const std::vector<InferenceEngine::Blob::Ptr> &actualOutputs) {
    for (std::size_t outputIndex = 0; outputIndex < expectedOutputs.size(); ++outputIndex) {
        const auto &expected = expectedOutputs[outputIndex];
        const auto &actual = actualOutputs[outputIndex];
        Compare(expected, actual);
    }
}

void LayerTestsCommon::Validate() {
    auto expectedOutputs = CalculateRefs();
    const auto &actualOutputs = GetOutputs();

    if (expectedOutputs.empty()) {
        return;
    }

    IE_ASSERT(actualOutputs.size() == expectedOutputs.size())
        << "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size();

    Compare(expectedOutputs, actualOutputs);
}

void LayerTestsCommon::SetRefMode(RefMode mode) {
    refMode = mode;
}

}  // namespace LayerTestsUtils
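
// Usage sketch (hypothetical, not part of this file): a single-layer test derives from
// LayerTestsCommon, builds `function`, sets the target device and precisions, and calls Run(),
// which drives ConfigurePlugin -> LoadNetwork -> Infer -> Validate against the nGraph reference.
//
//     class ReluLayerTest : public LayerTestsUtils::LayerTestsCommon { /* hypothetical fixture */ };
//
//     TEST_P(ReluLayerTest, CompareWithRefs) {
//         SetRefMode(LayerTestsUtils::RefMode::INTERPRETER);  // reference mode enum name assumed
//         Run();
//     }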