// Copyright (C) 2019-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "layer_test_utils.hpp"

namespace LayerTestsUtils {

LayerTestsCommon::LayerTestsCommon() : threshold(1e-2f) {
    core = PluginCache::get().ie(targetDevice);
}

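// Runs one test case end to end: configure the plugin, compile the network,
// execute inference, and validate the outputs against the reference.
//
// Typical usage (hypothetical derived test, shown for illustration only):
//
//     TEST_P(ReluLayerTest, CompareWithRefs) {
//         Run();
//     }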
void LayerTestsCommon::Run() {
    SKIP_IF_CURRENT_TEST_IS_DISABLED()

    ConfigurePlugin();
    LoadNetwork();
    Infer();
    Validate();
}

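// Drop the cached plugin whenever this test may have left per-device state
// behind: a custom configuration was applied or the GPU device was used.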
LayerTestsCommon::~LayerTestsCommon() {
    if (!configuration.empty() || targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) {
        PluginCache::get().reset();
    }
}

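// Creates an input blob matching the given tensor descriptor and fills it
// with generated data; a customization point for derived tests that need
// specific input distributions.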
InferenceEngine::Blob::Ptr LayerTestsCommon::GenerateInput(const InferenceEngine::InputInfo &info) const {
    return FuncTestUtils::createAndFillBlob(info.getTensorDesc());
}

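// Compares a reference byte buffer against a device output blob. The blob
// precision selects the typed comparator: FP32 uses the `threshold` set in
// the constructor, I32 demands exact equality.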
void LayerTestsCommon::Compare(const std::vector<std::uint8_t> &expected, const InferenceEngine::Blob::Ptr &actual) {
    ASSERT_EQ(expected.size(), actual->byteSize());
    const auto &expectedBuffer = expected.data();

    auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(actual);
    IE_ASSERT(memory);  // the cast yields nullptr for non-memory blobs
    const auto lockedMemory = memory->rmap();  // read-only access is sufficient here
    const auto actualBuffer = lockedMemory.as<const std::uint8_t *>();

    const auto &precision = actual->getTensorDesc().getPrecision();
    const auto &size = actual->size();
    switch (precision) {
        case InferenceEngine::Precision::FP32:
            Compare(reinterpret_cast<const float *>(expectedBuffer), reinterpret_cast<const float *>(actualBuffer),
                    size, threshold);
            break;
        case InferenceEngine::Precision::I32:
            Compare(reinterpret_cast<const std::int32_t *>(expectedBuffer),
                    reinterpret_cast<const std::int32_t *>(actualBuffer), size, 0);
            break;
        default:
            FAIL() << "Comparator for " << precision << " precision isn't supported";
    }
}

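// Forwards the per-test configuration map to the plugin, if any. A test can
// populate it before Run(), e.g. (illustrative key from ie_plugin_config.hpp):
//
//     configuration = {{CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)}};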
void LayerTestsCommon::ConfigurePlugin() const {
    if (!configuration.empty()) {
        core->SetConfig(configuration, targetDevice);
    }
}

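// Applies the requested I/O layouts and precisions to the network before
// compilation; Layout::ANY and Precision::UNSPECIFIED mean "keep defaults".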
void LayerTestsCommon::ConfigureNetwork() const {
    for (const auto &in : cnnNetwork.getInputsInfo()) {
        if (inLayout != InferenceEngine::Layout::ANY) {
            in.second->setLayout(inLayout);
        }
        if (inPrc != InferenceEngine::Precision::UNSPECIFIED) {
            in.second->setPrecision(inPrc);
        }
    }

    for (const auto &out : cnnNetwork.getOutputsInfo()) {
        if (outLayout != InferenceEngine::Layout::ANY) {
            out.second->setLayout(outLayout);
        }
        if (outPrc != InferenceEngine::Precision::UNSPECIFIED) {
            out.second->setPrecision(outPrc);
        }
    }
}

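// Wraps the nGraph function into a CNNNetwork, applies the I/O configuration,
// and compiles the result for the target device.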
void LayerTestsCommon::LoadNetwork() {
    cnnNetwork = InferenceEngine::CNNNetwork{function};
    ConfigureNetwork();
    executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice);
}

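// Creates an infer request, binds a freshly generated blob to every network
// input (keeping the list of blobs for the reference computation), and runs
// synchronous inference.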
void LayerTestsCommon::Infer() {
    inferRequest = executableNetwork.CreateInferRequest();
    inputs.clear();  // drop blobs from any previous run

    for (const auto &input : cnnNetwork.getInputsInfo()) {
        const auto &info = input.second;

        auto blob = GenerateInput(*info);
        inferRequest.SetBlob(info->name(), blob);
        inputs.push_back(blob);
    }
    inferRequest.Infer();
}

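// Computes the expected outputs on the host: the captured input blobs are
// copied into plain byte vectors and fed to the reference backend selected
// via SetRefMode(), e.g. the nGraph interpreter.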
std::vector<std::vector<std::uint8_t>> LayerTestsCommon::CalculateRefs() {
    // The nGraph interpreter does not support f16, so convert it to f32
    // up front, mirroring what IE itself does.
    ngraph::pass::ConvertPrecision<ngraph::element::Type_t::f16, ngraph::element::Type_t::f32>().run_on_function(function);
    function->validate_nodes_and_infer_types();

    auto referenceInputs = std::vector<std::vector<std::uint8_t>>(inputs.size());
    for (std::size_t i = 0; i < inputs.size(); ++i) {
        const auto &input = inputs[i];
        const auto &inputSize = input->byteSize();

        auto &referenceInput = referenceInputs[i];
        referenceInput.resize(inputSize);

        auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(input);
        IE_ASSERT(memory);  // the cast yields nullptr for non-memory blobs
        const auto lockedMemory = memory->rmap();  // read-only access is sufficient here
        const auto buffer = lockedMemory.as<const std::uint8_t *>();
        std::copy(buffer, buffer + inputSize, referenceInput.data());
    }

    const auto &actualOutputs = GetOutputs();
    const auto &convertType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(actualOutputs[0]->getTensorDesc().getPrecision());
    std::vector<std::vector<std::uint8_t>> expectedOutputs;
    switch (refMode) {
        case INTERPRETER: {
            expectedOutputs = ngraph::helpers::interpreterFunction(function, referenceInputs, convertType);
            break;
        }
        case CONSTANT_FOLDING: {
            const auto &foldedFunc = ngraph::helpers::foldFunction(function, referenceInputs);
            expectedOutputs = ngraph::helpers::getConstData(foldedFunc, convertType);
            break;
        }
        case IE: {
            // Reference inference on a device with other options and an nGraph function has to be implemented here.
            break;
        }
    }

    return expectedOutputs;
}

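// Collects the output blobs of the last infer request, in the order reported
// by the network's output info map.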
std::vector<InferenceEngine::Blob::Ptr> LayerTestsCommon::GetOutputs() {
    auto outputs = std::vector<InferenceEngine::Blob::Ptr>{};
    for (const auto &output : cnnNetwork.getOutputsInfo()) {
        const auto &name = output.first;
        outputs.push_back(inferRequest.GetBlob(name));
    }
    return outputs;
}

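// Compares each reference/actual output pair using the byte-level overload.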
void LayerTestsCommon::Compare(const std::vector<std::vector<std::uint8_t>> &expectedOutputs,
                               const std::vector<InferenceEngine::Blob::Ptr> &actualOutputs) {
    for (std::size_t outputIndex = 0; outputIndex < expectedOutputs.size(); ++outputIndex) {
        const auto &expected = expectedOutputs[outputIndex];
        const auto &actual = actualOutputs[outputIndex];
        Compare(expected, actual);
    }
}

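// An empty reference vector means the selected reference mode produced no
// data (e.g. the unimplemented IE mode in CalculateRefs), so validation is
// skipped and the test only checks that compilation and inference succeed.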
void LayerTestsCommon::Validate() {
    auto expectedOutputs = CalculateRefs();
    const auto &actualOutputs = GetOutputs();

    if (expectedOutputs.empty()) {
        return;
    }

    IE_ASSERT(actualOutputs.size() == expectedOutputs.size())
        << "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size();

    Compare(expectedOutputs, actualOutputs);
}

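// Selects the reference backend used by CalculateRefs(); e.g. a test whose
// graph folds entirely to constants can call SetRefMode(CONSTANT_FOLDING)
// before Run().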
void LayerTestsCommon::SetRefMode(RefMode mode) {
    refMode = mode;
}

}  // namespace LayerTestsUtils