--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <algorithm>
+#include <cmath>
+#include <string>
+
+#include "functional_test_utils/plugin_config.hpp"
+#include "functional_test_utils/blob_utils.hpp"
+#include "legacy/ie_ngraph_utils.hpp"
+
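+// Computes a per-input GNA scale factor for every network input that does not already
+// have one in the test configuration. For FP32 inputs the factor maps the largest
+// observed absolute input value onto MAX_VAL_2B_FEAT; e.g. an input whose values peak
+// at 0.5 would get GNA_SCALE_FACTOR_<i> = 16384 / 0.5 = 32768.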
+void PreparePluginConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
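+    // 16384 = 2^14: the target peak used when scaling features into GNA's 16-bit input range.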
+    const float MAX_VAL_2B_FEAT = 16384.0f;
+    auto inputParameters = test->GetFunction()->get_parameters();
+    auto& configuration = test->GetConfiguration();
+    for (size_t i = 0; i < inputParameters.size(); ++i) {
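+        // Keys follow GNA's per-input convention (GNA_SCALE_FACTOR_<index>); factors the
+        // test has already set are left untouched.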
+        std::string scaleFactorConfigKey = "GNA_SCALE_FACTOR_" + std::to_string(i);
+        if (configuration.find(scaleFactorConfigKey) != configuration.end()) {
+            continue;
+        }
+
+        auto elementType = inputParameters[i]->get_element_type();
+        auto shape = inputParameters[i]->get_shape();
+        // FP16 inputs are generated and scanned as FP32 below, so widen the precision here.
+        auto precision = InferenceEngine::details::convertPrecision(elementType);
+        if (precision == InferenceEngine::Precision::FP16) {
+            precision = InferenceEngine::Precision::FP32;
+        }
+
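+        // Build a throwaway Data/InputInfo pair so the test's own GenerateInput() can
+        // produce a blob that is representative of the real input data.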
+        InferenceEngine::SizeVector size(shape);
+        InferenceEngine::TensorDesc tensor(precision, size, InferenceEngine::Layout::ANY);
+        InferenceEngine::DataPtr dataPtr = std::make_shared<InferenceEngine::Data>("tmp", tensor);
+
+        InferenceEngine::InputInfo info;
+        info.setInputData(dataPtr);
+        info.setPrecision(precision);
+
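+        // Non-FP32 inputs keep the default scale factor of 1.0f.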
+        auto blob = test->GenerateInput(info);
+        float floatScaleFactor = 1.0f;
+
+        auto memory = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
+        IE_ASSERT(memory);
+
+        // A read-only mapping suffices: the blob is only scanned for its peak value.
+        const auto lockedMemory = memory->rmap();
+        if (precision == InferenceEngine::Precision::FP32) {
+            const float* ptrFloatFeat = lockedMemory.as<const float*>();
+            float max = 0.0f;
+
+            // Find the largest absolute value across the generated input.
+            for (size_t j = 0; j < blob->size(); j++) {
+                max = std::max(max, std::fabs(ptrFloatFeat[j]));
+            }
+
+            // Map the observed peak onto the 16-bit feature ceiling; all-zero inputs keep factor 1.
+            floatScaleFactor = (max == 0.0f) ? 1.0f : MAX_VAL_2B_FEAT / max;
+        }
+
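+        // Store the factor so it reaches the plugin via LoadNetwork()'s configuration map.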
+        configuration[scaleFactorConfigKey] = std::to_string(floatScaleFactor);
+    }
+}
#include <transformations/op_conversions/convert_space_to_batch.hpp>
#include "layer_test_utils.hpp"
+#include "plugin_config.hpp"
namespace LayerTestsUtils {
void LayerTestsCommon::LoadNetwork() {
    cnnNetwork = InferenceEngine::CNNNetwork{function};
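+    // Fill in plugin-specific options (e.g. GNA scale factors) before the network is configured and loaded.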
+    PreparePluginConfiguration(this);
    ConfigureNetwork();
    executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice, configuration);
}
void LayerTestsCommon::SetRefMode(RefMode mode) {
    refMode = mode;
}
+
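+// Expose the nGraph function under test so plugin configuration hooks can inspect its inputs.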
+std::shared_ptr<ngraph::Function> LayerTestsCommon::GetFunction() {
+    return function;
+}
+
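+// Expose the mutable configuration map so hooks can add options before LoadNetwork().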
+std::map<std::string, std::string>& LayerTestsCommon::GetConfiguration() {
+    return configuration;
+}
} // namespace LayerTestsUtils