From 6e0001a1892994645a2aaf5b6ff0df7d7e5648ed Mon Sep 17 00:00:00 2001
From: Vladimir Paramuzov
Date: Wed, 29 Jul 2020 14:08:58 +0300
Subject: [PATCH] [IE CLDNN] Changed weights layout for 1d scaleshift (#1483)

---
 .../src/cldnn_engine/cldnn_program.cpp             | 20 +++++++-
 .../shared_tests_instances/skip_tests_config.cpp   |  3 --
 .../subgraph_tests/scale_shift.cpp                 | 57 ++++++++++++++++++++++
 .../shared/src/subgraph_tests/scale_shift.cpp      |  7 ++-
 4 files changed, 81 insertions(+), 6 deletions(-)
 create mode 100644 inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/scale_shift.cpp

diff --git a/inference-engine/src/cldnn_engine/cldnn_program.cpp b/inference-engine/src/cldnn_engine/cldnn_program.cpp
index 45fad3a..10c3a0e 100644
--- a/inference-engine/src/cldnn_engine/cldnn_program.cpp
+++ b/inference-engine/src/cldnn_engine/cldnn_program.cpp
@@ -1318,9 +1318,18 @@ void Program::CreateScaleShiftPrimitive(cldnn::topology& topology, InferenceEngi
     cldnn::primitive_id biasPrimID = scaleShiftLayer->name + m_biasesTag;
 
     const auto& wDims = scaleShiftLayer->_weights->getTensorDesc().getDims();
+    const auto& iDims = scaleShiftLayer->insData.front().lock()->getTensorDesc().getDims();
     cldnn::tensor weightTensor(1);
     switch (wDims.size()) {
-    case 1: weightTensor = (cldnn::tensor) cldnn::feature(TensorValue(wDims[0]));  // value per feature (or 1 global value)
+    case 1:
+        if (iDims.size() != 1) {
+            weightTensor = (cldnn::tensor) cldnn::feature(TensorValue(wDims[0]));  // value per feature (or 1 global value)
+        } else if (iDims.size() == 1 && wDims[0] == iDims[0]) {
+            // If input tensor is 1D, then we need to interpret weights as batch to have consistent shapes.
+            weightTensor = (cldnn::tensor) cldnn::batch(TensorValue(wDims[0]));
+        } else {
+            THROW_IE_EXCEPTION << "inconsistent input tensor and scale shapes in scaleshift layer " << layer->name;
+        }
         break;
     default: weightTensor = CldnnTensorFromIEDims(wDims);
         break;
@@ -3532,6 +3541,15 @@ void Program::AddConstantBlobInput(cldnn::topology& topology, InferenceEngine::C
                     needsBatchInterpretation = true;
                     break;
                 }
+            } else if (LayerTypeFromStr(next->type) == Eltwise) {
+                bool all_inputs_1d = true;
+                for (auto& in : next->insData) {
+                    auto& in_shape = in.lock()->getTensorDesc().getDims();
+                    if (in_shape.size() != 1)
+                        all_inputs_1d = false;
+                }
+                needsBatchInterpretation = all_inputs_1d;
+                break;
             } else if (LayerTypeFromStr(next->type) == Gather) {
                 needsBatchInterpretation = true;
                 break;
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp
index c5a7b20..1f9d072 100644
--- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/skip_tests_config.cpp
@@ -9,9 +9,6 @@
 
 std::vector<std::string> disabledTestPatterns() {
     return {
-        // cldnn treats 1d constant as [1, f, 1, 1] tensor instead of [b, 1, 1, 1] which leads to fails of these tests
-        R"(.*(EltwiseLayerTest).*IS=\(.*\..*\..*\..*\..*\).*secondaryInputType=CONSTANT.*opType=SCALAR.*)",
-        R"(.*(EltwiseLayerTest).*IS=\(.*\).*secondaryInputType=CONSTANT.*)",
         // Issues - 34059
         ".*BehaviorTests\\.pluginDoesNotChangeOriginalNetwork.*",
         //TODO: Issue: 34349
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/scale_shift.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/scale_shift.cpp
new file mode 100644
index 0000000..5705208
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/subgraph_tests/scale_shift.cpp
@@ -0,0 +1,57 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include "subgraph_tests/scaleshift.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+std::vector<std::vector<std::vector<size_t>>> inShapes = {
+    {{100}},
+    {{100}, {100}},
+    {{1, 8}},
+    {{2, 16}},
+    {{3, 32}},
+    {{4, 64}},
+    {{4, 64}, {64}},
+    {{5, 128}},
+    {{6, 256}},
+    {{7, 512}},
+    {{8, 1024}}
+};
+
+std::vector<std::vector<float>> Scales = {
+    {2.0f},
+    {3.0f},
+    {-1.0f},
+    {-2.0f},
+    {-3.0f}
+};
+
+std::vector<std::vector<float>> Shifts = {
+    {1.0f},
+    {2.0f},
+    {3.0f},
+    {-1.0f},
+    {-2.0f},
+    {-3.0f}
+};
+
+std::vector<InferenceEngine::Precision> netPrecisions = {InferenceEngine::Precision::FP32,
+                                                         InferenceEngine::Precision::FP16,
+};
+
+}  // namespace
+
+INSTANTIATE_TEST_CASE_P(ScaleShift, ScaleShiftLayerTest,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(inShapes),
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values(CommonTestUtils::DEVICE_GPU),
+                                ::testing::ValuesIn(Scales),
+                                ::testing::ValuesIn(Shifts)),
+                        ScaleShiftLayerTest::getTestCaseName);
diff --git a/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/scale_shift.cpp b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/scale_shift.cpp
index 5d0bc1d..7bbd371 100644
--- a/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/scale_shift.cpp
+++ b/inference-engine/tests/functional/plugin/shared/src/subgraph_tests/scale_shift.cpp
@@ -29,11 +29,14 @@ namespace LayerTestsDefinitions {
         InferenceEngine::Precision netPrecision;
         std::vector<float> scale, shift;
         std::tie(inputShapes, netPrecision, targetDevice, scale, shift) = this->GetParam();
+        auto paramsShape = ngraph::Shape{1};
+        if (inputShapes.size() > 1)
+            paramsShape = ngraph::Shape(inputShapes[1]);
         auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
         auto paramsIn = ngraph::builder::makeParams(ngPrc, {inputShapes[0]});
-        auto mul_const = std::make_shared<ngraph::op::Constant>(ngPrc, ngraph::Shape{1}, scale);
+        auto mul_const = std::make_shared<ngraph::op::Constant>(ngPrc, paramsShape, scale);
         auto mul = std::make_shared<ngraph::opset1::Multiply>(paramsIn[0], mul_const);
-        auto add_const = std::make_shared<ngraph::op::Constant>(ngPrc, ngraph::Shape{1}, shift);
+        auto add_const = std::make_shared<ngraph::op::Constant>(ngPrc, paramsShape, shift);
         auto add = std::make_shared<ngraph::opset1::Add>(mul, add_const);
         function = std::make_shared<ngraph::Function>(add, paramsIn, "scale_shift");
     }
-- 
2.7.4