From: Roman Lyamin Date: Wed, 11 Nov 2020 05:53:30 +0000 (+0300) Subject: [IE CLDNN] Added LogSoftmax-5 operation (#2945) X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=6b09d5769f39612755fabf2798d5df59095a45b7;p=platform%2Fupstream%2Fdldt.git [IE CLDNN] Added LogSoftmax-5 operation (#2945) --- diff --git a/inference-engine/src/cldnn_engine/cldnn_engine.cpp b/inference-engine/src/cldnn_engine/cldnn_engine.cpp index c1c5215..f56ebc2 100644 --- a/inference-engine/src/cldnn_engine/cldnn_engine.cpp +++ b/inference-engine/src/cldnn_engine/cldnn_engine.cpp @@ -136,7 +136,8 @@ InferenceEngine::ICNNNetwork::Ptr clDNNEngine::CloneAndTransformNetwork(const In std::dynamic_pointer_cast(node) || std::dynamic_pointer_cast(node) || std::dynamic_pointer_cast(node) || - std::dynamic_pointer_cast(node); + std::dynamic_pointer_cast(node) || + std::dynamic_pointer_cast(node); }; auto nGraphFunc = clonedNetwork->getFunction(); // Disable shape inference (WA for generic operations) diff --git a/inference-engine/src/cldnn_engine/cldnn_program.cpp b/inference-engine/src/cldnn_engine/cldnn_program.cpp index e2eec46..59ed9cc 100644 --- a/inference-engine/src/cldnn_engine/cldnn_program.cpp +++ b/inference-engine/src/cldnn_engine/cldnn_program.cpp @@ -761,6 +761,7 @@ Program::LayerType Program::LayerTypeFromStr(const std::string &str) { { "Pooling" , Pooling }, { "FullyConnected" , FullyConnected }, { "SoftMax" , SoftMax }, + { "LogSoftmax", LogSoftmax }, { "Power" , Power }, { "Split" , Split }, { "VariadicSplit", VariadicSplit }, @@ -1442,6 +1443,8 @@ void Program::CreateSingleLayerPrimitive(cldnn::topology& topology, InferenceEng break; case SoftMax: CreateSoftMaxPrimitive(topology, layer); break; + case LogSoftmax: CreateLogSoftmaxPrimitive(topology, layer); + break; case Power: CreatePowerPrimitive(topology, layer); break; case Split: CreateSplitPrimitive(topology, layer); @@ -2922,6 +2925,36 @@ void Program::CreateSoftMaxPrimitive(cldnn::topology& topology, 
InferenceEngine: AddPrimitiveToProfiler(softmaxLayerName, layer); } +void Program::CreateLogSoftmaxPrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer) { + ValidateLayer(layer, 1); + auto inputPrimitives = GetPrevLayersPrimitives(layer); + auto logSoftmaxLayer = as<InferenceEngine::GenericLayer*>(layer); + auto sz = logSoftmaxLayer->input().get()->getTensorDesc().getDims().size(); + + auto axis = logSoftmaxLayer->GetParamAsInt("axis", 1); + if (axis < 0) axis += sz; + cldnn::softmax::dimension_t softmax_axis; + + switch (axis) { + case 0: softmax_axis = cldnn::softmax::normalize_all; break; + case 1: softmax_axis = cldnn::softmax::normalize_f; break; + case 2: softmax_axis = sz > 4 ? cldnn::softmax::normalize_z : cldnn::softmax::normalize_y; break; + case 3: softmax_axis = sz > 4 ? cldnn::softmax::normalize_y : cldnn::softmax::normalize_x; break; + case 4: softmax_axis = cldnn::softmax::normalize_x; break; + default: THROW_CLDNN_EXCEPTION("Unsupported logsoftmax axis " << axis); + } + + std::string softmaxLayerName = "softMax"; + auto softmaxPrim = cldnn::softmax(softmaxLayerName, inputPrimitives[0], softmax_axis); + topology.add(softmaxPrim); + AddPrimitiveToProfiler(softmaxLayerName, layer); + + std::string logSoftmaxLayerName = layer_type_name_ID(layer); + auto logPrim = cldnn::activation(logSoftmaxLayerName, softmaxLayerName, cldnn::activation_func::log); + topology.add(logPrim); + AddPrimitiveToProfiler(logSoftmaxLayerName, layer); +} + void Program::CreateFullyConnectedPrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer) { ValidateLayer(layer, {1, 2, 3}); auto inputPrimitives = GetPrevLayersPrimitives(layer); diff --git a/inference-engine/src/cldnn_engine/cldnn_program.h b/inference-engine/src/cldnn_engine/cldnn_program.h index 6802fa3..8276944 100644 --- a/inference-engine/src/cldnn_engine/cldnn_program.h +++ b/inference-engine/src/cldnn_engine/cldnn_program.h @@ -138,6 +138,7 @@ public: Pooling, FullyConnected, SoftMax, + LogSoftmax, Power, Split, 
VariadicSplit, @@ -337,6 +338,7 @@ private: void CreateFusedSplitConvMergePrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer, bool useGroups = true); void CreatePowerPrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer); void CreateSoftMaxPrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer); + void CreateLogSoftmaxPrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer); void CreateFullyConnectedPrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer); void CreatePoolingPrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer); void CreateLRNPrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer); diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/log_softmax.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/log_softmax.cpp new file mode 100644 index 0000000..f497609 --- /dev/null +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/log_softmax.cpp @@ -0,0 +1,76 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include <vector> + +#include "single_layer_tests/log_softmax.hpp" +#include "common_test_utils/test_constants.hpp" + +using namespace LayerTestsDefinitions; + +namespace { + +const std::vector<InferenceEngine::Precision> netPrecisions = { + InferenceEngine::Precision::FP32, +}; + +const std::vector<InferenceEngine::SizeVector> inputShapes2D = { + InferenceEngine::SizeVector {1, 100}, + InferenceEngine::SizeVector {100, 1}, + InferenceEngine::SizeVector {10, 10}, +}; + +const std::vector<int64_t> axis2D = { + -1, 1 +}; + +const auto params2D = testing::Combine( + testing::ValuesIn(netPrecisions), + testing::Values(InferenceEngine::Precision::UNSPECIFIED), + testing::Values(InferenceEngine::Precision::UNSPECIFIED), + testing::Values(InferenceEngine::Layout::ANY), + testing::Values(InferenceEngine::Layout::ANY), + 
testing::ValuesIn(inputShapes2D), + testing::ValuesIn(axis2D), + testing::Values(CommonTestUtils::DEVICE_GPU), + testing::Values(std::map<std::string, std::string>()) +); + +INSTANTIATE_TEST_CASE_P( + smoke_LogSoftmax2D, + LogSoftmaxLayerTest, + params2D, + LogSoftmaxLayerTest::getTestCaseName +); + +const std::vector<InferenceEngine::SizeVector> inputShapes4D = { + InferenceEngine::SizeVector {1, 100, 1, 1}, + InferenceEngine::SizeVector {1, 3, 4, 3}, + InferenceEngine::SizeVector {2, 3, 4, 5}, +}; + +const std::vector<int64_t> axis4D = { + -3, -2, -1, 1, 2, 3 +}; + +const auto params4D = testing::Combine( + testing::ValuesIn(netPrecisions), + testing::Values(InferenceEngine::Precision::UNSPECIFIED), + testing::Values(InferenceEngine::Precision::UNSPECIFIED), + testing::Values(InferenceEngine::Layout::ANY), + testing::Values(InferenceEngine::Layout::ANY), + testing::ValuesIn(inputShapes4D), + testing::ValuesIn(axis4D), + testing::Values(CommonTestUtils::DEVICE_GPU), + testing::Values(std::map<std::string, std::string>()) +); + +INSTANTIATE_TEST_CASE_P( + smoke_LogSoftmax4D, + LogSoftmaxLayerTest, + params4D, + LogSoftmaxLayerTest::getTestCaseName +); + +} // namespace