std::dynamic_pointer_cast<const ::ngraph::opset4::HSwish>(node) ||
std::dynamic_pointer_cast<const ::ngraph::opset4::ReduceL1>(node) ||
std::dynamic_pointer_cast<const ::ngraph::opset4::ReduceL2>(node) ||
- std::dynamic_pointer_cast<const ::ngraph::opset4::SoftPlus>(node);
+ std::dynamic_pointer_cast<const ::ngraph::opset4::SoftPlus>(node) ||
+ std::dynamic_pointer_cast<const ::ngraph::opset5::LogSoftmax>(node);
};
auto nGraphFunc = clonedNetwork->getFunction();
// Disable shape inference (WA for generic operations)
{ "Pooling" , Pooling },
{ "FullyConnected" , FullyConnected },
{ "SoftMax" , SoftMax },
+ { "LogSoftmax", LogSoftmax },
{ "Power" , Power },
{ "Split" , Split },
{ "VariadicSplit", VariadicSplit },
break;
case SoftMax: CreateSoftMaxPrimitive(topology, layer);
break;
+ case LogSoftmax: CreateLogSoftmaxPrimitive(topology, layer);
+ break;
case Power: CreatePowerPrimitive(topology, layer);
break;
case Split: CreateSplitPrimitive(topology, layer);
AddPrimitiveToProfiler(softmaxLayerName, layer);
}
+void Program::CreateLogSoftmaxPrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer) {
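+ // LogSoftmax is lowered to log(softmax(x)): an internal softmax primitive followed by a log activation primitive.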
+ ValidateLayer(layer, 1);
+ auto inputPrimitives = GetPrevLayersPrimitives(layer);
+ auto logSoftmaxLayer = as<InferenceEngine::GenericLayer*>(layer);
+ auto sz = logSoftmaxLayer->input().get()->getTensorDesc().getDims().size();
+
+ auto axis = logSoftmaxLayer->GetParamAsInt("axis", 1);
+ if (axis < 0) axis += sz;
+ cldnn::softmax::dimension_t softmax_axis;
+
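+ // Map the (already non-negative) axis to a clDNN softmax dimension; 4D inputs use the bfyx layout and 5D use bfzyx, hence the sz > 4 checks.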
+ switch (axis) {
+ case 0: softmax_axis = cldnn::softmax::normalize_all; break;
+ case 1: softmax_axis = cldnn::softmax::normalize_f; break;
+ case 2: softmax_axis = sz > 4 ? cldnn::softmax::normalize_z : cldnn::softmax::normalize_y; break;
+ case 3: softmax_axis = sz > 4 ? cldnn::softmax::normalize_y : cldnn::softmax::normalize_x; break;
+ case 4: softmax_axis = cldnn::softmax::normalize_x; break;
+ default: THROW_CLDNN_EXCEPTION("Unsupported logsoftmax axis " << axis);
+ }
+
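+ // The internal softmax primitive gets a name derived from the layer so that several LogSoftmax layers do not collide on the same primitive id.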
+ std::string softmaxLayerName = layer_type_name_ID(layer) + "_softmax";
+ auto softmaxPrim = cldnn::softmax(softmaxLayerName, inputPrimitives[0], softmax_axis);
+ topology.add(softmaxPrim);
+ AddPrimitiveToProfiler(softmaxLayerName, layer);
+
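+ // The final log activation takes the layer's canonical primitive name, so downstream layers connect to the fused result.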
+ std::string logSoftmaxLayerName = layer_type_name_ID(layer);
+ auto logPrim = cldnn::activation(logSoftmaxLayerName, softmaxLayerName, cldnn::activation_func::log);
+ topology.add(logPrim);
+ AddPrimitiveToProfiler(logSoftmaxLayerName, layer);
+}
+
void Program::CreateFullyConnectedPrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer) {
ValidateLayer(layer, {1, 2, 3});
auto inputPrimitives = GetPrevLayersPrimitives(layer);
Pooling,
FullyConnected,
SoftMax,
+ LogSoftmax,
Power,
Split,
VariadicSplit,
void CreateFusedSplitConvMergePrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer, bool useGroups = true);
void CreatePowerPrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer);
void CreateSoftMaxPrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer);
+ void CreateLogSoftmaxPrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer);
void CreateFullyConnectedPrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer);
void CreatePoolingPrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer);
void CreateLRNPrimitive(cldnn::topology& topology, InferenceEngine::CNNLayerPtr &layer);
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "single_layer_tests/log_softmax.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
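+
+// Shared single-layer LogSoftmax tests, instantiated for the GPU device over 2D and 4D input shapes and a range of axes.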
+
+namespace {
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+ InferenceEngine::Precision::FP32,
+};
+
+const std::vector<InferenceEngine::SizeVector> inputShapes2D = {
+ InferenceEngine::SizeVector {1, 100},
+ InferenceEngine::SizeVector {100, 1},
+ InferenceEngine::SizeVector {10, 10},
+};
+
+const std::vector<int64_t> axis2D = {
+ -1, 1
+};
+
+const auto params2D = testing::Combine(
+ testing::ValuesIn(netPrecisions),
+ testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+ testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+ testing::Values(InferenceEngine::Layout::ANY),
+ testing::Values(InferenceEngine::Layout::ANY),
+ testing::ValuesIn(inputShapes2D),
+ testing::ValuesIn(axis2D),
+ testing::Values(CommonTestUtils::DEVICE_GPU),
+ testing::Values(std::map<std::string, std::string>())
+);
+
+INSTANTIATE_TEST_CASE_P(
+ smoke_LogSoftmax2D,
+ LogSoftmaxLayerTest,
+ params2D,
+ LogSoftmaxLayerTest::getTestCaseName
+);
+
+const std::vector<InferenceEngine::SizeVector> inputShapes4D = {
+ InferenceEngine::SizeVector {1, 100, 1, 1},
+ InferenceEngine::SizeVector {1, 3, 4, 3},
+ InferenceEngine::SizeVector {2, 3, 4, 5},
+};
+
+const std::vector<int64_t> axis4D = {
+ -3, -2, -1, 1, 2, 3
+};
+
+const auto params4D = testing::Combine(
+ testing::ValuesIn(netPrecisions),
+ testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+ testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+ testing::Values(InferenceEngine::Layout::ANY),
+ testing::Values(InferenceEngine::Layout::ANY),
+ testing::ValuesIn(inputShapes4D),
+ testing::ValuesIn(axis4D),
+ testing::Values(CommonTestUtils::DEVICE_GPU),
+ testing::Values(std::map<std::string, std::string>())
+);
+
+INSTANTIATE_TEST_CASE_P(
+ smoke_LogSoftmax4D,
+ LogSoftmaxLayerTest,
+ params4D,
+ LogSoftmaxLayerTest::getTestCaseName
+);
+
+} // namespace