#include <transformations/op_conversions/convert_space_to_batch.hpp>
#include <transformations/op_conversions/convert_batch_to_space.hpp>
#include <transformations/op_conversions/convert_mod.hpp>
+#include <transformations/op_conversions/log_softmax_decomposition.hpp>
#include <transformations/convert_precision.hpp>
#include <transformations/init_node_info.hpp>
#include <transformations/rt_info/fused_names_attribute.hpp>
pass_config->disable<ngraph::pass::SoftPlusDecomposition>();
pass_config->disable<ngraph::pass::HSigmoidDecomposition>();
pass_config->disable<ngraph::pass::ConvertMod>();
+ // Keep LogSoftmax as a single op instead of decomposing it, so the plugin's
+ // own LogSoftmax implementation handles it (presumably faster than the
+ // decomposed subgraph — confirm against the decomposition pass).
+ pass_config->disable<ngraph::pass::LogSoftmaxDecomposition>();
pass_config->enable<ngraph::pass::ConvertPadToGroupConvolution>();
StatusCode execute(std::vector<Blob::Ptr>& inputs, std::vector<Blob::Ptr>& outputs, ResponseDesc *resp) noexcept override {
const float *src_data = inputs[0]->cbuffer().as<float *>() +
inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
- float* dst_data = outputs[0]->cbuffer().as<float *>() +
+ // buffer() (non-const) is the right accessor for a destination blob that is
+ // written below; cbuffer() is the read-only accessor.
+ float* dst_data = outputs[0]->buffer().as<float *>() +
outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
if (is_last_dim) {
parallel_for(axis_step, [&](size_t i) {
- float reduce_prod = 0.0f;
const float *src_dataPtr = &src_data[i * reduced_axis_size];
+ float *dst_dataPtr = &dst_data[i * reduced_axis_size];
+
+ float reduce_prod = 0.0f;
+ // Subtract the per-row maximum before exponentiation so expf() cannot
+ // overflow: log(sum(exp(x))) == max + log(sum(exp(x - max))).
+ // (Requires <algorithm> — verify it is included in this TU.)
+ const float max = *std::max_element(src_dataPtr, src_dataPtr + reduced_axis_size);
for (size_t j = 0; j < reduced_axis_size; ++j)
- reduce_prod += expf(src_dataPtr[j]);
+ reduce_prod += expf(src_dataPtr[j] - max);
+
reduce_prod = logf(reduce_prod);
- float *dst_dataPtr = reinterpret_cast<float*>(&dst_data[i * reduced_axis_size]);
for (size_t j = 0; j < reduced_axis_size; ++j)
- dst_dataPtr[j] = src_dataPtr[j] - reduce_prod;
+ dst_dataPtr[j] = src_dataPtr[j] - max - reduce_prod;
});
} else {
parallel_for2d(axis_step, reduced_axis_stride, [&](size_t k, size_t i) {
- float reduce_prod = 0.0f;
const float *src_dataPtr = &src_data[k * reduced_axis_stride * reduced_axis_size + i];
+ float *dst_dataPtr = &dst_data[k * reduced_axis_stride * reduced_axis_size + i];
+
+ float reduce_prod = 0.0f;
+ // std::numeric_limits<float>::min() is the smallest POSITIVE normal value
+ // (~1.18e-38), NOT the most negative float. With all-negative inputs the
+ // running max would stay at ~0, every expf(x - max) would underflow to 0,
+ // and logf(0) would produce -inf. Use lowest() so the true maximum of the
+ // strided slice is always found. (Requires <limits> — verify the include.)
+ float max = std::numeric_limits<float>::lowest();
for (size_t j = 0; j < reduced_axis_size; ++j) {
- reduce_prod += expf((*src_dataPtr));
- src_dataPtr += reduced_axis_stride;
+ if (src_dataPtr[j * reduced_axis_stride] > max)
+ max = src_dataPtr[j * reduced_axis_stride];
}
+ // Same stable-log-softmax accumulation as the contiguous branch above,
+ // but walking the reduced axis with stride reduced_axis_stride.
+ for (size_t j = 0; j < reduced_axis_size; ++j)
+ reduce_prod += expf(src_dataPtr[j * reduced_axis_stride] - max);
+
reduce_prod = logf(reduce_prod);
- src_dataPtr = &src_data[k * reduced_axis_stride * reduced_axis_size + i];
- float *dst_dataPtr = reinterpret_cast<float*>(&dst_data[k * reduced_axis_stride * reduced_axis_size + i]);
- for (size_t j = 0; j < reduced_axis_size; ++j) {
- (*dst_dataPtr) = (*src_dataPtr) - reduce_prod;
- src_dataPtr += reduced_axis_stride;
- dst_dataPtr += reduced_axis_stride;
- }
+ for (size_t j = 0; j < reduced_axis_size; ++j)
+ dst_dataPtr[j * reduced_axis_stride] = src_dataPtr[j * reduced_axis_stride] - max - reduce_prod;
});
}
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "single_layer_tests/log_softmax.hpp"
+#include "common_test_utils/test_constants.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+// Network precisions exercised by these smoke tests.
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+ InferenceEngine::Precision::FP32,
+};
+
+// 2D shapes, including the degenerate single-row and single-column cases.
+const std::vector<InferenceEngine::SizeVector> inputShapes2D = {
+ InferenceEngine::SizeVector {1, 100},
+ InferenceEngine::SizeVector {100, 1},
+ InferenceEngine::SizeVector {10, 10},
+};
+
+// Every valid axis for rank-2 inputs, in both negative and positive form.
+const std::vector<int64_t> axis2D = {
+ -2, -1, 0, 1
+};
+
+const auto params2D = testing::Combine(
+ testing::ValuesIn(netPrecisions),
+ testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+ testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+ testing::Values(InferenceEngine::Layout::ANY),
+ testing::Values(InferenceEngine::Layout::ANY),
+ testing::ValuesIn(inputShapes2D),
+ testing::ValuesIn(axis2D),
+ testing::Values(CommonTestUtils::DEVICE_CPU),
+ testing::Values(std::map<std::string, std::string>())
+);
+
+INSTANTIATE_TEST_CASE_P(
+ smoke_LogSoftmax2D,
+ LogSoftmaxLayerTest,
+ params2D,
+ LogSoftmaxLayerTest::getTestCaseName
+);
+
+// 4D shapes covering a long reduced axis and small mixed dimensions.
+const std::vector<InferenceEngine::SizeVector> inputShapes4D = {
+ InferenceEngine::SizeVector {1, 100, 1, 1},
+ InferenceEngine::SizeVector {1, 3, 4, 3},
+ InferenceEngine::SizeVector {2, 3, 4, 5},
+};
+
+// Every valid axis for rank-4 inputs, in both negative and positive form.
+const std::vector<int64_t> axis4D = {
+ -4, -3, -2, -1, 0, 1, 2, 3
+};
+
+const auto params4D = testing::Combine(
+ testing::ValuesIn(netPrecisions),
+ testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+ testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+ testing::Values(InferenceEngine::Layout::ANY),
+ testing::Values(InferenceEngine::Layout::ANY),
+ testing::ValuesIn(inputShapes4D),
+ testing::ValuesIn(axis4D),
+ testing::Values(CommonTestUtils::DEVICE_CPU),
+ testing::Values(std::map<std::string, std::string>())
+);
+
+INSTANTIATE_TEST_CASE_P(
+ smoke_LogSoftmax4D,
+ LogSoftmaxLayerTest,
+ params4D,
+ LogSoftmaxLayerTest::getTestCaseName
+);
+
+} // namespace
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+
+namespace LayerTestsDefinitions {
+
+// Full parameter set for one LogSoftmax single-layer test instance.
+using logSoftmaxLayerTestParams = std::tuple<
+ InferenceEngine::Precision, // netPrecision
+ InferenceEngine::Precision, // Input precision
+ InferenceEngine::Precision, // Output precision
+ InferenceEngine::Layout, // Input layout
+ InferenceEngine::Layout, // Output layout
+ InferenceEngine::SizeVector, // inputShape
+ int64_t, // axis
+ std::string, // targetDevice
+ std::map<std::string, std::string> // config
+>;
+
+// Parameterized single-layer test: builds a one-op ngraph function containing
+// LogSoftmax (see SetUp in the matching .cpp) and runs it through the common
+// layer-test harness.
+class LogSoftmaxLayerTest : public testing::WithParamInterface<logSoftmaxLayerTestParams>,
+ virtual public LayerTestsUtils::LayerTestsCommon {
+public:
+ // Produces a unique, human-readable name from the test parameters.
+ static std::string getTestCaseName(testing::TestParamInfo<logSoftmaxLayerTestParams> obj);
+
+protected:
+ void SetUp() override;
+};
+
+} // namespace LayerTestsDefinitions
using softMaxLayerTestParams = std::tuple<
InferenceEngine::Precision, // netPrecision
- InferenceEngine::Precision, // Input precision
- InferenceEngine::Precision, // Output precision
- InferenceEngine::Layout, // Input layout
- InferenceEngine::Layout, // Output layout
+ InferenceEngine::Precision, // Input precision
+ InferenceEngine::Precision, // Output precision
+ InferenceEngine::Layout, // Input layout
+ InferenceEngine::Layout, // Output layout
InferenceEngine::SizeVector, // inputShape
size_t, // axis
std::string, // targetDevice
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+//
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "single_layer_tests/log_softmax.hpp"
+
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/skip_tests_config.hpp"
+#include "functional_test_utils/layer_test_utils.hpp"
+
+#include "ie_core.hpp"
+
+#include <tuple>
+#include <string>
+#include <vector>
+#include <memory>
+
+namespace LayerTestsDefinitions {
+
+// Builds a unique, human-readable test name from the parameter tuple.
+// Note: the plugin config map is intentionally not part of the name.
+std::string LogSoftmaxLayerTest::getTestCaseName(testing::TestParamInfo<logSoftmaxLayerTestParams> obj) {
+ InferenceEngine::Precision netPrecision;
+ InferenceEngine::Precision inPrc, outPrc;
+ InferenceEngine::Layout inLayout, outLayout;
+ InferenceEngine::SizeVector inputShape;
+ int64_t axis;
+ std::string targetDevice;
+ std::map<std::string, std::string> config;
+ std::tie(netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, axis, targetDevice, config) = obj.param;
+
+ std::ostringstream result;
+ result << "netPRC=" << netPrecision.name() << "_";
+ result << "inPRC=" << inPrc.name() << "_";
+ result << "outPRC=" << outPrc.name() << "_";
+ result << "inL=" << inLayout << "_";
+ result << "outL=" << outLayout << "_";
+ result << "IS=" << CommonTestUtils::vec2str(inputShape) << "_";
+ result << "axis=" << axis << "_";
+ result << "trgDev=" << targetDevice;
+
+ return result.str();
+}
+
+// Builds the single-op function under test: Parameter -> LogSoftmax -> Result.
+void LogSoftmaxLayerTest::SetUp() {
+ InferenceEngine::SizeVector inputShape;
+ InferenceEngine::Precision netPrecision;
+ int64_t axis;
+
+ std::tie(netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, axis, targetDevice, configuration) = GetParam();
+ // NOTE(review): the output layout from the parameter tuple is overwritten to
+ // match the input layout — confirm this is intended if non-ANY layouts are
+ // ever instantiated.
+ outLayout = inLayout;
+
+ const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+
+ const auto params = ngraph::builder::makeParams(ngPrc, {inputShape});
+
+ const auto paramOuts =
+ ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
+
+ // LogSoftmax is an opset5 operation; axis may be negative (counted from the back).
+ const auto logSoftmax = std::make_shared<ngraph::op::v5::LogSoftmax>(paramOuts.at(0), axis);
+
+ const ngraph::ResultVector results {std::make_shared<ngraph::opset1::Result>(logSoftmax)};
+
+ function = std::make_shared<ngraph::Function>(results, params, "logSoftmax");
+}
+
+TEST_P(LogSoftmaxLayerTest, CompareWithRefs) {
+ Run();
+}
+
+} // namespace LayerTestsDefinitions
# Input data precision not supported. Expected float.
ctc_greedy_decoder_f16
-# Wrong output when axis 0
-IE_CPU.log_softmax_1d_single_value
-IE_CPU.log_softmax_2d_axis0
-IE_CPU.log_softmax_2d_axis_neg2
-IE_CPU.log_softmax_3d_axis_0
-IE_CPU.log_softmax_3d_axis_neg3
-
#-------------------------------------------------------------------------------
#
# Inference Engine GPU plugin excludes