std::dynamic_pointer_cast<const ::ngraph::opset3::ExtractImagePatches>(node) ||
std::dynamic_pointer_cast<const ::ngraph::opset4::HSwish>(node) ||
std::dynamic_pointer_cast<const ::ngraph::opset4::ReduceL1>(node) ||
- std::dynamic_pointer_cast<const ::ngraph::opset4::ReduceL2>(node);
+ std::dynamic_pointer_cast<const ::ngraph::opset4::ReduceL2>(node) ||
+ std::dynamic_pointer_cast<const ::ngraph::opset4::SoftPlus>(node);
};
auto nGraphFunc = clonedNetwork->getFunction();
// Disable shape inference (WA for generic operations)
std::dynamic_pointer_cast<const ngraph::opset4::HSwish>(node) ||
std::dynamic_pointer_cast<const ngraph::opset4::ReduceL1>(node) ||
std::dynamic_pointer_cast<const ngraph::opset4::ReduceL2>(node) ||
+ std::dynamic_pointer_cast<const ngraph::opset4::SoftPlus>(node) ||
std::dynamic_pointer_cast<const ngraph::opset4::Pad>(node);
};
auto nGraphFunc = clonedNetwork->getFunction();
MKLDNN_EXTENSION_NODE(MathImpl, Sign);
MKLDNN_EXTENSION_NODE(MathImpl, Sin);
MKLDNN_EXTENSION_NODE(MathImpl, Sinh);
-MKLDNN_EXTENSION_NODE(MathImpl, Softplus);
+MKLDNN_EXTENSION_NODE(MathImpl, SoftPlus);
MKLDNN_EXTENSION_NODE(MathImpl, Softsign);
MKLDNN_EXTENSION_NODE(MathImpl, Tan);
MKLDNN_EXTENSION_NODE(ExperimentalDetectronTopKROIsImpl, ExperimentalDetectronTopKROIs);
else if (math_func == "Sign") mathFunction = Math::Sign;
else if (math_func == "Sin") mathFunction = Math::Sin;
else if (math_func == "Sinh") mathFunction = Math::Sinh;
- else if (math_func == "Softplus") mathFunction = Math::Softplus;
+ else if (math_func == "SoftPlus") mathFunction = Math::SoftPlus;
else if (math_func == "Softsign") mathFunction = Math::Softsign;
else if (math_func == "Tan") mathFunction = Math::Tan;
else
dst_data[i] = sinhf(src_data[i]);
});
break;
- case Math::Softplus:
+ case Math::SoftPlus:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = logf(expf(src_data[i]) + 1);
});
Sign,
Sin,
Sinh,
- Softplus,
+ SoftPlus,
Softsign,
Tan
};
REG_FACTORY_FOR(MathImpl, Sign);
REG_FACTORY_FOR(MathImpl, Sin);
REG_FACTORY_FOR(MathImpl, Sinh);
-REG_FACTORY_FOR(MathImpl, Softplus);
+REG_FACTORY_FOR(MathImpl, SoftPlus);
REG_FACTORY_FOR(MathImpl, Softsign);
REG_FACTORY_FOR(MathImpl, Tan);
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <vector>
+#include <memory>
+
+#include <transformations_visibility.hpp>
+#include <ngraph/pass/graph_rewrite.hpp>
+
+namespace ngraph {
+namespace pass {
+
+class TRANSFORMATIONS_API SoftPlusDecomposition;
+
+} // namespace pass
+} // namespace ngraph
+
+/**
+ * @ingroup ie_transformation_common_api
+ * @brief SoftPlusDecomposition transformation replaces the SoftPlus op
+ * with the equivalent sub-graph log(exp(x) + 1).
+ */
+class ngraph::pass::SoftPlusDecomposition: public ngraph::pass::MatcherPass {
+public:
+ SoftPlusDecomposition();
+};
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <vector>
+#include <memory>
+
+#include <transformations_visibility.hpp>
+#include <ngraph/pass/graph_rewrite.hpp>
+
+namespace ngraph {
+namespace pass {
+
+class TRANSFORMATIONS_API SoftPlusFusion;
+
+} // namespace pass
+} // namespace ngraph
+
+/**
+ * @ingroup ie_transformation_common_api
+ * @brief SoftPlusFusion transformation replaces the sub-graph
+ * log(exp(x) + 1) with a single SoftPlus op.
+ */
+class ngraph::pass::SoftPlusFusion: public ngraph::pass::MatcherPass {
+public:
+ SoftPlusFusion();
+};
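
Both passes above are ordinary MatcherPass subclasses; a minimal usage sketch
(mirroring the registration done in the unit tests further down, with `f` a
std::shared_ptr<ngraph::Function> to transform):

    ngraph::pass::Manager manager;
    manager.register_pass<ngraph::pass::InitNodeInfo>();
    manager.register_pass<ngraph::pass::SoftPlusFusion>();  // or SoftPlusDecomposition
    manager.run_passes(f);
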
#include "transformations/init_node_info.hpp"
#include "transformations/itt.hpp"
#include "transformations/mish_fusion.hpp"
+#include "transformations/softplus_fusion.hpp"
#include "transformations/swish_fusion.hpp"
#include "transformations/hswish_fusion.hpp"
manager.register_pass<ngraph::pass::ConvertScatterElementsToScatter>(); // partially depends on CF
manager.register_pass<ngraph::pass::DepthToSpaceFusion>();
manager.register_pass<ngraph::pass::MishFusion>();
+ manager.register_pass<ngraph::pass::SoftPlusFusion>();
manager.register_pass<ngraph::pass::SwishFusion>();
manager.register_pass<ngraph::pass::HSwishFusion>();
manager.register_pass<ngraph::pass::ConvertPadToGroupConvolution>();
#include "transformations/convert_opset3_to_opset2/convert_shuffle_channels3.hpp"
#include "transformations/convert_opset3_to_opset2/convert_topk3.hpp"
#include "transformations/convert_extract_image_patches_to_reorg_yolo.hpp"
+#include "transformations/softplus_decomposition.hpp"
#include "transformations/itt.hpp"
#include <memory>
manager.register_pass<ngraph::pass::ConvertShuffleChannels3>();
manager.register_pass<ngraph::pass::ConvertTopK3>();
manager.register_pass<ngraph::pass::ConvertExtractImagePatchesToReorgYolo>();
+ manager.register_pass<ngraph::pass::SoftPlusDecomposition>();
manager.set_callback(m_transformation_callback);
manager.run_passes(f);
--- /dev/null
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/softplus_decomposition.hpp"
+
+#include <memory>
+#include <vector>
+
+#include <ngraph/opsets/opset4.hpp>
+#include <ngraph/rt_info.hpp>
+#include <ngraph/pattern/op/wrap_type.hpp>
+
+ngraph::pass::SoftPlusDecomposition::SoftPlusDecomposition() {
+ // decomposes SoftPlus(x) operation into ln(exp(x) + 1.0)
+ auto input = ngraph::pattern::any_input();
+ auto softplus = std::make_shared<ngraph::opset4::SoftPlus>(input);
+
+ ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher& m) {
+ auto &pattern_to_output = m.get_pattern_value_map();
+ auto softplus_input = pattern_to_output.at(input);
+ auto softplus_node = pattern_to_output.at(softplus).get_node_shared_ptr();
+
+ if (m_transformation_callback(softplus_node)) {
+ return false;
+ }
+
+ auto exp = std::make_shared<ngraph::opset4::Exp>(softplus_input);
+ auto add = std::make_shared<ngraph::opset4::Add>(exp,
+ opset4::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1.0}));
+ auto log = std::make_shared<ngraph::opset4::Log>(add);
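+        // NB: the Shape{1} constant above broadcasts against any input
+        // shape; note its element type is fixed to f32.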
+
+ log->set_friendly_name(softplus_node->get_friendly_name());
+ ngraph::copy_runtime_info(softplus_node, {exp, add, log});
+ ngraph::replace_node(softplus_node, log);
+ return true;
+ };
+
+ auto m = std::make_shared<ngraph::pattern::Matcher>(softplus, "SoftPlusDecomposition");
+ register_matcher(m, callback);
+}
--- /dev/null
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/softplus_fusion.hpp"
+
+#include <memory>
+#include <vector>
+
+#include <ngraph/opsets/opset4.hpp>
+#include <ngraph/rt_info.hpp>
+#include <ngraph/pattern/op/wrap_type.hpp>
+
+ngraph::pass::SoftPlusFusion::SoftPlusFusion() {
+ // fuses ln(exp(x) + 1.0) operations into SoftPlus(x)
+ auto input = ngraph::pattern::any_input();
+ auto exp = std::make_shared<ngraph::opset4::Exp>(input);
+ auto add_constant = ngraph::pattern::wrap_type<ngraph::opset4::Constant>();
+ auto add = std::make_shared<ngraph::opset4::Add>(exp, add_constant);
+ auto log = std::make_shared<ngraph::opset4::Log>(add);
+
+ ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher &m) {
+ auto &pattern_to_output = m.get_pattern_value_map();
+ auto exp_input = pattern_to_output.at(input);
+
+ auto constant = std::dynamic_pointer_cast<ngraph::opset4::Constant>(pattern_to_output.at(add_constant).get_node_shared_ptr());
+
+ if (constant == nullptr) {
+ return false;
+ }
+
+ if (constant->get_element_type() == ngraph::element::f32 || constant->get_element_type() == ngraph::element::f16) {
+ auto data = constant->cast_vector<float>();
+ if (data.size() != 1 || data[0] != 1.0) {
+ return false;
+ }
+ } else {
+ return false;
+ }
+
+ auto softplus = std::make_shared<ngraph::opset4::SoftPlus>(exp_input);
+
+ softplus->set_friendly_name(m.get_match_root()->get_friendly_name());
+ ngraph::copy_runtime_info({pattern_to_output.at(log).get_node_shared_ptr(),
+ pattern_to_output.at(add).get_node_shared_ptr(),
+ pattern_to_output.at(exp).get_node_shared_ptr()}, softplus);
+ ngraph::replace_node(m.get_match_root(), softplus);
+ return true;
+ };
+
+ auto m = std::make_shared<ngraph::pattern::Matcher>(log, "SoftPlusFusion");
+ register_matcher(m, callback);
+}
#include <legacy/convert_function_to_cnn_network.hpp>
#include <generic_ie.hpp>
#include <ngraph/opsets/opset3.hpp>
+#include <ngraph/opsets/opset4.hpp>
#include <transformations/tensor_iterator_transformations/apply_transformations_to_ti_body.hpp>
#include <transformations/convert_opset3_to_opset2/convert_opset3_to_opset2.hpp>
#include <transformations/convert_opset2_to_opset1/convert_opset2_to_opset1.hpp>
const auto transformationsPredicate = [](const std::shared_ptr<const ngraph::Node> &node) -> bool {
return std::dynamic_pointer_cast<const ngraph::opset3::Gelu>(node) ||
(std::dynamic_pointer_cast<const ngraph::opset3::MatMul>(node) &&
- std::dynamic_pointer_cast<const ngraph::vpu::op::DynamicShapeResolver>(node->input_value(0).get_node_shared_ptr()));
+ std::dynamic_pointer_cast<const ngraph::vpu::op::DynamicShapeResolver>(node->input_value(0).get_node_shared_ptr())) ||
+ std::dynamic_pointer_cast<const ngraph::opset4::SoftPlus>(node);
};
auto nGraphFunc = originalOrConvertNetwork->getFunction();
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include <string>
+#include <memory>
+
+#include <ngraph/function.hpp>
+#include <ngraph/opsets/opset4.hpp>
+#include <ngraph/pass/manager.hpp>
+#include <transformations/softplus_decomposition.hpp>
+#include <transformations/init_node_info.hpp>
+#include <transformations/utils/utils.hpp>
+
+#include "common_test_utils/ngraph_test_utils.hpp"
+
+using namespace testing;
+
+TEST(TransformationTests, SoftPlusDecomposition) {
+ std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
+ {
+ auto data = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::Shape{3, 1, 2});
+ auto softplus = std::make_shared<ngraph::opset4::SoftPlus>(data);
+
+ f = std::make_shared<ngraph::Function>(ngraph::NodeVector{softplus}, ngraph::ParameterVector{data});
+
+ ngraph::pass::Manager manager;
+ manager.register_pass<ngraph::pass::InitNodeInfo>();
+ manager.register_pass<ngraph::pass::SoftPlusDecomposition>();
+ manager.run_passes(f);
+ ASSERT_NO_THROW(check_rt_info(f));
+ }
+
+ {
+ auto input = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::Shape{3, 1, 2});
+ auto exp = std::make_shared<ngraph::opset4::Exp>(input);
+ auto add = std::make_shared<ngraph::opset4::Add>(exp,
+ ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1.0}));
+ auto log = std::make_shared<ngraph::opset4::Log>(add);
+
+ f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{log}, ngraph::ParameterVector{input});
+ }
+
+ auto res = compare_functions(f, f_ref);
+ ASSERT_TRUE(res.first) << res.second;
+}
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include <string>
+#include <memory>
+
+#include <ngraph/function.hpp>
+#include <ngraph/opsets/opset4.hpp>
+#include <ngraph/pass/manager.hpp>
+#include <transformations/softplus_fusion.hpp>
+#include <transformations/init_node_info.hpp>
+#include <transformations/utils/utils.hpp>
+
+#include "common_test_utils/ngraph_test_utils.hpp"
+
+using namespace testing;
+
+TEST(TransformationTests, SoftPlusFusing) {
+ std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
+ {
+ auto input0 = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::Shape{3, 1, 2});
+ auto exp = std::make_shared<ngraph::opset4::Exp>(input0);
+ auto input_const = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1.0});
+ auto add = std::make_shared<ngraph::opset4::Add>(exp, input_const);
+ auto log = std::make_shared<ngraph::opset4::Log>(add);
+
+ f = std::make_shared<ngraph::Function>(ngraph::NodeVector{log}, ngraph::ParameterVector{input0});
+
+ ngraph::pass::Manager manager;
+ manager.register_pass<ngraph::pass::InitNodeInfo>();
+ manager.register_pass<ngraph::pass::SoftPlusFusion>();
+ manager.run_passes(f);
+ ASSERT_NO_THROW(check_rt_info(f));
+ }
+
+ {
+ auto data = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::Shape{3, 1, 2});
+ auto softplus = std::make_shared<ngraph::opset4::SoftPlus>(data);
+
+ f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{softplus}, ngraph::ParameterVector{data});
+ }
+
+ auto res = compare_functions(f, f_ref);
+ ASSERT_TRUE(res.first) << res.second;
+}
+
+TEST(TransformationTests, SoftPlusFusingDynamic) {
+ std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
+ {
+ auto input0 = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::PartialShape::dynamic(1));
+ auto exp = std::make_shared<ngraph::opset4::Exp>(input0);
+ auto input_const = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1.0});
+ auto add = std::make_shared<ngraph::opset4::Add>(exp, input_const);
+ auto log = std::make_shared<ngraph::opset4::Log>(add);
+
+ f = std::make_shared<ngraph::Function>(ngraph::NodeVector{log}, ngraph::ParameterVector{input0});
+
+ ngraph::pass::Manager manager;
+ manager.register_pass<ngraph::pass::InitNodeInfo>();
+ manager.register_pass<ngraph::pass::SoftPlusFusion>();
+ manager.run_passes(f);
+ ASSERT_NO_THROW(check_rt_info(f));
+ }
+
+ {
+ auto data = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::PartialShape::dynamic(1));
+ auto softplus = std::make_shared<ngraph::opset4::SoftPlus>(data);
+
+ f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{softplus}, ngraph::ParameterVector{data});
+ }
+
+ auto res = compare_functions(f, f_ref);
+ ASSERT_TRUE(res.first) << res.second;
+}
+
+TEST(TransformationTests, SoftPlusFusingNegative) {
+ std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
+ {
+ auto input0 = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::PartialShape::dynamic(1));
+ auto exp = std::make_shared<ngraph::opset4::Exp>(input0);
+ auto input_const = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {-1.0});
+ auto add = std::make_shared<ngraph::opset4::Add>(exp, input_const);
+ auto log = std::make_shared<ngraph::opset4::Log>(add);
+
+ f = std::make_shared<ngraph::Function>(ngraph::NodeVector{log}, ngraph::ParameterVector{input0});
+
+ ngraph::pass::Manager manager;
+ manager.register_pass<ngraph::pass::InitNodeInfo>();
+ manager.register_pass<ngraph::pass::SoftPlusFusion>();
+ manager.run_passes(f);
+ ASSERT_NO_THROW(check_rt_info(f));
+ }
+
+ {
+ auto input0 = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::PartialShape::dynamic(1));
+ auto exp = std::make_shared<ngraph::opset4::Exp>(input0);
+ auto input_const = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {-1.0});
+ auto add = std::make_shared<ngraph::opset4::Add>(exp, input_const);
+ auto log = std::make_shared<ngraph::opset4::Log>(add);
+
+ f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{log}, ngraph::ParameterVector{input0});
+ }
+
+ auto res = compare_functions(f, f_ref);
+ ASSERT_TRUE(res.first) << res.second;
+}
Selu,
Ceiling,
Mish,
- HSwish
+ HSwish,
+ SoftPlus
};
const std::vector<ActivationTypes> activationParamTypes = {
Selu,
Ceiling,
Mish,
- HSwish
+ HSwish,
+ SoftPlus
};
std::map<std::vector<size_t>, std::vector<std::vector<size_t>>> basic = {
Exp,
Log,
Gelu,
- Mish
+ Mish,
+ SoftPlus
};
std::map<std::vector<size_t>, std::vector<std::vector<size_t>>> basic = {
{ngraph::helpers::ActivationTypes::PReLu, "PReLu"},
{ngraph::helpers::ActivationTypes::Mish, "Mish"},
{ngraph::helpers::ActivationTypes::HSwish, "HSwish"},
+ {ngraph::helpers::ActivationTypes::SoftPlus, "SoftPlus"},
};
typedef std::tuple<
Ceiling,
PReLu,
Mish,
- HSwish
+ HSwish,
+ SoftPlus
};
enum EltwiseTypes {
return std::make_shared<ngraph::op::v4::Mish>(in);
case ngraph::helpers::ActivationTypes::HSwish:
return std::make_shared<ngraph::op::v4::HSwish>(in);
+ case ngraph::helpers::ActivationTypes::SoftPlus:
+ return std::make_shared<ngraph::op::v4::SoftPlus>(in);
default:
throw std::runtime_error("Can't create layer for this activation type");
}
for (i = 0; i < dst_size; i++) {
dst_data[i] = sinhf(src_data[i]);
}
- } else if (math_function == "Softplus") {
+ } else if (math_function == "SoftPlus") {
for (i = 0; i < dst_size; i++) {
dst_data[i] = logf(expf(src_data[i]) + 1);
}
math_test_params{ "Sign",{ 3 },{ -0.5f, 0.f, 0.5f },{},{},{},{-1, 0, 1} },
math_test_params{ "Sin",{ 3 },{ -1, 0, 1 },{},{},{},{ -0.841470957f, 0.0f, 0.841470957f } },
math_test_params{ "Sinh",{ 3 },{ -0.5f, 0.f, 0.5f },{},{},{},{ } },
- math_test_params{ "Softplus",{ 3 },{ -1, 0, 1 },{},{},{},{ 0.31326166f, 0.69314718f, 1.31326163f } },
+ math_test_params{ "SoftPlus",{ 3 },{ -1, 0, 1 },{},{},{},{ 0.31326166f, 0.69314718f, 1.31326163f } },
math_test_params{ "Softsign",{ 3 },{ -1, 0, 1 },{},{},{},{ -0.5f, 0.f, 0.5f } },
math_test_params{ "Tan",{ 3 },{ -1, 0, 1 },{},{},{},{ -1.55740774f, 0.0f, 1.55740774f } }
));
extensions/front/restore_ports.py
extensions/front/scatter_normalizer.py
extensions/front/softmax.py
-extensions/front/softplus.py
extensions/front/softsign_replacer.py
extensions/front/split_normalizer.py
extensions/front/SqueezeNormalize.py
+++ /dev/null
-"""
- Copyright (C) 2020 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-from extensions.ops.activation_ops import Exp, Log
-from extensions.ops.elementwise import Add
-from mo.front.common.partial_infer.utils import float_array
-from mo.front.common.replacement import FrontReplacementOp
-from mo.front.tf.graph_utils import create_op_node_with_second_input
-from mo.graph.graph import Graph, rename_nodes
-
-
-class SoftPlus(FrontReplacementOp):
- """
- The transformation replaces SoftPlus(x) with log(1.0 + exp(x)).
- """
- op = 'SoftPlus'
- enabled = True
-
- def replace_sub_graph(self, graph: Graph, match: dict):
- softplus = match['op']
-
- name = softplus.soft_get('name', softplus.id)
- exp_node = Exp(graph, {'name': name + '/Exp'}).create_node()
- add_node = create_op_node_with_second_input(graph, Add, float_array([1.0]), {'name': name + '/Add'})
- log_node = Log(graph, {'name': name + '/Log'}).create_node()
- rename_nodes([(softplus, name + '/Log'), (log_node, name)])
-
- softplus.in_port(0).get_connection().set_destination(exp_node.in_port(0))
- add_node.in_port(0).connect(exp_node.out_port(0))
- log_node.in_port(0).connect(add_node.out_port(0))
- softplus.out_port(0).get_connection().set_source(log_node.out_port(0))
+++ /dev/null
-"""
- Copyright (C) 2020 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-import unittest
-
-from extensions.front.softplus import SoftPlus
-from mo.front.common.partial_infer.utils import int64_array
-from mo.utils.ir_engine.compare_graphs import compare_graphs
-from mo.utils.unittest.graph import build_graph
-
-
-class TestSoftPlus(unittest.TestCase):
- nodes = {
- 'node_1': {'shape': int64_array([1, 2, 3, 4]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
- 'softplus': {'value': None, 'kind': 'op', 'op': 'SoftPlus'},
- 'exp': {'value': None, 'kind': 'op', 'op': 'Exp'},
- 'add': {'value': None, 'kind': 'op', 'op': 'Add'},
- 'add_const': {'value': None, 'kind': 'op', 'op': 'Const'},
- 'log': {'value': None, 'kind': 'op', 'op': 'Log'},
- 'last': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}
- }
-
- def test_softplus_1(self):
- graph = build_graph(self.nodes, [('node_1', 'softplus'),
- ('softplus', 'last')], nodes_with_edges_only=True)
-
- graph_ref = build_graph(self.nodes, [('node_1', 'exp'),
- ('exp', 'add'),
- ('add_const', 'add'),
- ('add', 'log'),
- ('log', 'last')], nodes_with_edges_only=True)
-
- graph.stage = 'front'
- SoftPlus().find_and_replace_pattern(graph)
-
- (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True)
- self.assertTrue(flag, resp)
enabled = False
operation = None
op = None
+ version = 'opset1'
def __init__(self, graph: Graph, attrs: dict):
super().__init__(graph, {
'type': self.op,
'op': self.op,
'operation': self.operation,
- 'version': 'opset1',
+ 'version': self.version,
'infer': self.infer,
'in_ports_count': 1,
'out_ports_count': 1,
class Asinh(Activation):
op = 'Asinh'
+ version = 'opset4'
operation = staticmethod(lambda x: np.arcsinh(x))
- def __init__(self, graph: Graph, attrs: dict):
- sp_attrs = {'version': 'opset4'}
- sp_attrs.update(attrs)
- super().__init__(graph, sp_attrs)
-
class Cos(Activation):
op = 'Cos'
class Acosh(Activation):
op = 'Acosh'
+ version = 'opset4'
operation = staticmethod(lambda x: np.arccosh(x))
- def __init__(self, graph: Graph, attrs: dict):
- sp_attrs = {'version': 'opset4'}
- sp_attrs.update(attrs)
- super().__init__(graph, sp_attrs)
-
class Tan(Activation):
op = 'Tan'
class Atanh(Activation):
op = 'Atanh'
+ version = 'opset4'
operation = staticmethod(lambda x: np.arctanh(x))
- def __init__(self, graph: Graph, attrs: dict):
- sp_attrs = {'version': 'opset4'}
- sp_attrs.update(attrs)
- super().__init__(graph, sp_attrs)
-
class ReLU6(AttributedClamp):
op = 'ReLU6'
operation = staticmethod(lambda x: np.log(x))
-class SoftPlus(Op):
+class SoftPlus(Activation):
op = 'SoftPlus'
-
- def __init__(self, graph: Graph, attrs: dict):
- mandatory_props = {
- 'op': self.op,
- 'type': None,
- 'in_ports_count': 1,
- 'out_ports_count': 1,
- 'infer': None
- }
- super().__init__(graph, mandatory_props, attrs)
+ version = 'opset4'
+    operation = staticmethod(lambda x: np.log(np.exp(x) + 1.0))
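+    # Illustrative sanity check: np.log(np.exp([-1., 0., 1.]) + 1.0)
+    # ~= [0.3132617, 0.6931472, 1.3132617], matching the reference values
+    # in the math tests above.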
class Mish(Activation):
op = 'Mish'
+ version = 'opset4'
operation = staticmethod(lambda x: x * np.tanh(np.log(np.exp(x) + 1.0)))
- def __init__(self, graph: Graph, attrs: dict):
- sp_attrs = {'version': 'opset4'}
- sp_attrs.update(attrs)
- super().__init__(graph, sp_attrs)
-
class Swish(Op):
op = 'Swish'
--- /dev/null
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/op.hpp"
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v4
+ {
+            /// \brief SoftPlus activation function:
+            /// f(x) = ln(exp(x) + 1)
+ ///
+ class NGRAPH_API SoftPlus : public ngraph::op::Op
+ {
+ public:
+ NGRAPH_RTTI_DECLARATION;
+
+ SoftPlus() = default;
+            /// \brief Constructs a SoftPlus operation.
+            ///
+            /// \param arg Input tensor
+ SoftPlus(const Output<Node>& arg);
+ bool visit_attributes(AttributeVisitor& visitor) override;
+ void validate_and_infer_types() override;
+
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ bool evaluate(const HostTensorVector& outputs,
+ const HostTensorVector& inputs) const override;
+ };
+ }
+ }
+}
#include "ngraph/op/sinh.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/op/softmax.hpp"
+#include "ngraph/op/softplus.hpp"
#include "ngraph/op/space_to_batch.hpp"
#include "ngraph/op/space_to_depth.hpp"
#include "ngraph/op/split.hpp"
NGRAPH_OP(Mish, ngraph::op::v4)
NGRAPH_OP(ReduceL1, ngraph::op::v4)
NGRAPH_OP(ReduceL2, ngraph::op::v4)
+NGRAPH_OP(SoftPlus, ngraph::op::v4)
NGRAPH_OP(Swish, ngraph::op::v4)
--- /dev/null
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include <cmath>
+#include <cstddef>
+
+namespace ngraph
+{
+ namespace runtime
+ {
+ namespace reference
+ {
+ template <typename T>
+ void softplus(const T* arg, T* out, size_t count)
+ {
+ for (size_t i = 0; i < count; i++)
+ {
+                    out[i] = static_cast<T>(std::log(std::exp(arg[i]) + 1.0));
+ }
+ }
+ }
+ }
+}
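
For reference only (not part of this change): the kernel above follows the op
definition ln(exp(x) + 1) literally, so std::exp overflows to inf for large
positive inputs. A numerically stable sketch, assuming a standard
floating-point T (needs <algorithm> for std::max):

    template <typename T>
    void softplus_stable(const T* arg, T* out, size_t count)
    {
        for (size_t i = 0; i < count; i++)
        {
            // max(x, 0) + log1p(exp(-|x|)) equals ln(exp(x) + 1), overflow-free
            const T x = arg[i];
            out[i] = std::max(x, T{0}) + std::log1p(std::exp(-std::abs(x)));
        }
    }
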
--- /dev/null
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include "ngraph/op/softplus.hpp"
+#include "itt.hpp"
+#include "ngraph/attribute_visitor.hpp"
+#include "ngraph/runtime/host_tensor.hpp"
+#include "ngraph/runtime/reference/softplus.hpp"
+
+using namespace std;
+using namespace ngraph;
+
+NGRAPH_RTTI_DEFINITION(op::v4::SoftPlus, "SoftPlus", 4);
+
+op::v4::SoftPlus::SoftPlus(const Output<Node>& arg)
+ : Op({arg})
+{
+ constructor_validate_and_infer_types();
+}
+
+bool op::v4::SoftPlus::visit_attributes(AttributeVisitor& visitor)
+{
+ return true;
+}
+
+void op::v4::SoftPlus::validate_and_infer_types()
+{
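+    // SoftPlus is elementwise: output element type and shape follow the input.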
+ set_output_size(1);
+ set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
+}
+
+shared_ptr<Node> op::v4::SoftPlus::clone_with_new_inputs(const OutputVector& new_args) const
+{
+ check_new_args_count(this, new_args);
+ return make_shared<op::v4::SoftPlus>(new_args.at(0));
+}
+
+namespace
+{
+ template <element::Type_t ET>
+ inline bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const size_t count)
+ {
+ using T = typename element_type_traits<ET>::value_type;
+ runtime::reference::softplus<T>(arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), count);
+ return true;
+ }
+
+ bool evaluate_softplus(const HostTensorPtr& arg, const HostTensorPtr& out, const size_t count)
+ {
+ bool rc = true;
+ out->set_unary(arg);
+
+ switch (arg->get_element_type())
+ {
+ TYPE_CASE(bf16)(arg, out, count);
+ break;
+ TYPE_CASE(f16)(arg, out, count);
+ break;
+ TYPE_CASE(f32)(arg, out, count);
+ break;
+ default: rc = false; break;
+ }
+ return rc;
+ }
+}
+
+bool op::v4::SoftPlus::evaluate(const HostTensorVector& outputs,
+ const HostTensorVector& inputs) const
+{
+ OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::SoftPlus::evaluate");
+ return evaluate_softplus(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+}
from ngraph.opset4 import sin
from ngraph.opset4 import sinh
from ngraph.opset4 import softmax
+from ngraph.opset4 import softplus
from ngraph.opset4 import space_to_batch
from ngraph.opset4 import space_to_depth
from ngraph.opset4 import split
from ngraph.opset1.ops import sin
from ngraph.opset1.ops import sinh
from ngraph.opset1.ops import softmax
+from ngraph.opset4.ops import softplus
from ngraph.opset2.ops import space_to_batch
from ngraph.opset1.ops import space_to_depth
from ngraph.opset1.ops import split
@nameable_op
+def softplus(data: NodeInput, name: Optional[str] = None) -> Node:
+ """Apply SoftPlus operation on each element of input tensor.
+
+ :param data: The tensor providing input data.
+ :return: The new node with SoftPlus operation applied on each element.
+ """
+ return _get_node_factory_opset4().create("SoftPlus", as_nodes(data), {})
+
+
+@nameable_op
def mish(data: NodeInput, name: Optional[str] = None,) -> Node:
"""Return a node which performs Mish.
op_eval/non_zero.cpp
op_eval/reduce_l1.cpp
op_eval/reduce_l2.cpp
+ op_eval/softplus.cpp
op_eval/split.cpp
op_eval/strided_slice.cpp
op_eval/variadic_split.cpp
type_prop/shape_of.cpp
type_prop/shuffle_channels.cpp
type_prop/slice.cpp
+ type_prop/softplus.cpp
type_prop/space_to_batch.cpp
type_prop/space_to_depth.cpp
type_prop/split.cpp
--- /dev/null
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include <string>
+#include <vector>
+
+#include "gtest/gtest.h"
+
+#include "ngraph/op/softplus.hpp"
+#include "ngraph/runtime/host_tensor.hpp"
+#include "ngraph/validation_util.hpp"
+#include "runtime/backend.hpp"
+#include "util/test_tools.hpp"
+
+using namespace std;
+using namespace ngraph;
+
+TEST(op_eval, softplus_1D)
+{
+ auto p = make_shared<op::Parameter>(element::f32, Shape{4});
+ auto softplus = make_shared<op::v4::SoftPlus>(p);
+ auto fun = make_shared<Function>(OutputVector{softplus}, ParameterVector{p});
+
+ std::vector<float> inputs{-1.0, 0.0, 1.0, 20.0};
+ std::vector<float> expected_result{0.31326166, 0.69314718, 1.3132616, 20.0};
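+    // For large x, ln(exp(x) + 1) ~= x: log1p(exp(-20)) ~ 2e-9 is below f32
+    // resolution at 20.0, so softplus(20) evaluates to exactly 20.0f.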
+
+ auto result = make_shared<HostTensor>();
+ ASSERT_TRUE(
+ fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(Shape{4}, inputs)}));
+ EXPECT_EQ(result->get_element_type(), element::f32);
+ EXPECT_EQ(result->get_shape(), Shape{4});
+ auto result_data = read_vector<float>(result);
+ for (size_t i = 0; i < inputs.size(); i++)
+ EXPECT_NEAR(result_data[i], expected_result[i], 0.000001);
+}
--- /dev/null
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include "gtest/gtest.h"
+#include "ngraph/ngraph.hpp"
+#include "util/type_prop.hpp"
+
+using namespace std;
+using namespace ngraph;
+
+TEST(type_prop, softplus)
+{
+ auto data = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6});
+ auto softplus_func = make_shared<op::v4::SoftPlus>(data);
+ EXPECT_EQ(softplus_func->get_element_type(), element::f32);
+ EXPECT_EQ(softplus_func->get_shape(), (Shape{1, 3, 6}));
+}
+
+TEST(type_prop, softplus_partial)
+{
+ auto data = make_shared<op::Parameter>(element::f32, PartialShape{1, Dimension::dynamic(), 6});
+ auto softplus_func = make_shared<op::v4::SoftPlus>(data);
+ EXPECT_EQ(softplus_func->get_element_type(), element::f32);
+ ASSERT_TRUE(softplus_func->get_output_partial_shape(0).same_scheme(
+ (PartialShape{1, Dimension::dynamic(), 6})));
+
+ // rank unknown
+ auto softplus_partial = make_shared<op::v4::SoftPlus>(
+ make_shared<op::Parameter>(element::f32, PartialShape::dynamic()));
+ ASSERT_TRUE(softplus_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
+}
+
+TEST(type_prop, softplus_partial_static_rank)
+{
+ auto data = make_shared<op::Parameter>(element::f32, PartialShape{1, Dimension::dynamic(), 6});
+ auto softplus_func = make_shared<op::v4::SoftPlus>(data);
+ EXPECT_EQ(softplus_func->get_element_type(), element::f32);
+ ASSERT_TRUE(softplus_func->get_output_partial_shape(0).same_scheme(
+ (PartialShape{1, Dimension::dynamic(), 6})));
+ ASSERT_TRUE(softplus_func->get_output_partial_shape(0).rank().is_static());
+}