--- /dev/null
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/node.hpp"
+#include "ngraph/op/op.hpp"
+#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
+
+namespace ngraph
+{
+    namespace op
+    {
+        namespace v5
+        {
+            /// \brief A HSigmoid Activation Function
+            /// f(x) = min(max(x + 3, 0), 6) / 6 or
+            /// f(x) = min(ReLU(x + 3), 6) / 6
+            ///
+            class NGRAPH_API HSigmoid : public ngraph::op::util::UnaryElementwiseArithmetic
+            {
+            public:
+                NGRAPH_RTTI_DECLARATION;
+                HSigmoid() = default;
+
+                /// \brief Constructs a HSigmoid (hard version of Sigmoid) operation.
+                ///
+                /// \param arg Input tensor (the single data input)
+                HSigmoid(const Output<Node>& arg);
+
+                /// \brief HSigmoid has no attributes; nothing is visited.
+                bool visit_attributes(AttributeVisitor& visitor) override;
+
+                /// \brief Makes a copy of this node connected to the given new inputs.
+                virtual std::shared_ptr<Node>
+                    clone_with_new_inputs(const OutputVector& new_args) const override;
+                /// \brief Host-tensor evaluation (constant folding / interpreter);
+                ///        supports bf16, f16 and f32 element types.
+                bool evaluate(const HostTensorVector& outputs,
+                              const HostTensorVector& inputs) const override;
+            };
+        }
+    }
+}
#include "ngraph/op/gru_cell.hpp"
#include "ngraph/op/gru_sequence.hpp"
#include "ngraph/op/hard_sigmoid.hpp"
+#include "ngraph/op/hsigmoid.hpp"
#include "ngraph/op/hswish.hpp"
#include "ngraph/op/interpolate.hpp"
#include "ngraph/op/less.hpp"
// New operations added in opset5
NGRAPH_OP(GatherND, ngraph::op::v5)
+NGRAPH_OP(GRUSequence, ngraph::op::v5)
+NGRAPH_OP(HSigmoid, ngraph::op::v5)
NGRAPH_OP(LogSoftmax, ngraph::op::v5)
NGRAPH_OP(LSTMSequence, ngraph::op::v5)
NGRAPH_OP(NonMaxSuppression, ngraph::op::v5)
-NGRAPH_OP(GRUSequence, ngraph::op::v5)
NGRAPH_OP(RNNSequence, ngraph::op::v5)
NGRAPH_OP(Round, ngraph::op::v5)
--- /dev/null
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include <algorithm>
+#include <cmath>
+#include <cstddef>
+
+namespace ngraph
+{
+    namespace runtime
+    {
+        namespace reference
+        {
+            /// \brief Reference kernel for HSigmoid: f(x) = min(max(x + 3, 0), 6) / 6.
+            ///
+            /// \param arg   Pointer to the input buffer.
+            /// \param out   Pointer to the output buffer (same length as the input).
+            /// \param count Number of elements to process.
+            template <typename T>
+            void hsigmoid(const T* arg, T* out, size_t count)
+            {
+                for (size_t i = 0; i < count; i++)
+                {
+                    // Shift by 3, clamp into [0, 6], then rescale into [0, 1].
+                    const T shifted = arg[i] + 3.0f;
+                    const T floored =
+                        (shifted < static_cast<T>(0.0f)) ? static_cast<T>(0.0f) : shifted;
+                    const T clamped =
+                        (static_cast<T>(6.0f) < floored) ? static_cast<T>(6.0f) : floored;
+                    out[i] = clamped / 6.0f;
+                }
+            }
+        }
+    }
+}
--- /dev/null
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include "ngraph/op/hsigmoid.hpp"
+#include "ngraph/attribute_visitor.hpp"
+#include "ngraph/op/constant.hpp"
+
+#include "ngraph/runtime/host_tensor.hpp"
+#include "ngraph/runtime/reference/hsigmoid.hpp"
+
+using namespace std;
+using namespace ngraph;
+
+// Registers the op's runtime type info: name "HSigmoid", opset version 5.
+NGRAPH_RTTI_DEFINITION(op::v5::HSigmoid, "HSigmoid", 5);
+
+// Single-input constructor; output shape/element type are inferred from the
+// input by the UnaryElementwiseArithmetic base during validation.
+op::v5::HSigmoid::HSigmoid(const Output<Node>& arg)
+    : UnaryElementwiseArithmetic(arg)
+{
+    constructor_validate_and_infer_types();
+}
+
+// HSigmoid has no attributes, so there is nothing to record on the visitor;
+// returning true signals a successful (empty) visit.
+bool op::v5::HSigmoid::visit_attributes(AttributeVisitor& visitor)
+{
+    return true;
+}
+
+// Creates a new HSigmoid node wired to the given replacement inputs.
+shared_ptr<Node> op::v5::HSigmoid::clone_with_new_inputs(const OutputVector& new_args) const
+{
+    // Validate the argument count up front (standard pattern used by other
+    // nGraph ops) instead of relying on new_args.at(0) throwing on an empty
+    // vector and silently ignoring extra inputs.
+    check_new_args_count(this, new_args);
+    return make_shared<op::v5::HSigmoid>(new_args.at(0));
+}
+
+namespace
+{
+    // Instantiates and runs the reference hsigmoid kernel for one concrete
+    // element type ET; count is the number of elements in the flat buffer.
+    template <element::Type_t ET>
+    inline bool evaluate(const HostTensorPtr& arg, const HostTensorPtr& out, const size_t count)
+    {
+        using T = typename element_type_traits<ET>::value_type;
+
+        runtime::reference::hsigmoid<T>(arg->get_data_ptr<ET>(), out->get_data_ptr<ET>(), count);
+        return true;
+    }
+
+    // Dispatches on the input's runtime element type. Only the floating-point
+    // types bf16/f16/f32 are handled; any other type returns false so the
+    // caller can fall back to a non-evaluated path.
+    bool evaluate_hsigmoid(const HostTensorPtr& arg, const HostTensorPtr& out, const size_t count)
+    {
+        bool rc = true;
+        // Propagate the input's element type/shape onto the output tensor.
+        out->set_unary(arg);
+
+        // TYPE_CASE is a project macro that expands to
+        // "case element::Type_t::X: rc = evaluate<element::Type_t::X>",
+        // hence the unusual-looking "(arg, out, count); break;" lines below.
+        switch (arg->get_element_type())
+        {
+            TYPE_CASE(bf16)(arg, out, count);
+            break;
+            TYPE_CASE(f16)(arg, out, count);
+            break;
+            TYPE_CASE(f32)(arg, out, count);
+            break;
+        default: rc = false; break;
+        }
+        return rc;
+    }
+}
+
+// Host-tensor evaluation entry point; returns false for unsupported element
+// types (anything other than bf16/f16/f32).
+// NOTE(review): inputs[0]/outputs[0] are indexed without size checks — this
+// assumes the caller supplies exactly one valid input and one output tensor;
+// confirm against the evaluate() calling convention.
+bool op::v5::HSigmoid::evaluate(const HostTensorVector& outputs,
+                                const HostTensorVector& inputs) const
+{
+    return evaluate_hsigmoid(inputs[0], outputs[0], shape_size(get_output_shape(0)));
+}
node_input_output.cpp
op.cpp
op_eval/floor_mod.cpp
+ op_eval/hsigmoid.cpp
op_eval/hswish.cpp
op_eval/interpolate.cpp
op_eval/matmul.cpp
--- /dev/null
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#include <string>
+#include <vector>
+
+#include "gtest/gtest.h"
+
+#include "ngraph/op/hsigmoid.hpp"
+#include "ngraph/runtime/host_tensor.hpp"
+#include "ngraph/validation_util.hpp"
+#include "runtime/backend.hpp"
+#include "util/test_tools.hpp"
+
+using namespace std;
+using namespace ngraph;
+
+// Checks HSigmoid-5 host evaluation: f(x) = min(max(x + 3, 0), 6) / 6.
+TEST(op_eval, hsigmoid)
+{
+    auto p = make_shared<op::Parameter>(element::f32, Shape{3});
+    // Fix copy-paste naming from the swish test: this node is an HSigmoid.
+    auto hsigmoid = make_shared<op::v5::HSigmoid>(p);
+    auto fun = make_shared<Function>(OutputVector{hsigmoid}, ParameterVector{p});
+
+    std::vector<float> inputs{-0.5f, 0.0f, 0.5f};
+    // Inputs lie in the linear region (-3, 3), so expected = (x + 3) / 6.
+    std::vector<float> expected_result{0.416667f, 0.5f, 0.583333f};
+
+    auto result = make_shared<HostTensor>();
+    ASSERT_TRUE(
+        fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(Shape{3}, inputs)}));
+    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_shape(), Shape{3});
+    auto result_data = read_vector<float>(result);
+    // size_t index avoids the signed/unsigned comparison from `auto i = 0`.
+    for (size_t i = 0; i < inputs.size(); i++)
+        EXPECT_NEAR(result_data[i], expected_result[i], 0.000001);
+}