[neurun] Enable ResizeBilinear, ReLU1, ReLU6 ops (#4667)
author Sujin Kim/On-Device Lab(SR)/Engineer/Samsung Electronics <sjsujin.kim@samsung.com>
Wed, 13 Mar 2019 05:08:45 +0000 (14:08 +0900)
committer Chunseok Lee/On-Device Lab(SR)/Staff Engineer/Samsung Electronics <chunseok.lee@samsung.com>
Wed, 13 Mar 2019 05:08:45 +0000 (14:08 +0900)
Related : #4259

This commit enables the `ResizeBilinear`, `ReLU1`, and `ReLU6` operations for the `acl_cl` backend.
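
As a quick reference, below is a minimal standalone sketch (not part of this commit) of how the three ops are lowered to ACL CL functions in `StageGenerator`: `ReLU1` and `ReLU6` become `CLActivationLayer` configurations, and `ResizeBilinear` becomes a `CLScale` with bilinear interpolation. The helper names and the `input`/`output` tensors are illustrative assumptions only; it presumes `ICLTensor` pointers whose shapes are already set up (for the resize case, the output shape carries the new height/width taken from the NN API scalar operands).

```cpp
// Sketch only: mirrors the StageGenerator::visit() bodies added in this commit.
#include <arm_compute/core/CL/ICLTensor.h>
#include <arm_compute/core/PixelValue.h>
#include <arm_compute/core/Types.h>
#include <arm_compute/runtime/CL/functions/CLActivationLayer.h>
#include <arm_compute/runtime/CL/functions/CLScale.h>

using namespace ::arm_compute;

void configure_relu1(ICLTensor *input, ICLTensor *output, CLActivationLayer &fn)
{
  // ReLU1 clamps to [-1, 1]: LU_BOUNDED_RELU with upper bound a = 1.0f and lower bound b = -1.0f
  fn.configure(input, output,
               ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f});
}

void configure_relu6(ICLTensor *input, ICLTensor *output, CLActivationLayer &fn)
{
  // ReLU6 clamps to [0, 6]: BOUNDED_RELU with upper bound a = 6.0f
  fn.configure(input, output,
               ActivationLayerInfo{ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0f});
}

void configure_resize_bilinear(ICLTensor *input, ICLTensor *output, CLScale &fn)
{
  // CLScale infers the scale factors from the input/output tensor shapes, so the
  // height/width scalars from the NN API model only affect the output tensor's shape.
  fn.configure(input, output, InterpolationPolicy::BILINEAR, BorderMode::REPLICATE,
               PixelValue(0.f), SamplingPolicy::TOP_LEFT);
}
```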

Signed-off-by: sjsujinkim <sjsujin.kim@samsung.com>
13 files changed:
runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
runtimes/neurun/src/backend/acl_cl/StageGenerator.h
runtimes/neurun/src/frontend/wrapper/OperationFactory.cc
runtimes/neurun/src/model/operation/Node.Include.h
runtimes/neurun/src/model/operation/Op.lst
runtimes/neurun/src/model/operation/ReLU1Node.cc [new file with mode: 0644]
runtimes/neurun/src/model/operation/ReLU1Node.h [new file with mode: 0644]
runtimes/neurun/src/model/operation/ReLU6Node.cc [new file with mode: 0644]
runtimes/neurun/src/model/operation/ReLU6Node.h [new file with mode: 0644]
runtimes/neurun/src/model/operation/ResizeBilinearNode.cc [new file with mode: 0644]
runtimes/neurun/src/model/operation/ResizeBilinearNode.h [new file with mode: 0644]
tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
tests/scripts/neurun_frameworktest_list.armv7l.acl_cl.txt

index e8bcbd3..f292299 100644 (file)
@@ -1736,6 +1736,130 @@ void StageGenerator::visit(const model::operation::ReLUNode &node)
   });
 }
 
+void StageGenerator::visit(const model::operation::ResizeBilinearNode &node)
+{
+  const auto ofm_index{node.getOutputs().at(0)};
+
+  const auto ifm_index{node.getInputs().at(model::operation::ResizeBilinearNode::Input::INPUT)};
+  const auto height_index{node.param().height_index};
+  const auto width_index{node.param().width_index};
+
+  struct Param
+  {
+    model::operand::Index ofm_index;
+    model::operand::Index ifm_index;
+
+    int32_t new_height;
+    int32_t new_width;
+  };
+
+  Param param;
+
+  param.ofm_index = ofm_index;
+  param.ifm_index = ifm_index;
+  param.new_height = _ctx.at(height_index).asScalar<int32_t>();
+  param.new_width = _ctx.at(width_index).asScalar<int32_t>();
+
+  auto tensors = _tensor_builder;
+
+  returnStage([tensors, param](IExecutionBuilder &builder) {
+    auto ofm_alloc = tensors->at(param.ofm_index).get();
+    auto ifm_alloc = tensors->at(param.ifm_index).get();
+
+    std::unique_ptr<::arm_compute::IFunction> fn;
+
+    auto l = make_layer<::arm_compute::CLScale>();
+
+    l->configure(ifm_alloc->handle(), ofm_alloc->handle(),
+                 ::arm_compute::InterpolationPolicy::BILINEAR, ::arm_compute::BorderMode::REPLICATE,
+                 ::arm_compute::PixelValue(0.f), ::arm_compute::SamplingPolicy::TOP_LEFT);
+
+    fn = std::move(l);
+
+    auto acl_fn = make_cl_function(std::move(fn));
+
+    builder.append(std::move(acl_fn));
+  });
+}
+
+void StageGenerator::visit(const model::operation::ReLU1Node &node)
+{
+  const auto ofm_index{node.getOutputs().at(0)};
+  const auto ifm_index{node.getInputs().at(model::operation::ReLU1Node::Input::INPUT)};
+
+  struct Param
+  {
+    model::operand::Index ofm_index;
+    model::operand::Index ifm_index;
+  };
+
+  Param param;
+
+  param.ofm_index = ofm_index;
+  param.ifm_index = ifm_index;
+
+  auto tensors = _tensor_builder;
+
+  returnStage([tensors, param](IExecutionBuilder &builder) {
+    auto ofm_alloc = tensors->at(param.ofm_index).get();
+    auto ifm_alloc = tensors->at(param.ifm_index).get();
+
+    const ::arm_compute::ActivationLayerInfo act_info{
+        ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f};
+
+    std::unique_ptr<::arm_compute::IFunction> fn;
+
+    auto l = make_layer<::arm_compute::CLActivationLayer>();
+
+    l->configure(ifm_alloc->handle(), ofm_alloc->handle(), act_info);
+
+    fn = std::move(l);
+
+    auto acl_fn = make_cl_function(std::move(fn));
+
+    builder.append(std::move(acl_fn));
+  });
+}
+
+void StageGenerator::visit(const model::operation::ReLU6Node &node)
+{
+  const auto ofm_index{node.getOutputs().at(0)};
+  const auto ifm_index{node.getInputs().at(model::operation::ReLU6Node::Input::INPUT)};
+
+  struct Param
+  {
+    model::operand::Index ofm_index;
+    model::operand::Index ifm_index;
+  };
+
+  Param param;
+
+  param.ofm_index = ofm_index;
+  param.ifm_index = ifm_index;
+
+  auto tensors = _tensor_builder;
+
+  returnStage([tensors, param](IExecutionBuilder &builder) {
+    auto ofm_alloc = tensors->at(param.ofm_index).get();
+    auto ifm_alloc = tensors->at(param.ifm_index).get();
+
+    const ::arm_compute::ActivationLayerInfo act_info{
+        ::arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0f};
+
+    std::unique_ptr<::arm_compute::IFunction> fn;
+
+    auto l = make_layer<::arm_compute::CLActivationLayer>();
+
+    l->configure(ifm_alloc->handle(), ofm_alloc->handle(), act_info);
+
+    fn = std::move(l);
+
+    auto acl_fn = make_cl_function(std::move(fn));
+
+    builder.append(std::move(acl_fn));
+  });
+}
+
 } // namespace acl_cl
 } // namespace backend
 } // namespace neurun
index 686293c..0e289a5 100644 (file)
@@ -62,6 +62,9 @@ public:
   virtual void visit(const model::operation::LogicalAndNode &) override;
   virtual void visit(const model::operation::RSQRTNode &) override;
   virtual void visit(const model::operation::ReLUNode &) override;
+  virtual void visit(const model::operation::ResizeBilinearNode &) override;
+  virtual void visit(const model::operation::ReLU1Node &) override;
+  virtual void visit(const model::operation::ReLU6Node &) override;
 
 private:
   const neurun::model::operand::Set &_ctx;
index 8275e07..48e8f16 100644 (file)
@@ -655,6 +655,51 @@ OperationFactory::OperationFactory()
 
     return new operation::ReLUNode{inputs, outputs};
   };
+
+  _map[ANEURALNETWORKS_RESIZE_BILINEAR] = [](const OperationFactory::Param &init_param) {
+    assert(init_param.input_count == 3 && init_param.output_count == 1);
+
+    operand::IndexSet outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> IFM Index
+    //  1 -> Height Index
+    //  2 -> Width Index
+    operand::IndexSet inputs{init_param.inputs[0]};
+
+    operation::ResizeBilinearNode::Param param;
+    param.height_index = operand::Index{init_param.inputs[1]};
+    param.width_index = operand::Index{init_param.inputs[2]};
+
+    return new operation::ResizeBilinearNode{inputs, outputs, param};
+  };
+
+  _map[ANEURALNETWORKS_RELU1] = [](const OperationFactory::Param &init_param) {
+    assert(init_param.input_count == 1 && init_param.output_count == 1);
+
+    operand::IndexSet outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> input Tensor Index
+    operand::IndexSet inputs{init_param.inputs[0]};
+
+    return new operation::ReLU1Node{inputs, outputs};
+  };
+
+  _map[ANEURALNETWORKS_RELU6] = [](const OperationFactory::Param &init_param) {
+    assert(init_param.input_count == 1 && init_param.output_count == 1);
+
+    operand::IndexSet outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> input Tensor Index
+    operand::IndexSet inputs{init_param.inputs[0]};
+
+    return new operation::ReLU6Node{inputs, outputs};
+  };
 }
 
 neurun::model::operation::Node *OperationFactory::create(ANeuralNetworksOperationType type,
index 7289c9d..6104f27 100644 (file)
@@ -42,3 +42,6 @@
 #include "LogicalAndNode.h"
 #include "RSQRTNode.h"
 #include "ReLUNode.h"
+#include "ResizeBilinearNode.h"
+#include "ReLU1Node.h"
+#include "ReLU6Node.h"
index a3ea626..ed2d6db 100644 (file)
@@ -46,4 +46,7 @@ OP(NotEqualNode            , true    , NOT_EQUAL_EX)
 OP(LogicalAndNode          , true    , LOGICAL_AND_EX)
 OP(RSQRTNode               , true    , RSQRT_EX)
 OP(ReLUNode                , true    , RELU)
+OP(ResizeBilinearNode      , true    , RESIZE_BILINEAR)
+OP(ReLU1Node               , true    , RELU1)
+OP(ReLU6Node               , true    , RELU6)
 OP(PermuteNode             , false   , NOT_AVAILABLE)
diff --git a/runtimes/neurun/src/model/operation/ReLU1Node.cc b/runtimes/neurun/src/model/operation/ReLU1Node.cc
new file mode 100644 (file)
index 0000000..576f83c
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReLU1Node.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ReLU1Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+ReLU1Node::ReLU1Node(const operand::IndexSet &inputs, const operand::IndexSet &outputs)
+    : model::operation::Node{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/src/model/operation/ReLU1Node.h b/runtimes/neurun/src/model/operation/ReLU1Node.h
new file mode 100644 (file)
index 0000000..5697c9f
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_ReLU1_NODE_H__
+#define __NEURUN_MODEL_OPERATION_ReLU1_NODE_H__
+
+#include "model/operation/Node.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ReLU1Node : public model::operation::Node
+{
+public:
+  enum Input
+  {
+    INPUT = 0
+  };
+
+public:
+  ReLU1Node(const operand::IndexSet &inputs, const operand::IndexSet &outputs);
+
+public:
+  virtual void accept(NodeVisitor &&) const override;
+  virtual std::string getName() const override { return "ReLU1"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_ReLU1_NODE_H__
diff --git a/runtimes/neurun/src/model/operation/ReLU6Node.cc b/runtimes/neurun/src/model/operation/ReLU6Node.cc
new file mode 100644 (file)
index 0000000..37d58e8
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReLU6Node.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ReLU6Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+ReLU6Node::ReLU6Node(const operand::IndexSet &inputs, const operand::IndexSet &outputs)
+    : model::operation::Node{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/src/model/operation/ReLU6Node.h b/runtimes/neurun/src/model/operation/ReLU6Node.h
new file mode 100644 (file)
index 0000000..69ebff3
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_ReLU6_NODE_H__
+#define __NEURUN_MODEL_OPERATION_ReLU6_NODE_H__
+
+#include "model/operation/Node.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ReLU6Node : public model::operation::Node
+{
+public:
+  enum Input
+  {
+    INPUT = 0
+  };
+
+public:
+  ReLU6Node(const operand::IndexSet &inputs, const operand::IndexSet &outputs);
+
+public:
+  virtual void accept(NodeVisitor &&) const override;
+  virtual std::string getName() const override { return "ReLU6"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_ReLU6_NODE_H__
diff --git a/runtimes/neurun/src/model/operation/ResizeBilinearNode.cc b/runtimes/neurun/src/model/operation/ResizeBilinearNode.cc
new file mode 100644 (file)
index 0000000..df9b93a
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResizeBilinearNode.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ResizeBilinearNode::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+ResizeBilinearNode::ResizeBilinearNode(const operand::IndexSet &inputs,
+                                       const operand::IndexSet &outputs, const Param &param)
+    : model::operation::Node{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/src/model/operation/ResizeBilinearNode.h b/runtimes/neurun/src/model/operation/ResizeBilinearNode.h
new file mode 100644 (file)
index 0000000..f29adb2
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_RESIZE_BILINEAR_NODE_H__
+#define __NEURUN_MODEL_OPERATION_RESIZE_BILINEAR_NODE_H__
+
+#include <memory>
+
+#include "model/operation/Node.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ResizeBilinearNode : public model::operation::Node
+{
+public:
+  enum Input
+  {
+    INPUT = 0
+  };
+
+  struct Param
+  {
+    operand::Index height_index;
+    operand::Index width_index;
+  };
+
+public:
+  ResizeBilinearNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+                     const Param &param);
+
+public:
+  virtual void accept(NodeVisitor &&) const override;
+  virtual std::string getName() const override { return "ResizeBilinear"; }
+
+public:
+  const Param &param() const { return _param; }
+
+private:
+  Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_RESIZE_BILINEAR_NODE_H__
index 7d91c12..9674b2f 100644 (file)
@@ -33,6 +33,3 @@ GeneratedTests.reduce_min*
-GeneratedTests.relu1*
-GeneratedTests.relu6*
-GeneratedTests.resize_bilinear*
 GeneratedTests.rnn*
 GeneratedTests.mean*
 GeneratedTests.pad*
index ec63189..2a89033 100644 (file)
@@ -17,9 +17,12 @@ reduce_max
 relu
 reshape
 rsqrt
+relu1
+relu6
+resize_bilinear
 strided_slice
 sub/broadcast
 tanh
 transpose
 MODELS/inception_module
-squeeze
\ No newline at end of file
+squeeze