[locomotiv] Introduce ReLU (#3485)
author박세희/On-Device Lab(SR)/Principal Engineer/삼성전자 <saehie.park@samsung.com>
Thu, 16 May 2019 04:54:25 +0000 (13:54 +0900)
committerGitHub Enterprise <noreply-CODE@samsung.com>
Thu, 16 May 2019 04:54:25 +0000 (13:54 +0900)
* [locomotiv] Introduce ReLU

This will enable ReLU FLOAT32 execution

Signed-off-by: SaeHie Park <saehie.park@samsung.com>
* remove unused using

* use index enumerator

* remove unused code

contrib/locomotiv/src/Node.lst
contrib/locomotiv/src/Node/ReLU.cpp [new file with mode: 0644]
contrib/locomotiv/src/Node/ReLU.test.cpp [new file with mode: 0644]

index a7c689c..ff19451 100644 (file)
@@ -7,3 +7,4 @@
 NODE(Forward)
 NODE(Pull)
 NODE(Push)
+NODE(ReLU)
diff --git a/contrib/locomotiv/src/Node/ReLU.cpp b/contrib/locomotiv/src/Node/ReLU.cpp
new file mode 100644 (file)
index 0000000..b61f242
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NodeExecution.h"
+
+#include "NodeDataImpl.h"
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <nncc/core/ADT/tensor/Buffer.h>
+#include <nncc/core/ADT/tensor/Index.h>
+#include <nncc/core/ADT/tensor/IndexEnumerator.h>
+#include <nncc/core/ADT/tensor/LexicalLayout.h>
+
+using nncc::core::ADT::tensor::Index;
+using nncc::core::ADT::tensor::IndexEnumerator;
+using nncc::core::ADT::tensor::LexicalLayout;
+using nncc::core::ADT::tensor::make_buffer;
+
+#include <stdexcept>
+
+namespace
+{
+
+inline float relu_ew(float val) { return val > 0.0f ? val : 0.0f; }
+
+} // namespace
+
+namespace locomotiv
+{
+
+void NodeExecution::execute(loco::ReLU *relu)
+{
+  auto input_data = annot_data(relu->input());
+
+  if (!input_data)
+  {
+    throw std::runtime_error("Ingredient not ready");
+  }
+
+  switch (input_data->dtype())
+  {
+  case loco::DataType::FLOAT32:
+  {
+    auto input_bufptr = input_data->as_f32_bufptr();
+    auto relu_buf = make_buffer<float, LexicalLayout>(*input_data->shape());
+    auto *shape = input_data->shape();
+
+    for (IndexEnumerator e{*shape}; e.valid(); e.advance())
+    {
+      const auto &index = e.current();
+      relu_buf.at(index) = relu_ew(input_bufptr->at(index));
+    }
+
+    auto relu_data = make_data(relu_buf);
+    erase_annot_data(relu);
+    annot_data(relu, std::move(relu_data));
+    break;
+  }
+  default:
+    throw std::runtime_error("NYI for this DataType");
+  }
+}
+
+} // namespace locomotiv
diff --git a/contrib/locomotiv/src/Node/ReLU.test.cpp b/contrib/locomotiv/src/Node/ReLU.test.cpp
new file mode 100644 (file)
index 0000000..3f2b578
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NodeExecution.h"
+
+#include "locomotiv/NodeData.h"
+#include "NodeDataImpl.h"
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <nncc/core/ADT/tensor/Buffer.h>
+#include <nncc/core/ADT/tensor/LexicalLayout.h>
+
+#include <gtest/gtest.h>
+
+using nncc::core::ADT::tensor::Index;
+using nncc::core::ADT::tensor::Shape;
+using nncc::core::ADT::tensor::LexicalLayout;
+using nncc::core::ADT::tensor::make_buffer;
+
+TEST(NodeExecution_ReLU, f32)
+{
+  // Make pull-relu graph
+  auto g = loco::make_graph();
+  auto pull = g->nodes()->create<loco::Pull>();
+  pull->dtype(loco::DataType::FLOAT32);
+  pull->rank(1);
+  pull->dim(0) = loco::make_dimension(2);
+  auto relu = g->nodes()->create<loco::ReLU>();
+  relu->input(pull);
+
+  // Make and assign data to pull node
+  auto pull_buf = make_buffer<float, LexicalLayout>(Shape{2});
+  pull_buf.at(Index{0}) = -10.0f;
+  pull_buf.at(Index{1}) = 10.0f;
+  auto pull_data = locomotiv::make_data(pull_buf);
+  locomotiv::annot_data(pull, std::move(pull_data));
+
+  locomotiv::NodeExecution::get().run(relu);
+
+  auto relu_data = locomotiv::annot_data(relu);
+  ASSERT_NE(relu_data, nullptr);
+  ASSERT_EQ(relu_data->dtype(), loco::DataType::FLOAT32);
+  ASSERT_EQ(*(relu_data->shape()), Shape{2});
+  ASSERT_FLOAT_EQ(relu_data->as_f32_bufptr()->at(Index{0}), 0.0f);
+  ASSERT_FLOAT_EQ(relu_data->as_f32_bufptr()->at(Index{1}), 10.0f);
+}