[locomotiv] Support TensorSoftmax (#6620)
author남궁석/On-Device Lab(SR)/Engineer/삼성전자 <sk.namkoong@samsung.com>
Mon, 19 Aug 2019 05:28:44 +0000 (14:28 +0900)
committer박종현/On-Device Lab(SR)/Staff Engineer/삼성전자 <jh1302.park@samsung.com>
Mon, 19 Aug 2019 05:28:44 +0000 (14:28 +0900)
* [locomotiv] Support TensorSoftmax

This commit enables support for `TensorSoftmax` in `locomotiv`

Signed-off-by: Seok NamKoong <sk.namkoong@samsung.com>
* Modify domain assert

compiler/locomotiv/src/Node.lst
compiler/locomotiv/src/Node/Softmax.cpp [new file with mode: 0644]
compiler/locomotiv/src/Node/Softmax.test.cpp [new file with mode: 0644]

index 391294a..86aae88 100644 (file)
@@ -28,3 +28,4 @@ NODE(ReLU)
 NODE(ReLU6)
 NODE(Reshape<loco::ReshapeType::Fixed>)
 NODE(TensorConcat)
+NODE(TensorSoftmax)
diff --git a/compiler/locomotiv/src/Node/Softmax.cpp b/compiler/locomotiv/src/Node/Softmax.cpp
new file mode 100644 (file)
index 0000000..4093b98
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NodeExecution.h"
+
+#include "NodeDataImpl.h"
+#include "NodeDomain.h"
+#include "Validation.h"
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <nncc/core/ADT/tensor/Buffer.h>
+#include <nncc/core/ADT/tensor/Index.h>
+#include <nncc/core/ADT/tensor/IndexEnumerator.h>
+#include <nncc/core/ADT/tensor/LexicalLayout.h>
+
+using nncc::core::ADT::tensor::Index;
+using nncc::core::ADT::tensor::IndexEnumerator;
+using nncc::core::ADT::tensor::LexicalLayout;
+using nncc::core::ADT::tensor::make_buffer;
+using nncc::core::ADT::tensor::Shape;
+
+#include <cassert>
+#include <stdexcept>
+#include <math.h>
+
+namespace
+{
+
+// Copy 'index', forcing the coordinate along 'axis' to 0.
+// Used to map a full tensor index onto its reduced (per-slice) counterpart.
+Index reduce_index(const Index &index, uint32_t axis)
+{
+  Index reduced;
+
+  reduced.resize(index.rank());
+  for (uint32_t d = 0; d < index.rank(); ++d)
+  {
+    reduced.at(d) = (d == axis) ? 0 : index.at(d);
+  }
+
+  return reduced;
+}
+
+// Copy 'shape', collapsing the extent along 'axis' to 1.
+// Yields the shape of the per-slice reduction buffer.
+Shape reduce_shape(const Shape &shape, uint32_t axis)
+{
+  Shape reduced;
+
+  reduced.resize(shape.rank());
+  for (uint32_t d = 0; d < shape.rank(); ++d)
+  {
+    reduced.dim(d) = (d == axis) ? 1 : shape.dim(d);
+  }
+
+  return reduced;
+}
+
+} // namespace
+
+namespace locomotiv
+{
+
+void NodeExecution::execute(loco::TensorSoftmax *softmax)
+{
+  auto input_data = annot_data(softmax->input());
+
+  validate(input_data, "Input not ready");
+  validate(annot_domain(softmax->input()) == loco::Domain::Tensor,
+           "Input domain of TensorSoftmax is not Tensor");
+
+  std::unique_ptr<NodeData> softmax_data = nullptr;
+
+  switch (input_data->dtype())
+  {
+    case loco::DataType::FLOAT32:
+    {
+      auto axis = softmax->axis();
+
+      auto *input_shape = input_data->shape();
+      auto input_bufptr = input_data->as_f32_bufptr();
+      auto softmax_buf = make_buffer<float, LexicalLayout>(*input_data->shape());
+
+      auto reduce_sum_shape = reduce_shape(*input_shape, axis);
+      auto reduce_sum_bufptr = make_buffer<float, LexicalLayout>(reduce_sum_shape);
+
+      for (IndexEnumerator e{*input_shape}; e.valid(); e.advance())
+      {
+        const auto &index = e.current();
+        const auto r_index = reduce_index(index, axis);
+
+        reduce_sum_bufptr.at(r_index) += exp(input_bufptr->at(index));
+      }
+
+      for (IndexEnumerator e{*input_shape}; e.valid(); e.advance())
+      {
+        const auto &index = e.current();
+        const auto r_index = reduce_index(index, axis);
+
+        softmax_buf.at(index) = exp(input_bufptr->at(index)) / reduce_sum_bufptr.at(r_index);
+      }
+
+      softmax_data = make_data(softmax_buf);
+      break;
+    }
+    default:
+      throw std::runtime_error("NYI for this DataType");
+  }
+
+  assert(softmax_data != nullptr);
+  erase_annot_data(softmax);
+  annot_data(softmax, std::move(softmax_data));
+  annot_domain(softmax, annot_domain(softmax->input()));
+}
+
+} // namespace locomotiv
diff --git a/compiler/locomotiv/src/Node/Softmax.test.cpp b/compiler/locomotiv/src/Node/Softmax.test.cpp
new file mode 100644 (file)
index 0000000..21d2402
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NodeExecution.h"
+
+#include "locomotiv/NodeData.h"
+#include "NodeDataImpl.h"
+#include "NodeDomain.h"
+
+#include <nncc/core/ADT/tensor/Shape.h>
+#include <nncc/core/ADT/tensor/Buffer.h>
+#include <nncc/core/ADT/tensor/LexicalLayout.h>
+
+#include <gtest/gtest.h>
+
+using nncc::core::ADT::tensor::Index;
+using nncc::core::ADT::tensor::Shape;
+using nncc::core::ADT::tensor::LexicalLayout;
+using nncc::core::ADT::tensor::make_buffer;
+
+TEST(NodeExecution_Softmax, f32)
+{
+  // Make pull-softmax graph
+  auto g = loco::make_graph();
+  auto pull = g->nodes()->create<loco::Pull>();
+  pull->dtype(loco::DataType::FLOAT32);
+  pull->shape({2, 2});
+  auto softmax = g->nodes()->create<loco::TensorSoftmax>();
+  softmax->input(pull);
+  softmax->axis(1);
+
+  // Make and assign data to pull node
+  auto pull_buf = make_buffer<float, LexicalLayout>({2, 2});
+  pull_buf.at(Index{0, 0}) = 1.1f;
+  pull_buf.at(Index{0, 1}) = 1.1f;
+  pull_buf.at(Index{1, 0}) = 3.3f;
+  pull_buf.at(Index{1, 1}) = 3.3f;
+  auto pull_data = locomotiv::make_data(pull_buf);
+  locomotiv::annot_data(pull, std::move(pull_data));
+  locomotiv::annot_domain(pull, loco::Domain::Tensor);
+
+  locomotiv::NodeExecution::get().run(softmax);
+
+  auto kShape = Shape{2, 2};
+  auto softmax_data = locomotiv::annot_data(softmax);
+  ASSERT_NE(softmax_data, nullptr);
+  ASSERT_EQ(softmax_data->dtype(), loco::DataType::FLOAT32);
+  ASSERT_EQ(*(softmax_data->shape()), kShape);
+  ASSERT_FLOAT_EQ(softmax_data->as_f32_bufptr()->at(Index{0, 0}), 0.5f);
+  ASSERT_FLOAT_EQ(softmax_data->as_f32_bufptr()->at(Index{0, 1}), 0.5f);
+  ASSERT_FLOAT_EQ(softmax_data->as_f32_bufptr()->at(Index{1, 0}), 0.5f);
+  ASSERT_FLOAT_EQ(softmax_data->as_f32_bufptr()->at(Index{1, 1}), 0.5f);
+
+  ASSERT_EQ(locomotiv::annot_domain(softmax), loco::Domain::Tensor);
+}