Add softmax operation implementation (#542)
author Vladimir Plazun/AI Tools Lab /SRR/Engineer/삼성전자 <v.plazun@partner.samsung.com>
Tue, 10 Jul 2018 10:38:01 +0000 (13:38 +0300)
committer Sergey Vostokov/AI Tools Lab /SRR/Staff Engineer/삼성전자 <s.vostokov@samsung.com>
Tue, 10 Jul 2018 10:38:01 +0000 (19:38 +0900)
Used by the model IR interpreter backend.

Signed-off-by: Vladimir Plazun <v.plazun@partner.samsung.com>
contrib/nnc/libs/backend/interpreter/core/include/interpreter/ops/Softmax.h [new file with mode: 0644]
contrib/nnc/libs/backend/interpreter/core/src/ops/Softmax.cpp [new file with mode: 0644]

diff --git a/contrib/nnc/libs/backend/interpreter/core/include/interpreter/ops/Softmax.h b/contrib/nnc/libs/backend/interpreter/core/include/interpreter/ops/Softmax.h
new file mode 100644 (file)
index 0000000..759f1ca
--- /dev/null
@@ -0,0 +1,66 @@
+#ifndef _NNC_CORE_BACKEND_INTERPRETER_SOFTMAX_IMPL_
+#define _NNC_CORE_BACKEND_INTERPRETER_SOFTMAX_IMPL_
+
+#include <cmath>
+
+#include "nnc/core/linalg/ShapeRange.h"
+#include "nnc/core/linalg/Tensor.h"
+
+#include "interpreter/ops/OperationImpl.h"
+#include "interpreter/ops/Elementwise.h"
+#include "interpreter/ops/Reduce.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace backend
+{
+namespace interpreter
+{
+namespace impl
+{
+
+// Softmax along a single axis: out[i] = exp(in[i]) / sum of exp(in) over that axis.
+class Softmax : public OperationImpl<float>
+{
+public:
+  Softmax(const Shape &inputShape, const TensorVariant &input, uint32_t axis)
+      : _inShape(inputShape), _axis(axis), _input(input)
+  {
+  }
+
+  std::vector<TensorVariant> operator()() override
+  {
+    Tensor<float> inputAccessor(_input);
+
+    // Sum of exponentials along _axis; the reduced dimension is kept with size 1.
+    Shape expsumShape = _inShape;
+    expsumShape.dim(_axis) = 1;
+    TensorVariant expsum =
+        Reduce<float>(_inShape, expsumShape, _input, _axis,
+                      [](float expsum, float item) { return expsum + std::exp(item); })()[0];
+
+    Tensor<float> expsumAccessor(expsum);
+
+    // Normalize: divide each exponentiated element by the sum of its slice along _axis.
+    return Fill<float>(_inShape, [&inputAccessor, &expsumAccessor, this](const Index &id) {
+      Index expsumIndex = id;
+      expsumIndex.at(_axis) = 0;
+      return std::exp(inputAccessor.at(id)) / expsumAccessor.at(expsumIndex);
+    })();
+  }
+
+private:
+  const Shape &_inShape;
+  const uint32_t _axis;
+  const TensorVariant _input;
+};
+
+} // namespace impl
+} // namespace interpreter
+} // namespace backend
+} // namespace contrib
+} // namespace nncc
+
+#endif //_NNC_CORE_BACKEND_INTERPRETER_SOFTMAX_IMPL_
diff --git a/contrib/nnc/libs/backend/interpreter/core/src/ops/Softmax.cpp b/contrib/nnc/libs/backend/interpreter/core/src/ops/Softmax.cpp
new file mode 100644 (file)
index 0000000..e030448
--- /dev/null
@@ -0,0 +1,2 @@
+#include "interpreter/ops/Softmax.h"
+
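The operation above computes softmax along a single axis: each element is exponentiated and divided by the sum of exponentials over that axis. Below is a minimal standalone sketch of the same reduce-then-normalize structure on a plain row-major buffer, for reference only; it does not use the nncc Shape/Tensor/TensorVariant types, and the function name softmaxLastAxis is illustrative, not part of this commit.

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Softmax over the last axis of a row-major [rows x cols] buffer:
// out[r][c] = exp(in[r][c]) / sum_k exp(in[r][k])
std::vector<float> softmaxLastAxis(const std::vector<float> &in, std::size_t rows, std::size_t cols)
{
  std::vector<float> out(in.size());
  for (std::size_t r = 0; r < rows; ++r)
  {
    // Reduce step: sum of exponentials for this row (mirrors the Reduce<float> call above).
    float expsum = 0.0f;
    for (std::size_t c = 0; c < cols; ++c)
      expsum += std::exp(in[r * cols + c]);

    // Fill step: exponentiate each element and divide by its row's sum.
    for (std::size_t c = 0; c < cols; ++c)
      out[r * cols + c] = std::exp(in[r * cols + c]) / expsum;
  }
  return out;
}

int main()
{
  // Equal inputs produce a uniform distribution: 1/3 1/3 1/3.
  for (float v : softmaxLastAxis({1.0f, 1.0f, 1.0f}, 1, 3))
    std::cout << v << ' ';
  std::cout << '\n';
  return 0;
}

Like the committed operation, this sketch exponentiates raw inputs directly; softmax implementations often subtract the per-slice maximum before exponentiating to avoid overflow, which leaves the result mathematically unchanged.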