Add interpreter implementations for new operations (#1456)
authorVladimir Plazun/AI Tools Lab /SRR/Engineer/삼성전자 <v.plazun@partner.samsung.com>
Thu, 13 Sep 2018 17:03:15 +0000 (20:03 +0300)
committerРоман Михайлович Русяев/AI Tools Lab /SRR/Staff Engineer/삼성전자 <r.rusyaev@samsung.com>
Thu, 13 Sep 2018 17:03:15 +0000 (20:03 +0300)
Adds implementations for the operations added in #1396 (Scale, Dropout, BatchNorm)

TODO: BatchNorm and Dropout are simple stubs (they just copy input to output)

Signed-off-by: Vladimir Plazun <v.plazun@partner.samsung.com>
contrib/nnc/passes/interpreter/Interpreter.cpp
contrib/nnc/passes/interpreter/ops/BatchNorm.cpp [new file with mode: 0644]
contrib/nnc/passes/interpreter/ops/BatchNorm.h [new file with mode: 0644]
contrib/nnc/passes/interpreter/ops/Dropout.cpp [new file with mode: 0644]
contrib/nnc/passes/interpreter/ops/Dropout.h [new file with mode: 0644]
contrib/nnc/passes/interpreter/ops/Scale.cpp [new file with mode: 0644]
contrib/nnc/passes/interpreter/ops/Scale.h [new file with mode: 0644]

index de16ea6..809cadb 100644 (file)
@@ -26,6 +26,9 @@
 #include "ops/Pool.h"
 #include "ops/Reshape.h"
 #include "ops/Softmax.h"
+#include "ops/Scale.h"
+#include "ops/Dropout.h"
+#include "ops/BatchNorm.h"
 
 namespace nncc
 {
@@ -170,11 +173,8 @@ void NNInterpreter::visit(ADT::INode *node, ops::BatchNormOp &op)
   mapByName(node);
   auto operand = node->getPrevNodes()[0];
   TensorVariant input(var(operand.node->getId())[operand.index]);
-  (void)input; (void)op;
   // TODO implement this
-  //  var(node->getId()) = impl::BatchNormOp(input, op)();
-  assert("BatchNormOp Not implemented yet" == 0);
-
+  var(node->getId()) = impl::BatchNorm<float>(input, op)();
 }
 
 void NNInterpreter::visit(ADT::INode *node, ops::ScaleOp &op)
@@ -182,10 +182,8 @@ void NNInterpreter::visit(ADT::INode *node, ops::ScaleOp &op)
   mapByName(node);
   auto operand = node->getPrevNodes()[0];
   TensorVariant input(var(operand.node->getId())[operand.index]);
-  (void)input; (void)op;
   // TODO implement this
-  // var(node->getId()) = impl::ScaleOp(input, op)();
-  assert("ScaleOp Not implemented yet" == 0);
+  var(node->getId()) = impl::Scale(input, op)();
 }
 
 void NNInterpreter::visit(ADT::INode *node, ops::DropoutOp &op)
@@ -193,10 +191,8 @@ void NNInterpreter::visit(ADT::INode *node, ops::DropoutOp &op)
   mapByName(node);
   auto operand = node->getPrevNodes()[0];
   TensorVariant input(var(operand.node->getId())[operand.index]);
-  (void)input; (void)op;
   // TODO implement this
-  // var(node->getId()) = impl::DropoutOp(input, op)();
-  assert("DropoutOp Not implemented yet" == 0);
+  var(node->getId()) = impl::Dropout<float>(input, op)();
 }
 
 void NNInterpreter::mapByName(ADT::INode::Ref n) {
diff --git a/contrib/nnc/passes/interpreter/ops/BatchNorm.cpp b/contrib/nnc/passes/interpreter/ops/BatchNorm.cpp
new file mode 100644 (file)
index 0000000..5574b76
--- /dev/null
@@ -0,0 +1,3 @@
+#include "BatchNorm.h"
+
+//Stub to ensure BatchNorm.h is compiled
diff --git a/contrib/nnc/passes/interpreter/ops/BatchNorm.h b/contrib/nnc/passes/interpreter/ops/BatchNorm.h
new file mode 100644 (file)
index 0000000..fc06404
--- /dev/null
@@ -0,0 +1,60 @@
+#ifndef _NNC_CORE_BACKEND_INTERPRETER_BATCHNORM_IMPL_
+#define _NNC_CORE_BACKEND_INTERPRETER_BATCHNORM_IMPL_
+
+#include "OperationImpl.h"
+#include "Fill.h"
+
+#include "core/modelIR/operations/batch_norm.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace backend
+{
+namespace interpreter
+{
+namespace impl
+{
+
+using nncc::contrib::core::IR::model::ops::BatchNormOp;
+
+/**
+ * @brief Implements BatchNormOp for interpreter backend
+ * @note Simply copies input to output
+ * @tparam T type of data in input tensor
+ */
+template<typename T>
+class BatchNorm : public OperationImpl<T>
+{
+public:
+  /**
+   * @param input input data
+   * @param op batch normalization operation description
+   */
+  explicit BatchNorm(const TensorVariant& input, const BatchNormOp& op) : _input(input), _op(op) {}
+
+  /**
+   * @brief computes operation application result
+   * @return vector of all outputs from this node
+   */
+  std::vector<TensorVariant> operator()() override
+  {
+    //For now BatchNorm just copies input to output
+    return Fill<T>(_input.getShape(), [this](const Index& idx) {
+      return _input.at(idx);
+    })();
+  }
+
+private:
+  const Tensor<T> _input;
+  const BatchNormOp& _op;
+};
+
+} // namespace impl
+} // namespace interpreter
+} // namespace backend
+} // namespace contrib
+} // namespace nncc
+
+#endif // _NNC_CORE_BACKEND_INTERPRETER_BATCHNORM_IMPL_
diff --git a/contrib/nnc/passes/interpreter/ops/Dropout.cpp b/contrib/nnc/passes/interpreter/ops/Dropout.cpp
new file mode 100644 (file)
index 0000000..94038a5
--- /dev/null
@@ -0,0 +1,3 @@
+#include "Dropout.h"
+
+//Stub to ensure Dropout.h is compiled
diff --git a/contrib/nnc/passes/interpreter/ops/Dropout.h b/contrib/nnc/passes/interpreter/ops/Dropout.h
new file mode 100644 (file)
index 0000000..faef5de
--- /dev/null
@@ -0,0 +1,62 @@
+#ifndef _NNC_CORE_BACKEND_INTERPRETER_DROPOUT_IMPL_
+#define _NNC_CORE_BACKEND_INTERPRETER_DROPOUT_IMPL_
+
+#include "OperationImpl.h"
+#include "Fill.h"
+
+#include "core/modelIR/operations/dropout_op.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace backend
+{
+namespace interpreter
+{
+namespace impl
+{
+
+using nncc::contrib::core::IR::model::ops::DropoutOp;
+
+/**
+ * @brief Implements DropoutOp for interpreter backend
+ * @note Simply copies input to output
+ * @tparam T type of data in input tensor
+ */
+template<typename T>
+class Dropout : public OperationImpl<T> {
+public:
+  /**
+   * @param in input data
+   * @param op dropout operation description
+   */
+  explicit Dropout(const TensorVariant& in, const DropoutOp& op) : _input(in), _op(op) {}
+
+  /**
+   * @brief computes operation application result
+   * @return vector of all outputs from this node
+   */
+  std::vector<TensorVariant> operator()() override;
+
+private:
+  const Tensor<T> _input;
+  const DropoutOp& _op;
+};
+
+template<typename T>
+std::vector<TensorVariant> Dropout<T>::operator()()
+{
+  //For now dropout just copies input to output
+  return Fill<T>(_input.getShape(), [this](const Index& idx) {
+    return _input.at(idx);
+  })();
+}
+
+} // namespace impl
+} // namespace interpreter
+} // namespace backend
+} // namespace contrib
+} // namespace nncc
+
+#endif // _NNC_CORE_BACKEND_INTERPRETER_DROPOUT_IMPL_
diff --git a/contrib/nnc/passes/interpreter/ops/Scale.cpp b/contrib/nnc/passes/interpreter/ops/Scale.cpp
new file mode 100644 (file)
index 0000000..dbe1ccb
--- /dev/null
@@ -0,0 +1,29 @@
+#include "Scale.h"
+
+#include "Fill.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace backend
+{
+namespace interpreter
+{
+namespace impl
+{
+
+std::vector<TensorVariant> Scale::operator()()
+{
+  //For now handles only most common case with scale applied by last dimension
+  //Capture accessor by reference: the Fill functor runs before it goes out of scope
+  Tensor<float> weightsAccessor(_op.getWeights());
+  return Fill<float>(_input.getShape(), [this, &weightsAccessor](const Index &idx) {
+    return _input.at(idx) * weightsAccessor.at({idx.at(idx.rank() - 1)});
+  })();
+}
+
+} // namespace impl
+} // namespace interpreter
+} // namespace backend
+} // namespace contrib
+} // namespace nncc
diff --git a/contrib/nnc/passes/interpreter/ops/Scale.h b/contrib/nnc/passes/interpreter/ops/Scale.h
new file mode 100644 (file)
index 0000000..01a6d3e
--- /dev/null
@@ -0,0 +1,49 @@
+#ifndef _NNC_CORE_BACKEND_INTERPRETER_SCALE_IMPL_
+#define _NNC_CORE_BACKEND_INTERPRETER_SCALE_IMPL_
+
+#include "OperationImpl.h"
+
+#include "core/modelIR/operations/scale_op.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace backend
+{
+namespace interpreter
+{
+namespace impl
+{
+/**
+ * @brief Implements ScaleOp for interpreter backend
+ * @todo check if I need support for any datatypes other than DTYPE::FLOAT
+ */
+using nncc::contrib::core::IR::model::ops::ScaleOp;
+
+class Scale : public OperationImpl<float> {
+public:
+  /**
+   * @param in input data
+   * @param op scale operation description
+   */
+  explicit Scale(const TensorVariant& in, const ScaleOp& op) : _input(in), _op(op) {}
+
+  /**
+   * @brief computes operation application result
+   * @return vector of all outputs from this node
+   */
+  std::vector<TensorVariant> operator()() override;
+
+private:
+  Tensor<float> _input;
+  const ScaleOp& _op;
+};
+
+} // namespace impl
+} // namespace interpreter
+} // namespace backend
+} // namespace contrib
+} // namespace nncc
+
+#endif // _NNC_CORE_BACKEND_INTERPRETER_SCALE_IMPL_