[nnc backend] Add NNInterpreter class (#576)
author Vladimir Plazun/AI Tools Lab /SRR/Engineer/삼성전자 <v.plazun@partner.samsung.com>
Wed, 11 Jul 2018 14:08:57 +0000 (17:08 +0300)
committer Sergey Vostokov/AI Tools Lab /SRR/Staff Engineer/삼성전자 <s.vostokov@samsung.com>
Wed, 11 Jul 2018 14:08:57 +0000 (23:08 +0900)
Add NNInterpreter class

This class implements an interpreter for the model IR: it visits graph nodes and computes their output tensors.
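
A minimal usage sketch (the graph object g, its accept() traversal, inputTensor
and outputNode are placeholders used for illustration, not part of this change):

  NNInterpreter interpreter;
  interpreter.setInput("input", inputTensor);   // bind data to the VariableOp named "input"
  g->accept(&interpreter);                      // traversal invokes visit() for every node
  std::vector<TensorVariant> &out = interpreter.getResult(outputNode);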

Signed-off-by: Vladimir Plazun <v.plazun@partner.samsung.com>
contrib/nnc/libs/backend/interpreter/core/include/interpreter/core/Interpreter.h [new file with mode: 0644]
contrib/nnc/libs/backend/interpreter/core/src/Interpreter.cpp [new file with mode: 0644]

diff --git a/contrib/nnc/libs/backend/interpreter/core/include/interpreter/core/Interpreter.h b/contrib/nnc/libs/backend/interpreter/core/include/interpreter/core/Interpreter.h
new file mode 100644 (file)
index 0000000..4d52477
--- /dev/null
@@ -0,0 +1,65 @@
+#ifndef _NNC_BACKEND_INTERPRETER_CORE_INTERPRETER_
+#define _NNC_BACKEND_INTERPRETER_CORE_INTERPRETER_
+
+#include <functional>
+#include <map>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "nnc/core/IR/model/visitor/visitor.h"
+#include "nnc/core/IR/model/graph/ir_node.h"
+
+#include "nnc/core/linalg/Tensor.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace backend
+{
+namespace interpreter
+{
+namespace core
+{
+
+using namespace nncc::contrib::core::IR::model;
+using nncc::contrib::core::data::Index;
+using nncc::contrib::core::data::Tensor;
+
+class NNInterpreter : public Visitor
+{
+public:
+  explicit NNInterpreter() = default;
+
+  // One visit() overload per operation type: each computes the outputs of `node`
+  // and stores them so that downstream nodes can read them via var().
+  void visit(ADT::INode::Ref node, ops::ConcatOp &op) override;
+  void visit(ADT::INode::Ref node, ops::Conv2DOp &op) override;
+  void visit(ADT::INode::Ref node, ops::DepthwiseConv2DOp &op) override;
+  void visit(ADT::INode::Ref node, ops::ReluOp &op) override;
+  void visit(ADT::INode::Ref node, ops::SoftmaxOp &op) override;
+  void visit(ADT::INode::Ref node, ops::PoolOp &op) override;
+  void visit(ADT::INode::Ref node, ops::FullyConnectedOp &op) override;
+  void visit(ADT::INode::Ref node, ops::CappedReluOp &op) override;
+  void visit(ADT::INode::Ref node, ops::BiasAddOp &op) override;
+  void visit(ADT::INode::Ref node, ops::VariableOp &op) override;
+  void visit(ADT::INode::Ref node, ops::ReshapeOp &op) override;
+
+  // Binds input data to the graph input (VariableOp) node with the given name.
+  void setInput(const std::string &name, TensorVariant data);
+  // Returns the outputs computed for `node`; throws if the node has not been evaluated.
+  std::vector<TensorVariant> &getResult(ADT::INode::Ref node);
+
+  ~NNInterpreter() override = default;
+
+private:
+  // Returns the storage slot holding the output tensors of the node with the given id.
+  std::vector<TensorVariant> &var(size_t id);
+
+private:
+  // Output tensors computed per node, keyed by node id.
+  std::map<size_t, std::vector<TensorVariant>> vars;
+  // Input data registered via setInput(), keyed by input (VariableOp) name.
+  std::unordered_map<std::string, TensorVariant> data;
+};
+
+} // namespace core
+} // namespace interpreter
+} // namespace backend
+} // namespace contrib
+} // namespace nncc
+
+#endif //_NNC_BACKEND_INTERPRETER_CORE_INTERPRETER_
diff --git a/contrib/nnc/libs/backend/interpreter/core/src/Interpreter.cpp b/contrib/nnc/libs/backend/interpreter/core/src/Interpreter.cpp
new file mode 100644 (file)
index 0000000..241157d
--- /dev/null
@@ -0,0 +1,149 @@
+#include <algorithm>
+#include <stdexcept>
+
+#include "interpreter/core/Interpreter.h"
+
+#include "nnc/core/IR/model/operations/fully_connected_op.h"
+#include "nnc/core/IR/model/operations/softmax_op.h"
+#include "nnc/core/IR/model/operations/capped_relu_op.h"
+#include "nnc/core/IR/model/operations/depthwise_conv2d_op.h"
+#include "nnc/core/IR/model/operations/conv_2d_op.h"
+#include "nnc/core/IR/model/operations/pool_op.h"
+#include "nnc/core/IR/model/operations/variable_op.h"
+#include "nnc/core/IR/model/operations/relu_op.h"
+#include "nnc/core/IR/model/operations/concat_op.h"
+#include "nnc/core/IR/model/operations/bias_add_op.h"
+
+#include "interpreter/ops/Bias.h"
+#include "interpreter/ops/Concat.h"
+#include "interpreter/ops/conv_2D.h"
+#include "interpreter/ops/Depthwise_conv_2D.h"
+#include "interpreter/ops/Elementwise.h"
+#include "interpreter/ops/FullyConnected.h"
+#include "interpreter/ops/Pool.h"
+#include "interpreter/ops/Reshape.h"
+#include "interpreter/ops/Softmax.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace backend
+{
+namespace interpreter
+{
+namespace core
+{
+
+using nncc::contrib::core::data::Tensor;
+namespace ops = nncc::contrib::core::IR::model::ops;
+namespace impl = nncc::contrib::backend::interpreter::impl;
+
+std::vector<TensorVariant> &NNInterpreter::var(size_t id) { return vars[id]; }
+
+void NNInterpreter::setInput(const std::string &name, TensorVariant t) { data.emplace(name, t); }
+
+void NNInterpreter::visit(ADT::INode::Ref node, ops::VariableOp &op)
+{
+  (void)op;
+  auto it = data.find(node->getName());
+  if (it == data.end())
+  {
+    throw std::runtime_error("Can't find data for node \"" + node->getName() +
+                             "\". Was the input data set correctly?");
+  }
+  var(node->getId()) = {it->second};
+}
+
+std::vector<TensorVariant> &NNInterpreter::getResult(ADT::INode::Ref node)
+{
+  auto res = vars.find(node->getId());
+  if (res != vars.end())
+  {
+    return res->second;
+  }
+  else
+  {
+    throw std::runtime_error("No value computed for node \"" + node->getName() + "\"");
+  }
+}
+
+void NNInterpreter::visit(ADT::INode::Ref node, ops::ConcatOp &op)
+{
+  auto &operands = node->getPrevNodes();
+  std::vector<TensorVariant> ins;
+  for (auto &in : operands)
+  {
+    ins.push_back(var(in.node->getId())[in.index]);
+  }
+  var(node->getId()) = impl::Concat<float>(ins, op.getOutputShape(0), op.getAxis())();
+}
+
+void NNInterpreter::visit(ADT::INode::Ref node, ops::Conv2DOp &op)
+{
+  auto operand = node->getPrevNodes()[0];
+  var(node->getId()) = impl::Conv2D(var(operand.node->getId())[operand.index], op)();
+}
+
+void NNInterpreter::visit(ADT::INode::Ref node, ops::ReshapeOp &op)
+{
+  auto operand = node->getPrevNodes()[0];
+  auto input = var(operand.node->getId())[operand.index];
+  var(node->getId()) = impl::Reshape<float>(input, op)();
+}
+
+void NNInterpreter::visit(ADT::INode::Ref node, ops::ReluOp &op)
+{
+  auto operand = node->getPrevNodes()[0];
+  Tensor<float> input(var(operand.node->getId())[operand.index]);
+  var(node->getId()) = impl::Fill<float>(
+      op.getOutputShape(0), [&input](const Index &id) { return std::max(input.at(id), 0.0f); })();
+}
+
+void NNInterpreter::visit(ADT::INode::Ref node, ops::SoftmaxOp &op)
+{
+  auto operand = node->getPrevNodes()[0];
+  auto input = var(operand.node->getId())[operand.index];
+  var(node->getId()) = impl::Softmax(op.getInputShape(0), input, op.getAxis())();
+}
+
+void NNInterpreter::visit(ADT::INode::Ref node, ops::PoolOp &op)
+{
+  auto operand = node->getPrevNodes()[0];
+  auto input = var(operand.node->getId())[operand.index];
+  var(node->getId()) = impl::Pool(input, op)();
+}
+
+void NNInterpreter::visit(ADT::INode::Ref node, ops::FullyConnectedOp &op)
+{
+  auto operand = node->getPrevNodes()[0];
+  TensorVariant input = var(operand.node->getId())[operand.index];
+  var(node->getId()) = impl::FullyConnected<float>(input, op)();
+}
+
+void NNInterpreter::visit(ADT::INode::Ref node, ops::CappedReluOp &op)
+{
+  auto operand = node->getPrevNodes()[0];
+  Tensor<float> input(var(operand.node->getId())[operand.index]);
+  var(node->getId()) = impl::Fill<float>(op.getOutputShape(0), [&input, &op](const Index &id) {
+    return std::min(std::max(input.at(id), 0.0f), op.getCap());
+  })();
+}
+
+void NNInterpreter::visit(ADT::INode::Ref node, ops::DepthwiseConv2DOp &op)
+{
+  auto operand = node->getPrevNodes()[0];
+  TensorVariant input(var(operand.node->getId())[operand.index]);
+  var(node->getId()) = impl::DepthwiseConv2D(input, op)();
+}
+
+void NNInterpreter::visit(ADT::INode::Ref node, ops::BiasAddOp &op)
+{
+  auto operand = node->getPrevNodes()[0];
+  auto input = var(operand.node->getId())[operand.index];
+  var(node->getId()) = impl::BiasAdd(input, op.getWeights(), op.getOutputShape(0))();
+}
+
+} // namespace core
+} // namespace interpreter
+} // namespace backend
+} // namespace contrib
+} // namespace nncc