Set input and output tensor info for interpreter (#4798)
author Hyeongseok Oh/On-Device Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Thu, 21 Mar 2019 10:48:06 +0000 (19:48 +0900)
committer GitHub Enterprise <noreply-CODE@samsung.com>
Thu, 21 Mar 2019 10:48:06 +0000 (19:48 +0900)
- Set input and output tensor info for interpreter: setInput/setOutput now record a TensorInfo per operand and validate the buffer length (contract sketched below)
- Fix TensorInfo copy constructor bug: call the shape()/typeInfo() accessors instead of naming them
- Remove sink/source implementation from the interpreter's ExecManager

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
runtimes/neurun/core/src/exec/interp/ExecManager.cc
runtimes/neurun/core/src/exec/interp/ExecManager.h
runtimes/neurun/core/src/exec/interp/TensorInfo.h
runtimes/neurun/test/interp/ExecManager.cc

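The bullets above change ExecManager's contract: setInput and setOutput no longer wire up Source/Sink objects, they record a TensorInfo per operand and reject buffers smaller than the tensor's total byte size. Below is a minimal standalone sketch of that contract, assuming 4-byte (float32/int32) elements; TensorInfoSketch and set_io are illustrative stand-ins, not neurun types:

  #include <cstddef>
  #include <cstdint>
  #include <map>
  #include <stdexcept>
  #include <vector>

  // Stand-in for neurun's TensorInfo: shape dims plus an assumed 4-byte
  // element size (float32/int32).
  struct TensorInfoSketch
  {
    std::vector<int32_t> dims;
    size_t total_size() const
    {
      size_t count = 1;
      for (auto d : dims)
        count *= static_cast<size_t>(d);
      return count * 4; // assumed element size
    }
  };

  // Mirrors the new setInput/setOutput behavior: store the info, then throw
  // if the caller's buffer cannot hold the whole tensor.
  void set_io(std::map<uint32_t, TensorInfoSketch> &info_map, uint32_t operand_index,
              const TensorInfoSketch &info, const void *, size_t length)
  {
    info_map.insert({operand_index, info});
    if (length < info.total_size())
    {
      throw std::runtime_error{"Too small length"};
    }
    // TODO in the real code: make an interpreter tensor from buffer and info
  }

  int main()
  {
    std::map<uint32_t, TensorInfoSketch> info_map;
    const int32_t buffer[4] = {1, 0, -1, -2};
    set_io(info_map, 0, TensorInfoSketch{{1, 2, 2, 1}}, buffer, sizeof(buffer)); // 16 bytes: ok
    return 0;
  }

Lengths of 4 or 12 would throw here, which is exactly what the new tests at the bottom of this commit assert.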
runtimes/neurun/core/src/exec/interp/ExecManager.cc
index 5873d2d..0e9c5d4 100644 (file)
@@ -25,69 +25,34 @@ namespace interp
 
 void ExecManager::setInput(const neurun::model::operand::IO::Index &index,
                            const neurun::model::operand::TypeInfo &type,
-                           const neurun::model::operand::Shape &shape, const void *buffer,
-                           size_t length)
+                           const neurun::model::operand::Shape &shape, const void *, size_t length)
 {
-  using ::neurun::model::operand::DataType;
-  switch (type.type())
+  const auto input_index = _model->inputs.at(index);
+  const TensorInfo info{shape, type};
+  _tensor_info_map.insert({input_index, info});
+
+  if (length < info.total_size())
   {
-    case DataType::SCALAR_FLOAT32:
-    case DataType::TENSOR_FLOAT32:
-      source<Source<float>>(index, reinterpret_cast<const float *>(buffer), length);
-      break;
-    case DataType::SCALAR_INT32:
-    case DataType::TENSOR_INT32:
-      source<Source<int32_t>>(index, reinterpret_cast<const int32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_UINT32:
-      source<Source<uint32_t>>(index, reinterpret_cast<const uint32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_BOOL:
-    case DataType::TENSOR_BOOL8:
-    case DataType::TENSOR_QUANT8_ASYMM:
-      source<Source<uint8_t>>(index, reinterpret_cast<const uint8_t *>(buffer), length);
-      break;
-    default:
-      throw std::runtime_error("Not supported, yet");
-      break;
+    throw std::runtime_error{"Too small length"};
   }
 
-  const auto input_index = _model->inputs.at(index);
-  const auto info = compiler::TensorInfo(shape, type);
-  _tensor_info_map.insert({input_index, info});
+  // TODO Make interpreter tensor using buffer and info
 }
 
 void ExecManager::setOutput(const neurun::model::operand::IO::Index &index,
                             const neurun::model::operand::TypeInfo &type,
-                            const neurun::model::operand::Shape &shape, void *buffer, size_t length)
+                            const neurun::model::operand::Shape &shape, void *, size_t length)
 {
-  using ::neurun::model::operand::DataType;
-  switch (type.type())
+  const auto output_index = _model->outputs.at(index);
+  const TensorInfo info{shape, type};
+  _tensor_info_map.insert({output_index, info});
+
+  if (length < info.total_size())
   {
-    case DataType::SCALAR_FLOAT32:
-    case DataType::TENSOR_FLOAT32:
-      sink<Sink<float>>(index, reinterpret_cast<float *>(buffer), length);
-      break;
-    case DataType::SCALAR_INT32:
-    case DataType::TENSOR_INT32:
-      sink<Sink<int32_t>>(index, reinterpret_cast<int32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_UINT32:
-      sink<Sink<uint32_t>>(index, reinterpret_cast<uint32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_BOOL:
-    case DataType::TENSOR_BOOL8:
-    case DataType::TENSOR_QUANT8_ASYMM:
-      sink<Sink<uint8_t>>(index, reinterpret_cast<uint8_t *>(buffer), length);
-      break;
-    default:
-      throw std::runtime_error("Not supported, yet");
-      break;
+    throw std::runtime_error{"Too small length"};
   }
 
-  const auto output_index = _model->outputs.at(index);
-  const auto info = compiler::TensorInfo(shape, type);
-  _tensor_info_map.insert({output_index, info});
+  // TODO Make interpreter tensor using buffer and info
 }
 
 void ExecManager::execute(void) { throw std::runtime_error{"NYI: ExecManager execute"}; }
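Note that both methods now take an unnamed buffer pointer: the buffer is deliberately ignored until the TODO above is resolved. Presumably a follow-up wraps buffer and info into an interpreter-side tensor; a purely hypothetical sketch of such a read-only wrapper (the name and interface are assumptions, not part of this commit):

  #include <cstddef>
  #include <cstdint>

  // Hypothetical read-only tensor for the interpreter: pairs the user's
  // buffer with its byte size. The real class is left for a later commit.
  class ROTensorSketch
  {
  public:
    ROTensorSketch(const void *buffer, size_t size) : _buffer{buffer}, _size{size} {}
    const uint8_t *bufferRO() const { return static_cast<const uint8_t *>(_buffer); }
    size_t total_size() const { return _size; }

  private:
    const void *_buffer;
    size_t _size;
  };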
runtimes/neurun/core/src/exec/interp/ExecManager.h
index 8ddbf5e..d3021bb 100644 (file)
 #define __NEURUN_EXEC_INTERP_EXEC_MANAGER_H_
 
 #include "model/operand/IndexMap.h"
-#include "compiler/TensorInfo.h"
 #include "exec/IExecutor.h"
-#include "exec/Source.h"
-#include "exec/Sink.h"
+#include "TensorInfo.h"
 
 namespace neurun
 {
@@ -79,25 +77,9 @@ public:
   void execute(void);
 
 private:
-  template <typename T, typename... Args>
-  void source(const neurun::model::operand::IO::Index &index, Args &&... args)
-  {
-    _sources.at(index.value()) = std::move(std::unique_ptr<T>{new T{std::forward<Args>(args)...}});
-  }
-  template <typename T, typename... Args>
-  void sink(const neurun::model::operand::IO::Index &index, Args &&... args)
-  {
-    _sinks.at(index.value()) = std::move(std::unique_ptr<T>{new T{std::forward<Args>(args)...}});
-  }
-
-private:
   std::shared_ptr<const model::Model> _model;
-  // TODO use own TensorInfo instead of using compiler's TensorInfo struct
-  //      or define independent TensorInfo struct to use both compiler and interpreter
   // TODO use execution environment to handle tensor for each inference
-  model::operand::IndexMap<compiler::TensorInfo> _tensor_info_map;
-  std::vector<std::unique_ptr<ISource>> _sources;
-  std::vector<std::unique_ptr<ISink>> _sinks;
+  model::operand::IndexMap<TensorInfo> _tensor_info_map;
 };
 
 } // namespace interp
runtimes/neurun/core/src/exec/interp/TensorInfo.h
index 167f83a..8dfb5a0 100644 (file)
@@ -59,7 +59,7 @@ public:
    * @brief     Construct a new Tensor Info object
    * @param[in] origin Tensor info for copy
    */
-  TensorInfo(const TensorInfo &origin) : _shape(origin.shape), _typeInfo(origin.typeInfo)
+  TensorInfo(const TensorInfo &origin) : _shape(origin.shape()), _typeInfo(origin.typeInfo())
   {
     // DO NOTHING
   }
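This hunk is the "TensorInfo copy constructor bug" fix from the commit message: shape and typeInfo are accessor functions, so naming them without parentheses in the old initializer list was ill-formed (a non-static member function cannot be used as a value). A toy reproduction of the bug and the fix:

  struct Toy
  {
    explicit Toy(int v) : _v{v} {}
    // Toy(const Toy &o) : _v{o.v} {}   // old pattern: error, 'v' names a member function
    Toy(const Toy &o) : _v{o.v()} {} // fixed pattern: call the accessor
    int v() const { return _v; }

  private:
    int _v;
  };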
runtimes/neurun/test/interp/ExecManager.cc
index f710ef1..233b12a 100644 (file)
@@ -42,8 +42,6 @@ protected:
     // model output: add result
     // lhs, rhs, result shape: {1, 2, 2, 1}
     // activation: none (constant)
-    ::neurun::graph::Graph graph;
-
     operand::Shape shape{4};
     shape.dim(0) = 1;
     shape.dim(1) = 2;
@@ -82,6 +80,7 @@ protected:
   }
   virtual void TearDown() { _executor = nullptr; }
 
+  ::neurun::graph::Graph graph{};
   std::unique_ptr<ExecManager> _executor{nullptr};
   const int32_t _activation_value{0};
 };
@@ -98,4 +97,48 @@ TEST_F(InterpExecManagerTest, create_simple)
   ASSERT_NE(_executor, nullptr);
 }
 
+TEST_F(InterpExecManagerTest, setInput)
+{
+  CreateSimpleModel();
+
+  auto input1 = operand::IO::Index{0};
+  auto input1_idx = graph.getInputs().at(input1);
+
+  auto input1_type = graph.operands().at(input1_idx).typeInfo();
+  auto input1_shape = graph.operands().at(input1_idx).shape();
+
+  const int32_t input1_buffer[4] = {1, 0, -1, -2};
+
+  EXPECT_THROW(_executor->setInput(input1, input1_type, input1_shape,
+                                   reinterpret_cast<const void *>(input1_buffer), 4),
+               std::runtime_error);
+  EXPECT_THROW(_executor->setInput(input1, input1_type, input1_shape,
+                                   reinterpret_cast<const void *>(input1_buffer), 12),
+               std::runtime_error);
+  EXPECT_NO_THROW(_executor->setInput(input1, input1_type, input1_shape,
+                                      reinterpret_cast<const void *>(input1_buffer), 16));
+}
+
+TEST_F(InterpExecManagerTest, setOutput)
+{
+  CreateSimpleModel();
+
+  auto output = operand::IO::Index{0};
+  auto output_idx = graph.getOutputs().at(output);
+
+  auto output_type = graph.operands().at(output_idx).typeInfo();
+  auto output_shape = graph.operands().at(output_idx).shape();
+
+  int32_t input1_buffer[4] = {};
+
+  EXPECT_THROW(_executor->setOutput(output, output_type, output_shape,
+                                    reinterpret_cast<void *>(input1_buffer), 4),
+               std::runtime_error);
+  EXPECT_THROW(_executor->setOutput(output, output_type, output_shape,
+                                    reinterpret_cast<void *>(input1_buffer), 12),
+               std::runtime_error);
+  EXPECT_NO_THROW(_executor->setOutput(output, output_type, output_shape,
+                                       reinterpret_cast<void *>(input1_buffer), 16));
+}
+
 } // namespace