[nnkit] Add TensorSet class for managing TensorData of onnx model (#3427)
author윤지영/On-Device Lab(SR)/Staff Engineer/삼성전자 <jy910.yun@samsung.com>
Fri, 10 May 2019 05:52:43 +0000 (14:52 +0900)
committer박종현/On-Device Lab(SR)/Staff Engineer/삼성전자 <jh1302.park@samsung.com>
Fri, 10 May 2019 05:52:43 +0000 (14:52 +0900)
This patch adds TensorSet class.
It also supports allocating and releasing the input and output tensor data.

Signed-off-by: Jiyoung Yun <jy910.yun@samsung.com>
contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/Runner.h
contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/TensorSet.h [new file with mode: 0644]
contrib/nnkit/libs/support/onnx/src/Backend.cpp
contrib/nnkit/libs/support/onnx/src/Runner.cpp

index 835cf14..f6df77b 100644 (file)
@@ -18,6 +18,7 @@
 #define __NNKIT_SUPPORT_ONNX_RUNNER_H__
 
 #include "nnkit/support/onnx/Allocator.h"
+#include "nnkit/support/onnx/TensorSet.h"
 
 #include <onnxruntime_c_api.h>
 
@@ -36,6 +37,9 @@ public:
   Runner(const std::string &path);
   ~Runner(void);
 
+  void prepareInputs(void);
+  void prepareOutputs(void);
+
 public:
   // Disallow copy
   Runner(const Runner &) = delete;
@@ -46,6 +50,9 @@ private:
   OrtSession *_session;
 
   std::unique_ptr<Allocator> _allocator;
+
+  std::unique_ptr<TensorSet> _inputs;
+  std::unique_ptr<TensorSet> _outputs;
 };
 
 } // namespace onnx
diff --git a/contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/TensorSet.h b/contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/TensorSet.h
new file mode 100644 (file)
index 0000000..61dd9f2
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNKIT_SUPPORT_ONNX_TENSOR_SET_H__
+#define __NNKIT_SUPPORT_ONNX_TENSOR_SET_H__
+
+#include "nnkit/support/onnx/Allocator.h"
+#include "nnkit/support/onnx/Status.h"
+
+#include <onnxruntime_c_api.h>
+
+#include <string>
+#include <vector>
+#include <cassert>
+
+namespace nnkit
+{
+namespace support
+{
+namespace onnx
+{
+
+// TensorSet owns a fixed-size collection of ONNX Runtime tensors (OrtValue *)
+// together with their per-tensor metadata (name, element type, dimensions).
+// Tensors are created lazily via set() and released in the destructor.
+//
+// NOTE(review): the class owns raw OrtValue* handles but does not delete its
+//               copy constructor/assignment; copying a TensorSet would lead to
+//               a double OrtReleaseValue - consider deleting copy operations.
+class TensorSet final
+{
+public:
+  // 'allocator' is borrowed (not owned) and must outlive this TensorSet.
+  // 'nums' fixes the number of slots; every slot starts with a null tensor.
+  TensorSet(Allocator *allocator, size_t nums)
+      : _allocator(allocator), _names(nums), _types(nums), _dims(nums), _tensors(nums, nullptr)
+  {
+    // DO NOTHING
+  }
+
+  // Releases every tensor created by set(). OrtReleaseValue on the remaining
+  // nullptr slots is presumably a no-op - confirm against the ORT C API.
+  ~TensorSet(void)
+  {
+    for (auto it : _tensors)
+    {
+      OrtReleaseValue(it);
+    }
+  }
+
+  // Records metadata for slot 'index' and allocates the backing tensor.
+  // Throws (via Status::throwOnError) if tensor creation fails.
+  //
+  // NOTE(review): calling set() twice on the same index would leak the first
+  //               OrtValue - verify callers only set each slot once.
+  void set(size_t index, const std::string &name, ONNXTensorElementDataType type,
+           const std::vector<size_t> &dims)
+  {
+    _names[index] = name;
+    _types[index] = type;
+    _dims[index] = dims;
+
+    Status status;
+
+    // NOTE(review): dims.data() is size_t*; confirm this ORT version's
+    //               OrtCreateTensorAsOrtValue takes size_t* (newer APIs use int64_t*).
+    status =
+        OrtCreateTensorAsOrtValue(_allocator, dims.data(), dims.size(), type, &_tensors[index]);
+    status.throwOnError();
+
+    assert(OrtIsTensor(_tensors[index]));
+  }
+
+private:
+  Allocator *_allocator; // borrowed; used for tensor storage allocation
+
+  std::vector<std::string> _names;                // tensor names (parallel arrays by index)
+  std::vector<ONNXTensorElementDataType> _types;  // element types
+  std::vector<std::vector<size_t>> _dims;         // shapes
+  std::vector<OrtValue *> _tensors;               // owned ORT tensor handles
+};
+
+} // namespace onnx
+} // namespace support
+} // namespace nnkit
+
+#endif // __NNKIT_SUPPORT_ONNX_TENSOR_SET_H__
index 8d8ec8e..27ccf6f 100644 (file)
@@ -25,7 +25,9 @@ namespace onnx
 
 void Backend::prepare(const std::function<void(nnkit::TensorContext &)> &f)
 {
-  throw std::runtime_error{"NYI"};
+  // Prepare input and output tensors
+  // NOTE(review): the user callback 'f' is not invoked here, so input data
+  //               initialization appears to be still TODO - confirm against
+  //               the nnkit Backend contract before relying on this.
+  _runner.prepareInputs();
+  _runner.prepareOutputs();
 }
 
 void Backend::run(void) { throw std::runtime_error{"NYI"}; }
index cb40c5d..e73f073 100644 (file)
@@ -51,6 +51,110 @@ Runner::~Runner(void)
   }
 }
 
+// Queries the ONNX session for its input nodes and allocates one tensor per
+// input in '_inputs'. Dynamic (unknown) dimensions are forced to 1.
+// Throws (via Status::throwOnError) on any ORT API failure.
+void Runner::prepareInputs(void)
+{
+  Status status;
+
+  // Must be called at most once per Runner instance.
+  assert(_inputs == nullptr);
+
+  size_t num_input_nodes;
+  status = OrtSessionGetInputCount(_session, &num_input_nodes);
+  status.throwOnError();
+
+  _inputs = stdex::make_unique<TensorSet>(_allocator.get(), num_input_nodes);
+
+  for (size_t i = 0; i < num_input_nodes; ++i)
+  {
+    // The name buffer is allocated by onnxruntime through our allocator;
+    // copy it into a std::string, then return the buffer immediately.
+    char *input_name;
+    status = OrtSessionGetInputName(_session, i, _allocator.get(), &input_name);
+    status.throwOnError();
+
+    assert(input_name != nullptr);
+
+    std::string name{input_name};
+    _allocator->Free(input_name);
+
+    OrtTypeInfo *typeinfo;
+    status = OrtSessionGetInputTypeInfo(_session, i, &typeinfo);
+    status.throwOnError();
+
+    // The cast view is borrowed from 'typeinfo'; no separate release needed.
+    const OrtTensorTypeAndShapeInfo *tensor_info = OrtCastTypeInfoToTensorInfo(typeinfo);
+    ONNXTensorElementDataType type = OrtGetTensorElementType(tensor_info);
+
+    uint32_t num_dims = OrtGetNumOfDimensions(tensor_info);
+    std::vector<size_t> dims(num_dims);
+    // NOTE(review): int64_t dimensions are written into a size_t buffer via a
+    //               cast; this assumes sizeof(size_t) == sizeof(int64_t).
+    //               Verify behavior on 32-bit targets.
+    OrtGetDimensions(tensor_info, (int64_t *)dims.data(), num_dims);
+
+    // NOTE To run OnnxRuntime, the total size of input tensor must be fixed.
+    //      In the present code, the unknown shape that is -1 is arbitrarily changed to 1.
+    //
+    // TODO Add user argument related to unknown shape
+    //
+    // (A dynamic dimension reported as -1 wraps to SIZE_MAX in the size_t
+    //  vector; the '== -1' comparison matches it via unsigned conversion.)
+    for (uint32_t j = 0; j < num_dims; ++j)
+    {
+      if (dims[j] == -1)
+      {
+        dims[j] = 1;
+      }
+    }
+    // 'typeinfo' is owned by us and must be released after use.
+    OrtReleaseTypeInfo(typeinfo);
+
+    _inputs->set(i, name, type, dims);
+  }
+}
+
+// Queries the ONNX session for its output nodes and allocates one tensor per
+// output in '_outputs'. Dynamic (unknown) dimensions are forced to 1.
+// Throws (via Status::throwOnError) on any ORT API failure.
+//
+// NOTE(review): this mirrors prepareInputs() almost line-for-line; a shared
+//               private helper parameterized on the Input/Output API calls
+//               would remove the duplication - consider for a follow-up.
+void Runner::prepareOutputs(void)
+{
+  Status status;
+
+  // Must be called at most once per Runner instance.
+  assert(_outputs == nullptr);
+
+  size_t num_output_nodes;
+  status = OrtSessionGetOutputCount(_session, &num_output_nodes);
+  status.throwOnError();
+
+  _outputs = stdex::make_unique<TensorSet>(_allocator.get(), num_output_nodes);
+
+  for (size_t i = 0; i < num_output_nodes; ++i)
+  {
+    // The name buffer is allocated by onnxruntime through our allocator;
+    // copy it into a std::string, then return the buffer immediately.
+    char *output_name;
+    status = OrtSessionGetOutputName(_session, i, _allocator.get(), &output_name);
+    status.throwOnError();
+
+    assert(output_name != nullptr);
+
+    std::string name{output_name};
+    _allocator->Free(output_name);
+
+    OrtTypeInfo *typeinfo;
+    status = OrtSessionGetOutputTypeInfo(_session, i, &typeinfo);
+    status.throwOnError();
+
+    // The cast view is borrowed from 'typeinfo'; no separate release needed.
+    const OrtTensorTypeAndShapeInfo *tensor_info = OrtCastTypeInfoToTensorInfo(typeinfo);
+    ONNXTensorElementDataType type = OrtGetTensorElementType(tensor_info);
+
+    uint32_t num_dims = OrtGetNumOfDimensions(tensor_info);
+    std::vector<size_t> dims(num_dims);
+    // NOTE(review): int64_t dimensions are written into a size_t buffer via a
+    //               cast; this assumes sizeof(size_t) == sizeof(int64_t).
+    //               Verify behavior on 32-bit targets.
+    OrtGetDimensions(tensor_info, (int64_t *)dims.data(), num_dims);
+
+    // NOTE To run OnnxRuntime, the total size of output tensor must be fixed.
+    //      In the present code, the unknown shape that is -1 is arbitrarily changed to 1.
+    //
+    // TODO Add user argument related to unknown shape
+    //
+    // (A dynamic dimension reported as -1 wraps to SIZE_MAX in the size_t
+    //  vector; the '== -1' comparison matches it via unsigned conversion.)
+    for (uint32_t j = 0; j < num_dims; ++j)
+    {
+      if (dims[j] == -1)
+      {
+        dims[j] = 1;
+      }
+    }
+    // 'typeinfo' is owned by us and must be released after use.
+    OrtReleaseTypeInfo(typeinfo);
+
+    _outputs->set(i, name, type, dims);
+  }
+}
+
 } // namespace onnx
 } // namespace support
 } // namespace nnkit