[nnkit] Run onnx model using onnx runtime (#3432)
author Jiyoung Yun/On-Device Lab(SR)/Staff Engineer/Samsung Electronics <jy910.yun@samsung.com>
Fri, 10 May 2019 08:11:10 +0000 (17:11 +0900)
committer Jonghyun Park/On-Device Lab(SR)/Staff Engineer/Samsung Electronics <jh1302.park@samsung.com>
Fri, 10 May 2019 08:11:10 +0000 (17:11 +0900)
This patch adds a TensorContext class so that the backend can run pre and post actions: prepare() now exposes the input tensors to the pre action, and teardown() exposes the output tensors to the post action.

Signed-off-by: Jiyoung Yun <jy910.yun@samsung.com>
contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/Runner.h
contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/TensorContext.h [new file with mode: 0644]
contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/TensorSet.h
contrib/nnkit/libs/support/onnx/src/Backend.cpp
contrib/nnkit/libs/support/onnx/src/Runner.cpp

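For orientation: with this change, an nnkit driver can exercise the ONNX backend roughly as sketched below. The `backend` object and the two lambdas are illustrative placeholders only; what the patch itself provides is that prepare() hands the input tensors to the pre action, run() executes the session, and teardown() hands the output tensors to the post action (see Backend.cpp below).

    // Hypothetical driver flow; backend construction omitted.
    backend->prepare([](nnkit::TensorContext &ctx) {
      // pre action: fill the input tensors
      // (see the fill sketch after TensorContext.h below)
    });
    backend->run(); // runs the ONNX Runtime session via Runner::run
    backend->teardown([](nnkit::TensorContext &ctx) {
      // post action: read the output tensors
    });
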
diff --git a/contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/Runner.h b/contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/Runner.h
index f6df77b..f1b7b54 100644 (file)
@@ -40,6 +40,11 @@ public:
   void prepareInputs(void);
   void prepareOutputs(void);
 
+  TensorSet &inputs(void) { return *_inputs; }
+  TensorSet &outputs(void) { return *_outputs; }
+
+  void run(void);
+
 public:
   // Disallow copy
   Runner(const Runner &) = delete;
diff --git a/contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/TensorContext.h b/contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/TensorContext.h
new file mode 100644 (file)
index 0000000..d76ed0e
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNKIT_SUPPORT_ONNX_TENSOR_CONTEXT_H__
+#define __NNKIT_SUPPORT_ONNX_TENSOR_CONTEXT_H__
+
+#include "nnkit/support/onnx/TensorSet.h"
+
+#include <nnkit/TensorContext.h>
+
+#include <nncc/core/ADT/tensor/LexicalLayout.h>
+#include <nncc/core/ADT/tensor/Overlay.h>
+
+namespace nnkit
+{
+namespace support
+{
+namespace onnx
+{
+
+class TensorContext final : public nnkit::TensorContext
+{
+public:
+  TensorContext(TensorSet &tensors) : _tensors(tensors)
+  {
+    // DO NOTHING
+  }
+
+  uint32_t size(void) const override { return _tensors.size(); }
+
+  std::string name(uint32_t n) const override { return std::string{_tensors.name(n)}; }
+
+  nncc::core::ADT::tensor::Shape shape(uint32_t n) const override
+  {
+    const std::vector<size_t> &dims = _tensors.dim(n);
+
+    nncc::core::ADT::tensor::Shape shape;
+    shape.resize(dims.size());
+    for (size_t i = 0; i < dims.size(); ++i)
+    {
+      shape.dim(i) = dims[i];
+    }
+    return shape;
+  }
+
+  bool isFloatTensor(uint32_t n) const override
+  {
+    return (_tensors.type(n) == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT);
+  }
+
+  void getMutableFloatTensor(uint32_t n, const TensorContext::TypedAccessor<float> &f) override
+  {
+    if (_tensors.type(n) != ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT)
+    {
+      throw std::runtime_error{"type mismatch"};
+    }
+
+    using nncc::core::ADT::tensor::LexicalLayout;
+    using nncc::core::ADT::tensor::make_overlay;
+
+    Status status;
+
+    OrtValue *base = _tensors.mutable_tensor(n);
+    float *data;
+
+    status = OrtGetTensorMutableData(base, (void **)&data);
+    status.throwOnError();
+
+    auto overlay = make_overlay<float, LexicalLayout>(shape(n), data);
+
+    f(*this, n, overlay);
+  }
+
+  void getConstFloatTensor(uint32_t n, const TensorContext::TypedReader<float> &f) const override
+  {
+    if (_tensors.type(n) != ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT)
+    {
+      throw std::runtime_error{"type mismatch"};
+    }
+
+    using nncc::core::ADT::tensor::LexicalLayout;
+    using nncc::core::ADT::tensor::make_overlay;
+
+    Status status;
+
+    OrtValue *base = _tensors.mutable_tensor(n);
+    float *data;
+
+    status = OrtGetTensorMutableData(base, (void **)&data);
+    status.throwOnError();
+
+    auto overlay = make_overlay<float, LexicalLayout>(shape(n), data);
+
+    f(*this, n, overlay);
+  }
+
+private:
+  TensorSet &_tensors;
+};
+
+} // namespace onnx
+} // namespace support
+} // namespace nnkit
+
+#endif // __NNKIT_SUPPORT_ONNX_TENSOR_CONTEXT_H__
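
As a usage sketch for the new class (not part of this patch): a pre action that zero-fills every float input could look as follows. The accessor signature is inferred from the `f(*this, n, overlay)` calls above, and `IndexEnumerator` is assumed to come from nncc's tensor ADT; the helper name is hypothetical and exact types may differ.

    #include <nncc/core/ADT/tensor/IndexEnumerator.h>

    void fill_inputs_with_zero(nnkit::TensorContext &ctx)
    {
      using nncc::core::ADT::tensor::Accessor;
      using nncc::core::ADT::tensor::IndexEnumerator;

      for (uint32_t n = 0; n < ctx.size(); ++n)
      {
        if (!ctx.isFloatTensor(n))
          continue; // getMutableFloatTensor would throw "type mismatch"

        ctx.getMutableFloatTensor(
            n, [](const nnkit::TensorContext &ctx, uint32_t n, Accessor<float> &t) {
              // Enumerate every index in the tensor's shape and zero it
              for (IndexEnumerator e{ctx.shape(n)}; e.valid(); e.advance())
              {
                t.at(e.current()) = 0.0f;
              }
            });
      }
    }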
diff --git a/contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/TensorSet.h b/contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/TensorSet.h
index 61dd9f2..b38fc9b 100644 (file)
@@ -66,6 +66,21 @@ public:
     assert(OrtIsTensor(_tensors[index]));
   }
 
+  size_t size(void) { return _names.size(); }
+
+  const char *name(size_t index) { return _names[index].c_str(); }
+  const std::vector<std::string> &names(void) { return _names; }
+
+  ONNXTensorElementDataType type(size_t index) { return _types[index]; }
+
+  const std::vector<size_t> &dim(size_t index) { return _dims[index]; }
+
+  const OrtValue *tensor(size_t index) { return _tensors[index]; }
+  const std::vector<OrtValue *> &tensors(void) { return _tensors; }
+
+  OrtValue *mutable_tensor(size_t index) { return _tensors[index]; }
+  std::vector<OrtValue *> &mutable_tensors(void) { return _tensors; }
+
 private:
   Allocator *_allocator;
 
diff --git a/contrib/nnkit/libs/support/onnx/src/Backend.cpp b/contrib/nnkit/libs/support/onnx/src/Backend.cpp
index 27ccf6f..af9e54c 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include "nnkit/support/onnx/Backend.h"
+#include "nnkit/support/onnx/TensorContext.h"
 
 namespace nnkit
 {
@@ -28,13 +29,17 @@ void Backend::prepare(const std::function<void(nnkit::TensorContext &)> &f)
   // Prepare input and output tensors
   _runner.prepareInputs();
   _runner.prepareOutputs();
+
+  TensorContext ctx(_runner.inputs());
+  f(ctx);
 }
 
-void Backend::run(void) { throw std::runtime_error{"NYI"}; }
+void Backend::run(void) { _runner.run(); }
 
 void Backend::teardown(const std::function<void(nnkit::TensorContext &)> &f)
 {
-  throw std::runtime_error{"NYI"};
+  TensorContext ctx(_runner.outputs());
+  f(ctx);
 }
 
 } // namespace onnx
diff --git a/contrib/nnkit/libs/support/onnx/src/Runner.cpp b/contrib/nnkit/libs/support/onnx/src/Runner.cpp
index e73f073..bc6a81a 100644 (file)
@@ -155,6 +155,29 @@ void Runner::prepareOutputs(void)
   }
 }
 
+void Runner::run(void)
+{
+  Status status;
+
+  auto pinput_names = _inputs->names();
+  std::vector<const char *> input_names(pinput_names.size());
+  for (size_t i = 0; i < pinput_names.size(); ++i)
+  {
+    input_names[i] = pinput_names[i].c_str();
+  }
+
+  auto poutput_names = _outputs->names();
+  std::vector<const char *> output_names(poutput_names.size());
+  for (size_t i = 0; i < poutput_names.size(); ++i)
+  {
+    output_names[i] = poutput_names[i].c_str();
+  }
+
+  status = OrtRun(_session, NULL, input_names.data(), _inputs->tensors().data(), _inputs->size(),
+                  output_names.data(), _outputs->size(), _outputs->mutable_tensors().data());
+  status.throwOnError();
+}
+
 } // namespace onnx
 } // namespace support
 } // namespace nnkit
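
A note on Runner::run above: the ONNX Runtime C API takes raw arrays of C strings, hence the copy from `std::vector<std::string>` into `std::vector<const char *>`. The `auto` copies of `names()` also keep the `std::string` objects alive for the duration of the OrtRun call, so the `c_str()` pointers stay valid. The same pattern as a standalone sketch (hypothetical helper, not part of this patch):

    #include <string>
    #include <vector>

    // Builds a parallel array of C-string pointers for a C API call.
    // The pointers are valid only while `names` is alive and unmodified.
    std::vector<const char *> to_cstrs(const std::vector<std::string> &names)
    {
      std::vector<const char *> out;
      out.reserve(names.size());
      for (const auto &name : names)
      {
        out.push_back(name.c_str());
      }
      return out;
    }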