From 357a3adcb7991367e869a1f997038d7523af748c Mon Sep 17 00:00:00 2001 From: =?utf8?q?=EC=9C=A4=EC=A7=80=EC=98=81/On-Device=20Lab=28SR=29/Staff?= =?utf8?q?=20Engineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?= Date: Fri, 10 May 2019 14:52:43 +0900 Subject: [PATCH] [nnkit] Add TensorSet class for managing TensorData of onnx model (#3427) This patch adds a TensorSet class. It also supports allocating and releasing the input and output tensor data. Signed-off-by: Jiyoung Yun --- .../onnx/include/nnkit/support/onnx/Runner.h | 7 ++ .../onnx/include/nnkit/support/onnx/TensorSet.h | 82 ++++++++++++++++ contrib/nnkit/libs/support/onnx/src/Backend.cpp | 4 +- contrib/nnkit/libs/support/onnx/src/Runner.cpp | 104 +++++++++++++++++++++ 4 files changed, 196 insertions(+), 1 deletion(-) create mode 100644 contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/TensorSet.h diff --git a/contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/Runner.h b/contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/Runner.h index 835cf14..f6df77b 100644 --- a/contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/Runner.h +++ b/contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/Runner.h @@ -18,6 +18,7 @@ #define __NNKIT_SUPPORT_ONNX_RUNNER_H__ #include "nnkit/support/onnx/Allocator.h" +#include "nnkit/support/onnx/TensorSet.h" #include @@ -36,6 +37,9 @@ public: Runner(const std::string &path); ~Runner(void); + void prepareInputs(void); + void prepareOutputs(void); + public: // Disallow copy Runner(const Runner &) = delete; @@ -46,6 +50,9 @@ private: OrtSession *_session; std::unique_ptr _allocator; + + std::unique_ptr _inputs; + std::unique_ptr _outputs; }; } // namespace onnx diff --git a/contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/TensorSet.h b/contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/TensorSet.h new file mode 100644 index 0000000..61dd9f2 --- /dev/null +++ b/contrib/nnkit/libs/support/onnx/include/nnkit/support/onnx/TensorSet.h @@ -0,0 
+1,82 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __NNKIT_SUPPORT_ONNX_TENSOR_SET_H__ +#define __NNKIT_SUPPORT_ONNX_TENSOR_SET_H__ + +#include "nnkit/support/onnx/Allocator.h" +#include "nnkit/support/onnx/Status.h" + +#include + +#include +#include +#include + +namespace nnkit +{ +namespace support +{ +namespace onnx +{ + +class TensorSet final +{ +public: + TensorSet(Allocator *allocator, size_t nums) + : _allocator(allocator), _names(nums), _types(nums), _dims(nums), _tensors(nums, nullptr) + { + // DO NOTHING + } + + ~TensorSet(void) + { + for (auto it : _tensors) + { + OrtReleaseValue(it); + } + } + + void set(size_t index, const std::string &name, ONNXTensorElementDataType type, + const std::vector &dims) + { + _names[index] = name; + _types[index] = type; + _dims[index] = dims; + + Status status; + + status = + OrtCreateTensorAsOrtValue(_allocator, dims.data(), dims.size(), type, &_tensors[index]); + status.throwOnError(); + + assert(OrtIsTensor(_tensors[index])); + } + +private: + Allocator *_allocator; + + std::vector _names; + std::vector _types; + std::vector> _dims; + std::vector _tensors; +}; + +} // namespace onnx +} // namespace support +} // namespace nnkit + +#endif // __NNKIT_SUPPORT_ONNX_TENSOR_SET_H__ diff --git a/contrib/nnkit/libs/support/onnx/src/Backend.cpp b/contrib/nnkit/libs/support/onnx/src/Backend.cpp index 8d8ec8e..27ccf6f 100644 
--- a/contrib/nnkit/libs/support/onnx/src/Backend.cpp +++ b/contrib/nnkit/libs/support/onnx/src/Backend.cpp @@ -25,7 +25,9 @@ namespace onnx void Backend::prepare(const std::function &f) { - throw std::runtime_error{"NYI"}; + // Prepare input and output tensors + _runner.prepareInputs(); + _runner.prepareOutputs(); } void Backend::run(void) { throw std::runtime_error{"NYI"}; } diff --git a/contrib/nnkit/libs/support/onnx/src/Runner.cpp b/contrib/nnkit/libs/support/onnx/src/Runner.cpp index cb40c5d..e73f073 100644 --- a/contrib/nnkit/libs/support/onnx/src/Runner.cpp +++ b/contrib/nnkit/libs/support/onnx/src/Runner.cpp @@ -51,6 +51,110 @@ Runner::~Runner(void) } } +void Runner::prepareInputs(void) +{ + Status status; + + assert(_inputs == nullptr); + + size_t num_input_nodes; + status = OrtSessionGetInputCount(_session, &num_input_nodes); + status.throwOnError(); + + _inputs = stdex::make_unique(_allocator.get(), num_input_nodes); + + for (size_t i = 0; i < num_input_nodes; ++i) + { + char *input_name; + status = OrtSessionGetInputName(_session, i, _allocator.get(), &input_name); + status.throwOnError(); + + assert(input_name != nullptr); + + std::string name{input_name}; + _allocator->Free(input_name); + + OrtTypeInfo *typeinfo; + status = OrtSessionGetInputTypeInfo(_session, i, &typeinfo); + status.throwOnError(); + + const OrtTensorTypeAndShapeInfo *tensor_info = OrtCastTypeInfoToTensorInfo(typeinfo); + ONNXTensorElementDataType type = OrtGetTensorElementType(tensor_info); + + uint32_t num_dims = OrtGetNumOfDimensions(tensor_info); + std::vector dims(num_dims); + OrtGetDimensions(tensor_info, (int64_t *)dims.data(), num_dims); + + // NOTE To run OnnxRuntime, the total size of input tensor must be fixed. + // In the present code, the unknown shape that is -1 is arbitrarily changed to 1. 
+ // + // TODO Add user argument related to unknown shape + // + for (uint32_t j = 0; j < num_dims; ++j) + { + if (dims[j] == -1) + { + dims[j] = 1; + } + } + OrtReleaseTypeInfo(typeinfo); + + _inputs->set(i, name, type, dims); + } +} + +void Runner::prepareOutputs(void) +{ + Status status; + + assert(_outputs == nullptr); + + size_t num_output_nodes; + status = OrtSessionGetOutputCount(_session, &num_output_nodes); + status.throwOnError(); + + _outputs = stdex::make_unique(_allocator.get(), num_output_nodes); + + for (size_t i = 0; i < num_output_nodes; ++i) + { + char *output_name; + status = OrtSessionGetOutputName(_session, i, _allocator.get(), &output_name); + status.throwOnError(); + + assert(output_name != nullptr); + + std::string name{output_name}; + _allocator->Free(output_name); + + OrtTypeInfo *typeinfo; + status = OrtSessionGetOutputTypeInfo(_session, i, &typeinfo); + status.throwOnError(); + + const OrtTensorTypeAndShapeInfo *tensor_info = OrtCastTypeInfoToTensorInfo(typeinfo); + ONNXTensorElementDataType type = OrtGetTensorElementType(tensor_info); + + uint32_t num_dims = OrtGetNumOfDimensions(tensor_info); + std::vector dims(num_dims); + OrtGetDimensions(tensor_info, (int64_t *)dims.data(), num_dims); + + // NOTE To run OnnxRuntime, the total size of output tensor must be fixed. + // In the present code, the unknown shape that is -1 is arbitrarily changed to 1. + // + // TODO Add user argument related to unknown shape + // + for (uint32_t j = 0; j < num_dims; ++j) + { + if (dims[j] == -1) + { + dims[j] = 1; + } + } + OrtReleaseTypeInfo(typeinfo); + + _outputs->set(i, name, type, dims); + } +} + } // namespace onnx } // namespace support } // namespace nnkit -- 2.7.4