--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNKIT_SUPPORT_ONNX_TENSOR_SET_H__
+#define __NNKIT_SUPPORT_ONNX_TENSOR_SET_H__
+
+#include "nnkit/support/onnx/Allocator.h"
+#include "nnkit/support/onnx/Status.h"
+
+#include <onnxruntime_c_api.h>
+
+#include <string>
+#include <vector>
+#include <cassert>
+
+namespace nnkit
+{
+namespace support
+{
+namespace onnx
+{
+
+/**
+ * @brief Owning collection of named ONNX Runtime tensors.
+ *
+ * Each slot holds a tensor's name, element type, shape, and the OrtValue
+ * allocated through the given Allocator. All owned OrtValues are released
+ * in the destructor.
+ *
+ * NOTE The Allocator is borrowed, not owned; it must outlive this object.
+ */
+class TensorSet final
+{
+public:
+  TensorSet(Allocator *allocator, size_t nums)
+      : _allocator(allocator), _names(nums), _types(nums), _dims(nums), _tensors(nums, nullptr)
+  {
+    // DO NOTHING
+  }
+
+  // Copying would double-release the owned OrtValues in the destructor
+  TensorSet(const TensorSet &) = delete;
+  TensorSet &operator=(const TensorSet &) = delete;
+
+  ~TensorSet(void)
+  {
+    for (auto it : _tensors)
+    {
+      // OrtReleaseValue is a no-op on nullptr, so unset slots are safe
+      OrtReleaseValue(it);
+    }
+  }
+
+  /**
+   * @brief (Re)create the tensor stored at slot @p index.
+   *
+   * @param index Slot to fill; must be < the size given at construction
+   * @param name  Tensor name recorded for this slot
+   * @param type  ONNX element type of the tensor to allocate
+   * @param dims  Fully-specified (no -1) shape of the tensor
+   *
+   * @throws on OrtCreateTensorAsOrtValue failure (via Status::throwOnError)
+   */
+  void set(size_t index, const std::string &name, ONNXTensorElementDataType type,
+           const std::vector<size_t> &dims)
+  {
+    _names[index] = name;
+    _types[index] = type;
+    _dims[index] = dims;
+
+    // Release any tensor previously created for this slot to avoid leaking it
+    if (_tensors[index] != nullptr)
+    {
+      OrtReleaseValue(_tensors[index]);
+      _tensors[index] = nullptr;
+    }
+
+    Status status;
+
+    status =
+        OrtCreateTensorAsOrtValue(_allocator, dims.data(), dims.size(), type, &_tensors[index]);
+    status.throwOnError();
+
+    assert(OrtIsTensor(_tensors[index]));
+  }
+
+private:
+  Allocator *_allocator; // borrowed; used for tensor buffer allocation
+
+  std::vector<std::string> _names;
+  std::vector<ONNXTensorElementDataType> _types;
+  std::vector<std::vector<size_t>> _dims;
+  std::vector<OrtValue *> _tensors; // owned; released in ~TensorSet
+};
+
+} // namespace onnx
+} // namespace support
+} // namespace nnkit
+
+#endif // __NNKIT_SUPPORT_ONNX_TENSOR_SET_H__
}
}
+/**
+ * @brief Query the session for its input tensors and allocate one per input.
+ *
+ * Reads each input's name, element type, and shape from the session, then
+ * creates the backing tensor in _inputs. Must be called at most once
+ * (asserts _inputs is still empty).
+ *
+ * @throws on any OnnxRuntime API failure (via Status::throwOnError)
+ */
+void Runner::prepareInputs(void)
+{
+  Status status;
+
+  assert(_inputs == nullptr);
+
+  size_t num_input_nodes;
+  status = OrtSessionGetInputCount(_session, &num_input_nodes);
+  status.throwOnError();
+
+  _inputs = stdex::make_unique<TensorSet>(_allocator.get(), num_input_nodes);
+
+  for (size_t i = 0; i < num_input_nodes; ++i)
+  {
+    char *input_name;
+    status = OrtSessionGetInputName(_session, i, _allocator.get(), &input_name);
+    status.throwOnError();
+
+    assert(input_name != nullptr);
+
+    // Copy the name, then return the API-allocated buffer to the allocator
+    std::string name{input_name};
+    _allocator->Free(input_name);
+
+    OrtTypeInfo *typeinfo;
+    status = OrtSessionGetInputTypeInfo(_session, i, &typeinfo);
+    status.throwOnError();
+
+    const OrtTensorTypeAndShapeInfo *tensor_info = OrtCastTypeInfoToTensorInfo(typeinfo);
+    ONNXTensorElementDataType type = OrtGetTensorElementType(tensor_info);
+
+    uint32_t num_dims = OrtGetNumOfDimensions(tensor_info);
+
+    // Read dimensions into an int64_t buffer (the API's native element type)
+    // instead of type-punning a size_t buffer through a cast.
+    std::vector<int64_t> raw_dims(num_dims);
+    OrtGetDimensions(tensor_info, raw_dims.data(), num_dims);
+
+    // NOTE To run OnnxRuntime, the total size of input tensor must be fixed.
+    //      In the present code, an unknown dimension (reported as -1) is
+    //      arbitrarily changed to 1.
+    //
+    // TODO Add user argument related to unknown shape
+    //
+    std::vector<size_t> dims(num_dims);
+    for (uint32_t j = 0; j < num_dims; ++j)
+    {
+      dims[j] = (raw_dims[j] < 0) ? 1 : static_cast<size_t>(raw_dims[j]);
+    }
+    OrtReleaseTypeInfo(typeinfo);
+
+    _inputs->set(i, name, type, dims);
+  }
+}
+
+/**
+ * @brief Query the session for its output tensors and allocate one per output.
+ *
+ * Reads each output's name, element type, and shape from the session, then
+ * creates the backing tensor in _outputs. Must be called at most once
+ * (asserts _outputs is still empty).
+ *
+ * @throws on any OnnxRuntime API failure (via Status::throwOnError)
+ */
+void Runner::prepareOutputs(void)
+{
+  Status status;
+
+  assert(_outputs == nullptr);
+
+  size_t num_output_nodes;
+  status = OrtSessionGetOutputCount(_session, &num_output_nodes);
+  status.throwOnError();
+
+  _outputs = stdex::make_unique<TensorSet>(_allocator.get(), num_output_nodes);
+
+  for (size_t i = 0; i < num_output_nodes; ++i)
+  {
+    char *output_name;
+    status = OrtSessionGetOutputName(_session, i, _allocator.get(), &output_name);
+    status.throwOnError();
+
+    assert(output_name != nullptr);
+
+    // Copy the name, then return the API-allocated buffer to the allocator
+    std::string name{output_name};
+    _allocator->Free(output_name);
+
+    OrtTypeInfo *typeinfo;
+    status = OrtSessionGetOutputTypeInfo(_session, i, &typeinfo);
+    status.throwOnError();
+
+    const OrtTensorTypeAndShapeInfo *tensor_info = OrtCastTypeInfoToTensorInfo(typeinfo);
+    ONNXTensorElementDataType type = OrtGetTensorElementType(tensor_info);
+
+    uint32_t num_dims = OrtGetNumOfDimensions(tensor_info);
+
+    // Read dimensions into an int64_t buffer (the API's native element type)
+    // instead of type-punning a size_t buffer through a cast.
+    std::vector<int64_t> raw_dims(num_dims);
+    OrtGetDimensions(tensor_info, raw_dims.data(), num_dims);
+
+    // NOTE To run OnnxRuntime, the total size of output tensor must be fixed.
+    //      In the present code, an unknown dimension (reported as -1) is
+    //      arbitrarily changed to 1.
+    //
+    // TODO Add user argument related to unknown shape
+    //
+    std::vector<size_t> dims(num_dims);
+    for (uint32_t j = 0; j < num_dims; ++j)
+    {
+      dims[j] = (raw_dims[j] < 0) ? 1 : static_cast<size_t>(raw_dims[j]);
+    }
+    OrtReleaseTypeInfo(typeinfo);
+
+    _outputs->set(i, name, type, dims);
+  }
+}
+
} // namespace onnx
} // namespace support
} // namespace nnkit