From 4284937c9e1854760d3d2ab9663a96fceac016fc Mon Sep 17 00:00:00 2001
From: Andrew Tischenko/AI Tools Lab/SRR/Staff Engineer/Samsung Electronics
Date: Wed, 16 Jan 2019 14:41:14 +0300
Subject: [PATCH] [nnc] Mobilenet support in soft backend in ONNX (#2866)

The soft backend can't load int64 constant tensors, so such tensors are
converted to float ones (they carry the shape input of the Reshape
operation).

Signed-off-by: Andrew V. Tischenko
---
 .../nnc/passes/onnx_frontend/ONNXImporterImpl.cpp  | 23 ++++++++++++++++------
 contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp |  3 ++-
 contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h   |  2 +-
 3 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
index 732a93c..3eb544c 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
+++ b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
@@ -111,16 +111,27 @@ mir::TensorVariant ONNXImporterImpl::createTensor(const onnx::TensorProto* tensor) {
     src_data = reinterpret_cast<const char*>(tensor->int32_data().data());
     mir::DTYPE type = mir::DTYPE::INT32;
   } else if (tensor->int64_data_size() != 0) {
-    // FIXME: we could lose the data here
-    type = mir::DTYPE::INT32;
-    element_size = sizeof(int32_t);
+//    // FIXME: we could lose the data here
+//    type = mir::DTYPE::INT32;
+//    element_size = sizeof(int32_t);
+//    buffer_size = tensor->int64_data_size() * element_size;
+//
+//    auto src_data64 = reinterpret_cast<const int64_t*>(tensor->int64_data().data());
+//    std::shared_ptr<char> shared_buffer (new char[buffer_size], std::default_delete<char[]>());
+//    auto dst_data = reinterpret_cast<int32_t*>(shared_buffer.get());
+//    for (int i = 0; i < tensor->int64_data_size(); i++) {
+//      dst_data[i] = (int32_t)src_data64 [i];
+//    }
+//    return mir::TensorVariant(shape, shared_buffer, type, element_size);
+    // FIXME: it's a hack to support mobilenet in soft backend
+    // (the above code works under interpreter but does not work in softbackend)
+    element_size = sizeof(float);
     buffer_size = tensor->int64_data_size() * element_size;
-
     auto src_data64 = reinterpret_cast<const int64_t*>(tensor->int64_data().data());
     std::shared_ptr<char> shared_buffer (new char[buffer_size], std::default_delete<char[]>());
-    auto dst_data = reinterpret_cast<int32_t*>(shared_buffer.get());
+    auto dst_data = reinterpret_cast<float*>(shared_buffer.get());
     for (int i = 0; i < tensor->int64_data_size(); i++) {
-      dst_data[i] = (int32_t)src_data64 [i];
+      dst_data[i] = static_cast<float>(src_data64 [i]);
     }
     return mir::TensorVariant(shape, shared_buffer, type, element_size);
   } else if (tensor->raw_data().size() != 0) {
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
index d660571..4d67f8a 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
+++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 #include "core/modelIR/Index.h"
 #include "core/modelIR/Graph.h"
 #include "core/modelIR/Scalar.h"
@@ -310,7 +311,7 @@ ONNXOpCreator::convertReshape(const std::vector& inputs) {
   std::vector<int32_t> shape_vector(cnt);
   ShapeRange out_range(shape_tensor_shape);
   // FIXME: real type could be int64_t but _elementSize is correct that's why it works
-  Tensor<int32_t> tensor_accessor(shape_tensor);
+  Tensor<float> tensor_accessor(shape_tensor);

   int i = 0;
   for (auto idx : out_range) {
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
index c89f714..f5b405a 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
+++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
@@ -55,7 +55,7 @@ public:
   convertConstant(const onnx::NodeProto& onnx_node,
                   InputTensors& input_tensors);

-  std::vector
+  std::vector
   convertPool(const std::vector& inputs, ONNXOpCode op_code,
               const onnx::NodeProto& onnx_node);

-- 
2.7.4
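
Note: below is a minimal standalone sketch (not nnc code) of the
int64-to-float widening that the patched createTensor performs. The helper
name convert_int64_to_float_buffer and the sample values are illustrative
assumptions; the buffer ownership idiom (shared_ptr<char> with an array
deleter) mirrors the patch. Like the patch itself, it assumes the int64
values are small shape dimensions, which a float represents exactly (any
integer with magnitude below 2^24 is exact in a 32-bit float).

// Sketch only: widen an int64 buffer into a freshly allocated float buffer,
// the same element-by-element conversion the patch applies to ONNX int64
// constant tensors before handing them to the soft backend.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <memory>

// Hypothetical helper (not part of the nnc API): copies 'count' int64
// values into a new float buffer and returns shared ownership of it.
static std::shared_ptr<char> convert_int64_to_float_buffer(const int64_t* src,
                                                           int count) {
  const size_t buffer_size = count * sizeof(float);
  std::shared_ptr<char> buffer(new char[buffer_size],
                               std::default_delete<char[]>());
  auto dst = reinterpret_cast<float*>(buffer.get());
  for (int i = 0; i < count; i++)
    dst[i] = static_cast<float>(src[i]);  // exact for |value| < 2^24
  return buffer;
}

int main() {
  // Example: a Reshape target shape, stored as int64 in the ONNX model.
  const int64_t shape[] = {1, 1001};
  auto buffer = convert_int64_to_float_buffer(shape, 2);
  auto values = reinterpret_cast<const float*>(buffer.get());
  std::printf("%f %f\n", values[0], values[1]);  // prints 1.000000 1001.000000
  return 0;
}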