From: Andrew Tischenko/AI Tools Lab/SRR/Staff Engineer/Samsung Electronics
Date: Wed, 16 Jan 2019 11:41:14 +0000 (+0300)
Subject: [nnc] Mobilenet support in soft backend in ONNX (#2866)
X-Git-Tag: nncc_backup~953
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4284937c9e1854760d3d2ab9663a96fceac016fc;p=platform%2Fcore%2Fml%2Fnnfw.git

[nnc] Mobilenet support in soft backend in ONNX (#2866)

The soft backend can't load int64 constant tensors, so such constants
are converted into float ones (needed by the Reshape operation).

Signed-off-by: Andrew V. Tischenko
---

diff --git a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
index 732a93c..3eb544c 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
+++ b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
@@ -111,16 +111,27 @@ mir::TensorVariant ONNXImporterImpl::createTensor(const onnx::TensorProto* tenso
     src_data = reinterpret_cast<const char*>(tensor->int32_data().data());
     mir::DTYPE type = mir::DTYPE::INT32;
   } else if (tensor->int64_data_size() != 0) {
-    // FIXME: we could lose the data here
-    type = mir::DTYPE::INT32;
-    element_size = sizeof(int32_t);
+//    // FIXME: we could lose the data here
+//    type = mir::DTYPE::INT32;
+//    element_size = sizeof(int32_t);
+//    buffer_size = tensor->int64_data_size() * element_size;
+//
+//    auto src_data64 = reinterpret_cast<const int64_t*>(tensor->int64_data().data());
+//    std::shared_ptr<char> shared_buffer (new char[buffer_size], std::default_delete<char[]>());
+//    auto dst_data = reinterpret_cast<int32_t*>(shared_buffer.get());
+//    for (int i = 0; i < tensor->int64_data_size(); i++) {
+//      dst_data[i] = (int32_t)src_data64 [i];
+//    }
+//    return mir::TensorVariant(shape, shared_buffer, type, element_size);
+    // FIXME: it's a hack to support mobilenet in soft backend
+    // (the above code works under interpreter but does not work in softbackend)
+    element_size = sizeof(float);
     buffer_size = tensor->int64_data_size() * element_size;
-    auto src_data64 = reinterpret_cast<const int64_t*>(tensor->int64_data().data());
+    auto src_data64 = reinterpret_cast<const int64_t*>(tensor->int64_data().data());
     std::shared_ptr<char> shared_buffer (new char[buffer_size], std::default_delete<char[]>());
-    auto dst_data = reinterpret_cast<int32_t*>(shared_buffer.get());
+    auto dst_data = reinterpret_cast<float*>(shared_buffer.get());
     for (int i = 0; i < tensor->int64_data_size(); i++) {
-      dst_data[i] = (int32_t)src_data64 [i];
+      dst_data[i] = static_cast<float>(src_data64 [i]);
     }
     return mir::TensorVariant(shape, shared_buffer, type, element_size);
   } else if (tensor->raw_data().size() != 0) {
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
index d660571..4d67f8a 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
+++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 #include "core/modelIR/Index.h"
 #include "core/modelIR/Graph.h"
 #include "core/modelIR/Scalar.h"
@@ -310,7 +311,7 @@ ONNXOpCreator::convertReshape(const std::vector& inputs) {
   std::vector shape_vector(cnt);
   ShapeRange out_range(shape_tensor_shape);
   // FIXME: real type could be int64_t but _elementSize is correct that's why it works
-  Tensor<int32_t> tensor_accessor(shape_tensor);
+  Tensor<float> tensor_accessor(shape_tensor);
   int i = 0;

   for (auto idx : out_range) {
diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
index c89f714..f5b405a 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
+++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
@@ -55,7 +55,7 @@ public:
   convertConstant(const onnx::NodeProto& onnx_node,
                   InputTensors& input_tensors);

-  std::vector
+  std::vector
   convertPool(const std::vector& inputs, ONNXOpCode op_code,
               const onnx::NodeProto& onnx_node);
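
Note: the core of the workaround is widening each int64 element to float
while building the constant's backing buffer. Below is a minimal standalone
sketch of that conversion, following the same ownership scheme as
createTensor() above; the helper name int64ToFloatBuffer is hypothetical
and not part of nnc:

  // Sketch only: turn an ONNX int64 initializer into a float buffer so the
  // soft backend, which cannot load INT64 tensors, can still consume it.
  // A shared_ptr<char> with an array deleter owns the raw storage, matching
  // the pattern in createTensor() above.
  #include <cstddef>
  #include <cstdint>
  #include <memory>

  std::shared_ptr<char> int64ToFloatBuffer(const int64_t* src, int count) {
    const size_t buffer_size = count * sizeof(float);
    std::shared_ptr<char> buffer(new char[buffer_size],
                                 std::default_delete<char[]>());
    auto dst = reinterpret_cast<float*>(buffer.get());
    for (int i = 0; i < count; i++)
      dst[i] = static_cast<float>(src[i]);  // lossy above 2^24
    return buffer;
  }

Unlike the old int64-to-int32 path, this one is also lossy for values above
2^24, which is acceptable here because the converted constants are shape
tensors with small dimension values.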
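
On the consumer side, convertReshape() now reads the shape constant back
through a Tensor<float> accessor over a ShapeRange and truncates each
element to an integer dimension. A simplified flat-pointer sketch of that
read-back step (readShapeVector is a hypothetical helper, not nnc API):

  // Sketch only: recover integer dimensions from a shape constant stored
  // as float by the importer hack above. Truncation is exact for realistic
  // dimension values, which sit far below float's 2^24 integer limit.
  #include <cstdint>
  #include <vector>

  std::vector<int32_t> readShapeVector(const float* data, int count) {
    std::vector<int32_t> shape_vector(count);
    for (int i = 0; i < count; i++)
      shape_vector[i] = static_cast<int32_t>(data[i]);
    return shape_vector;
  }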