[nnc] Mobilenet support in soft backend in ONNX (#2866)
author	Andrew Tischenko/AI Tools Lab /SRR/Staff Engineer/Samsung Electronics <a.tischenko@partner.samsung.com>
Wed, 16 Jan 2019 11:41:14 +0000 (14:41 +0300)
committer	Roman Mikhailovich Rusyaev/AI Tools Lab /SRR/Staff Engineer/Samsung Electronics <r.rusyaev@samsung.com>
Wed, 16 Jan 2019 11:41:14 +0000 (14:41 +0300)
The soft backend can't load int64 constant tensors, so they are converted to float tensors (these constants feed the reshape operation).
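
In essence, the importer now widens each int64 element into a float buffer before
wrapping it in a TensorVariant. A minimal standalone sketch of that conversion
(convertInt64ToFloat is a hypothetical helper, not part of this patch; large
values may lose precision):

    #include <cstddef>
    #include <cstdint>
    #include <memory>

    // Copy int64 elements into a freshly allocated float buffer,
    // mirroring the conversion loop in ONNXImporterImpl::createTensor.
    std::shared_ptr<char> convertInt64ToFloat(const int64_t* src, int count) {
      const std::size_t buffer_size = count * sizeof(float);
      std::shared_ptr<char> buffer(new char[buffer_size],
                                   std::default_delete<char[]>());
      auto* dst = reinterpret_cast<float*>(buffer.get());
      for (int i = 0; i < count; ++i)
        dst[i] = static_cast<float>(src[i]);  // possible precision loss
      return buffer;
    }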

Signed-off-by: Andrew V. Tischenko <a.tischenko@partner.samsung.com>
contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h

index 732a93c..3eb544c 100644
@@ -111,16 +111,27 @@ mir::TensorVariant ONNXImporterImpl::createTensor(const onnx::TensorProto* tenso
     src_data = reinterpret_cast<const char*>(tensor->int32_data().data());
     mir::DTYPE type = mir::DTYPE::INT32;
   } else if (tensor->int64_data_size() != 0) {
-    // FIXME: we could lose the data here
-    type = mir::DTYPE::INT32;
-    element_size = sizeof(int32_t);
+//    // FIXME: we could lose the data here
+//    type = mir::DTYPE::INT32;
+//    element_size = sizeof(int32_t);
+//    buffer_size = tensor->int64_data_size() * element_size;
+//
+//    auto src_data64 = reinterpret_cast<const int64_t *>(tensor->int64_data().data());
+//    std::shared_ptr<char> shared_buffer (new char[buffer_size], std::default_delete<char[]>());
+//    auto dst_data = reinterpret_cast<int32_t *>(shared_buffer.get());
+//    for (int i = 0; i < tensor->int64_data_size(); i++) {
+//      dst_data[i] = (int32_t)src_data64 [i];
+//    }
+//    return mir::TensorVariant(shape, shared_buffer, type, element_size);
+    // FIXME: this is a hack to support MobileNet in the soft backend
+    // (the code above works under the interpreter but does not work in the soft backend).
+    element_size = sizeof(float);
     buffer_size = tensor->int64_data_size() * element_size;
-
     auto src_data64 = reinterpret_cast<const int64_t *>(tensor->int64_data().data());
     std::shared_ptr<char> shared_buffer (new char[buffer_size], std::default_delete<char[]>());
-    auto dst_data = reinterpret_cast<int32_t *>(shared_buffer.get());
+    auto dst_data = reinterpret_cast<float *>(shared_buffer.get());
     for (int i = 0; i < tensor->int64_data_size(); i++) {
-      dst_data[i] = (int32_t)src_data64 [i];
+      dst_data[i] = static_cast<float>(src_data64[i]);
     }
     return mir::TensorVariant(shape, shared_buffer, type, element_size);
   } else if (tensor->raw_data().size() != 0) {
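
Note that after this change the TensorVariant holds float data while its declared
dtype may no longer match the stored elements; the FIXME in convertReshape below
acknowledges the mismatch, and the accessor there is switched to Tensor<float>
so the reader side matches the new buffer layout.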
index d660571..4d67f8a 100644
@@ -17,6 +17,7 @@
 #include <set>
 #include <cmath>
 #include <iostream>
+#include <core/modelIR/Scalar.h>
 #include "core/modelIR/Index.h"
 #include "core/modelIR/Graph.h"
 #include "core/modelIR/Scalar.h"
@@ -310,7 +311,7 @@ ONNXOpCreator::convertReshape(const std::vector<mir::IODescriptor>& inputs) {
   std::vector<int32_t > shape_vector(cnt);
   ShapeRange out_range(shape_tensor_shape);
   // FIXME: real type could be int64_t but _elementSize is correct that's why it works
-  Tensor<int32_t> tensor_accessor(shape_tensor);
+  Tensor<float> tensor_accessor(shape_tensor);
 
   int i = 0;
   for (auto idx : out_range) {
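
On the consumer side, each float element of the shape constant is truncated back
to an int32 dimension. A self-contained sketch of the read-back (decodeShape is a
hypothetical equivalent of the loop above, written without the mir types):

    #include <cstdint>
    #include <vector>

    // Recover integer dimensions from the float-encoded shape buffer.
    std::vector<int32_t> decodeShape(const float* data, int count) {
      std::vector<int32_t> shape(count);
      for (int i = 0; i < count; ++i)
        shape[i] = static_cast<int32_t>(data[i]);  // truncate to integer dims
      return shape;
    }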
index c89f714..f5b405a 100644
@@ -55,7 +55,7 @@ public:
   convertConstant(const onnx::NodeProto& onnx_node,
                            InputTensors& input_tensors);
 
-    std::vector<mir::IODescriptor>
+  std::vector<mir::IODescriptor>
   convertPool(const std::vector<mir::IODescriptor>& inputs,
               ONNXOpCode op_code,
               const onnx::NodeProto& onnx_node);