src_data = reinterpret_cast<const char*>(tensor->int32_data().data());
mir::DTYPE type = mir::DTYPE::INT32;
} else if (tensor->int64_data_size() != 0) {
- // FIXME: we could lose the data here
- type = mir::DTYPE::INT32;
- element_size = sizeof(int32_t);
+// // FIXME: we could lose the data here
+// type = mir::DTYPE::INT32;
+// element_size = sizeof(int32_t);
+// buffer_size = tensor->int64_data_size() * element_size;
+//
+// auto src_data64 = reinterpret_cast<const int64_t *>(tensor->int64_data().data());
+// std::shared_ptr<char> shared_buffer (new char[buffer_size], std::default_delete<char[]>());
+// auto dst_data = reinterpret_cast<int32_t *>(shared_buffer.get());
+// for (int i = 0; i < tensor->int64_data_size(); i++) {
+// dst_data[i] = (int32_t)src_data64 [i];
+// }
+// return mir::TensorVariant(shape, shared_buffer, type, element_size);
+ // FIXME: this is a hack to support MobileNet in the soft backend
+ // (the commented-out code above works under the interpreter but not in the soft backend)
+ element_size = sizeof(float);
buffer_size = tensor->int64_data_size() * element_size;
-
auto src_data64 = reinterpret_cast<const int64_t *>(tensor->int64_data().data());
std::shared_ptr<char> shared_buffer (new char[buffer_size], std::default_delete<char[]>());
- auto dst_data = reinterpret_cast<int32_t *>(shared_buffer.get());
+ auto dst_data = reinterpret_cast<float *>(shared_buffer.get());
for (int i = 0; i < tensor->int64_data_size(); i++) {
- dst_data[i] = (int32_t)src_data64 [i];
+ dst_data[i] = static_cast<float>(src_data64 [i]);
}
return mir::TensorVariant(shape, shared_buffer, type, element_size);
} else if (tensor->raw_data().size() != 0) {
#include <set>
#include <cmath>
#include <iostream>
+#include "core/modelIR/Scalar.h"
#include "core/modelIR/Index.h"
#include "core/modelIR/Graph.h"
#include "core/modelIR/Scalar.h"
std::vector<int32_t > shape_vector(cnt);
ShapeRange out_range(shape_tensor_shape);
// FIXME: real type could be int64_t but _elementSize is correct that's why it works
- Tensor<int32_t> tensor_accessor(shape_tensor);
+ Tensor<float> tensor_accessor(shape_tensor);
int i = 0;
for (auto idx : out_range) {