extern "C" int ml_single_invoke_fast(ml_single_h single, const ml_tensors_data_h input, ml_tensors_data_h output);
#endif
+// TODO: This is a temporary workaround, so it should be replaced with ML_TENSOR_RANK_LIMIT once
+// the underlying issue is fixed. As of now, ML_TENSOR_RANK_LIMIT is a fixed value of 16. Ideally,
+// if we set an input or output tensor dimension to the actual tensor dimension value with the
+// ml_tensors_info_set_tensor_dimension function, then we should get the same dimension value back
+// from the ml_tensors_info_get_tensor_dimension function. However, as of now,
+// ml_tensors_info_get_tensor_dimension always reports a fixed rank of 16.
+#define MAX_TENSOR_DIMENSION_SIZE 4
+
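For reference, here is a minimal sketch (not part of this patch) of the set/get mismatch the comment above describes. The header name varies across Tizen versions, and the function itself is illustrative only; the ml_tensors_info calls are the public C API:

#include <ml-api-common.h>

// Illustrative only: set a 4-dimensional tensor shape, then read it back.
static void demonstrate_rank_mismatch()
{
	ml_tensors_info_h info = nullptr;
	ml_tensor_dimension set_dim = { 3, 224, 224, 1 };  // four meaningful dimensions
	ml_tensor_dimension get_dim = { 0, };

	ml_tensors_info_create(&info);
	ml_tensors_info_set_count(info, 1);
	ml_tensors_info_set_tensor_type(info, 0, ML_TENSOR_TYPE_FLOAT32);
	ml_tensors_info_set_tensor_dimension(info, 0, set_dim);

	// Expected: get_dim mirrors the four dimensions set above.
	// Observed: the reported rank is always ML_TENSOR_RANK_LIMIT (16), so the
	// caller cannot distinguish real dimensions from padding; hence the
	// MAX_TENSOR_DIMENSION_SIZE cap used by this backend.
	ml_tensors_info_get_tensor_dimension(info, 0, get_dim);

	ml_tensors_info_destroy(info);
}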
namespace InferenceEngineImpl
{
namespace MLAPIImpl
{
LOGI("Input tensor dimension:");
- for (unsigned int shape_idx = 0; shape_idx < ML_TENSOR_RANK_LIMIT; ++shape_idx) {
+ for (unsigned int shape_idx = 0; shape_idx < MAX_TENSOR_DIMENSION_SIZE; ++shape_idx) {
tensor_info.shape.push_back(in_dim[shape_idx]);
in_size *= static_cast<size_t>(in_dim[shape_idx]);
LOGI("%u", in_dim[shape_idx]);
for (auto& output : mDesignated_outputs) {
inference_engine_tensor_info tensor_info;
ml_tensor_type_e out_type;
- unsigned int out_dim[ML_TENSOR_RANK_LIMIT];
+ unsigned int out_dim[MAX_TENSOR_DIMENSION_SIZE];
size_t out_size = 1;
ret = ml_tensors_info_get_tensor_type(mOutputInfoHandle, output.second, &out_type);
LOGI("Output tensor dimension:");
- for (unsigned int shape_idx = 0; shape_idx < ML_TENSOR_RANK_LIMIT; ++shape_idx) {
+ for (unsigned int shape_idx = 0; shape_idx < MAX_TENSOR_DIMENSION_SIZE; ++shape_idx) {
out_size *= static_cast<size_t>(out_dim[shape_idx]);
if (out_dim[shape_idx] == 1 && shape_size == 0)
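If unused trailing entries are indeed padded with 1, as the check above suggests, a hypothetical helper along these lines could recover the effective rank. This is a sketch under that assumption, not code from this patch:

// Hypothetical helper (not in this patch): derive the effective rank from a
// dimension array reported by ml_tensors_info_get_tensor_dimension, assuming
// unused trailing entries are padded with 1.
static unsigned int effective_rank(const unsigned int *dim, unsigned int max_rank)
{
	unsigned int rank = max_rank;

	// Walk back over trailing 1s; keep at least one dimension.
	while (rank > 1 && dim[rank - 1] == 1)
		--rank;

	return rank;
}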