From: Parichay Kapoor Date: Tue, 28 Jul 2020 07:23:08 +0000 (+0900) Subject: [tensor] Update GstTensorMemory coherent with ml_tensors_data_s X-Git-Tag: accepted/tizen/unified/20200803.122219~3 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=63d6fba3e441c7c514713c28f6309d8c013ccbee;p=platform%2Fupstream%2Fnnstreamer.git [tensor] Update GstTensorMemory coherent with ml_tensors_data_s One of the efforts to minimize API latency is to avoid creating data wrappers in the API and internally in nnstreamer, and to avoid copying them. Rather, just interpret the API data structure as the internal definition. This requires the internal and external structures to be coherent. This commit makes GstTensorMemory coherent with ml_tensor_data_s by removing `tensor_type type` from GstTensorMemory. type can be removed from GstTensorMemory as GstTensorMemory is used internally and its usage is always supported with GstTensorInfo which contains the type information. Correspondingly update the existing elements and extensions which use GstTensorMemory to use the GstTensorInfo type information. 
Signed-off-by: Parichay Kapoor --- diff --git a/api/capi/src/nnstreamer-capi-single.c b/api/capi/src/nnstreamer-capi-single.c index 6ba2cf5..a8dad83 100644 --- a/api/capi/src/nnstreamer-capi-single.c +++ b/api/capi/src/nnstreamer-capi-single.c @@ -128,7 +128,6 @@ __setup_in_out_tensors (ml_single * single_h) /** memory will be setup during invoke */ in_tensors[i].data = NULL; in_tensors[i].size = ml_tensor_info_get_size (&single_h->in_info.info[i]); - in_tensors[i].type = (tensor_type) single_h->in_info.info[i].type; } /** Setup output buffer */ @@ -136,7 +135,6 @@ __setup_in_out_tensors (ml_single * single_h) /** memory will be allocated by tensor_filter_single */ out_tensors[i].data = NULL; out_tensors[i].size = ml_tensor_info_get_size (&single_h->out_info.info[i]); - out_tensors[i].type = (tensor_type) single_h->out_info.info[i].type; } } diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_nnfw.c b/ext/nnstreamer/tensor_filter/tensor_filter_nnfw.c index 0444f99..b7e362b 100644 --- a/ext/nnstreamer/tensor_filter/tensor_filter_nnfw.c +++ b/ext/nnstreamer/tensor_filter/tensor_filter_nnfw.c @@ -596,19 +596,23 @@ nnfw_tensor_memory_set (const GstTensorFilterProperties * prop, int err = 0; guint idx; unsigned int num_tensors = 0; + const GstTensorsInfo *info; g_return_val_if_fail (prop != NULL, -EINVAL); g_return_val_if_fail (mem != NULL, -EINVAL); g_return_val_if_fail (pdata != NULL, -EINVAL); g_return_val_if_fail (pdata->session != NULL, -EPERM); - if (is_input) + if (is_input) { num_tensors = pdata->in_info.num_tensors; - else + info = &pdata->in_info; + } else { num_tensors = pdata->out_info.num_tensors; + info = &pdata->out_info; + } for (idx = 0; idx < num_tensors; idx++) { - err = nnfw_tensor_type_from_gst (mem[idx].type, &nnfw_type); + err = nnfw_tensor_type_from_gst (info->info[idx].type, &nnfw_type); if (err != 0) return err; diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_openvino.cc b/ext/nnstreamer/tensor_filter/tensor_filter_openvino.cc index 
f7dfc6f..946b6db 100644 --- a/ext/nnstreamer/tensor_filter/tensor_filter_openvino.cc +++ b/ext/nnstreamer/tensor_filter/tensor_filter_openvino.cc @@ -95,9 +95,9 @@ TensorFilterOpenvino::convertFromIETypeStr (std::string type) InferenceEngine::Blob::Ptr TensorFilterOpenvino::convertGstTensorMemoryToBlobPtr ( const InferenceEngine::TensorDesc tensorDesc, - const GstTensorMemory * gstTensor) + const GstTensorMemory * gstTensor, const tensor_type gstType) { - switch (gstTensor->type) { + switch (gstType) { case _NNS_UINT8: return InferenceEngine::Blob::Ptr ( new InferenceEngine::TBlob( @@ -428,7 +428,7 @@ TensorFilterOpenvino::invoke (const GstTensorFilterProperties * prop, for (i = 0; i < num_tensors; ++i) { const GstTensorInfo *info = &((prop->input_meta).info[i]); InferenceEngine::Blob::Ptr blob = convertGstTensorMemoryToBlobPtr ( - this->_inputTensorDescs[i], &(input[i])); + this->_inputTensorDescs[i], &(input[i]), prop->input_meta.info[i].type); if (blob == nullptr) { ml_loge ("Failed to create a blob for the input tensor: %u", i); return RetEInval; @@ -441,7 +441,8 @@ TensorFilterOpenvino::invoke (const GstTensorFilterProperties * prop, for (i = 0; i < num_tensors; ++i) { const GstTensorInfo *info = &((prop->output_meta).info[i]); InferenceEngine::Blob::Ptr blob = convertGstTensorMemoryToBlobPtr ( - this->_outputTensorDescs[i], &(output[i])); + this->_outputTensorDescs[i], &(output[i]), + prop->output_meta.info[i].type); outBlobMap.insert (make_pair (std::string(info->name), blob)); if (blob == nullptr) { ml_loge ("Failed to create a blob for the output tensor: %u", i); diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_openvino.hh b/ext/nnstreamer/tensor_filter/tensor_filter_openvino.hh index 17efb61..b57a9f7 100644 --- a/ext/nnstreamer/tensor_filter/tensor_filter_openvino.hh +++ b/ext/nnstreamer/tensor_filter/tensor_filter_openvino.hh @@ -59,7 +59,7 @@ public: static tensor_type convertFromIETypeStr (std::string type); static InferenceEngine::Blob::Ptr 
convertGstTensorMemoryToBlobPtr ( const InferenceEngine::TensorDesc tensorDesc, - const GstTensorMemory * gstTensor); + const GstTensorMemory * gstTensor, const tensor_type gstType); static bool isAcclDevSupported (std::vector &devsVector, accl_hw hw); diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_python.cc b/ext/nnstreamer/tensor_filter/tensor_filter_python.cc index bfc5229..a90e4ed 100644 --- a/ext/nnstreamer/tensor_filter/tensor_filter_python.cc +++ b/ext/nnstreamer/tensor_filter/tensor_filter_python.cc @@ -125,7 +125,7 @@ public: PyObject* PyTensorShape_New (const GstTensorInfo *info); - int checkTensorType (GstTensorMemory *output, PyArrayObject *array); + int checkTensorType (int nns_type, int np_type); int checkTensorSize (GstTensorMemory *output, PyArrayObject *array); tensor_type getTensorType (NPY_TYPES npyType); @@ -357,19 +357,13 @@ PYCore::loadScript () /** * @brief check the data type of tensors in array - * @param output : tensor memory for output tensors - * @param array : python array + * @param nns_type : tensor type for output tensor + * @param np_type : python array type for output tensor * @return a boolean value for whether the types are matched */ int -PYCore::checkTensorType (GstTensorMemory *output, PyArrayObject *array) +PYCore::checkTensorType (int nns_type, int np_type) { - if (nullptr == output || nullptr == array) - throw std::invalid_argument ("Null pointers are given to PYCore::checkTensorType().\n"); - - int nns_type = output->type; - int np_type = PyArray_TYPE(array); - switch (nns_type) { case _NNS_INT64: return np_type == NPY_INT64; case _NNS_UINT64: return np_type == NPY_UINT64; @@ -631,9 +625,10 @@ PYCore::run (const GstTensorMemory * input, GstTensorMemory * output) PyObject *param = PyList_New(0); for (unsigned int i = 0; i < inputTensorMeta.num_tensors; i++) { /** create a Numpy array wrapper (1-D) for NNS tensor data */ - npy_intp input_dims[] = {(npy_intp) (input[i].size / gst_tensor_get_element_size 
(input[i].type))}; + tensor_type nns_type = inputTensorMeta.info[i].type; + npy_intp input_dims[] = {(npy_intp) (input[i].size / gst_tensor_get_element_size (nns_type))}; PyObject *input_array = PyArray_SimpleNewFromData( - 1, input_dims, getNumpyType(input[i].type), input[i].data); + 1, input_dims, getNumpyType(nns_type), input[i].data); PyList_Append(param, input_array); } @@ -649,7 +644,7 @@ PYCore::run (const GstTensorMemory * input, GstTensorMemory * output) for (unsigned int i = 0; i < outputTensorMeta.num_tensors; i++) { PyArrayObject* output_array = (PyArrayObject*) PyList_GetItem(result, (Py_ssize_t) i); /** type/size checking */ - if (checkTensorType(&output[i], output_array) && + if (checkTensorType(outputTensorMeta.info[i].type, PyArray_TYPE(output_array)) && checkTensorSize(&output[i], output_array)) { /** obtain the pointer to the buffer for the output array */ output[i].data = PyArray_DATA(output_array); diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_pytorch.cc b/ext/nnstreamer/tensor_filter/tensor_filter_pytorch.cc index 802687f..6dadc5d 100644 --- a/ext/nnstreamer/tensor_filter/tensor_filter_pytorch.cc +++ b/ext/nnstreamer/tensor_filter/tensor_filter_pytorch.cc @@ -393,8 +393,6 @@ TorchCore::processIValue (torch::jit::IValue value, GstTensorMemory * output) /** make the memory contiguous for direct access */ output_tensor = output_tensor.contiguous (); - output->type = getTensorTypeFromTorch (output_tensor.scalar_type ()); - /* validate output tensor once */ if (!configured && validateOutputTensor (output_tensor)) { ml_loge ("Output Tensor Information is not valid"); @@ -434,8 +432,8 @@ TorchCore::invoke (const GstTensorMemory * input, GstTensorMemory * output) input_shape.assign (&inputTensorMeta.info[i].dimension[0], &inputTensorMeta.info[i].dimension[0] + NNS_TENSOR_RANK_LIMIT); - if (!getTensorTypeToTorch (input[i].type, &type)) { - ml_loge ("This data type is not valid: %d", input[i].type); + if (!getTensorTypeToTorch 
(inputTensorMeta.info[i].type, &type)) { + ml_loge ("This data type is not valid: %d", inputTensorMeta.info[i].type); return -1; } at::TensorOptions options = torch::TensorOptions ().dtype (type); diff --git a/gst/nnstreamer/include/tensor_typedef.h b/gst/nnstreamer/include/tensor_typedef.h index bdee3d0..817c059 100644 --- a/gst/nnstreamer/include/tensor_typedef.h +++ b/gst/nnstreamer/include/tensor_typedef.h @@ -153,16 +153,17 @@ typedef uint32_t tensor_dim[NNS_TENSOR_RANK_LIMIT]; /** * @brief The unit of each data tensors. It will be used as an input/output tensor of other/tensors. + * @note This must be coherent with api/capi/include/nnstreamer-capi-private.h:ml_tensor_data_s */ typedef struct { - void *data; - size_t size; - tensor_type type; + void *data; /**< The instance of tensor data. */ + size_t size; /**< The size of tensor. */ } GstTensorMemory; /** * @brief Internal data structure for tensor info. + * @note This must be coherent with api/capi/include/nnstreamer-capi-private.h:ml_tensor_info_s */ typedef struct { @@ -175,6 +176,7 @@ typedef struct /** * @brief Internal meta data exchange format for a other/tensors instance + * @note This must be coherent with api/capi/include/nnstreamer-capi-private.h:ml_tensors_info_s */ typedef struct { diff --git a/gst/nnstreamer/tensor_decoder/tensordec.c b/gst/nnstreamer/tensor_decoder/tensordec.c index f3a9756..87f5524 100644 --- a/gst/nnstreamer/tensor_decoder/tensordec.c +++ b/gst/nnstreamer/tensor_decoder/tensordec.c @@ -700,7 +700,6 @@ gst_tensordec_transform (GstBaseTransform * trans, input[i].data = in_info[i].data; input[i].size = in_info[i].size; - input[i].type = self->tensor_config.info.info[i].type; } res = self->decoder->decode (&self->plugin_data, &self->tensor_config, diff --git a/gst/nnstreamer/tensor_filter/tensor_filter.c b/gst/nnstreamer/tensor_filter/tensor_filter.c index 2d07869..15648be 100644 --- a/gst/nnstreamer/tensor_filter/tensor_filter.c +++ 
b/gst/nnstreamer/tensor_filter/tensor_filter.c @@ -483,7 +483,6 @@ gst_tensor_filter_transform (GstBaseTransform * trans, in_tensors[i].data = in_info[i].data; in_tensors[i].size = in_info[i].size; - in_tensors[i].type = prop->input_meta.info[i].type; } /* 2. Prepare output tensors. */ @@ -493,7 +492,6 @@ gst_tensor_filter_transform (GstBaseTransform * trans, for (i = 0; i < prop->output_meta.num_tensors; i++) { out_tensors[i].data = NULL; out_tensors[i].size = gst_tensor_filter_get_output_size (self, i); - out_tensors[i].type = prop->output_meta.info[i].type; /* allocate memory if allocate_in_invoke is FALSE */ if (allocate_in_invoke == FALSE) { diff --git a/tests/nnstreamer_filter_armnn/unittest_filter_armnn.cc b/tests/nnstreamer_filter_armnn/unittest_filter_armnn.cc index 4554823..50423a4 100644 --- a/tests/nnstreamer_filter_armnn/unittest_filter_armnn.cc +++ b/tests/nnstreamer_filter_armnn/unittest_filter_armnn.cc @@ -188,7 +188,6 @@ TEST (nnstreamer_filter_armnn, invoke_00) const GstTensorFilterFramework *sp = nnstreamer_filter_find ("armnn"); EXPECT_NE (sp, (void *) NULL); - output.type = input.type = _NNS_FLOAT32; output.size = input.size = sizeof(float) * 1; input.data = g_malloc(input.size); @@ -301,7 +300,6 @@ TEST (nnstreamer_filter_armnn, invoke_advanced) EXPECT_EQ (res.info[0].dimension[2], info.info[0].dimension[2]); EXPECT_EQ (res.info[0].dimension[3], info.info[0].dimension[3]); - input.type = res.info[0].type; input.size = gst_tensor_info_get_size (&res.info[0]); ret = sp->getOutputDimension (&prop, &data, &res); @@ -321,7 +319,6 @@ TEST (nnstreamer_filter_armnn, invoke_advanced) EXPECT_EQ (res.info[0].dimension[2], info.info[0].dimension[2]); EXPECT_EQ (res.info[0].dimension[3], info.info[0].dimension[3]); - output.type = res.info[0].type; output.size = gst_tensor_info_get_size (&res.info[0]); input.data = g_malloc(input.size); @@ -400,8 +397,7 @@ TEST (nnstreamer_filter_armnn, invoke_01) &input_uint8_size, NULL)); /** Convert the data from uint8 
to float */ - input.type = _NNS_FLOAT32; - input.size = input_uint8_size * gst_tensor_get_element_size (input.type); + input.size = input_uint8_size * gst_tensor_get_element_size (_NNS_FLOAT32); input.data = g_malloc (input.size); for (gsize idx=0; idx < input_uint8_size; idx ++) { ((float *) input.data)[idx] = @@ -413,8 +409,7 @@ TEST (nnstreamer_filter_armnn, invoke_01) const GstTensorFilterFramework *sp = nnstreamer_filter_find ("armnn"); EXPECT_NE (sp, (void *) NULL); - output.type = _NNS_FLOAT32; - output.size = gst_tensor_get_element_size (output.type) * num_labels; + output.size = gst_tensor_get_element_size (_NNS_FLOAT32) * num_labels; output.data = g_malloc(output.size); ret = sp->open (&prop, &data); diff --git a/tests/nnstreamer_filter_extensions_common/unittest_tizen_template.cc.in b/tests/nnstreamer_filter_extensions_common/unittest_tizen_template.cc.in index b8f70a0..ed8e5a3 100644 --- a/tests/nnstreamer_filter_extensions_common/unittest_tizen_template.cc.in +++ b/tests/nnstreamer_filter_extensions_common/unittest_tizen_template.cc.in @@ -394,8 +394,7 @@ TEST (nnstreamer_@EXT_ABBRV@_basic_functions, invoke_fail_n) ret = sp->open (&prop, &data); EXPECT_EQ (ret, 0); - input[0].type = output[0].type = _NNS_FLOAT32; - input[0].size = output[0].size = 1 * gst_tensor_get_element_size (output[0].type); + input[0].size = output[0].size = 1 * gst_tensor_get_element_size (_NNS_FLOAT32); input[0].data = g_malloc (input[0].size); output[0].data = g_malloc (output[0].size); @@ -491,13 +490,11 @@ TEST (nnstreamer_@EXT_ABBRV@_basic_functions, invoke) for (i = 0; i < input_info.num_tensors; ++i) { input[i].size = gst_tensor_info_get_size (&input_info.info[i]); - input[i].type = input_info.info[i].type; input[i].data = g_malloc (input[i].size); } for (i = 0; i < output_info.num_tensors; ++i) { output[i].size = gst_tensor_info_get_size (&output_info.info[i]); - output[i].type = output_info.info[i].type; output[i].data = g_malloc (output[i].size); } @@ -558,8 +555,7 @@ 
TEST (nnstreamer_@EXT_ABBRV@_basic_functions, reload_model) ret = sp->reloadModel (&prop, &data); EXPECT_EQ (ret, 0); - output.type = input.type = _NNS_FLOAT32; - output.size = input.size = gst_tensor_get_element_size (input.type) * 10; + output.size = input.size = gst_tensor_get_element_size (_NNS_FLOAT32) * 10; input.data = g_malloc(input.size); output.data = g_malloc(output.size); diff --git a/tests/nnstreamer_filter_openvino/unittest_openvino.cc b/tests/nnstreamer_filter_openvino/unittest_openvino.cc index a7f7810..ff08173 100644 --- a/tests/nnstreamer_filter_openvino/unittest_openvino.cc +++ b/tests/nnstreamer_filter_openvino/unittest_openvino.cc @@ -1089,8 +1089,7 @@ TEST (tensor_filter_openvino, convertFromIETypeStr_1_n) InferenceEngine::Blob::Ptr ret; \ GstTensorMemory mem; \ \ - mem.type = nns_type; \ - mem.size = gst_tensor_get_element_size (mem.type); \ + mem.size = gst_tensor_get_element_size (nns_type); \ for (int i = 0; i < NNS_TENSOR_RANK_LIMIT; ++i) { \ dims[i] = MOBINET_V2_IN_DIMS[i]; \ mem.size *= MOBINET_V2_IN_DIMS[i]; \ @@ -1098,9 +1097,9 @@ TEST (tensor_filter_openvino, convertFromIETypeStr_1_n) tensorTestDesc.setDims (dims); \ mem.data = (void *) g_malloc0 (mem.size); \ \ - ret = tfOvTest.convertGstTensorMemoryToBlobPtr (tensorTestDesc, &mem); \ + ret = tfOvTest.convertGstTensorMemoryToBlobPtr (tensorTestDesc, &mem, nns_type); \ EXPECT_EQ (mem.size, ret->byteSize ()); \ - EXPECT_EQ (gst_tensor_get_element_size (mem.type), ret->element_size ()); \ + EXPECT_EQ (gst_tensor_get_element_size (nns_type), ret->element_size ()); \ g_free (mem.data); \ } while (0); diff --git a/tests/tizen_nnfw_runtime/unittest_tizen_nnfw_runtime_raw.cc b/tests/tizen_nnfw_runtime/unittest_tizen_nnfw_runtime_raw.cc index 8f9284a..e9cd699 100644 --- a/tests/tizen_nnfw_runtime/unittest_tizen_nnfw_runtime_raw.cc +++ b/tests/tizen_nnfw_runtime/unittest_tizen_nnfw_runtime_raw.cc @@ -198,9 +198,6 @@ TEST (nnstreamer_nnfw_runtime_raw_functions, DISABLED_set_dimension) 
input.size = gst_tensor_info_get_size (&in_info.info[0]); output.size = gst_tensor_info_get_size (&out_info.info[0]); - input.type = in_info.info[0].type; - output.type = out_info.info[0].type; - input.data = g_malloc (input.size); output.data = g_malloc (output.size); @@ -246,7 +243,6 @@ TEST (nnstreamer_nnfw_runtime_raw_functions, invoke) const GstTensorFilterFramework *sp = nnstreamer_filter_find ("nnfw"); EXPECT_NE (sp, (void *) NULL); - output.type = input.type = _NNS_FLOAT32; output.size = input.size = sizeof (float) * 1; input.data = g_malloc (input.size); @@ -374,7 +370,6 @@ TEST (nnstreamer_nnfw_runtime_raw_functions, DISABLED_invoke_advanced) EXPECT_EQ (res.info[0].dimension[2], info.info[0].dimension[2]); EXPECT_EQ (res.info[0].dimension[3], info.info[0].dimension[3]); - input.type = res.info[0].type; input.size = gst_tensor_info_get_size (&res.info[0]); ret = sp->getOutputDimension (&prop, &data, &res); @@ -394,7 +389,6 @@ TEST (nnstreamer_nnfw_runtime_raw_functions, DISABLED_invoke_advanced) EXPECT_EQ (res.info[0].dimension[2], info.info[0].dimension[2]); EXPECT_EQ (res.info[0].dimension[3], info.info[0].dimension[3]); - output.type = res.info[0].type; output.size = gst_tensor_info_get_size (&res.info[0]); input.data = NULL;