From 388f1900ab3083e6119b9c350e8b293bfe2bb538 Mon Sep 17 00:00:00 2001
From: MyungJoo Ham
Date: Thu, 5 Jul 2018 16:51:19 +0900
Subject: [PATCH] [Filter] Change vmethod prototype to return outptr.

To prepare for subplugin output pointer allocation (#231),
change the vmethod (invoke_NN) prototype.

This prepares #231.

Signed-off-by: MyungJoo Ham
---
 gst/tensor_filter/tensor_filter.c                 | 43 ++++++++++++-----------
 gst/tensor_filter/tensor_filter.h                 |  8 +++--
 gst/tensor_filter/tensor_filter_custom.c          | 13 +++----
 gst/tensor_filter/tensor_filter_tensorflow_lite.c | 16 ++++-----
 4 files changed, 40 insertions(+), 40 deletions(-)

diff --git a/gst/tensor_filter/tensor_filter.c b/gst/tensor_filter/tensor_filter.c
index 1752a2c..88c589f 100644
--- a/gst/tensor_filter/tensor_filter.c
+++ b/gst/tensor_filter/tensor_filter.c
@@ -291,19 +291,15 @@ gst_tensor_filter_init (GstTensor_Filter * filter)
 /**
  * @brief Invoke callbacks of filter->prop.fw. Gurantees calling open for the first call.
  */
-#define gst_tensor_filter_call(filter, funcname, ...) ({ \
-    int __ret = 0; \
-    do { \
+#define gst_tensor_filter_call(filter, ret, funcname, ...) do { \
       if (filter->prop.fwOpened == FALSE) { \
         if (filter->prop.fw->open != NULL) \
           filter->prop.fw->open(filter, &filter->privateData); \
         filter->prop.fwOpened = TRUE; \
       } \
       g_assert(filter->prop.fwClosed != TRUE); \
-      __ret = filter->prop.fw->funcname(filter, &filter->privateData, __VA_ARGS__); \
-    } while(0); \
-    __ret; \
-})
+      ret = filter->prop.fw->funcname(filter, &filter->privateData, __VA_ARGS__); \
+    } while(0)
 
 /* @TODO Call this where appropriate */
 #define gst_tensor_filter_close(filter) \
@@ -449,11 +445,11 @@ gst_tensor_filter_fix_caps (GstTensor_Filter * filter, gboolean isInput,
 
   /* 3-1-1. Try get output dim for srcpad */
   if (prop->fw->getOutputDimension) {
-    ret = gst_tensor_filter_call (filter, getOutputDimension, rdim, &rtype);
+    gst_tensor_filter_call (filter, ret, getOutputDimension, rdim, &rtype);
     g_assert (ret == 0);
   } else if (configured == _TFC_ALL) {
     g_assert (prop->fw->setInputDimension);
-    ret = gst_tensor_filter_call (filter, setInputDimension, dimension, _type,
+    gst_tensor_filter_call (filter, ret, setInputDimension, dimension, _type,
         rdim, &rtype);
   } else {
     /* We do not have enough info for dimension */
@@ -481,7 +477,7 @@ gst_tensor_filter_fix_caps (GstTensor_Filter * filter, gboolean isInput,
 
     /* 3-1-1. Try get output dim for srcpad */
     if (prop->fw->getInputDimension) {
-      ret = gst_tensor_filter_call (filter, getInputDimension, rdim, &rtype);
+      gst_tensor_filter_call (filter, ret, getInputDimension, rdim, &rtype);
       g_assert (ret == 0);
     } else {
       /* We do not have output->input dimension conversion. */
@@ -538,7 +534,7 @@ gst_tensor_filter_check_consistency_fw (GstTensor_Filter * filter,
     return TRUE;                /* Nothing to check. FW is not configured, yet */
   if (checkInput == TRUE && fw->getInputDimension != NULL) {
-    ret = gst_tensor_filter_call (filter, getInputDimension, dim, &type);
+    gst_tensor_filter_call (filter, ret, getInputDimension, dim, &type);
     if (ret) {
       debug_print (TRUE,
           "getInputDimension failed (%d). But we can continue with it.\n",
          ret);
@@ -554,7 +550,7 @@
   }
 
   if (checkOutput == TRUE && fw->getOutputDimension != NULL) {
-    ret = gst_tensor_filter_call (filter, getOutputDimension, dim, &type);
+    gst_tensor_filter_call (filter, ret, getOutputDimension, dim, &type);
     if (ret) {
       debug_print (TRUE,
          "getOutputDimension failed (%d). But we can continue with it.\n",
@@ -774,9 +770,9 @@ gst_tensor_filter_transform (GstBaseTransform * trans,
     GstBuffer * inbuf, GstBuffer * outbuf)
 {
   GstTensor_Filter *filter = GST_TENSOR_FILTER_CAST (trans);
-  int ret;
   size_t outBufSize;
   uint8_t *inptr, *outptr;
+  uint8_t *retoutptr;
   GstMapInfo inInfo, outInfo;
 
   if (G_UNLIKELY (filter->prop.inputCapNegotiated == FALSE
@@ -796,6 +792,9 @@ gst_tensor_filter_transform (GstBaseTransform * trans,
   g_assert ((filter->prop.inputConfigured & _TFC_ALL) == _TFC_ALL &&
       (filter->prop.outputConfigured & _TFC_ALL) == _TFC_ALL);
 
+  /* allocate_in_invoke is NYI */
+  g_assert (filter->prop.fw->allocate_in_invoke == FALSE);
+
   /* 1. Allocate outbuf */
   g_assert (outbuf);
   outBufSize = tensor_element_size[filter->prop.outputType] *
@@ -814,14 +813,13 @@ gst_tensor_filter_transform (GstBaseTransform * trans,
   inptr = inInfo.data;
   outptr = outInfo.data;
 
-  ret = gst_tensor_filter_call (filter, invoke_NN, inptr, outptr);
+  gst_tensor_filter_call (filter, retoutptr, invoke_NN, inptr, outptr);
+  g_assert (outptr == retoutptr);
 
   gst_buffer_unmap (inbuf, &inInfo);
   gst_buffer_unmap (outbuf, &outInfo);
 
   /* 3. Return result! */
-  if (ret)
-    return GST_FLOW_ERROR;
   return GST_FLOW_OK;
 unknown_format:
   GST_ELEMENT_ERROR (filter, CORE, NOT_IMPLEMENTED, (NULL), ("unknown format"));
@@ -881,7 +879,7 @@ gst_tensor_filter_property_process (GstTensor_Filter * filter)
 
   if (fw->getInputDimension != NULL) {
     g_assert (fw->getOutputDimension != NULL);
-    ret = gst_tensor_filter_call (filter, getInputDimension, dim, &type);
+    gst_tensor_filter_call (filter, ret, getInputDimension, dim, &type);
     if (ret) {
       err_print ("getInputDimension() returns %d. Cannot proceed.\n", ret);
       return -1;
@@ -896,7 +894,7 @@ gst_tensor_filter_property_process (GstTensor_Filter * filter)
     prop->inputConfigured |= _TFC_DIMENSION;
   }
 
-  ret = gst_tensor_filter_call (filter, getOutputDimension, dim, &type);
+  gst_tensor_filter_call (filter, ret, getOutputDimension, dim, &type);
   if (ret) {
     err_print ("getOutputDimension() returns %d. Cannot proceed.\n", ret);
     return -1;
@@ -1100,7 +1098,8 @@ gst_tensor_filter_fixate_caps (GstBaseTransform * trans,
 
   /* Before moving on, use if getInputDim/getOutputDim is available. */
   if (fw->getInputDimension && (obj->prop.inputConfigured & _TFC_ALL) == _TFC_ALL) {
-    int ret = gst_tensor_filter_call (obj, getInputDimension,
+    int ret = 0;
+    gst_tensor_filter_call (obj, ret, getInputDimension,
         obj->prop.inputDimension, &obj->prop.inputType);
     if (ret == 0) {
       obj->prop.inputConfigured |= _TFC_ALL;
@@ -1108,7 +1107,8 @@ gst_tensor_filter_fixate_caps (GstBaseTransform * trans,
   }
 
   if (fw->getOutputDimension && (obj->prop.outputConfigured & _TFC_ALL) == _TFC_ALL) {
-    int ret = gst_tensor_filter_call (obj, getOutputDimension,
+    int ret = 0;
+    gst_tensor_filter_call (obj, ret, getOutputDimension,
         obj->prop.outputDimension, &obj->prop.outputType);
     if (ret == 0) {
       obj->prop.outputConfigured |= _TFC_ALL;
@@ -1128,7 +1128,8 @@ gst_tensor_filter_fixate_caps (GstBaseTransform * trans,
 
   if ((obj->prop.inputConfigured & _TFC_ALL) == _TFC_ALL) {
     if (fw->setInputDimension) {
-      int ret = gst_tensor_filter_call (obj, setInputDimension,
+      int ret = 0;
+      gst_tensor_filter_call (obj, ret, setInputDimension,
           obj->prop.inputDimension, obj->prop.inputType,
           obj->prop.outputDimension, &obj->prop.outputType);
       obj->prop.outputConfigured |= _TFC_ALL;
diff --git a/gst/tensor_filter/tensor_filter.h b/gst/tensor_filter/tensor_filter.h
index d8db5b9..2f40281 100644
--- a/gst/tensor_filter/tensor_filter.h
+++ b/gst/tensor_filter/tensor_filter.h
@@ -146,14 +146,16 @@ struct _GstTensor_Filter_Framework {
   gchar *name; /**< Name of the neural network framework, searchable by FRAMEWORK property */
   gboolean allow_in_place; /**< TRUE if InPlace transfer of input-to-output is allowed. Not supported in main, yet */
-  int (*invoke_NN)(const GstTensor_Filter *filter, void **private_data, const uint8_t *inputptr, uint8_t *outputptr);
+  gboolean allocate_in_invoke; /**< TRUE if invoke_NN is going to allocate outputptr by itself and return the address as its return value. NYI. */
+
+  uint8_t *(*invoke_NN)(const GstTensor_Filter *filter, void **private_data, const uint8_t *inputptr, uint8_t *outputptr);
     /**< Mandatory callback. Invoke the given network model.
      *
      * @param[in] filter "this" pointer. Use this to read property values
      * @param[in/out] private_data A subplugin may save its internal private data here. The subplugin is responsible for alloc/free of this pointer.
      * @param[in] inputptr Input tensor. Allocated and filled by tensor_filter/main
-     * @param[out] outputptr Output tensor. Allocated by tensor_filter/main and to be filled by invoke_NN.
-     * @return 0 if OK. non-zero if error.
+     * @param[out] outputptr Output tensor. Allocated by tensor_filter/main and to be filled by invoke_NN. Not used if allocate_in_invoke is TRUE.
+     * @return outputptr if OK (the buffer allocated by invoke_NN if allocate_in_invoke is TRUE). NULL if error.
      */
 
   int (*getInputDimension)(const GstTensor_Filter *filter, void **private_data,
       tensor_dim inputDimension, tensor_type *type);
diff --git a/gst/tensor_filter/tensor_filter_custom.c b/gst/tensor_filter/tensor_filter_custom.c
index ac1642d..828e733 100644
--- a/gst/tensor_filter/tensor_filter_custom.c
+++ b/gst/tensor_filter/tensor_filter_custom.c
@@ -149,7 +149,7 @@ custom_open (const GstTensor_Filter * filter, void **private_data)
  * @param[in] inptr The input tensor
  * @param[out] outptr The output tensor
  */
-static int
+static uint8_t *
 custom_invoke (const GstTensor_Filter * filter, void **private_data,
     const uint8_t * inptr, uint8_t * outptr)
 {
@@ -158,15 +158,15 @@ custom_invoke (const GstTensor_Filter * filter, void **private_data,
   /* Actually, tensor_filter must have called getInput/OotputDim first. */
   g_assert (retval == 1);
-
-  if (retval < 0)
-    return retval;
-
   g_assert (filter->privateData && *private_data == filter->privateData);
   ptr = *private_data;
-  return ptr->methods->invoke (ptr->customFW_private_data, &(filter->prop),
+  retval = ptr->methods->invoke (ptr->customFW_private_data, &(filter->prop),
       inptr, outptr);
+  if (retval == 0)
+    return outptr;
+  else
+    return NULL;
 }
 
 /**
@@ -244,6 +244,7 @@ custom_close (const GstTensor_Filter * filter, void **private_data)
 GstTensor_Filter_Framework NNS_support_custom = {
   .name = "custom",
   .allow_in_place = FALSE,      /* custom cannot support in-place (outptr == inptr). */
+  .allocate_in_invoke = FALSE,  /* Let tensor_filter allocate output buffers */
   .invoke_NN = custom_invoke,
 
   /* We need to disable getI/O-dim or setI-dim with the first call */
diff --git a/gst/tensor_filter/tensor_filter_tensorflow_lite.c b/gst/tensor_filter/tensor_filter_tensorflow_lite.c
index 65931ef..703cdcb 100644
--- a/gst/tensor_filter/tensor_filter_tensorflow_lite.c
+++ b/gst/tensor_filter/tensor_filter_tensorflow_lite.c
@@ -86,15 +86,14 @@ tflite_open (const GstTensor_Filter * filter, void **private_data)
 /**
  * @brief The mandatory callback for GstTensor_Filter_Framework
  */
-static int
+static uint8_t *
 tflite_invoke (const GstTensor_Filter * filter, void **private_data,
     const uint8_t * inptr, uint8_t * outptr)
 {
-  int retval = tflite_loadModelFile (filter, private_data);
   /* @TODO fill in *outputDimension (uint32_t[MAX_RANK]), *type */
   /* @TODO call tflite core apis */
-  return retval;                /* NYI */
+  return outptr;                /* NYI */
 }
 
 /**
@@ -104,11 +103,10 @@
 static int
 tflite_getInputDim (const GstTensor_Filter * filter, void **private_data,
     tensor_dim inputDimension, tensor_type * type)
 {
-  int retval = tflite_loadModelFile (filter, private_data);
   /* @TODO fill in *inputDimension (uint32_t[MAX_RANK]), *type */
   /* @TODO call tflite core api "tflite_getInputDim" */
-  return retval;                /* NYI */
+  return 0;                     /* NYI */
 }
 
 /**
@@ -118,11 +116,10 @@
 static int
 tflite_getOutputDim (const GstTensor_Filter * filter, void **private_data,
     tensor_dim outputDimension, tensor_type * type)
 {
-  int retval = tflite_loadModelFile (filter, private_data);
   /* @TODO fill in *outputDimension (uint32_t[MAX_RANK]), *type */
   /* @TODO call tflite core api "tflite_getOutputDim" */
-  return retval;                /* NYI */
+  return 0;                     /* NYI */
 }
 
 /**
@@ -133,10 +130,8 @@ tflite_setInputDim (const GstTensor_Filter * filter, void **private_data,
     const tensor_dim iDimension, const tensor_type iType,
     tensor_dim oDimension, tensor_type * oType)
 {
-
-  int retval = tflite_loadModelFile (filter, private_data);
   /* @TODO call tflite core apis */
-  return retval;                /* NYI */
+  return 0;                     /* NYI */
 }
 
 /**
@@ -152,6 +147,7 @@ tflite_close (const GstTensor_Filter * filter, void **private_data)
 GstTensor_Filter_Framework NNS_support_tensorflow_lite = {
   .name = "tensorflow-lite",
   .allow_in_place = FALSE,      /* Let's not do this yet. @TODO: support this to optimize performance later. */
+  .allocate_in_invoke = FALSE,  /* TFLite may need to use TRUE in the future. However, it is not supported yet. */
   .invoke_NN = tflite_invoke,
   .getInputDimension = tflite_getInputDim,
   .getOutputDimension = tflite_getOutputDim,
-- 
2.7.4
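
For reference, a minimal sketch of a subplugin invoke_NN under the new prototype. The names example_invoke and my_model_run are hypothetical and not part of this patch, and the allocate_in_invoke = TRUE path is still NYI:

#include <stdint.h>
#include "tensor_filter.h"

/* Hypothetical inference entry point of the wrapped framework (illustration only). */
extern int my_model_run (void *model, const uint8_t * in, uint8_t * out);

/**
 * @brief Example invoke_NN with allocate_in_invoke = FALSE:
 *        fill the outptr provided by tensor_filter/main and return it, or NULL on error.
 */
static uint8_t *
example_invoke (const GstTensor_Filter * filter, void **private_data,
    const uint8_t * inptr, uint8_t * outptr)
{
  (void) filter;                /* unused in this sketch */

  /* *private_data was prepared by this subplugin's open() callback. */
  if (my_model_run (*private_data, inptr, outptr) != 0)
    return NULL;                /* error */
  return outptr;                /* success: same pointer that was passed in */
}

Since tensor_filter/main currently asserts that the returned pointer equals outptr (see the g_assert (outptr == retoutptr) added above), returning a buffer allocated inside invoke_NN only becomes meaningful once allocate_in_invoke support (#231) lands.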