* instantiate pads and add them to element
* set pad callback functions
* initialize instance structure
+ * @todo change the first index [0] of input/output Dimension & Type to loop for multi tensors
*/
static void
gst_tensor_filter_init (GstTensor_Filter * filter)
prop->outputConfigured = _TFC_INIT;
prop->modelFilename = NULL;
- prop->inputDimension[0] = 1; /* innermost */
- prop->inputDimension[1] = 1;
- prop->inputDimension[2] = 1;
- prop->inputDimension[3] = 1; /* out */
- prop->inputType = _NNS_END; /* not initialized */
+ prop->inputDimension[0][0] = 1; /* innermost */
+ prop->inputDimension[0][1] = 1;
+ prop->inputDimension[0][2] = 1;
+ prop->inputDimension[0][3] = 1; /* out */
+ prop->inputType[0] = _NNS_END; /* not initialized */
prop->inputCapNegotiated = FALSE;
- prop->outputDimension[0] = 1; /* innermost */
- prop->outputDimension[1] = 1;
- prop->outputDimension[2] = 1;
- prop->outputDimension[3] = 1; /* out */
- prop->outputType = _NNS_END; /* not initialized */
+ prop->outputDimension[0][0] = 1; /* innermost */
+ prop->outputDimension[0][1] = 1;
+ prop->outputDimension[0][2] = 1;
+ prop->outputDimension[0][3] = 1; /* out */
+ prop->outputType[0] = _NNS_END; /* not initialized */
prop->outputCapNegotiated = FALSE;
prop->customProperties = NULL;
GstCaps * fromCaps)
{
tensor_type *type = NULL, _type;
- uint32_t *dimension;
- tensor_dim dim;
+ uint32_t *dimension[NNS_TENSOR_SIZE_LIMIT];
+ tensor_dim dim[NNS_TENSOR_SIZE_LIMIT];
GstTensor_Filter_CheckStatus configured = _TFC_INIT;
GstTensor_Filter_Properties *prop = &filter->prop;
GstCaps *tmp = NULL, *tmp2 = NULL, *staticcap = NULL, *resultCaps = NULL;
staticcap = gst_static_caps_get (&rawcap);
if (isInput == TRUE) {
- type = &(prop->inputType);
- dimension = prop->inputDimension;
+ type = prop->inputType;
+ int i;
+ for (i = 0; i < NNS_TENSOR_SIZE_LIMIT; i++) {
+ dimension[i] = prop->inputDimension[i];
+ }
configured = prop->inputConfigured & _TFC_ALL;
} else {
- type = &(prop->outputType);
- dimension = prop->outputDimension;
+ type = prop->outputType;
+ int i;
+ for (i = 0; i < NNS_TENSOR_SIZE_LIMIT; i++) {
+ dimension[i] = prop->outputDimension[i];
+ }
configured = prop->outputConfigured & _TFC_ALL;
}
/* 2. configure caps based on type & dimension */
if (configured == _TFC_ALL) {
- rank = gst_tensor_filter_get_rank (dimension);
+ rank = gst_tensor_filter_get_rank (dimension[0]);
tmp2 =
gst_caps_new_simple ("other/tensor", "rank", G_TYPE_INT, rank, "type",
- G_TYPE_STRING, tensor_element_typename[*type], "dim1", G_TYPE_INT,
- dimension[0], "dim2", G_TYPE_INT, dimension[1], "dim3", G_TYPE_INT,
- dimension[2], "dim4", G_TYPE_INT, dimension[3], NULL);
+ G_TYPE_STRING, tensor_element_typename[type[0]], "dim1", G_TYPE_INT,
+ dimension[0][0], "dim2", G_TYPE_INT, dimension[0][1], "dim3",
+ G_TYPE_INT, dimension[0][2], "dim4", G_TYPE_INT, dimension[0][3], NULL);
tmp = gst_caps_intersect_full (staticcap, tmp2, GST_CAPS_INTERSECT_FIRST);
gst_caps_unref (tmp2);
} else if (configured == _TFC_DIMENSION) {
- rank = gst_tensor_filter_get_rank (dimension);
+ rank = gst_tensor_filter_get_rank (dimension[0]);
tmp2 =
gst_caps_new_simple ("other/tensor", "rank", G_TYPE_INT, rank, "dim1",
- G_TYPE_INT, dimension[0], "dim2", G_TYPE_INT, dimension[1], "dim3",
- G_TYPE_INT, dimension[2], "dim4", G_TYPE_INT, dimension[3], NULL);
+ G_TYPE_INT, dimension[0][0], "dim2", G_TYPE_INT, dimension[0][1],
+ "dim3", G_TYPE_INT, dimension[0][2], "dim4", G_TYPE_INT,
+ dimension[0][3], NULL);
tmp = gst_caps_intersect_full (staticcap, tmp2, GST_CAPS_INTERSECT_FIRST);
gst_caps_unref (tmp2);
} else if (configured == _TFC_TYPE) {
tmp2 =
gst_caps_new_simple ("other/tensor", "type", G_TYPE_STRING,
- tensor_element_typename[*type], NULL);
+ tensor_element_typename[type[0]], NULL);
tmp = gst_caps_intersect_full (staticcap, tmp2, GST_CAPS_INTERSECT_FIRST);
gst_caps_unref (tmp2);
} else {
}
/* 2-2. Extract effective dim info from tmp */
- dimension = dim;
- configured = gst_tensor_filter_generate_dim_from_cap (tmp, dimension, &_type);
+ dimension[0] = dim[0];
+ configured =
+ gst_tensor_filter_generate_dim_from_cap (tmp, dimension[0], &_type);
configured &= _TFC_ALL;
/* tmp is no more needed */
gst_caps_unref (tmp);
gst_tensor_filter_call (filter, ret, getOutputDimension, rdim, &rtype);
/* 3-1-1-a. If inputdim is available but outputdim is not available */
if (ret != 0 && configured == _TFC_ALL && prop->fw->setInputDimension) {
- gst_tensor_filter_call (filter, ret, setInputDimension, dimension, _type,
- rdim, &rtype);
+ gst_tensor_filter_call (filter, ret, setInputDimension, dimension[0],
+ _type, rdim, &rtype);
}
/* if ret == 0, either get or set has been successful. */
if (ret != 0) {
/* Once configures, it cannot be changed in runtime */
{
int rank = get_tensor_dimension (g_value_get_string (value),
- prop->inputDimension);
+ prop->inputDimension[0]);
g_assert (rank > 0 && rank <= NNS_TENSOR_RANK_LIMIT);
prop->inputConfigured |= _TFC_DIMENSION;
silent_debug ("Input Prop: %d:%d:%d:%d Rank %d\n",
- prop->inputDimension[0], prop->inputDimension[1],
- prop->inputDimension[2], prop->inputDimension[3], rank);
+ prop->inputDimension[0][0], prop->inputDimension[0][1],
+ prop->inputDimension[0][2], prop->inputDimension[0][3], rank);
}
break;
case PROP_OUTPUT:
/* Once configures, it cannot be changed in runtime */
{
int rank = get_tensor_dimension (g_value_get_string (value),
- prop->outputDimension);
+ prop->outputDimension[0]);
g_assert (rank > 0 && rank <= NNS_TENSOR_RANK_LIMIT);
prop->outputConfigured |= _TFC_DIMENSION;
silent_debug ("Output Prop: %d:%d:%d:%d Rank %d\n",
- prop->outputDimension[0], prop->outputDimension[1],
- prop->outputDimension[2], prop->outputDimension[3], rank);
+ prop->outputDimension[0][0], prop->outputDimension[0][1],
+ prop->outputDimension[0][2], prop->outputDimension[0][3], rank);
}
break;
case PROP_INPUTTYPE:
- g_assert (prop->inputType == _NNS_END && value);
+ g_assert (prop->inputType[0] == _NNS_END && value);
/* Once configures, it cannot be changed in runtime */
- prop->inputType = get_tensor_type (g_value_get_string (value));
+ prop->inputType[0] = get_tensor_type (g_value_get_string (value));
prop->inputConfigured |= _TFC_TYPE;
- g_assert (prop->inputType != _NNS_END);
+ g_assert (prop->inputType[0] != _NNS_END);
break;
case PROP_OUTPUTTYPE:
- g_assert (prop->outputType == _NNS_END && value);
+ g_assert (prop->outputType[0] == _NNS_END && value);
/* Once configures, it cannot be changed in runtime */
- prop->outputType = get_tensor_type (g_value_get_string (value));
+ prop->outputType[0] = get_tensor_type (g_value_get_string (value));
prop->outputConfigured |= _TFC_TYPE;
- g_assert (prop->outputType != _NNS_END);
+ g_assert (prop->outputType[0] != _NNS_END);
break;
case PROP_CUSTOM:
g_assert (prop->customProperties == NULL && value);
g_array_sized_new (FALSE, FALSE, 4, NNS_TENSOR_RANK_LIMIT);
int i;
for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++)
- g_array_append_val (input, filter->prop.inputDimension[i]);
+ g_array_append_val (input, filter->prop.inputDimension[0][i]);
g_value_take_boxed (value, input);
/* take function hands the object over from here so that we don't need to free it. */
}
g_array_sized_new (FALSE, FALSE, 4, NNS_TENSOR_RANK_LIMIT);
int i;
for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++)
- g_array_append_val (output, filter->prop.outputDimension[i]);
+ g_array_append_val (output, filter->prop.outputDimension[0][i]);
g_value_take_boxed (value, output);
/* take function hands the object over from here so that we don't need to free it. */
}
break;
case PROP_INPUTTYPE:
g_value_set_string (value,
- tensor_element_typename[filter->prop.inputType]);
+ tensor_element_typename[filter->prop.inputType[0]]);
break;
case PROP_OUTPUTTYPE:
g_value_set_string (value,
- tensor_element_typename[filter->prop.outputType]);
+ tensor_element_typename[filter->prop.outputType[0]]);
break;
case PROP_CUSTOM:
g_value_set_string (value, filter->prop.customProperties);
g_assert (outbuf);
if (filter->prop.fw->allocate_in_invoke == FALSE) {
- outBufSize = tensor_element_size[filter->prop.outputType] *
- get_tensor_element_count (filter->prop.outputDimension);
+ outBufSize = tensor_element_size[filter->prop.outputType[0]] *
+ get_tensor_element_count (filter->prop.outputDimension[0]);
if (gst_buffer_get_size (outbuf) < outBufSize) {
/** @todo: write a routine to say aloud when this happens */
gst_buffer_set_size (outbuf, outBufSize);
/** @todo Performance: cache get_tensor_element_count * tensor_element_size */
mem = gst_memory_new_wrapped (0, retoutptr,
- get_tensor_element_count (filter->prop.outputDimension) *
- tensor_element_size[filter->prop.outputType],
+ get_tensor_element_count (filter->prop.outputDimension[0]) *
+ tensor_element_size[filter->prop.outputType[0]],
0,
- get_tensor_element_count (filter->prop.outputDimension) *
- tensor_element_size[filter->prop.outputType], NULL, NULL);
+ get_tensor_element_count (filter->prop.outputDimension[0]) *
+ tensor_element_size[filter->prop.outputType[0]], NULL, NULL);
gst_buffer_insert_memory (outbuf, -1, mem);
}
gst_tensor_filter_call (filter, ret, getInputDimension, dim, &type);
if (ret == 0) {
if (prop->inputConfigured & _TFC_TYPE)
- if (prop->inputType != type)
+ if (prop->inputType[0] != type)
return -1;
if (prop->inputConfigured & _TFC_DIMENSION)
for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++)
- if (prop->inputDimension[i] != dim[i])
+ if (prop->inputDimension[0][i] != dim[i])
return -1;
if (fixate && !(prop->inputConfigured & _TFC_TYPE)) {
- prop->inputType = type;
+ prop->inputType[0] = type;
prop->inputConfigured |= _TFC_TYPE;
}
if (fixate && !(prop->inputConfigured & _TFC_DIMENSION)) {
- memcpy (prop->inputDimension, dim, sizeof (dim));
+ memcpy (prop->inputDimension[0], dim, sizeof (dim));
prop->inputConfigured |= _TFC_DIMENSION;
}
}
gst_tensor_filter_call (filter, ret, getOutputDimension, dim, &type);
if (ret == 0) {
if (prop->outputConfigured & _TFC_TYPE)
- if (prop->outputType != type)
+ if (prop->outputType[0] != type)
return -1;
if (prop->outputConfigured & _TFC_DIMENSION)
for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++)
- if (prop->outputDimension[i] != dim[i])
+ if (prop->outputDimension[0][i] != dim[i])
return -1;
if (fixate && !(prop->outputConfigured & _TFC_TYPE)) {
- prop->outputType = type;
+ prop->outputType[0] = type;
prop->outputConfigured |= _TFC_TYPE;
}
if (fixate && !(prop->outputConfigured & _TFC_DIMENSION)) {
- memcpy (prop->outputDimension, dim, sizeof (dim));
+ memcpy (prop->outputDimension[0], dim, sizeof (dim));
prop->outputConfigured |= _TFC_DIMENSION;
}
}
tensor_type itype, *cmptype;
/* If filter's inputdimension is not clear, yet, we cannot proceed. try again later */
if ((prop->inputConfigured & _TFC_ALL) == _TFC_ALL) {
- cmpdim = &(prop->inputDimension);
- cmptype = &(prop->inputType);
+ cmpdim = &(prop->inputDimension[0]);
+ cmptype = &(prop->inputType[0]);
} else {
if (fw->getInputDimension != NULL) {
gst_tensor_filter_call (filter, ret, getInputDimension, idim, &itype);
goto finalize;
if (prop->outputConfigured & _TFC_TYPE) {
- if (prop->outputType != type)
+ if (prop->outputType[0] != type)
return -1;
}
if (prop->outputConfigured & _TFC_DIMENSION) {
for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) {
- if (prop->outputDimension[i] != dim[i])
+ if (prop->outputDimension[0][i] != dim[i])
return -1;
}
}
if (fixate && !(prop->outputConfigured & _TFC_TYPE)) {
- prop->outputType = type;
+ prop->outputType[0] = type;
prop->outputConfigured |= _TFC_TYPE;
}
if (fixate && !(prop->outputConfigured & _TFC_DIMENSION)) {
- memcpy (prop->outputDimension, dim, sizeof (dim));
+ memcpy (prop->outputDimension[0], dim, sizeof (dim));
prop->outputConfigured |= _TFC_DIMENSION;
}
}
{
if (input) {
prop->inputConfigured |=
- gst_tensor_filter_generate_dim_from_cap (caps, prop->inputDimension,
- &prop->inputType);
+ gst_tensor_filter_generate_dim_from_cap (caps, prop->inputDimension[0],
+ &prop->inputType[0]);
} else {
prop->outputConfigured |=
- gst_tensor_filter_generate_dim_from_cap (caps, prop->outputDimension,
- &prop->outputType);
+ gst_tensor_filter_generate_dim_from_cap (caps, prop->outputDimension[0],
+ &prop->outputType[0]);
}
}
&& (obj->prop.inputConfigured & _TFC_ALL) == _TFC_ALL) {
int ret = 0;
gst_tensor_filter_call (obj, ret, getInputDimension,
- obj->prop.inputDimension, &obj->prop.inputType);
+ obj->prop.inputDimension[0], &obj->prop.inputType[0]);
if (ret == 0) {
obj->prop.inputConfigured |= _TFC_ALL;
}
&& (obj->prop.outputConfigured & _TFC_ALL) == _TFC_ALL) {
int ret = 0;
gst_tensor_filter_call (obj, ret, getOutputDimension,
- obj->prop.outputDimension, &obj->prop.outputType);
+ obj->prop.outputDimension[0], &obj->prop.outputType[0]);
if (ret == 0) {
obj->prop.outputConfigured |= _TFC_ALL;
}
if (fw->setInputDimension) {
int ret = 0;
gst_tensor_filter_call (obj, ret, setInputDimension,
- obj->prop.inputDimension, obj->prop.inputType,
- obj->prop.outputDimension, &obj->prop.outputType);
+ obj->prop.inputDimension[0], obj->prop.inputType[0],
+ obj->prop.outputDimension[0], &obj->prop.outputType[0]);
obj->prop.outputConfigured |= _TFC_ALL;
g_assert (ret == 0);
return result;
#include <tensor_common.h>
G_BEGIN_DECLS
-
/* #defines don't like whitespacey bits */
#define GST_TYPE_TENSOR_FILTER \
(gst_tensor_filter_get_type())
#define GST_IS_TENSOR_FILTER_CLASS(klass) \
(G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_TENSOR_FILTER))
#define GST_TENSOR_FILTER_CAST(obj) ((GstTensor_Filter *)(obj))
-
typedef struct _GstTensor_Filter GstTensor_Filter;
typedef struct _GstTensor_FilterClass GstTensor_FilterClass;
-extern const char* nnfw_names[];
+extern const char *nnfw_names[];
/**
* @brief Internal data structure for tensor_filter instances.
*/
struct _GstTensor_Filter
{
- GstBaseTransform element; /**< This is the parent object */
+ GstBaseTransform element; /**< This is the parent object */
void *privateData; /**< NNFW plugin's private data is stored here */
*/
struct _GstTensor_FilterClass
{
- GstBaseTransformClass parent_class; /**< Inherits GstBaseTransformClass */
+ GstBaseTransformClass parent_class; /**< Inherits GstBaseTransformClass */
};
/**
gboolean allow_in_place; /**< TRUE if InPlace transfer of input-to-output is allowed. Not supported in main, yet */
gboolean allocate_in_invoke; /**< TRUE if invoke_NN is going to allocate outputptr by itself and return the address via outputptr. Do not change this value after cap negotiation is complete (or the stream has been started). */
- uint8_t *(*invoke_NN)(const GstTensor_Filter *filter, void **private_data, const uint8_t *inputptr, uint8_t *outputptr);
+ uint8_t *(*invoke_NN) (const GstTensor_Filter * filter, void **private_data,
+ const uint8_t * inputptr, uint8_t * outputptr);
/**< Mandatory callback. Invoke the given network model.
*
* @param[in] filter "this" pointer. Use this to read property values
* @return outputptr if allocate_in_invoke is TRUE; otherwise 0 if OK, non-zero if error.
*/
- int (*getInputDimension)(const GstTensor_Filter *filter, void **private_data, tensor_dim inputDimension, tensor_type *type);
+ int (*getInputDimension) (const GstTensor_Filter * filter,
+ void **private_data, tensor_dim inputDimension, tensor_type * type);
/**< Optional. Set NULL if not supported. Get dimension of input tensor
* If getInputDimension is NULL, setInputDimension must be defined.
* If getInputDimension is defined, it is recommended to define getOutputDimension
* @param[in/out] private_data A subplugin may save its internal private data here. The subplugin is responsible for alloc/free of this pointer.
* @param[out] inputDimension dimension of input tensor (return value)
* @param[out] type type of input tensor element (return value)
- * @return 0 if OK. non-zero if error.
+ * @return the size of input tensors
*/
- int (*getOutputDimension)(const GstTensor_Filter *filter, void **private_data, tensor_dim outputDimension, tensor_type *type);
+ int (*getOutputDimension) (const GstTensor_Filter * filter,
+ void **private_data, tensor_dim outputDimension, tensor_type * type);
/**< Optional. Set NULL if not supported. Get dimension of output tensor
* If getInputDimension is NULL, setInputDimension must be defined.
* If getInputDimension is defined, it is recommended to define getOutputDimension
* @param[in/out] private_data A subplugin may save its internal private data here. The subplugin is responsible for alloc/free of this pointer.
* @param[out] outputDimension dimension of output tensor (return value)
* @param[out] type type of output tensor element (return value)
- * @return 0 if OK. non-zero if error.
+ * @return the size of output tensors
*/
- int (*setInputDimension)(const GstTensor_Filter *filter, void **private_data, const tensor_dim inputDimension, const tensor_type inputType, tensor_dim outputDimension, tensor_type *outputType);
+ int (*setInputDimension) (const GstTensor_Filter * filter,
+ void **private_data, const tensor_dim inputDimension,
+ const tensor_type inputType, tensor_dim outputDimension,
+ tensor_type * outputType);
/**< Optional. Set Null if not supported. Tensor_filter::main will
* configure input dimension from pad-cap in run-time for the sub-plugin.
* Then, the sub-plugin is required to return corresponding output dimension
* @return 0 if OK. non-zero if error.
*/
- void (*open)(const GstTensor_Filter *filter, void **private_data);
+ void (*open) (const GstTensor_Filter * filter, void **private_data);
/**< Optional. tensor_filter.c will call this before any of other callbacks and will call once before calling close
*
* @param[in] filter "this" pointer. Use this to read property values
* @param[in/out] private_data A subplugin may save its internal private data here. The subplugin is responsible for alloc/free of this pointer. Normally, open() allocates memory for private_data.
*/
- void (*close)(const GstTensor_Filter *filter, void **private_data);
+ void (*close) (const GstTensor_Filter * filter, void **private_data);
/**< Optional. tensor_filter.c will not call other callbacks after calling close. Free-ing private_data is this function's responsibility. Set NULL after that.
*
* @param[in] filter "this" pointer. Use this to read property values
extern GstTensor_Filter_Framework *tensor_filter_supported[];
G_END_DECLS
-
#endif /* __GST_TENSOR_FILTER_H__ */