static void
gst_tensor_filter_init (GstTensor_Filter * filter)
{
+ int i;
GstTensor_Filter_Properties *prop = &filter->prop;
prop->silent = TRUE;
prop->outputConfigured = _TFC_INIT;
prop->modelFilename = NULL;
- prop->inputDimension[0][0] = 1; /* innermost */
- prop->inputDimension[0][1] = 1;
- prop->inputDimension[0][2] = 1;
- prop->inputDimension[0][3] = 1; /* out */
- prop->inputType[0] = _NNS_END; /* not initialized */
- prop->inputCapNegotiated = FALSE;
+ for (i = 0; i < NNS_TENSOR_SIZE_LIMIT; i++) {
+ prop->inputMeta.dims[i][0] = 1; /* innermost */
+ prop->inputMeta.dims[i][1] = 1;
+ prop->inputMeta.dims[i][2] = 1;
+ prop->inputMeta.dims[i][3] = 1; /* out */
+ prop->inputMeta.types[i] = _NNS_END; /* not initialized */
+
+ prop->outputMeta.dims[i][0] = 1; /* innermost */
+ prop->outputMeta.dims[i][1] = 1;
+ prop->outputMeta.dims[i][2] = 1;
+ prop->outputMeta.dims[i][3] = 1; /* out */
+ prop->outputMeta.types[i] = _NNS_END; /* not initialized */
+ }
- prop->outputDimension[0][0] = 1; /* innermost */
- prop->outputDimension[0][1] = 1;
- prop->outputDimension[0][2] = 1;
- prop->outputDimension[0][3] = 1; /* out */
- prop->outputType[0] = _NNS_END; /* not initialized */
+ prop->inputCapNegotiated = FALSE;
prop->outputCapNegotiated = FALSE;
prop->customProperties = NULL;
* @return the rank value
*/
static int
-gst_tensor_filter_get_rank (tensor_dim dimension)
+gst_tensor_filter_get_rank (const tensor_dim dimension)
{
int i = 0;
int rank = 0;
}
static GstTensor_Filter_CheckStatus
gst_tensor_filter_generate_dim_from_cap (GstCaps * caps, tensor_dim dim,
tensor_type * type);
/**
* @brief Find caps based on i/o configuration or from the 'other' cap
gst_tensor_filter_fix_caps (GstTensor_Filter * filter, gboolean isInput,
GstCaps * fromCaps)
{
-  tensor_type *type = NULL, _type;
+  tensor_type *type = NULL, _type[NNS_TENSOR_SIZE_LIMIT];
   uint32_t *dimension[NNS_TENSOR_SIZE_LIMIT];
tensor_dim dim[NNS_TENSOR_SIZE_LIMIT];
GstTensor_Filter_CheckStatus configured = _TFC_INIT;
GstTensor_Filter_Properties *prop = &filter->prop;
staticcap = gst_static_caps_get (&rawcap);
if (isInput == TRUE) {
- type = prop->inputType;
+ type = prop->inputMeta.types;
int i;
for (i = 0; i < NNS_TENSOR_SIZE_LIMIT; i++) {
- dimension[i] = prop->inputDimension[i];
+ dimension[i] = prop->inputMeta.dims[i];
}
configured = prop->inputConfigured & _TFC_ALL;
} else {
- type = prop->outputType;
+ type = prop->outputMeta.types;
int i;
for (i = 0; i < NNS_TENSOR_SIZE_LIMIT; i++) {
- dimension[i] = prop->outputDimension[i];
+ dimension[i] = prop->outputMeta.dims[i];
}
configured = prop->outputConfigured & _TFC_ALL;
}
/* 2-2. Extract effective dim info from tmp */
dimension[0] = dim[0];
configured =
- gst_tensor_filter_generate_dim_from_cap (tmp, dimension[0], &_type);
+ gst_tensor_filter_generate_dim_from_cap (tmp, dimension[0], &_type[0]);
configured &= _TFC_ALL;
/* tmp is no more needed */
gst_caps_unref (tmp);
/* 3. Calculate resultcap from fromcap. */
if (isInput == TRUE) {
/* result == srcpad (output) */
- tensor_dim rdim;
- tensor_type rtype;
+ GstTensor_TensorsMeta outputMeta;
int ret = -1;
/* 3-1-1. Try get output dim for srcpad */
- if (prop->fw->getOutputDimension)
- gst_tensor_filter_call (filter, ret, getOutputDimension, rdim, &rtype);
+ if (prop->fw->getOutputDimension) {
+ gst_tensor_filter_call (filter, ret, getOutputDimension, &outputMeta);
+ }
/* 3-1-1-a. If inputdim is available but outputdim is not available */
if (ret != 0 && configured == _TFC_ALL && prop->fw->setInputDimension) {
gst_tensor_filter_call (filter, ret, setInputDimension, dimension[0],
- _type, rdim, &rtype);
+ _type[0], outputMeta.dims[0], &outputMeta.types[0]);
}
/* if ret == 0, either get or set has been successful. */
if (ret != 0) {
/* 3-1.2. Configure resultCap from rdim/rtype */
if (resultCaps == NULL) {
- rank = gst_tensor_filter_get_rank (rdim);
+ rank = gst_tensor_filter_get_rank (outputMeta.dims[0]);
resultCaps =
gst_caps_new_simple ("other/tensor", "rank", G_TYPE_INT, rank, "type",
- G_TYPE_STRING, tensor_element_typename[rtype], "dim1", G_TYPE_INT,
- rdim[0], "dim2", G_TYPE_INT, rdim[1], "dim3", G_TYPE_INT,
- rdim[2], "dim4", G_TYPE_INT, rdim[3], NULL);
+ G_TYPE_STRING, tensor_element_typename[outputMeta.types[0]], "dim1",
+ G_TYPE_INT, outputMeta.dims[0][0], "dim2", G_TYPE_INT,
+ outputMeta.dims[0][1], "dim3", G_TYPE_INT, outputMeta.dims[0][2],
+ "dim4", G_TYPE_INT, outputMeta.dims[0][3], NULL);
}
} else {
/* result == sinkpad (input) */
- tensor_dim rdim;
- tensor_type rtype;
+ GstTensor_TensorsMeta meta;
int ret = -1;
/* 3-1-1. Try get output dim for srcpad */
- if (prop->fw->getInputDimension)
- gst_tensor_filter_call (filter, ret, getInputDimension, rdim, &rtype);
+ if (prop->fw->getInputDimension) {
+ gst_tensor_filter_call (filter, ret, getInputDimension, &meta);
+ }
if (ret != 0) {
/* We do not have output->input dimension conversion. */
/* knows nothing. This happens.. */
/* 3-1.2. Configure resultCap from rdim/rtype */
if (resultCaps == NULL) {
- rank = gst_tensor_filter_get_rank (rdim);
+        rank = gst_tensor_filter_get_rank (meta.dims[0]);
resultCaps =
gst_caps_new_simple ("other/tensor", "rank", G_TYPE_INT, rank,
- "type", G_TYPE_STRING, tensor_element_typename[rtype], "dim1",
- G_TYPE_INT, rdim[0], "dim2", G_TYPE_INT, rdim[1], "dim3",
- G_TYPE_INT, rdim[2], "dim4", G_TYPE_INT, rdim[3], NULL);
+ "type", G_TYPE_STRING, tensor_element_typename[meta.types[0]], "dim1",
+ G_TYPE_INT, meta.dims[0][0], "dim2", G_TYPE_INT, meta.dims[0][1],
+ "dim3", G_TYPE_INT, meta.dims[0][2], "dim4", G_TYPE_INT,
+ meta.dims[0][3], NULL);
}
}
/* Once configures, it cannot be changed in runtime */
{
int i;
- prop->inputTensorSize =
+ prop->inputMeta.num_tensors =
get_tensor_dimension (g_value_get_string (value),
- prop->inputDimension, prop->inputTensorRank);
- for (i = 0; i < prop->inputTensorSize; i++) {
- g_assert (prop->inputTensorRank[i] > 0
- && prop->inputTensorRank[i] <= NNS_TENSOR_RANK_LIMIT);
+ prop->inputMeta.dims, prop->inputMeta.ranks);
+ for (i = 0; i < prop->inputMeta.num_tensors; i++) {
+ g_assert (prop->inputMeta.ranks[i] > 0
+ && prop->inputMeta.ranks[i] <= NNS_TENSOR_RANK_LIMIT);
silent_debug ("Input Prop: %d:%d:%d:%d Rank %d\n",
- prop->inputDimension[i][0], prop->inputDimension[i][1],
- prop->inputDimension[i][2], prop->inputDimension[i][3],
- prop->inputTensorRank[i]);
+ prop->inputMeta.dims[i][0], prop->inputMeta.dims[i][1],
+ prop->inputMeta.dims[i][2], prop->inputMeta.dims[i][3],
+ prop->inputMeta.ranks[i]);
}
prop->inputConfigured |= _TFC_DIMENSION;
}
/* Once configures, it cannot be changed in runtime */
{
int i;
- prop->outputTensorSize =
+ prop->outputMeta.num_tensors =
get_tensor_dimension (g_value_get_string (value),
- prop->outputDimension, prop->outputTensorRank);
- for (i = 0; i < prop->outputTensorSize; i++) {
- g_assert (prop->outputTensorRank[i] > 0
- && prop->outputTensorRank[i] <= NNS_TENSOR_RANK_LIMIT);
+ prop->outputMeta.dims, prop->outputMeta.ranks);
+ for (i = 0; i < prop->outputMeta.num_tensors; i++) {
+ g_assert (prop->outputMeta.ranks[i] > 0
+ && prop->outputMeta.ranks[i] <= NNS_TENSOR_RANK_LIMIT);
silent_debug ("Output Prop: %d:%d:%d:%d Rank %d\n",
- prop->outputDimension[i][0], prop->outputDimension[i][1],
- prop->outputDimension[i][2], prop->outputDimension[i][3],
- prop->outputTensorRank[i]);
+ prop->outputMeta.dims[i][0], prop->outputMeta.dims[i][1],
+ prop->outputMeta.dims[i][2], prop->outputMeta.dims[i][3],
+ prop->outputMeta.ranks[i]);
}
prop->outputConfigured |= _TFC_DIMENSION;
}
break;
case PROP_INPUTTYPE:
- g_assert (prop->inputType[0] == _NNS_END && value);
+ g_assert (prop->inputMeta.types[0] == _NNS_END && value);
/* Once configures, it cannot be changed in runtime */
- prop->inputType[0] = get_tensor_type (g_value_get_string (value));
+ prop->inputMeta.types[0] = get_tensor_type (g_value_get_string (value));
prop->inputConfigured |= _TFC_TYPE;
- g_assert (prop->inputType[0] != _NNS_END);
+ g_assert (prop->inputMeta.types[0] != _NNS_END);
break;
case PROP_OUTPUTTYPE:
- g_assert (prop->outputType[0] == _NNS_END && value);
+ g_assert (prop->outputMeta.types[0] == _NNS_END && value);
/* Once configures, it cannot be changed in runtime */
- prop->outputType[0] = get_tensor_type (g_value_get_string (value));
+ prop->outputMeta.types[0] = get_tensor_type (g_value_get_string (value));
prop->outputConfigured |= _TFC_TYPE;
- g_assert (prop->outputType[0] != _NNS_END);
+ g_assert (prop->outputMeta.types[0] != _NNS_END);
break;
case PROP_CUSTOM:
g_assert (prop->customProperties == NULL && value);
g_array_sized_new (FALSE, FALSE, 4, NNS_TENSOR_RANK_LIMIT);
int i;
for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++)
- g_array_append_val (input, filter->prop.inputDimension[0][i]);
+ g_array_append_val (input, filter->prop.inputMeta.dims[0][i]);
g_value_take_boxed (value, input);
/* take function hands the object over from here so that we don't need to free it. */
}
g_array_sized_new (FALSE, FALSE, 4, NNS_TENSOR_RANK_LIMIT);
int i;
for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++)
- g_array_append_val (output, filter->prop.outputDimension[0][i]);
+ g_array_append_val (output, filter->prop.outputMeta.dims[0][i]);
g_value_take_boxed (value, output);
/* take function hands the object over from here so that we don't need to free it. */
}
break;
case PROP_INPUTTYPE:
g_value_set_string (value,
- tensor_element_typename[filter->prop.inputType[0]]);
+ tensor_element_typename[filter->prop.inputMeta.types[0]]);
break;
case PROP_OUTPUTTYPE:
g_value_set_string (value,
- tensor_element_typename[filter->prop.outputType[0]]);
+ tensor_element_typename[filter->prop.outputMeta.types[0]]);
break;
case PROP_CUSTOM:
g_value_set_string (value, filter->prop.customProperties);
g_assert (outbuf);
if (filter->prop.fw->allocate_in_invoke == FALSE) {
- outBufSize = tensor_element_size[filter->prop.outputType[0]] *
- get_tensor_element_count (filter->prop.outputDimension[0]);
+ outBufSize = tensor_element_size[filter->prop.outputMeta.types[0]] *
+ get_tensor_element_count (filter->prop.outputMeta.dims[0]);
if (gst_buffer_get_size (outbuf) < outBufSize) {
/** @todo: write a routine to say aloud when this happens */
gst_buffer_set_size (outbuf, outBufSize);
/** @todo Performance: cache get_tensor_element_count * tensor_element_size */
mem = gst_memory_new_wrapped (0, retoutptr,
- get_tensor_element_count (filter->prop.outputDimension[0]) *
- tensor_element_size[filter->prop.outputType[0]],
+ get_tensor_element_count (filter->prop.outputMeta.dims[0]) *
+ tensor_element_size[filter->prop.outputMeta.types[0]],
0,
- get_tensor_element_count (filter->prop.outputDimension[0]) *
- tensor_element_size[filter->prop.outputType[0]], NULL, NULL);
+ get_tensor_element_count (filter->prop.outputMeta.dims[0]) *
+ tensor_element_size[filter->prop.outputMeta.types[0]], NULL, NULL);
gst_buffer_insert_memory (outbuf, -1, mem);
}
GstTensor_Filter_Framework *fw = filter->prop.fw;
GstTensor_Filter_Properties *prop = &filter->prop;
int ret;
- tensor_dim dim;
- tensor_type type;
- int i;
+ GstTensor_TensorsMeta meta;
+ int i, tensor_idx;
/* Ensure the subplugin is contacted first before checking the XOR assert */
if (!prop->fwOpened && fw->open)
prop->fwOpened = TRUE;
if (fw->getInputDimension != NULL) {
- gst_tensor_filter_call (filter, ret, getInputDimension, dim, &type);
+ gst_tensor_filter_call (filter, ret, getInputDimension, &meta);
if (ret == 0) {
- if (prop->inputConfigured & _TFC_TYPE)
- if (prop->inputType[0] != type)
- return -1;
- if (prop->inputConfigured & _TFC_DIMENSION)
- for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++)
- if (prop->inputDimension[0][i] != dim[i])
+ for (tensor_idx = 0; tensor_idx < meta.num_tensors; tensor_idx++) {
+ if (prop->inputConfigured & _TFC_TYPE)
+ if (prop->inputMeta.types[tensor_idx] != meta.types[tensor_idx]) {
return -1;
- if (fixate && !(prop->inputConfigured & _TFC_TYPE)) {
- prop->inputType[0] = type;
- prop->inputConfigured |= _TFC_TYPE;
- }
- if (fixate && !(prop->inputConfigured & _TFC_DIMENSION)) {
- memcpy (prop->inputDimension[0], dim, sizeof (dim));
- prop->inputConfigured |= _TFC_DIMENSION;
+ }
+ if (prop->inputConfigured & _TFC_DIMENSION)
+ for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++)
+ if (prop->inputMeta.dims[tensor_idx][i] != meta.dims[tensor_idx][i]) {
+ return -1;
+ }
+ if (fixate && !(prop->inputConfigured & _TFC_TYPE)) {
+ prop->inputMeta.types[tensor_idx] = meta.types[tensor_idx];
+ prop->inputConfigured |= _TFC_TYPE;
+ }
+ if (fixate && !(prop->inputConfigured & _TFC_DIMENSION)) {
+ memcpy (prop->inputMeta.dims[tensor_idx], meta.dims[tensor_idx],
+ sizeof (meta.dims[tensor_idx]));
+ prop->inputConfigured |= _TFC_DIMENSION;
+ }
}
}
}
if (fw->getOutputDimension != NULL) {
- gst_tensor_filter_call (filter, ret, getOutputDimension, dim, &type);
+ gst_tensor_filter_call (filter, ret, getOutputDimension, &meta);
if (ret == 0) {
- if (prop->outputConfigured & _TFC_TYPE)
- if (prop->outputType[0] != type)
- return -1;
- if (prop->outputConfigured & _TFC_DIMENSION)
- for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++)
- if (prop->outputDimension[0][i] != dim[i])
+ for (tensor_idx = 0; tensor_idx < meta.num_tensors; tensor_idx++) {
+ if (prop->outputConfigured & _TFC_TYPE)
+ if (prop->outputMeta.types[tensor_idx] != meta.types[tensor_idx]) {
return -1;
- if (fixate && !(prop->outputConfigured & _TFC_TYPE)) {
- prop->outputType[0] = type;
- prop->outputConfigured |= _TFC_TYPE;
- }
- if (fixate && !(prop->outputConfigured & _TFC_DIMENSION)) {
- memcpy (prop->outputDimension[0], dim, sizeof (dim));
- prop->outputConfigured |= _TFC_DIMENSION;
+ }
+ if (prop->outputConfigured & _TFC_DIMENSION)
+ for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++)
+ if (prop->outputMeta.dims[tensor_idx][i] !=
+ meta.dims[tensor_idx][i]) {
+ return -1;
+ }
+ if (fixate && !(prop->outputConfigured & _TFC_TYPE)) {
+ prop->outputMeta.types[tensor_idx] = meta.types[tensor_idx];
+ prop->outputConfigured |= _TFC_TYPE;
+ }
+ if (fixate && !(prop->outputConfigured & _TFC_DIMENSION)) {
+ memcpy (prop->outputMeta.dims[tensor_idx], meta.dims[tensor_idx],
+ sizeof (meta.dims[tensor_idx]));
+ prop->outputConfigured |= _TFC_DIMENSION;
+ }
}
}
}
if (fw->setInputDimension != NULL) {
-    tensor_dim idim, *cmpdim;
-    tensor_type itype, *cmptype;
+    GstTensor_TensorsMeta cmpStorage;
+    GstTensor_TensorsMeta *cmpMeta;
    /* If filter's inputdimension is not clear, yet, we cannot proceed. try again later */
    if ((prop->inputConfigured & _TFC_ALL) == _TFC_ALL) {
-      cmpdim = &(prop->inputDimension[0]);
-      cmptype = &(prop->inputType[0]);
+      /* compare against the configured INPUT meta, as the original code did */
+      cmpMeta = &prop->inputMeta;
    } else {
      if (fw->getInputDimension != NULL) {
-        gst_tensor_filter_call (filter, ret, getInputDimension, idim, &itype);
+        /* fetch into separate storage so the input args and the meta output
+         * args of setInputDimension below never alias */
+        gst_tensor_filter_call (filter, ret, getInputDimension, &cmpStorage);
        if (ret != 0)
          goto finalize;
-        cmpdim = &idim;
-        cmptype = &itype;
+        cmpMeta = &cmpStorage;
      } else {
        /* Nothing to do here */
        goto finalize;
      }
    }
-    gst_tensor_filter_call (filter, ret, setInputDimension, *cmpdim, *cmptype,
-        dim, &type);
+    gst_tensor_filter_call (filter, ret, setInputDimension, cmpMeta->dims[0],
+        cmpMeta->types[0], meta.dims[0], &meta.types[0]);
if (ret != 0)
goto finalize;
if (prop->outputConfigured & _TFC_TYPE) {
- if (prop->outputType[0] != type)
+ if (prop->outputMeta.types[0] != meta.types[0]) {
return -1;
+ }
}
if (prop->outputConfigured & _TFC_DIMENSION) {
for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) {
- if (prop->outputDimension[0][i] != dim[i])
+ if (prop->outputMeta.dims[0][i] != meta.dims[0][i]) {
return -1;
+ }
}
}
if (fixate && !(prop->outputConfigured & _TFC_TYPE)) {
- prop->outputType[0] = type;
+ prop->outputMeta.types[0] = meta.types[0];
prop->outputConfigured |= _TFC_TYPE;
}
if (fixate && !(prop->outputConfigured & _TFC_DIMENSION)) {
- memcpy (prop->outputDimension[0], dim, sizeof (dim));
+ memcpy (prop->outputMeta.dims[0], meta.dims[0], sizeof (meta.dims[0]));
prop->outputConfigured |= _TFC_DIMENSION;
}
}
* @param[out] type tensor type derived from caps
*/
static GstTensor_Filter_CheckStatus
gst_tensor_filter_generate_dim_from_cap (GstCaps * caps, tensor_dim dim,
tensor_type * type)
{
unsigned int i, capsize;
{
if (input) {
prop->inputConfigured |=
- gst_tensor_filter_generate_dim_from_cap (caps, prop->inputDimension[0],
- &prop->inputType[0]);
+ gst_tensor_filter_generate_dim_from_cap (caps, prop->inputMeta.dims[0],
+ &prop->inputMeta.types[0]);
} else {
prop->outputConfigured |=
- gst_tensor_filter_generate_dim_from_cap (caps, prop->outputDimension[0],
- &prop->outputType[0]);
+ gst_tensor_filter_generate_dim_from_cap (caps, prop->outputMeta.dims[0],
+ &prop->outputMeta.types[0]);
}
}
GstTensor_Filter_Framework *fw = obj->prop.fw;
GstCaps *sinkpadcap, *srcpadcap;
int check = gst_tensor_filter_property_process (obj, TRUE);
+ GstTensor_TensorsMeta meta;
gst_caps_unref (supposed);
g_assert (check >= 0);
  if (fw->getInputDimension
      && (obj->prop.inputConfigured & _TFC_ALL) == _TFC_ALL) {
    int ret = 0;
-    gst_tensor_filter_call (obj, ret, getInputDimension,
-        obj->prop.inputDimension[0], &obj->prop.inputType[0]);
+    int tensor_idx;
+    gst_tensor_filter_call (obj, ret, getInputDimension, &meta);
    if (ret == 0) {
+      /* copy only after the call succeeded; meta is uninitialized on failure */
+      for (tensor_idx = 0; tensor_idx < meta.num_tensors; tensor_idx++) {
+        memcpy (obj->prop.inputMeta.dims[tensor_idx], meta.dims[tensor_idx],
+            sizeof (meta.dims[tensor_idx]));
+        obj->prop.inputMeta.types[tensor_idx] = meta.types[tensor_idx];
+      }
      obj->prop.inputConfigured |= _TFC_ALL;
    }
  if (fw->getOutputDimension
      && (obj->prop.outputConfigured & _TFC_ALL) == _TFC_ALL) {
    int ret = 0;
-    gst_tensor_filter_call (obj, ret, getOutputDimension,
-        obj->prop.outputDimension[0], &obj->prop.outputType[0]);
+    int tensor_idx;
+    gst_tensor_filter_call (obj, ret, getOutputDimension, &meta);
    if (ret == 0) {
+      /* copy only after the call succeeded; meta is uninitialized on failure */
+      for (tensor_idx = 0; tensor_idx < meta.num_tensors; tensor_idx++) {
+        memcpy (obj->prop.outputMeta.dims[tensor_idx], meta.dims[tensor_idx],
+            sizeof (meta.dims[tensor_idx]));
+        obj->prop.outputMeta.types[tensor_idx] = meta.types[tensor_idx];
+      }
      obj->prop.outputConfigured |= _TFC_ALL;
    }
if (fw->setInputDimension) {
int ret = 0;
gst_tensor_filter_call (obj, ret, setInputDimension,
- obj->prop.inputDimension[0], obj->prop.inputType[0],
- obj->prop.outputDimension[0], &obj->prop.outputType[0]);
+ obj->prop.inputMeta.dims[0], obj->prop.inputMeta.types[0],
+ obj->prop.outputMeta.dims[0], &obj->prop.outputMeta.types[0]);
obj->prop.outputConfigured |= _TFC_ALL;
g_assert (ret == 0);
return result;
*/
int (*getInputDimension) (const GstTensor_Filter * filter,
- void **private_data, tensor_dim inputDimension, tensor_type * type);
+ void **private_data, GstTensor_TensorsMeta * meta);
/**< Optional. Set NULL if not supported. Get dimension of input tensor
* If getInputDimension is NULL, setInputDimension must be defined.
* If getInputDimension is defined, it is recommended to define getOutputDimension
* @return the size of input tensors
*/
int (*getOutputDimension) (const GstTensor_Filter * filter,
- void **private_data, tensor_dim outputDimension, tensor_type * type);
+ void **private_data, GstTensor_TensorsMeta * meta);
/**< Optional. Set NULL if not supported. Get dimension of output tensor
* If getInputDimension is NULL, setInputDimension must be defined.
* If getInputDimension is defined, it is recommended to define getOutputDimension
uint8_t *retptr = ptr->methods->allocate_invoke (ptr->customFW_private_data,
&(filter->prop), inptr, &size);
g_assert (size ==
- (get_tensor_element_count (filter->prop.outputDimension[0]) *
- tensor_element_size[filter->prop.outputType[0]]));
+ (get_tensor_element_count (filter->prop.outputMeta.dims[0]) *
+ tensor_element_size[filter->prop.outputMeta.types[0]]));
return retptr;
} else {
return NULL;
*/
static int
custom_getInputDim (const GstTensor_Filter * filter, void **private_data,
- tensor_dim inputDimension, tensor_type * type)
+ GstTensor_TensorsMeta * meta)
{
int retval = custom_loadlib (filter, private_data);
internal_data *ptr;
g_assert (filter->privateData && *private_data == filter->privateData);
ptr = *private_data;
- if (ptr->methods->getInputDim == NULL)
+ if (ptr->methods->getInputDim == NULL) {
return -1;
+ }
return ptr->methods->getInputDim (ptr->customFW_private_data, &(filter->prop),
- inputDimension, type);
+ meta);
}
/**
*/
static int
custom_getOutputDim (const GstTensor_Filter * filter, void **private_data,
- tensor_dim outputDimension, tensor_type * type)
+ GstTensor_TensorsMeta * meta)
{
int retval = custom_loadlib (filter, private_data);
internal_data *ptr;
g_assert (filter->privateData && *private_data == filter->privateData);
ptr = *private_data;
- if (ptr->methods->getOutputDim == NULL)
+ if (ptr->methods->getOutputDim == NULL) {
return -1;
+ }
return ptr->methods->getOutputDim (ptr->customFW_private_data,
- &(filter->prop), outputDimension, type);
+ &(filter->prop), meta);
}
/**
*/
static int
tf_getInputDim (const GstTensor_Filter * filter, void **private_data,
- tensor_dim inputDimension, tensor_type * type)
+ GstTensor_TensorsMeta * meta)
{
int temp_idx = 0;
tf_data *tf;
else
temp_idx = 0;
g_assert (filter->privateData && *private_data == filter->privateData);
- return tf_core_getInputDim (tf->tf_private_data, temp_idx,
- inputDimension, type);
+ return tf_core_getInputDim (tf->tf_private_data, meta->dims[0],
+ &meta->types[0], &meta->num_tensors);
}
/**
*/
static int
tf_getOutputDim (const GstTensor_Filter * filter, void **private_data,
- tensor_dim outputDimension, tensor_type * type)
+ GstTensor_TensorsMeta * meta)
{
int temp_idx = 0;
tf_data *tf;
else
temp_idx = 0;
g_assert (filter->privateData && *private_data == filter->privateData);
- return tf_core_getOutputDim (tf->tf_private_data, temp_idx,
- outputDimension, type);
+ return tf_core_getOutputDim (tf->tf_private_data, meta->dims[0],
+ &meta->types[0], &meta->num_tensors);
}
/**
TFCore::TFCore (const char *_model_path)
{
model_path = _model_path;
- input_idx_list_len = 0;
- output_idx_list_len = 0;
loadModel ();
}
*/
TFCore::~TFCore ()
{
- delete[]input_idx_list;
- delete[]output_idx_list;
}
/**
* @return 0 if OK. non-zero if error.
*/
int
-TFCore::getInputTensorDim (int idx, tensor_dim dim, tensor_type * type)
+TFCore::getInputTensorDim (tensor_dim dim, tensor_type * type,
+    unsigned int *num_tensors)
{
-  if (idx >= input_size) {
-    return -1;
-  }
-  int ret = getTensorDim (input_idx_list[idx], dim, type);
+  int ret = getTensorDim (dim, type);
+  /* TF path reports a single tensor for now; callers iterate num_tensors,
+   * so it must never be left uninitialized */
+  *num_tensors = 1;
  return ret;
}
* @return 0 if OK. non-zero if error.
*/
int
-TFCore::getOutputTensorDim (int idx, tensor_dim dim, tensor_type * type)
+TFCore::getOutputTensorDim (tensor_dim dim, tensor_type * type,
+    unsigned int *num_tensors)
{
-  if (idx >= output_size) {
-    return -1;
-  }
-  int ret = getTensorDim (output_idx_list[idx], dim, type);
+  int ret = getTensorDim (dim, type);
+  /* TF path reports a single tensor for now; callers iterate num_tensors,
+   * so it must never be left uninitialized */
+  *num_tensors = 1;
  return ret;
}
* @return 0 if OK. non-zero if error.
*/
int
-TFCore::getTensorDim (int tensor_idx, tensor_dim dim, tensor_type * type)
+TFCore::getTensorDim (tensor_dim dim, tensor_type * type)
{
return 0;
* @return 0 if OK. non-zero if error.
*/
int
-tf_core_getInputDim (void *tf, int idx, tensor_dim dim, tensor_type * type)
+tf_core_getInputDim (void *tf, tensor_dim dim, tensor_type * type,
+ unsigned int *num_tensors)
{
TFCore *c = (TFCore *) tf;
- return c->getInputTensorDim (idx, dim, type);
+ return c->getInputTensorDim (dim, type, num_tensors);
}
/**
* @return 0 if OK. non-zero if error.
*/
int
-tf_core_getOutputDim (void *tf, int idx, tensor_dim dim, tensor_type * type)
+tf_core_getOutputDim (void *tf, tensor_dim dim, tensor_type * type,
+ unsigned int *num_tensors)
{
TFCore *c = (TFCore *) tf;
- return c->getOutputTensorDim (idx, dim, type);
+ return c->getOutputTensorDim (dim, type, num_tensors);
}
/**
*/
static int
tflite_getInputDim (const GstTensor_Filter * filter, void **private_data,
- tensor_dim inputDimension, tensor_type * type)
+ GstTensor_TensorsMeta * meta)
{
tflite_data *tf;
tf = *private_data;
g_assert (filter->privateData && *private_data == filter->privateData);
- return tflite_core_getInputDim (tf->tflite_private_data, inputDimension,
- type);
+ int ret = tflite_core_getInputDim (tf->tflite_private_data, meta);
+ return ret;
}
/**
*/
static int
tflite_getOutputDim (const GstTensor_Filter * filter, void **private_data,
- tensor_dim outputDimension, tensor_type * type)
+ GstTensor_TensorsMeta * meta)
{
tflite_data *tf;
tf = *private_data;
g_assert (filter->privateData && *private_data == filter->privateData);
- return tflite_core_getOutputDim (tf->tflite_private_data, outputDimension,
- type);
+ int ret = tflite_core_getOutputDim (tf->tflite_private_data, meta);
+ return ret;
}
/**
TFLiteCore::setInputTensorProp ()
{
auto input_idx_list = interpreter->inputs ();
- inputTensorSize = input_idx_list.size ();
+ inputTensorMeta.num_tensors = input_idx_list.size ();
- for (int i = 0; i < inputTensorSize; i++) {
- inputTensorRank[i] = NNS_TENSOR_RANK_LIMIT;
+ for (int i = 0; i < inputTensorMeta.num_tensors; i++) {
+ inputTensorMeta.ranks[i] = NNS_TENSOR_RANK_LIMIT;
- if (getTensorDim (input_idx_list[i], inputDimension[i],
- &inputTensorRank[i])) {
+ if (getTensorDim (input_idx_list[i], inputTensorMeta.dims[i],
+ &inputTensorMeta.ranks[i])) {
return -1;
}
- inputType[i] =
+ inputTensorMeta.types[i] =
getTensorType (interpreter->tensor (input_idx_list[i])->type);
- }
+
#if (DBG)
- if (ret) {
- _print_log ("Failed to getInputTensorDim");
- } else {
- _print_log ("InputTensorDim idx[%d] type[%d] dim[%d:%d:%d:%d]",
- idx, *type, dim[0], dim[1], dim[2], dim[3]);
- }
+ _print_log ("inputTensorMeta[%d] >> type:%d, dim[%d:%d:%d:%d], rank: %d",
+ i, inputTensorMeta.types[i], inputTensorMeta.dims[i][0],
+ inputTensorMeta.dims[i][1], inputTensorMeta.dims[i][2],
+ inputTensorMeta.dims[i][3], inputTensorMeta.ranks[i]);
#endif
+ }
return 0;
}
TFLiteCore::setOutputTensorProp ()
{
auto output_idx_list = interpreter->outputs ();
- outputTensorSize = output_idx_list.size ();
+ outputTensorMeta.num_tensors = output_idx_list.size ();
- for (int i = 0; i < outputTensorSize; i++) {
- outputTensorRank[i] = NNS_TENSOR_RANK_LIMIT;
+ for (int i = 0; i < outputTensorMeta.num_tensors; i++) {
+ outputTensorMeta.ranks[i] = NNS_TENSOR_RANK_LIMIT;
- if (getTensorDim (output_idx_list[i], outputDimension[i],
- &outputTensorRank[i])) {
+ if (getTensorDim (output_idx_list[i], outputTensorMeta.dims[i],
+ &outputTensorMeta.ranks[i])) {
return -1;
}
- outputType[i] =
+ outputTensorMeta.types[i] =
getTensorType (interpreter->tensor (output_idx_list[i])->type);
- }
+
#if (DBG)
- if (ret) {
- _print_log ("Failed to getOutputTensorDim");
- } else {
- _print_log ("OutputTensorDim idx[%d] type[%d] dim[%d:%d:%d:%d]",
- idx, *type, dim[0], dim[1], dim[2], dim[3]);
- }
+ _print_log ("outputTensorMeta[%d] >> type:%d, dim[%d:%d:%d:%d], rank: %d",
+ i, outputTensorMeta.types[i], outputTensorMeta.dims[i][0],
+ outputTensorMeta.dims[i][1], outputTensorMeta.dims[i][2],
+ outputTensorMeta.dims[i][3], outputTensorMeta.ranks[i]);
#endif
+ }
return 0;
}
* @return 0 if OK. non-zero if error.
*/
int
-TFLiteCore::getTensorDim (int tensor_idx, tensor_dim dim, unsigned int *rank)
+TFLiteCore::getTensorDim (int tensor_idx, tensor_dim dim, int *rank)
{
int len = interpreter->tensor (tensor_idx)->dims->size;
*rank = len;
int
TFLiteCore::getInputTensorSize ()
{
- return inputTensorSize;
+ return inputTensorMeta.num_tensors;
}
/**
int
TFLiteCore::getOutputTensorSize ()
{
- return outputTensorSize;
+ return outputTensorMeta.num_tensors;
}
/**
* @param[out] dim : the array of the input tensors
* @param[out] type : the data type of the input tensors
* @todo : return whole array rather than index 0
- * @return the number of input tensors;
+ * @return 0 if OK. non-zero if error.
*/
int
-TFLiteCore::getInputTensorDim (tensor_dim dim, tensor_type * type)
+TFLiteCore::getInputTensorDim (GstTensor_TensorsMeta * meta)
{
- memcpy (dim, inputDimension[0], sizeof (tensor_dim));
- *type = inputType[0];
- printf ("[IN]\nDim: %d %d %d %d \nType: %d \nRank: %u\n",
- inputDimension[0][0], inputDimension[0][1], inputDimension[0][2],
- inputDimension[0][3], inputType[0], inputTensorRank[0]);
- return inputTensorSize;
+ for (int i = 0; i < inputTensorMeta.num_tensors; i++) {
+ memcpy (meta->dims[i], inputTensorMeta.dims[i], sizeof (meta->dims[i]));
+ meta->types[i] = inputTensorMeta.types[i];
+ meta->ranks[i] = inputTensorMeta.ranks[i];
+ }
+ meta->num_tensors = inputTensorMeta.num_tensors;
+ return 0;
}
/**
* @param[out] dim : the array of the tensors
* @param[out] type : the data type of the tensors
* @todo : return whole array rather than index 0
- * @return the number of output tensors;
+ * @return 0 if OK. non-zero if error.
*/
int
-TFLiteCore::getOutputTensorDim (tensor_dim dim, tensor_type * type)
+TFLiteCore::getOutputTensorDim (GstTensor_TensorsMeta * meta)
{
- memcpy (dim, outputDimension[0], sizeof (tensor_dim));
- *type = outputType[0];
- printf ("[OUT]\nDim: %d %d %d %d \nType: %d \nRank: %u\n",
- outputDimension[0][0], outputDimension[0][1], outputDimension[0][2],
- outputDimension[0][3], outputType[0], outputTensorRank[0]);
- return outputTensorSize;
+ for (int i = 0; i < outputTensorMeta.num_tensors; i++) {
+    memcpy (meta->dims[i], outputTensorMeta.dims[i], sizeof (meta->dims[i]));
+ meta->types[i] = outputTensorMeta.types[i];
+ meta->ranks[i] = outputTensorMeta.ranks[i];
+ }
+ meta->num_tensors = outputTensorMeta.num_tensors;
+ return 0;
}
/**
int sizeOfArray = NNS_TENSOR_RANK_LIMIT;
for (int i = 0; i < sizeOfArray; i++) {
- output_number_of_pixels *= inputDimension[0][i];
+ output_number_of_pixels *= inputTensorMeta.dims[0][i];
}
for (int i = 0; i < getInputTensorSize (); i++) {
inputTensors[0] = inptr;
for (int j = 0; j < output_number_of_pixels; j++) {
- if (inputType[i] == _NNS_FLOAT32) {
+ if (inputTensorMeta.types[i] == _NNS_FLOAT32) {
(interpreter->typed_tensor < float >(input))[j] =
((float) inputTensors[i][j] - 127.5f) / 127.5f;
- } else if (inputType[i] == _NNS_UINT8) {
+ } else if (inputTensorMeta.types[i] == _NNS_UINT8) {
(interpreter->typed_tensor < uint8_t > (input))[j] = inputTensors[i][j];
}
}
return -3;
}
- for (int i = 0; i < outputTensorSize; i++) {
+ for (int i = 0; i < outputTensorMeta.num_tensors; i++) {
- if (outputType[i] == _NNS_FLOAT32) {
+ if (outputTensorMeta.types[i] == _NNS_FLOAT32) {
outputTensors[i] =
(uint8_t *) interpreter->typed_output_tensor < float >(i);
- } else if (outputType[i] == _NNS_UINT8) {
+ } else if (outputTensorMeta.types[i] == _NNS_UINT8) {
outputTensors[i] = interpreter->typed_output_tensor < uint8_t > (i);
}
}
* @return 0 if OK. non-zero if error.
*/
int
-tflite_core_getInputDim (void *tflite, tensor_dim dim, tensor_type * type)
+tflite_core_getInputDim (void *tflite, GstTensor_TensorsMeta * meta)
{
TFLiteCore *c = (TFLiteCore *) tflite;
- int ret = c->getInputTensorDim (dim, type);
+ int ret = c->getInputTensorDim (meta);
return ret;
}
* @return 0 if OK. non-zero if error.
*/
int
-tflite_core_getOutputDim (void *tflite, tensor_dim dim, tensor_type * type)
+tflite_core_getOutputDim (void *tflite, GstTensor_TensorsMeta * meta)
{
TFLiteCore *c = (TFLiteCore *) tflite;
- int ret = c->getOutputTensorDim (dim, type);
+ int ret = c->getOutputTensorDim (meta);
return ret;
}
* @param[out] inputDimension uint32_t[NNS_TENSOR_RANK_LIMIT] (tensor_dim)
* @param[out] type Type of each element in the input tensor
*/
-typedef int (*NNS_custom_get_input_dimension)(void *private_data, const GstTensor_Filter_Properties *prop,
- tensor_dim inputDimension, tensor_type *type);
+typedef int (*NNS_custom_get_input_dimension) (void *private_data,
+ const GstTensor_Filter_Properties * prop, GstTensor_TensorsMeta * meta);
/**
* @brief Get output tensor type.
* @param[out] outputDimension uint32_t[NNS_TENSOR_RANK_LIMIT] (tensor_dim)
* @param[out] type Type of each element in the output tensor
*/
-typedef int (*NNS_custom_get_output_dimension)(void *private_data, const GstTensor_Filter_Properties *prop,
- tensor_dim outputDimension, tensor_type *type);
+typedef int (*NNS_custom_get_output_dimension) (void *private_data,
+ const GstTensor_Filter_Properties * prop, GstTensor_TensorsMeta * meta);
/**
* @brief Set input dim by framework. Let custom plutin set output dim accordingly.
double get_ms (struct timeval t);
int getInputTensorSize ();
int getOutputTensorSize ();
- int getInputTensorDim (int idx, tensor_dim dim, tensor_type * type);
- int getOutputTensorDim (int idx, tensor_dim dim, tensor_type * type);
+ int getInputTensorDim (tensor_dim dim, tensor_type * type,
+ unsigned int *num_tensors);
+ int getOutputTensorDim (tensor_dim dim, tensor_type * type,
+ unsigned int *num_tensors);
int getInputTensorDimSize ();
int getOutputTensorDimSize ();
int invoke (uint8_t * inptr, uint8_t ** outptr);
int node_size;
int input_size;
int output_size;
- int *input_idx_list;
- int *output_idx_list;
- int input_idx_list_len;
- int output_idx_list_len;
int getTensorType (int tensor_idx, tensor_type * type);
- int getTensorDim (int tensor_idx, tensor_dim dim, tensor_type * type);
+ int getTensorDim (tensor_dim dim, tensor_type * type);
};
/**
extern void *tf_core_new (const char *_model_path);
extern void tf_core_delete (void *tf);
extern const char *tf_core_getModelPath (void *tf);
- extern int tf_core_getInputDim (void *tf, int idx, tensor_dim dim,
- tensor_type * type);
- extern int tf_core_getOutputDim (void *tf, int idx, tensor_dim dim,
- tensor_type * type);
+ extern int tf_core_getInputDim (void *tf, tensor_dim dim,
+ tensor_type * type, unsigned int *num_tensors);
+ extern int tf_core_getOutputDim (void *tf, tensor_dim dim,
+ tensor_type * type, unsigned int *num_tensors);
extern int tf_core_getInputSize (void *tf);
extern int tf_core_getOutputSize (void *tf);
extern int tf_core_invoke (void *tf, uint8_t * inptr, uint8_t ** outptr);
int setOutputTensorProp ();
int getInputTensorSize ();
int getOutputTensorSize ();
- int getInputTensorDim (tensor_dim dim, tensor_type * type);
- int getOutputTensorDim (tensor_dim dim, tensor_type * type);
+ int getInputTensorDim (GstTensor_TensorsMeta * meta);
+ int getOutputTensorDim (GstTensor_TensorsMeta * meta);
int invoke (uint8_t * inptr, uint8_t ** outptr);
private:
tensors inputTensors; /**< The list of input tensors */
tensors outputTensors; /**< The list of output tensors */
-
- tensor_dim inputDimension[NNS_TENSOR_SIZE_LIMIT]; /**< The list of dimensions of each input tensors */
- tensor_dim outputDimension[NNS_TENSOR_SIZE_LIMIT]; /**< The list of dimensions of each output tensors */
- tensor_type inputType[NNS_TENSOR_SIZE_LIMIT]; /**< The list of types for each input tensors */
- tensor_type outputType[NNS_TENSOR_SIZE_LIMIT]; /**< The list of types for each output tensors */
-
- int inputTensorSize; /**< The number of input tensors */
- int outputTensorSize; /**< The number of output tensors */
-
- unsigned int inputTensorRank[NNS_TENSOR_SIZE_LIMIT]; /**< The rank of input tensors */
- unsigned int outputTensorRank[NNS_TENSOR_SIZE_LIMIT]; /**< The rank of output tensors */
+ GstTensor_TensorsMeta inputTensorMeta; /**< The meta of input tensors */
+ GstTensor_TensorsMeta outputTensorMeta; /**< The meta of output tensors */
std::unique_ptr < tflite::Interpreter > interpreter;
std::unique_ptr < tflite::FlatBufferModel > model;
double get_ms (struct timeval t);
_nns_tensor_type getTensorType (TfLiteType tfType);
- int getTensorDim (int tensor_idx, tensor_dim dim, unsigned int *rank);
+ int getTensorDim (int tensor_idx, tensor_dim dim, int *rank);
};
/**
extern void *tflite_core_new (const char *_model_path);
extern void tflite_core_delete (void *tflite);
extern const char *tflite_core_getModelPath (void *tflite);
- extern int tflite_core_getInputDim (void *tflite, tensor_dim dim,
- tensor_type * type);
- extern int tflite_core_getOutputDim (void *tflite, tensor_dim dim,
- tensor_type * type);
+ extern int tflite_core_getInputDim (void *tflite,
+ GstTensor_TensorsMeta * meta);
+ extern int tflite_core_getOutputDim (void *tflite,
+ GstTensor_TensorsMeta * meta);
extern int tflite_core_getOutputSize (void *tflite);
extern int tflite_core_getInputSize (void *tflite);
extern int tflite_core_invoke (void *tflite, uint8_t * inptr,
/**
* @brief Internal meta data exchange format for a other/tensors instance
*/
-typedef struct {
- unsigned int num_tensors; /**< Number of tensors in each frame */
- tensor_dim dims[NNS_TENSOR_SIZE_LIMIT]; /**< Array of tensor_dim, [num_tensors] */
- tensor_type types[NNS_TENSOR_SIZE_LIMIT]; /**< Array of tensor_type, [num_tensors] */
- unsigned int ranks[NNS_TENSOR_SIZE_LIMIT]; /**< Array of rank, [num_tensors] */
+typedef struct
+{
+ unsigned int num_tensors; /**< The number of tensors */
+ tensor_dim dims[NNS_TENSOR_SIZE_LIMIT]; /**< The list of dimensions of each tensors */
+ tensor_type types[NNS_TENSOR_SIZE_LIMIT]; /**< The list of types for each tensors */
+ int ranks[NNS_TENSOR_SIZE_LIMIT]; /**< The list of ranks for each tensor */
} GstTensor_TensorsMeta;
/**
int fwClosed; /**< true IF close() is called or tried. Use int instead of gboolean because this is refered by custom plugins. */
const char *modelFilename; /**< Filepath to the model file (as an argument for NNFW). char instead of gchar for non-glib custom plugins */
- tensor_dim inputDimension[NNS_TENSOR_SIZE_LIMIT]; /**< The list of dimensions of each input tensors */
- tensor_type inputType[NNS_TENSOR_SIZE_LIMIT]; /**< The list of types for each input tensors */
- int inputTensorRank[NNS_TENSOR_SIZE_LIMIT]; /**< The list of types for each input tensors */
int inputCapNegotiated; /**< @todo check if this is really needed */
- int inputTensorSize; /**< The number of input tensors */
+ GstTensor_TensorsMeta inputMeta;
- tensor_dim outputDimension[NNS_TENSOR_SIZE_LIMIT]; /**< The list of dimensions of each output tensors */
- tensor_type outputType[NNS_TENSOR_SIZE_LIMIT]; /**< The list of types for each output tensors */
- int outputTensorRank[NNS_TENSOR_SIZE_LIMIT]; /**< The list of types for each input tensors */
int outputCapNegotiated; /**< @todo check if this is really needed */
- int outputTensorSize; /**< The number of output tensors */
+ GstTensor_TensorsMeta outputMeta;
const char *customProperties; /**< sub-plugin specific custom property values in string */
} GstTensor_Filter_Properties;
* @brief do_avg
*/
#define do_avg(type, sumtype) do {\
- sumtype *avg = (sumtype *) malloc(sizeof(sumtype) * prop->inputDimension[0][0]); \
+ sumtype *avg = (sumtype *) malloc(sizeof(sumtype) * prop->inputMeta.dims[0][0]); \
type *iptr = (type *) inptr; \
type *optr = (type *) outptr; \
- for (z = 0; z < prop->inputDimension[0][3]; z++) { \
- for (y = 0; y < prop->inputDimension[0][0]; y++) \
+ for (z = 0; z < prop->inputMeta.dims[0][3]; z++) { \
+ for (y = 0; y < prop->inputMeta.dims[0][0]; y++) \
avg[y] = 0; \
- for (y = 0; y < prop->inputDimension[0][2]; y++) { \
- for (x = 0; x < prop->inputDimension[0][1]; x++) { \
- for (c = 0; c < prop->inputDimension[0][0]; c++) { \
+ for (y = 0; y < prop->inputMeta.dims[0][2]; y++) { \
+ for (x = 0; x < prop->inputMeta.dims[0][1]; x++) { \
+ for (c = 0; c < prop->inputMeta.dims[0][0]; c++) { \
avg[c] += *(iptr + c + x * ix + y * iy + z * iz); \
} \
} \
} \
- for (c = 0; c < prop->inputDimension[0][0]; c++) { \
- *(optr + c + z * prop->inputDimension[0][0]) = (type) (avg[c] / xy); \
+ for (c = 0; c < prop->inputMeta.dims[0][0]; c++) { \
+ *(optr + c + z * prop->inputMeta.dims[0][0]) = (type) (avg[c] / xy); \
} \
} \
free(avg); \
pt_data *data = private_data;
uint32_t c, x, y, z;
- unsigned ix = prop->inputDimension[0][0];
- unsigned iy = prop->inputDimension[0][0] * prop->inputDimension[0][1];
+ unsigned ix = prop->inputMeta.dims[0][0];
+ unsigned iy = prop->inputMeta.dims[0][0] * prop->inputMeta.dims[0][1];
unsigned iz =
- prop->inputDimension[0][0] * prop->inputDimension[0][1] *
- prop->inputDimension[0][2];
- unsigned xy = prop->inputDimension[0][1] * prop->inputDimension[0][2];
+ prop->inputMeta.dims[0][0] * prop->inputMeta.dims[0][1] *
+ prop->inputMeta.dims[0][2];
+ unsigned xy = prop->inputMeta.dims[0][1] * prop->inputMeta.dims[0][2];
assert (data);
assert (inptr);
/* This assumes the limit is 4 */
assert (NNS_TENSOR_RANK_LIMIT == 4);
- assert (prop->inputDimension[0][0] == prop->outputDimension[0][0]);
- assert (prop->inputDimension[0][3] == prop->outputDimension[0][3]);
- assert (prop->inputType[0] == prop->outputType[0]);
+ assert (prop->inputMeta.dims[0][0] == prop->outputMeta.dims[0][0]);
+ assert (prop->inputMeta.dims[0][3] == prop->outputMeta.dims[0][3]);
+ assert (prop->inputMeta.types[0] == prop->outputMeta.types[0]);
- switch (prop->inputType[0]) {
+ switch (prop->inputMeta.types[0]) {
case _NNS_INT8:
do_avg (int8_t, int64_t);
break;
* @date 11 Jun 2018
* @brief Custom NNStreamer Filter Example 1. "Pass-Through"
* @author MyungJoo Ham <myungjoo.ham@samsung.com>
+ * @bug No known bugs except for NYI items
*
* this will supports "3x280x40" uint8 tensors (hardcoded dimensions)
*/
#define D2 (280)
#define D3 (40)
+/**
+ * @brief _pt_data
+ */
typedef struct _pt_data
{
uint32_t id; /***< Just for testing */
tensor_type type;
} pt_data;
+/**
+ * @brief pt_init: allocate and initialize the private data of this custom filter
+ */
static void *
pt_init (const GstTensor_Filter_Properties * prop)
{
return data;
}
+/**
+ * @brief pt_exit: free the private data of this custom filter
+ */
static void
pt_exit (void *private_data, const GstTensor_Filter_Properties * prop)
{
free (data);
}
+/**
+ * @brief get_inputDim: fill in the (hardcoded) input tensor metadata
+ */
static int
get_inputDim (void *private_data, const GstTensor_Filter_Properties * prop,
- tensor_dim inputDimension, tensor_type * type)
+ GstTensor_TensorsMeta * meta)
{
pt_data *data = private_data;
int i;
g_assert (data);
g_assert (NNS_TENSOR_RANK_LIMIT >= 3);
- inputDimension[0] = D1;
- inputDimension[1] = D2;
- inputDimension[2] = D3;
+ meta->dims[0][0] = D1;
+ meta->dims[0][1] = D2;
+ meta->dims[0][2] = D3;
for (i = 3; i < NNS_TENSOR_RANK_LIMIT; i++)
- inputDimension[i] = 1;
- *type = _NNS_UINT8;
- return 0;
+ meta->dims[0][i] = 1;
+ meta->types[0] = _NNS_UINT8;
+ meta->num_tensors = 1;
return 0;
}
+/**
+ * @brief get_outputDim: fill in the (hardcoded) output tensor metadata
+ */
static int
get_outputDim (void *private_data, const GstTensor_Filter_Properties * prop,
- tensor_dim outputDimension, tensor_type * type)
+ GstTensor_TensorsMeta * meta)
{
pt_data *data = private_data;
int i;
g_assert (data);
g_assert (NNS_TENSOR_RANK_LIMIT >= 3);
- outputDimension[0] = D1;
- outputDimension[1] = D2;
- outputDimension[2] = D3;
+ meta->dims[0][0] = D1;
+ meta->dims[0][1] = D2;
+ meta->dims[0][2] = D3;
for (i = 3; i < NNS_TENSOR_RANK_LIMIT; i++)
- outputDimension[i] = 1;
- *type = _NNS_UINT8;
+ meta->dims[0][i] = 1;
+ meta->types[0] = _NNS_UINT8;
+ meta->num_tensors = 1;
return 0;
}
+/**
+ * @brief pt_invoke: pass-through; copy the input frame to the output buffer
+ */
static int
pt_invoke (void *private_data, const GstTensor_Filter_Properties * prop,
const uint8_t * inptr, uint8_t * outptr)
g_assert (inptr);
g_assert (outptr);
- size = get_tensor_element_count (prop->outputDimension[0]) *
- tensor_element_size[prop->outputType[0]];
+ size = get_tensor_element_count (prop->outputMeta.dims[0]) *
+ tensor_element_size[prop->outputMeta.types[0]];
g_assert (inptr != outptr);
memcpy (outptr, inptr, size);
/* This assumes the limit is 4 */
assert (NNS_TENSOR_RANK_LIMIT == 4);
- assert (prop->inputDimension[0][0] == prop->outputDimension[0][0]);
- assert (prop->inputDimension[0][3] == prop->outputDimension[0][3]);
- assert (prop->inputType[0] == prop->outputType[0]);
+ assert (prop->inputMeta.dims[0][0] == prop->outputMeta.dims[0][0]);
+ assert (prop->inputMeta.dims[0][3] == prop->outputMeta.dims[0][3]);
+ assert (prop->inputMeta.types[0] == prop->outputMeta.types[0]);
- elementsize = tensor_element_size[prop->inputType[0]];
+ elementsize = tensor_element_size[prop->inputMeta.types[0]];
- ox = (data->new_x > 0) ? data->new_x : prop->outputDimension[0][1];
- oy = (data->new_y > 0) ? data->new_y : prop->outputDimension[0][2];
+ ox = (data->new_x > 0) ? data->new_x : prop->outputMeta.dims[0][1];
+ oy = (data->new_y > 0) ? data->new_y : prop->outputMeta.dims[0][2];
- oidx0 = prop->outputDimension[0][0];
- oidx1 = oidx0 * prop->outputDimension[0][1];
- oidx2 = oidx1 * prop->outputDimension[0][2];
+ oidx0 = prop->outputMeta.dims[0][0];
+ oidx1 = oidx0 * prop->outputMeta.dims[0][1];
+ oidx2 = oidx1 * prop->outputMeta.dims[0][2];
- iidx0 = prop->inputDimension[0][0];
- iidx1 = iidx0 * prop->inputDimension[0][1];
- iidx2 = iidx1 * prop->inputDimension[0][2];
+ iidx0 = prop->inputMeta.dims[0][0];
+ iidx1 = iidx0 * prop->inputMeta.dims[0][1];
+ iidx2 = iidx1 * prop->inputMeta.dims[0][2];
- for (z = 0; z < prop->inputDimension[0][3]; z++) {
+ for (z = 0; z < prop->inputMeta.dims[0][3]; z++) {
for (y = 0; y < oy; y++) {
for (x = 0; x < ox; x++) {
unsigned int c;
- for (c = 0; c < prop->inputDimension[0][0]; c++) {
+ for (c = 0; c < prop->inputMeta.dims[0][0]; c++) {
int sz;
/* Output[y'][x'] = Input[ y' * y / new-y ][ x' * x / new-x ]. Yeah This is Way too Simple. But this is just an example :D */
unsigned ix, iy;
- ix = x * prop->inputDimension[0][1] / ox;
- iy = y * prop->inputDimension[0][2] / oy;
+ ix = x * prop->inputMeta.dims[0][1] / ox;
+ iy = y * prop->inputMeta.dims[0][2] / oy;
- assert (ix >= 0 && iy >= 0 && ix < prop->inputDimension[0][1]
- && iy < prop->inputDimension[0][2]);
+ assert (ix >= 0 && iy >= 0 && ix < prop->inputMeta.dims[0][1]
+ && iy < prop->inputMeta.dims[0][2]);
/* outptr[z][y][x][c] = inptr[z][iy][ix][c]; */
for (sz = 0; sz < elementsize; sz++)
uint32_t iidx0, iidx1, iidx2;
*size =
- get_tensor_element_count (prop->outputDimension[0]) *
- tensor_element_size[prop->outputType[0]];
+ get_tensor_element_count (prop->outputMeta.dims[0]) *
+ tensor_element_size[prop->outputMeta.types[0]];
uint8_t *outptr = (uint8_t *) malloc (sizeof (uint8_t) * *size);
assert (data);
/* This assumes the limit is 4 */
assert (NNS_TENSOR_RANK_LIMIT == 4);
- assert (prop->inputDimension[0][0] == prop->outputDimension[0][0]);
- assert (prop->inputDimension[0][3] == prop->outputDimension[0][3]);
- assert (prop->inputType[0] == prop->outputType[0]);
+ assert (prop->inputMeta.dims[0][0] == prop->outputMeta.dims[0][0]);
+ assert (prop->inputMeta.dims[0][3] == prop->outputMeta.dims[0][3]);
+ assert (prop->inputMeta.types[0] == prop->outputMeta.types[0]);
- elementsize = tensor_element_size[prop->inputType[0]];
+ elementsize = tensor_element_size[prop->inputMeta.types[0]];
- ox = (data->new_x > 0) ? data->new_x : prop->outputDimension[0][1];
- oy = (data->new_y > 0) ? data->new_y : prop->outputDimension[0][2];
+ ox = (data->new_x > 0) ? data->new_x : prop->outputMeta.dims[0][1];
+ oy = (data->new_y > 0) ? data->new_y : prop->outputMeta.dims[0][2];
- oidx0 = prop->outputDimension[0][0];
- oidx1 = oidx0 * prop->outputDimension[0][1];
- oidx2 = oidx1 * prop->outputDimension[0][2];
+ oidx0 = prop->outputMeta.dims[0][0];
+ oidx1 = oidx0 * prop->outputMeta.dims[0][1];
+ oidx2 = oidx1 * prop->outputMeta.dims[0][2];
- iidx0 = prop->inputDimension[0][0];
- iidx1 = iidx0 * prop->inputDimension[0][1];
- iidx2 = iidx1 * prop->inputDimension[0][2];
+ iidx0 = prop->inputMeta.dims[0][0];
+ iidx1 = iidx0 * prop->inputMeta.dims[0][1];
+ iidx2 = iidx1 * prop->inputMeta.dims[0][2];
- for (z = 0; z < prop->inputDimension[0][3]; z++) {
+ for (z = 0; z < prop->inputMeta.dims[0][3]; z++) {
for (y = 0; y < oy; y++) {
for (x = 0; x < ox; x++) {
unsigned int c;
- for (c = 0; c < prop->inputDimension[0][0]; c++) {
+ for (c = 0; c < prop->inputMeta.dims[0][0]; c++) {
/* Output[y'][x'] = Input[ y' * y / new-y ][ x' * x / new-x ]. Yeah This is Way too Simple. But this is just an example :D */
unsigned ix, iy, sz;
- ix = x * prop->inputDimension[0][1] / ox;
- iy = y * prop->inputDimension[0][2] / oy;
+ ix = x * prop->inputMeta.dims[0][1] / ox;
+ iy = y * prop->inputMeta.dims[0][2] / oy;
- assert (ix >= 0 && iy >= 0 && ix < prop->inputDimension[0][1]
- && iy < prop->inputDimension[0][2]);
+ assert (ix >= 0 && iy >= 0 && ix < prop->inputMeta.dims[0][1]
+ && iy < prop->inputMeta.dims[0][2]);
/* outptr[z][y][x][c] = inptr[z][iy][ix][c]; */
for (sz = 0; sz < elementsize; sz++)