g_assert ((filter->prop.inputConfigured & _TFC_ALL) == _TFC_ALL &&
(filter->prop.outputConfigured & _TFC_ALL) == _TFC_ALL);
- /* allocate_in_invoke is NYI */
- g_assert (filter->prop.fw->allocate_in_invoke == FALSE);
-
- /* 1. Allocate outbuf */
+ /* 1. Allocate outbuf if allocate_in_invoke is FALSE */
g_assert (outbuf);
- outBufSize = tensor_element_size[filter->prop.outputType] *
- get_tensor_element_count (filter->prop.outputDimension);
- if (gst_buffer_get_size (outbuf) < outBufSize) {
- /** @todo: write a routine to say aloud when this happens */
- gst_buffer_set_size (outbuf, outBufSize);
- }
- debug_print (!filter->prop.silent, "outbuf = %lu / expected = %lu\n",
- gst_buffer_get_size (outbuf), outBufSize);
- g_assert (gst_buffer_get_size (outbuf) >= outBufSize);
- /* 2. Call the filter-subplugin callback, "invoke" */
- gst_buffer_map (inbuf, &inInfo, GST_MAP_READ);
- gst_buffer_map (outbuf, &outInfo, GST_MAP_WRITE);
- inptr = inInfo.data;
- outptr = outInfo.data;
+ if (filter->prop.fw->allocate_in_invoke == FALSE) {
+ outBufSize = tensor_element_size[filter->prop.outputType] *
+ get_tensor_element_count (filter->prop.outputDimension);
+ if (gst_buffer_get_size (outbuf) < outBufSize) {
+ /* @TODO: write a routine to say aloud when this happens */
+ gst_buffer_set_size (outbuf, outBufSize);
+ }
+ debug_print (!filter->prop.silent, "outbuf = %lu / expected = %lu\n",
+ gst_buffer_get_size (outbuf), outBufSize);
+ g_assert (gst_buffer_get_size (outbuf) >= outBufSize);
+
+ /* 2. Call the filter-subplugin callback, "invoke" */
+ gst_buffer_map (inbuf, &inInfo, GST_MAP_READ);
+ gst_buffer_map (outbuf, &outInfo, GST_MAP_WRITE);
+ inptr = inInfo.data;
+ outptr = outInfo.data;
- gst_tensor_filter_call (filter, retoutptr, invoke_NN, inptr, outptr);
- g_assert (outptr == retoutptr);
+ gst_tensor_filter_call (filter, retoutptr, invoke_NN, inptr, outptr);
+ g_assert (outptr == retoutptr);
- gst_buffer_unmap (inbuf, &inInfo);
- gst_buffer_unmap (outbuf, &outInfo);
+ gst_buffer_unmap (inbuf, &inInfo);
+ gst_buffer_unmap (outbuf, &outInfo);
+ } else {
+ GstMemory *mem;
+ gst_buffer_map (inbuf, &inInfo, GST_MAP_READ);
+ g_assert (gst_buffer_get_size (outbuf) == 0);
+
+ inptr = inInfo.data;
+ gst_tensor_filter_call (filter, retoutptr, invoke_NN, inptr, NULL);
+ gst_buffer_unmap (inbuf, &inInfo);
+
+ /* @TODO Performance: cache get_tensor_element_count * tensor_element_size */
+ mem = gst_memory_new_wrapped (0, retoutptr,
+ get_tensor_element_count (filter->prop.outputDimension) *
+ tensor_element_size[filter->prop.outputType],
+ 0,
+ get_tensor_element_count (filter->prop.outputDimension) *
+ tensor_element_size[filter->prop.outputType], NULL, NULL);
+ gst_buffer_insert_memory (outbuf, -1, mem);
+ }
/* 3. Return result! */
return GST_FLOW_OK;
GstTensor_Filter_CheckStatus ret =
get_tensor_from_padcap (srccap, dim, &type);
+ if (filter->prop.fw->allocate_in_invoke == TRUE) {
+ *othersize = 0; /* Do not allocate outbuf. invoke_NN will allocate! */
+ return TRUE;
+ }
+
g_assert ((ret & _TFC_ALL) == _TFC_ALL);
if (!filter->prop.silent) {
{
gchar *name; /**< Name of the neural network framework, searchable by FRAMEWORK property */
gboolean allow_in_place; /**< TRUE if InPlace transfer of input-to-output is allowed. Not supported in main, yet */
- gboolean allocate_in_invoke; /**< TRUE if invoke_NN is going to allocate outputptr by itself and return the address via outputptr. NYI. */
+ gboolean allocate_in_invoke; /**< TRUE if invoke_NN is going to allocate outputptr by itself and return the address via outputptr. Do not change this value after cap negotiation is complete (or the stream has been started). */
uint8_t *(*invoke_NN)(const GstTensor_Filter *filter, void **private_data, const uint8_t *inputptr, uint8_t *outputptr);
/**< Mandatory callback. Invoke the given network model.
custom_open (const GstTensor_Filter * filter, void **private_data)
{
int retval = custom_loadlib (filter, private_data);
+ internal_data *ptr;
g_assert (retval == 0); /* This must be called only once */
+
+ ptr = *private_data;
+ g_assert (!ptr->methods->invoke != !ptr->methods->allocate_invoke); /* XOR! */
+
+ if (ptr->methods->allocate_invoke)
+ NNS_support_custom.allocate_in_invoke = TRUE;
}
/**
g_assert (filter->privateData && *private_data == filter->privateData);
ptr = *private_data;
- retval = ptr->methods->invoke (ptr->customFW_private_data, &(filter->prop),
- inptr, outptr);
- if (retval == 0)
- return outptr;
- else
+ if (ptr->methods->invoke) {
+ retval = ptr->methods->invoke (ptr->customFW_private_data, &(filter->prop),
+ inptr, outptr);
+ if (retval == 0)
+ return outptr;
+ else
+ return NULL;
+ } else if (ptr->methods->allocate_invoke) {
+ size_t size;
+ uint8_t *retptr = ptr->methods->allocate_invoke (ptr->customFW_private_data,
+ &(filter->prop), inptr, &size);
+ g_assert (size == (get_tensor_element_count (filter->prop.outputDimension) *
+ tensor_element_size[filter->prop.outputType]));
+ return retptr;
+ } else {
return NULL;
+ }
}
/**
tensor_dim outputDimension, tensor_type *outputType);
/**
- * @brief Invoke the "main function".
+ * @brief Invoke the "main function". Without allocating output buffer. (fill in the given output buffer)
* @param[in] private_data The pointer returned by NNStreamer_custom_init.
* @param[in] prop Tensor_Filter's property values. Do not change its values.
* @param[in] inputPtr pointer to input tensor, size = dim1 x dim2 x dim3 x dim4 x typesize, allocated by caller
- * @param[in] inputPtr pointer to output tensor, size = dim1 x dim2 x dim3 x dim4 x typesize, allocated by caller
+ * @param[out] outputPtr pointer to output tensor, size = dim1 x dim2 x dim3 x dim4 x typesize, allocated by caller
+ * @return 0 if success
*/
typedef int (*NNS_custom_invoke)(void *private_data, const GstTensor_Filter_Properties *prop,
const uint8_t *inputPtr, uint8_t *outputPtr);
/**
+ * @brief Invoke the "main function", allocating the output buffer in the callee and returning its address.
+ * @param[in] private_data The pointer returned by NNStreamer_custom_init.
+ * @param[in] prop Tensor_Filter's property values. Do not change its values.
+ * @param[in] inputPtr pointer to input tensor, size = dim1 x dim2 x dim3 x dim4 x typesize, allocated by caller
+ * @param[out] size The allocated size.
+ * @return The output buffer allocated in the invoke function
+ */
+typedef uint8_t * (*NNS_custom_allocate_invoke)(void *private_data, const GstTensor_Filter_Properties *prop,
+ const uint8_t *inputPtr, size_t *size);
+
+/**
* @brief Custom Filter Class
*
* Note that exery function pointer is MANDATORY!
*/
struct _NNStreamer_custom_class {
+ int allocate_outbuf_in_invoke; /**< Set non-zero if invoke function is to allocate output buffer. Note that the allocated outbuf size MUST be consistent with output tensor dimension & type */
NNS_custom_init_func initfunc; /**< called before any other callbacks from tensor_filter_custom.c */
NNS_custom_exit_func exitfunc; /**< will not call other callbacks after this call */
NNS_custom_get_input_dimension getInputDim; /**< a custom filter is required to provide input tensor dimension unless setInputdim is defined. */
NNS_custom_get_output_dimension getOutputDim; /**< a custom filter is require dto provide output tensor dimension unless setInputDim is defined. */
NNS_custom_set_input_dimension setInputDim; /**< without getI/O-Dim, this allows framework to set input dimension and get output dimension from the custom filter according to the input dimension */
- NNS_custom_invoke invoke; /**< the main function, "invoke", that transforms input to output */
+ NNS_custom_invoke invoke; /**< the main function, "invoke", that transforms input to output. invoke is supposed to fill in the given output buffer. (invoke) XOR (allocate_invoke) MUST hold. */
+ NNS_custom_allocate_invoke allocate_invoke; /**< the main function, "allocate & invoke", that transforms input to output. allocate_invoke is supposed to allocate output buffer by itself. (invoke) XOR (allocate_invoke) MUST hold. */
};
typedef struct _NNStreamer_custom_class NNStreamer_custom_class;
ADD_LIBRARY(nnstreamer_customfilter_scaler SHARED nnstreamer_customfilter_example_scaler.c)
TARGET_LINK_LIBRARIES(nnstreamer_customfilter_scaler ${pkgs_LIBRARIES})
-TARGET_INCLUDE_DIRECTORIES(nnstreamer_customfilter_scaler PUBLIC ${pkgs_INCLUDE_DIRS})
TARGET_COMPILE_OPTIONS(nnstreamer_customfilter_scaler PUBLIC ${pkgs_CFLAGS_OTHER})
-INSTALL(TARGETS nnstreamer_customfilter_scaler
+ADD_LIBRARY(nnstreamer_customfilter_scaler_allocator SHARED nnstreamer_customfilter_example_scaler_allocator.c)
+
+TARGET_LINK_LIBRARIES(nnstreamer_customfilter_scaler_allocator ${pkgs_LIBRARIES})
+TARGET_COMPILE_OPTIONS(nnstreamer_customfilter_scaler_allocator PUBLIC ${pkgs_CFLAGS_OTHER})
+
+INSTALL(TARGETS nnstreamer_customfilter_scaler nnstreamer_customfilter_scaler_allocator
RUNTIME DESTINATION ${EXEC_PREFIX}
LIBRARY DESTINATION ${LIB_INSTALL_DIR}
ARCHIVE DESTINATION ${LIB_INSTALL_DIR}
--- /dev/null
+/**
+ * NNStreamer Custom Filter Example 3. Scaler - Allocator, to test "allocate_in_invoke" option
+ * Copyright (C) 2018 MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * LICENSE: LGPL-2.1
+ *
+ * @file nnstreamer_customfilter_example_scaler_allocator.c
+ * @date 22 Jun 2018
+ * @brief Custom NNStreamer Filter Example 3. "Scaler - Allocator"
+ * @author MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This scales a tensor of [N][y][x][M] to [N][new-y][new-x][M]
+ *
+ * The custom property is to be given as, "custom=[new-x]x[new-y]", where new-x and new-y are unsigned integers.
+ * E.g., custom=640x480
+ *
+ * Output[y'][x'] = Input[ y' * y / new-y ][ x' * x / new-x ]. Yeah This is Way too Simple. But this is just an example :D
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <glib.h>
+#include <tensor_filter_custom.h>
+#include <tensor_common.h>
+
+/** @brief Per-instance state of this custom filter, created by pt_init. */
+typedef struct _pt_data
+{
+  uint32_t id; /**< Just for testing */
+  char *property; /**< The string given as "custom" property of tensor_filter element */
+  uint32_t new_y; /**< Target output height parsed from "custom"; 0 = keep input height */
+  uint32_t new_x; /**< Target output width parsed from "custom"; 0 = keep input width */
+} pt_data;
+
+/**
+ * @brief Local strdup() replacement (strdup is POSIX, not ISO C).
+ * @param[in] src NUL-terminated string to copy. Must not be NULL.
+ * @return Newly malloc'ed copy owned by the caller, or NULL on OOM.
+ */
+static char *
+_strdup (const char *src)
+{
+  size_t len = strlen (src) + 1;
+  char *dest = (char *) malloc (len);
+  if (dest == NULL)
+    return NULL; /* report OOM instead of copying into a NULL pointer */
+  memcpy (dest, src, len); /* len already includes the terminating '\0' */
+  return dest;
+}
+
+/**
+ * @brief init callback: allocate pt_data and parse the "custom" property.
+ *
+ * The property has the form "[new-x]x[new-y]" (separators: x X : _ / space),
+ * e.g. "640x480". Missing or non-positive values leave new_x/new_y at 0,
+ * which means "keep the input dimension".
+ *
+ * @param[in] prop Tensor_Filter property values (read-only here).
+ * @return Newly allocated pt_data (caller frees via pt_exit), NULL on OOM.
+ */
+static void *
+pt_init (const GstTensor_Filter_Properties * prop)
+{
+  pt_data *data = (pt_data *) malloc (sizeof (pt_data));
+
+  if (data == NULL)
+    return NULL; /* OOM */
+
+  if (prop->customProperties && strlen (prop->customProperties) > 0)
+    data->property = _strdup (prop->customProperties);
+  else
+    data->property = NULL;
+  data->new_x = 0;
+  data->new_y = 0;
+
+  /* Parse property and set new_x, new_y */
+  if (data->property) {
+    const char s[7] = "xX:_/ ";
+    gchar **strv = g_strsplit_set (data->property, s, 3);
+    if (strv[0] != NULL) {
+      gint64 val = g_ascii_strtoll (strv[0], NULL, 10);
+      /* clamp negatives BEFORE the uint32_t cast; the previous
+       * "(uint32_t) v < 0" check was always false */
+      data->new_x = (val > 0) ? (uint32_t) val : 0;
+      /* only look at strv[1] when strv[0] existed: the vector is
+       * NULL-terminated and must not be read past its end */
+      if (strv[1] != NULL) {
+        val = g_ascii_strtoll (strv[1], NULL, 10);
+        data->new_y = (val > 0) ? (uint32_t) val : 0;
+      }
+    }
+    g_strfreev (strv);
+  }
+
+  data->id = 0;
+  return data;
+}
+
+/**
+ * @brief exit callback: release everything pt_init allocated.
+ * @param[in] private_data The pt_data returned by pt_init. Must not be NULL.
+ * @param[in] prop Unused.
+ */
+static void
+pt_exit (void *private_data, const GstTensor_Filter_Properties * prop)
+{
+  pt_data *data = private_data;
+  assert (data);
+  free (data->property); /* free(NULL) is a no-op; no guard needed */
+  free (data);
+}
+
+/**
+ * @brief setInputDim callback: derive the output tensor shape from the input.
+ *
+ * The output shape equals the input shape, except that dimension [1] (x)
+ * and [2] (y) are overridden by the parsed "custom" property when positive.
+ * The element type is passed through unchanged.
+ *
+ * @param[in] private_data pt_data created by pt_init.
+ * @param[in] prop Unused.
+ * @param[in] iDim Input tensor dimension.
+ * @param[in] iType Input tensor element type.
+ * @param[out] oDim Output tensor dimension (written here).
+ * @param[out] oType Output tensor element type (written here).
+ * @return 0 on success.
+ */
+static int
+set_inputDim (void *private_data, const GstTensor_Filter_Properties * prop,
+    const tensor_dim iDim, const tensor_type iType,
+    tensor_dim oDim, tensor_type * oType)
+{
+  pt_data *data = private_data;
+  int rank;
+
+  assert (data);
+
+  /* Start from a straight copy of the input shape */
+  for (rank = 0; rank < NNS_TENSOR_RANK_LIMIT; rank++)
+    oDim[rank] = iDim[rank];
+
+  /* Override x/y with the requested scaling target, if any */
+  if (data->new_x > 0)
+    oDim[1] = data->new_x;
+  if (data->new_y > 0)
+    oDim[2] = data->new_y;
+
+  *oType = iType;
+  return 0;
+}
+
+/**
+ * @brief allocate_invoke callback: nearest-neighbour scaling, callee-allocated output.
+ *
+ * Output[z][y'][x'][c] = Input[z][ y' * y / new-y ][ x' * x / new-x ][c].
+ * The output buffer is malloc'ed here and ownership transfers to the caller,
+ * which wraps it in a GstMemory (see the allocate_in_invoke path).
+ *
+ * @param[in] private_data pt_data created by pt_init.
+ * @param[in] prop Tensor_Filter property values (dimensions/types already negotiated).
+ * @param[in] inptr Input tensor buffer, allocated by the caller.
+ * @param[out] size Set to the output buffer size in bytes.
+ * @return The newly allocated output buffer.
+ */
+static uint8_t *
+pt_allocate_invoke (void *private_data,
+    const GstTensor_Filter_Properties * prop, const uint8_t * inptr,
+    size_t * size)
+{
+  pt_data *data = private_data;
+  uint32_t ox, oy, x, y, z;
+  uint32_t oidx0, oidx1, oidx2;
+  uint32_t iidx0, iidx1, iidx2;
+  uint8_t *outptr;
+
+  *size =
+      get_tensor_element_count (prop->outputDimension) *
+      tensor_element_size[prop->outputType];
+  outptr = (uint8_t *) malloc (*size);
+
+  assert (data);
+  assert (inptr);
+  assert (outptr); /* @TODO handle OOM gracefully instead of asserting */
+
+  /* This assumes the limit is 4 */
+  assert (NNS_TENSOR_RANK_LIMIT == 4);
+
+  /* Scaling must not change channels [0], batch [3], or element type */
+  assert (prop->inputDimension[0] == prop->outputDimension[0]);
+  assert (prop->inputDimension[3] == prop->outputDimension[3]);
+  assert (prop->inputType == prop->outputType);
+
+  ox = (data->new_x > 0) ? data->new_x : prop->outputDimension[1];
+  oy = (data->new_y > 0) ? data->new_y : prop->outputDimension[2];
+
+  /* Element strides of the output tensor */
+  oidx0 = prop->outputDimension[0];
+  oidx1 = oidx0 * prop->outputDimension[1];
+  oidx2 = oidx1 * prop->outputDimension[2];
+
+  /* Element strides of the input tensor */
+  iidx0 = prop->inputDimension[0];
+  iidx1 = iidx0 * prop->inputDimension[1];
+  iidx2 = iidx1 * prop->inputDimension[2];
+
+  for (z = 0; z < prop->inputDimension[3]; z++) {
+    for (y = 0; y < oy; y++) {
+      for (x = 0; x < ox; x++) {
+        unsigned int c;
+        for (c = 0; c < prop->inputDimension[0]; c++) {
+          /* Output[y'][x'] = Input[ y' * y / new-y ][ x' * x / new-x ]. Yeah This is Way too Simple. But this is just an example :D */
+          unsigned ix, iy;
+
+          ix = x * prop->inputDimension[1] / ox;
+          iy = y * prop->inputDimension[2] / oy;
+
+          /* ix/iy are unsigned, so ">= 0" was always true; check bounds only */
+          assert (ix < prop->inputDimension[1]
+              && iy < prop->inputDimension[2]);
+
+          /* outptr[z][y][x][c] = inptr[z][iy][ix][c]; */
+          *(outptr + c + x * oidx0 + y * oidx1 + z * oidx2) =
+              *(inptr + c + ix * iidx0 + iy * iidx1 + z * iidx2);
+        }
+      }
+    }
+  }
+
+  assert (inptr != outptr);
+
+  return outptr;
+}
+
+/* Callback table for this custom filter. .invoke is intentionally left
+ * NULL: this example only provides allocate_invoke, and custom_open
+ * asserts (invoke) XOR (allocate_invoke), then sets allocate_in_invoke. */
+static NNStreamer_custom_class NNStreamer_custom_body = {
+  .initfunc = pt_init,
+  .exitfunc = pt_exit,
+  .setInputDim = set_inputDim,
+  .allocate_invoke = pt_allocate_invoke,
+};
+
+/* The dyn-loaded object */
+NNStreamer_custom_class *NNStreamer_custom = &NNStreamer_custom_body;
PATH_TO_MODEL_A="../../build/nnstreamer_example/custom_example_average/libnnstreamer_customfilter_average.so"
gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} videotestsrc num-buffers=1 ! video/x-raw,format=RGB,width=640,height=480,framerate=0/1 ! videoconvert ! video/x-raw, format=RGB ! tensor_converter ! tee name=t ! queue ! tensor_filter framework=\"custom\" model=\"${PATH_TO_MODEL_A}\" ! filesink location=\"testcase08.average.log\" sync=true t. ! queue ! filesink location=\"testcase08.direct.log\" sync=true" 8
+
+# Test scaler + in-invoke allocator (9)
+PATH_TO_MODEL_SI="../../build/nnstreamer_example/custom_example_scaler/libnnstreamer_customfilter_scaler_allocator.so"
+gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} videotestsrc num-buffers=1 ! video/x-raw,format=RGB,width=640,height=480,framerate=0/1 ! videoconvert ! video/x-raw, format=RGB ! tensor_converter ! tee name=t ! queue ! tensor_filter framework=\"custom\" model=\"${PATH_TO_MODEL_SI}\" custom=\"320x240\" ! filesink location=\"testcase09.scaled.log\" sync=true t. ! queue ! filesink location=\"testcase09.direct.log\" sync=true" 9
+python checkScaledTensor.py testcase09.direct.log 640 480 testcase09.scaled.log 320 240 3
+casereport 9 $? "Golden test comparison"
+
report