--- /dev/null
+/**
+ * GStreamer / NNStreamer tensor_decoder subplugin, "direct video"
+ * Copyright (C) 2018 Jijoong Moon <jijoong.moon@samsung.com>
+ * Copyright (C) 2018 MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ */
+/**
+ * @file tensordec-directvideo.c
+ * @date 04 Nov 2018
+ * @brief NNStreamer tensor-decoder subplugin, "direct video",
+ * which converts tensors to video directly.
+ *
+ * @see https://github.com/nnsuite/nnstreamer
+ * @author Jijoong Moon <jijoong.moon@samsung.com>
+ * @bug No known bugs except for NYI items
+ *
+ */
+
+#include <string.h>
+#include <glib.h>
+#include "tensordec.h"
+#include <tensor_common.h>
+
+/**
+ * @brief tensordec-plugin's TensorDecDef init callback.
+ * @param[in,out] self "this" pointer; plugin_data is cleared here
+ * @return TRUE always (this subplugin has nothing that can fail at init)
+ */
+static gboolean
+dv_init (GstTensorDec * self)
+{
+  self->plugin_data = NULL;     /* We have no internal data */
+  return TRUE;
+}
+
+/**
+ * @brief tensordec-plugin's TensorDecDef exit callback.
+ * @param[in] self "this" pointer (unused; dv_init allocated no plugin_data)
+ */
+static void
+dv_exit (GstTensorDec * self)
+{
+  /* Nothing to do: no per-instance state was allocated in dv_init */
+  return;
+}
+
+/**
+ * @brief tensordec-plugin's TensorDecDef setOption callback.
+ * @param[in] self "this" pointer (unused)
+ * @param[in] opNum option index (ignored)
+ * @param[in] param option string (ignored)
+ * @return TRUE always; direct-video accepts and ignores every option
+ */
+static gboolean
+dv_setOption (GstTensorDec * self, int opNum, const gchar * param)
+{
+  /* We do not accept anything. */
+  return TRUE;
+}
+
+/**
+ * @brief tensordec-plugin's TensorDecDef getOutputDim callback.
+ *        Derives src-pad video caps from the tensor configuration:
+ *        dimension[0] selects the color format (1=GRAY8, 3=RGB, 4=BGRx),
+ *        dimension[1] is width, dimension[2] is height.
+ *        Fields with unknown/invalid values are left open in the caps.
+ * @param[in] self "this" pointer (unused here)
+ * @param[in] config tensor configuration of the incoming stream
+ * @return simplified video GstCaps, or NULL if config is NULL
+ */
+static GstCaps *
+dv_getOutputDim (GstTensorDec * self, const GstTensorConfig * config)
+{
+  /* Old gst_tensordec_video_caps_from_config () had this */
+  GstVideoFormat format;
+  gint width, height, fn, fd;
+  GstCaps *caps;
+
+  g_return_val_if_fail (config != NULL, NULL);
+
+  caps = gst_caps_from_string (GST_TENSOR_VIDEO_CAPS_STR);
+
+  /* channel count (dimension[0]) determines the video pixel format */
+  switch (config->info.dimension[0]) {
+    case 1:
+      format = GST_VIDEO_FORMAT_GRAY8;
+      break;
+    case 3:
+      format = GST_VIDEO_FORMAT_RGB;
+      break;
+    case 4:
+      format = GST_VIDEO_FORMAT_BGRx;
+      break;
+    default:
+      format = GST_VIDEO_FORMAT_UNKNOWN;
+      break;
+  }
+
+  width = config->info.dimension[1];
+  height = config->info.dimension[2];
+  fn = config->rate_n;
+  fd = config->rate_d;
+
+  if (format != GST_VIDEO_FORMAT_UNKNOWN) {
+    const gchar *format_string = gst_video_format_to_string (format);
+    gst_caps_set_simple (caps, "format", G_TYPE_STRING, format_string, NULL);
+  }
+
+  if (width > 0) {
+    gst_caps_set_simple (caps, "width", G_TYPE_INT, width, NULL);
+  }
+
+  if (height > 0) {
+    gst_caps_set_simple (caps, "height", G_TYPE_INT, height, NULL);
+  }
+
+  if (fn > 0 && fd > 0) {
+    gst_caps_set_simple (caps, "framerate", GST_TYPE_FRACTION, fn, fd, NULL);
+  }
+
+  return gst_caps_simplify (caps);
+}
+
+/**
+ * @brief tensordec-plugin's TensorDecDef getTransformSize callback.
+ *        Returning 0 tells the caller NOT to preallocate the output
+ *        buffer; dv_decode allocates its own output memory.
+ * @return 0 always
+ */
+static gsize
+dv_getTransformSize (GstTensorDec * self, GstCaps * caps,
+    gsize size, GstCaps * othercaps, GstPadDirection direction)
+{
+  return 0;                     /* I'll allocate. Do not allocate for me */
+}
+
+/**
+ * @brief tensordec-plugin's TensorDecDef decode callback.
+ *        Copies the uint8 tensor into a newly allocated video memory,
+ *        padding each row (dim[0] channels x dim[1] width) up to a
+ *        multiple of 4 bytes as raw video frames require.
+ * @param[in] self "this" pointer; tensor_config supplies the dimensions
+ * @param[in] input tensor memory to be decoded (must be _NNS_UINT8)
+ * @param[out] outbuf output buffer; must be empty, memory is appended
+ * @return GST_FLOW_OK on success, GST_FLOW_ERROR if mapping fails
+ */
+static GstFlowReturn
+dv_decode (GstTensorDec * self, const GstTensorMemory * input,
+    GstBuffer * outbuf)
+{
+  GstMapInfo out_info;
+  GstMemory *out_mem;
+  GstTensorConfig *config = &self->tensor_config;
+  uint32_t *dim = &(config->info.dimension[0]);
+  /* one video row in bytes: channels (dim[0]) * width (dim[1]) */
+  size_t row_size = (size_t) dim[0] * dim[1];
+  /* row size rounded up to the next multiple of 4 */
+  size_t padded_size = ((row_size - 1) / 4 + 1) * 4;
+
+  g_assert (outbuf);
+  g_assert (gst_buffer_get_size (outbuf) == 0);
+  g_assert (config->info.type == _NNS_UINT8);
+
+  if ((row_size % 4) == 0) {
+    /* No Padding Required */
+    out_mem = gst_allocator_alloc (NULL, input->size, NULL);
+    /* NOTE: do not wrap gst_memory_map in g_assert; the call would be
+     * compiled out with G_DISABLE_ASSERT and out_info left unmapped */
+    if (!gst_memory_map (out_mem, &out_info, GST_MAP_WRITE)) {
+      gst_memory_unref (out_mem);
+      return GST_FLOW_ERROR;
+    }
+    memcpy (out_info.data, input->data, input->size);
+  } else {
+    /* Do Padding: copy row by row, advancing output by the padded size */
+    uint32_t h;
+    uint8_t *ptr, *inp;
+
+    out_mem = gst_allocator_alloc (NULL, padded_size * dim[2], NULL);
+    if (!gst_memory_map (out_mem, &out_info, GST_MAP_WRITE)) {
+      gst_memory_unref (out_mem);
+      return GST_FLOW_ERROR;
+    }
+
+    ptr = (uint8_t *) out_info.data;
+    inp = (uint8_t *) input->data;
+    for (h = 0; h < dim[2]; h++) {
+      memcpy (ptr, inp, row_size);
+      inp += row_size;
+      ptr += padded_size;
+    }
+  }
+  gst_memory_unmap (out_mem, &out_info);
+  gst_buffer_append_memory (outbuf, out_mem);
+
+  /** @todo Caller of dv_decode in tensordec.c should call gst_memory_unmap for inbuf */
+
+  return GST_FLOW_OK;
+}
+
+/**
+ * @brief Direct-Video tensordec-plugin TensorDecDef instance.
+ *        Registered via tensordec_probe() in init() below; selected by
+ *        the "direct_video" mode name.
+ */
+static TensorDecDef directVideo = {
+  .modename = "direct_video",
+  .type = OUTPUT_VIDEO,
+  .init = dv_init,
+  .exit = dv_exit,
+  .setOption = dv_setOption,
+  .getOutputDim = dv_getOutputDim,
+  .getTransformSize = dv_getTransformSize,
+  .decode = dv_decode,
+};
+
+/** @brief Initialize this object for tensordec-plugin (runs at library load) */
+__attribute__ ((constructor))
+ void init (void)
+{
+  tensordec_probe (&directVideo);
+}
+
+/** @brief Destruct this object for tensordec-plugin (runs at library unload) */
+__attribute__ ((destructor))
+ void fini (void)
+{
+  tensordec_exit (directVideo.modename);
+}
* @brief Decoder Mode string.
*/
static const gchar *mode_names[] = {
- [DIRECT_VIDEO] = "direct_video",
[IMAGE_LABELING] = "image_labeling",
[BOUNDING_BOXES] = "bounding_boxes",
NULL
/** GstBaseTransform vmethod implementations */
static GstFlowReturn gst_tensordec_transform (GstBaseTransform * trans,
GstBuffer * inbuf, GstBuffer * outbuf);
-static GstFlowReturn gst_tensordec_transform_ip (GstBaseTransform * trans,
- GstBuffer * buf);
static GstCaps *gst_tensordec_transform_caps (GstBaseTransform * trans,
GstPadDirection direction, GstCaps * caps, GstCaps * filter);
static GstCaps *gst_tensordec_fixate_caps (GstBaseTransform * trans,
/** Processing units */
trans_class->transform = GST_DEBUG_FUNCPTR (gst_tensordec_transform);
- trans_class->transform_ip = GST_DEBUG_FUNCPTR (gst_tensordec_transform_ip);
+
+  /**
+   * @todo We don't have in-place ops anymore.
+   * Need a mechanism to enable them later for subplugins
+   * (e.g., the direct_* subplugins).
+   *
+   * trans_class->transform_ip =
+   * GST_DEBUG_FUNCPTR (gst_tensordec_transform_ip);
+   */
/** Negotiation units */
trans_class->transform_caps =
self->negotiated = FALSE;
self->add_padding = FALSE;
self->output_type = OUTPUT_UNKNOWN;
- self->mode = DIRECT_VIDEO;
+ self->mode = DECODE_MODE_UNKNOWN;
self->plugin_data = NULL;
self->option[0] = NULL;
self->option[1] = NULL;
}
/**
- * @brief copies sink pad buffer to src pad buffer (internal static function)
- * @param self "this" pointer
- * @param inbuf sink pad buffer
- * @param outbuf src pad buffer
- * @return GST_FLOW_OK if ok. other values represents error
- * @todo Not required with full pluginization.
- * OR Move to plugin after full pluginization.
- */
-static GstFlowReturn
-gst_tensordec_copy_buffer (GstTensorDec * self,
- GstBuffer * inbuf, GstBuffer * outbuf)
-{
- GstMapInfo inInfo, outInfo;
- uint8_t *inptr, *outptr;
- GstTensorConfig *config;
- unsigned int row, d0;
- unsigned int dest_idx = 0, src_idx = 0;
- size_t size, offset, size_out;
-
- g_assert (self->configured);
-
- config = &self->tensor_config;
-
- /** flag add_padding only for video */
- g_assert (self->add_padding);
- g_assert (self->output_type == OUTPUT_VIDEO);
-
- size = offset = config->info.dimension[0] * config->info.dimension[1];
-
- if (offset % 4)
- offset += 4 - (offset % 4);
-
- size_out = offset * config->info.dimension[2] * config->info.dimension[3];
-
- if (gst_buffer_get_size (outbuf) < size_out) {
- gst_buffer_set_size (outbuf, size_out);
- }
-
- gst_buffer_map (inbuf, &inInfo, GST_MAP_READ);
- gst_buffer_map (outbuf, &outInfo, GST_MAP_WRITE);
-
- inptr = inInfo.data;
- outptr = outInfo.data;
-
- for (d0 = 0; d0 < config->info.dimension[3]; d0++) {
- g_assert (d0 == 0);
- for (row = 0; row < config->info.dimension[2]; row++) {
- memcpy (outptr + dest_idx, inptr + src_idx, size);
- dest_idx += offset;
- src_idx += size;
- }
- }
-
- gst_buffer_unmap (inbuf, &inInfo);
- gst_buffer_unmap (outbuf, &outInfo);
-
- return GST_FLOW_OK;
-}
-
-/**
* @brief update top label index by given tensor data
* @param self "this" pointer
* @param scores given tensor data
gst_memory_unmap (in_mem, &in_info);
}
break;
- case DIRECT_VIDEO:
- res = gst_tensordec_copy_buffer (self, inbuf, outbuf);
- break;
case IMAGE_LABELING:
res = gst_tensordec_get_label (self, inbuf, outbuf);
break;
}
/**
- * @brief in-place transform. required vmethod for BaseTransform class.
- * This is allowed in direct-conversions only!
- */
-static GstFlowReturn
-gst_tensordec_transform_ip (GstBaseTransform * trans, GstBuffer * buf)
-{
- GstTensorDec *self;
-
- self = GST_TENSORDEC_CAST (trans);
-
- if (G_UNLIKELY (!self->negotiated))
- goto unknown_format;
- if (G_UNLIKELY (!self->configured))
- goto unknown_tensor;
-
- /* The only available direct conversion is direct-video w/o padding */
- g_assert (self->mode == DIRECT_VIDEO && !self->add_padding);
-
- /** DO NOTHING. THIS WORKS AS A PASSTHROUGH. We just remove metadata from video */
- return GST_FLOW_OK;
-
-unknown_format:
- GST_ELEMENT_ERROR (self, CORE, NOT_IMPLEMENTED, (NULL), ("unknown format"));
- return GST_FLOW_NOT_NEGOTIATED;
-unknown_tensor:
- GST_ELEMENT_ERROR (self, CORE, NOT_IMPLEMENTED, (NULL),
- ("unknown format for tensor"));
- return GST_FLOW_NOT_NEGOTIATED;
-}
-
-/**
* @brief configure tensor-srcpad cap from "proposed" cap.
*
* @trans ("this" pointer)
}
}
- if (self->mode == DECODE_MODE_PLUGIN) {
- gst_base_transform_set_in_place (trans, FALSE);
- } else if (self->mode == IMAGE_LABELING) {
- gst_base_transform_set_in_place (trans, FALSE);
- } else if (self->mode == DIRECT_VIDEO) {
- gst_base_transform_set_in_place (trans, !self->add_padding);
- } else {
- gst_base_transform_set_in_place (trans, TRUE);
- }
return self->negotiated;
}