void fini_dv (void) __attribute__ ((destructor));
#define DECODER_DV_VIDEO_CAPS_STR \
- GST_VIDEO_CAPS_MAKE ("{ GRAY8, RGB, BGR, RGBx, BGRx, xRGB, xBGR, RGBA, BGRA, ARGB, ABGR }") \
+ GST_VIDEO_CAPS_MAKE ("{ GRAY8, RGB, BGR, RGBx, BGRx, xRGB, xBGR, RGBA, BGRA, ARGB, ABGR, GRAY16_BE, GRAY16_LE }") \
", views = (int) 1, interlace-mode = (string) progressive"
/**
DIRECT_VIDEO_FORMAT_BGRA = 9,
DIRECT_VIDEO_FORMAT_ARGB = 10,
DIRECT_VIDEO_FORMAT_ABGR = 11,
+ DIRECT_VIDEO_FORMAT_GRAY16_BE = 12,
+ DIRECT_VIDEO_FORMAT_GRAY16_LE = 13,
} direct_video_formats;
/**
[DIRECT_VIDEO_FORMAT_BGRA] = "BGRA",
[DIRECT_VIDEO_FORMAT_ARGB] = "ARGB",
[DIRECT_VIDEO_FORMAT_ABGR] = "ABGR",
+ [DIRECT_VIDEO_FORMAT_GRAY16_BE] = "GRAY16_BE",
+ [DIRECT_VIDEO_FORMAT_GRAY16_LE] = "GRAY16_LE",
NULL,
};
case DIRECT_VIDEO_FORMAT_GRAY8:
format = GST_VIDEO_FORMAT_GRAY8;
break;
+ case DIRECT_VIDEO_FORMAT_GRAY16_BE:
+ format = GST_VIDEO_FORMAT_GRAY16_BE;
+ break;
+ case DIRECT_VIDEO_FORMAT_GRAY16_LE:
+ format = GST_VIDEO_FORMAT_GRAY16_LE;
+ break;
case DIRECT_VIDEO_FORMAT_UNKNOWN:
GST_WARNING ("Default format has been applied: GRAY8");
format = GST_VIDEO_FORMAT_GRAY8;
/** @brief get video output buffer size */
static size_t
-_get_video_xraw_bufsize (const tensor_dim dim)
+_get_video_xraw_bufsize (const tensor_dim dim, gsize data_size)
{
- /* dim[0] is bpp and there is zeropadding only when dim[0]%4 > 0 */
- return (size_t)((dim[0] * dim[1] - 1) / 4 + 1) * 4 * dim[2];
+ /* dim[0] is channels, dim[1] width, dim[2] height; data_size is bytes per
+ * element. A video/x-raw row is zero-padded so that its stride becomes a
+ * multiple of 4 bytes. Pad the row's BYTE count (channels * width *
+ * data_size), not the element count: multiplying the padded element count
+ * by data_size would yield ROUND_UP_4(width)*2 for 16-bit formats, which
+ * differs from GStreamer's GST_ROUND_UP_4 (width * pixel-stride) whenever
+ * the width is odd. */
+ return (size_t)((dim[0] * dim[1] * data_size - 1) / 4 + 1) * 4 * dim[2];
}
/** @brief tensordec-plugin's GstTensorDecoderDef callback */
{
/* Direct video uses the first tensor only even if it's multi-tensor */
const uint32_t *dim = &(config->info.info[0].dimension[0]);
+ /* per-element size in bytes of the first tensor's type */
+ gsize data_size = gst_tensor_get_element_size (config->info.info[0].type);
+ gsize transform_size = 0;
UNUSED (pdata);
UNUSED (caps);
UNUSED (size);
UNUSED (othercaps);
+ /* a size is computed only for the sink direction; otherwise 0 is returned */
if (direction == GST_PAD_SINK)
- return _get_video_xraw_bufsize (dim);
- else
- return 0; /** @todo NYI */
+ transform_size = _get_video_xraw_bufsize (dim, data_size);
+
+ return transform_size;
}
/** @brief tensordec-plugin's GstTensorDecoderDef callback */
GstMemory *out_mem;
/* Direct video uses the first tensor only even if it's multi-tensor */
const uint32_t *dim = &(config->info.info[0].dimension[0]);
- size_t size = _get_video_xraw_bufsize (dim);
+ gsize data_size = gst_tensor_get_element_size (config->info.info[0].type);
+
+ size_t size = _get_video_xraw_bufsize (dim, data_size);
UNUSED (pdata);
g_assert (outbuf);
if (gst_buffer_get_size (outbuf) > 0 && gst_buffer_get_size (outbuf) != size) {
gst_buffer_set_size (outbuf, size);
}
- g_assert (config->info.info[0].type == _NNS_UINT8);
if (gst_buffer_get_size (outbuf) == 0) {
out_mem = gst_allocator_alloc (NULL, size, NULL);
config->info.info[0].type = _NNS_UINT8;
config->info.info[0].dimension[0] = 1;
break;
+ case GST_VIDEO_FORMAT_GRAY16_BE:
+ case GST_VIDEO_FORMAT_GRAY16_LE:
+ config->info.info[0].type = _NNS_UINT16;
+ config->info.info[0].dimension[0] = 1;
+ break;
case GST_VIDEO_FORMAT_RGB:
case GST_VIDEO_FORMAT_BGR:
config->info.info[0].type = _NNS_UINT8;
break;
default:
GST_WARNING_OBJECT (self,
- "The given video caps with format \"%s\" is not supported. Please use GRAY8, RGB, BGR, RGBx, BGRx, xRGB, xBGR, RGBA, BGRA, ARGB, or ABGR.\n",
+ "The given video caps with format \"%s\" is not supported. Please use GRAY8, GRAY16_LE, GRAY16_BE, RGB, BGR, RGBx, BGRx, xRGB, xBGR, RGBA, BGRA, ARGB, or ABGR.\n",
GST_STR_NULL (gst_video_format_to_string (format)));
break;
}
switch (type) {
case _NNS_VIDEO:
/* video caps from tensor info */
- if (is_video_supported (self)
- && config.info.info[0].type == _NNS_UINT8) {
+ if (is_video_supported (self)) {
GValue supported_formats = G_VALUE_INIT;
gint colorspace, width, height;
switch (colorspace) {
case 1:
gst_tensor_converter_get_format_list (&supported_formats,
- "GRAY8", NULL);
+ "GRAY8", "GRAY16_BE", "GRAY16_LE", NULL);
break;
case 3:
gst_tensor_converter_get_format_list (&supported_formats,
* @brief Caps string for supported video format
*/
#define VIDEO_CAPS_STR \
- GST_VIDEO_CAPS_MAKE ("{ RGB, BGR, RGBx, BGRx, xRGB, xBGR, RGBA, BGRA, ARGB, ABGR, GRAY8 }") \
+ GST_VIDEO_CAPS_MAKE ("{ RGB, BGR, RGBx, BGRx, xRGB, xBGR, RGBA, BGRA, ARGB, ABGR, GRAY8, GRAY16_BE, GRAY16_LE }") \
", interlace-mode = (string) progressive"
#define append_video_caps_template(caps) \
GST_VIDEO_FORMAT_BGRA,
GST_VIDEO_FORMAT_ARGB,
GST_VIDEO_FORMAT_ABGR,
- GST_VIDEO_FORMAT_I420
+ GST_VIDEO_FORMAT_I420,
+ GST_VIDEO_FORMAT_GRAY16_BE,
+ GST_VIDEO_FORMAT_GRAY16_LE
} GstVideoFormat;
#define gst_video_info_init(i) memset (i, 0, sizeof (GstVideoInfo))
callCompareTest test_06_raw_2.log test_06_decoded_2.log 6-3 "Compare for case 6-3" 1 0
fi
+
+# Test for GRAY16_BE: convert video to tensor, decode back with
+# tensor_decoder mode=direct_video, and check the result is byte-identical
+# to the original raw stream captured from the tee.
+# NOTE(review): 'tee name =t' carries a space before '='; confirm gst-launch
+# parses this (the conventional form is 'name=t').
+gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} videotestsrc num-buffers=1 ! videoconvert ! videoscale ! video/x-raw, width=160, height=120, framerate=5/1,format=GRAY16_BE ! tee name =t t. ! queue ! tensor_converter ! tensor_decoder mode=direct_video option1=GRAY16_BE ! filesink location=\"testcase7_dv.raw\" sync=true t. ! queue ! filesink location=\"testcase7_origin.raw\" sync=true" 7 0 0 $PERFORMANCE
+callCompareTest testcase7_origin.raw testcase7_dv.raw 7 "Compare for case 7" 0 0
+
+# Test for GRAY16_LE: same round-trip pipeline with little-endian 16-bit gray.
+gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} videotestsrc num-buffers=1 ! videoconvert ! videoscale ! video/x-raw, width=160, height=120, framerate=5/1,format=GRAY16_LE ! tee name =t t. ! queue ! tensor_converter ! tensor_decoder mode=direct_video option1=GRAY16_LE ! filesink location=\"testcase8_dv.raw\" sync=true t. ! queue ! filesink location=\"testcase8_origin.raw\" sync=true" 8 0 0 $PERFORMANCE
+callCompareTest testcase8_origin.raw testcase8_dv.raw 8 "Compare for case 8" 0 0
+
rm *.log *.bmp *.png *.golden *.raw
report
TEST_TYPE_VIDEO_GRAY8, /**< pipeline for video (GRAY8) */
TEST_TYPE_VIDEO_GRAY8_PADDING, /**< pipeline for video (GRAY8), remove padding */
TEST_TYPE_VIDEO_GRAY8_3F_PADDING, /**< pipeline for video (GRAY8) 3 frames, remove padding */
+ TEST_TYPE_VIDEO_GRAY16_BE, /**< pipeline for video (GRAY16_BE) */
+ TEST_TYPE_VIDEO_GRAY16_LE, /**< pipeline for video (GRAY16_LE) */
TEST_TYPE_AUDIO_S8, /**< pipeline for audio (S8) */
TEST_TYPE_AUDIO_U8_100F, /**< pipeline for audio (U8) 100 frames */
TEST_TYPE_AUDIO_S16, /**< pipeline for audio (S16) */
"tensor_converter frames-per-tensor=3 ! tensor_sink name=test_sink",
option.num_buffers, fps);
break;
+ case TEST_TYPE_VIDEO_GRAY16_BE:
+ /** video 160x120 GRAY16_BE */
+ str_pipeline = g_strdup_printf ("videotestsrc num-buffers=%d ! videoconvert ! video/x-raw,width=160,height=120,format=GRAY16_BE,framerate=(fraction)%lu/1 ! "
+ "tensor_converter ! tensor_sink name=test_sink",
+ option.num_buffers, fps);
+ break;
+ case TEST_TYPE_VIDEO_GRAY16_LE:
+ /** video 160x120 GRAY16_LE */
+ str_pipeline = g_strdup_printf ("videotestsrc num-buffers=%d ! videoconvert ! video/x-raw,width=160,height=120,format=GRAY16_LE,framerate=(fraction)%lu/1 ! "
+ "tensor_converter ! tensor_sink name=test_sink",
+ option.num_buffers, fps);
+ break;
case TEST_TYPE_AUDIO_S8:
/** audio sample rate 16000 (8 bits, signed, little endian) */
str_pipeline = g_strdup_printf (
_free_test_data (option);
}
+
+/**
+ * @brief Test for video format GRAY16_BE.
+ */
+TEST (tensorStreamTest, videoGray16BE)
+{
+ const guint num_buffers = 5;
+ TestOption option = { num_buffers, TEST_TYPE_VIDEO_GRAY16_BE };
+
+ ASSERT_TRUE (_setup_pipeline (option));
+
+ gst_element_set_state (g_test_data.pipeline, GST_STATE_PLAYING);
+ g_main_loop_run (g_test_data.loop);
+
+ EXPECT_TRUE (_wait_pipeline_process_buffers (num_buffers));
+ gst_element_set_state (g_test_data.pipeline, GST_STATE_NULL);
+
+ /** check eos message */
+ EXPECT_EQ (g_test_data.status, TEST_EOS);
+
+ /** check received buffers and signals */
+ EXPECT_EQ (g_test_data.received, num_buffers);
+ EXPECT_EQ (g_test_data.mem_blocks, 1U);
+ /** 160x120 frame, 16-bit gray = 2 bytes per pixel, single channel */
+ EXPECT_EQ (g_test_data.received_size, 160U * 120 * 2);
+
+ /** check timestamp */
+ EXPECT_FALSE (g_test_data.invalid_timestamp);
+
+ /** check tensor config for video */
+ EXPECT_TRUE (gst_tensors_config_validate (&g_test_data.tensors_config));
+ /** expected dimension [channel, width, height, frames] = [1, 160, 120, 1] */
+ EXPECT_EQ (g_test_data.tensors_config.info.info[0].type, _NNS_UINT16);
+ EXPECT_EQ (g_test_data.tensors_config.info.info[0].dimension[0], 1U);
+ EXPECT_EQ (g_test_data.tensors_config.info.info[0].dimension[1], 160U);
+ EXPECT_EQ (g_test_data.tensors_config.info.info[0].dimension[2], 120U);
+ EXPECT_EQ (g_test_data.tensors_config.info.info[0].dimension[3], 1U);
+ EXPECT_EQ (g_test_data.tensors_config.rate_n, (int) fps);
+ EXPECT_EQ (g_test_data.tensors_config.rate_d, 1);
+
+ EXPECT_FALSE (g_test_data.test_failed);
+ _free_test_data (option);
+}
+
+/**
+ * @brief Test for video format GRAY16_LE.
+ */
+TEST (tensorStreamTest, videoGray16LE)
+{
+ const guint num_buffers = 5;
+ TestOption option = { num_buffers, TEST_TYPE_VIDEO_GRAY16_LE };
+
+ ASSERT_TRUE (_setup_pipeline (option));
+
+ gst_element_set_state (g_test_data.pipeline, GST_STATE_PLAYING);
+ g_main_loop_run (g_test_data.loop);
+
+ EXPECT_TRUE (_wait_pipeline_process_buffers (num_buffers));
+ gst_element_set_state (g_test_data.pipeline, GST_STATE_NULL);
+
+ /** check eos message */
+ EXPECT_EQ (g_test_data.status, TEST_EOS);
+
+ /** check received buffers and signals */
+ EXPECT_EQ (g_test_data.received, num_buffers);
+ EXPECT_EQ (g_test_data.mem_blocks, 1U);
+ /** 160x120 frame, 16-bit gray = 2 bytes per pixel, single channel */
+ EXPECT_EQ (g_test_data.received_size, 160U * 120 * 2);
+
+ /** check timestamp */
+ EXPECT_FALSE (g_test_data.invalid_timestamp);
+
+ /** check tensor config for video */
+ EXPECT_TRUE (gst_tensors_config_validate (&g_test_data.tensors_config));
+ /** expected dimension [channel, width, height, frames] = [1, 160, 120, 1] */
+ EXPECT_EQ (g_test_data.tensors_config.info.info[0].type, _NNS_UINT16);
+ EXPECT_EQ (g_test_data.tensors_config.info.info[0].dimension[0], 1U);
+ EXPECT_EQ (g_test_data.tensors_config.info.info[0].dimension[1], 160U);
+ EXPECT_EQ (g_test_data.tensors_config.info.info[0].dimension[2], 120U);
+ EXPECT_EQ (g_test_data.tensors_config.info.info[0].dimension[3], 1U);
+ EXPECT_EQ (g_test_data.tensors_config.rate_n, (int) fps);
+ EXPECT_EQ (g_test_data.tensors_config.rate_d, 1);
+
+ EXPECT_FALSE (g_test_data.test_failed);
+ _free_test_data (option);
+}
+
/**
* @brief Test for audio format S8.
*/