From ac6c52b3adcf755a46c6dac5fdd197bb77cf9125 Mon Sep 17 00:00:00 2001
From: gichan2-jang
Date: Tue, 22 Aug 2023 12:24:37 +0900
Subject: [PATCH] [Converter/decoder] Support GRAY16 format

Support GRAY16 format for tensor_converter and tensor_decoder::direct_video.

Signed-off-by: gichan2-jang
---
 .../tensor_decoder/tensordec-directvideo.c         | 29 +++++--
 gst/nnstreamer/elements/gsttensor_converter.c      | 12 ++-
 .../gsttensor_converter_media_info_video.h         |  2 +-
 .../elements/gsttensor_converter_media_no_video.h  |  4 +-
 tests/nnstreamer_decoder/runTest.sh                |  9 ++
 tests/nnstreamer_sink/unittest_sink.cc             | 97 ++++++++++++++++++++++
 6 files changed, 139 insertions(+), 14 deletions(-)

diff --git a/ext/nnstreamer/tensor_decoder/tensordec-directvideo.c b/ext/nnstreamer/tensor_decoder/tensordec-directvideo.c
index 75230bc..f438355 100644
--- a/ext/nnstreamer/tensor_decoder/tensordec-directvideo.c
+++ b/ext/nnstreamer/tensor_decoder/tensordec-directvideo.c
@@ -39,7 +39,7 @@ void init_dv (void) __attribute__ ((constructor));
 void fini_dv (void) __attribute__ ((destructor));
 
 #define DECODER_DV_VIDEO_CAPS_STR \
-    GST_VIDEO_CAPS_MAKE ("{ GRAY8, RGB, BGR, RGBx, BGRx, xRGB, xBGR, RGBA, BGRA, ARGB, ABGR }") \
+    GST_VIDEO_CAPS_MAKE ("{ GRAY8, RGB, BGR, RGBx, BGRx, xRGB, xBGR, RGBA, BGRA, ARGB, ABGR, GRAY16_BE, GRAY16_LE }") \
     ", views = (int) 1, interlace-mode = (string) progressive"
 
 /**
@@ -65,6 +65,8 @@ typedef enum
   DIRECT_VIDEO_FORMAT_BGRA = 9,
   DIRECT_VIDEO_FORMAT_ARGB = 10,
   DIRECT_VIDEO_FORMAT_ABGR = 11,
+  DIRECT_VIDEO_FORMAT_GRAY16_BE = 12,
+  DIRECT_VIDEO_FORMAT_GRAY16_LE = 13,
 } direct_video_formats;
 
 /**
@@ -92,6 +94,8 @@ static const char *dv_formats[] = {
   [DIRECT_VIDEO_FORMAT_BGRA] = "BGRA",
   [DIRECT_VIDEO_FORMAT_ARGB] = "ARGB",
   [DIRECT_VIDEO_FORMAT_ABGR] = "ABGR",
+  [DIRECT_VIDEO_FORMAT_GRAY16_BE] = "GRAY16_BE",
+  [DIRECT_VIDEO_FORMAT_GRAY16_LE] = "GRAY16_LE",
   NULL,
 };
 
@@ -179,6 +183,12 @@ dv_getOutCaps (void **pdata, const GstTensorsConfig * config)
     case DIRECT_VIDEO_FORMAT_GRAY8:
       format = GST_VIDEO_FORMAT_GRAY8;
       break;
+    case DIRECT_VIDEO_FORMAT_GRAY16_BE:
+      format = GST_VIDEO_FORMAT_GRAY16_BE;
+      break;
+    case DIRECT_VIDEO_FORMAT_GRAY16_LE:
+      format = GST_VIDEO_FORMAT_GRAY16_LE;
+      break;
     case DIRECT_VIDEO_FORMAT_UNKNOWN:
       GST_WARNING ("Default format has been applied: GRAY8");
       format = GST_VIDEO_FORMAT_GRAY8;
@@ -267,10 +277,10 @@ dv_getOutCaps (void **pdata, const GstTensorsConfig * config)
 
 /** @brief get video output buffer size */
 static size_t
-_get_video_xraw_bufsize (const tensor_dim dim)
+_get_video_xraw_bufsize (const tensor_dim dim, gsize data_size)
 {
   /* dim[0] is bpp and there is zeropadding only when dim[0]%4 > 0 */
-  return (size_t)((dim[0] * dim[1] - 1) / 4 + 1) * 4 * dim[2];
+  return (size_t)((dim[0] * dim[1] - 1) / 4 + 1) * 4 * dim[2] * data_size;
 }
 
 /** @brief tensordec-plugin's GstTensorDecoderDef callback */
@@ -280,15 +290,17 @@ dv_getTransformSize (void **pdata, const GstTensorsConfig * config,
 {
   /* Direct video uses the first tensor only even if it's multi-tensor */
   const uint32_t *dim = &(config->info.info[0].dimension[0]);
+  gsize data_size = gst_tensor_get_element_size (config->info.info[0].type);
+  gsize transform_size = 0;
   UNUSED (pdata);
   UNUSED (caps);
   UNUSED (size);
   UNUSED (othercaps);
 
   if (direction == GST_PAD_SINK)
-    return _get_video_xraw_bufsize (dim);
-  else
-    return 0; /** @todo NYI */
+    transform_size = _get_video_xraw_bufsize (dim, data_size);
+
+  return transform_size;
 }
 
 /** @brief tensordec-plugin's GstTensorDecoderDef callback */
@@ -300,14 +312,15 @@ dv_decode (void **pdata, const GstTensorsConfig * config,
   GstMemory *out_mem;
   /* Direct video uses the first tensor only even if it's multi-tensor */
   const uint32_t *dim = &(config->info.info[0].dimension[0]);
-  size_t size = _get_video_xraw_bufsize (dim);
+  gsize data_size = gst_tensor_get_element_size (config->info.info[0].type);
+
+  size_t size = _get_video_xraw_bufsize (dim, data_size);
   UNUSED (pdata);
 
   g_assert (outbuf);
   if (gst_buffer_get_size (outbuf) > 0 && gst_buffer_get_size (outbuf) != size) {
     gst_buffer_set_size (outbuf, size);
   }
-  g_assert (config->info.info[0].type == _NNS_UINT8);
 
   if (gst_buffer_get_size (outbuf) == 0) {
     out_mem = gst_allocator_alloc (NULL, size, NULL);
diff --git a/gst/nnstreamer/elements/gsttensor_converter.c b/gst/nnstreamer/elements/gsttensor_converter.c
index 56847fc..79d51d6 100644
--- a/gst/nnstreamer/elements/gsttensor_converter.c
+++ b/gst/nnstreamer/elements/gsttensor_converter.c
@@ -1465,6 +1465,11 @@ gst_tensor_converter_parse_video (GstTensorConverter * self,
       config->info.info[0].type = _NNS_UINT8;
       config->info.info[0].dimension[0] = 1;
       break;
+    case GST_VIDEO_FORMAT_GRAY16_BE:
+    case GST_VIDEO_FORMAT_GRAY16_LE:
+      config->info.info[0].type = _NNS_UINT16;
+      config->info.info[0].dimension[0] = 1;
+      break;
     case GST_VIDEO_FORMAT_RGB:
     case GST_VIDEO_FORMAT_BGR:
       config->info.info[0].type = _NNS_UINT8;
@@ -1483,7 +1488,7 @@ gst_tensor_converter_parse_video (GstTensorConverter * self,
       break;
     default:
       GST_WARNING_OBJECT (self,
-          "The given video caps with format \"%s\" is not supported. Please use GRAY8, RGB, BGR, RGBx, BGRx, xRGB, xBGR, RGBA, BGRA, ARGB, or ABGR.\n",
+          "The given video caps with format \"%s\" is not supported. Please use GRAY8, GRAY16_LE, GRAY16_BE, RGB, BGR, RGBx, BGRx, xRGB, xBGR, RGBA, BGRA, ARGB, or ABGR.\n",
          GST_STR_NULL (gst_video_format_to_string (format)));
       break;
   }
@@ -1937,8 +1942,7 @@ gst_tensor_converter_get_possible_media_caps (GstTensorConverter * self)
   switch (type) {
     case _NNS_VIDEO:
       /* video caps from tensor info */
-      if (is_video_supported (self)
-          && config.info.info[0].type == _NNS_UINT8) {
+      if (is_video_supported (self)) {
         GValue supported_formats = G_VALUE_INIT;
         gint colorspace, width, height;
 
@@ -1946,7 +1950,7 @@ gst_tensor_converter_get_possible_media_caps (GstTensorConverter * self)
        switch (colorspace) {
          case 1:
            gst_tensor_converter_get_format_list (&supported_formats,
-               "GRAY8", NULL);
+               "GRAY8", "GRAY16_BE", "GRAY16_LE", NULL);
            break;
          case 3:
            gst_tensor_converter_get_format_list (&supported_formats,
diff --git a/gst/nnstreamer/elements/gsttensor_converter_media_info_video.h b/gst/nnstreamer/elements/gsttensor_converter_media_info_video.h
index d5a282c..99879d1 100644
--- a/gst/nnstreamer/elements/gsttensor_converter_media_info_video.h
+++ b/gst/nnstreamer/elements/gsttensor_converter_media_info_video.h
@@ -26,7 +26,7 @@
  * @brief Caps string for supported video format
  */
 #define VIDEO_CAPS_STR \
-    GST_VIDEO_CAPS_MAKE ("{ RGB, BGR, RGBx, BGRx, xRGB, xBGR, RGBA, BGRA, ARGB, ABGR, GRAY8 }") \
+    GST_VIDEO_CAPS_MAKE ("{ RGB, BGR, RGBx, BGRx, xRGB, xBGR, RGBA, BGRA, ARGB, ABGR, GRAY8, GRAY16_BE, GRAY16_LE }") \
     ", interlace-mode = (string) progressive"
 
 #define append_video_caps_template(caps) \
diff --git a/gst/nnstreamer/elements/gsttensor_converter_media_no_video.h b/gst/nnstreamer/elements/gsttensor_converter_media_no_video.h
index 415e2e4..971dabc 100644
--- a/gst/nnstreamer/elements/gsttensor_converter_media_no_video.h
+++ b/gst/nnstreamer/elements/gsttensor_converter_media_no_video.h
@@ -38,7 +38,9 @@ typedef enum {
   GST_VIDEO_FORMAT_BGRA,
   GST_VIDEO_FORMAT_ARGB,
   GST_VIDEO_FORMAT_ABGR,
-  GST_VIDEO_FORMAT_I420
+  GST_VIDEO_FORMAT_I420,
+  GST_VIDEO_FORMAT_GRAY16_BE,
+  GST_VIDEO_FORMAT_GRAY16_LE
 } GstVideoFormat;
 
 #define gst_video_info_init(i) memset (i, 0, sizeof (GstVideoInfo))
diff --git a/tests/nnstreamer_decoder/runTest.sh b/tests/nnstreamer_decoder/runTest.sh
index e46806d..c8116d4 100644
--- a/tests/nnstreamer_decoder/runTest.sh
+++ b/tests/nnstreamer_decoder/runTest.sh
@@ -94,6 +94,15 @@ else
     callCompareTest test_06_raw_2.log test_06_decoded_2.log 6-3 "Compare for case 6-3" 1 0
 fi
 
+
+# Test for GRAY16_BE
+gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} videotestsrc num-buffers=1 ! videoconvert ! videoscale ! video/x-raw, width=160, height=120, framerate=5/1,format=GRAY16_BE ! tee name=t t. ! queue ! tensor_converter ! tensor_decoder mode=direct_video option1=GRAY16_BE ! filesink location=\"testcase7_dv.raw\" sync=true t. ! queue ! filesink location=\"testcase7_origin.raw\" sync=true" 7 0 0 $PERFORMANCE
+callCompareTest testcase7_origin.raw testcase7_dv.raw 7 "Compare for case 7" 0 0
+
+# Test for GRAY16_LE
+gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} videotestsrc num-buffers=1 ! videoconvert ! videoscale ! video/x-raw, width=160, height=120, framerate=5/1,format=GRAY16_LE ! tee name=t t. ! queue ! tensor_converter ! tensor_decoder mode=direct_video option1=GRAY16_LE ! filesink location=\"testcase8_dv.raw\" sync=true t. ! queue ! filesink location=\"testcase8_origin.raw\" sync=true" 8 0 0 $PERFORMANCE
+callCompareTest testcase8_origin.raw testcase8_dv.raw 8 "Compare for case 8" 0 0
+
 rm *.log *.bmp *.png *.golden *.raw
 
 report
diff --git a/tests/nnstreamer_sink/unittest_sink.cc b/tests/nnstreamer_sink/unittest_sink.cc
index 5afe90b..5d8d7d3 100644
--- a/tests/nnstreamer_sink/unittest_sink.cc
+++ b/tests/nnstreamer_sink/unittest_sink.cc
@@ -79,6 +79,8 @@ typedef enum {
   TEST_TYPE_VIDEO_GRAY8, /**< pipeline for video (GRAY8) */
   TEST_TYPE_VIDEO_GRAY8_PADDING, /**< pipeline for video (GRAY8), remove padding */
   TEST_TYPE_VIDEO_GRAY8_3F_PADDING, /**< pipeline for video (GRAY8) 3 frames, remove padding */
+  TEST_TYPE_VIDEO_GRAY16_BE, /**< pipeline for video (GRAY16_BE) */
+  TEST_TYPE_VIDEO_GRAY16_LE, /**< pipeline for video (GRAY16_LE) */
   TEST_TYPE_AUDIO_S8, /**< pipeline for audio (S8) */
   TEST_TYPE_AUDIO_U8_100F, /**< pipeline for audio (U8) 100 frames */
   TEST_TYPE_AUDIO_S16, /**< pipeline for audio (S16) */
@@ -664,6 +666,18 @@ _setup_pipeline (TestOption &option)
           "tensor_converter frames-per-tensor=3 ! tensor_sink name=test_sink",
           option.num_buffers, fps);
       break;
+    case TEST_TYPE_VIDEO_GRAY16_BE:
+      /** video 160x120 GRAY16_BE */
+      str_pipeline = g_strdup_printf ("videotestsrc num-buffers=%d ! videoconvert ! video/x-raw,width=160,height=120,format=GRAY16_BE,framerate=(fraction)%lu/1 ! "
+          "tensor_converter ! tensor_sink name=test_sink",
+          option.num_buffers, fps);
+      break;
+    case TEST_TYPE_VIDEO_GRAY16_LE:
+      /** video 160x120 GRAY16_LE */
+      str_pipeline = g_strdup_printf ("videotestsrc num-buffers=%d ! videoconvert ! video/x-raw,width=160,height=120,format=GRAY16_LE,framerate=(fraction)%lu/1 ! "
+          "tensor_converter ! tensor_sink name=test_sink",
+          option.num_buffers, fps);
+      break;
     case TEST_TYPE_AUDIO_S8:
       /** audio sample rate 16000 (8 bits, signed, little endian) */
       str_pipeline = g_strdup_printf (
@@ -2558,6 +2572,89 @@ TEST (tensorStreamTest, videoGray83fPadding)
   _free_test_data (option);
 }
 
+
+/**
+ * @brief Test for video format GRAY16_BE.
+ */
+TEST (tensorStreamTest, videoGray16BE)
+{
+  const guint num_buffers = 5;
+  TestOption option = { num_buffers, TEST_TYPE_VIDEO_GRAY16_BE };
+
+  ASSERT_TRUE (_setup_pipeline (option));
+
+  gst_element_set_state (g_test_data.pipeline, GST_STATE_PLAYING);
+  g_main_loop_run (g_test_data.loop);
+
+  EXPECT_TRUE (_wait_pipeline_process_buffers (num_buffers));
+  gst_element_set_state (g_test_data.pipeline, GST_STATE_NULL);
+
+  /** check eos message */
+  EXPECT_EQ (g_test_data.status, TEST_EOS);
+
+  /** check received buffers and signals */
+  EXPECT_EQ (g_test_data.received, num_buffers);
+  EXPECT_EQ (g_test_data.mem_blocks, 1U);
+  EXPECT_EQ (g_test_data.received_size, 160U * 120 * 2);
+
+  /** check timestamp */
+  EXPECT_FALSE (g_test_data.invalid_timestamp);
+
+  /** check tensor config for video */
+  EXPECT_TRUE (gst_tensors_config_validate (&g_test_data.tensors_config));
+  EXPECT_EQ (g_test_data.tensors_config.info.info[0].type, _NNS_UINT16);
+  EXPECT_EQ (g_test_data.tensors_config.info.info[0].dimension[0], 1U);
+  EXPECT_EQ (g_test_data.tensors_config.info.info[0].dimension[1], 160U);
+  EXPECT_EQ (g_test_data.tensors_config.info.info[0].dimension[2], 120U);
+  EXPECT_EQ (g_test_data.tensors_config.info.info[0].dimension[3], 1U);
+  EXPECT_EQ (g_test_data.tensors_config.rate_n, (int) fps);
+  EXPECT_EQ (g_test_data.tensors_config.rate_d, 1);
+
+  EXPECT_FALSE (g_test_data.test_failed);
+  _free_test_data (option);
+}
+
+/**
+ * @brief Test for video format GRAY16_LE.
+ */
+TEST (tensorStreamTest, videoGray16LE)
+{
+  const guint num_buffers = 5;
+  TestOption option = { num_buffers, TEST_TYPE_VIDEO_GRAY16_LE };
+
+  ASSERT_TRUE (_setup_pipeline (option));
+
+  gst_element_set_state (g_test_data.pipeline, GST_STATE_PLAYING);
+  g_main_loop_run (g_test_data.loop);
+
+  EXPECT_TRUE (_wait_pipeline_process_buffers (num_buffers));
+  gst_element_set_state (g_test_data.pipeline, GST_STATE_NULL);
+
+  /** check eos message */
+  EXPECT_EQ (g_test_data.status, TEST_EOS);
+
+  /** check received buffers and signals */
+  EXPECT_EQ (g_test_data.received, num_buffers);
+  EXPECT_EQ (g_test_data.mem_blocks, 1U);
+  EXPECT_EQ (g_test_data.received_size, 160U * 120 * 2);
+
+  /** check timestamp */
+  EXPECT_FALSE (g_test_data.invalid_timestamp);
+
+  /** check tensor config for video */
+  EXPECT_TRUE (gst_tensors_config_validate (&g_test_data.tensors_config));
+  EXPECT_EQ (g_test_data.tensors_config.info.info[0].type, _NNS_UINT16);
+  EXPECT_EQ (g_test_data.tensors_config.info.info[0].dimension[0], 1U);
+  EXPECT_EQ (g_test_data.tensors_config.info.info[0].dimension[1], 160U);
+  EXPECT_EQ (g_test_data.tensors_config.info.info[0].dimension[2], 120U);
+  EXPECT_EQ (g_test_data.tensors_config.info.info[0].dimension[3], 1U);
+  EXPECT_EQ (g_test_data.tensors_config.rate_n, (int) fps);
+  EXPECT_EQ (g_test_data.tensors_config.rate_d, 1);
+
+  EXPECT_FALSE (g_test_data.test_failed);
+  _free_test_data (option);
+}
+
 /**
  * @brief Test for audio format S8.
  */
-- 
2.7.4
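
Illustrative usage (a sketch, not part of the patch): with an nnstreamer build that includes these changes, the new formats can be exercised from the command line in the same way as the test cases above. For the 160x120 streams used in the tests, tensor_converter emits a uint16 tensor of dimension 1:160:120:1, i.e. 160 * 120 * 2 = 38400 bytes per frame. The output file name below is arbitrary.

  # Convert a GRAY16_LE frame to a tensor, decode it back to raw video, and dump it to disk.
  gst-launch-1.0 videotestsrc num-buffers=1 ! videoconvert ! \
      video/x-raw,width=160,height=120,format=GRAY16_LE ! \
      tensor_converter ! tensor_decoder mode=direct_video option1=GRAY16_LE ! \
      filesink location=gray16_out.raw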