test('unittest_sink', unittest_sink, timeout: 120, env: testenv)
- # RUN unittest_tensorRegion
- if tflite2_support_is_available
- message('ssd_mobilenet_v2_coco.tflite model will be downloaded')
- download_url = 'https://github.com/nnsuite/testcases/raw/master/DeepLearningModels/tensorflow-lite/ssd_mobilenet_v2_coco'
- filename = 'ssd_mobilenet_v2_coco.tflite'
- unittest_model = join_paths(unittest_base_dir, 'tests', 'test_models', 'models')
- r = run_command('wget', '-nc', '-P', unittest_model, download_url + filename)
- if r.returncode() == 0
- output = r.stdout().strip()
- else
- errortxt = r.stderr().strip()
- endif
-
- unittest_tensorRegion = executable('unittest_tensorRegion',
- join_paths('nnstreamer_decoder_tensorRegion', 'unittest_tensorRegion.cc'),
- dependencies: [nnstreamer_unittest_deps],
- install: get_option('install-test'),
- install_dir: unittest_install_dir
- )
- test('unittest_tensorRegion', unittest_tensorRegion, env: testenv)
- endif
+ # RUN unittest_tensor_region
+ unittest_tensor_region = executable('unittest_tensor_region',
+ join_paths('nnstreamer_decoder_tensor_region', 'unittest_tensor_region.cc'),
+ dependencies: [nnstreamer_unittest_deps],
+ install: get_option('install-test'),
+ install_dir: unittest_install_dir
+ )
+ test('unittest_tensor_region', unittest_tensor_region, env: testenv)
# Run unittest_plugins
unittest_plugins = executable('unittest_plugins',
--- /dev/null
+##\r
+# SPDX-License-Identifier: LGPL-2.1-only\r
+#\r
+# Copyright (C) 2023 Harsh Jain <hjain24in@gmail.com>\r
+#\r
+# @file generateResults.py\r
+# @brief Generate golden raw-RGBx results for the tensor_region decoder test\r
+# @author Harsh Jain <hjain24in@gmail.com>\r
+\r
+import gi\r
+gi.require_version('Gst', '1.0')\r
+from gi.repository import Gst\r
+import numpy as np\r
+import sys\r
+\r
+\r
+def resize_and_crop_image(input_image, output_image, x, y, width, height, new_width, new_height):\r
+    """Resize input_image to new_width x new_height (RGBx) and write the raw bytes of the crop."""\r
+    Gst.init(None)\r
+    pipeline_str = (\r
+        f"filesrc location={input_image} ! decodebin ! "\r
+        f"videoconvert ! videoscale ! video/x-raw,width={new_width},height={new_height},format=RGB ! "\r
+        f"videoconvert ! video/x-raw,format=RGBx ! appsink name=sinkx"\r
+    )\r
+\r
+    pipeline = Gst.parse_launch(pipeline_str)\r
+\r
+    # Fetch the appsink (named "sinkx") that delivers the raw RGBx frame\r
+    appsink = pipeline.get_by_name("sinkx")\r
+    appsink.set_property("sync", False)\r
+    appsink.set_property("max-buffers", 1)\r
+\r
+    # Start the pipeline\r
+    pipeline.set_state(Gst.State.PLAYING)\r
+\r
+    # Block until the first (and only) sample is produced\r
+    sample = appsink.emit("pull-sample")\r
+\r
+    # Extract the raw RGBx bytes from the sample\r
+    buffer = sample.get_buffer()\r
+    result, map_info = buffer.map(Gst.MapFlags.READ)\r
+    assert result, "Failed to map GstBuffer for reading"\r
+    raw_data = np.frombuffer(map_info.data, dtype=np.uint8).copy()  # copy: keeps data valid after unmap\r
+\r
+    # Unmap the buffer\r
+    buffer.unmap(map_info)\r
+\r
+    # Stop the pipeline\r
+    pipeline.set_state(Gst.State.NULL)\r
+\r
+    # Reshape flat bytes to (height, width, 4); RGBx has 4 bytes per pixel\r
+    image_data = raw_data.reshape((new_height, new_width, 4))\r
+\r
+    # Crop rows y:y+height and columns x:x+width\r
+    cropped_image = image_data[y:y+height, x:x+width, :]\r
+\r
+    # Write the cropped region as raw RGBx bytes (not an encoded image file)\r
+    with open(output_image, "wb") as file:\r
+        file.write(cropped_image.tobytes())\r
+\r
+# CLI entry: input image, output file, crop x, y, width, height\r
+input_image = sys.argv[1]\r
+output_image = sys.argv[2]\r
+x = int(sys.argv[3])\r
+y = int(sys.argv[4])\r
+width = int(sys.argv[5])\r
+height = int(sys.argv[6])\r
+new_width = 300  # Resized width\r
+new_height = 300  # Resized height\r
+\r
+resize_and_crop_image(input_image, output_image, x, y, width, height, new_width, new_height)\r
\r
# Create the GStreamer pipeline\r
pipeline_str = """\r
- filesrc location={} ! decodebin ! videoconvert ! videoscale ! video/x-raw,width=640,height=480,format=RGB ! videoscale ! video/x-raw,width=300,height=300,format=RGB ! tensor_converter ! tensor_transform mode=arithmetic option=typecast:float32,add:-127.5,div:127.5 ! tensor_filter framework=tensorflow2-lite model=ssd_mobilenet_v2_coco.tflite ! tensor_decoder mode=tensor_region option1=1 option2=../nnstreamer_decoder_boundingbox/coco_labels_list.txt option3=../nnstreamer_decoder_boundingbox/box_priors.txt ! appsink name=output\r
+ filesrc location={} ! decodebin ! videoconvert ! videoscale ! video/x-raw,width=300,height=300,format=RGB ! tensor_converter ! tensor_transform mode=arithmetic option=typecast:float32,add:-127.5,div:127.5 ! tensor_filter framework=tensorflow2-lite model=../test_models/models/ssd_mobilenet_v2_coco.tflite ! tensor_decoder mode=tensor_region option1=1 option2=../nnstreamer_decoder_boundingbox/coco_labels_list.txt option3=../nnstreamer_decoder_boundingbox/box_priors.txt ! appsink name=output\r
""".format(image_path)\r
\r
pipeline = Gst.parse_launch(pipeline_str)\r
PATH_TO_LABELS="../nnstreamer_decoder_boundingbox/coco_labels_list.txt"
PATH_TO_BOX_PRIORS="../nnstreamer_decoder_boundingbox/box_priors.txt"
PATH_TO_MODEL="../test_models/models/ssd_mobilenet_v2_coco.tflite"
+CASESTART=0
+CASEEND=1
-gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} filesrc location=${PATH_TO_IMAGE} ! pngdec ! videoconvert ! videoscale ! video/x-raw,width=300,height=300,format=RGB,framerate=0/1 ! tee name=t t. ! queue ! tensor_converter ! crop.raw t. ! queue ! tensor_converter ! tensor_transform mode=arithmetic option=typecast:float32,add:-127.5,div:127.5 ! tensor_filter framework=tensorflow2-lite model=${PATH_TO_MODEL} ! tensor_decoder mode=tensor_region option1=1 option2=${PATH_TO_LABELS} option3=${PATH_TO_BOX_PRIORS} ! crop.info tensor_crop name=crop ! other/tensors,format=flexible ! tensor_converter ! tensor_decoder mode=direct_video ! videoconvert ! videoscale ! video/x-raw,width=300,height=300,format=RGB ! pngenc ! filesink location=tensor_region_output_orange.png
-" 0 0 0 $PERFORMANCE
+gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} \
+ filesrc location=${PATH_TO_IMAGE} ! pngdec ! videoconvert ! videoscale ! video/x-raw,width=300,height=300,format=RGB,framerate=0/1 ! tensor_converter ! crop.raw \
+ filesrc location=mobilenet_ssd_tensor.0 blocksize=-1 ! application/octet-stream ! tensor_converter name=el1 input-dim=4:1:1917:1 input-type=float32 ! mux.sink_0 \
+ filesrc location=mobilenet_ssd_tensor.1 blocksize=-1 ! application/octet-stream ! tensor_converter name=el2 input-dim=91:1917:1 input-type=float32 ! mux.sink_1 \
+ tensor_mux name=mux ! other/tensors,format=static ! tensor_decoder mode=tensor_region option1=1 option2=${PATH_TO_LABELS} option3=${PATH_TO_BOX_PRIORS} ! crop.info\
+ tensor_crop name=crop ! other/tensors,format=flexible ! tensor_converter ! tensor_decoder mode=direct_video ! videoconvert ! video/x-raw,format=RGBx ! filesink location=tensor_region_output_orange.txt " 0 0 0 $PERFORMANCE
+python3 generateResults.py ${PATH_TO_IMAGE} "tensor_region_orange.txt" 58 62 219 211
-callCompareTest tensor_region_orange.png tensor_region_output_orange.png 0 "mobilenet-ssd Decode 1" 0
-rm tensor_region_output_*
+callCompareTest tensor_region_orange.txt tensor_region_output_orange.txt 0 "mobilenet-ssd Decode 1" 0
+rm tensor_region_*
report
g_signal_emit_by_name(sink, "pull-sample", &sample);\r
\r
/** Expected values of cropping info for orange.png */\r
- guint32 expected_values[] = {58, 61, 219, 213};\r
+ guint32 expected_values[] = {58, 62, 219, 211};\r
\r
if (sample != nullptr) {\r
GstBuffer* outbuf = gst_sample_get_buffer(sample);\r
if (root_path == nullptr)\r
root_path = "..";\r
\r
- const gchar* image_path = g_build_filename(root_path, "tests", "test_models", "data",\r
- "orange.png", nullptr);\r
- const gchar* model = g_build_filename(root_path, "tests", "test_models", "models", "ssd_mobilenet_v2_coco.tflite", nullptr);\r
+ const gchar* tensor_0 = g_build_filename(root_path, "tests", "nnstreamer_decoder_tensorRegion", "mobilenet_ssd_tensor.0", nullptr);\r
+ const gchar* tensor_1 = g_build_filename(root_path, "tests", "nnstreamer_decoder_tensorRegion", "mobilenet_ssd_tensor.1", nullptr);\r
const gchar* labels_path = g_build_filename(root_path, "tests", "test_models", "labels", "labels.txt", nullptr);\r
const gchar* box_priors_path = g_build_filename(root_path, "tests", "nnstreamer_decoder_boundingbox", "box_priors.txt", nullptr);\r
\r
- ASSERT_TRUE(g_file_test(image_path, G_FILE_TEST_EXISTS));\r
- ASSERT_TRUE(g_file_test(model, G_FILE_TEST_EXISTS));\r
+ ASSERT_TRUE(g_file_test(tensor_0, G_FILE_TEST_EXISTS));\r
+ ASSERT_TRUE(g_file_test(tensor_1, G_FILE_TEST_EXISTS));\r
ASSERT_TRUE(g_file_test(labels_path, G_FILE_TEST_EXISTS));\r
ASSERT_TRUE(g_file_test(box_priors_path, G_FILE_TEST_EXISTS));\r
\r
/** Create the GStreamer pipeline */\r
gchar* pipeline_str = g_strdup_printf(\r
- "filesrc location=%s ! decodebin ! videoconvert ! videoscale ! "\r
- "video/x-raw,width=640,height=480,format=RGB ! videoscale ! "\r
- "video/x-raw,width=300,height=300,format=RGB ! tensor_converter ! "\r
- "tensor_transform mode=arithmetic option=typecast:float32,add:-127.5,div:127.5 ! "\r
- "tensor_filter framework=tensorflow2-lite model=%s ! "\r
- "tensor_decoder mode=tensor_region option1=1 option2=%s option3=%s ! "\r
- "appsink name=sinkx",\r
- image_path, model, labels_path, box_priors_path);\r
+        "multifilesrc name=fs1 location=%s start-index=0 stop-index=1 caps=application/octet-stream ! tensor_converter name=el1 input-dim=4:1:1917:1 input-type=float32 ! mux.sink_0 "\r
+        "multifilesrc name=fs2 location=%s start-index=0 stop-index=1 caps=application/octet-stream ! tensor_converter name=el2 input-dim=91:1917:1 input-type=float32 ! mux.sink_1 "\r
+        "tensor_mux name=mux ! other/tensors,format=static ! tensor_decoder mode=tensor_region option1=1 option2=%s option3=%s ! appsink name=sinkx",\r
+        tensor_0, tensor_1, labels_path, box_priors_path);\r
\r
GstElement* pipeline = gst_parse_launch(pipeline_str, nullptr);\r
g_free(pipeline_str);\r
}
#endif /** ENABLE_FLATBUF && ENABLE_PROTOBUF */
-
/**
* @brief Data structure for tensor-crop test.
*/