See [Getting started](getting-started.md) : **Approach 2.** Build with CMake
-- Build example (DEV_ROOT=/dev, and gst plugin path is ```/dev/lib```)
+- Build example (set your path for NNST_ROOT, then gst plugin path is ```$NNST_ROOT/lib```)
```
# prepare
$ sudo apt-get install python-gi python3-gi # for python example
$ sudo apt-get install python-gst-1.0 python3-gst-1.0
$ sudo apt-get install python-gst-1.0-dbg python3-gst-1.0-dbg
-$ export DEV_ROOT=/dev
-$ export GST_PLUGIN_PATH=$GST_PLUGIN_PATH:$DEV_ROOT/lib
+$ export NNST_ROOT=$HOME/nnstreamer # set your own path
+$ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$NNST_ROOT/lib
+$ export GST_PLUGIN_PATH=$GST_PLUGIN_PATH:$NNST_ROOT/lib
```
```
# build example
$ mkdir -p build # We recommend building in a "build" directory
$ cd build
$ rm -rf * # Ensure the build directory is empty
-$ cmake -DCMAKE_INSTALL_PREFIX=${DEV_ROOT} -DINCLUDE_INSTALL_DIR=${DEV_ROOT}/include ..
-$ make install # Install nnstreamer plugin libraries into /dev/lib
+$ cmake -DCMAKE_INSTALL_PREFIX=${NNST_ROOT} -DINCLUDE_INSTALL_DIR=${NNST_ROOT}/include ..
+$ make install # Install nnstreamer plugin libraries into $NNST_ROOT/lib
$ cd ..
```
-## Example : filter
+## Example : filter for image classification
```
-v4l2src -- tee -- textoverlay -- videoconvert -- xvimagesink
+v4l2src -- tee -- textoverlay -- videoconvert -- ximagesink
|
--- videoscale -- tensor_converter -- tensor_filter -- tensor_sink
```
NNStreamer example for image recognition.
+- Download tflite model 'Mobilenet_1.0_224_quant' from [Here](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/lite/g3doc/models.md#image-classification-quantized-models)
Displays video sink.
```
# for python example
$ cd nnstreamer_example/example_filter
-$ python nnstreamer_example_filter.py
+$ python nnstreamer_example_filter.py
```
-## Example : video mixer
+## Example : video mixer with NNStreamer plug-in
```
-v4l2src -- tee ------------------------------------------ videomixer -- videoconvert -- xvimagesink (Mixed)
+v4l2src -- tee ------------------------------------------ videomixer -- videoconvert -- ximagesink (Mixed)
| |
--- tensor_converter -- tensordec -- videoscale ---
|
- --- videoconvert -- xvimagesink (Original)
+ --- videoconvert -- ximagesink (Original)
```
Displays two video sinks,
-
+
1. Original from cam
2. Mixed : original + scaled (tensor_converter-tensor_dec-videoscale)
+In pipeline, converter-decoder passes video frame.
+
- Run example
```
$ cd build/nnstreamer_example/example_cam
```
videotestsrc -- tensor_converter -- tensor_sink
[push buffer from tensor_sink to appsrc]
-appsrc -- tensordec -- videoconvert -- xvimagesink
+appsrc -- tensordec -- videoconvert -- ximagesink
```
Displays video sink.
-Tensor sink receives buffer and pushes it into 2nd pipeline.
-
+Tensor sink receives buffer and pushes it into appsrc in 2nd pipeline.
+
- Run example
```
$ cd build/nnstreamer_example/example_sink
/** init pipeline */
str_pipeline =
g_strdup_printf
- ("v4l2src name=cam_src ! "
+ ("v4l2src name=cam_src ! videoconvert ! "
"video/x-raw,width=%d,height=%d,format=RGB,framerate=30/1 ! tee name=t_raw "
"videomixer name=mix "
"sink_0::xpos=0 sink_0::ypos=0 sink_0::zorder=0 "
"sink_1::xpos=0 sink_1::ypos=0 sink_1::zorder=1 sink_1::alpha=0.7 ! "
- "videoconvert ! xvimagesink name=img_mixed "
+ "videoconvert ! ximagesink name=img_mixed "
"t_raw. ! queue ! mix.sink_0 "
"t_raw. ! queue ! tensor_converter ! tensordec ! videoscale ! video/x-raw,width=%d,height=%d ! mix.sink_1 "
- "t_raw. ! queue ! videoconvert ! xvimagesink name=img_origin",
+ "t_raw. ! queue ! videoconvert ! ximagesink name=img_origin",
width, height, width / 2, height / 2);
g_app.pipeline = gst_parse_launch (str_pipeline, NULL);
g_free (str_pipeline);
* NNStreamer example for image recognition.
*
* Pipeline :
- * v4l2src -- tee -- textoverlay -- videoconvert -- xvimagesink
+ * v4l2src -- tee -- textoverlay -- videoconvert -- ximagesink
* |
* --- videoscale -- tensor_converter -- tensor_filter -- tensor_sink
*
- * This app displays video sink (xvimagesink).
+ * This app displays video sink.
*
* 'tensor_filter' for image recognition.
 * Download tflite model 'Mobilenet_1.0_224_quant' from the link below,
}
/**
+ * @brief Function to print qos message.
+ */
+static void
+_parse_qos_message (GstMessage * message)
+{
+ GstFormat format;
+ guint64 processed;
+ guint64 dropped;
+
+ gst_message_parse_qos_stats (message, &format, &processed, &dropped);
+ _print_log ("format[%d] processed[%" G_GUINT64_FORMAT "] dropped[%"
+ G_GUINT64_FORMAT "]", format, processed, dropped);
+}
+
+/**
* @brief Callback for message.
*/
static void
_print_log ("received start message");
break;
+ case GST_MESSAGE_QOS:
+ _parse_qos_message (message);
+ break;
+
default:
break;
}
/** init pipeline */
str_pipeline =
g_strdup_printf
- ("v4l2src name=cam_src ! "
+ ("v4l2src name=cam_src ! videoconvert ! "
"video/x-raw,width=640,height=480,format=RGB,framerate=30/1 ! tee name=t_raw "
"t_raw. ! queue ! textoverlay name=tensor_res font-desc=\"Sans, 24\" ! "
- "videoconvert ! xvimagesink name=img_tensor "
+ "videoconvert ! ximagesink name=img_tensor "
"t_raw. ! queue ! videoscale ! video/x-raw,width=%d,height=%d ! tensor_converter ! "
"tensor_filter framework=tensorflow-lite model=%s ! "
"tensor_sink name=tensor_sink",
NNStreamer example for image recognition.
Pipeline :
-v4l2src -- tee -- textoverlay -- videoconvert -- xvimagesink
+v4l2src -- tee -- textoverlay -- videoconvert -- ximagesink
|
--- videoscale -- tensor_converter -- tensor_filter -- tensor_sink
-This app displays video sink (xvimagesink).
+This app displays video sink.
'tensor_filter' for image recognition.
Download tflite model 'Mobilenet_1.0_224_quant' from the link below,
# init pipeline
self.pipeline = Gst.parse_launch(
- 'v4l2src name=cam_src ! '
+ 'v4l2src name=cam_src ! videoconvert ! '
'video/x-raw,width=640,height=480,format=RGB,framerate=30/1 ! tee name=t_raw '
't_raw. ! queue ! textoverlay name=tensor_res font-desc=\"Sans, 24\" ! '
- 'videoconvert ! xvimagesink name=img_tensor '
+ 'videoconvert ! ximagesink name=img_tensor '
't_raw. ! queue ! videoscale ! video/x-raw,width=224,height=224 ! tensor_converter ! '
'tensor_filter framework=tensorflow-lite model=' + self.tflite_model + ' ! '
'tensor_sink name=tensor_sink'
print('[warning]', error, debug)
elif message.type == Gst.MessageType.STREAM_START:
print('received start message')
+ elif message.type == Gst.MessageType.QOS:
+ data_format, processed, dropped = message.parse_qos_stats()
+ print('[qos]', data_format, processed, dropped)
def on_new_data(self, sink, buffer):
"""Callback for tensor sink signal.
*
* [1st pipeline : videotestsrc-tensor_converter-tensor_sink]
* push buffer to appsrc
- * [2nd pipeline : appsrc-tensordec-videoconvert-xvimagesink]
+ * [2nd pipeline : appsrc-tensordec-videoconvert-ximagesink]
*
* Run example :
* Before running this example, GST_PLUGIN_PATH should be updated for nnstreamer plug-in.
/** init player pipeline */
str_pipeline =
g_strdup_printf
- ("appsrc name=player_src ! tensordec ! videoconvert ! xvimagesink");
+ ("appsrc name=player_src ! tensordec ! videoconvert ! ximagesink");
g_app.player_pipeline = gst_parse_launch (str_pipeline, NULL);
g_free (str_pipeline);
_check_cond_err (g_app.player_pipeline != NULL);