/**
* SECTION:element-assrender
+ * @title: assrender
*
* Renders timestamped SSA/ASS subtitles on top of a video stream.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v filesrc location=/path/to/mkv ! matroskademux name=d ! queue ! mpegaudioparse ! mpg123audiodec ! audioconvert ! autoaudiosink d. ! queue ! h264parse ! avdec_h264 ! videoconvert ! r. d. ! queue ! "application/x-ass" ! assrender name=r ! videoconvert ! autovideosink
* ]| This pipeline demuxes a Matroska file with h.264 video, MP3 audio and embedded ASS subtitles and renders the subtitles on top of the video.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-bs2b
+ * @title: bs2b
*
- * Improve headphone listening of stereo audio records using the bs2b library.
+ * Improve headphone listening of stereo audio records using the bs2b library.
* It does so by mixing the left and right channel in a way that simulates
* a stereo speaker setup while using headphones.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 audiotestsrc ! "audio/x-raw,channel-mask=(bitmask)0x1" ! interleave name=i ! bs2b ! autoaudiosink audiotestsrc freq=330 ! "audio/x-raw,channel-mask=(bitmask)0x2" ! i.
* ]| Play two independent sine test sources and crossfeed them.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-chromaprint
+ * @title: chromaprint
*
* The chromaprint element calculates an acoustic fingerprint for an
* audio stream which can be used to identify a song and look up
* further metadata from the <ulink url="http://acoustid.org/">Acoustid</ulink>
* and Musicbrainz databases.
*
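+ * The resulting fingerprint is posted on the pipeline #GstBus as part of a
+ * tag message once enough audio has been processed. A minimal sketch of
+ * picking it up from a bus watch follows; the tag name
+ * "chromaprint-fingerprint" used below is an assumption and should be
+ * checked against the element's tag documentation.
+ *
+ * |[<!-- language="C" -->
+ * static gboolean
+ * bus_watch (GstBus * bus, GstMessage * msg, gpointer user_data)
+ * {
+ *   if (GST_MESSAGE_TYPE (msg) == GST_MESSAGE_TAG) {
+ *     GstTagList *tags = NULL;
+ *     gchar *fingerprint = NULL;
+ *
+ *     gst_message_parse_tag (msg, &tags);
+ *     // The tag name is an assumption, see the note above.
+ *     if (gst_tag_list_get_string (tags, "chromaprint-fingerprint", &fingerprint)) {
+ *       g_print ("fingerprint: %s\n", fingerprint);
+ *       g_free (fingerprint);
+ *     }
+ *     gst_tag_list_unref (tags);
+ *   }
+ *   return TRUE;
+ * }
+ * ]|
+ *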
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -m uridecodebin uri=file:///path/to/song.ogg ! audioconvert ! chromaprint ! fakesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-curlsink
+ * @title: curlsink
* @short_description: sink that uploads data to a server using libcurl
* @see_also:
*
* This is a network sink that uses libcurl as a client to upload data to
* a server (e.g. a HTTP/FTP server).
*
- * <refsect2>
- * <title>Example launch line (upload a JPEG file to an HTTP server)</title>
+ * ## Example launch line (upload a JPEG file to an HTTP server)
* |[
* gst-launch-1.0 filesrc location=image.jpg ! jpegparse ! curlsink \
* file-name=image.jpg \
* content-type=image/jpeg \
* use-content-length=false
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-curlfilesink
+ * @title: curlfilesink
* @short_description: sink that uploads data to a server using libcurl
* @see_also:
*
* This is a network sink that uses libcurl as a client to upload data to
* a local or network drive.
*
- * <refsect2>
- * <title>Example launch line (upload a JPEG file to /home/test/images
- * directory)</title>
+ * ## Example launch line (upload a JPEG file to /home/test/images directory)
* |[
* gst-launch-1.0 filesrc location=image.jpg ! jpegparse ! curlfilesink \
* file-name=image.jpg \
* location=file:///home/test/images/
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-curlftpsink
+ * @title: curlftpsink
* @short_description: sink that uploads data to a server using libcurl
* @see_also:
*
* This is a network sink that uses libcurl as a client to upload data to
* an FTP server.
*
- * <refsect2>
- * <title>Example launch line (upload a JPEG file to /home/test/images
- * directory)</title>
+ * ## Example launch line
+ *
+ * Upload a JPEG file to the /home/test/images directory.
+ *
* |[
* gst-launch-1.0 filesrc location=image.jpg ! jpegparse ! curlftpsink \
* file-name=image.jpg \
* location=ftp://192.168.0.1/images/
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-curlhttpsink
+ * @title: curlhttpsink
* @short_description: sink that uploads data to a server using libcurl
* @see_also:
*
* This is a network sink that uses libcurl as a client to upload data to
* an HTTP server.
*
- * <refsect2>
- * <title>Example launch line (upload a JPEG file to an HTTP server)</title>
+ * ## Example launch line
+ *
+ * Upload a JPEG file to an HTTP server.
+ *
* |[
* gst-launch-1.0 filesrc location=image.jpg ! jpegparse ! curlhttpsink \
* file-name=image.jpg \
* content-type=image/jpeg \
* use-content-length=false
* ]|
- * </refsect2>
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-curlsftpsink
+ * @title: curlsftpsink
* @short_description: sink that uploads data to a server using libcurl
* @see_also:
*
* This is a network sink that uses libcurl as a client to upload data to
* a SFTP (SSH File Transfer Protocol) server.
*
- * <refsect2>
- * <title>Example launch line (upload a file to /home/john/sftp_tests/)</title>
+ * ## Example launch line
+ *
+ * Upload a file to /home/john/sftp_tests/
+ *
* |[
* gst-launch-1.0 filesrc location=/home/jdoe/some.file ! curlsftpsink \
* file-name=some.file.backup \
* ssh-priv-keyfile=/home/jdoe/.ssh/id_rsa \
* create-dirs=TRUE
* ]|
- * </refsect2>
- *
*/
#ifdef HAVE_CONFIG_H
/**
 * SECTION:element-curlsmtpsink
+ * @title: curlsmtpsink
* @short_description: sink that uploads data to a server using libcurl
* @see_also:
*
* This is a network sink that uses libcurl as a client to upload data to
* an SMTP server.
*
- * <refsect2>
- * <title>Example launch line (upload a JPEG file to an SMTP server)</title>
+ * ## Example launch line
+ *
+ * Upload a JPEG file to an SMTP server.
+ *
* |[
* gst-launch-1.0 filesrc location=image.jpg ! jpegparse ! curlsmtpsink \
* file-name=image.jpg \
* use-ssl=TRUE \
* insecure=TRUE
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-curlsshsink
+ * @title: curlsshsink
* @short_description: sink that uploads data to a server using libcurl
* @see_also:
*
/**
* SECTION:element-curltlssink
+ * @title: curltlssink
* @short_description: sink that uploads data to a server using libcurl
* @see_also:
*
/**
* SECTION:element-daaladec
+ * @title: daaladec
* @see_also: daalaenc, oggdemux
*
* This element decodes daala streams into raw video
* video codec maintained by the <ulink url="http://www.xiph.org/">Xiph.org
* Foundation</ulink>.
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 -v filesrc location=videotestsrc.ogg ! oggdemux ! daaladec ! xvimagesink
 * ]| This example pipeline will demux an ogg stream and decode the daala video. Refer to
* the daalaenc example to create the ogg file.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-daalaenc
+ * @title: daalaenc
* @see_also: daaladec, oggmux
*
* This element encodes raw video into a Daala stream.
* video codec maintained by the <ulink url="http://www.xiph.org/">Xiph.org
* Foundation</ulink>.
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 -v videotestsrc num-buffers=1000 ! daalaenc ! oggmux ! filesink location=videotestsrc.ogg
* ]| This example pipeline will encode a test video source to daala muxed in an
 * ogg container. Refer to the daaladec documentation to decode the created
 * stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-dashdemux
+ * @title: dashdemux
*
* DASH demuxer element.
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 playbin uri="http://www-itec.uni-klu.ac.at/ftp/datasets/mmsys12/RedBullPlayStreets/redbull_4s/RedBullPlayStreets_4s_isoffmain_DIS_23009_1_v_2_1c2_2011_08_30.mpd"
* ]|
/**
* SECTION:element-dc1394src
+ * @title: dc1394src
*
* Source for IIDC (Instrumentation & Industrial Digital Camera) firewire
* cameras. If several cameras are connected to the system, the desired one
* corresponding video formats are exposed in the capabilities.
* The Bayer pattern is left unspecified.
*
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 -v dc1394src ! videoconvert ! autovideosink
* ]| Capture and display frames from the first camera available in the system.
* ! "video/x-bayer,format=gbrg,width=1280,height=960,framerate=15/2" \
* ! bayer2rgb ! videoconvert ! autovideosink
* ]| Capture and display frames from a specific camera in the desired format.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-dfbvideosink
+ * @title: dfbvideosink
*
* DfbVideoSink renders video frames using the
* <ulink url="http://www.directfb.org/">DirectFB</ulink> library.
 * Rendering can happen in two different modes:
- * <itemizedlist>
- * <listitem>
- * <para>
- * Standalone: this mode will take complete control of the monitor forcing
+ *
+ * * Standalone: this mode will take complete control of the monitor forcing
* <ulink url="http://www.directfb.org/">DirectFB</ulink> to fullscreen layout.
* This is convenient to test using the gst-launch-1.0 command line tool or
* other simple applications. It is possible to interrupt playback while
* being in this mode by pressing the Escape key.
- * </para>
- * <para>
* This mode handles navigation events for every input device supported by
* the <ulink url="http://www.directfb.org/">DirectFB</ulink> library, it will
* look for available video modes in the fb.modes file and try to switch
- * the framebuffer video mode to the most suitable one. Depending on
+ * the framebuffer video mode to the most suitable one. Depending on
* hardware acceleration capabilities the element will handle scaling or not.
* If no acceleration is available it will do clipping or centering of the
* video frames respecting the original aspect ratio.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * Embedded: this mode will render video frames in a
+ *
+ * * Embedded: this mode will render video frames in a
* #GstDfbVideoSink:surface provided by the
* application developer. This is a more advanced usage of the element and
- * it is required to integrate video playback in existing
+ * it is required to integrate video playback in existing
* <ulink url="http://www.directfb.org/">DirectFB</ulink> applications.
- * </para>
- * <para>
* When using this mode the element just renders to the
- * #GstDfbVideoSink:surface provided by the
+ * #GstDfbVideoSink:surface provided by the
* application, that means it won't handle navigation events and won't resize
* the #GstDfbVideoSink:surface to fit video
 * frames geometry. The application has to implement the necessary code to grab
 * information about the negotiated geometry and resize its
 * #GstDfbVideoSink:surface accordingly.
- * </para>
- * </listitem>
- * </itemizedlist>
- * For both modes the element implements a buffer pool allocation system to
- * optimize memory allocation time and handle reverse negotiation. Indeed if
+ *
+ * For both modes the element implements a buffer pool allocation system to
+ * optimize memory allocation time and handle reverse negotiation. Indeed if
* you insert an element like videoscale in the pipeline the video sink will
 * negotiate with it to try to get a scaled video for either the fullscreen layout
* or the application provided external #GstDfbVideoSink:surface.
*
- * <refsect2>
- * <title>Example application</title>
- * <para>
+ * ## Example application
+ *
* <include xmlns="http://www.w3.org/2003/XInclude" href="element-dfb-example.xml" />
- * </para>
- * </refsect2>
- * <refsect2>
- * <title>Example pipelines</title>
+ *
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v videotestsrc ! dfbvideosink hue=20000 saturation=40000 brightness=25000
* ]| test the colorbalance interface implementation in dfbvideosink
- * </refsect2>
*/
#ifdef HAVE_CONFIG_H
dfbvideosink->backbuffer = FALSE;
dfbvideosink->pixel_format = DSPF_UNKNOWN;
- /* If we do it all by ourself we create the DirectFB context, get the
+ /* If we do it all by ourself we create the DirectFB context, get the
primary layer and use a fullscreen configuration */
if (!dfbvideosink->ext_surface) {
GST_DEBUG_OBJECT (dfbvideosink, "no external surface, taking over "
/**
* SECTION:element-dtsdec
+ * @title: dtsdec
*
* Digital Theatre System (DTS) audio decoder
- *
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * ## Example launch line
* |[
* gst-launch-1.0 dvdreadsrc title=1 ! mpegpsdemux ! dtsdec ! audioresample ! audioconvert ! alsasink
* ]| Play a DTS audio track from a dvd.
* |[
* gst-launch-1.0 filesrc location=abc.dts ! dtsdec ! audioresample ! audioconvert ! alsasink
* ]| Decode a standalone file and play it.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-faac
+ * @title: faac
* @see_also: faad
*
* faac encodes raw audio to AAC (MPEG-4 part 3) streams.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc wave=sine num-buffers=100 ! audioconvert ! faac ! matroskamux ! filesink location=sine.mkv
* ]| Encode a sine beep as aac and write to matroska container.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-faad
+ * @title: faad
 * @see_also: faac
 *
 * faad decodes AAC (MPEG-4 part 3) streams.
*
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 filesrc location=example.mp4 ! qtdemux ! faad ! audioconvert ! audioresample ! autoaudiosink
* ]| Play aac from mp4 file.
* |[
* gst-launch-1.0 filesrc location=example.adts ! faad ! audioconvert ! audioresample ! autoaudiosink
* ]| Play standalone aac bitstream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-fluiddec
+ * @title: fluiddec
* @see_also: timidity, wildmidi
*
* This element renders midi-events as audio streams using
* <ulink url="http://fluidsynth.sourceforge.net//">Fluidsynth</ulink>.
* It offers better sound quality compared to the timidity or wildmidi element.
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 filesrc location=song.mid ! midiparse ! fluiddec ! pulsesink
* ]| This example pipeline will parse the midi and render to raw audio which is
* played via pulseaudio.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-glbumper
+ * @title: glbumper
*
* Bump mapping using the normal method.
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 -v videotestsrc ! glupload ! glbumper location=normalmap.bmp ! glimagesink
* ]| A pipeline to test normal mapping.
* FBO (Frame Buffer Object) and GLSL (OpenGL Shading Language) are required.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-glcolorbalance
+ * @title: glcolorbalance
*
* Adjusts brightness, contrast, hue, saturation on a video stream.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 videotestsrc ! glupload ! glcolorbalance saturation=0.0 ! glcolorconvert ! gldownload ! ximagesink
* ]| This pipeline converts the image to black and white by setting the
* saturation to 0.0.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-glcolorscale
+ * @title: glcolorscale
*
 * Video frame scaling and colorspace conversion.
*
- * <refsect2>
- * <title>Scaling and Color space conversion</title>
- * <para>
+ * ## Scaling and Color space conversion
+ *
* Equivalent to glupload ! gldownload.
- * </para>
- * </refsect2>
- * <refsect2>
- * <title>Examples</title>
+ *
+ * ## Examples
* |[
* gst-launch-1.0 -v videotestsrc ! video/x-raw ! glcolorscale ! ximagesink
* ]| A pipeline to test colorspace conversion.
* video/x-raw, width=320, height=240, format=YV12 ! videoconvert ! autovideosink
* ]| A pipeline to test hardware scaling and colorspace conversion.
* FBO and GLSL are required.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-deinterlace
+ * @title: deinterlace
*
 * Deinterlacing based on fragment shaders.
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 videotestsrc ! glupload ! gldeinterlace ! glimagesink
* ]|
* FBO (Frame Buffer Object) and GLSL (OpenGL Shading Language) are required.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-gldifferencematte.
+ * @title: gldifferencematte
*
 * Saves a background frame and replaces it with a pixbuf.
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
 * gst-launch-1.0 videotestsrc ! glupload ! gldifferencematte location=backgroundimagefile ! glimagesink
* ]|
* FBO (Frame Buffer Object) and GLSL (OpenGL Shading Language) are required.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-gleffects.
+ * @title: gleffects
*
* GL Shading Language effects.
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 videotestsrc ! glupload ! gleffects effect=5 ! glimagesink
* ]|
* FBO (Frame Buffer Object) and GLSL (OpenGL Shading Language) are required.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-glfilterapp
+ * @title: glfilterapp
*
 * The resize and redraw callbacks can be set from client code.
*
- * <refsect2>
- * <title>CLient callbacks</title>
- * <para>
- * The graphic scene can be written from a client code through the
+ * ## Client callbacks
+ *
+ * The graphic scene can be written from client code through the
* two glfilterapp properties.
- * </para>
- * </refsect2>
- * <refsect2>
- * <title>Examples</title>
+ *
+ * ## Examples
* see gst-plugins-gl/tests/examples/generic/recordgraphic
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-glfiltercube
+ * @title: glfiltercube
*
 * The resize and redraw callbacks can be set from client code.
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 -v videotestsrc ! glfiltercube ! glimagesink
 * ]| A pipeline to map textures on the 6 cube faces.
* gst-launch-1.0 -v videotestsrc ! video/x-raw, width=640, height=480 ! glfiltercube ! glimagesink
* ]| Resize scene before drawing the cube.
* The scene size is greater than the input video size.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-glfilterglass
+ * @title: glfilterglass
*
* Map textures on moving glass.
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 -v videotestsrc ! glfilterglass ! glimagesink
 * ]| A pipeline inspired by http://www.mdk.org.pl/2007/11/17/gl-colorspace-conversions
* |[
* gst-launch-1.0 -v videotestsrc ! glfilterglass ! video/x-raw, width=640, height=480 ! glimagesink
* ]| The scene is greater than the input size.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-glfilterreflectedscreen
+ * @title: glfilterreflectedscreen
*
* Map Video Texture upon a screen, on a reflecting surface
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 videotestsrc ! glupload ! glfilterreflectedscreen ! glimagesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-glshader
+ * @title: glshader
*
* OpenGL fragment shader filter
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 videotestsrc ! glupload ! glshader fragment="\"`cat myshader.frag`\"" ! glimagesink
* ]|
* uniform float time;
* uniform float width;
* uniform float height;
- *
+ *
* void main () {
* gl_FragColor = texture2D( tex, v_texcoord );
* }
* ]|
*
- * </refsect2>
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
/**
* SECTION:element-glimagesink
+ * @title: glimagesink
*
* glimagesink renders video frames to a drawable on a local or remote
* display using OpenGL. This element can receive a Window ID from the
* See the #GstGLDisplay documentation for a list of environment variables that
* can override window/platform detection.
*
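+ * As a rough illustration of the Window ID handover mentioned above, the
+ * sketch below embeds glimagesink into an existing native window through the
+ * #GstVideoOverlay interface. The get_native_window_handle() helper is
+ * hypothetical and stands for whatever the application's toolkit provides.
+ *
+ * |[<!-- language="C" -->
+ * #include <gst/video/videooverlay.h>
+ *
+ * // Hypothetical helper: returns an X11 Window, Win32 HWND, etc. as guintptr.
+ * guintptr handle = get_native_window_handle ();
+ *
+ * GstElement *sink = gst_element_factory_make ("glimagesink", NULL);
+ * gst_video_overlay_set_window_handle (GST_VIDEO_OVERLAY (sink), handle);
+ * ]|
+ *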
- * <refsect2>
- * <title>Scaling</title>
- * <para>
+ * ## Scaling
+ *
 * Depending on the driver, OpenGL handles hardware accelerated
* scaling of video frames. This means that the element will just accept
* incoming video frames no matter their geometry and will then put them to the
* drawable scaling them on the fly. Using the #GstGLImageSink:force-aspect-ratio
* property it is possible to enforce scaling with a constant aspect ratio,
* which means drawing black borders around the video frame.
- * </para>
- * </refsect2>
- * <refsect2>
- * <title>Events</title>
- * <para>
+ *
+ * ## Events
+ *
 * Through the gl thread, glimagesink handles some events coming from the drawable
* to manage its appearance even when the data is not flowing (GST_STATE_PAUSED).
* That means that even when the element is paused, it will receive expose events
* from the drawable and draw the latest frame with correct borders/aspect-ratio.
- * </para>
- * </refsect2>
- * <refsect2>
- * <title>Examples</title>
+ *
+ * ## Examples
* |[
* gst-launch-1.0 -v videotestsrc ! video/x-raw ! glimagesink
* ]| A pipeline to test hardware scaling.
* ]| The graphic FPS scene can be greater than the input video FPS.
* The graphic scene can be written from a client code through the
* two glfilterapp properties.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-glmosaic
+ * @title: glmosaic
*
* glmixer sub element. N gl sink pads to 1 source pad.
* N + 1 OpenGL contexts shared together.
 * N <= 6 because the rendering is more like a cube than a mosaic.
 * Each OpenGL input stream is rendered on a cube face.
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 videotestsrc ! video/x-raw, format=YUY2 ! queue ! glmosaic name=m ! glimagesink \
* videotestsrc pattern=12 ! video/x-raw, format=I420, framerate=5/1, width=100, height=200 ! queue ! m. \
* videotestsrc ! gleffects effect=6 ! queue ! m.
* ]|
* FBO (Frame Buffer Object) is required.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-gloverlay
+ * @title: gloverlay
*
 * Overlay GL video texture with a JPEG or PNG image
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 videotestsrc ! gloverlay location=image.jpg ! glimagesink
* ]|
* FBO (Frame Buffer Object) is required.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-glstereomix
+ * @title: glstereomix
*
* Combine 2 input streams to produce a stereoscopic output
* stream. Input views are taken from the left pad and right pad
* The multiview representation on the output is chosen according to
* the downstream caps.
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 -v videotestsrc pattern=ball name=left \
* videotestsrc name=right glstereomix name=mix \
* right. ! video/x-raw,width=640,height=480 ! glupload ! mix. \
* mix. ! video/x-raw'(memory:GLMemory)',multiview-mode=top-bottom ! \
* glcolorconvert ! gldownload ! queue ! x264enc ! h264parse ! \
- * mp4mux ! progressreport ! filesink location=output.mp4
+ * mp4mux ! progressreport ! filesink location=output.mp4
* ]| Mix the input from a camera to the left view, and videotestsrc to the right view,
* and encode as a top-bottom frame packed H.264 video.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
/**
* SECTION:element-glstereosplit
+ * @title: glstereosplit
*
* Receive a stereoscopic video stream and split into left/right
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 videotestsrc ! glstereosplit name=s ! queue ! glimagesink s. ! queue ! glimagesink
* ]|
* FBO (Frame Buffer Object) and GLSL (OpenGL Shading Language) are required.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-gltestsrc
+ * @title: gltestsrc
*
- * <refsect2>
- * <para>
* The gltestsrc element is used to produce test video texture.
* The video test produced can be controlled with the "pattern"
* property.
- * </para>
- * <title>Example launch line</title>
- * <para>
- * <programlisting>
+ *
+ * ## Example launch line
+ *
+ * |[
* gst-launch-1.0 -v gltestsrc pattern=smpte ! glimagesink
- * </programlisting>
+ * ]|
* Shows original SMPTE color bars in a window.
- * </para>
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-gltransformation
+ * @title: gltransformation
*
* Transforms video on the GPU.
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 gltestsrc ! gltransformation rotation-z=45 ! glimagesink
* ]| A pipeline to rotate by 45 degrees
* |[
* gst-launch-1.0 gltestsrc ! gltransformation rotation-x=-45 ortho=True ! glimagesink
* ]| Rotate the video around the X-Axis by -45° with an orthographic projection
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-glvideo_flip
+ * @title: glvideo_flip
*
* Transforms video on the GPU.
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 videotestsrc ! glupload ! glvideoflip method=clockwise ! glimagesinkelement
* ]| This pipeline flips the test image 90 degrees clockwise.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-glvideomixer
+ * @title: glvideomixer
*
* Composites a number of streams into a single output scene using OpenGL in
* a similar fashion to compositor and videomixer. See the compositor plugin
* for documentation about the #GstGLVideoMixerPad properties.
*
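+ * Pad properties can also be set from application code. The following is a
+ * minimal sketch that assumes the glvideomixer pads expose the same
+ * xpos/ypos/alpha properties as compositor pads (see the compositor
+ * documentation) and that the sink pad template is named sink_%u.
+ *
+ * |[<!-- language="C" -->
+ * GstElement *mixer = gst_element_factory_make ("glvideomixer", NULL);
+ *
+ * // Request a sink pad and position/blend the stream that will be linked to it.
+ * GstPad *pad = gst_element_get_request_pad (mixer, "sink_%u");
+ * g_object_set (pad, "xpos", 320, "ypos", 240, "alpha", 0.5, NULL);
+ * gst_object_unref (pad);
+ * ]|
+ *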
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 glvideomixer name=m ! glimagesink \
* videotestsrc ! video/x-raw, format=YUY2 ! glupload ! glcolorconvert ! m. \
* videotestsrc ! glupload ! glfiltercube ! queue ! m. \
 * videotestsrc ! glupload ! gleffects effect=6 ! queue ! m.
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-glviewconvert
+ * @title: glviewconvert
*
* Convert stereoscopic video between different representations using fragment shaders.
*
* The element can use either property settings or caps negotiation to choose the
* input and output formats to process.
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 videotestsrc ! glupload ! glviewconvert ! glimagesink
* ]| Simple placebo example demonstrating identity passthrough of mono video
* ]| Force re-interpretation of the input checkers pattern as a side-by-side stereoscopic
* image and display in glimagesink.
* FBO (Frame Buffer Object) and GLSL (OpenGL Shading Language) are required.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:plugin-opengl
+ * @title: GstOpengl
*
* Cross-platform OpenGL plugin.
- * <refsect2>
- * <title>Debugging</title>
- * </refsect2>
- * <refsect2>
- * <title>Examples</title>
+ *
+ * ## Debugging
+ *
+ * ## Examples
* |[
* gst-launch-1.0 --gst-debug=gldisplay:3 videotestsrc ! glimagesink
* ]| A debugging pipeline.
 * |[
* GST_DEBUG=gl*:6 gst-launch-1.0 videotestsrc ! glimagesink
 * ]| A debugging pipeline related to shaders.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:gtkgstsink
+ * @title: GstGtkBaseSink
*
*/
*/
/**
- * SECTION:gstgtkglsink
- *
+ * SECTION:element-gtkglsink
+ * @title: gtkglsink
*/
#ifdef HAVE_CONFIG_H
*/
/**
- * SECTION:gtkgstsink
+ * SECTION:element-gtkgstsink
+ * @title: gtkgstsink
*
*/
/**
* SECTION:gtkgstglwidget
+ * @title: GtkGstGlWidget
* @short_description: a #GtkGLArea that renders GStreamer video #GstBuffers
* @see_also: #GtkGLArea, #GstBuffer
*
/**
* SECTION:gtkgstwidget
+ * @title: GtkGstWidget
* @short_description: a #GtkWidget that renders GStreamer video #GstBuffers
* @see_also: #GtkDrawingArea, #GstBuffer
*
*/
/**
* SECTION:element-hlsdemux
+ * @title: hlsdemux
*
* HTTP Live Streaming demuxer element.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 souphttpsrc location=http://devimages.apple.com/iphone/samples/bipbop/gear4/prog_index.m3u8 ! hlsdemux ! decodebin ! videoconvert ! videoscale ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-hlssink
+ * @title: hlssink
*
* HTTP Live Streaming sink/server
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 videotestsrc is-live=true ! x264enc ! mpegtsmux ! hlssink max-files=5
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
/**
* SECTION:element-iqa
+ * @title: iqa
* @short_description: Image Quality Assessment plugin.
*
* IQA will perform full reference image quality assessment, with the
* sink_2\=\(double\)0.0082939683976297474\;",
* time=(guint64)0;
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -m uridecodebin uri=file:///test/file/1 ! iqa name=iqa do-dssim=true \
* ! videoconvert ! autovideosink uridecodebin uri=file:///test/file/2 ! iqa.
* ]| This pipeline will output messages to the console for each set of compared frames.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-katedec
+ * @title: katedec
* @see_also: oggdemux
*
- * <refsect2>
- * <para>
 * This element decodes Kate streams.
* <ulink url="http://libkate.googlecode.com/">Kate</ulink> is a free codec
* for text based data, such as subtitles. Any number of kate streams can be
* embedded in an Ogg stream.
- * </para>
- * <para>
+ *
* libkate (see above url) is needed to build this plugin.
- * </para>
- * <title>Example pipeline</title>
- * <para>
+ *
+ * ## Example pipeline
+ *
 * This explicitly decodes a Kate stream:
- * <programlisting>
+ * |[
* gst-launch-1.0 filesrc location=test.ogg ! oggdemux ! katedec ! fakesink silent=TRUE
- * </programlisting>
- * </para>
- * <para>
+ * ]|
+ *
* This will automatically detect and use any Kate streams multiplexed
* in an Ogg stream:
- * <programlisting>
+ * |[
* gst-launch-1.0 playbin uri=file:///tmp/test.ogg
- * </programlisting>
- * </para>
- * </refsect2>
+ * ]|
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-kateenc
+ * @title: kateenc
* @see_also: oggmux
*
- * <refsect2>
- * <para>
 * This element encodes Kate streams.
* <ulink url="http://libkate.googlecode.com/">Kate</ulink> is a free codec
* for text based data, such as subtitles. Any number of kate streams can be
* embedded in an Ogg stream.
- * </para>
- * <para>
+ *
* libkate (see above url) is needed to build this plugin.
- * </para>
- * <title>Example pipeline</title>
- * <para>
+ *
+ * ## Example pipeline
+ *
* This encodes a DVD SPU track to a Kate stream:
- * <programlisting>
+ * |[
* gst-launch-1.0 dvdreadsrc ! dvddemux ! dvdsubparse ! kateenc category=spu-subtitles ! oggmux ! filesink location=test.ogg
- * </programlisting>
- * </para>
- * </refsect2>
+ * ]|
+ *
*/
/* FIXME:
/**
* SECTION:element-kateparse
+ * @title: kateparse
* @short_description: parses kate streams
* @see_also: katedec, vorbisparse, oggdemux, theoraparse
*
- * <refsect2>
- * <para>
* The kateparse element will parse the header packets of the Kate
* stream and put them as the streamheader in the caps. This is used in the
* multifdsink case where you want to stream live kate streams to multiple
* clients, each client has to receive the streamheaders first before they can
* consume the kate packets.
- * </para>
- * <para>
+ *
* This element also makes sure that the buffers that it pushes out are properly
* timestamped and that their offset and offset_end are set. The buffers that
* kateparse outputs have all of the metadata that oggmux expects to receive,
* which allows you to (for example) remux an ogg/kate file.
- * </para>
- * <title>Example pipelines</title>
- * <para>
- * <programlisting>
+ *
+ * ## Example pipelines
+ *
+ * |[
* gst-launch-1.0 -v filesrc location=kate.ogg ! oggdemux ! kateparse ! fakesink
- * </programlisting>
+ * ]|
* This pipeline shows that the streamheader is set in the caps, and that each
* buffer has the timestamp, duration, offset, and offset_end set.
- * </para>
- * <para>
- * <programlisting>
+ *
+ * |[
* gst-launch-1.0 filesrc location=kate.ogg ! oggdemux ! kateparse \
* ! oggmux ! filesink location=kate-remuxed.ogg
- * </programlisting>
+ * ]|
* This pipeline shows remuxing. kate-remuxed.ogg might not be exactly the same
* as kate.ogg, but they should produce exactly the same decoded data.
- * </para>
- * </refsect2>
*
*/
/**
* SECTION:element-katetag
+ * @title: katetag
* @see_also: #oggdemux, #oggmux, #kateparse, #GstTagSetter
* @short_description: retags kate streams
*
- * <refsect2>
- * <para>
* The katetag element can change the tag contained within a raw
* kate stream. Specifically, it modifies the comments header packet
* of the kate stream, as well as the language and category of the
* kate stream.
- * </para>
- * <para>
+ *
* The element will also process the stream as the #kateparse element does
* so it can be used when remuxing an Ogg Kate stream, without additional
* elements.
- * </para>
- * <para>
+ *
* Applications can set the tags to write using the #GstTagSetter interface.
* Tags contained within the kate stream will be picked up
* automatically (and merged according to the merge mode set via the tag
* setter interface).
- * </para>
- * <title>Example pipelines</title>
- * <para>
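+ *
+ * From application code the tags can be set through the #GstTagSetter
+ * interface before the pipeline is started. A minimal sketch, assuming a
+ * katetag element named "tagger" inside an existing pipeline (both names are
+ * placeholders):
+ *
+ * |[<!-- language="C" -->
+ * // "pipeline" and the element name "tagger" are placeholders.
+ * GstElement *katetag = gst_bin_get_by_name (GST_BIN (pipeline), "tagger");
+ *
+ * gst_tag_setter_add_tags (GST_TAG_SETTER (katetag),
+ *     GST_TAG_MERGE_REPLACE, GST_TAG_TITLE, "My subtitles", NULL);
+ * gst_object_unref (katetag);
+ * ]|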
+ *
+ * ## Example pipelines
+ *
* This element is only useful with gst-launch-1.0 for modifying the language
* and/or category (which are properties of the stream located in the kate
* beginning of stream header), because it does not support setting the tags
* on a #GstTagSetter interface. Conceptually, the element will usually be
* used like:
- * <programlisting>
+ * |[
* gst-launch-1.0 -v filesrc location=foo.ogg ! oggdemux ! katetag ! oggmux ! filesink location=bar.ogg
- * </programlisting>
- * </para>
- * <para>
+ * ]|
+ *
* This pipeline will set the language and category of the stream to the
* given values:
- * <programlisting>
+ * |[
* gst-launch-1.0 -v filesrc location=foo.ogg ! oggdemux ! katetag language=pt_BR category=subtitles ! oggmux ! filesink location=bar.ogg
- * </programlisting>
- * </para>
- * </refsect2>
+ * ]|
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-tiger
+ * @title: tiger
* @see_also: katedec
*
- * <refsect2>
- * <para>
 * This element decodes and renders Kate streams.
* <ulink url="http://libkate.googlecode.com/">Kate</ulink> is a free codec
* for text based data, such as subtitles. Any number of kate streams can be
* embedded in an Ogg stream.
- * </para>
- * <para>
+ *
* libkate (see above url) and <ulink url="http://libtiger.googlecode.com/">libtiger</ulink>
* are needed to build this element.
- * </para>
- * <title>Example pipeline</title>
- * <para>
+ *
+ * ## Example pipeline
+ *
* This pipeline renders a Kate stream on top of a Theora video multiplexed
* in the same stream:
- * <programlisting>
+ * |[
* gst-launch-1.0 \
* filesrc location=video.ogg ! oggdemux name=demux \
* demux. ! queue ! theoradec ! videoconvert ! tiger name=tiger \
* demux. ! queue ! kateparse ! tiger. \
* tiger. ! videoconvert ! autovideosink
- * </programlisting>
- * </para>
- * </refsect2>
+ * ]|
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-ladspa
+ * @title: ladspa
* @short_description: bridge for LADSPA (Linux Audio Developer's Simple Plugin API)
* @see_also: #GstAudioConvert #GstAudioResample, #GstAudioTestSrc, #GstAutoAudioSink
*
* element classification. The functionality you get depends on the LADSPA plugins
* you have installed.
*
- * <refsect2>
- * <title>Example LADSPA line without this plugins</title>
+ * ## Example LADSPA line without this plugin
* |[
* (padsp) listplugins
* (padsp) analyseplugin cmt.so amp_mono
* (padsp) applyplugin testin.wav testout.wav cmt.so amp_mono 2
* gst-launch-1.0 playbin uri=file://"$PWD"/testout.wav
* ]| Decode any audio file into wav with the format expected for the specific ladspa plugin to be applied, apply the ladspa filter and play it.
- * </refsect2>
*
* Now with this plugin:
*
- * <refsect2>
- * <title>Example LADSPA line with this plugins</title>
+ * ## Example LADSPA line with this plugin
* |[
* gst-launch-1.0 autoaudiosrc ! ladspa-cmt-so-amp-mono gain=2 ! ladspa-caps-so-plate ! ladspa-tap-echo-so-tap-stereo-echo l-delay=500 r-haas-delay=500 ! tee name=myT myT. ! queue ! autoaudiosink myT. ! queue ! audioconvert ! goom ! videoconvert ! xvimagesink pixel-aspect-ratio=3/4
 * ]| Get audio input, filter it through CAPS Plate and TAP Stereo Echo, play it and show a visualization (headphones recommended).
- * </refsect2>
*
* In case you wonder the plugin naming scheme, quoting ladspa.h:
* "Plugin types should be identified by file and label rather than by
* on top of the audio in and out one, so some parameters are readable too.
*
* You can see the listing of plugins available with:
- * <refsect2>
- * <title>Inspecting the plugins list</title>
+ *
+ * ## Inspecting the plugins list
* |[
* gst-inspect ladspa
* ]| List available LADSPA plugins on gstreamer.
- * </refsect2>
*
* You can see the parameters of any plugin with:
- * <refsect2>
- * <title>Inspecting the plugins</title>
+ *
+ * ## Inspecting the plugins
* |[
* gst-inspect ladspa-retro-flange-1208-so-retroflange
* ]| List details of the plugin, parameters, range and defaults included.
- * </refsect2>
*
 * The elements are categorized in:
- * <itemizedlist>
- * <listitem><para>Filter/Effect/Audio/LADSPA:</para>
- * <refsect2>
- * <title>Example Filter/Effect/Audio/LADSPA line with this plugins</title>
+ *
+ * * Filter/Effect/Audio/LADSPA:
+ *
+ * ## Example Filter/Effect/Audio/LADSPA line with this plugin
* |[
* gst-launch-1.0 filesrc location="$myfile" ! decodebin ! audioconvert ! audioresample ! ladspa-calf-so-reverb decay-time=15 high-frq-damp=20000 room-size=5 diffusion=1 wet-amount=2 dry-amount=2 pre-delay=50 bass-cut=20000 treble-cut=20000 ! ladspa-tap-echo-so-tap-stereo-echo l-delay=500 r-haas-delay=500 ! autoaudiosink
* ]| Decode any audio file, filter it through Calf Reverb LADSPA then TAP Stereo Echo, and play it.
- * </refsect2>
- * </listitem>
- * <listitem><para>Source/Audio/LADSPA:</para>
- * <refsect2>
- * <title>Example Source/Audio/LADSPA line with this plugins</title>
+ *
+ * * Source/Audio/LADSPA:
+ *
+ * ## Example Source/Audio/LADSPA line with this plugin
* |[
* gst-launch-1.0 ladspasrc-sine-so-sine-fcac frequency=220 amplitude=100 ! audioconvert ! autoaudiosink
* ]| Generate a sine wave with Sine Oscillator (Freq:control, Amp:control) and play it.
- * </refsect2>
- * <refsect2>
- * <title>Example Source/Audio/LADSPA line with this plugins</title>
+ *
+ * ## Example Source/Audio/LADSPA line with this plugin
* |[
* gst-launch-1.0 ladspasrc-caps-so-click bpm=240 volume=1 ! autoaudiosink
* ]| Generate clicks with CAPS Click - Metronome at 240 beats per minute and play it.
- * </refsect2>
- * <refsect2>
- * <title>Example Source/Audio/LADSPA line with this plugins</title>
+ *
+ * ## Example Source/Audio/LADSPA line with this plugin
* |[
* gst-launch-1.0 ladspasrc-random-1661-so-random-fcsc-oa ! ladspa-cmt-so-amp-mono gain=1.5 ! ladspa-caps-so-plate ! tee name=myT myT. ! queue ! autoaudiosink myT. ! queue ! audioconvert ! wavescope ! videoconvert ! autovideosink
 * ]| Generate a random wave, filter it through Mono Amplifier and Versatile Plate Reverb, and play it while visualizing it.
- * </refsect2>
- * </listitem>
- * <listitem><para>Sink/Audio/LADSPA:</para>
- * <refsect2>
- * <title>Example Sink/Audio/LADSPA line with this plugins</title>
+ *
+ * * Sink/Audio/LADSPA:
+ *
+ * ## Example Sink/Audio/LADSPA line with this plugin
* |[
* gst-launch-1.0 autoaudiosrc ! ladspa-cmt-so-amp-mono gain=2 ! ladspa-caps-so-plate ! ladspa-tap-echo-so-tap-stereo-echo l-delay=500 r-haas-delay=500 ! tee name=myT myT. ! audioconvert ! audioresample ! queue ! ladspasink-cmt-so-null-ai myT. ! audioconvert ! audioresample ! queue ! goom ! videoconvert ! xvimagesink pixel-aspect-ratio=3/4
 * ]| Get audio input, filter it through Mono Amplifier, CAPS Plate LADSPA and TAP Stereo Echo, explicitly discard the audio with Null (Audio Output), and show a visualization (headphones recommended).
- * </refsect2>
- * </listitem>
- * </itemizedlist>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-libde265dec
+ * @title: libde265dec
*
* Decodes HEVC/H.265 video.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=bitstream.hevc ! 'video/x-hevc,stream-format=byte-stream,framerate=25/1' ! libde265dec ! autovideosink
* ]| The above pipeline decodes the HEVC/H.265 bitstream and renders it to the screen.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-lv2
+ * @title: lv2
* @short_description: bridge for LV2.
*
* LV2 is a standard for plugins and matching host applications,
/**
* SECTION:element-openalsink
+ * @title: openalsink
* @see_also: openalsrc
 * @short_description: output raw audio samples through OpenAL
*
*
 * Unfortunately the capture API doesn't have a format enumeration/check. All you can do is try opening it and see if it works.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 audiotestsrc ! audioconvert ! volume volume=0.5 ! openalsink
* ]| will play a sine wave (continuous beep sound) through OpenAL.
* |[
* gst-launch-1.0 openalsrc ! "audio/x-raw,format=S16LE,rate=44100" ! audioconvert ! volume volume=0.25 ! openalsink
* ]| will capture and play audio through OpenAL.
- * </refsect2>
+ *
*/
/*
/**
* SECTION:element-openalsrc
+ * @title: openalsrc
* @see_also: openalsink
* @short_description: capture raw audio samples through OpenAL
*
* This element captures raw audio samples through OpenAL.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v openalsrc ! audioconvert ! wavenc ! filesink location=stream.wav
 * ]| will capture sound through OpenAL and encode it to a wav file.
* |[
* gst-launch-1.0 openalsrc ! "audio/x-raw,format=S16LE,rate=44100" ! audioconvert ! volume volume=0.25 ! openalsink
* ]| will capture and play audio through OpenAL.
- * </refsect2>
+ *
*/
/*
/**
* SECTION:element-opusparse
+ * @title: opusparse
* @see_also: opusenc, opusdec
*
* This element parses OPUS packets.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v filesrc location=opusdata ! opusparse ! opusdec ! audioconvert ! audioresample ! alsasink
 * ]| Decodes and plays an unmuxed Opus file.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
return type;
}
-/** Audio decoder subclass */
+/* Audio decoder subclass */
static GstStaticPadTemplate audio_sink_template =
GST_STATIC_PAD_TEMPLATE ("sink",
GST_PAD_SINK,
{
}
-/** Video decoder subclass */
+/* Video decoder subclass */
static GstStaticPadTemplate video_sink_template =
GST_STATIC_PAD_TEMPLATE ("sink",
GST_PAD_SINK,
/**
* SECTION:element-input-selector
+ * @title: input-selector
* @see_also: #GstOutputSelector
*
* Direct one out of N input streams to the output pad.
* The input pads are from a GstPad subclass and have additional
* properties, which users may find useful, namely:
*
- * <itemizedlist>
- * <listitem>
- * "running-time": Running time of stream on pad (#gint64)
- * </listitem>
- * <listitem>
- * "tags": The currently active tags on the pad (#GstTagList, boxed type)
- * </listitem>
- * <listitem>
- * "active": If the pad is currently active (#gboolean)
- * </listitem>
- * <listitem>
- * "always-ok" : Make an inactive pads return #GST_FLOW_OK instead of
- * #GST_FLOW_NOT_LINKED
- * </listitem>
- * </itemizedlist>
+ * * "running-time": Running time of stream on pad (#gint64)
+ * * "tags": The currently active tags on the pad (#GstTagList, boxed type)
+ * * "active": If the pad is currently active (#gboolean)
+ * * "always-ok" : Make an inactive pads return #GST_FLOW_OK instead of #GST_FLOW_NOT_LINKED
+ *
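+ * A minimal sketch of switching inputs and reading one of these pad
+ * properties from application code (the pipeline variable, the element name
+ * "sel" and the pad name "sink_1" are placeholders and assume the pads
+ * already exist):
+ *
+ * |[<!-- language="C" -->
+ * GstElement *sel = gst_bin_get_by_name (GST_BIN (pipeline), "sel");
+ * GstPad *pad = gst_element_get_static_pad (sel, "sink_1");
+ * gboolean active;
+ *
+ * // Read one of the pad properties listed above ...
+ * g_object_get (pad, "active", &active, NULL);
+ *
+ * // ... and route this input to the output pad.
+ * g_object_set (sel, "active-pad", pad, NULL);
+ *
+ * gst_object_unref (pad);
+ * gst_object_unref (sel);
+ * ]|
+ *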
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-rsvgdec
+ * @title: rsvgdec
*
 * This element renders SVG graphics.
*
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 filesrc location=image.svg ! rsvgdec ! imagefreeze ! videoconvert ! autovideosink
* ]| render and show a svg image.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rsvgoverlay
+ * @title: rsvgoverlay
*
 * This element overlays SVG graphics over the video. SVG data can
* either be specified through properties, or fed through the
* the values of the x/y/width/height attributes, by setting
 * height-/width-relative to 1.0 and all other attributes to 0.
*
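+ * For instance, an application could push new SVG markup into the overlay at
+ * runtime through the data property; a minimal sketch, where the overlay
+ * variable is assumed to reference an rsvgoverlay instance:
+ *
+ * |[<!-- language="C" -->
+ * // "overlay" is assumed to be an rsvgoverlay element obtained elsewhere.
+ * const gchar *svg =
+ *     "<svg viewBox=\"0 0 800 600\">"
+ *     "<circle cx=\"400\" cy=\"300\" r=\"50\" fill=\"red\"/>"
+ *     "</svg>";
+ * g_object_set (overlay, "data", svg, NULL);
+ * ]|
+ *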
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 -v videotestsrc ! videoconvert ! rsvgoverlay location=foo.svg ! videoconvert ! autovideosink
 * ]| specifies the SVG location through the location property.
* |[
* gst-launch-1.0 -v videotestsrc ! videoconvert ! rsvgoverlay data='<svg viewBox="0 0 800 600"><image x="80%" y="80%" width="10%" height="10%" xlink:href="foo.jpg" /></svg>' ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rtmpsink
+ * @title: rtmpsink
*
* This element delivers data to a streaming server via RTMP. It uses
* librtmp, and supports any protocols/urls that librtmp supports.
* for librtmp, such as 'flashver=version'. See the librtmp documentation
* for more detail
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! ffenc_flv ! flvmux ! rtmpsink location='rtmp://localhost/path/to/stream live=1'
* ]| Encode a test video stream to FLV video format and stream it via RTMP.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rtmpsrc
+ * @title: rtmpsrc
*
* This plugin reads data from a local or remote location specified
 * by a URI. This location can be specified using any protocol supported by
* the RTMP library, i.e. rtmp, rtmpt, rtmps, rtmpe, rtmfp, rtmpte and rtmpts.
*
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 -v rtmpsrc location=rtmp://somehost/someurl ! fakesink
* ]| Open an RTMP location and pass its content to fakesink.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-sbdec
+ * @title: sbdec
*
 * This element decodes Bluetooth SBC audio streams to raw integer PCM audio
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v filesrc location=audio.sbc ! sbcparse ! sbcdec ! audioconvert ! audioresample ! autoaudiosink
* ]| Decode a raw SBC file.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-sbenc
+ * @title: sbenc
*
 * This element encodes raw integer PCM audio into Bluetooth SBC audio.
*
* allocation-mode can be set by adding a capsfilter element with appropriate
* filtercaps after the sbcenc encoder element.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v audiotestsrc ! sbcenc ! rtpsbcpay ! udpsink
* ]| Encode a sine wave into SBC, RTP payload it and send over the network using UDP
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-mssdemux
+ * @title: mssdemux
*
 * Demuxes a Microsoft Smooth Streaming manifest into its audio and/or video streams.
*
- *
*/
/*
/**
* SECTION:element-dtmfdetect
+ * @title: dtmfdetect
* @short_description: Detects DTMF tones
*
* This element will detect DTMF tones and emit messages.
*
- * The message is called <classname>"dtmf-event"</classname> and has
- * the following fields:
- * <itemizedlist>
- * <listitem>
- * <para>
- * gint <classname>type</classname> (0-1):
- * The application uses this field to specify which of the two methods
+ * The message is called `dtmf-event` and has the following fields:
+ *
+ * * gint `type` (0-1): The application uses this field to specify which of the two methods
* specified in RFC 2833 to use. The value should be 0 for tones and 1 for
* named events. Tones are specified by their frequencies and events are
 * specified by their number. This element can only take events as input.
 * Do not confuse with "method", which specifies the output.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * gint <classname>number</classname> (0-16):
- * The event number.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * gint <classname>method</classname> (2):
- * This field will always been 2 (ie sound) from this element.
- * </para>
- * </listitem>
- * </itemizedlist>
+ * * gint `number` (0-16): The event number.
+ * * gint `method` (2): This field will always be 2 (i.e. sound) from this element.
+ *
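+ * A minimal sketch of unpacking such a message in a bus watch (the function
+ * name is a placeholder):
+ *
+ * |[<!-- language="C" -->
+ * static void
+ * handle_message (GstMessage * msg)
+ * {
+ *   if (GST_MESSAGE_TYPE (msg) == GST_MESSAGE_ELEMENT) {
+ *     const GstStructure *s = gst_message_get_structure (msg);
+ *
+ *     if (s && gst_structure_has_name (s, "dtmf-event")) {
+ *       gint type, number, method;
+ *
+ *       gst_structure_get_int (s, "type", &type);
+ *       gst_structure_get_int (s, "number", &number);
+ *       gst_structure_get_int (s, "method", &method);
+ *       g_print ("DTMF event %d (type %d, method %d)\n", number, type, method);
+ *     }
+ *   }
+ * }
+ * ]|
+ *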
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-spanplc
+ * @title: spanplc
*
* The spanplc (Packet Loss Concealment) element provides a synthetic
* fill-in signal, to minimise the audible effect of lost packets in
/**
* SECTION:element-srtpdec
+ * @title: srtpdec
* @see_also: srtpenc
*
* gstrtpdec acts as a decoder that removes security from SRTP and SRTCP
* other means. If no rollover counter is provided by the user, 0 is
* used by default.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 udpsrc port=5004 caps='application/x-srtp, payload=(int)8, ssrc=(uint)1356955624, srtp-key=(buffer)012345678901234567890123456789012345678901234567890123456789, srtp-cipher=(string)aes-128-icm, srtp-auth=(string)hmac-sha1-80, srtcp-cipher=(string)aes-128-icm, srtcp-auth=(string)hmac-sha1-80' ! srtpdec ! rtppcmadepay ! alawdec ! pulsesink
* ]| Receive PCMA SRTP packets through UDP using caps to specify
* gst-launch-1.0 audiotestsrc ! alawenc ! rtppcmapay ! 'application/x-rtp, payload=(int)8, ssrc=(uint)1356955624' ! srtpenc key="012345678901234567890123456789012345678901234567890123456789" ! udpsink port=5004
 * ]| Send PCMA SRTP packets through UDP, noting how the SSRC is forced so
* that the receiver will recognize it.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
- * SECTION:gst-plugin-bad-plugins-srtpenc
+ * SECTION:element-srtpenc
+ * @title: srtpenc
* @see_also: srtpdec
*
* gstrtpenc acts as an encoder that adds security to RTP and RTCP
/**
* SECTION:element-teletextdec
+ * @title: teletextdec
*
 * Decode a stream of raw VBI packets containing teletext information to an RGBA
* stream.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v -m filesrc location=recording.mpeg ! tsdemux ! teletextdec ! videoconvert ! ximagesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-ttmlparse
+ * @title: ttmlparse
*
* Parses timed text subtitle files described using Timed Text Markup Language
* (TTML). Currently, only the EBU-TT-D profile of TTML, designed for
* elements. A downstream renderer element uses this information to correctly
* render the text on top of video frames.
*
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 filesrc location=<media file location> ! video/quicktime ! qtdemux name=q ttmlrender name=r q. ! queue ! h264parse ! avdec_h264 ! autovideoconvert ! r.video_sink filesrc location=<subtitle file location> blocksize=16777216 ! queue ! ttmlparse ! r.text_sink r. ! ximagesink q. ! queue ! aacparse ! avdec_aac ! audioconvert ! alsasink
* ]| Parse and render TTML subtitles contained in a single XML file over an
* MP4 stream containing H.264 video and AAC audio.
- * </refsect2>
+ *
*/
#include <stdio.h>
/**
* SECTION:element-ttmlrender
+ * @title: ttmlrender
*
* Renders timed text on top of a video stream. It receives text in buffers
* from a ttmlparse element; each text string is in its own #GstMemory within
* the GstBuffer, and the styling and layout associated with each text string
* is in metadata attached to the #GstBuffer.
*
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 filesrc location=<media file location> ! video/quicktime ! qtdemux name=q ttmlrender name=r q. ! queue ! h264parse ! avdec_h264 ! autovideoconvert ! r.video_sink filesrc location=<subtitle file location> blocksize=16777216 ! queue ! ttmlparse ! r.text_sink r. ! ximagesink q. ! queue ! aacparse ! avdec_aac ! audioconvert ! alsasink
* ]| Parse and render TTML subtitles contained in a single XML file over an
* MP4 stream containing H.264 video and AAC audio:
- * </refsect2>
+ *
*/
#include <gst/video/video.h>
/**
* SECTION:gstsubtitle
+ * @title: GstSubtitle
* @short_description: Library for describing sets of static subtitles.
*
* This library enables the description of static text scenes made up of a
/**
* SECTION:gstsubtitlemeta
+ * @title: GstSubtitleMeta
* @short_description: Metadata class for timed-text subtitles.
*
* The GstSubtitleMeta class enables the layout and styling information needed
/**
* SECTION:element-voaacenc
+ * @title: voaacenc
*
- * AAC audio encoder based on vo-aacenc library
+ * AAC audio encoder based on the vo-aacenc library
* <ulink url="http://sourceforge.net/projects/opencore-amr/files/vo-aacenc/">vo-aacenc library source file</ulink>.
- *
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=abc.wav ! wavparse ! audioresample ! audioconvert ! voaacenc ! filesink location=abc.aac
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-voamrwbenc
+ * @title: voamrwbenc
* @see_also: #GstAmrWbDec, #GstAmrWbParse
*
- * AMR wideband encoder based on the
+ * AMR wideband encoder based on the
* <ulink url="http://www.penguin.cz/~utx/amr">reference codec implementation</ulink>.
- *
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * ## Example launch line
* |[
* gst-launch filesrc location=abc.wav ! wavparse ! audioresample ! audioconvert ! voamrwbenc ! filesink location=abc.amr
* ]|
 * Please note that the above stream misses the header that is needed to play
* the stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:plugin-vulkan
+ * @title: vulkan
*
* Cross-platform Vulkan plugin.
*/
/**
* SECTION:vkbuffermemory
+ * @title: vkbuffermemory
* @short_description: memory subclass for Vulkan buffer memory
* @see_also: #GstMemory, #GstAllocator
*
/**
* gst_is_vulkan_buffer_memory:
* @mem:a #GstMemory
- *
+ *
* Returns: whether the memory at @mem is a #GstVulkanBufferMemory
*/
gboolean
/**
* SECTION:vkbufferpool
+ * @title: GstVulkanBufferPool
* @short_description: buffer pool for #GstVulkanBufferMemory objects
* @see_also: #GstBufferPool, #GstVulkanBufferMemory
*
*
* A #GstVulkanBufferPool is created with gst_vulkan_buffer_pool_new()
*
- * #GstVulkanBufferPool implements the VideoMeta buffer pool option
+ * #GstVulkanBufferPool implements the VideoMeta buffer pool option
* #GST_BUFFER_POOL_OPTION_VIDEO_META
*/
/**
* SECTION:vkimagememory
+ * @title: GstVkImageMemory
* @short_description: memory subclass for Vulkan image memory
* @see_also: #GstMemory, #GstAllocator
*
/**
* gst_is_vulkan_image_memory:
* @mem:a #GstMemory
- *
+ *
* Returns: whether the memory at @mem is a #GstVulkanImageMemory
*/
gboolean
/**
* SECTION:vkmemory
+ * @title: GstVkMemory
* @short_description: memory subclass for Vulkan device memory
* @see_also: #GstMemory, #GstAllocator
*
* GstVulkanMemory is a #GstMemory subclass providing support for the mapping of
- * Vulkan device memory.
+ * Vulkan device memory.
*/
/* WARNING: while suballocation is allowed, nothing prevents aliasing which
/**
* gst_is_vulkan_memory:
* @mem:a #GstMemory
- *
+ *
* Returns: whether the memory at @mem is a #GstVulkanMemory
*/
gboolean
/**
* SECTION:element-vulkansink
+ * @title: vulkansink
*
* vulkansink renders video frames to a drawable on a local or remote
* display using Vulkan.
/**
* SECTION:element-vulkanupload
+ * @title: vulkanupload
*
* vulkanupload uploads data into Vulkan memory objects.
*/
*/
/**
- * SECTION:gstglwindow
+ * SECTION:vkwindow
* @short_description: window/surface abstraction
* @title: GstVulkanWindow
* @see_also: #GstGLContext, #GstGLDisplay
/**
* SECTION:element-waylandsink
+ * @title: waylandsink
*
 * waylandsink creates its own window and renders the decoded video frames into it.
 * Set up the Wayland environment as described in the
 * <ulink url="http://wayland.freedesktop.org/building.html">Wayland</ulink> home page.
 * The current implementation is based on the weston compositor.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v videotestsrc ! waylandsink
* ]| test the video rendering in wayland
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-wildmidi
+ * @title: wildmidi
* @see_also: timidity
*
* This element renders midi-files as audio streams using
* uses the same sound-patches as timidity (it tries the path in $WILDMIDI_CFG,
* $HOME/.wildmidirc and /etc/wildmidi.cfg)
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 filesrc location=song.mid ! wildmidi ! alsasink
* ]| This example pipeline will parse the midi and render to raw audio which is
* played via alsa.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-x265enc
+ * @title: x265enc
*
* This element encodes raw video into H265 compressed data.
*
/**
* SECTION:element-zbar
+ * @title: zbar
*
* Detect bar codes in the video streams and send them as element messages to
 * the #GstBus if the #GstZBar:message property is %TRUE.
 * If the #GstZBar:attach-frame property is %TRUE, the posted barcode message
* includes a sample of the frame where the barcode was detected (Since 1.6).
*
- * The element generate messages named
- * <classname>"barcode"</classname>. The structure containes these
- * fields:
- * <itemizedlist>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"timestamp"</classname>:
- * the timestamp of the buffer that triggered the message.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * gchar*
- * <classname>"type"</classname>:
- * the symbol type.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * gchar*
- * <classname>"symbol"</classname>:
- * the deteted bar code data.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * gint
- * <classname>"quality"</classname>:
- * an unscaled, relative quantity: larger values are better than smaller
+ * The element generates messages named `barcode`. The structure contains these fields:
+ *
+ * * #GstClockTime `timestamp`: the timestamp of the buffer that triggered the message.
+ * * gchar * `type`: the symbol type.
+ * * gchar * `symbol`: the detected bar code data.
+ * * gint `quality`: an unscaled, relative quantity: larger values are better than smaller
* values.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * GstSample
- * <classname>"frame"</classname>:
- * the frame in which the barcode message was detected, if
+ * * GstSample `frame`: the frame in which the barcode message was detected, if
* the #GstZBar:attach-frame property was set to %TRUE (Since 1.6)
- * </para>
- * </listitem>
- * </itemizedlist>
*
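+ * A minimal sketch of an application-side bus watch that reads these fields;
+ * the callback name and the plain g_print() output are illustrative only:
+ *
+ * |[
+ * static gboolean
+ * on_bus_message (GstBus * bus, GstMessage * msg, gpointer user_data)
+ * {
+ *   const GstStructure *s;
+ *   gint quality = 0;
+ *
+ *   if (GST_MESSAGE_TYPE (msg) == GST_MESSAGE_ELEMENT) {
+ *     s = gst_message_get_structure (msg);
+ *     if (gst_structure_has_name (s, "barcode")) {
+ *       gst_structure_get_int (s, "quality", &quality);
+ *       g_print ("%s barcode: %s (quality %d)\n",
+ *           gst_structure_get_string (s, "type"),
+ *           gst_structure_get_string (s, "symbol"), quality);
+ *     }
+ *   }
+ *   return TRUE;
+ * }
+ * ]|
+ *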
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 -m v4l2src ! videoconvert ! zbar ! videoconvert ! xvimagesink
* ]| This pipeline will detect barcodes and send them as messages.
* |[
* gst-launch-1.0 -m v4l2src ! tee name=t ! queue ! videoconvert ! zbar ! fakesink t. ! queue ! xvimagesink
* ]| Same as above, but running the filter on a branch to keep the display in color
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION: gstaggregator
+ * @title: GstAggregator
* @short_description: manages a set of pads with the purpose of
* aggregating their buffers.
* @see_also: gstcollectpads for historical reasons.
*
* Manages a set of pads with the purpose of aggregating their buffers.
* Control is given to the subclass when all pads have data.
- * <itemizedlist>
- * <listitem><para>
- * Base class for mixers and muxers. Subclasses should at least implement
+ *
+ * * Base class for mixers and muxers. Subclasses should at least implement
* the #GstAggregatorClass.aggregate() virtual method.
- * </para></listitem>
- * <listitem><para>
- * When data is queued on all pads, tha aggregate vmethod is called.
- * </para></listitem>
- * <listitem><para>
- * One can peek at the data on any given GstAggregatorPad with the
+ *
+ * * When data is queued on all pads, the aggregate vmethod is called.
+ *
+ * * One can peek at the data on any given GstAggregatorPad with the
* gst_aggregator_pad_get_buffer () method, and take ownership of it
* with the gst_aggregator_pad_steal_buffer () method. When a buffer
* has been taken with steal_buffer (), a new buffer can be queued
* on that pad.
- * </para></listitem>
- * <listitem><para>
- * If the subclass wishes to push a buffer downstream in its aggregate
+ *
+ * * If the subclass wishes to push a buffer downstream in its aggregate
* implementation, it should do so through the
* gst_aggregator_finish_buffer () method. This method will take care
* of sending and ordering mandatory events such as stream start, caps
* and segment.
- * </para></listitem>
- * <listitem><para>
- * Same goes for EOS events, which should not be pushed directly by the
+ *
+ * * Same goes for EOS events, which should not be pushed directly by the
* subclass; it should instead return GST_FLOW_EOS from its aggregate
* implementation.
- * </para></listitem>
- * <listitem><para>
- * Note that the aggregator logic regarding gap event handling is to turn
+ *
+ * * Note that the aggregator logic regarding gap event handling is to turn
* these into gap buffers with matching PTS and duration. It will also
* flag these buffers with GST_BUFFER_FLAG_GAP and GST_BUFFER_FLAG_DROPPABLE
* to ease their identification and subsequent processing.
- * </para></listitem>
- * </itemizedlist>
+ *
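+ * A minimal sketch of an aggregate() implementation for a hypothetical
+ * single-pad subclass (the my_agg_ naming and the pass-through behaviour are
+ * purely illustrative; real mixers and muxers combine data from all pads):
+ *
+ * |[
+ * static GstFlowReturn
+ * my_agg_aggregate (GstAggregator * agg, gboolean timeout)
+ * {
+ *   GstAggregatorPad *pad =
+ *       GST_AGGREGATOR_PAD (GST_ELEMENT (agg)->sinkpads->data);
+ *   GstBuffer *buf;
+ *
+ *   if (gst_aggregator_pad_is_eos (pad))
+ *     return GST_FLOW_EOS;      // do not push an EOS event yourself
+ *
+ *   buf = gst_aggregator_pad_steal_buffer (pad);
+ *   if (buf == NULL)
+ *     return GST_FLOW_OK;       // nothing queued on this call
+ *
+ *   // finish_buffer () sends stream-start, caps and segment as needed
+ *   return gst_aggregator_finish_buffer (agg, buf);
+ * }
+ * ]|
+ *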
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:gsth264parser
+ * @title: GstH264Parser
* @short_description: Convenience library for h264 video
* bitstream parsing.
*
* It offers bitstream parsing in both AVC (length-prefixed) and Annex B
* (0x000001 start code prefix) format. To identify a NAL unit in a bitstream
* and parse its headers, first call:
- * <itemizedlist>
- * <listitem>
- * #gst_h264_parser_identify_nalu to identify a NAL unit in an Annex B type bitstream
- * </listitem>
- * <listitem>
- * #gst_h264_parser_identify_nalu_avc to identify a NAL unit in an AVC type bitstream
- * </listitem>
- * </itemizedlist>
+ *
+ * * #gst_h264_parser_identify_nalu to identify a NAL unit in an Annex B type bitstream
+ *
+ * * #gst_h264_parser_identify_nalu_avc to identify a NAL unit in an AVC type bitstream
*
* The following functions are then available for parsing the structure of the
* #GstH264NalUnit, depending on the #GstH264NalUnitType:
- * <itemizedlist>
- * <listitem>
- * From #GST_H264_NAL_SLICE to #GST_H264_NAL_SLICE_IDR: #gst_h264_parser_parse_slice_hdr
- * </listitem>
- * <listitem>
- * #GST_H264_NAL_SEI: #gst_h264_parser_parse_sei
- * </listitem>
- * <listitem>
- * #GST_H264_NAL_SPS: #gst_h264_parser_parse_sps
- * </listitem>
- * <listitem>
- * #GST_H264_NAL_PPS: #gst_h264_parser_parse_pps
- * </listitem>
- * <listitem>
- * Any other: #gst_h264_parser_parse_nal
- * </listitem>
- * </itemizedlist>
+ *
+ * * From #GST_H264_NAL_SLICE to #GST_H264_NAL_SLICE_IDR: #gst_h264_parser_parse_slice_hdr
+ *
+ * * #GST_H264_NAL_SEI: #gst_h264_parser_parse_sei
+ *
+ * * #GST_H264_NAL_SPS: #gst_h264_parser_parse_sps
+ *
+ * * #GST_H264_NAL_PPS: #gst_h264_parser_parse_pps
+ *
+ * * Any other: #gst_h264_parser_parse_nal
*
* One of these functions *must* be called on every NAL unit in the bitstream,
* in order to keep the internal structures of the #GstH264NalParser up to
* date; #gst_h264_parser_parse_nal can be used for any NAL unit
* type, if no special parsing of the current NAL unit is required by the
* application.
*
- * For more details about the structures, look at the ITU-T H.264 and ISO/IEC 14496-10 – MPEG-4
+ * For more details about the structures, look at the ITU-T H.264 and ISO/IEC 14496-10 – MPEG-4
* Part 10 specifications, available at:
*
- * <itemizedlist>
- * <listitem>
- * ITU-T H.264: http://www.itu.int/rec/T-REC-H.264
- * </listitem>
- * <listitem>
- * ISO/IEC 14496-10: http://www.iso.org/iso/iso_catalogue/catalogue_tc/catalogue_detail.htm?csnumber=56538
- * </listitem>
- * </itemizedlist>
+ * * ITU-T H.264: http://www.itu.int/rec/T-REC-H.264
+ *
+ * * ISO/IEC 14496-10: http://www.iso.org/iso/iso_catalogue/catalogue_tc/catalogue_detail.htm?csnumber=56538
+ *
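+ * As a rough sketch, an Annex B parsing loop looks like this (the data and
+ * size variables are assumed to describe the bitstream; handling of the
+ * final NAL unit and of error results is omitted):
+ *
+ * |[
+ * GstH264NalParser *parser = gst_h264_nal_parser_new ();
+ * GstH264NalUnit nalu;
+ * guint offset = 0;
+ *
+ * while (gst_h264_parser_identify_nalu (parser, data, offset, size,
+ *         &nalu) == GST_H264_PARSER_OK) {
+ *   // switch on nalu.type here to call the per-type parse functions above
+ *   gst_h264_parser_parse_nal (parser, &nalu);
+ *   offset = nalu.offset + nalu.size;
+ * }
+ * gst_h264_nal_parser_free (parser);
+ * ]|
+ *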
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:gsth265parser
+ * @title: GstH265Parser
* @short_description: Convenience library for h265 video bitstream parsing.
*
* It offers you bitstream parsing in HEVC mode and non-HEVC mode. To identify
* Nals in a bitstream and parse its headers, you should call:
- * <itemizedlist>
- * <listitem>
- * gst_h265_parser_identify_nalu() to identify the following nalu in
+ *
+ * * gst_h265_parser_identify_nalu() to identify the following nalu in
* non-HEVC bitstreams
- * </listitem>
- * <listitem>
- * gst_h265_parser_identify_nalu_hevc() to identify the nalu in
+ *
+ * * gst_h265_parser_identify_nalu_hevc() to identify the nalu in
* HEVC bitstreams
- * </listitem>
- * </itemizedlist>
*
* Then, depending on the #GstH265NalUnitType of the newly parsed #GstH265NalUnit,
* you should call the differents functions to parse the structure:
- * <itemizedlist>
- * <listitem>
- * From #GST_H265_NAL_SLICE_TRAIL_N to #GST_H265_NAL_SLICE_CRA_NUT: gst_h265_parser_parse_slice_hdr()
- * </listitem>
- * <listitem>
- * #GST_H265_NAL_SEI: gst_h265_parser_parse_sei()
- * </listitem>
- * <listitem>
- * #GST_H265_NAL_VPS: gst_h265_parser_parse_vps()
- * </listitem>
- * <listitem>
- * #GST_H265_NAL_SPS: gst_h265_parser_parse_sps()
- * </listitem>
- * <listitem>
- * #GST_H265_NAL_PPS: #gst_h265_parser_parse_pps()
- * </listitem>
- * <listitem>
- * Any other: gst_h265_parser_parse_nal()
- * </listitem>
- * </itemizedlist>
+ *
+ * * From #GST_H265_NAL_SLICE_TRAIL_N to #GST_H265_NAL_SLICE_CRA_NUT: gst_h265_parser_parse_slice_hdr()
+ *
+ * * #GST_H265_NAL_SEI: gst_h265_parser_parse_sei()
+ *
+ * * #GST_H265_NAL_VPS: gst_h265_parser_parse_vps()
+ *
+ * * #GST_H265_NAL_SPS: gst_h265_parser_parse_sps()
+ *
+ * * #GST_H265_NAL_PPS: #gst_h265_parser_parse_pps()
+ *
+ * * Any other: gst_h265_parser_parse_nal()
*
* Note: You should always call gst_h265_parser_parse_nal() if you don't
* actually need #GstH265NalUnitType to be parsed for your personal use, in
* order to keep the parser's internal structures up to date.
*
* For more details about the structures, look at the ITU-T H.265
* specifications, you can download them from:
*
- * <itemizedlist>
- * <listitem>
- * ITU-T H.265: http://www.itu.int/rec/T-REC-H.265
- * </listitem>
- * </itemizedlist>
+ * * ITU-T H.265: http://www.itu.int/rec/T-REC-H.265
+ *
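+ * A rough sketch of the non-HEVC (Annex B) case, analogous to the H.264
+ * parser above (the data and size variables are assumed; error handling is
+ * omitted):
+ *
+ * |[
+ * GstH265Parser *parser = gst_h265_parser_new ();
+ * GstH265NalUnit nalu;
+ * guint offset = 0;
+ *
+ * while (gst_h265_parser_identify_nalu (parser, data, offset, size,
+ *         &nalu) == GST_H265_PARSER_OK) {
+ *   gst_h265_parser_parse_nal (parser, &nalu);   // or a per-type function
+ *   offset = nalu.offset + nalu.size;
+ * }
+ * gst_h265_parser_free (parser);
+ * ]|
+ *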
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:gstjpeg2000sampling
+ * @title: GstJpeg2000Sampling
* @short_description: Manage JPEG 2000 sampling and colorspace fields
*
*/
/**
* SECTION:gstjpegparser
+ * @title: GstJpegParser
* @short_description: Convenience library for JPEG bitstream parsing.
*
- * <refsect2>
- * <para>
* Provides useful functions for parsing JPEG images
- * </para>
- * </refsect2>
+ *
*/
#include <string.h>
*/
/**
* SECTION:gstmpeg4parser
+ * @title: GstMpeg4Parser
* @short_description: Convenience library for parsing mpeg4 part 2 video
* bitstream.
*
/**
* SECTION:gstmpegvideoparser
+ * @title: GstMpegvideoParser
* @short_description: Convenience library for mpeg1 and 2 video
* bitstream parsing.
*
- * <refsect2>
- * <para>
* Provides useful functions for MPEG video bitstream parsing.
- * </para>
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:gstvc1parser
+ * @title: GstVc1Parser
* @short_description: Convenience library for parsing vc1 video
* bitstream.
*
/**
* SECTION:gstvp8parser
+ * @title: GstVp8Parser
* @short_description: Convenience library for parsing vp8 video bitstream.
*
* For more details about the structures, you can refer to the
* index
*
* Dequantization indices.
- */
+ */
struct _GstVp8QuantIndices
{
guint8 y_ac_qi;
*/
/**
* SECTION:gstvp9parser
+ * @title: GstVp9Parser
* @short_description: Convenience library for parsing vp9 video bitstream.
*
* For more details about the structures, you can refer to the
/**
* SECTION:gstglapi
+ * @title: GstGlApi
* @short_description: OpenGL API specific functionality
* @see_also: #GstGLDisplay, #GstGLContext
*
/**
* SECTION:gstglbasememory
+ * @title: GstGlBaseMemory
* @short_description: memory subclass for GL buffers
* @see_also: #GstMemory, #GstAllocator
*
/**
* gst_is_gl_base_memory:
* @mem: a #GstMemory
- *
+ *
* Returns: whether the memory at @mem is a #GstGLBaseMemory
*
* Since: 1.8
/**
* SECTION:gstglbuffer
+ * @title: GstGlBuffer
* @short_description: memory subclass for GL buffers
* @see_also: #GstMemory, #GstAllocator
*
* GstGLBuffer is a #GstMemory subclass providing support for the mapping of
- * GL buffers.
+ * GL buffers.
*
* Data is uploaded or downloaded from the GPU as is necessary.
*/
/**
* gst_is_gl_buffer:
* @mem: a #GstMemory
- *
+ *
* Returns: whether the memory at @mem is a #GstGLBuffer
*
* Since: 1.8
/**
* SECTION:gstglbufferpool
+ * @title: GstGlBufferPool
* @short_description: buffer pool for #GstGLBaseMemory objects
* @see_also: #GstBufferPool, #GstGLBaseMemory, #GstGLMemory
*
*
* A #GstGLBufferPool is created with gst_gl_buffer_pool_new()
*
- * #GstGLBufferPool implements the VideoMeta buffer pool option
+ * #GstGLBufferPool implements the VideoMeta buffer pool option
* %GST_BUFFER_POOL_OPTION_VIDEO_META, the VideoAlignment buffer pool option
* %GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT as well as the OpenGL specific
* %GST_BUFFER_POOL_OPTION_GL_SYNC_META buffer pool option.
/**
* SECTION:gstglcolorconvert
+ * @title: GstGlColorConvert
* @short_description: convert between video color spaces and formats
* @see_also: #GstGLUpload, #GstGLMemory, #GstGLBaseMemory
*
* @inbuf: (transfer none): the #GstGLMemory filled #GstBuffer to convert
*
* Converts the data contained by @inbuf using the formats specified by the
- * #GstCaps passed to gst_gl_color_convert_set_caps()
+ * #GstCaps passed to gst_gl_color_convert_set_caps()
*
* Returns: (transfer full): a converted #GstBuffer or %NULL
*
* @context_type: a #GstGLPlatform specifying the type of context in @handle
* @available_apis: a #GstGLAPI containing the available OpenGL apis in @handle
*
- * Wraps an existing OpenGL context into a #GstGLContext.
+ * Wraps an existing OpenGL context into a #GstGLContext.
*
* Note: The caller is responsible for ensuring that the OpenGL context
* represented by @handle stays alive while the returned #GstGLContext is
* @title: GstGLDisplay
* @see_also: #GstContext, #GstGLContext, #GstGLWindow
*
- * #GstGLDisplay represents a connection to the underlying windowing system.
+ * #GstGLDisplay represents a connection to the underlying windowing system.
* Elements are required to make use of #GstContext to share and propagate
* a #GstGLDisplay.
*
* - GST_GL_API influences the OpenGL API requested by the OpenGL platform.
* Common values are 'opengl' and 'gles2'.
*
- * <note>Certain window systems require a special function to be called to
- * initialize threading support. As this GStreamer GL library does not preclude
- * concurrent access to the windowing system, it is strongly advised that
- * applications ensure that threading support has been initialized before any
- * other toolkit/library functionality is accessed. Failure to do so could
- * result in sudden application abortion during execution. The most notably
- * example of such a function is X11's XInitThreads().</note>
+ * > Certain window systems require a special function to be called to
+ * > initialize threading support. As this GStreamer GL library does not preclude
+ * > concurrent access to the windowing system, it is strongly advised that
+ * > applications ensure that threading support has been initialized before any
+ * > other toolkit/library functionality is accessed. Failure to do so could
+ * > result in sudden application abortion during execution. The most notable
+ * > example of such a function is X11's XInitThreads\().
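+ *
+ * A minimal sketch for the X11 case (assuming an application that links
+ * against Xlib; other window systems have their own equivalents):
+ *
+ * |[
+ * #include <X11/Xlib.h>
+ * #include <gst/gst.h>
+ *
+ * int
+ * main (int argc, char **argv)
+ * {
+ *   XInitThreads ();            // must run before any other Xlib call
+ *   gst_init (&argc, &argv);
+ *   // ... create, run and tear down the pipeline ...
+ *   return 0;
+ * }
+ * ]|
+ *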
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:gstglfilter
+ * @title: GstGlFilter
* @short_description: GstBaseTransform subclass for dealing with RGBA textures
* @see_also: #GstBaseTransform, #GstGLContext, #GstGLFramebuffer
*
* gst_gl_filter_draw_fullscreen_quad:
* @filter: a #GstGLFilter
*
- * Render a fullscreen quad using the current GL state. The only GL state this
+ * Render a fullscreen quad using the current GL state. The only GL state this
* modifies is the necessary vertex/index buffers and, if necessary, a
* Vertex Array Object for drawing a fullscreen quad. Framebuffer state,
* any shaders, viewport state, etc. must be set up by the caller.
/**
* SECTION:gstglformat
+ * @title: GstGlFormat
* @short_description: utilities for dealing with OpenGL formats
* @see_also: #GstGLBaseMemory, #GstGLMemory, #GstGLFramebuffer, #GstGLBuffer
*
/**
* SECTION:gstglmemory
+ * @title: GstGlMemory
* @short_description: memory subclass for GL textures
* @see_also: #GstMemory, #GstAllocator, #GstGLBufferPool
*
* GstGLMemory is a #GstGLBaseMemory subclass providing support for the mapping of
- * OpenGL textures.
+ * OpenGL textures.
*
* #GstGLMemory is created or wrapped through gst_gl_base_memory_alloc()
* with #GstGLVideoAllocationParams.
/**
* gst_is_gl_memory:
* @mem: a #GstMemory
- *
+ *
* Returns: whether the memory at @mem is a #GstGLMemory
*
* Since: 1.4
/**
* SECTION:gstglmemorypbo
+ * @title: GstGLMemoryPBO
* @short_description: memory subclass for GL textures
* @see_also: #GstMemory, #GstAllocator, #GstGLBufferPool
*
/**
* gst_is_gl_memory_pbo:
* @mem: a #GstMemory
- *
+ *
* Returns: whether the memory at @mem is a #GstGLMemoryPBO
*
* Since: 1.8
/**
* SECTION:gstgloverlaycompositor
+ * @title: GstGLOverlayCompositor
* @short_description: Composite multiple overlays using OpenGL
* @see_also: #GstGLMemory, #GstGLContext
*/
* SECTION:gstglquery
* @short_description: OpenGL query abstraction
* @title: GstGLQuery
- * @see_also:
+ * @see_also:
*
* A #GstGLQuery represents and holds an OpenGL query object. Various types of
* queries can be run or counters retrieved.
/**
* SECTION:gstglrenderbuffer
+ * @title: GstGLRenderBuffer
* @short_description: memory subclass for GL renderbuffer objects
* @see_also: #GstMemory, #GstAllocator
*
* GstGLRenderbuffer is a #GstGLBaseMemory subclass providing support for
- * OpenGL renderbuffers.
+ * OpenGL renderbuffers.
*
* #GstGLRenderbuffer is created or wrapped through gst_gl_base_memory_alloc()
* with #GstGLRenderbufferAllocationParams.
/**
* gst_is_gl_renderbuffer:
* @mem: a #GstMemory
- *
+ *
* Returns: whether the memory at @mem is a #GstGLRenderbuffer
*
* Since: 1.10
/**
* SECTION:gstglshader
+ * @title: GstGLShader
* @short_description: object representing an OpenGL shader program
* @see_also: #GstGLSLStage
*/
/**
* SECTION:gstglsl
+ * @title: GstGLSL
* @short_description: helpers for dealing with OpenGL shaders
* @see_also: #GstGLSLStage, #GstGLShader
*/
/**
* SECTION:gstglsyncmeta
+ * @title: GstGLSyncMeta
* @short_description: synchronization primitives
* @see_also: #GstGLBaseMemory, #GstGLContext
*
/**
* SECTION:gstglupload
+ * @title: GstGLUpload
* @short_description: an object that uploads to GL textures
* @see_also: #GstGLDownload, #GstGLMemory
*
/**
* SECTION:gstglutils
+ * @title: GstGLUtils
* @short_description: some miscellaneous utilities for OpenGL
* @see_also: #GstGLContext
*/
/**
* SECTION:gstglviewconvert
+ * @title: GstGLViewConvert
* @short_description: convert between stereoscopic/multiview video formats
* @see_also: #GstGLColorConvert, #GstGLContext
*
* @inbuf: (transfer none): the #GstGLMemory filled #GstBuffer to convert
*
* Converts the data contained by @inbuf using the formats specified by the
- * #GstCaps passed to gst_gl_view_convert_set_caps()
+ * #GstCaps passed to gst_gl_view_convert_set_caps()
*
* Returns: (transfer full): a converted #GstBuffer or %NULL
*
/* GST_MTS_DESC_DVB_SERVICE (0x48) */
/**
* GstMpegtsDVBServiceType:
- *
+ *
* The type of service of a channel.
*
- * As specified in Table 87 of ETSI EN 300 468 v1.13.1
+ * As specified in Table 87 of ETSI EN 300 468 v1.13.1
*/
typedef enum {
GST_DVB_SERVICE_RESERVED_00 = 0x00,
/*
- * gst-scte-section.h -
+ * gst-scte-section.h -
* Copyright (C) 2013, CableLabs, Louisville, CO 80027
- *
+ *
* Authors:
* RUIH Team <ruih@cablelabs.com>
*
/**
* SECTION:gstplayer-gmaincontextsignaldispatcher
+ * @title: GstPlayerGMainContextSignalDispatcher
* @short_description: Player GLib MainContext dispatcher
*
*/
/**
* SECTION:gstplayer-mediainfo
+ * @title: GstPlayerMediaInfo
* @short_description: Player Media Information
*
*/
/**
* SECTION:gstplayer-videooverlayvideorenderer
+ * @title: GstPlayerVideoOverlayVideoRenderer
* @short_description: Player Video Overlay Video Renderer
*
*/
/**
* SECTION:gstplayer-visualization
+ * @title: GstPlayerVisualization
* @short_description: Player Visualization
*
*/
/**
* SECTION:gstplayer
+ * @title: GstPlayer
* @short_description: Player
*
*/
/**
* SECTION:gstvideoaggregator
+ * @title: GstVideoAggregator
* @short_description: Base class for video aggregators
*
* VideoAggregator can accept AYUV, ARGB and BGRA video streams. For each of the requested
/**
* SECTION:element-accurip
+ * @title: accurip
* @short_description: Computes an AccurateRip CRC
*
* The accurip element calculates a CRC for an audio stream which can be used
* to look the stream up in the database of
* <ulink url="http://accuraterip.com/">AccurateRip</ulink>. This database
* is used to check the accuracy of a CD rip.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -m uridecodebin uri=file:///path/to/song.flac ! audioconvert ! accurip ! fakesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-aiffmux
+ * @title: aiffmux
*
* Format an audio stream into the Audio Interchange File Format
*
/**
* SECTION:element-aiffparse
+ * @title: aiffparse
*
- * <refsect2>
- * <para>
* Parse a .aiff file into raw or compressed audio.
- * </para>
- * <para>
+ *
* The aiffparse element supports both push and pull mode operations, making it
* possible to stream from a network source.
- * </para>
- * <title>Example launch line</title>
- * <para>
- * <programlisting>
+ *
+ * ## Example launch line
+ *
+ * |[
* gst-launch-1.0 filesrc location=sine.aiff ! aiffparse ! audioconvert ! alsasink
- * </programlisting>
+ * ]|
* Read an AIFF file and output to the soundcard using the ALSA element. The
* AIFF file is assumed to contain raw uncompressed samples.
- * </para>
- * <para>
- * <programlisting>
+ *
+ * |[
* gst-launch-1.0 souphttpsrc location=http://www.example.org/sine.aiff ! queue ! aiffparse ! audioconvert ! alsasink
- * </programlisting>
+ * ]|
* Stream data from a network URL.
- * </para>
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-asfmux
+ * @title: asfmux
*
* Muxes media into an ASF file/stream.
*
* stream number of the stream that goes through that pad. Stream numbers
* are assigned sequentially, starting from 1.
*
- * <refsect2>
- * <title>Example launch lines</title>
- * <para>(write everything in one line, without the backslash characters)</para>
+ * ## Example launch lines
+ *
+ * (write everything in one line, without the backslash characters)
* |[
* gst-launch-1.0 videotestsrc num-buffers=250 \
* ! "video/x-raw,format=(string)I420,framerate=(fraction)25/1" ! avenc_wmv2 \
* ]| This creates an ASF file containing an WMV video stream
* with a test picture and WMA audio stream of a test sound.
*
- * <title>Live streaming</title>
+ * ## Live streaming
* asfmux and rtpasfpay are capable of generating a live asf stream.
- * asfmux has to set its 'streamable' property to true, because in this
+ * asfmux needs its 'streamable' property set to true, because in this
* mode it won't try to seek back to the start of the file to replace
* some fields that couldn't be known at the file start. In this mode,
* it also won't send indexes at the end of the data packets (the actual
* media content).
* The following pipelines are an example of this usage.
- * <para>(write everything in one line, without the backslash characters)</para>
+ * (write everything in one line, without the backslash characters)
* Server (sender)
* |[
* gst-launch-1.0 -ve videotestsrc ! avenc_wmv2 ! asfmux name=mux streamable=true \
* ! videoconvert ! autovideosink \
* d. ! queue ! audioconvert ! autoaudiosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
text = g_value_get_string (&value);
/* +1 -> because of the \0 at the end
- * 2* -> because we have uft8, and asf demands utf16
+ * 2* -> because we have utf8, and asf demands utf16
*/
content_size = 2 * (1 + g_utf8_strlen (text, -1));
/* size of the tag content in utf16 +
* size of the tag name +
* 3 uint16 (size of the tag name string,
- * size of the tag content string and
+ * size of the tag content string and
* type of content
*/
asftags->ext_cont_desc_size += content_size +
* size needed for the default and extended content description objects.
* These results and a copy of the #GstTagList
* are stored in the #GstAsfTags. We store a copy so that
- * the sizes estimated here mantain the same until they are
+ * the sizes estimated here remain the same until they are
* written to the asf file.
*/
static void
text = g_value_get_string (&value);
/* +1 -> because of the \0 at the end
- * 2* -> because we have uft8, and asf demands utf16
+ * 2* -> because we have utf8, and asf demands utf16
*/
content_size = 2 * (1 + g_utf8_strlen (text, -1));
}
* @size_buf: pointer to the memory position to write the size of the string
* @str_buf: pointer to the memory position to write the string
* @str: the string to be written (in UTF-8)
- * @use32: if the string size should be writen with 32 bits (if true)
+ * @use32: if the string size should be written with 32 bits (if true)
* or with 16 (if false)
*
* Writes a string with its size as it is needed in many asf objects.
GST_LOG_OBJECT (asfmux, "Writing extended content description string: "
"%s", str);
- /*
+ /*
* Convert the string to utf16.
* Also force the last bytes to be null terminated;
* tags ended up with extra weird characters without it.
*
* Checks if a string tag with tagname exists in the taglist. If it
* exists it is written as UTF-16LE to data_buf and its size in bytes
- * is writen to size_buf. It is used for writing content description
+ * is written to size_buf. It is used for writing content description
* object fields.
*
* Returns: the size of the string
/* Data object size. This is always >= ASF_DATA_OBJECT_SIZE. The standard
* specifically accepts the value 0 in live streams, but WMP is not accepting
* this while streaming using WMSP, so we default to minimum size also for
- * live streams. Otherwise this field must be updated later on when we know
+ * live streams. Otherwise this field must be updated later on when we know
* the complete stream size.
*/
GST_WRITE_UINT64_LE (*buf + 16, ASF_DATA_OBJECT_SIZE);
gst_asf_mux_write_header_object (asfmux, &bufdata, map.size -
ASF_DATA_OBJECT_SIZE, 2 + stream_num);
- /* get the position of the file properties object for
+ /* get the position of the file properties object for
* updating it in gst_asf_mux_stop_file */
asfmux->file_properties_object_position = bufdata - map.data;
gst_asf_mux_write_file_properties (asfmux, &bufdata);
/**
* gst_asf_mux_stop_file:
* @asfmux: #GstAsfMux
- *
+ *
* Finalizes the asf stream by pushing the indexes after
* the data object. Also seeks back to the header positions
* to rewrite some fields such as the total number of bytes
play_duration = pad->play_duration;
}
- /* going back to file properties object to fill in
+ /* going back to file properties object to fill in
* values we didn't know back then */
GST_DEBUG_OBJECT (asfmux,
"Sending new segment to file properties object position");
audiopad->audioinfo.rate = (guint32) rate;
/* taken from avimux
- * codec initialization data, if any
+ * codec initialization data, if any
*/
codec_data = gst_structure_get_value (structure, "codec_data");
if (codec_data) {
videopad->vidinfo.height = (gint32) height;
/* taken from avimux
- * codec initialization data, if any
+ * codec initialization data, if any
*/
codec_data = gst_structure_get_value (structure, "codec_data");
if (codec_data) {
/**
* gst_asf_generate_file_id:
- *
+ *
* Generates a random GUID
*
* Returns: The generated GUID
/**
* gst_asf_get_var_size_field_len:
* @field_type: the asf field type
- *
+ *
* Returns: the size in bytes of a variable of field_type type
*/
guint
* gst_asf_file_info_new:
*
* Creates a new #GstAsfFileInfo
- *
+ *
* Returns: the created struct
*/
GstAsfFileInfo *
/**
* gst_asf_file_info_reset:
* @info: the #GstAsfFileInfo to be reset
- *
+ *
* Resets the data of a #GstAsfFileInfo
*/
void
*/
/**
* SECTION:element-gstaudiochannelmix
+ * @title: gstaudiochannelmix
*
* The audiochannelmix element mixes channels in stereo audio based on
* properties set on the element. The primary purpose is to reconstruct
* equal left/right channels on an input stream that has audio in only
* one channel.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v audiotestsrc ! audiochannelmix ! autoaudiosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-audiointerleave
- *
+ * @title: audiointerleave
*
*/
/**
* GstInterleave:channel-positions
- *
+ *
* Channel positions: This property controls the channel positions
* that are used on the src caps. The number of elements should be
* the same as the number of sink pads and the array should contain
/**
* GstInterleave:channel-positions-from-input
- *
+ *
* Channel positions from input: If this property is set to %TRUE the channel
* positions will be taken from the input caps if valid channel positions for
* the output can be constructed from them. If this is set to %TRUE setting the
*/
/**
* SECTION:element-audiomixer
+ * @title: audiomixer
*
* The audiomixer element mixes several streams into one by adding the data.
* Mixed data is clamped to the min/max values of the data format.
* The input pads are from a GstPad subclass and have additional
* properties to mute each pad individually and set the volume:
*
- * <itemizedlist>
- * <listitem>
- * "mute": Whether to mute the pad or not (#gboolean)
- * </listitem>
- * <listitem>
- * "volume": The volume of the pad, between 0.0 and 10.0 (#gdouble)
- * </listitem>
- * </itemizedlist>
+ * * "mute": Whether to mute the pad or not (#gboolean)
+ * * "volume": The volume of the pad, between 0.0 and 10.0 (#gdouble)
*
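+ * A minimal sketch of setting these from application code (pad request shown;
+ * element creation, linking and error checking omitted):
+ *
+ * |[
+ * GstElement *mix = gst_element_factory_make ("audiomixer", NULL);
+ * GstPad *pad = gst_element_get_request_pad (mix, "sink_%u");
+ *
+ * g_object_set (pad, "volume", 0.5, "mute", FALSE, NULL);
+ * // ... link the upstream element to this pad ...
+ * gst_object_unref (pad);
+ * ]|
+ *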
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc freq=100 ! audiomixer name=mix ! audioconvert ! alsasink audiotestsrc freq=500 ! mix.
* ]| This pipeline produces two sine waves mixed together.
- * </refsect2>
*
*/
/**
* SECTION:element-audiomixmatrix
+ * @title: audiomixmatrix
* @short_description: Transform input/output channels according to a matrix
*
* This element transforms a given number of input channels into a given
* are automatically negotiated and the transformation matrix is a truncated
* identity matrix.
*
- * <refsect2>
- * <title>Example matrix generation code</title>
+ * ## Example matrix generation code
* To generate the matrix using code:
*
* |[
* g_object_set_property (G_OBJECT (audiomixmatrix), "matrix", &v);
* g_value_unset (&v);
* ]|
- * </refsect2>
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc ! audio/x-raw,channels=4 ! audiomixmatrix in-channels=4 out-channels=2 channel-mask=-1 matrix="<<(double)1, (double)0, (double)0, (double)0>, <0.0, 1.0, 0.0, 0.0>>" ! audio/x-raw,channels=2 ! autoaudiosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-spacescope
+ * @title: spacescope
* @see_also: goom
*
* Spacescope is a simple audio visualisation element. It maps the left and
* right channel to x and y coordinates.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc ! audioconvert ! spacescope ! ximagesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
*/
/**
* SECTION:element-spectrascope
+ * @title: spectrascope
* @see_also: goom
*
* Spectrascope is a simple spectrum visualisation element. It renders the
* frequency spectrum as a series of bars.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc ! audioconvert ! spectrascope ! ximagesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
*/
/**
* SECTION:element-synaescope
+ * @title: synaescope
* @see_also: goom
*
* Synaescope is an audio visualisation element. It analyzes frequencies and
* out-of-phase properties of audio and draws this as clouds of stars.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc ! audioconvert ! synaescope ! ximagesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
*/
/**
* SECTION:element-wavescope
+ * @title: wavescope
* @see_also: goom
*
* Wavescope is a simple audio visualisation element. It renders the waveforms
* like on an oscilloscope.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc ! audioconvert ! wavescope ! ximagesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
*/
/**
* SECTION:element-autoconvert
+ * @title: autoconvert
*
* The #autoconvert element has one sink and one source pad. It will look for
* other elements that also have one sink and one source pad.
/**
* SECTION:element-bayer2rgb
+ * @title: bayer2rgb
*
* Decodes raw camera bayer (fourcc BA81) to RGB.
*/
/**
* SECTION:camerabingeneral
+ * @title: GstCameraBin2
* @short_description: helper functions for #GstCameraBin2 and its modules
*
* Common helper functions for #GstCameraBin2, #GstCameraBin2Image and
*/
/**
* SECTION:element-camerabin
+ * @title: camerabin
*
* CameraBin is a high-level camera object that encapsulates gstreamer
* elements, providing an API for controlling a digital camera.
*
- * <note>
- * Note that camerabin is still UNSTABLE and under development.
- * </note>
+ * > Note that camerabin is still UNSTABLE and under development.
*
* CameraBin has the following main features:
- * <itemizedlist>
- * <listitem>
- * Record videos
- * </listitem>
- * <listitem>
- * Capture pictures
- * </listitem>
- * <listitem>
- * Display a viewfinder
- * </listitem>
- * <listitem>
- * Post preview images for each capture (video and image)
- * </listitem>
- * </itemizedlist>
*
- * <refsect2>
- * <title>Usage</title>
- * <para>
+ * * Record videos
+ * * Capture pictures
+ * * Display a viewfinder
+ * * Post preview images for each capture (video and image)
+ *
+ * ## Usage
+ *
* Camerabin can be created using gst_element_factory_make() just like
* any other element. Video or image capture mode can be selected using
* the #GstCameraBin:mode property and the file to save the capture is
* In both modes, if #GstCameraBin:post-previews is %TRUE, a #GstBuffer
* will be posted to the #GstBus in a field named 'buffer', in a
* 'preview-image' message of type %GST_MESSAGE_ELEMENT.
- * </para>
- * </refsect2>
+ *
- * <refsect2>
- * <title>Customization</title>
- * <para>
+ *
+ * ## Customization
+ *
* Camerabin provides various customization properties, allowing the user
* to set custom filters, selecting the viewfinder sink and formats to
* use to encode the captured images/videos.
* of its branches: video capture, image capture, viewfinder and preview.
* Check #GstCameraBin:video-filter, #GstCameraBin:image-filter,
* #GstCameraBin:viewfinder-filter and #GstCameraBin:preview-filter.
- * </para>
- * </refsect2>
*
- * <refsect2>
- * <title>Example launch line</title>
- * <para>
+ * ## Example launch line
+ *
* Unfortunately, camerabin can't really be used from gst-launch-1.0, as you
* need to send signals to control it. The following pipeline might be able
* to show the viewfinder using all the default elements.
* |[
* gst-launch-1.0 -v -m camerabin
* ]|
- * </para>
- * </refsect2>
+ *
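+ * From application code the element is driven through properties and action
+ * signals; a rough sketch (the "mode" and "location" properties and the
+ * "start-capture"/"stop-capture" signals used here should be checked with
+ * gst-inspect-1.0 camerabin):
+ *
+ * |[
+ * GstElement *camera = gst_element_factory_make ("camerabin", NULL);
+ *
+ * g_object_set (camera, "mode", 2, "location", "capture_%d", NULL);  // 2 = video
+ * gst_element_set_state (camera, GST_STATE_PLAYING);
+ * g_signal_emit_by_name (camera, "start-capture");
+ * // ... later, for video mode ...
+ * g_signal_emit_by_name (camera, "stop-capture");
+ * ]|
+ *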
*/
/**
* SECTION:element-digitalzoom
+ * @title: digitalzoom
*
* Does digital zooming by cropping and scaling an image.
*
*/
/**
* SECTION:element-gstviewfinderbin
+ * @title: gstviewfinderbin
*
* The gstviewfinderbin element is the display element for camerabin2.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! viewfinderbin
* ]|
* Feeds the viewfinderbin with video test data.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-wrappercamerabinsrc
+ * @title: wrappercamerabinsrc
*
* A camera bin src element that wraps a default video source with a single
* pad into the 3pad model that camerabin2 expects.
/**
* SECTION:element-chromahold
- *
+ * @title: chromahold
+ *
* The chromahold element removes all color information for
* all colors except a single one and converts the rest to grayscale.
*
/**
* SECTION:element-coloreffects
+ * @title: coloreffects
*
* Map colors of the video input to a lookup table
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! coloreffects preset=heat ! videoconvert !
* autovideosink
* ]| This pipeline shows the effect of coloreffects on a test stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-compositor
+ * @title: compositor
*
* Compositor can accept AYUV, ARGB and BGRA video streams. For each of the requested
* sink pads it will compare the incoming geometry and framerate to define the
* output parameters; output frames will have the geometry of the
* biggest incoming video stream and the framerate of the fastest incoming one.
*
* Compositor will do colorspace conversion.
- *
+ *
* Individual parameters for each input stream can be configured on the
* #GstCompositorPad:
*
- * <itemizedlist>
- * <listitem>
- * "xpos": The x-coordinate position of the top-left corner of the picture
- * (#gint)
- * </listitem>
- * <listitem>
- * "ypos": The y-coordinate position of the top-left corner of the picture
- * (#gint)
- * </listitem>
- * <listitem>
- * "width": The width of the picture; the input will be scaled if necessary
- * (#gint)
- * </listitem>
- * <listitem>
- * "height": The height of the picture; the input will be scaled if necessary
- * (#gint)
- * </listitem>
- * <listitem>
- * "alpha": The transparency of the picture; between 0.0 and 1.0. The blending
- * is a simple copy when fully-transparent (0.0) and fully-opaque (1.0).
- * (#gdouble)
- * </listitem>
- * <listitem>
- * "zorder": The z-order position of the picture in the composition
- * (#guint)
- * </listitem>
- * </itemizedlist>
+ * * "xpos": The x-coordinate position of the top-left corner of the picture (#gint)
+ * * "ypos": The y-coordinate position of the top-left corner of the picture (#gint)
+ * * "width": The width of the picture; the input will be scaled if necessary (#gint)
+ * * "height": The height of the picture; the input will be scaled if necessary (#gint)
+ * * "alpha": The transparency of the picture; between 0.0 and 1.0. The blending
+ * is a simple copy when fully-transparent (0.0) and fully-opaque (1.0). (#gdouble)
+ * * "zorder": The z-order position of the picture in the composition (#guint)
*
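+ * These pad properties can be set from application code once a sink pad has
+ * been requested, for example (a rough sketch, element creation and linking
+ * omitted):
+ *
+ * |[
+ * GstElement *comp = gst_element_factory_make ("compositor", NULL);
+ * GstPad *pad = gst_element_get_request_pad (comp, "sink_%u");
+ *
+ * g_object_set (pad, "xpos", 100, "ypos", 50, "alpha", 0.5, NULL);
+ * // ... link the upstream element to this pad ...
+ * gst_object_unref (pad);
+ * ]|
+ *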
- * <refsect2>
- * <title>Sample pipelines</title>
+ * ## Sample pipelines
* |[
* gst-launch-1.0 \
* videotestsrc pattern=1 ! \
* compositor name=comp ! videoconvert ! ximagesink \
* videotestsrc ! \
* video/x-raw, framerate=\(fraction\)5/1, width=320, height=240 ! comp.
- * ]| A pipeline to demostrate bgra comping. (This does not demonstrate alpha blending).
+ * ]| A pipeline to demonstrate BGRA compositing. (This does not demonstrate alpha blending.)
* |[
* gst-launch-1.0 videotestsrc pattern=1 ! \
* video/x-raw,format =I420, framerate=\(fraction\)10/1, width=100, height=100 ! \
* "video/x-raw,format=AYUV,width=800,height=600,framerate=(fraction)10/1" ! \
* timeoverlay ! queue2 ! comp.
* ]| A pipeline to demonstrate synchronized compositing (the second stream starts after 3 seconds)
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-fpsdisplaysink
+ * @title: fpsdisplaysink
*
* Can display the current and average framerate as a text overlay or on stdout.
*
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 videotestsrc ! fpsdisplaysink
* gst-launch-1.0 videotestsrc ! fpsdisplaysink text-overlay=false
* gst-launch-1.0 filesrc location=video.avi ! decodebin name=d ! queue ! fpsdisplaysink d. ! queue ! fakesink sync=true
* gst-launch-1.0 playbin uri=file:///path/to/video.avi video-sink="fpsdisplaysink" audio-sink=fakesink
* ]|
- * </refsect2>
+ *
*/
/* FIXME:
* - can we avoid plugging the textoverlay?
*/
/**
* SECTION:element-gstchopmydata
+ * @title: gstchopmydata
*
* The chopmydata element takes an incoming stream and chops it up
* into randomly sized buffers. The size of outgoing buffers is determined
* by the max-size, min-size, and step-size properties.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v audiotestsrc num-buffers=10 ! chopmydata min-size=100
* max-size=200 step-size=2 ! fakesink -v
* ]|
- *
+ *
* This pipeline will create 10 buffers that are by default 2048 bytes
* each (1024 samples each), and chop them up into buffers that range
* in size from 100 bytes to 200 bytes, with the restriction that sizes
* are a multiple of 2. This restriction is important, because the
* default sample size for audiotestsrc is 2 bytes (one channel, 16-bit
* audio).
- *
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-debugspy
+ * @title: debugspy
*
* A spy element that can provide information on buffers going through it, via
* bus messages.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -m videotestsrc ! debugspy ! fakesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-errorignore
+ * @title: errorignore
*
* Passes through all packets, until it encounters GST_FLOW_ERROR or
* GST_FLOW_NOT_NEGOTIATED (configurable). At that point it will unref the
- * buffers and return GST_FLOW_OK (configurable) - until the next
+ * buffers and return GST_FLOW_OK (configurable) - until the next
* READY_TO_PAUSED, RECONFIGURE or FLUSH_STOP.
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * ## Example launch line
* |[
* gst-launch-1.0 videotestsrc ! errorignore ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-gstwatchdog
+ * @title: watchdog
*
* The watchdog element watches buffers and events flowing through
* a pipeline. If no buffers are seen for a configurable amount of
* This element is currently intended for transcoding pipelines,
* although it may be useful in other contexts.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v fakesrc ! watchdog ! fakesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:dvb-sub
+ * @title: GstDvbSub
* @short_description: a DVB subtitle parsing class
* @stability: Unstable
*
/**
* DVBSubtitleWindow
- * @version: version
+ * @version: version
* @display_window_flag: window_* are valid
* @display_width: assumed width of display
* @display_height: assumed height of display
/**
* SECTION:element-dvbsuboverlay
+ * @title: dvbsuboverlay
*
* Renders DVB subtitles on top of a video stream.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[ FIXME
* gst-launch-1.0 -v filesrc location=/path/to/ts ! mpegtsdemux name=d ! queue ! mpegaudioparse ! mpg123audiodec ! audioconvert ! autoaudiosink \
* d. ! queue ! mpegvideoparse ! mpeg2dec ! videoconvert ! r. \
* d. ! queue ! "subpicture/x-dvb" ! dvbsuboverlay name=r ! videoconvert ! autovideosink
* ]| This pipeline demuxes a MPEG-TS file with MPEG2 video, MP3 audio and embedded DVB subtitles and renders the subtitles on top of the video.
- * </refsect2>
+ *
*/
*/
/**
* SECTION:element-dvdspu
+ * @title: dvdspu
*
* DVD sub picture overlay element.
- *
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * ## Example launch line
* |[
* FIXME: gst-launch-1.0 ...
* ]| FIXME: description for the sample launch pipeline
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
# include <config.h>
/**
* SECTION:element-festival
- *
+ * @title: festival
+ *
* This element connects to a
* <ulink url="http://www.festvox.org/festival/index.html">festival</ulink>
* server process and uses it to synthesize speech. Festival needs to already be
* running in server mode, started as <screen>festival --server</screen>
- *
- * <refsect2>
- * <title>Example pipeline</title>
+ *
+ * ## Example pipeline
* |[
* echo 'Hello G-Streamer!' | gst-launch-1.0 fdsrc fd=0 ! festival ! wavparse ! audioconvert ! alsasink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-fieldanalysis
+ * @title: fieldanalysis
*
* Analyse fields from video buffers to identify whether the buffers are
* progressive/telecined/interlaced and, if telecined, the telecine pattern
* used.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v uridecodebin uri=/path/to/foo.bar ! fieldanalysis ! deinterlace ! videoconvert ! autovideosink
* ]| This pipeline will analyse a video stream with default metrics and thresholds and output progressive frames.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-freeverb
+ * @title: freeverb
*
* Reverberation/room effect.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc wave=saw ! freeverb ! autoaudiosink
* gst-launch-1.0 filesrc location="melo1.ogg" ! decodebin ! audioconvert ! freeverb ! autoaudiosink
* ]|
- * </refsect2>
+ *
*/
/* FIXME:
/**
* SECTION:element-burn
+ * @title: burn
*
* Burn adjusts the colors of a video stream in realtime.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! burn ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of burn on a test stream
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-chromium
+ * @title: chromium
*
* Chromium breaks the colors of a video stream in realtime.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! chromium ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of chromium on a test stream
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-dilate
+ * @title: dilate
*
* Dilate adjusts the colors of a video stream in realtime.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! dilate ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of dilate on a test stream
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-dodge
+ * @title: dodge
*
* Dodge saturates the colors of a video stream in realtime.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! dodge ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of dodge on a test stream
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-exclusion
+ * @title: exclusion
*
* Exclusion saturates the colors of a video stream in realtime.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! exclusion ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of exclusion on a test stream
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-gaussianblur
+ * @title: gaussianblur
*
* Gaussianblur blurs the video stream in realtime.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! gaussianblur ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of gaussianblur on a test stream
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-solarize
+ * @title: solarize
*
* Solarize does a smart inverse in a video stream in realtime.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! solarize ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of solarize on a test stream
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:gstdataprotocol
+ * @title: GstDataProtocol
* @short_description: Serialization of caps, buffers and events.
* @see_also: #GstCaps, #GstEvent, #GstBuffer
*
/**
* SECTION:element-gdpdepay
+ * @title: gdpdepay
* @see_also: gdppay
*
* This element depayloads GStreamer Data Protocol buffers back to deserialized
* buffers and events.
*
- * <refsect2>
* |[
* gst-launch-1.0 -v -m filesrc location=test.gdp ! gdpdepay ! xvimagesink
* ]| This pipeline plays back a serialized video stream as created in the
* example for gdppay.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-gdppay
+ * @title: gdppay
* @see_also: gdpdepay
*
* This element payloads GStreamer buffers and events using the
* GStreamer Data Protocol.
*
- * <refsect2>
* |[
* gst-launch-1.0 -v -m videotestsrc num-buffers=50 ! gdppay ! filesink location=test.gdp
* ]| This pipeline creates a serialized video stream that can be played back
* with the example shown in gdpdepay.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-bulge
+ * @title: bulge
* @see_also: geometrictransform
*
* Bulge is a geometric image transform element. It adds a protuberance in the
* center point.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! bulge ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-circle
+ * @title: circle
* @see_also: geometrictransform
*
* Circle is a geometric image transform element. It warps the picture into an
* arc shaped form.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! circle ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-diffuse
+ * @title: diffuse
* @see_also: geometrictransform
*
* Diffuse is a geometric image transform element. It diffuses the image by
* moving its pixels in random directions.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! diffuse ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-fisheye
+ * @title: fisheye
* @see_also: geometrictransform
*
* Fisheye is a geometric image transform element. It simulates a fisheye lens
* by zooming on the center of the image and compressing the edges.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! fisheye ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-kaleidoscope
+ * @title: kaleidoscope
* @see_also: geometrictransform
*
* The kaleidoscope element applies a 'kaleidoscope' geometric transform to the
* image.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! kaleidoscope ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-marble
+ * @title: marble
* @see_also: geometrictransform
*
* Marble is a geometric image transform element. It applies a marbling effect
* to the image.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! marble ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-mirror
+ * @title: mirror
* @see_also: geometrictransform
*
* Mirror is a geometric transform element. It splits the image into two halves
* and reflects one onto the other.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! mirror ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-perspective
+ * @title: perspective
* @see_also: geometrictransform
*
* The perspective element applies a 2D perspective transform.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! perspective ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
/* FIXME: suppress warnings for deprecated API such as GValueArray
/**
* SECTION:element-pinch
+ * @title: pinch
* @see_also: geometrictransform
*
* Pinch applies a 'pinch' geometric transform to the image.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! pinch ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rotate
+ * @title: rotate
* @see_also: geometrictransform
*
* The rotate element transforms the image by rotating it by a specified angle.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! rotate angle=0.78 ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-sphere
+ * @title: sphere
* @see_also: geometrictransform
*
* The sphere element applies a 'sphere' geometric transform to the image.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! sphere ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-square
+ * @title: square
* @see_also: geometrictransform
*
* The square element distorts the center part of the image into a square.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! square zoom=100 ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-stretch
+ * @title: stretch
* @see_also: geometrictransform
*
* The stretch element stretches the image in a circle around the center point.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! stretch ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-tunnel
+ * @title: tunnel
* @see_also: geometrictransform
*
* Tunnel is a geometric image transform element. It applies a light tunnel
* effect.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! tunnel ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-twirl
+ * @title: twirl
* @see_also: geometrictransform
*
* The twirl element twists the image from the center out.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! twirl ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-waterripple
+ * @title: waterripple
* @see_also: geometrictransform
*
* The waterripple element creates a water ripple effect on the image.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! waterripple ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-id3mux
+ * @title: id3mux
* @see_also: #GstID3Demux, #GstTagSetter
*
* This element adds ID3v2 tags to the beginning of a stream, and ID3v1 tags to the end.
* Tags sent by upstream elements will be picked up automatically (and merged
* according to the merge mode set via the tag setter interface).
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v filesrc location=foo.ogg ! decodebin ! audioconvert ! id3mux ! filesink location=foo.mp3
* ]| A pipeline that transcodes a file from Ogg/Vorbis to mp3 format with
* |[
* gst-launch-1.0 -m filesrc location=foo.mp3 ! id3demux ! fakesink silent=TRUE
* ]| Verify that tags have been written.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-gstinteraudiosink
+ * @title: gstinteraudiosink
*
* The interaudiosink element is an audio sink element. It is used
* in connection with an interaudiosrc element in a different pipeline,
* similar to intervideosink and intervideosrc.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v audiotestsrc ! queue ! interaudiosink
* ]|
* audio.
* See the gstintertest.c example in the gst-plugins-bad source code for
* more details.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-gstinteraudiosrc
+ * @title: gstinteraudiosrc
*
* The interaudiosrc element is an audio source element. It is used
* in connection with an interaudiosink element in a different pipeline.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v interaudiosrc ! queue ! autoaudiosink
* ]|
- *
+ *
* The interaudiosrc element cannot be used effectively with gst-launch-1.0,
* as it requires a second pipeline in the application to send audio.
* See the gstintertest.c example in the gst-plugins-bad source code for
* more details.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-gstintersubsink
+ * @title: gstintersubsink
*
* The intersubsink element is a subtitle sink element. It is used
* in connection with an intersubsrc element in a different pipeline.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v ... ! intersubsink
* ]|
- *
+ *
* The intersubsink element cannot be used effectively with gst-launch-1.0,
* as it requires a second pipeline in the application to send the subtitles to.
* See the gstintertest.c example in the gst-plugins-bad source code for
* more details.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-gstintersubsrc
+ * @title: gstintersubsrc
*
* The intersubsrc element is a subtitle source element. It is used
* in connection with an intersubsink element in a different pipeline,
* similar to interaudiosink and interaudiosrc.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v intersubsrc ! kateenc ! oggmux ! filesink location=out.ogv
* ]|
- *
+ *
* The intersubsrc element cannot be used effectively with gst-launch-1.0,
* as it requires a second pipeline in the application to send subtitles.
* See the gstintertest.c example in the gst-plugins-bad source code for
* more details.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-gstintervideosink
+ * @title: gstintervideosink
*
* The intervideosink element is a video sink element. It is used
* in connection with an intervideosrc element in a different pipeline,
* similar to interaudiosink and interaudiosrc.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! intervideosink
* ]|
- *
+ *
* The intervideosink element cannot be used effectively with gst-launch-1.0,
* as it requires a second pipeline in the application to send video to.
* See the gstintertest.c example in the gst-plugins-bad source code for
* more details.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-gstintervideosrc
+ * @title: gstintervideosrc
*
* The intervideosrc element is a video source element. It is used
* in connection with an intervideosink element in a different pipeline,
* similar to interaudiosink and interaudiosrc.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v intervideosrc ! queue ! xvimagesink
* ]|
- *
+ *
* The intervideosrc element cannot be used effectively with gst-launch-1.0,
* as it requires a second pipeline in the application to send video.
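*
* A rough two-pipeline sketch from application code (the channel name and the
* element choices are only an illustration of how the inter elements pair up):
* |[
* GstElement *send_pipe = gst_parse_launch (
*     "videotestsrc is-live=true ! intervideosink channel=cam1", NULL);
* GstElement *recv_pipe = gst_parse_launch (
*     "intervideosrc channel=cam1 ! queue ! videoconvert ! autovideosink", NULL);
*
* gst_element_set_state (send_pipe, GST_STATE_PLAYING);
* gst_element_set_state (recv_pipe, GST_STATE_PLAYING);
* ]|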
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-interlace
+ * @title: interlace
*
* The interlace element takes a non-interlaced raw video stream as input,
* creates fields out of each frame, then combines fields into interlaced
* frames to output as an interlaced video stream. It can also produce
* telecined streams from progressive input.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc pattern=ball ! interlace ! xvimagesink
* ]|
* |[
* gst-launch-1.0 -v filesrc location=/path/to/file ! decodebin ! videorate !
* videoscale ! video/x-raw,format=\(string\)I420,width=720,height=480,
- * framerate=60000/1001,pixel-aspect-ratio=11/10 !
+ * framerate=60000/1001,pixel-aspect-ratio=11/10 !
* interlace top-field-first=false ! autovideosink
* ]|
* This pipeline converts a progressive video stream into an interlaced
* This pipeline converts a 24 frames per second progressive film stream into a
* 30000/1001 2:3:2:3... pattern telecined stream suitable for displaying film
* content on NTSC.
- * </refsect2>
+ *
*/
*/
/**
* SECTION:element-gstcombdetect
+ * @title: gstcombdetect
*
* The combdetect element detects if combing artifacts are present in
* a raw video stream, and if so, marks them with a zebra stripe
* pattern.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v filesrc location=file.mov ! decodebin ! combdetect !
* xvimagesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-gstivtc
+ * @title: gstivtc
*
* The ivtc element is an inverse telecine filter. It takes interlaced
* video that was created from progressive content using a telecine
* filter, and reconstructs the original progressive content.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc pattern=ball ! video/x-raw,framerate=24/1 !
* interlace !
* converts it to a 60 fields per second interlaced stream. Then the
* stream is inversed telecine'd back to 24 fps, yielding approximately
* the original videotestsrc content.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-gstjp2kdecimator
+ * @title: gstjp2kdecimator
*
* The jp2kdecimator element removes information from JPEG2000 images without reencoding.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc num-buffers=1 ! jp2kenc ! \
* gstjp2kdecimator max-decomposition-levels=2 ! jp2kdec ! \
* ]|
* This pipeline encodes a test image to JPEG2000, only keeps 3 decomposition levels
* decodes the decimated image again and shows it on the screen.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-jifmux
+ * @title: jifmux
* @short_description: JPEG interchange format writer
*
* Writes a JPEG image as JPEG/EXIF or JPEG/JFIF including various metadata. The
* jpeg image received on the sink pad should be minimal (e.g. should not
* contain metadata already).
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc num-buffers=1 ! jpegenc ! jifmux ! filesink location=...
* ]|
* The above pipeline renders a frame, encodes to jpeg, adds metadata and writes
* it to disk.
- * </refsect2>
+ *
*/
/*
jpeg interchange format:
/**
* SECTION:element-jpegparse
+ * @title: jpegparse
* @short_description: JPEG parser
*
* Parses a JPEG stream into JPEG images. It looks for EOI boundaries to
* image header searching for image properties such as width and height
* among others. Jpegparse can also extract metadata (e.g. xmp).
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v souphttpsrc location=... ! jpegparse ! matroskamux ! filesink location=...
* ]|
* The above pipeline fetches a motion JPEG stream from an IP camera over
* HTTP and stores it in a matroska file.
- * </refsect2>
+ *
*/
/* FIXME: output plain JFIF APP marker only. This provides best code reuse.
* JPEG decoders would not need to handle this part anymore. Also when remuxing
/**
* SECTION:element-midiparse
+ * @title: midiparse
* @see_also: fluiddec
*
* This element parses midi-files into midi events. You would need a midi
* renderer such as fluidsynth to convert the events into raw samples.
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 filesrc location=song.mid ! midiparse ! fluiddec ! pulsesink
* ]| This example pipeline will parse the midi and render to raw audio which is
* played via pulseaudio.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-mxfdemux
+ * @title: mxfdemux
*
* mxfdemux demuxes an MXF file into the different contained streams.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v filesrc location=/path/to/mxf ! mxfdemux ! audioconvert ! autoaudiosink
* ]| This pipeline demuxes an MXF file and outputs one of the contained raw audio streams.
- * </refsect2>
+ *
*/
/* TODO:
/**
* SECTION:element-mxfmux
+ * @title: mxfmux
*
* mxfmux muxes different streams into an MXF file.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v filesrc location=/path/to/audio ! decodebin ! queue ! mxfmux name=m ! filesink location=file.mxf filesrc location=/path/to/video ! decodebin ! queue ! m.
* ]| This pipeline muxes an audio and video file into a single MXF file.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-irtspparse
+ * @title: irtspparse
* @short_description: Interleaved RTSP parser
* @see_also: #GstPcapParse
*
* so-called "channels" from received interleaved (TCP) RTSP data
* (typically extracted from some network capture).
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=h264crasher.pcap ! pcapparse ! irtspparse
* ! rtph264depay ! ffdec_h264 ! fakesink
* ]| Read from a pcap dump file using filesrc, extract the raw TCP packets,
* depayload and decode them.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-pcapparse
+ * @title: pcapparse
*
* Extracts payloads from Ethernet-encapsulated IP packets.
* Use #GstPcapParse:src-ip, #GstPcapParse:dst-ip,
* #GstPcapParse:src-port and #GstPcapParse:dst-port to restrict which packets
* should be included.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 filesrc location=h264crasher.pcap ! pcapparse ! rtph264depay
* ! ffdec_h264 ! fakesink
* ]| Read from a pcap dump file using filesrc, extract the raw UDP packets,
* depayload and decode them.
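*
* As a further, illustrative variant (the file name is a placeholder), the port
* properties can be used to keep only packets sent to UDP port 5000:
* |[
* gst-launch-1.0 filesrc location=capture.pcap ! pcapparse dst-port=5000 ! fakesink dump=true
* ]|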
- * </refsect2>
+ *
*/
/* TODO:
/**
* SECTION:element-pnmdec
+ * @title: pnmdec
*
* Decodes pnm images.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=test.pnm ! pnmdec ! videoconvert ! autovideosink
* ]| The above pipeline reads a pnm file and renders it to the screen.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-pnmenc
+ * @title: pnmenc
*
* Encodes pnm images. This plugin supports both raw and ASCII encoding.
* To enable ASCII encoding, set the parameter ascii to TRUE. If you omit
* the parameter or set it to FALSE, the output will be raw encoded.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 videotestsrc num_buffers=1 ! videoconvert ! "video/x-raw,format=GRAY8" ! pnmenc ascii=true ! filesink location=test.pnm
* ]| The above pipeline writes a test pnm file (ASCII encoding).
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-audioparse
+ * @title: audioparse
*
* Converts a byte stream into audio frames.
*
- * <note>This element is deprecated. Use #GstRawAudioParse instead.</note>
+ * > This element is deprecated. Use #GstRawAudioParse instead.
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-videoparse
+ * @title: videoparse
*
* Converts a byte stream into video frames.
*
- * <note>This element is deprecated. Use #GstRawVideoParse instead.</note>
+ * > This element is deprecated. Use #GstRawVideoParse instead.
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-removesilence
+ * @title: removesilence
*
* Removes all silence periods from an audio stream, dropping silence buffers.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v -m filesrc location="audiofile" ! decodebin ! removesilence remove=true ! wavenc ! filesink location=without_audio.wav
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-sdpdemux
+ * @title: sdpdemux
*
* sdpdemux currently understands SDP as the input format of the session description.
* For each stream listed in the SDP a new stream_\%u pad will be created
* with caps derived from the SDP media description. This is a caps of mime type
* "application/x-rtp" that can be connected to any available RTP depayloader
- * element.
- *
+ * element.
+ *
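* Since these stream_\%u pads appear dynamically, an application typically
* links them from a "pad-added" handler. A rough sketch (the depayloader is
* just a placeholder for whatever matches the negotiated caps):
* |[
* static void
* on_pad_added (GstElement * demux, GstPad * pad, gpointer user_data)
* {
*   GstElement *depay = GST_ELEMENT (user_data);
*   GstPad *sinkpad = gst_element_get_static_pad (depay, "sink");
*
*   if (!gst_pad_is_linked (sinkpad))
*     gst_pad_link (pad, sinkpad);
*   gst_object_unref (sinkpad);
* }
*
* g_signal_connect (sdpdemux, "pad-added", G_CALLBACK (on_pad_added), depay);
* ]|
*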
* sdpdemux will internally instantiate an RTP session manager element
* that will handle the RTCP messages to and from the server, jitter removal,
- * packet reordering along with providing a clock for the pipeline.
- *
- * sdpdemux acts like a live element and will therefore only generate data in the
+ * packet reordering along with providing a clock for the pipeline.
+ *
+ * sdpdemux acts like a live element and will therefore only generate data in the
* PLAYING state.
- *
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * ## Example launch line
* |[
* gst-launch-1.0 souphttpsrc location=http://some.server/session.sdp ! sdpdemux ! fakesink
* ]| Establish a connection to an HTTP server that contains an SDP session description
* that gets parsed by sdpdemux and send the raw RTP packets to a fakesink.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-sirendec
+ * @title: sirendec
*
* This decodes audio buffers from the Siren 16 codec (a 16khz extension of
* G.722.1) that is meant to be compatible with the Microsoft Windows Live
*/
/**
* SECTION:element-sirenenc
+ * @title: sirenenc
*
* This encodes audio buffers into the Siren 16 codec (a 16khz extension of
* G.722.1) that is meant to be compatible with the Microsoft Windows Live
/**
* SECTION:element-speed
+ * @title: speed
*
* Plays an audio stream at a different speed (by resampling the audio).
- *
+ *
* Do not use this element. Either use the 'pitch' element, or do a seek with
* a non-1.0 rate parameter; this will have the same effect as using the speed
* element (but relies on the decoder/demuxer to handle this correctly, also
* requires a fairly up-to-date gst-plugins-base, as of February 2007).
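*
* For reference, a minimal sketch of such a rate-changing seek (the pipeline
* variable and the 1.5 rate are only an example):
* |[
* gst_element_seek (pipeline, 1.5, GST_FORMAT_TIME,
*     GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_ACCURATE,
*     GST_SEEK_TYPE_SET, 0, GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE);
* ]|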
- *
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=test.ogg ! decodebin ! audioconvert ! speed speed=1.5 ! audioconvert ! audioresample ! autoaudiosink
* ]| Plays an .ogg file at 1.5x speed.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-stereo
+ * @title: stereo
*
* Create a wide stereo effect.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v filesrc location=sine.ogg ! oggdemux ! vorbisdec ! audioconvert ! stereo ! audioconvert ! audioresample ! alsasink
* ]| Play an Ogg/Vorbis file.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-avwait
+ * @title: avwait
*
* This element will drop all buffers until a specific timecode or running
* time has been reached. It will then pass-through both audio and video,
* the video). In the "audio-after-video" mode, it only drops audio buffers
* until video has started.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location="my_file" ! decodebin name=d ! "audio/x-raw" ! avwait name=l target-timecode-str="00:00:04:00" ! autoaudiosink d. ! "video/x-raw" ! timecodestamper ! l. l. ! queue ! timeoverlay time-mode=time-code ! autovideosink
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-timecodestamper
+ * @title: timecodestamper
* @short_description: Attach a timecode into incoming video frames
*
* This element attaches a timecode into every incoming video frame. It starts
* counting from the stream time of each segment start, which it converts into
* a timecode.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 videotestsrc ! timecodestamper ! autovideosink
* ]|
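*
* To also render the attached timecode on the video, the stream can be passed
* through timeoverlay (an illustrative variant of the example above):
* |[
* gst-launch-1.0 videotestsrc ! timecodestamper ! timeoverlay time-mode=time-code ! autovideosink
* ]|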
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-gstscenechange
+ * @title: gstscenechange
*
* The scenechange element detects scene changes (also known as shot
* changes) in a video stream, and sends a signal when this occurs.
*
* The scenechange element does not work with compressed video.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v filesrc location=some_file.ogv ! decodebin !
* scenechange ! theoraenc ! fakesink
* ]|
- * </refsect2>
+ *
*/
/*
* The algorithm used for scene change detection is a modification
*/
/**
* SECTION:element-gstvideodiff
+ * @title: gstvideodiff
*
* The videodiff element highlights the difference between a frame and its
* previous on the luma plane.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc pattern=ball ! videodiff ! videoconvert ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-gstzebrastripe
+ * @title: gstzebrastripe
*
* The zebrastripe element marks areas of images in a video stream
* that are brighter than a threshold with a diagonal zebra stripe
* threshold setting of 70 is often used to properly adjust skin
* tones.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! zebrastripe ! xvimagesink
* ]|
* property setting can be calculated from IRE by using the formula
* percent = (IRE * 1.075) - 7.5. Note that 100 IRE corresponds to
* 100 %, and 70 IRE corresponds to 68 %.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-videoframe-audiolevel
+ * @title: videoframe-audiolevel
*
* This element acts like a synchronized audio/video "level". It gathers
* all audio buffers sent between two video frames, and then sends a message
* that contains the RMS value of all samples for these buffers.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -m filesrc location="file.mkv" ! decodebin name=d ! "audio/x-raw" ! videoframe-audiolevel name=l ! autoaudiosink d. ! "video/x-raw" ! l. l. ! queue ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-gstdiracparse
+ * @title: gstdiracparse
*
* The gstdiracparse element does FIXME stuff.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v fakesrc ! gstdiracparse ! FIXME ! fakesink
* ]|
* FIXME Describe what the pipeline does.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-simplevideomark
+ * @title: simplevideomark
* @see_also: #GstVideoDetect
*
* This plugin produces #GstSimpleVideoMark::pattern-count squares in the bottom left
- * corner of the video frames. The squares have a width and height of
+ * corner of the video frames. The squares have a width and height of
* respectively #GstSimpleVideoMark:pattern-width and #GstSimpleVideoMark:pattern-height.
* Even squares will be black and odd squares will be white.
- *
+ *
* After writing the pattern, #GstSimpleVideoMark:pattern-data-count squares after the
* pattern squares are produced as the bitarray given in
* #GstSimpleVideoMark:pattern-data. 1 bits will produce white squares and 0 bits will
* produce black squares.
- *
+ *
* The element can be enabled with the #GstSimpleVideoMark:enabled property. It is
* mostly used together with the #GstVideoDetect plugin.
- *
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * ## Example launch line
* |[
* gst-launch-1.0 videotestsrc ! simplevideomark ! videoconvert ! ximagesink
* ]| Add the default black/white squares at the bottom left of the video frames.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-simplevideomarkdetect
+ * @title: simplevideomarkdetect
* @see_also: #GstVideoMark
*
* This plugin detects #GstSimpleVideoMarkDetect:pattern-count squares in the bottom left
* corner of the video frames. The squares have a width and height of
* respectively #GstSimpleVideoMarkDetect:pattern-width and #GstSimpleVideoMarkDetect:pattern-height.
* Even squares must be black and odd squares must be white.
- *
+ *
* When the pattern has been found, #GstSimpleVideoMarkDetect:pattern-data-count squares
* after the pattern squares are read as a bitarray. White squares represent a 1
* bit and black squares a 0 bit. The bitarray will be included in the element
* message that is posted (see below).
- *
+ *
* After the pattern has been found and the data pattern has been read, an
- * element message called <classname>"GstSimpleVideoMarkDetect"</classname> will
+ * element message called `GstSimpleVideoMarkDetect` will
* be posted on the bus. If the pattern is no longer found in the frame, the
* same element message is posted with the have-pattern field set to #FALSE.
* The message is only posted if the #GstSimpleVideoMarkDetect:message property is #TRUE.
- *
+ *
* The message's structure contains these fields:
- * <itemizedlist>
- * <listitem>
- * <para>
- * #gboolean
- * <classname>"have-pattern"</classname>:
- * if the pattern was found. This field will be set to #TRUE for as long as
+ *
+ * * #gboolean `have-pattern`: if the pattern was found. This field will be set to #TRUE for as long as
* the pattern was found in the frame and set to FALSE for the first frame
* that does not contain the pattern anymore.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"timestamp"</classname>:
- * the timestamp of the buffer that triggered the message.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"stream-time"</classname>:
- * the stream time of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"running-time"</classname>:
- * the running_time of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"duration"</classname>:
- * the duration of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #guint64
- * <classname>"data"</classname>:
- * the data-pattern found after the pattern or 0 when have-signal is #FALSE.
- * </para>
- * </listitem>
- * </itemizedlist>
- *
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * * #GstClockTime `timestamp`: the timestamp of the buffer that triggered the message.
+ *
+ * * #GstClockTime `stream-time`: the stream time of the buffer.
+ *
+ * * #GstClockTime `running-time`: the running_time of the buffer.
+ *
+ * * #GstClockTime `duration`: the duration of the buffer.
+ *
+ * * #guint64 `data`: the data-pattern found after the pattern or 0 when have-signal is #FALSE.
+ *
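* As a rough sketch (function and variable names are illustrative), the
* message fields above can be read from a bus watch in the application:
* |[
* static gboolean
* on_message (GstBus * bus, GstMessage * msg, gpointer user_data)
* {
*   if (GST_MESSAGE_TYPE (msg) == GST_MESSAGE_ELEMENT) {
*     const GstStructure *s = gst_message_get_structure (msg);
*
*     if (gst_structure_has_name (s, "GstSimpleVideoMarkDetect")) {
*       gboolean have_pattern = FALSE;
*       guint64 data = 0;
*
*       gst_structure_get_boolean (s, "have-pattern", &have_pattern);
*       gst_structure_get_uint64 (s, "data", &data);
*       g_print ("have-pattern %d, data %" G_GUINT64_FORMAT "\n", have_pattern, data);
*     }
*   }
*   return TRUE;
* }
* ]|
*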
+ * ## Example launch line
* |[
* gst-launch-1.0 videotestsrc ! simplevideomarkdetect ! videoconvert ! ximagesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-videoanalyse
+ * @title: videoanalyse
*
* This plugin analyses every video frame and if the #GstVideoAnalyse:message
* property is #TRUE, posts an element message with video statistics called
- * <classname>"GstVideoAnalyse"</classname>.
+ * `GstVideoAnalyse`.
*
* The message's structure contains these fields:
- * <itemizedlist>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"timestamp"</classname>:
- * the timestamp of the buffer that triggered the message.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"stream-time"</classname>:
- * the stream time of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"running-time"</classname>:
- * the running_time of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"duration"</classname>:
- * the duration of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #gdouble
- * <classname>"luma-average"</classname>:
- * the average brightness of the frame. Range: 0.0-1.0
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #gdouble
- * <classname>"luma-variance"</classname>:
- * the brightness variance of the frame.
- * </para>
- * </listitem>
- * </itemizedlist>
- *
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * * #GstClockTime `timestamp`: the timestamp of the buffer that triggered the message.
+ *
+ * * #GstClockTime `stream-time`: the stream time of the buffer.
+ *
+ * * #GstClockTime `running-time`: the running_time of the buffer.
+ *
+ * * #GstClockTime `duration`: the duration of the buffer.
+ *
+ * * #gdouble `luma-average`: the average brightness of the frame. Range: 0.0-1.0
+ *
+ * * #gdouble `luma-variance`: the brightness variance of the frame.
+ *
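* The message is handled just like the sketch shown for simplevideomarkdetect
* above; for example (illustrative only), the average brightness can be read
* from the posted structure with:
* |[
* gdouble avg = 0.0;
*
* gst_structure_get_double (gst_message_get_structure (msg), "luma-average", &avg);
* ]|
*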
+ * ## Example launch line
* |[
* gst-launch-1.0 -m videotestsrc ! videoanalyse ! videoconvert ! ximagesink
- * ]| This pipeline emits messages to the console for each frame that has been analysed.
- * </refsect2>
+ * ]| This pipeline emits messages to the console for each frame that has been analysed.
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-gsty4mdec
+ * @title: gsty4mdec
*
* The gsty4mdec element decodes uncompressed video in YUV4MPEG format.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v filesrc location=file.y4m ! y4mdec ! xvimagesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
* Boston, MA 02110-1335, USA.
*/
/**
- * SECTION:element-gstyadif
+ * SECTION:element-yadif
+ * @title: yadif
*
* The yadif element deinterlaces video, using the YADIF deinterlacing
* filter copied from Libav. This element only handles the simple case
* inverse telecine and deinterlace cases that are handled by the
* deinterlace element.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc pattern=ball ! interlace ! yadif ! xvimagesink
* ]|
* This pipeline creates an interlaced test pattern, and then deinterlaces
* it using the yadif filter.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-ahcsrc
+ * @title: ahcsrc
*
* ahcsrc can be used to capture video from android devices. It uses the
* android.hardware.Camera Java API to capture from the system's cameras.
* so it can be loaded into the virtual machine.
* In order for it to work, an environment variable must be set to a writable
* directory.
- * The source will look for the environment variable “TMP” which must contain
+ * The source will look for the environment variable "TMP" which must contain
* the absolute path to a writable directory.
* It can be retrieved using the following Java code:
* |[
* Where the @context variable is an object of type android.content.Context
* (including its subclasses android.app.Activity or android.app.Application).
* Another optional environment variable can be set for pointing to the
- * optimized dex classes directory. If the environment variable “DEX” is
- * available, it will be used, otherwise, the directory in the “TMP” environment
+ * optimized dex classes directory. If the environment variable "DEX" is
+ * available, it will be used; otherwise, the directory in the "TMP" environment
* variable will be used for the optimized dex directory.
* The system dex directory can be obtained using the following Java code:
* |[
- * context.getDir(“dex”, 0).getAbsolutePath();
+ * context.getDir("dex", 0).getAbsolutePath();
* ]|
*
- * <note>
- * Those environment variable must be set before gst_init is called from
- * the native code.
- * </note>
+ * > Those environment variables must be set before gst_init is called from
+ * > the native code.
*
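* A minimal illustration from native code (the directory path is only a
* placeholder):
* |[
* setenv ("TMP", "/data/data/org.example.app/cache", 1);
* gst_init (&argc, &argv);
* ]|
*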
- * <note>
- * If the “TMP” environment variable is not available or the directory is not
- * writable or any other issue happens while trying to load the embedded jar
- * file, then the source will fallback on trying to load the class directly
- * from the running application.
- * The file com/gstreamer/GstAhcCallback.java in the source's directory can be
- * copied into the Android application so it can be loaded at runtime
- * as a fallback mechanism.
- * </note>
+ * > If the "TMP" environment variable is not available or the directory is not
+ * > writable or any other issue happens while trying to load the embedded jar
+ * > file, then the source will fall back to trying to load the class directly
+ * > from the running application.
+ * > The file com/gstreamer/GstAhcCallback.java in the source's directory can be
+ * > copied into the Android application so it can be loaded at runtime
+ * > as a fallback mechanism.
*
*/
* Boston, MA 02110-1301, USA.
*/
/**
- * SECTION:element-gstahssrc
+ * SECTION:element-ahssrc
+ * @title: ahssrc
*
* The ahssrc element reads data from Android device sensors
* (android.hardware.Sensor).
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v ahssrc ! fakesink
* ]|
* Push Android sensor data into a fakesink.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
* Boston, MA 02110-1335, USA.
*/
/**
- * SECTION:element-gstatdec
+ * SECTION:element-atdec
+ * @title: atdec
*
* AudioToolbox based decoder.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v filesrc location=file.mov ! qtdemux ! queue ! aacparse ! atdec ! autoaudiosink
* ]|
* Decode aac audio from a mov file
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
* Boston, MA 02110-1335, USA.
*/
/**
- * SECTION:element-gstvtdec
+ * SECTION:element-vtdec
+ * @title: vtdec
*
* Apple VideoToolbox based decoder.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v filesrc location=file.mov ! qtdemux ! queue ! h264parse ! vtdec ! videoconvert ! autovideosink
* ]|
* Decode h264 video from a mov file.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
WINDOW_VISIBILITY_ERROR = 4
} WindowHandleVisibility;
-/** FWD DECLS **/
+/* FWD DECLS */
static gboolean d3d_hidden_window_thread (GstD3DVideoSinkClass * klass);
static gboolean d3d_window_wndproc_set (GstD3DVideoSink * sink);
#define WM_QUIT_THREAD WM_USER+0
-/** Helpers **/
+/* Helpers */
#define ERROR_CHECK_HR(hr) \
if(hr != S_OK) { \
#define D3DFMT_NV12 MAKEFOURCC ('N', 'V', '1', '2')
#endif
-/** FORMATS **/
+/* FORMATS */
#define CASE(x) case x: return #x;
static const gchar *
return ret;
}
-/** Windows for rendering (User Set or Internal) **/
+/* Windows for rendering (User Set or Internal) */
static void
d3d_window_wndproc_unset (GstD3DVideoSink * sink)
return TRUE;
}
-/** D3D Lost and Reset Device **/
+/* D3D Lost and Reset Device */
static void
d3d_notify_device_lost (GstD3DVideoSink * sink)
UNLOCK_SINK (sink);
}
-/** Swap Chains **/
+/* Swap Chains */
static gboolean
d3d_init_swap_chain (GstD3DVideoSink * sink, HWND hWnd)
}
-/** D3D Window Proc Functions **/
+/* D3D Window Proc Functions */
static LRESULT APIENTRY
d3d_wnd_proc (HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam)
return ret;
}
-/** Internal Window **/
+/* Internal Window */
static LRESULT APIENTRY
d3d_wnd_proc_internal (HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam)
return dat.hWnd;
}
-/*** D3D Video Class Methdos ***/
+/* D3D Video Class Methods */
gboolean
d3d_class_init (GstD3DVideoSink * sink)
UNLOCK_CLASS (NULL, klass);
}
-/** Hidden Window Loop Thread **/
+/* Hidden Window Loop Thread */
static LRESULT APIENTRY
D3DHiddenWndProc (HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam)
GST_DEBUG_CATEGORY (gst_d3dvideosink_debug);
#define GST_CAT_DEFAULT gst_d3dvideosink_debug
-/** FWD DECLS **/
+/* FWD DECLS */
/* GstXOverlay Interface */
static void
gst_d3dvideosink_video_overlay_interface_init (GstVideoOverlayInterface *
g_rec_mutex_init (&sink->lock);
}
-/** GObject Functions **/
+/* GObject Functions */
static void
gst_d3dvideosink_finalize (GObject * gobject)
}
}
-/** GstBaseSinkClass Functions **/
+/* GstBaseSinkClass Functions */
static GstCaps *
gst_d3dvideosink_get_caps (GstBaseSink * basesink, GstCaps * filter)
return TRUE;
}
-/** PUBLIC FUNCTIONS **/
+/* PUBLIC FUNCTIONS */
/* Interface Registrations */
}
}
-/** PRIVATE FUNCTIONS **/
+/* PRIVATE FUNCTIONS */
/* Plugin entry point */
/**
* SECTION:element-directsoundsrc
+ * @title: directsoundsrc
*
* Reads audio data using the DirectSound API.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v directsoundsrc ! audioconvert ! vorbisenc ! oggmux ! filesink location=dsound.ogg
* ]| Record from DirectSound and encode to Ogg/Vorbis.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-dvbsrc
+ * @title: dvbsrc
*
* dvbsrc can be used to capture media from DVB cards. Supported DTV
* broadcasting standards include DVB-T/C/S, ATSC, ISDB-T and DTMB.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 dvbsrc modulation="QAM 64" trans-mode=8k bandwidth=8 frequency=514000000 code-rate-lp=AUTO code-rate-hp=2/3 guard=4 hierarchy=0 ! mpegtsdemux name=demux ! queue max-size-buffers=0 max-size-time=0 ! mpegvideoparse ! mpeg2dec ! xvimagesink demux. ! queue max-size-buffers=0 max-size-time=0 ! mpegaudioparse ! mpg123audiodec ! audioconvert ! pulsesink
* ]| Captures a full transport stream from DVB card 0 that is a DVB-T card at tuned frequency 514000000 Hz with other parameters as seen in the pipeline and renders the first TV program on the transport stream.
* |[
* gst-launch-1.0 dvbsrc frequency=503000000 delsys="atsc" modulation="8vsb" pids=48:49:52 ! decodebin name=dec dec. ! videoconvert ! autovideosink dec. ! audioconvert ! autoaudiosink
* ]| Captures and renders KOFY-HD in San Jose, California. This is an ATSC broadcast, PMT ID 48, Audio/Video elementary stream PIDs 49 and 52 respectively.
- * </refsect2>
+ *
*/
/*
/**
* SECTION:element-kmssink
+ * @title: kmssink
* @short_description: A KMS/DRM based video sink
*
* kmssink is a simple video sink that renders video frames directly
* in a plane of a DRM device.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 videotestsrc ! kmssink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-openslessink
+ * @title: openslessink
* @see_also: openslessrc
*
* This element renders raw audio samples using the OpenSL ES API in Android OS.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v filesrc location=music.ogg ! oggdemux ! vorbisdec ! audioconvert ! audioresample ! openslessink
* ]| Play an Ogg/Vorbis file.
- * </refsect2>
*
*/
/**
* SECTION:element-openslessrc
+ * @title: openslessrc
* @see_also: openslessink
*
* This element reads data from default audio input using the OpenSL ES API in Android OS.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v openslessrc ! audioconvert ! vorbisenc ! oggmux ! filesink location=recorded.ogg
* ]| Record from default audio input and encode to Ogg/Vorbis.
- * </refsect2>
*
*/
*/
/**
* SECTION:element-shmsink
+ * @title: shmsink
*
* Send data over shared memory to the matching source.
*
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 -v videotestsrc ! "video/x-raw, format=YUY2, color-matrix=sdtv, \
* chroma-site=mpeg2, width=(int)320, height=(int)240, framerate=(fraction)30/1" \
* ! shmsink socket-path=/tmp/blah shm-size=2000000
* ]| Send video to shm buffers.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
*/
/**
* SECTION:element-shmsrc
+ * @title: shmsrc
*
* Receive data from the shared memory sink.
*
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 shmsrc socket-path=/tmp/blah ! \
* "video/x-raw, format=YUY2, color-matrix=sdtv, \
* chroma-site=mpeg2, width=(int)320, height=(int)240, framerate=(fraction)30/1" \
* ! queue ! videoconvert ! autovideosink
* ]| Render video from shm buffers.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-tinyalsasink
+ * @title: tinyalsasink
* @see_also: alsasink
*
* This element renders raw audio samples using the ALSA audio API via the
* tinyalsa library.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v uridecodebin uri=file:///path/to/audio.ogg ! audioconvert ! audioresample ! tinyalsasink
* ]| Play an Ogg/Vorbis file and output audio via ALSA using the tinyalsa
* library.
- * </refsect2>
+ *
*/
#include <gst/audio/gstaudiobasesink.h>
/**
* SECTION:element-uvch264mjpgdemux
+ * @title: uvch264mjpgdemux
* @short_description: UVC H264 compliant MJPG demuxer
*
* Parses a MJPG stream from a UVC H264 compliant encoding camera and extracts
/**
* SECTION:element-uvch264-src
+ * @title: uvch264-src
*
* A camera bin src element that wraps v4l2src and implements UVC H264
* Extension Units (XU) to control the H264 encoder in the camera
* @flow_lock: used to protect data flow routines from external calls such as
* events from @event_thread or methods from the #GstXOverlay interface
* @par: used to override calculated pixel aspect ratio from @xcontext
- * @synchronous: used to store if XSynchronous should be used or not (for
+ * @synchronous: used to store if XSynchronous should be used or not (for
* debugging purpose only)
* @handle_events: used to know if we should handle select XEvents or not
*
* gst_vdp_video_memory_alloc:
* @device: a #GstVdpDevice
* @info: the #GstVideoInfo describing the format to use
- *
+ *
* Returns: a GstMemory object with a VdpVideoSurface specified by @info
* from @device
*/
/**
* SECTION:element-vdpauvideopostprocess
+ * @title: vdpauvideopostprocess
*
* FIXME:Describe vdpaumpegdec here.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v -m fakesrc ! vdpauvideopostprocess ! fakesink silent=TRUE
* ]|
- * </refsect2>
+ *
*/
/*
/**
* SECTION:element-wasapisink
+ * @title: wasapisink
*
* Provides audio playback using the Windows Audio Session API available with
* Vista and newer.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v audiotestsrc samplesperbuffer=160 ! wasapisink
* ]| Generate 20 ms buffers and render to the default audio device.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
# include <config.h>
/**
* SECTION:element-wasapisrc
+ * @title: wasapisrc
*
* Provides audio capture from the Windows Audio Session API available with
* Vista and newer.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v wasapisrc ! fakesink
* ]| Capture from the default audio device and render to fakesink.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
# include <config.h>
/**
* SECTION:element-ksvideosrc
+ * @title: ksvideosrc
*
* Provides low-latency video capture from WDM cameras on Windows.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v ksvideosrc do-stats=TRUE ! videoconvert ! dshowvideosink
* ]| Capture from a camera and render using dshowvideosink.
* gst-launch-1.0 -v ksvideosrc do-stats=TRUE ! image/jpeg, width=640, height=480
* ! jpegdec ! videoconvert ! dshowvideosink
* ]| Capture from an MJPEG camera and render using dshowvideosink.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-dx9screencapsrc
+ * @title: dx9screencapsrc
*
* This element uses DirectX to capture the desktop or a portion of it.
* The default is capturing the whole desktop, but #GstDX9ScreenCapSrc:x,
* Use #GstDX9ScreenCapSrc:monitor for changing which monitor to capture
* from.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 dx9screencapsrc ! videoconvert ! dshowvideosink
* ]| Capture the desktop and display it.
* gst-launch-1.0 dx9screencapsrc x=100 y=100 width=320 height=240 !
* videoconvert ! dshowvideosink
* ]| Capture a portion of the desktop and display it.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-gdiscreencapsrc
+ * @title: gdiscreencapsrc
*
* This element uses GDI to capture the desktop or a portion of it.
* The default is capturing the whole desktop, but #GstGDIScreenCapSrc:x,
*
* Set #GstGDIScreenCapSrc:cursor to TRUE to include the mouse cursor.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 gdiscreencapsrc ! videoconvert ! dshowvideosink
* ]| Capture the desktop and display it.
* ! videoconvert ! dshowvideosink
* ]| Capture a portion of the desktop, including the mouse cursor, and
* display it.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H