*/
/**
* SECTION:element-aasink
+ * @title: aasink
* @see_also: #GstCACASink
*
* Displays video as b/w ascii art.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=test.avi ! decodebin ! videoconvert ! aasink
* ]| This pipeline renders a video to ascii art into a separate window.
* |[
* gst-launch-1.0 filesrc location=test.avi ! decodebin ! videoconvert ! aasink driver=curses
* ]| This pipeline renders a video to ascii art into the current terminal.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-cairooverlay
+ * @title: cairooverlay
*
* cairooverlay renders an overlay using a application provided render function.
*
* The full example can be found in tests/examples/cairo/cairo_overlay.c
- * <refsect2>
- * <title>Example code</title>
+ *
+ * ## Example code
* |[
*
* #include <gst/gst.h>
* int width;
* int height;
* } CairoOverlayState;
- *
+ *
* ...
*
* static void
* }
*
* static void
- * draw_overlay (GstElement * overlay, cairo_t * cr, guint64 timestamp,
+ * draw_overlay (GstElement * overlay, cairo_t * cr, guint64 timestamp,
* guint64 duration, gpointer user_data)
* {
* CairoOverlayState *s = (CairoOverlayState *)user_data;
* cairo_move_to (cr, 0, 0);
* cairo_curve_to (cr, 0,-30, -50,-30, -50,0);
* cairo_curve_to (cr, -50,30, 0,35, 0,60 );
- * cairo_curve_to (cr, 0,35, 50,30, 50,0 ); *
+ * cairo_curve_to (cr, 0,35, 50,30, 50,0 );
* cairo_curve_to (cr, 50,-30, 0,-30, 0,0 );
* cairo_set_source_rgba (cr, 0.9, 0.0, 0.1, 0.7);
* cairo_fill (cr);
*
* g_signal_connect (cairo_overlay, "draw", G_CALLBACK (draw_overlay),
* overlay_state);
- * g_signal_connect (cairo_overlay, "caps-changed",
+ * g_signal_connect (cairo_overlay, "caps-changed",
* G_CALLBACK (prepare_overlay), overlay_state);
* ...
*
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
* @cr: Cairo context to draw to.
* @timestamp: Timestamp (see #GstClockTime) of the current buffer.
* @duration: Duration (see #GstClockTime) of the current buffer.
- *
+ *
* This signal is emitted when the overlay should be drawn.
*/
gst_cairo_overlay_signals[SIGNAL_DRAW] =
* GstCairoOverlay::caps-changed:
* @overlay: Overlay element emitting the signal.
* @caps: The #GstCaps of the element.
- *
+ *
* This signal is emitted when the caps of the element has changed.
*/
gst_cairo_overlay_signals[SIGNAL_CAPS_CHANGED] =
/**
* SECTION:element-dvdec
+ * @title: dvdec
*
* dvdec decodes DV video into raw video. The element expects a full DV frame
* as input, which is 120000 bytes for NTSC and 144000 for PAL video.
*
* This element can perform simple frame dropping with the #GstDVDec:drop-factor
- * property. Setting this property to a value N > 1 will only decode every
+ * property. Setting this property to a value N > 1 will only decode every
* Nth frame.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=test.dv ! dvdemux name=demux ! dvdec ! xvimagesink
* ]| This pipeline decodes and renders the raw DV stream to a videosink.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-dvdemux
+ * @title: dvdemux
*
* dvdemux splits raw DV into its audio and video components. The audio will be
* decoded raw samples and the video will be encoded DV video.
* This element can operate in both push and pull mode depending on the
* capabilities of the upstream peer.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=test.dv ! dvdemux name=demux ! queue ! audioconvert ! alsasink demux. ! queue ! dvdec ! xvimagesink
* ]| This pipeline decodes and renders the raw DV stream to an audio and a videosink.
- * </refsect2>
+ *
*/
/* DV output has two modes, normal and wide. The resolution is the same in both
/**
* SECTION:element-flacdec
+ * @title: flacdec
* @see_also: #GstFlacEnc
*
* flacdec decodes FLAC streams.
* <ulink url="http://flac.sourceforge.net/">FLAC</ulink>
* is a Free Lossless Audio Codec.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=media/small/dark.441-16-s.flac ! flacparse ! flacdec ! audioconvert ! audioresample ! autoaudiosink
* ]|
* |[
* gst-launch-1.0 souphttpsrc location=http://gstreamer.freedesktop.org/media/small/dark.441-16-s.flac ! flacparse ! flacdec ! audioconvert ! audioresample ! queue min-threshold-buffers=10 ! autoaudiosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-flacenc
+ * @title: flacenc
* @see_also: #GstFlacDec
*
* flacenc encodes FLAC streams.
* is a Free Lossless Audio Codec. FLAC audio can directly be written into
* a file, or embedded into containers such as oggmux or matroskamux.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc num-buffers=100 ! flacenc ! filesink location=beep.flac
* ]| Encode a short sine wave into FLAC
* |[
* gst-launch-1.0 cdparanoiasrc track=5 ! queue ! audioconvert ! flacenc ! filesink location=track5.flac
* ]| Rip track 5 of an audio CD and encode it losslessly to a FLAC file
- * </refsect2>
+ *
*/
/* TODO: - We currently don't handle discontinuities in the stream in a useful
/**
* SECTION:element-flactag
+ * @title: flactag
* @see_also: #flacenc, #flacdec, #GstTagSetter
*
* The flactag element can change the tag contained within a raw
* automatically (and merged according to the merge mode set via the tag
* setter interface).
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v filesrc location=foo.flac ! flactag ! filesink location=bar.flac
* ]| This element is not useful with gst-launch, because it does not support
* setting the tags on a #GstTagSetter interface. Conceptually, the element
* will usually be used in this order though.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-gdkpixbufoverlay
+ * @title: gdkpixbufoverlay
*
* The gdkpixbufoverlay element overlays an image loaded from file onto
* a video stream.
*
* Negative offsets are also not yet supported.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! gdkpixbufoverlay location=image.png ! autovideosink
* ]|
* Overlays the image in image.png onto the test video picture produced by
* videotestsrc.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-gdkpixbufsink
+ * @title: gdkpixbufsink
*
* This sink element takes RGB or RGBA images as input and wraps them into
* #GdkPixbuf objects, for easy saving to file via the
*
* There are two ways to use this element and obtain the #GdkPixbuf objects
* created:
- * <itemizedlist>
- * <listitem>
- * Watching for element messages named <classname>"preroll-pixbuf"
- * </classname> or <classname>"pixbuf"</classname> on the bus, which
+ *
+ * * Watching for element messages named `preroll-pixbuf` or `pixbuf` on the bus, which
* will be posted whenever an image would usually be rendered. See below for
* more details on these messages and how to extract the pixbuf object
* contained in them.
- * </listitem>
- * <listitem>
- * Retrieving the current pixbuf via the #GstGdkPixbufSink:last-pixbuf property
+ *
+ * * Retrieving the current pixbuf via the #GstGdkPixbufSink:last-pixbuf property
* when needed. This is the easiest way to get at pixbufs for snapshotting
* purposes - just wait until the pipeline is prerolled (ASYNC_DONE message
* on the bus), then read the property. If you use this method, you may want
* to disable message posting by setting the #GstGdkPixbufSink:post-messages
* property to %FALSE. This avoids unnecessary memory overhead.
- * </listitem>
- * </itemizedlist>
*
* The primary purpose of this element is to abstract away the #GstBuffer to
* #GdkPixbuf conversion. Other than that it's very similar to the fakesink
* ximagesink, xvimagesink or some other suitable video sink in connection
* with the #GstXOverlay interface instead if you want to do video playback.
*
- * <refsect2>
- * <title>Message details</title>
+ * ## Message details
+ *
* As mentioned above, this element will by default post element messages
- * containing structures named <classname>"preroll-pixbuf"
- * </classname> or <classname>"pixbuf"</classname> on the bus (this
+ * containing structures named `preroll-pixbuf` or `pixbuf` on the bus (this
* can be disabled by setting the #GstGdkPixbufSink:post-messages property
* to %FALSE though). The element message structure has the following fields:
- * <itemizedlist>
- * <listitem>
- * <classname>"pixbuf"</classname>: the #GdkPixbuf object
- * </listitem>
- * <listitem>
- * <classname>"pixel-aspect-ratio"</classname>: the pixel aspect
- * ratio (PAR) of the input image (this field contains a #GstFraction); the
+ *
+ * * `pixbuf`: the #GdkPixbuf object
+ * * `pixel-aspect-ratio`: the pixel aspect ratio (PAR) of the input image
+ * (this field contains a #GstFraction); the
* PAR is usually 1:1 for images, but is often something non-1:1 in the case
* of video input. In this case the image may be distorted and you may need
* to rescale it accordingly before saving it to file or displaying it. This
* according to the size of the output window, in which case it is much more
* efficient to only scale once rather than twice). You can put a videoscale
* element and a capsfilter element with
- * <literal>video/x-raw-rgb,pixel-aspect-ratio=(fraction)1/1</literal> caps
+ * `video/x-raw-rgb,pixel-aspect-ratio=(fraction)1/1` caps
* in front of this element to make sure the pixbufs always have a 1:1 PAR.
- * </listitem>
- * </itemizedlist>
- * </refsect2>
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 -m -v videotestsrc num-buffers=1 ! gdkpixbufsink
* ]| Process one single test image as pixbuf (note that the output you see will
* be slightly misleading. The message structure does contain a valid pixbuf
* object even if the structure string says '(NULL)').
- * </refsect2>
*/
#ifdef HAVE_CONFIG_H
* Get the jack client connection for @id and @server. Connections to the same
* @id and @server will receive the same physical Jack client connection and
* will therefore be scheduled in the same process callback.
- *
+ *
* Returns: a #GstJackAudioClient.
*/
GstJackAudioClient *
/**
* SECTION:element-jackaudiosink
+ * @title: jackaudiosink
* @see_also: #GstAudioBaseSink, #GstAudioRingBuffer
*
* A Sink that outputs data to Jack ports.
- *
- * It will create N Jack ports named out_<name>_<num> where
+ *
+ * It will create N Jack ports named out_<name>_<num> where
* <name> is the element name and <num> is starting from 1.
* Each port corresponds to a gstreamer channel.
- *
+ *
* The samplerate as exposed on the caps is always the same as the samplerate of
* the jack server.
- *
+ *
* When the #GstJackAudioSink:connect property is set to auto, this element
* will try to connect each output port to a random physical jack input pin. In
* this mode, the sink will expose the number of physical channels on its pad
* caps.
- *
+ *
* When the #GstJackAudioSink:connect property is set to none, the element will
* accept any number of input channels and will create (but not connect) an
* output port for each channel.
- *
+ *
* The element will generate an error when the Jack server is shut down when it
* was PAUSED or PLAYING. This element does not support dynamic rate and buffer
* size changes at runtime.
- *
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc ! jackaudiosink
* ]| Play a sine wave to using jack.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* GstJackAudioSink:
- *
+ *
* Opaque #GstJackAudioSink.
*/
struct _GstJackAudioSink {
/**
* SECTION:element-jackaudiosrc
+ * @title: jackaudiosrc
* @see_also: #GstAudioBaseSrc, #GstAudioRingBuffer
*
* A Src that inputs data from Jack ports.
- *
- * It will create N Jack ports named in_<name>_<num> where
+ *
+ * It will create N Jack ports named in_<name>_<num> where
* <name> is the element name and <num> is starting from 1.
* Each port corresponds to a gstreamer channel.
- *
+ *
* The samplerate as exposed on the caps is always the same as the samplerate of
* the jack server.
- *
+ *
* When the #GstJackAudioSrc:connect property is set to auto, this element
- * will try to connect each input port to a random physical jack output pin.
- *
+ * will try to connect each input port to a random physical jack output pin.
+ *
* When the #GstJackAudioSrc:connect property is set to none, the element will
* accept any number of output channels and will create (but not connect) an
* input port for each channel.
- *
+ *
* The element will generate an error when the Jack server is shut down when it
* was PAUSED or PLAYING. This element does not support dynamic rate and buffer
* size changes at runtime.
- *
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * ## Example launch line
* |[
* gst-launch-1.0 jackaudiosrc connect=0 ! jackaudiosink connect=0
* ]| Get audio input into gstreamer from jack.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-jpegdec
+ * @title: jpegdec
*
* Decodes jpeg images.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v filesrc location=mjpeg.avi ! avidemux ! queue ! jpegdec ! videoconvert ! videoscale ! autovideosink
* ]| The above pipeline decode the mjpeg stream and renders it to the screen.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-jpegenc
+ * @title: jpegenc
*
* Encodes jpeg images.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 videotestsrc num-buffers=50 ! video/x-raw, framerate='(fraction)'5/1 ! jpegenc ! avimux ! filesink location=mjpeg.avi
* ]| a pipeline to mux 5 JPEG frames per second into a 10 sec. long motion jpeg
* avi.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-smokedec
+ * @title: smokedec
*
* Decodes images in smoke format.
*/
*/
/**
* SECTION:element-smokeenc
+ * @title: smokeenc
*
* Encodes images in smoke format.
*/
*/
/**
* SECTION:element-cacasink
+ * @title: cacasink
* @see_also: #GstAASink
*
* Displays video as color ascii art.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* CACA_GEOMETRY=160x60 CACA_FONT=5x7 gst-launch-1.0 filesrc location=test.avi ! decodebin ! videoconvert ! cacasink
* ]| This pipeline renders a video to ascii art into a separate window using a
* |[
* CACA_DRIVER=ncurses gst-launch-1.0 filesrc location=test.avi ! decodebin ! videoconvert ! cacasink
* ]| This pipeline renders a video to ascii art into the current terminal.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-pngdec
+ * @title: pngdec
*
* Decodes png images. If there is no framerate set on sink caps, it sends EOS
* after the first picture.
*/
/**
* SECTION:element-pngenc
+ * @title: pngenc
*
* Encodes png images.
*/
/**
* SECTION:element-pulsesink
+ * @title: pulsesink
* @see_also: pulsesrc
*
* This element outputs audio to a
* <ulink href="http://www.pulseaudio.org">PulseAudio sound server</ulink>.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v filesrc location=sine.ogg ! oggdemux ! vorbisdec ! audioconvert ! audioresample ! pulsesink
* ]| Play an Ogg/Vorbis file.
* gst-launch-1.0 -v audiotestsrc ! pulsesink stream-properties="props,media.title=test"
* ]| Play a sine wave and set a stream property. The property can be checked
* with "pactl list".
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-pulsesrc
+ * @title: pulsesrc
* @see_also: pulsesink
*
* This element captures audio from a
* <ulink href="http://www.pulseaudio.org">PulseAudio sound server</ulink>.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v pulsesrc ! audioconvert ! vorbisenc ! oggmux ! filesink location=alsasrc.ogg
* ]| Record from a sound card using pulseaudio and encode to Ogg/Vorbis.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-dv1394src
+ * @title: dv1394src
*
* Read DV (digital video) data from firewire port.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 dv1394src ! queue ! dvdemux name=d ! queue ! dvdec ! xvimagesink d. ! queue ! alsasink
* ]| This pipeline captures from the firewire port and displays it (might need
* format converters for audio/video).
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-hdv1394src
+ * @title: hdv1394src
*
* Read MPEG-TS data from firewire port.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 hdv1394src ! queue ! decodebin name=d ! queue ! xvimagesink d. ! queue ! alsasink
* ]| captures from the firewire port and plays the streams.
* |[
* gst-launch-1.0 hdv1394src ! queue ! filesink location=mydump.ts
* ]| capture to a disk file
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-shout2send
+ * @title: shout2send
*
* shout2send pushes a media stream to an Icecast server
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 uridecodebin uri=file:///path/to/audiofile ! audioconvert ! vorbisenc ! oggmux ! shout2send mount=/stream.ogg port=8000 username=source password=somepassword ip=server_IP_address_or_hostname
* ]| This pipeline demuxes, decodes, re-encodes and re-muxes an audio
* media file into oggvorbis and sends the resulting stream to an Icecast
* server. Properties mount, port, username and password are all server-config
* dependent.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-gstsouphttpclientsink
+ * @title: gstsouphttpclientsink
*
* The souphttpclientsink element sends pipeline data to an HTTP server
* using HTTP PUT commands.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc num-buffers=300 ! theoraenc ! oggmux !
* souphttpclientsink location=http://server/filename.ogv
* ]|
- *
+ *
* This example encodes 10 seconds of video and sends it to the HTTP
* server "server" using HTTP PUT commands.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-souphttpsrc
+ * @title: souphttpsrc
*
* This plugin reads data from a remote location specified by a URI.
* Supported protocols are 'http', 'https'.
* need to use the #ICYDemux element as follow-up element to extract the Icecast
* metadata and to determine the underlying media type.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v souphttpsrc location=https://some.server.org/index.html
* ! filesink location=/home/joe/server.html
* These are used by the mime/multipart demultiplexer to emit timestamps
* on the JPEG-encoded video frame buffers. This allows the Matroska
* multiplexer to timestamp the frames in the resulting file.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-speexdec
+ * @title: speexdec
* @see_also: speexenc, oggdemux
*
* This element decodes a Speex stream to raw integer audio.
* audio codec maintained by the <ulink url="http://www.xiph.org/">Xiph.org
* Foundation</ulink>.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v filesrc location=speex.ogg ! oggdemux ! speexdec ! audioconvert ! audioresample ! alsasink
* ]| Decode an Ogg/Speex file. To create an Ogg/Speex file refer to the
* documentation of speexenc.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-speexenc
+ * @title: speexenc
* @see_also: speexdec, oggmux
*
* This element encodes audio as a Speex stream.
* audio codec maintained by the <ulink url="http://www.xiph.org/">Xiph.org
* Foundation</ulink>.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 audiotestsrc num-buffers=100 ! speexenc ! oggmux ! filesink location=beep.ogg
* ]| Encode an Ogg/Speex file.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-vp8dec
+ * @title: vp8dec
* @see_also: vp8enc, matroskademux
*
* This element decodes VP8 streams into raw video.
* </ulink>. It's the successor of On2 VP3, which was the base of the
* Theora video codec.
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 -v filesrc location=videotestsrc.webm ! matroskademux ! vp8dec ! videoconvert ! videoscale ! autovideosink
* ]| This example pipeline will decode a WebM stream and decodes the VP8 video.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-vp8enc
+ * @title: vp8enc
* @see_also: vp8dec, webmmux, oggmux
*
* This element encodes raw video into a VP8 stream.
* for explanation, examples for useful encoding parameters and more details
* on the encoding parameters.
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 -v videotestsrc num-buffers=1000 ! vp8enc ! webmmux ! filesink location=videotestsrc.webm
* ]| This example pipeline will encode a test video source to VP8 muxed in an
* WebM container.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-vp9dec
+ * @title: vp9dec
* @see_also: vp9enc, matroskademux
*
* This element decodes VP9 streams into raw video.
* </ulink>. It's the successor of On2 VP3, which was the base of the
* Theora video codec.
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 -v filesrc location=videotestsrc.webm ! matroskademux ! vp9dec ! videoconvert ! videoscale ! autovideosink
* ]| This example pipeline will decode a WebM stream and decodes the VP9 video.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-vp9enc
+ * @title: vp9enc
* @see_also: vp9dec, webmmux, oggmux
*
* This element encodes raw video into a VP9 stream.
* for explanation, examples for useful encoding parameters and more details
* on the encoding parameters.
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 -v videotestsrc num-buffers=1000 ! vp9enc ! webmmux ! filesink location=videotestsrc.webm
* ]| This example pipeline will encode a test video source to VP9 muxed in an
* WebM container.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-wavpackdec
+ * @title: wavpackdec
*
* WavpackDec decodes framed (for example by the WavpackParse element)
* Wavpack streams and decodes them to raw audio.
* <ulink url="http://www.wavpack.com/">Wavpack</ulink> is an open-source
* audio codec that features both lossless and lossy encoding.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=test.wv ! wavpackparse ! wavpackdec ! audioconvert ! audioresample ! autoaudiosink
* ]| This pipeline decodes the Wavpack file test.wv into raw audio buffers and
* tries to play it back using an automatically found audio sink.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-wavpackenc
+ * @title: wavpackenc
*
* WavpackEnc encodes raw audio into a framed Wavpack stream.
* <ulink url="http://www.wavpack.com/">Wavpack</ulink> is an open-source
* audio codec that features both lossless and lossy encoding.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc num-buffers=500 ! audioconvert ! wavpackenc ! filesink location=sinewave.wv
* ]| This pipeline encodes audio from audiotestsrc into a Wavpack file. The audioconvert element is needed
* gst-launch-1.0 cdda://1 ! audioconvert ! wavpackenc bitrate=128000 ! filesink location=track1.wv
* ]| This pipeline encodes audio from an audio CD into a Wavpack file using
* lossy encoding at a certain bitrate (the file will be fairly small).
- * </refsect2>
+ *
*/
/*
/**
* SECTION:element-alpha
- *
+ * @title: alpha
+ *
* The alpha element adds an alpha channel to a video stream. The values
* of the alpha channel can be either be set to a constant or can be
* dynamically calculated via chroma keying, e.g. blue can be set as
typedef struct _GstAlpha GstAlpha;
typedef struct _GstAlphaClass GstAlphaClass;
-
-/**
+/**
* GstAlphaMethod:
* @ALPHA_METHOD_SET: Set/adjust alpha channel
* @ALPHA_METHOD_GREEN: Chroma Key green
/**
* SECTION:element-alphacolor
+ * @title: alphacolor
*
* The alphacolor element does memory-efficient (in-place) colourspace
* conversion from RGBA to AYUV or AYUV to RGBA while preserving the
/**
* SECTION:element-apedemux
+ * @title: apedemux
*
* apedemux accepts data streams with APE tags at the start or at the end
* (or both). The mime type of the data between the tag blocks is detected
* wavparse or musepackdec, can operate on files containing APE tag
* information.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -t filesrc location=file.mpc ! apedemux ! fakesink
* ]| This pipeline should read any available APE tag information and output it.
* The contents of the file inside the APE tag regions should be detected, and
* the appropriate mime type set on buffers produced from apedemux.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
/**
* SECTION:element-audioamplify
+ * @title: audioamplify
*
* Amplifies an audio stream by a given factor and allows the selection of different clipping modes.
* The difference between the clipping modes is best evaluated by testing.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc wave=saw ! audioamplify amplification=1.5 ! alsasink
* gst-launch-1.0 filesrc location="melo1.ogg" ! oggdemux ! vorbisdec ! audioconvert ! audioamplify amplification=1.5 clipping-method=wrap-negative ! alsasink
* gst-launch-1.0 audiotestsrc wave=saw ! audioconvert ! audioamplify amplification=1.5 clipping-method=wrap-positive ! audioconvert ! alsasink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-audiochebband
+ * @title: audiochebband
*
* Attenuates all frequencies outside (bandpass) or inside (bandreject) of a frequency
* band. The number of poles and the ripple parameter control the rolloff.
*
* As a special case, a Chebyshev type 1 filter with no ripple is a Butterworth filter.
*
- * <note>
- * Be warned that a too large number of poles can produce noise. The most poles are possible with
- * a cutoff frequency at a quarter of the sampling rate.
- * </note>
+ * > Be warned that a too large number of poles can produce noise. The most poles are possible with
+ * > a cutoff frequency at a quarter of the sampling rate.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc freq=1500 ! audioconvert ! audiochebband mode=band-pass lower-frequency=1000 upper-frequency=6000 poles=4 ! audioconvert ! alsasink
* gst-launch-1.0 filesrc location="melo1.ogg" ! oggdemux ! vorbisdec ! audioconvert ! audiochebband mode=band-reject lower-frequency=1000 upper-frequency=4000 ripple=0.2 ! audioconvert ! alsasink
* gst-launch-1.0 audiotestsrc wave=white-noise ! audioconvert ! audiochebband mode=band-pass lower-frequency=1000 upper-frequency=4000 type=2 ! audioconvert ! alsasink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-audiocheblimit
+ * @title: audiocheblimit
*
* Attenuates all frequencies above the cutoff frequency (low-pass) or all frequencies below the
* cutoff frequency (high-pass). The number of poles and the ripple parameter control the rolloff.
*
* As a special case, a Chebyshev type 1 filter with no ripple is a Butterworth filter.
*
- * <note><para>
- * Be warned that a too large number of poles can produce noise. The most poles are possible with
- * a cutoff frequency at a quarter of the sampling rate.
- * </para></note>
+ * > Be warned that a too large number of poles can produce noise. The most poles are possible with
+ * > a cutoff frequency at a quarter of the sampling rate.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc freq=1500 ! audioconvert ! audiocheblimit mode=low-pass cutoff=1000 poles=4 ! audioconvert ! alsasink
* gst-launch-1.0 filesrc location="melo1.ogg" ! oggdemux ! vorbisdec ! audioconvert ! audiocheblimit mode=high-pass cutoff=400 ripple=0.2 ! audioconvert ! alsasink
* gst-launch-1.0 audiotestsrc wave=white-noise ! audioconvert ! audiocheblimit mode=low-pass cutoff=800 type=2 ! audioconvert ! alsasink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-audiodynamic
+ * @title: audiodynamic
*
* This element can act as a compressor or expander. A compressor changes the
* amplitude of all samples above a specific threshold with a specific ratio,
* a expander does the same for all samples below a specific threshold. If
* soft-knee mode is selected the ratio is applied smoothly.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc wave=saw ! audiodynamic characteristics=soft-knee mode=compressor threshold=0.5 ratio=0.5 ! alsasink
* gst-launch-1.0 filesrc location="melo1.ogg" ! oggdemux ! vorbisdec ! audioconvert ! audiodynamic characteristics=hard-knee mode=expander threshold=0.2 ratio=4.0 ! alsasink
* gst-launch-1.0 audiotestsrc wave=saw ! audioconvert ! audiodynamic ! audioconvert ! alsasink
* ]|
- * </refsect2>
+ *
*/
/* TODO: Implement attack and release parameters */
/**
* SECTION:element-audioecho
+ * @title: audioecho
*
* audioecho adds an echo or (simple) reverb effect to an audio stream. The echo
* delay, intensity and the percentage of feedback can be configured.
* channels that are configured surround channels for the delay are
* selected using the surround-channels mask property.
*
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 autoaudiosrc ! audioconvert ! audioecho delay=500000000 intensity=0.6 feedback=0.4 ! audioconvert ! autoaudiosink
* gst-launch-1.0 filesrc location="melo1.ogg" ! decodebin ! audioconvert ! audioecho delay=50000000 intensity=0.6 feedback=0.4 ! audioconvert ! autoaudiosink
* gst-launch-1.0 audiotestsrc ! audioconvert ! audio/x-raw,channels=4 ! audioecho surround-delay=true delay=500000000 ! audioconvert ! autoaudiosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-audiofirfilter
+ * @title: audiofirfilter
*
* audiofirfilter implements a generic audio <ulink url="http://en.wikipedia.org/wiki/Finite_impulse_response">FIR filter</ulink>. Before usage the
* "kernel" property has to be set to the filter kernel that should be
* "rate-changed" signal can be used. This should be done for most
* FIR filters as they're depending on the sampling rate.
*
- * <refsect2>
- * <title>Example application</title>
- * <informalexample><programlisting language="C">
+ * ## Example application
+ * |[<!-- language="C" -->
* <xi:include xmlns:xi="http://www.w3.org/2003/XInclude" parse="text" href="../../../../tests/examples/audiofx/firfilter-example.c" />
- * </programlisting></informalexample>
- * </refsect2>
+ * ]|
+ *
*/
/* FIXME 0.11: suppress warnings for deprecated API such as GValueArray
/**
* SECTION:element-audioiirfilter
+ * @title: audioiirfilter
*
* audioiirfilter implements a generic audio <ulink url="http://en.wikipedia.org/wiki/Infinite_impulse_response">IIR filter</ulink>. Before usage the
* "a" and "b" properties have to be set to the filter coefficients that
* "rate-changed" signal can be used. This should be done for most
* IIR filters as they're depending on the sampling rate.
*
- * <refsect2>
- * <title>Example application</title>
- * <informalexample><programlisting language="C">
+ * ## Example application
+ * |[<!-- language="C" -->
* <xi:include xmlns:xi="http://www.w3.org/2003/XInclude" parse="text" href="../../../../tests/examples/audiofx/iirfilter-example.c" />
- * </programlisting></informalexample>
- * </refsect2>
+ * ]|
+ *
*/
/* FIXME 0.11: suppress warnings for deprecated API such as GValueArray
/**
* SECTION:element-audioinvert
+ * @title: audioinvert
*
* Swaps upper and lower half of audio samples. Mixing an inverted sample on top of
* the original with a slight delay can produce effects that sound like resonance.
* Creating a stereo sample from a mono source, with one channel inverted produces wide-stereo sounds.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc wave=saw ! audioinvert degree=0.4 ! alsasink
* gst-launch-1.0 filesrc location="melo1.ogg" ! oggdemux ! vorbisdec ! audioconvert ! audioinvert degree=0.4 ! alsasink
* gst-launch-1.0 audiotestsrc wave=saw ! audioconvert ! audioinvert degree=0.4 ! audioconvert ! alsasink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-audiokaraoke
+ * @title: audiokaraoke
*
* Remove the voice from audio by filtering the center channel.
* This plugin is useful for karaoke applications.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=song.ogg ! oggdemux ! vorbisdec ! audiokaraoke ! audioconvert ! alsasink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-audiopanorama
+ * @title: audiopanorama
*
* Stereo panorama effect with controllable pan position. One can choose between the default psychoacoustic panning method,
* which keeps the same perceived loudness, and a simple panning method that just controls the volume on one channel.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc wave=saw ! audiopanorama panorama=-1.00 ! alsasink
* gst-launch-1.0 filesrc location="melo1.ogg" ! oggdemux ! vorbisdec ! audioconvert ! audiopanorama panorama=-1.00 ! alsasink
* gst-launch-1.0 audiotestsrc wave=saw ! audioconvert ! audiopanorama panorama=-1.00 ! audioconvert ! alsasink
* gst-launch-1.0 audiotestsrc wave=saw ! audioconvert ! audiopanorama method=simple panorama=-0.50 ! audioconvert ! alsasink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*
* Panning method: psychoacoustic mode keeps the same perceived loudness,
* while simple mode just controls the volume of one channel. It's merely
- * a matter of taste which method should be chosen.
+ * a matter of taste which method should be chosen.
*/
g_object_class_install_property (gobject_class, PROP_METHOD,
g_param_spec_enum ("method", "Panning method",
/**
* SECTION:element-audiowsincband
+ * @title: audiowsincband
*
* Attenuates all frequencies outside (bandpass) or inside (bandreject) of a frequency
* band. The length parameter controls the rolloff, the window parameter
* a much better rolloff when using a larger kernel size and almost linear phase. The only
* disadvantage is the much slower execution time with larger kernels.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc freq=1500 ! audioconvert ! audiowsincband mode=band-pass lower-frequency=3000 upper-frequency=10000 length=501 window=blackman ! audioconvert ! alsasink
* gst-launch-1.0 filesrc location="melo1.ogg" ! oggdemux ! vorbisdec ! audioconvert ! audiowsincband mode=band-reject lower-frequency=59 upper-frequency=61 length=10001 window=hamming ! audioconvert ! alsasink
* gst-launch-1.0 audiotestsrc wave=white-noise ! audioconvert ! audiowsincband mode=band-pass lower-frequency=1000 upper-frequency=2000 length=31 ! audioconvert ! alsasink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-audiowsinclimit
+ * @title: audiowsinclimit
*
* Attenuates all frequencies above the cutoff frequency (low-pass) or all frequencies below the
* cutoff frequency (high-pass). The length parameter controls the rolloff, the window parameter
* a much better rolloff when using a larger kernel size and almost linear phase. The only
* disadvantage is the much slower execution time with larger kernels.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc freq=1500 ! audioconvert ! audiowsinclimit mode=low-pass cutoff=1000 length=501 ! audioconvert ! alsasink
* gst-launch-1.0 filesrc location="melo1.ogg" ! oggdemux ! vorbisdec ! audioconvert ! audiowsinclimit mode=high-pass cutoff=15000 length=501 ! audioconvert ! alsasink
* gst-launch-1.0 audiotestsrc wave=white-noise ! audioconvert ! audiowsinclimit mode=low-pass cutoff=1000 length=10001 window=blackman ! audioconvert ! alsasink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-scaletempo
+ * @title: scaletempo
*
* Scale tempo while maintaining pitch
* (WSOLA-like technique with cross correlation)
*
* Use Sceletempo to apply playback rates without the chipmunk effect.
*
- * <refsect2>
- * <title>Example pipelines</title>
- * <para>
+ * ## Example pipelines
+ *
* |[
* filesrc location=media.ext ! decodebin name=d \
* d. ! queue ! audioconvert ! audioresample ! scaletempo ! audioconvert ! audioresample ! autoaudiosink \
* correlation (roughly a dot-product). Scaletempo consumes most of its CPU
* cycles here. One can use the #GstScaletempo:search propery to tune how far
* the algoritm looks.
- * </para>
- * </refsect2>
+ *
*/
/*
/**
* SECTION:element-aacparse
+ * @title: aacparse
* @short_description: AAC parser
* @see_also: #GstAmrParse
*
* be determined either. However, ADTS format AAC clips can be seeked, and parser
* can also estimate playback position and clip duration.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=abc.aac ! aacparse ! faad ! audioresample ! audioconvert ! alsasink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-ac3parse
+ * @title: ac3parse
* @short_description: AC3 parser
* @see_also: #GstAmrParse, #GstAACParse
*
* This is an AC3 parser.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=abc.ac3 ! ac3parse ! a52dec ! audioresample ! audioconvert ! autoaudiosink
* ]|
- * </refsect2>
+ *
*/
/* TODO:
/**
* SECTION:element-amrparse
+ * @title: amrparse
* @short_description: AMR parser
* @see_also: #GstAmrnbDec, #GstAmrnbEnc
*
* This is an AMR parser capable of handling both narrow-band and wideband
* formats.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=abc.amr ! amrparse ! amrdec ! audioresample ! audioconvert ! alsasink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-dcaparse
+ * @title: dcaparse
* @short_description: DCA (DTS Coherent Acoustics) parser
* @see_also: #GstAmrParse, #GstAACParse, #GstAc3Parse
*
* This is a DCA (DTS Coherent Acoustics) parser.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=abc.dts ! dcaparse ! dtsdec ! audioresample ! audioconvert ! autoaudiosink
* ]|
- * </refsect2>
+ *
*/
/* TODO:
/**
* SECTION:element-flacparse
+ * @title: flacparse
* @see_also: flacdec, oggdemux, vorbisparse
*
* The flacparse element will parse the header packets of the FLAC
* which allows you to (for example) remux an ogg/flac or convert a native FLAC
* format file to an ogg bitstream.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v filesrc location=sine.flac ! flacparse ! identity \
* ! oggmux ! filesink location=sine-remuxed.ogg
* ]| This pipeline converts a native FLAC format file to an ogg bitstream.
* It also illustrates that the streamheader is set in the caps, and that each
* buffer has the timestamp, duration, offset, and offset_end set.
- * </refsect2>
*
*/
*/
/**
* SECTION:element-mpegaudioparse
+ * @title: mpegaudioparse
* @short_description: MPEG audio parser
* @see_also: #GstAmrParse, #GstAACParse
*
* Parses and frames mpeg1 audio streams. Provides seeking.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=test.mp3 ! mpegaudioparse ! mpg123audiodec
* ! audioconvert ! audioresample ! autoaudiosink
* ]|
- * </refsect2>
+ *
*/
/* FIXME: we should make the base class (GstBaseParse) aware of the
/**
* SECTION:element-sbcparse
+ * @title: sbcparse
* @see_also: sbcdec, sbcenc
*
* The sbcparse element will parse a bluetooth SBC audio stream into
*/
/**
* SECTION:element-wavpackparse
+ * @title: wavpackparse
* @short_description: Wavpack parser
* @see_also: #GstAmrParse, #GstAACParse
*
* This is an Wavpack parser.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=abc.wavpack ! wavpackparse ! wavpackdec ! audioresample ! audioconvert ! autoaudiosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-auparse
+ * @title: auparse
*
* Parses .au files mostly originating from sun os based computers.
*/
/**
* SECTION:element-autoaudiosink
+ * @title: autoaudiosink
* @see_also: autovideosink, alsasink, osssink
*
* autoaudiosink is an audio sink that automatically detects an appropriate
* that have <quote>Sink</quote> and <quote>Audio</quote> in the class field
* of their element information, and also have a non-zero autoplugging rank.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v -m audiotestsrc ! audioconvert ! audioresample ! autoaudiosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-autoaudiosrc
+ * @title: autoaudiosrc
* @see_also: autovideosrc, alsasrc, osssrc
*
* autoaudiosrc is an audio source that automatically detects an appropriate
* that have <quote>Source</quote> and <quote>Audio</quote> in the class field
* of their element information, and also have a non-zero autoplugging rank.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v -m autoaudiosrc ! audioconvert ! audioresample ! autoaudiosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-autovideosink
+ * @title: autovideosink
* @see_also: autoaudiosink, ximagesink, xvimagesink, sdlvideosink
*
* autovideosink is a video sink that automatically detects an appropriate
* that have <quote>Sink</quote> and <quote>Video</quote> in the class field
* of their element information, and also have a non-zero autoplugging rank.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v -m videotestsrc ! autovideosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-autovideosrc
+ * @title: autovideosrc
* @see_also: autoaudiosrc, v4l2src, v4lsrc
*
* autovideosrc is a video src that automatically detects an appropriate
* that have <quote>Source</quote> and <quote>Video</quote> in the class field
* of their element information, and also have a non-zero autoplugging rank.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v -m autovideosrc ! xvimagesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-avidemux
+ * @title: avidemux
*
* Demuxes an .avi file into raw or compressed audio and/or video streams.
*
* This element supports both push and pull-based scheduling, depending on the
* capabilities of the upstream elements.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=test.avi ! avidemux name=demux demux.audio_00 ! decodebin ! audioconvert ! audioresample ! autoaudiosink demux.video_00 ! queue ! decodebin ! videoconvert ! videoscale ! autovideosink
* ]| Play (parse and decode) an .avi file and try to output it to
* an automatically detected soundcard and videosink. If the AVI file contains
* compressed audio or video data, this will only work if you have the
* right decoder elements/plugins installed.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-avimux
+ * @title: avimux
*
* Muxes raw or compressed audio and/or video streams into an AVI file.
*
- * <refsect2>
- * <title>Example launch lines</title>
- * <para>(write everything in one line, without the backslash characters)</para>
+ * ## Example launch lines
+ * (write everything in one line, without the backslash characters)
* |[
* gst-launch-1.0 videotestsrc num-buffers=250 \
* ! 'video/x-raw,format=(string)I420,width=320,height=240,framerate=(fraction)25/1' \
* ]| This will create an .AVI file containing the same test video and sound
* as above, only that both streams will be compressed this time. This will
* only work if you have the necessary encoder elements installed of course.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-avisubtitle
+ * @title: avisubtitle
*
- * <refsect2>
- * <para>
* Parses the subtitle stream from an avi file.
- * </para>
- * <title>Example launch line</title>
- * <para>
- * <programlisting>
+ *
+ * ## Example launch line
+ *
+ * |[
* gst-launch-1.0 filesrc location=subtitle.avi ! avidemux name=demux ! queue ! avisubtitle ! subparse ! textoverlay name=overlay ! videoconvert ! autovideosink demux. ! queue ! decodebin ! overlay.
- * </programlisting>
+ * ]|
* This plays an avi file with a video and subtitle stream.
- * </para>
- * </refsect2>
+ *
*/
/* example of a subtitle chunk in an avi file
*/
/**
* SECTION:element-cutter
+ * @title: cutter
*
* Analyses the audio signal for periods of silence. The start and end of
* silence is signalled by bus messages named
- * <classname>"cutter"</classname>.
+ * `cutter`.
+ *
* The message's structure contains two fields:
- * <itemizedlist>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"timestamp"</classname>:
- * the timestamp of the buffer that triggered the message.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * gboolean
- * <classname>"above"</classname>:
- * %TRUE for begin of silence and %FALSE for end of silence.
- * </para>
- * </listitem>
- * </itemizedlist>
*
- * <refsect2>
- * <title>Example launch line</title>
+ * * #GstClockTime `timestamp`: the timestamp of the buffer that triggered the message.
+ * * gboolean `above`: %TRUE for begin of silence and %FALSE for end of silence.
+ *
+ * ## Example launch line
* |[
* gst-launch-1.0 -m filesrc location=foo.ogg ! decodebin ! audioconvert ! cutter ! autoaudiosink
* ]| Show cut messages.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-breakmydata
+ * @title: breakmydata
*
* This element modifies the contents of the buffer it is passed randomly
* according to the parameters set.
/**
* SECTION:element-capssetter
+ * @title: capssetter
*
* Sets or merges caps on a stream's buffers. That is, a buffer's caps are
* updated using (fields of) #GstCapsSetter:caps. Note that this may contain
* multiple structures (though not likely recommended), but each of these must
* be fixed (or will otherwise be rejected).
- *
+ *
* If #GstCapsSetter:join is %TRUE, then the incoming caps' mime-type is
* compared to the mime-type(s) of provided caps and only matching structure(s)
* are considered for updating.
- *
+ *
* If #GstCapsSetter:replace is %TRUE, then any caps update is preceded by
* clearing existing fields, making provided fields (as a whole) replace
* incoming ones. Otherwise, no clearing is performed, in which case provided
* fields are added/merged onto incoming caps
- *
+ *
* Although this element might mainly serve as debug helper,
* it can also practically be used to correct a faulty pixel-aspect-ratio,
* or to modify a yuv fourcc value to effectively swap chroma components or such
/**
* SECTION:element-pushfilesrc
+ * @title: pushfilesrc
* @see_also: filesrc
*
* This element is only useful for debugging purposes. It implements an URI
* connection with the playbin element (which creates a source based on the
* URI passed).
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -m playbin uri=pushfile:///home/you/some/file.ogg
* ]| This plays back the given file using playbin, with the demuxer operating
* push-based.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-taginject
+ * @title: taginject
*
* Element that injects new metadata tags, but passes incoming data through
* unmodified.
*
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 audiotestsrc num-buffers=100 ! taginject tags="title=testsrc,artist=gstreamer" ! vorbisenc ! oggmux ! filesink location=test.ogg
* ]| set title and artist
* |[
* gst-launch-1.0 audiotestsrc num-buffers=100 ! taginject tags="keywords=\{\"testone\",\"audio\"\},title=\"audio\ testtone\"" ! vorbisenc ! oggmux ! filesink location=test.ogg
* ]| set keywords and title demonstrating quoting of special chars and handling lists
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-progressreport
+ * @title: progressreport
*
* The progressreport element can be put into a pipeline to report progress,
* which is done by doing upstream duration and position queries in regular
* is in reference to an internal point of a pipeline and not the pipeline as
* a whole).
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -m filesrc location=foo.ogg ! decodebin ! progressreport update-freq=1 ! audioconvert ! audioresample ! autoaudiosink
* ]| This shows a progress query where a duration is available.
* |[
* gst-launch-1.0 -m audiotestsrc ! progressreport update-freq=1 ! audioconvert ! autoaudiosink
* ]| This shows a progress query where no duration is available.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-rndbuffersize
+ * @title: rndbuffersize
*
* This element pulls buffers with random sizes from the source.
*/
/**
* SECTION:element-deinterlace
+ * @title: deinterlace
*
* deinterlace deinterlaces interlaced video frames to progressive video frames.
* For this different algorithms can be selected which will be described later.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v filesrc location=/path/to/file ! decodebin ! videoconvert ! deinterlace ! videoconvert ! autovideosink
* ]| This pipeline deinterlaces a video file with the default deinterlacing options.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
* the "method" child via the #GstChildProxy interface and
* setting the appropiate properties on it.
*
- * <itemizedlist>
- * <listitem>
- * <para>
- * tomsmocomp
- * Motion Adaptive: Motion Search
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * greedyh
- * Motion Adaptive: Advanced Detection
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * greedyl
- * Motion Adaptive: Simple Detection
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * vfir
- * Blur vertical
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * linear
- * Linear interpolation
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * linearblend
- * Linear interpolation in time domain. Any motion causes significant
- * ghosting, so this method should not be used.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * scalerbob
- * Double lines
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * weave
- * Weave. Bad quality, do not use.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * weavetff
- * Progressive: Top Field First. Bad quality, do not use.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * weavebff
- * Progressive: Bottom Field First. Bad quality, do not use.
- * </para>
- * </listitem>
- * </itemizedlist>
+ * * tomsmocomp Motion Adaptive: Motion Search
+ * * greedyh Motion Adaptive: Advanced Detection
+ * * greedyl Motion Adaptive: Simple Detection
+ * * vfir Blur vertical
+ * * linear Linear interpolation
+ * * linearblend Linear interpolation in time domain.
+ * Any motion causes significant ghosting, so this
+ * method should not be used.
+ * * scalerbob Double lines
+ * * weave Weave. Bad quality, do not use.
+ * * weavetff Progressive: Top Field First. Bad quality, do not use.
+ * * weavebff Progressive: Bottom Field First. Bad quality, do not use.
*/
g_object_class_install_property (gobject_class, PROP_METHOD,
g_param_spec_enum ("method",
/**
* SECTION:element-dtmfsrc
+ * @title: dtmfsrc
* @see_also: rtpdtmsrc, rtpdtmfmuxx
*
* The DTMFSrc element generates DTMF (ITU-T Q.23 Specification) tone packets on request
* DTMFSrc element inside the pipeline) about the start of a DTMF named
* event '1' of volume -25 dBm0:
*
- * <programlisting>
+ * |[
* structure = gst_structure_new ("dtmf-event",
* "type", G_TYPE_INT, 1,
* "number", G_TYPE_INT, 1,
*
* event = gst_event_new_custom (GST_EVENT_CUSTOM_UPSTREAM, structure);
* gst_element_send_event (pipeline, event);
- * </programlisting>
+ * ]|
*
* When a DTMF tone actually starts or stop, a "dtmf-event-processed"
* element #GstMessage with the same fields as the "dtmf-event"
*/
/**
* SECTION:element-rtpdtmfdepay
+ * @title: rtpdtmfdepay
* @see_also: rtpdtmfsrc, rtpdtmfmux
*
* This element takes RTP DTMF packets and produces sound. It also emits a
/**
* SECTION:element-rtpdtmfsrc
+ * @title: rtpdtmfsrc
* @see_also: dtmfsrc, rtpdtmfdepay, rtpdtmfmux
*
* The RTPDTMFSrc element generates RTP DTMF (RFC 2833) event packets on request
* RTPDTMFSrc element inside the pipeline) about the start of an RTP DTMF named
* event '1' of volume -25 dBm0:
*
- * <programlisting>
+ * |[
* structure = gst_structure_new ("dtmf-event",
* "type", G_TYPE_INT, 1,
* "number", G_TYPE_INT, 1,
*
* event = gst_event_new_custom (GST_EVENT_CUSTOM_UPSTREAM, structure);
* gst_element_send_event (pipeline, event);
- * </programlisting>
+ * ]|
*
* When a DTMF tone actually starts or stop, a "dtmf-event-processed"
* element #GstMessage with the same fields as the "dtmf-event"
/**
* SECTION:element-agingtv
+ * @title: agingtv
*
* AgingTV ages a video stream in realtime, changes the colors and adds
* scratches and dust.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! agingtv ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of agingtv on a test stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-dicetv
+ * @title: dicetv
*
* DiceTV 'dices' the screen up into many small squares, each defaulting
* to a size of 16 pixels by 16 pixels.. Each square is rotated randomly
* counterclockwise). The direction of each square normally remains
* consistent between each frame.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! dicetv ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of dicetv on a test stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-edgetv
+ * @title: edgetv
*
* EdgeTV detects edges and display it in good old low resolution
* computer way.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! edgetv ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of edgetv on a test stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-optv
+ * @title: optv
*
* Traditional black-white optical animation is now resurrected as a
* real-time video effect. Input images are binarized and combined with
* various optical pattern.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! optv ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of optv on a test stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-quarktv
+ * @title: quarktv
*
* QuarkTV disolves moving objects. It picks up pixels from
* the last frames randomly.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! quarktv ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of quarktv on a test stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-radioactv
+ * @title: radioactv
*
* RadioacTV does *NOT* detect a radioactivity. It detects a difference
* from previous frame and blurs it.
- *
+ *
* RadioacTV has 4 mode, normal, strobe1, strobe2 and trigger.
* In trigger mode, effect appears only when the trigger property is %TRUE.
*
* current frame and previous frame dropped, while strobe2 mode uses the difference from
* previous frame displayed. The effect of strobe2 is stronger than strobe1.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! radioactv ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of radioactv on a test stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-revtv
+ * @title: revtv
*
* RevTV acts like a video waveform monitor for each line of video
* processed. This creates a pseudo 3D effect based on the brightness
* of the video along each line.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! revtv ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of revtv on a test stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rippletv
+ * @title: rippletv
*
* RippleTV does ripple mark effect on the video input. The ripple is caused
* by motion or random rain drops.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! rippletv ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of rippletv on a test stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-shagadelictv
+ * @title: shagadelictv
*
* Oh behave, ShagedelicTV makes images shagadelic!
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! shagadelictv ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of shagadelictv on a test stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-streaktv
+ * @title: streaktv
*
* StreakTV makes after images of moving objects.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! streaktv ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of streaktv on a test stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-vertigotv
+ * @title: vertigotv
*
* VertigoTV is a loopback alpha blending effector with rotating and scaling.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! vertigotv ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of vertigotv on a test stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-warptv
+ * @title: warptv
*
* WarpTV does realtime goo'ing of the video input.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! warptv ! videoconvert ! autovideosink
* ]| This pipeline shows the effect of warptv on a test stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-equalizer-10bands
+ * @title: equalizer-10bands
*
* The 10 band equalizer element allows to change the gain of 10 equally distributed
* frequency bands between 30 Hz and 15 kHz.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=song.ogg ! oggdemux ! vorbisdec ! audioconvert ! equalizer-10bands band2=3.0 ! alsasink
* ]| This raises the volume of the 3rd band which is at 119 Hz by 3 db.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-equalizer-3bands
+ * @title: equalizer-3bands
*
* The 3-band equalizer element allows to change the gain of a low frequency,
* medium frequency and high frequency band.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=song.ogg ! oggdemux ! vorbisdec ! audioconvert ! equalizer-3bands band1=6.0 ! alsasink
* ]| This raises the volume of the 2nd band, which is at 1110 Hz, by 6 db.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-equalizer-nbands
+ * @title: equalizer-nbands
*
* The n-band equalizer element is a fully parametric equalizer. It allows to
* select between 1 and 64 bands and has properties on each band to change
* the center frequency, band width and gain.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=song.ogg ! oggdemux ! vorbisdec ! audioconvert ! equalizer-nbands num-bands=15 band5::gain=6.0 ! alsasink
* ]| This make the equalizer use 15 bands and raises the volume of the 5th band by 6 db.
- * </refsect2>
- * <refsect2>
- * <title>Example code</title>
+ *
+ * ## Example code
* |[
* #include <gst/gst.h>
- *
+ *
* ...
* typedef struct {
* gfloat freq;
* gfloat width;
* gfloat gain;
* } GstEqualizerBandState;
- *
+ *
* ...
- *
+ *
* GstElement *equalizer;
* GObject *band;
* gint i;
* {6000.0, 1000.0, 6.0},
* {3000.0, 120.0, 2.0}
* };
- *
+ *
* ...
- *
+ *
* equalizer = gst_element_factory_make ("equalizer-nbands", "equalizer");
* g_object_set (G_OBJECT (equalizer), "num-bands", 5, NULL);
- *
+ *
* ...
- *
+ *
* for (i = 0; i < 5; i++) {
* band = gst_child_proxy_get_child_by_index (GST_CHILD_PROXY (equalizer), i);
* g_object_set (G_OBJECT (band), "freq", state[i].freq,
* "gain", state[i].gain);
* g_object_unref (G_OBJECT (band));
* }
- *
+ *
* ...
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-flvdemux
+ * @title: flvdemux
*
* flvdemux demuxes an FLV file into the different contained streams.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v filesrc location=/path/to/flv ! flvdemux ! audioconvert ! autoaudiosink
* ]| This pipeline demuxes an FLV file and outputs the contained raw audio streams.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-flvmux
+ * @title: flvmux
*
* flvmux muxes different streams into an FLV file.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v flvmux name=mux ! filesink location=test.flv audiotestsrc samplesperbuffer=44100 num-buffers=10 ! faac ! mux. videotestsrc num-buffers=250 ! video/x-raw,framerate=25/1 ! x264enc ! mux.
* ]| This pipeline encodes a test audio and video stream and muxes both into an FLV file.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:gstindex
+ * @title: GstIndex
* @short_description: Generate indexes on objects
* @see_also: #GstIndexFactory
*
* to a string. That string will be used to register or look up an id
* in the index.
*
- * <note>
- * The caller must not hold @writer's #GST_OBJECT_LOCK, as the default
- * resolver may call functions that take the object lock as well, and
- * the lock is not recursive.
- * </note>
+ * > The caller must not hold @writer's #GST_OBJECT_LOCK, as the default
+ * > resolver may call functions that take the object lock as well, and
+ * > the lock is not recursive.
*
* Returns: TRUE if the writer would be mapped to an id.
*/
*/
/**
* SECTION:element-flxdec
+ * @title: flxdec
*
* This element decodes fli/flc/flx-video into raw video
*/
int mustInitBuffers;
int interlace_start;
- /** modif by jeko : fixedpoint : buffration = (16:16) (donc 0<=buffration<=2^16) */
+ /* modif by jeko : fixedpoint : buffration = (16:16) (donc 0<=buffration<=2^16) */
int buffratio;
int *firedec;
- /** modif d'optim by Jeko : precalcul des 4 coefs resultant des 2 pos */
+ /* modif d'optim by Jeko : precalcul des 4 coefs resultant des 2 pos */
int precalCoef[BUFFPOINTNB][BUFFPOINTNB];
- /** calculatePXandPY statics */
+ /* calculatePXandPY statics */
int wave;
int wavesp;
}
}
-/** generate the water fx horizontal direction buffer */
+/* generate the water fx horizontal direction buffer */
static void
generateTheWaterFXHorizontalDirectionBuffer (PluginInfo * goomInfo,
ZoomFilterFXWrapperData * data)
-/**
-* Main work for the dynamic displacement map.
+/*
+ * Main work for the dynamic displacement map.
*
* Reads data from pix1, write to pix2.
*
* Useful datas for this FX are stored in ZoomFilterData.
- *
* If you think that this is a strange function name, let me say that a long time ago,
* there has been a slow version and a gray-level only one. Then came these function,
* fast and workin in RGB colorspace ! nice but it only was applying a zoom to the image.
if (!BVAL (data->enabled_bp))
return;
- /** changement de taille **/
+ /* changement de taille */
if ((data->prevX != resx) || (data->prevY != resy)) {
data->prevX = resx;
data->prevY = resy;
if (data->interlace_start != -2)
zf = NULL;
- /** changement de config **/
+ /* changement de config */
if (zf) {
data->reverse = zf->reverse;
data->general_speed = (float) (zf->vitesse - 128) / 128.0f;
data->hPlaneEffect = 0;
data->noisify = 2;
- /** modif by jeko : fixedpoint : buffration = (16:16) (donc 0<=buffration<=2^16) */
+ /* modif by jeko : fixedpoint : buffration = (16:16) (donc 0<=buffration<=2^16) */
data->buffratio = 0;
data->firedec = 0;
_this->params = &data->params;
_this->fx_data = (void *) data;
- /** modif d'optim by Jeko : precalcul des 4 coefs resultant des 2 pos */
+ /* modif d'optim by Jeko : precalcul des 4 coefs resultant des 2 pos */
generatePrecalCoef (data->precalCoef);
}
#if 1
/* ndef COLOR_BGRA */
-/** position des composantes **/
+/* position des composantes */
#define BLEU 0
#define VERT 1
#define ROUGE 2
int middleX, middleY; /* milieu de l'effet */
char reverse; /* inverse la vitesse */
char mode; /* type d'effet � appliquer (cf les #define) */
- /** @since June 2001 */
+ /* @since June 2001 */
int hPlaneEffect; /* deviation horitontale */
int vPlaneEffect; /* deviation verticale */
- /** @since April 2002 */
+ /* @since April 2002 */
int waveEffect; /* applique une "surcouche" de wave effect */
int hypercosEffect; /* applique une "surcouche de hypercos effect */
#define STATES_MAX_NB 128
-/**
+/*
* Gives informations about the sound.
*/
struct _SOUND_INFO {
};
-/**
+/*
* Allows FXs to know the current state of the plugin.
*/
struct _PLUGIN_INFO {
int nbVisuals;
VisualFX **visuals; /* pointers on all the visual fx */
- /** The known FX */
+ /* The known FX */
VisualFX convolve_fx;
VisualFX star_fx;
VisualFX zoomFilter_fx;
VisualFX tentacles_fx;
VisualFX ifs_fx;
- /** image buffers */
+ /* image buffers */
guint32 *pixel;
guint32 *back;
Pixel *p1, *p2;
Pixel *conv;
Pixel *outputBuf;
- /** state of goom */
+ /* state of goom */
guint32 cycle;
GoomState states[STATES_MAX_NB];
int statesNumber;
GoomState *curGState;
- /** effet de ligne.. */
+ /* effet de ligne.. */
GMLine *gmline1;
GMLine *gmline2;
- /** sinus table */
+ /* sinus table */
int sintable[0x10000];
/* INTERNALS */
- /** goom_update internals.
+ /* goom_update internals.
* I took all static variables from goom_update and put them here.. for the moment.
*/
struct {
/**
* SECTION:element-goom
+ * @title: goom
* @see_also: synaesthesia
*
* Goom is an audio visualisation element. It creates warping structures
* based on the incoming audio signal.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v audiotestsrc ! goom ! videoconvert ! xvimagesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
| (col[ROUGE] << (ROUGE * 8));
}
-/** VISUAL_FX WRAPPER FOR IFS */
+/* VISUAL_FX WRAPPER FOR IFS */
static void
ifs_vfx_apply (VisualFX * _this, Pixel * src, Pixel * dest,
#include "goom_plugin_info.h"
#include "goom_config.h"
-/** change les donnees du SoundInfo */
+/* change les donnees du SoundInfo */
void evaluate_sound(gint16 data[2][512], SoundInfo *sndInfo);
#endif
int middleY;
char reverse;
char mode;
- /** @since June 2001 */
+ /* @since June 2001 */
int hPlaneEffect;
int vPlaneEffect;
char noisify;
typedef struct
{
-/**-----------------------------------------------------**
- ** SHARED DATA **
- **-----------------------------------------------------**/
+/*-----------------------------------------------------*
+ * SHARED DATA *
+ *-----------------------------------------------------*/
guint32 *pixel;
guint32 *back;
guint32 *p1, *p2;
/**
* SECTION:element-goom2k1
+ * @title: goom2k1
* @see_also: goom, synaesthesia
*
* Goom2k1 is an audio visualisation element. It creates warping structures
* based on the incoming audio signal. Goom2k1 is the older version of the
* visualisation. Also available is goom2k4, with a different look.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v audiotestsrc ! goom2k1 ! videoconvert ! xvimagesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-icydemux
+ * @title: icydemux
*
* icydemux accepts data streams with ICY metadata at known intervals, as
* transmitted from an upstream element (usually read as response headers from
* an HTTP stream). The mime type of the data between the tag blocks is
* detected using typefind functions, and the appropriate output mime type set
- * on outgoing buffers.
+ * on outgoing buffers.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 souphttpsrc location=http://some.server/ iradio-mode=true ! icydemux ! fakesink -t
* ]| This pipeline should read any available ICY tag information and output it.
* The contents of the stream should be detected, and the appropriate mime
* type set on buffers produced from icydemux. (Using gnomevfssrc, neonhttpsrc
* or giosrc instead of souphttpsrc should also work.)
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
/**
* SECTION:element-id3demux
+ * @title: id3demux
*
* id3demux accepts data streams with either (or both) ID3v2 regions at the
* start, or ID3v1 at the end. The mime type of the data between the tag blocks
* is detected using typefind functions, and the appropriate output mime type
- * set on outgoing buffers.
+ * set on outgoing buffers.
*
* The element is only able to read ID3v1 tags from a seekable stream, because
* they are at the end of the stream. That is, when get_range mode is supported
* This id3demux element replaced an older element with the same name which
* relied on libid3tag from the MAD project.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=file.mp3 ! id3demux ! fakesink -t
* ]| This pipeline should read any available ID3 tag information and output it.
* The contents of the file inside the ID3 tag regions should be detected, and
* the appropriate mime type set on buffers produced from id3demux.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
/**
* SECTION:element-imagefreeze
+ * @title: imagefreeze
*
* The imagefreeze element generates a still frame video stream from
* the input. It duplicates the first frame with the framerate requested
* by downstream, allows seeking and answers queries.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v filesrc location=some.png ! decodebin ! imagefreeze ! autovideosink
* ]| This pipeline shows a still frame stream of a PNG file.
- * </refsect2>
+ *
*/
/* This is based on the imagefreeze element from PiTiVi:
/**
* SECTION:element-deinterleave
+ * @title: deinterleave
* @see_also: interleave
*
* Splits one interleaved multichannel audio stream into many mono audio streams.
- *
+ *
* This element handles all raw audio formats and supports changing the input caps as long as
* all downstream elements can handle the new caps and the number of channels and the channel
* positions stay the same. This restriction will be removed in later versions by adding or
* removing some source pads as required.
- *
+ *
* In most cases a queue and an audioconvert element should be added after each source pad
* before further processing of the audio data.
- *
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=/path/to/file.mp3 ! decodebin ! audioconvert ! "audio/x-raw,channels=2 ! deinterleave name=d d.src_0 ! queue ! audioconvert ! vorbisenc ! oggmux ! filesink location=channel1.ogg d.src_1 ! queue ! audioconvert ! vorbisenc ! oggmux ! filesink location=channel2.ogg
* ]| Decodes an MP3 file and encodes the left and right channel into separate
* ]| Decodes and deinterleaves a Stereo MP3 file into separate channels and
* then interleaves the channels again to a WAV file with the channel with the
* channels exchanged.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* GstDeinterleave:keep-positions
- *
+ *
* Keep positions: When enable the caps on the output buffers will
* contain the original channel positions. This can be used to correctly
* interleave the output again later but can also lead to unwanted effects
/**
* SECTION:element-interleave
+ * @title: interleave
* @see_also: deinterleave
*
* Merges separate mono inputs into one interleaved stream.
- *
+ *
* This element handles all raw floating point sample formats and all signed integer sample formats. The first
* caps on one of the sinkpads will set the caps of the output so usually an audioconvert element should be
* placed before every sinkpad of interleave.
- *
+ *
* It's possible to change the number of channels while the pipeline is running by adding or removing
* some of the request pads but this will change the caps of the output buffers. Changing the input
* caps is _not_ supported yet.
- *
+ *
* The channel number of every sinkpad in the out can be retrieved from the "channel" property of the pad.
- *
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=file.mp3 ! decodebin ! audioconvert ! "audio/x-raw,channels=2" ! deinterleave name=d interleave name=i ! audioconvert ! wavenc ! filesink location=test.wav d.src_0 ! queue ! audioconvert ! i.sink_1 d.src_1 ! queue ! audioconvert ! i.sink_0
* ]| Decodes and deinterleaves a Stereo MP3 file into separate channels and
* channel-masks defined in the sink pads ensures a sane mapping of the mono
* streams into the stereo stream. NOTE: the proper way to map channels in
* code is by using the channel-positions property of the interleave element.
- * </refsect2>
+ *
*/
/* FIXME 0.11: suppress warnings for deprecated API such as GValueArray
/**
* GstInterleave:channel-positions
- *
+ *
* Channel positions: This property controls the channel positions
* that are used on the src caps. The number of elements should be
* the same as the number of sink pads and the array should contain
/**
* GstInterleave:channel-positions-from-input
- *
+ *
* Channel positions from input: If this property is set to %TRUE the channel
* positions will be taken from the input caps if valid channel positions for
* the output can be constructed from them. If this is set to %TRUE setting the
/**
* SECTION:element-qtmoovrecover
+ * @title: qtmoovrecover
* @short_description: Utility element for recovering unfinished quicktime files
*
- * <refsect2>
- * <para>
* This element recovers quicktime files created with qtmux using the moov
* recovery feature.
- * </para>
- * <title>Example pipelines</title>
- * <para>
- * <programlisting>
+ *
+ * ## Example pipelines
+ *
+ * |[
* TODO
- * </programlisting>
- * </para>
- * </refsect2>
+ * ]|
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-mp4mux
+ * @title: mp4mux
* @short_description: Muxer for ISO MPEG-4 (.mp4) files
*
* This element merges streams (audio and video) into ISO MPEG-4 (.mp4) files.
* #GstMp4Mux:streamable allows foregoing to add index metadata (at the end of
* file).
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 gst-launch-1.0 v4l2src num-buffers=50 ! queue ! x264enc ! mp4mux ! filesink location=video.mp4
* ]|
* Records a video stream captured from a v4l2 device, encodes it into H.264
* and muxes it into an mp4 file.
- * </refsect2>
+ *
*/
/* ============================= 3gppmux ==================================== */
/**
* SECTION:element-3gppmux
+ * @title: 3gppmux
* @short_description: Muxer for 3GPP (.3gp) files
*
* This element merges streams (audio and video) into 3GPP (.3gp) files.
* #Gst3GPPMux:streamable allows foregoing to add index metadata (at the end of
* file).
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 v4l2src num-buffers=50 ! queue ! ffenc_h263 ! 3gppmux ! filesink location=video.3gp
* ]|
* Records a video stream captured from a v4l2 device, encodes it into H.263
* and muxes it into an 3gp file.
- * </refsect2>
*
* Documentation last reviewed on 2011-04-21
*/
/**
* SECTION:element-mj2mux
+ * @title: mj2mux
* @short_description: Muxer for Motion JPEG-2000 (.mj2) files
*
* This element merges streams (audio and video) into MJ2 (.mj2) files.
* #GstMJ2Mux:streamable allows foregoing to add index metadata (at the end of
* file).
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 v4l2src num-buffers=50 ! queue ! jp2kenc ! mj2mux ! filesink location=video.mj2
* ]|
* Records a video stream captured from a v4l2 device, encodes it into JPEG-2000
* and muxes it into an mj2 file.
- * </refsect2>
*
* Documentation last reviewed on 2011-04-21
*/
/**
* SECTION:element-ismlmux
+ * @title: ismlmux
* @short_description: Muxer for ISML smooth streaming (.isml) files
*
* This element merges streams (audio and video) into MJ2 (.mj2) files.
* #GstISMLMux:streamable allows foregoing to add index metadata (at the end of
* file).
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 v4l2src num-buffers=50 ! queue ! jp2kenc ! mj2mux ! filesink location=video.mj2
* ]|
* Records a video stream captured from a v4l2 device, encodes it into JPEG-2000
* and muxes it into an mj2 file.
- * </refsect2>
*
* Documentation last reviewed on 2011-04-21
*/
/**
* SECTION:element-qtmux
+ * @title: qtmux
* @short_description: Muxer for quicktime(.mov) files
*
* This element merges streams (audio and video) into QuickTime(.mov) files.
* a fixed sample size (such as raw audio and Prores Video) and that don't
* have reordered samples.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 v4l2src num-buffers=500 ! video/x-raw,width=320,height=240 ! videoconvert ! qtmux ! filesink location=video.mov
* ]|
* Records a video stream captured from a v4l2 device and muxes it into a qt file.
- * </refsect2>
+ *
*/
/*
/**
* SECTION:element-qtdemux
+ * @title: qtdemux
*
* Demuxes a .mov file into raw or compressed audio and/or video streams.
*
* This element supports both push and pull-based scheduling, depending on the
* capabilities of the upstream elements.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=test.mov ! qtdemux name=demux demux.audio_0 ! queue ! decodebin ! audioconvert ! audioresample ! autoaudiosink demux.video_0 ! queue ! decodebin ! videoconvert ! videoscale ! autovideosink
* ]| Play (parse and decode) a .mov file and try to output it to
* an automatically detected soundcard and videosink. If the MOV file contains
* compressed audio or video data, this will only work if you have the
* right decoder elements/plugins installed.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-alawdec
+ * @title: alawdec
*
* This element decodes alaw audio. Alaw coding is also known as G.711.
*/
*/
/**
* SECTION:element-alawenc
+ * @title: alawenc
*
* This element encode alaw audio. Alaw coding is also known as G.711.
*/
for (i = 0; i < numsamples; i++) {
sample = in[i];
- /** get the sample into sign-magnitude **/
+ /* get the sample into sign-magnitude */
sign = (sample >> 8) & 0x80; /* set aside the sign */
if (sign != 0) {
sample = -sample; /* get magnitude */
if (((guint16) sample) > CLIP)
sample = CLIP; /* clip the magnitude */
- /** convert from 16 bit linear to ulaw **/
+ /* convert from 16 bit linear to ulaw */
sample = sample + BIAS;
exponent = exp_lut[(sample >> 7) & 0xFF];
mantissa = (sample >> (exponent + 3)) & 0x0F;
*/
/**
* SECTION:element-mulawdec
+ * @title: mulawdec
*
* This element decodes mulaw audio. Mulaw coding is also known as G.711.
*/
*/
/**
* SECTION:element-mulawenc
+ * @title: mulawenc
*
* This element encode mulaw audio. Mulaw coding is also known as G.711.
*/
/**
* SECTION:element-level
+ * @title: level
*
* Level analyses incoming audio buffers and, if the #GstLevel:message property
* is %TRUE, generates an element message named
- * <classname>"level"</classname>:
- * after each interval of time given by the #GstLevel:interval property.
+ * `level`: after each interval of time given by the #GstLevel:interval property.
* The message's structure contains these fields:
- * <itemizedlist>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"timestamp"</classname>:
- * the timestamp of the buffer that triggered the message.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"stream-time"</classname>:
- * the stream time of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"running-time"</classname>:
- * the running_time of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"duration"</classname>:
- * the duration of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"endtime"</classname>:
- * the end time of the buffer that triggered the message as stream time (this
- * is deprecated, as it can be calculated from stream-time + duration)
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GValueArray of #gdouble
- * <classname>"peak"</classname>:
- * the peak power level in dB for each channel
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GValueArray of #gdouble
- * <classname>"decay"</classname>:
- * the decaying peak power level in dB for each channel
+ *
+ * * #GstClockTime `timestamp`: the timestamp of the buffer that triggered the message.
+ * * #GstClockTime `stream-time`: the stream time of the buffer.
+ * * #GstClockTime `running-time`: the running_time of the buffer.
+ * * #GstClockTime `duration`: the duration of the buffer.
+ * * #GstClockTime `endtime`: the end time of the buffer that triggered the message as
+ * stream time (this is deprecated, as it can be calculated from stream-time + duration)
+ * * #GValueArray of #gdouble `peak`: the peak power level in dB for each channel
+ * * #GValueArray of #gdouble `decay`: the decaying peak power level in dB for each channel
* The decaying peak level follows the peak level, but starts dropping if no
* new peak is reached after the time given by the #GstLevel:peak-ttl.
* When the decaying peak level drops, it does so at the decay rate as
* specified by the #GstLevel:peak-falloff.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GValueArray of #gdouble
- * <classname>"rms"</classname>:
- * the Root Mean Square (or average power) level in dB for each channel
- * </para>
- * </listitem>
- * </itemizedlist>
+ * * #GValueArray of #gdouble `rms`: the Root Mean Square (or average power) level in dB
+ * for each channel
+ *
+ * ## Example application
*
- * <refsect2>
- * <title>Example application</title>
* <informalexample><programlisting language="C">
* <xi:include xmlns:xi="http://www.w3.org/2003/XInclude" parse="text" href="../../../../tests/examples/level/level-example.c" />
* </programlisting></informalexample>
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-matroskademux
+ * @title: matroskademux
*
* matroskademux demuxes a Matroska file into the different contained streams.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v filesrc location=/path/to/mkv ! matroskademux ! vorbisdec ! audioconvert ! audioresample ! autoaudiosink
* ]| This pipeline demuxes a Matroska file and outputs the contained Vorbis audio.
- * </refsect2>
+ *
*/
/**
* SECTION:element-matroskamux
+ * @title: matroskamux
*
* matroskamux muxes different input streams into a Matroska file.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v filesrc location=/path/to/mp3 ! mpegaudioparse ! matroskamux name=mux ! filesink location=test.mkv filesrc location=/path/to/theora.ogg ! oggdemux ! theoraparse ! mux.
* ]| This pipeline muxes an MP3 file and a Ogg Theora video into a Matroska file.
* |[
* gst-launch-1.0 -v audiotestsrc num-buffers=100 ! audioconvert ! vorbisenc ! matroskamux ! filesink location=test.mka
* ]| This pipeline muxes a 440Hz sine wave encoded with the Vorbis codec into a Matroska file.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-matroskaparse
+ * @title: matroskaparse
*
* matroskaparse parsees a Matroska file into the different contained streams.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v filesrc location=/path/to/mkv ! matroskaparse ! vorbisdec ! audioconvert ! audioresample ! autoaudiosink
* ]| This pipeline parsees a Matroska file and outputs the contained Vorbis audio.
- * </refsect2>
+ *
*/
/**
* SECTION:element-webmmux
+ * @title: webmmux
*
* webmmux muxes VP8 video and Vorbis audio streams into a WebM file.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 webmmux name=mux ! filesink location=newfile.webm \
* uridecodebin uri=file:///path/to/somefile.ogv name=demux \
* videotestsrc num-buffers=250 ! video/x-raw,framerate=25/1 ! videoconvert ! vp8enc ! queue ! mux.video_0 \
* audiotestsrc samplesperbuffer=44100 num-buffers=10 ! audio/x-raw,rate=44100 ! vorbisenc ! queue ! mux.audio_0
* ]| This pipeline muxes a test video and a sine wave into a WebM file.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-monoscope
+ * @title: monoscope
* @see_also: goom
*
* Monoscope is an audio visualisation element. It creates a coloured
* curve of the audio signal like on an oscilloscope.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v audiotestsrc ! audioconvert ! monoscope ! videoconvert ! ximagesink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-multifilesink
+ * @title: multifilesink
* @see_also: #GstFileSrc
*
* Write incoming data to a series of sequentially-named files.
* be substituted with the index for each filename.
*
* If the #GstMultiFileSink:post-messages property is %TRUE, it sends an application
- * message named
- * <classname>"GstMultiFileSink"</classname> after writing each
- * buffer.
+ * message named `GstMultiFileSink` after writing each buffer.
*
* The message's structure contains these fields:
- * <itemizedlist>
- * <listitem>
- * <para>
- * #gchar *
- * <classname>"filename"</classname>:
- * the filename where the buffer was written.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #gint
- * <classname>"index"</classname>:
- * the index of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"timestamp"</classname>:
- * the timestamp of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"stream-time"</classname>:
- * the stream time of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"running-time"</classname>:
- * the running_time of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"duration"</classname>:
- * the duration of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #guint64
- * <classname>"offset"</classname>:
- * the offset of the buffer that triggered the message.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #guint64
- * <classname>"offset-end"</classname>:
- * the offset-end of the buffer that triggered the message.
- * </para>
- * </listitem>
- * </itemizedlist>
*
- * <refsect2>
- * <title>Example launch line</title>
+ * * #gchar *`filename`: the filename where the buffer was written.
+ * * #gint `index`: the index of the buffer.
+ * * #GstClockTime `timestamp`: the timestamp of the buffer.
+ * * #GstClockTime `stream-time`: the stream time of the buffer.
+ * * #GstClockTime `running-time`: the running_time of the buffer.
+ * * #GstClockTime `duration`: the duration of the buffer.
+ * * #guint64 `offset`: the offset of the buffer that triggered the message.
+ * * #guint64 `offset-end`: the offset-end of the buffer that triggered the message.
+ *
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc ! multifilesink
* gst-launch-1.0 videotestsrc ! multifilesink post-messages=true location="frame%d"
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-multifilesrc
+ * @title: multifilesrc
* @see_also: #GstFileSrc
*
* Reads buffers from sequentially named files. If used together with an image
*
* File names are created by replacing "\%d" with the index using printf().
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 multifilesrc location="img.%04d.png" index=0 caps="image/png,framerate=\(fraction\)12/1" ! \
* pngdec ! videoconvert ! videorate ! theoraenc ! oggmux ! \
* filesink location="images.ogg"
* ]| This pipeline creates a video file "images.ogg" by joining multiple PNG
* files named img.0000.png, img.0001.png, etc.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-splitfilesrc
+ * @title: splitfilesrc
* @see_also: #GstFileSrc, #GstMultiFileSrc
*
* Reads data from multiple files, presenting those files as one continuous
* (and expects) shell-style wildcards (but only for the filename, not for
* directories). The results will be sorted.
*
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 splitfilesrc location="/path/to/part-*.mpg" ! decodebin ! ...
* ]| Plays the different parts as if they were one single MPEG file.
* |[
* gst-launch-1.0 playbin uri="splitfile://path/to/foo.avi.*"
* ]| Plays the different parts as if they were one single AVI file.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-splitmuxsink
+ * @title: splitmuxsink
* @short_description: Muxer wrapper for splitting output stream by size or time
*
* This element wraps a muxer and a sink, and starts a new file when the mux
* muxer-factory and sink-factory properties are used to construct the new
* objects, together with muxer-properties and sink-properties.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -e v4l2src num-buffers=500 ! video/x-raw,width=320,height=240 ! videoconvert ! queue ! timeoverlay ! x264enc key-int-max=10 ! h264parse ! splitmuxsink location=video%02d.mov max-size-time=10000000000 max-size-bytes=1000000
* ]|
* Records a video stream captured from a v4l2 device and muxer it into
* streamable Matroska files, splitting as needed to limit size/duration to 10
* seconds. Each file will finalize asynchronously.
- * </refsect2>
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-splitmuxsrc
+ * @title: splitmuxsrc
* @short_description: Split Demuxer bin that recombines files created by
* the splitmuxsink element.
*
* streams in each file part at the demuxed elementary level, rather than
* as a single larger bytestream.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 splitmuxsrc location=video*.mov ! decodebin ! xvimagesink
* ]| Demux each file part and output the video stream as one continuous stream
* |[
* gst-launch-1.0 playbin uri="splitmux://path/to/foo.mp4.*"
* ]| Play back a set of files created by splitmuxsink
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-multipartdemux
+ * @title: multipartdemux
* @see_also: #GstMultipartMux
*
- * MultipartDemux uses the Content-type field of incoming buffers to demux and
- * push data to dynamic source pads. Most of the time multipart streams are
+ * MultipartDemux uses the Content-type field of incoming buffers to demux and
+ * push data to dynamic source pads. Most of the time multipart streams are
* sequential JPEG frames generated from a live source such as a network source
* or a camera.
*
* be configured specifically with the #GstMultipartDemux:boundary property
* otherwise it will be autodetected.
*
- * <refsect2>
- * <title>Sample pipelines</title>
+ * ## Sample pipelines
* |[
* gst-launch-1.0 filesrc location=/tmp/test.multipart ! multipartdemux ! image/jpeg,framerate=\(fraction\)5/1 ! jpegparse ! jpegdec ! videoconvert ! autovideosink
* ]| a simple pipeline to demux a multipart file muxed with #GstMultipartMux
* containing JPEG frames.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-multipartmux
+ * @title: multipartmux
*
* MultipartMux uses the #GstCaps of the sink pad as the Content-type field for
- * incoming buffers when muxing them to a multipart stream. Most of the time
+ * incoming buffers when muxing them to a multipart stream. Most of the time
* multipart streams are sequential JPEG frames.
*
- * <refsect2>
- * <title>Sample pipelines</title>
+ * ## Sample pipelines
* |[
* gst-launch-1.0 videotestsrc ! video/x-raw, framerate='(fraction)'5/1 ! jpegenc ! multipartmux ! filesink location=/tmp/test.multipart
* ]| a pipeline to mux 5 JPEG frames per second into a multipart stream
* stored to a file.
- * </refsect2>
+ *
*/
/* FIXME: drop/merge tag events, or at least send them delayed after stream-start */
/**
* SECTION:element-rganalysis
+ * @title: rganalysis
* @see_also: #GstRgVolume
*
* This element analyzes raw audio sample data in accordance with the proposed
* posted on the message bus with a tag message. The EOS event is forwarded as
* normal afterwards. Result tag lists at least contain the tags
* #GST_TAG_TRACK_GAIN, #GST_TAG_TRACK_PEAK and #GST_TAG_REFERENCE_LEVEL.
- *
+ *
* Because the generated metadata tags become available at the end of streams,
* downstream muxer and encoder elements are normally unable to save them in
* their output since they generally save metadata in the file header.
* needed for album processing (see #GstRgAnalysis:num-tracks property) since
* the album gain and peak values need to be associated with all tracks of an
* album, not just the last one.
- *
- * <refsect2>
- * <title>Example launch lines</title>
+ *
+ * ## Example launch lines
* |[
* gst-launch-1.0 -t audiotestsrc wave=sine num-buffers=512 ! rganalysis ! fakesink
* ]| Analyze a simple test waveform
* gst-launch-1.0 -t gnomevfssrc location=http://replaygain.hydrogenaudio.org/ref_pink.wav \
* ! wavparse ! rganalysis ! fakesink
* ]| Analyze the pink noise reference file
- * <para>
+ *
* The above launch line yields a result gain of +6 dB (instead of the expected
* +0 dB). This is not in error, refer to the #GstRgAnalysis:reference-level
* property documentation for more information.
- * </para>
- * </refsect2>
- * <refsect2>
- * <title>Acknowledgements</title>
- * <para>
+ *
+ * ## Acknowledgements
+ *
* This element is based on code used in the <ulink
* url="http://sjeng.org/vorbisgain.html">vorbisgain</ulink> program and many
* others. The relevant parts are copyrighted by David Robinson, Glen Sawyer
* and Frank Klemm.
- * </para>
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
* GstRgAnalysis:num-tracks:
*
* Number of remaining album tracks.
- *
+ *
* Analyzing several streams sequentially and assigning them a common result
* gain is known as "album processing". If this gain is used during playback
* (by switching to "album mode"), all tracks of an album receive the same
/**
* SECTION:element-rglimiter
+ * @title: rglimiter
* @see_also: #GstRgVolume
*
* This element applies signal compression/limiting to raw audio data. It
* performs strict hard limiting with soft-knee characteristics, using a
* threshold of -6 dB. This type of filter is mentioned in the proposed <ulink
* url="http://replaygain.org">ReplayGain standard</ulink>.
- *
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=filename.ext ! decodebin ! audioconvert \
* ! rgvolume pre-amp=6.0 headroom=10.0 ! rglimiter \
* ! audioconvert ! audioresample ! alsasink
* ]|Playback of a file
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rgvolume
+ * @title: rgvolume
* @see_also: #GstRgLimiter, #GstRgAnalysis
*
* This element applies volume changes to streams as lined out in the proposed
* <ulink url="http://replaygain.org">ReplayGain standard</ulink>. It
* interprets the ReplayGain meta data tags and carries out the adjustment (by
* using a volume element internally). The relevant tags are:
- * <itemizedlist>
- * <listitem>#GST_TAG_TRACK_GAIN</listitem>
- * <listitem>#GST_TAG_TRACK_PEAK</listitem>
- * <listitem>#GST_TAG_ALBUM_GAIN</listitem>
- * <listitem>#GST_TAG_ALBUM_PEAK</listitem>
- * <listitem>#GST_TAG_REFERENCE_LEVEL</listitem>
- * </itemizedlist>
+ *
+ * * #GST_TAG_TRACK_GAIN
+ * * #GST_TAG_TRACK_PEAK
+ * * #GST_TAG_ALBUM_GAIN
+ * * #GST_TAG_ALBUM_PEAK
+ * * #GST_TAG_REFERENCE_LEVEL
+ *
* The information carried by these tags must have been calculated beforehand by
* performing the ReplayGain analysis. This is implemented by the <link
* linkend="GstRgAnalysis">rganalysis</link> element.
- *
+ *
* The signal compression/limiting recommendations outlined in the proposed
* standard are not implemented by this element. This has to be handled by
* separate elements because applications might want to have additional filters
* between the volume adjustment and the limiting stage. A basic limiter is
* included with this plugin: The <link linkend="GstRgLimiter">rglimiter</link>
* element applies -6 dB hard limiting as mentioned in the ReplayGain standard.
- *
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=filename.ext ! decodebin ! audioconvert \
* ! rgvolume ! audioconvert ! audioresample ! alsasink
* ]| Playback of a file
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
* presence of ReplayGain tags in the stream, this is set according to one of
* these simple formulas:
*
- * <itemizedlist>
- * <listitem>#GstRgVolume:pre-amp + album gain of the stream</listitem>
- * <listitem>#GstRgVolume:pre-amp + track gain of the stream</listitem>
- * <listitem>#GstRgVolume:pre-amp + #GstRgVolume:fallback-gain</listitem>
- * </itemizedlist>
+ *
+ * * #GstRgVolume:pre-amp + album gain of the stream
+ * * #GstRgVolume:pre-amp + track gain of the stream
+ * * #GstRgVolume:pre-amp + #GstRgVolume:fallback-gain
+ *
*/
g_object_class_install_property (gobject_class, PROP_TARGET_GAIN,
g_param_spec_double ("target-gain", "Target-gain",
/**
* SECTION:element-rtpL16depay
+ * @title: rtpL16depay
* @see_also: rtpL16pay
*
* Extract raw audio from RTP packets according to RFC 3551.
* For detailed information see: http://www.rfc-editor.org/rfc/rfc3551.txt
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 udpsrc caps='application/x-rtp, media=(string)audio, clock-rate=(int)44100, encoding-name=(string)L16, encoding-params=(string)1, channels=(int)1, payload=(int)96' ! rtpL16depay ! pulsesink
* ]| This example pipeline will depayload an RTP raw audio stream. Refer to
* the rtpL16pay example to create the RTP stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rtpL16pay
+ * @title: rtpL16pay
* @see_also: rtpL16depay
*
* Payload raw audio into RTP packets according to RFC 3551.
* For detailed information see: http://www.rfc-editor.org/rfc/rfc3551.txt
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 -v audiotestsrc ! audioconvert ! rtpL16pay ! udpsink
* ]| This example pipeline will payload raw audio. Refer to
* the rtpL16depay example to depayload and play the RTP stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rtpL24depay
+ * @title: rtpL24depay
* @see_also: rtpL24pay
*
* Extract raw audio from RTP packets according to RFC 3190, section 4.
* For detailed information see: http://www.rfc-editor.org/rfc/rfc3190.txt
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 udpsrc caps='application/x-rtp, media=(string)audio, clock-rate=(int)44100, encoding-name=(string)L24, encoding-params=(string)1, channels=(int)1, payload=(int)96' ! rtpL24depay ! pulsesink
* ]| This example pipeline will depayload an RTP raw audio stream. Refer to
* the rtpL24pay example to create the RTP stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rtpL24pay
+ * @title: rtpL24pay
* @see_also: rtpL24depay
*
* Payload raw 24-bit audio into RTP packets according to RFC 3190, section 4.
* For detailed information see: http://www.rfc-editor.org/rfc/rfc3190.txt
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 -v audiotestsrc ! audioconvert ! rtpL24pay ! udpsink
* ]| This example pipeline will payload raw audio. Refer to
* the rtpL24depay example to depayload and play the RTP stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rtpac3depay
+ * @title: rtpac3depay
* @see_also: rtpac3pay
*
* Extract AC3 audio from RTP packets according to RFC 4184.
* For detailed information see: http://www.rfc-editor.org/rfc/rfc4184.txt
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 udpsrc caps='application/x-rtp, media=(string)audio, clock-rate=(int)44100, encoding-name=(string)AC3, payload=(int)96' ! rtpac3depay ! a52dec ! pulsesink
* ]| This example pipeline will depayload and decode an RTP AC3 stream. Refer to
* the rtpac3pay example to create the RTP stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rtpac3pay
+ * @title: rtpac3pay
* @see_also: rtpac3depay
*
* Payload AC3 audio into RTP packets according to RFC 4184.
* For detailed information see: http://www.rfc-editor.org/rfc/rfc4184.txt
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 -v audiotestsrc ! avenc_ac3 ! rtpac3pay ! udpsink
* ]| This example pipeline will encode and payload AC3 stream. Refer to
* the rtpac3depay example to depayload and decode the RTP stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rtpamrdepay
+ * @title: rtpamrdepay
* @see_also: rtpamrpay
*
* Extract AMR audio from RTP packets according to RFC 3267.
* For detailed information see: http://www.rfc-editor.org/rfc/rfc3267.txt
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 udpsrc caps='application/x-rtp, media=(string)audio, clock-rate=(int)8000, encoding-name=(string)AMR, encoding-params=(string)1, octet-align=(string)1, payload=(int)96' ! rtpamrdepay ! amrnbdec ! pulsesink
* ]| This example pipeline will depayload and decode an RTP AMR stream. Refer to
* the rtpamrpay example to create the RTP stream.
- * </refsect2>
+ *
*/
/*
/**
* SECTION:element-rtpamrpay
+ * @title: rtpamrpay
* @see_also: rtpamrdepay
*
* Payload AMR audio into RTP packets according to RFC 3267.
* For detailed information see: http://www.rfc-editor.org/rfc/rfc3267.txt
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 -v audiotestsrc ! amrnbenc ! rtpamrpay ! udpsink
* ]| This example pipeline will encode and payload an AMR stream. Refer to
* the rtpamrdepay example to depayload and decode the RTP stream.
- * </refsect2>
+ *
*/
/* references:
/**
* SECTION:element-rtpbvdepay
+ * @title: rtpbvdepay
* @see_also: rtpbvpay
*
* Extract BroadcomVoice audio from RTP packets according to RFC 4298.
/**
* SECTION:element-rtpbvpay
+ * @title: rtpbvpay
* @see_also: rtpbvdepay
*
* Payload BroadcomVoice audio into RTP packets according to RFC 4298.
/**
* SECTION:element-rtph261depay
+ * @title: rtph261depay
* @see_also: rtph261pay
*
* Extract encoded H.261 video frames from RTP packets according to RFC 4587.
* aggregates the extracted stream until a complete frame is received before
* it pushes it downstream.
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 udpsrc caps='application/x-rtp, payload=31' ! rtph261depay ! avdec_h261 ! autovideosink
* ]| This example pipeline will depayload and decode an RTP H.261 video stream.
* Refer to the rtph261pay example to create the RTP stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rtph261pay
+ * @title: rtph261pay
* @see_also: rtph261depay
*
* Payload encoded H.261 video frames into RTP packets according to RFC 4587.
* encoder does not produce a continuous bit-stream but the decoder requires
* it.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 videotestsrc ! avenc_h261 ! rtph261pay ! udpsink
* ]| This will encode a test video and payload it. Refer to the rtph261depay
* example to depayload and play the RTP stream.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
GST_STATIC_CAPS ("application/x-rtp, "
"media = (string) \"video\", "
"clock-rate = (int) 90000, " "encoding-name = (string) \"H264\"")
- /** optional parameters **/
+ /* optional parameters */
/* "profile-level-id = (string) ANY, " */
/* "max-mbps = (string) ANY, " */
/* "max-fs = (string) ANY, " */
GST_STATIC_CAPS ("application/x-rtp, "
"media = (string) \"video\", "
"clock-rate = (int) 90000, " "encoding-name = (string) \"H265\"")
- /** optional parameters **/
+ /* optional parameters */
/* "profile-space = (int) [ 0, 3 ], " */
/* "profile-id = (int) [ 0, 31 ], " */
/* "tier-flag = (int) [ 0, 1 ], " */
"media = (string) \"video\", "
"payload = (int) " GST_RTP_PAYLOAD_DYNAMIC_STRING ", "
"clock-rate = (int) 90000, " "encoding-name = (string) \"H265\"")
- /** optional parameters **/
+ /* optional parameters */
/* "profile-space = (int) [ 0, 3 ], " */
/* "profile-id = (int) [ 0, 31 ], " */
/* "tier-flag = (int) [ 0, 1 ], " */
/**
* SECTION:element-rtpj2kdepay
+ * @title: rtpj2kdepay
*
* Depayload an RTP-payloaded JPEG 2000 image into RTP packets according to RFC 5371
* and RFC 5372.
/**
* SECTION:element-rtpj2kpay
+ * @title: rtpj2kpay
*
* Payload encode JPEG 2000 images into RTP packets according to RFC 5371
* and RFC 5372.
* codestream. A "packetization unit" is defined as either a JPEG 2000 main header,
* a JPEG 2000 tile-part header, or a JPEG 2000 packet.
*
- *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rtpjpegpay
+ * @title: rtpjpegpay
*
* Payload encode JPEG pictures into RTP packets according to RFC 2435.
* For detailed information see: http://www.rfc-editor.org/rfc/rfc2435.txt
/**
* SECTION:element-rtpklvdepay
+ * @title: rtpklvdepay
* @see_also: rtpklvpay
*
* Extract KLV metadata from RTP packets according to RFC 6597.
* For detailed information see: http://tools.ietf.org/html/rfc6597
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 udpsrc caps='application/x-rtp, media=(string)application, clock-rate=(int)90000, encoding-name=(string)SMPTE336M' ! rtpklvdepay ! fakesink dump=true
* ]| This example pipeline will depayload an RTP KLV stream and display
* a hexdump of the KLV data on stdout.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
/**
* SECTION:element-rtpklvpay
+ * @title: rtpklvpay
* @see_also: rtpklvdepay
*
* Payloads KLV metadata into RTP packets according to RFC 6597.
* For detailed information see: http://tools.ietf.org/html/rfc6597
*
- * <refsect2>
- * <title>Example pipeline</title>
+ * ## Example pipeline
* |[
* gst-launch-1.0 filesrc location=video-with-klv.ts ! tsdemux ! rtpklvpay ! udpsink
* ]| This example pipeline will payload an RTP KLV stream extracted from an
* MPEG-TS stream and send it via UDP to an RTP receiver.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
/**
* SECTION:element-rtpstreamdepay
+ * @title: rtpstreamdepay
*
* Implements stream depayloading of RTP and RTCP packets for connection-oriented
* transport protocols according to RFC4571.
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc ! "audio/x-raw,rate=48000" ! vorbisenc ! rtpvorbispay config-interval=1 ! rtpstreampay ! tcpserversink port=5678
* gst-launch-1.0 tcpclientsrc port=5678 host=127.0.0.1 do-timestamp=true ! "application/x-rtp-stream,media=audio,clock-rate=48000,encoding-name=VORBIS" ! rtpstreamdepay ! rtpvorbisdepay ! decodebin ! audioconvert ! audioresample ! autoaudiosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rtpstreampay
+ * @title: rtpstreampay
*
* Implements stream payloading of RTP and RTCP packets for connection-oriented
* transport protocols according to RFC4571.
- * <refsect2>
- * <title>Example launch line</title>
+ *
+ * ## Example launch line
* |[
* gst-launch-1.0 audiotestsrc ! "audio/x-raw,rate=48000" ! vorbisenc ! rtpvorbispay config-interval=1 ! rtpstreampay ! tcpserversink port=5678
* gst-launch-1.0 tcpclientsrc port=5678 host=127.0.0.1 do-timestamp=true ! "application/x-rtp-stream,media=audio,clock-rate=48000,encoding-name=VORBIS" ! rtpstreamdepay ! rtpvorbisdepay ! decodebin ! audioconvert ! audioresample ! autoaudiosink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rtpbin
+ * @title: rtpbin
* @see_also: rtpjitterbuffer, rtpsession, rtpptdemux, rtpssrcdemux
*
* RTP bin combines the functions of #GstRtpSession, #GstRtpSsrcDemux,
* and 1 or more sink_\%u pads. A session will be made for each sink_\%u pad
* when the corresponding recv_rtp_sink_\%u pad is requested on #GstRtpBin.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 udpsrc port=5000 caps="application/x-rtp, ..." ! .recv_rtp_sink_0 \
* rtpbin ! rtptheoradepay ! theoradec ! xvimagesink
* synchronisation.
* Send RTCP reports for session 0 on port 5005 and RTCP reports for session 1
* on port 5007.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rtpdtmfmux
+ * @title: rtpdtmfmux
* @see_also: rtpdtmfsrc, dtmfsrc, rtpmux
*
* The RTP "DTMF" Muxer muxes multiple RTP streams into a valid RTP
/**
* SECTION:element-rtpjitterbuffer
+ * @title: rtpjitterbuffer
*
* This element reorders and removes duplicate RTP packets as they are received
* from a network source.
*
* This element will automatically be used inside rtpbin.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 rtspsrc location=rtsp://192.168.1.133:8554/mpeg1or2AudioVideoTest ! rtpjitterbuffer ! rtpmpvdepay ! mpeg2dec ! xvimagesink
* ]| Connect to a streaming server and decode the MPEG video. The jitterbuffer is
* inserted into the pipeline to smooth out network jitter and to reorder the
* out-of-order RTP packets.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
* Various jitterbuffer statistics. This property returns a GstStructure
* with name application/x-rtp-jitterbuffer-stats with the following fields:
*
- * <itemizedlist>
- * <listitem>
- * <para>
- * #guint64
- * <classname>"num-pushed"</classname>:
- * the number of packets pushed out.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #guint64
- * <classname>"num-lost"</classname>:
- * the number of packets considered lost.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #guint64
- * <classname>"num-late"</classname>:
- * the number of packets arriving too late.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #guint64
- * <classname>"num-duplicates"</classname>:
- * the number of duplicate packets.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #guint64
- * <classname>"rtx-count"</classname>:
- * the number of retransmissions requested.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #guint64
- * <classname>"rtx-success-count"</classname>:
- * the number of successful retransmissions.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #gdouble
- * <classname>"rtx-per-packet"</classname>:
- * average number of RTX per packet.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #guint64
- * <classname>"rtx-rtt"</classname>:
- * average round trip time per RTX.
- * </para>
- * </listitem>
- * </itemizedlist>
+ * * #guint64 `num-pushed`: the number of packets pushed out.
+ * * #guint64 `num-lost`: the number of packets considered lost.
+ * * #guint64 `num-late`: the number of packets arriving too late.
+ * * #guint64 `num-duplicates`: the number of duplicate packets.
+ * * #guint64 `rtx-count`: the number of retransmissions requested.
+ * * #guint64 `rtx-success-count`: the number of successful retransmissions.
+ * * #gdouble `rtx-per-packet`: average number of RTX per packet.
+ * * #guint64 `rtx-rtt`: average round trip time per RTX.
*
* Since: 1.4
*/
/**
* SECTION:element-rtpmux
+ * @title: rtpmux
* @see_also: rtpdtmfmux
*
* The rtp muxer takes multiple RTP streams having the same clock-rate and
* muxes into a single stream with a single SSRC.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 rtpmux name=mux ! udpsink host=127.0.0.1 port=8888 \
* alsasrc ! alawenc ! rtppcmapay ! \
* In this example, an audio stream is captured from ALSA and another is
* generated, both are encoded into different payload types and muxed together
* so they can be sent on the same port.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
-/*
+/*
* RTP Demux element
*
* Copyright (C) 2005 Nokia Corporation.
/**
* SECTION:element-rtpptdemux
+ * @title: rtpptdemux
*
* rtpptdemux acts as a demuxer for RTP packets based on the payload type of
* the packets. Its main purpose is to allow an application to easily receive
* and decode an RTP stream with multiple payload types.
- *
+ *
* For each payload type that is detected, a new pad will be created and the
* #GstRtpPtDemux::new-payload-type signal will be emitted. When the payload for
* the RTP stream changes, the #GstRtpPtDemux::payload-type-change signal will be
* emitted.
- *
+ *
* The element will try to set complete and unique application/x-rtp caps
* on the output pads based on the result of the #GstRtpPtDemux::request-pt-map
* signal.
- *
- * <refsect2>
- * <title>Example pipelines</title>
+ *
+ * ## Example pipelines
* |[
* gst-launch-1.0 udpsrc caps="application/x-rtp" ! rtpptdemux ! fakesink
* ]| Takes an RTP stream and send the RTP packets with the first detected
* payload type to fakesink, discarding the other payload types.
- * </refsect2>
+ *
*/
/*
*/
struct _GstRtpPtDemuxPad
{
- GstPad *pad; /**< pointer to the actual pad */
- gint pt; /**< RTP payload-type attached to pad */
+ GstPad *pad; /*< pointer to the actual pad */
+ gint pt; /*< RTP payload-type attached to pad */
gboolean newcaps;
};
struct _GstRtpPtDemux
{
- GstElement parent; /**< parent class */
+ GstElement parent; /*< parent class */
- GstPad *sink; /**< the sink pad */
- guint16 last_pt; /**< pt of the last packet 0xFFFF if none */
- GSList *srcpads; /**< a linked list of GstRtpPtDemuxPad objects */
- GValue ignored_pts; /**< a GstValueArray of payload types that will not have pads created for */
+ GstPad *sink; /*< the sink pad */
+ guint16 last_pt; /*< pt of the last packet 0xFFFF if none */
+ GSList *srcpads; /*< a linked list of GstRtpPtDemuxPad objects */
+ GValue ignored_pts; /*< a GstValueArray of payload types that will not have pads created for */
};
struct _GstRtpPtDemuxClass
/**
* SECTION:element-rtprtxqueue
+ * @title: rtprtxqueue
*
* rtprtxqueue maintains a queue of transmitted RTP packets, up to a
* configurable limit (see #GstRTPRtxQueue::max-size-time,
* See also #GstRtpRtxSend, #GstRtpRtxReceive
*
* # Example pipelines
+ *
* |[
* gst-launch-1.0 rtpbin name=b rtp-profile=avpf \
* audiotestsrc is-live=true ! opusenc ! rtpopuspay pt=96 ! rtprtxqueue ! b.send_rtp_sink_0 \
* b.send_rtp_src_0 ! identity drop-probability=0.01 ! udpsink host="127.0.0.1" port=5000 \
* udpsrc port=5001 ! b.recv_rtcp_sink_0 \
* b.send_rtcp_src_0 ! udpsink host="127.0.0.1" port=5002 sync=false async=false
- * ]| Sender pipeline
+ * ]|
+ * Sender pipeline
+ *
* |[
* gst-launch-1.0 rtpbin name=b rtp-profile=avpf do-retransmission=true \
* udpsrc port=5000 caps="application/x-rtp,media=(string)audio,clock-rate=(int)48000,encoding-name=(string)OPUS,payload=(int)96" ! \
* b. ! rtpopusdepay ! opusdec ! audioconvert ! audioresample ! autoaudiosink \
* udpsrc port=5002 ! b.recv_rtcp_sink_0 \
* b.send_rtcp_src_0 ! udpsink host="127.0.0.1" port=5001 sync=false async=false
- * ]| Receiver pipeline
+ * ]|
+ * Receiver pipeline
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rtprtxreceive
+ * @title: rtprtxreceive
* @see_also: rtprtxsend, rtpsession, rtpjitterbuffer
*
* rtprtxreceive listens to the retransmission events from the
* rtpbin instead, with its #GstRtpBin::request-aux-sender and
* #GstRtpBin::request-aux-receiver signals. See #GstRtpBin.
*
- * # Example pipelines
+ * ## Example pipelines
+ *
* |[
* gst-launch-1.0 rtpsession name=rtpsession rtp-profile=avpf \
* audiotestsrc is-live=true ! opusenc ! rtpopuspay pt=96 ! \
* sync=false async=false
* ]| Send audio stream through port 5000 (5001 and 5002 are just the rtcp
* link with the receiver)
+ *
* |[
* gst-launch-1.0 rtpsession name=rtpsession rtp-profile=avpf \
* udpsrc port=5000 caps="application/x-rtp,media=(string)audio,clock-rate=(int)48000,encoding-name=(string)OPUS,payload=(int)96" ! \
* rtpsession.send_rtcp_src ! \
* udpsink host="127.0.0.1" port=5001 sync=false async=false \
* udpsrc port=5002 ! rtpsession.recv_rtcp_sink
- * ]| Receive audio stream from port 5000 (5001 and 5002 are just the rtcp
+ * ]|
+ * Receive audio stream from port 5000 (5001 and 5002 are just the rtcp
* link with the sender)
*
* In this example we can see a simple streaming of an OPUS stream with some
* udpsrc port=5001 ! rtpsession.recv_rtcp_sink \
* rtpsession.send_rtcp_src ! udpsink host="127.0.0.1" port=5002 \
* sync=false async=false
- * ]| Send two audio streams to port 5000.
+ * ]|
+ * Send two audio streams to port 5000.
+ *
* |[
* gst-launch-1.0 rtpsession name=rtpsession rtp-profile=avpf \
* udpsrc port=5000 caps="application/x-rtp,media=(string)audio,clock-rate=(int)48000,encoding-name=(string)OPUS,payload=(int)97" ! \
* udpsrc port=5002 ! rtpsession.recv_rtcp_sink \
* rtpsession.send_rtcp_src ! udpsink host="127.0.0.1" port=5001 \
* sync=false async=false
- * ]| Receive two audio streams from port 5000.
+ * ]|
+ * Receive two audio streams from port 5000.
*
* In this example we are streaming two streams of the same type through the
* same port. They, however, are using a different SSRC (ssrc is randomly
/**
* SECTION:element-rtprtxsend
+ * @title: rtprtxsend
*
* See #GstRtpRtxReceive for examples
- *
+ *
* The purpose of the sender RTX object is to keep a history of RTP packets up
* to a configurable limit (max-size-time or max-size-packets). It will listen
* for upstream custom retransmission events (GstRTPRetransmissionRequest) that
/**
* SECTION:element-rtpsession
+ * @title: rtpsession
* @see_also: rtpjitterbuffer, rtpbin, rtpptdemux, rtpssrcdemux
*
* The RTP session manager models participants with unique SSRC in an RTP
* functionality can be activated.
*
* The session manager currently implements RFC 3550 including:
- * <itemizedlist>
- * <listitem>
- * <para>RTP packet validation based on consecutive sequence numbers.</para>
- * </listitem>
- * <listitem>
- * <para>Maintainance of the SSRC participant database.</para>
- * </listitem>
- * <listitem>
- * <para>Keeping per participant statistics based on received RTCP packets.</para>
- * </listitem>
- * <listitem>
- * <para>Scheduling of RR/SR RTCP packets.</para>
- * </listitem>
- * <listitem>
- * <para>Support for multiple sender SSRC.</para>
- * </listitem>
- * </itemizedlist>
+ *
+ * * RTP packet validation based on consecutive sequence numbers.
+ *
+ * * Maintenance of the SSRC participant database.
+ *
+ * * Keeping per participant statistics based on received RTCP packets.
+ *
+ * * Scheduling of RR/SR RTCP packets.
+ *
+ * * Support for multiple sender SSRC.
*
* The rtpsession will not demux packets based on SSRC or payload type, nor will
* it correct for packet reordering and jitter. Use #GstRtpsSrcDemux,
* mapping. One can clear the cached values with the #GstRtpSession::clear-pt-map
* signal.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 udpsrc port=5000 caps="application/x-rtp, ..." ! .recv_rtp_sink rtpsession .recv_rtp_src ! rtptheoradepay ! theoradec ! xvimagesink
* ]| Receive theora RTP packets from port 5000 and send them to the depayloader,
* correctly because the second udpsink will not preroll correctly (no RTCP
* packets are sent in the PAUSED state). Applications should manually set and
* keep (see gst_element_set_locked_state()) the RTCP udpsink to the PLAYING state.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-rtpssrcdemux
+ * @title: rtpssrcdemux
*
* rtpssrcdemux acts as a demuxer for RTP packets based on the SSRC of the
* packets. Its main purpose is to allow an application to easily receive and
* decode an RTP stream with multiple SSRCs.
- *
+ *
* For each SSRC that is detected, a new pad will be created and the
- * #GstRtpSsrcDemux::new-ssrc-pad signal will be emitted.
- *
- * <refsect2>
- * <title>Example pipelines</title>
+ * #GstRtpSsrcDemux::new-ssrc-pad signal will be emitted.
+ *
+ * ## Example pipelines
* |[
* gst-launch-1.0 udpsrc caps="application/x-rtp" ! rtpssrcdemux ! fakesink
* ]| Takes an RTP stream and send the RTP packets with the first detected SSRC
* to fakesink, discarding the other SSRCs.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*
* Get a GValue Array of all sources in the session.
*
- * <example>
- * <title>Getting the #RTPSources of a session
- * <programlisting>
+ * ## Getting the #RTPSources of a session
+ * |[
* {
* GValueArray *arr;
* GValue *val;
* }
* g_value_array_free (arr);
* }
- * </programlisting>
- * </example>
+ * ]|
*/
g_object_class_install_property (gobject_class, PROP_SOURCES,
g_param_spec_boxed ("sources", "Sources",
/**
* SECTION:element-rtpdec
+ * @title: rtpdec
*
* A simple RTP session manager used internally by rtspsrc.
*/
* GstRTPDec::on-new-ssrc:
* @rtpbin: the object which received the signal
* @session: the session
- * @ssrc: the SSRC
+ * @ssrc: the SSRC
*
* Notify of a new SSRC that entered @session.
*/
* GstRTPDec::on-ssrc_collision:
* @rtpbin: the object which received the signal
* @session: the session
- * @ssrc: the SSRC
+ * @ssrc: the SSRC
*
* Notify when we have an SSRC collision
*/
* GstRTPDec::on-ssrc_validated:
* @rtpbin: the object which received the signal
* @session: the session
- * @ssrc: the SSRC
+ * @ssrc: the SSRC
*
* Notify of a new SSRC that became validated.
*/
* GstRTPDec::on-bye-ssrc:
* @rtpbin: the object which received the signal
* @session: the session
- * @ssrc: the SSRC
+ * @ssrc: the SSRC
*
* Notify of an SSRC that became inactive because of a BYE packet.
*/
* GstRTPDec::on-bye-timeout:
* @rtpbin: the object which received the signal
* @session: the session
- * @ssrc: the SSRC
+ * @ssrc: the SSRC
*
* Notify of an SSRC that has timed out because of BYE
*/
* GstRTPDec::on-timeout:
* @rtpbin: the object which received the signal
* @session: the session
- * @ssrc: the SSRC
+ * @ssrc: the SSRC
*
* Notify of an SSRC that has timed out
*/
/**
* SECTION:element-shapewipe
+ * @title: shapewipe
*
* The shapewipe element provides custom transitions on video streams
* based on a grayscale bitmap. The state of the transition can be
* <ulink url="http://cinelerra.org/transitions.php">Cinelerra transition</ulink>
* page.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! video/x-raw,format=AYUV,width=640,height=480 ! shapewipe position=0.5 name=shape ! videomixer name=mixer ! videoconvert ! autovideosink filesrc location=mask.png ! typefind ! decodebin ! videoconvert ! videoscale ! queue ! shape.mask_sink videotestsrc pattern=snow ! video/x-raw,format=AYUV,width=640,height=480 ! queue ! mixer.
* ]| This pipeline adds the transition from mask.png with position 0.5 to an SMPTE test screen and snow.
- * </refsect2>
+ *
*/
/**
* SECTION:element-smpte
+ * @title: smpte
*
* smpte can accept I420 video streams with the same width, height and
* framerate. The two incoming buffers are blended together using an effect
* higher presision will create a mask with smoother gradients in order to avoid
* banding.
*
- * <refsect2>
- * <title>Sample pipelines</title>
+ * ## Sample pipelines
* |[
* gst-launch-1.0 -v videotestsrc pattern=1 ! smpte name=s border=20000 type=234 duration=2000000000 ! videoconvert ! ximagesink videotestsrc ! s.
* ]| A pipeline to demonstrate the smpte transition.
* It shows a pinwheel transition a from a snow videotestsrc to an smpte
* pattern videotestsrc. The transition will take 2 seconds to complete. The
* edges of the transition are smoothed with a 20000 big border.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-smptealpha
+ * @title: smptealpha
*
* smptealpha can accept an I420 or AYUV video stream. An alpha channel is added
* using an effect specific SMPTE mask in the I420 input case. In the AYUV case,
* A higher presision will create a mask with smoother gradients in order to
* avoid banding.
*
- * <refsect2>
- * <title>Sample pipelines</title>
- * <para>
+ * ## Sample pipelines
+ *
* Here is a pipeline to demonstrate the smpte transition :
- * <programlisting>
+ * |[
* gst-launch-1.0 -v videotestsrc ! smptealpha border=20000 type=44
* position=0.5 ! videomixer ! videoconvert ! ximagesink
- * </programlisting>
+ * ]|
* This shows a midway bowtie-h transition a from a videotestsrc to a
* transparent image. The edges of the transition are smoothed with a
* 20000 big border.
- * </para>
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-spectrum
+ * @title: spectrum
*
* The Spectrum element analyzes the frequency spectrum of an audio signal.
* If the #GstSpectrum:post-messages property is %TRUE, it sends analysis results
* as element messages named
- * <classname>"spectrum"</classname> after each interval of time given
+ * `spectrum` after each interval of time given
* by the #GstSpectrum:interval property.
*
* The message's structure contains some combination of these fields:
- * <itemizedlist>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"timestamp"</classname>:
- * the timestamp of the buffer that triggered the message.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"stream-time"</classname>:
- * the stream time of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"running-time"</classname>:
- * the running_time of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"duration"</classname>:
- * the duration of the buffer.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstClockTime
- * <classname>"endtime"</classname>:
- * the end time of the buffer that triggered the message as stream time (this
+ *
+ * * #GstClockTime `timestamp`: the timestamp of the buffer that triggered the message.
+ * * #GstClockTime `stream-time`: the stream time of the buffer.
+ * * #GstClockTime `running-time`: the running_time of the buffer.
+ * * #GstClockTime `duration`: the duration of the buffer.
+ * * #GstClockTime `endtime`: the end time of the buffer that triggered the message as stream time (this
* is deprecated, as it can be calculated from stream-time + duration)
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstValueList of #gfloat
- * <classname>"magnitude"</classname>:
- * the level for each frequency band in dB. All values below the value of the
+ * * #GstValueList of #gfloat `magnitude`: the level for each frequency band in dB.
+ * All values below the value of the
* #GstSpectrum:threshold property will be set to the threshold. Only present
* if the #GstSpectrum:message-magnitude property is %TRUE.
- * </para>
- * </listitem>
- * <listitem>
- * <para>
- * #GstValueList of #gfloat
- * <classname>"phase"</classname>:
- * The phase for each frequency band. The value is between -pi and pi. Only
+ * * #GstValueList of #gfloat `phase`: the phase for each frequency band. The value is between -pi and pi. Only
* present if the #GstSpectrum:message-phase property is %TRUE.
- * </para>
- * </listitem>
- * </itemizedlist>
*
* If #GstSpectrum:multi-channel property is set to true. magnitude and phase
* fields will be each a nested #GstValueArray. The first dimension are the
* channels and the second dimension are the values.
*
- * <refsect2>
- * <title>Example application</title>
+ * ## Example application
+ *
* <informalexample><programlisting language="C">
* <xi:include xmlns:xi="http://www.w3.org/2003/XInclude" parse="text" href="../../../../tests/examples/spectrum/spectrum-example.c" />
* </programlisting></informalexample>
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-multiudpsink
+ * @title: multiudpsink
* @see_also: udpsink, multifdsink
*
* multiudpsink is a network sink that sends UDP packets to multiple
*/
/**
* SECTION:element-udpsink
+ * @title: udpsink
* @see_also: udpsrc, multifdsink
*
* udpsink is a network sink that sends UDP packets to the network.
* It can be combined with RTP payloaders to implement RTP streaming.
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 -v audiotestsrc ! udpsink
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
/**
* SECTION:element-udpsrc
+ * @title: udpsrc
* @see_also: udpsink, multifdsink
*
* udpsrc is a network source that reads UDP packets from the network.
* type URIs.
*
* If the #GstUDPSrc:timeout property is set to a value bigger than 0, udpsrc
- * will generate an element message named
- * <classname>"GstUDPSrcTimeout"</classname>
+ * will generate an element message named `GstUDPSrcTimeout`
* if no data was received in the given timeout.
+ *
* The message's structure contains one field:
- * <itemizedlist>
- * <listitem>
- * <para>
- * #guint64
- * <classname>"timeout"</classname>: the timeout in microseconds that
- * expired when waiting for data.
- * </para>
- * </listitem>
- * </itemizedlist>
+ *
+ * * #guint64 `timeout`: the timeout in microseconds that expired when waiting for data.
+ *
* The message is typically used to detect that no UDP arrives in the receiver
* because it is blocked by a firewall.
*
* with the #GstUDPSrc:close-socket property, in which case the
* application is responsible for closing the file descriptor.
*
- * <refsect2>
- * <title>Examples</title>
+ * ## Examples
* |[
* gst-launch-1.0 -v udpsrc ! fakesink dump=1
* ]| A pipeline to read from the default port and dump the udp packets.
* |[
* gst-launch-1.0 -v udpsrc port=0 ! fakesink
* ]| read udp packets from a free port.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
*/
/**
* SECTION:element-videobox
+ * @title: videobox
* @see_also: #GstVideoCrop
*
* This plugin crops or enlarges the image. It takes 4 values as input, a
* top, bottom, left and right offset. Positive values will crop that much
* pixels from the respective border of the image, negative values will add
- * that much pixels. When pixels are added, you can specify their color.
+ * that much pixels. When pixels are added, you can specify their color.
* Some predefined colors are usable with an enum property.
- *
+ *
* The plugin is alpha channel aware and will try to negotiate with a format
* that supports alpha channels first. When alpha channel is active two
* other properties, alpha and border_alpha can be used to set the alpha
* values of the inner picture and the border respectively. an alpha value of
* 0.0 means total transparency, 1.0 is opaque.
- *
- * The videobox plugin has many uses such as doing a mosaic of pictures,
+ *
+ * The videobox plugin has many uses such as doing a mosaic of pictures,
* letterboxing video, cutting out pieces of video, picture in picture, etc..
*
* Setting autocrop to true changes the behavior of the plugin so that
* input and output dimensions, the crop values are selected so that the
* smaller frame is effectively centered in the larger frame. This
* involves either cropping or padding.
- *
+ *
* If you use autocrop there is little point in setting the other
* properties manually because they will be overriden if the caps change,
* but nothing stops you from doing so.
- *
+ *
* Sample pipeline:
* |[
* gst-launch-1.0 videotestsrc ! videobox autocrop=true ! \
/**
* SECTION:element-aspectratiocrop
+ * @title: aspectratiocrop
* @see_also: #GstVideoCrop
*
* This element crops video frames to a specified #GstAspectRatioCrop:aspect-ratio.
* If the aspect-ratio is already correct, the element will operate
* in pass-through mode.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! video/x-raw,height=640,width=480 ! aspectratiocrop aspect-ratio=16/9 ! ximagesink
* ]| This pipeline generates a videostream in 4/3 and crops it to 16/9.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-videocrop
+ * @title: videocrop
* @see_also: #GstVideoBox
*
* This element crops video frames, meaning it can remove parts of the
* Note that no special efforts are made to handle chroma-subsampled formats
* in the case of odd-valued cropping and compensate for sub-unit chroma plane
* shifts for such formats in the case where the #GstVideoCrop:left or
- * #GstVideoCrop:top property is set to an odd number. This doesn't matter for
+ * #GstVideoCrop:top property is set to an odd number. This doesn't matter for
* most use cases, but it might matter for yours.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 -v videotestsrc ! videocrop top=42 left=1 right=4 bottom=0 ! ximagesink
* ]|
- * </refsect2>
+ *
*/
/* TODO:
/**
* SECTION:element-gamma
+ * @title: gamma
*
* Performs gamma correction on a video stream.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 videotestsrc ! gamma gamma=2.0 ! videoconvert ! ximagesink
* ]| This pipeline will make the image "brighter".
* |[
* gst-launch-1.0 videotestsrc ! gamma gamma=0.5 ! videoconvert ! ximagesink
* ]| This pipeline will make the image "darker".
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-videobalance
+ * @title: videobalance
*
* Adjusts brightness, contrast, hue, saturation on a video stream.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 videotestsrc ! videobalance saturation=0.0 ! videoconvert ! ximagesink
* ]| This pipeline converts the image to black and white by setting the
* saturation to 0.0.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-videoflip
+ * @title: videoflip
*
* Flips and rotates video.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 videotestsrc ! videoflip method=clockwise ! videoconvert ! ximagesink
* ]| This pipeline flips the test image 90 degrees clockwise.
- * </refsect2>
+ *
*/
/**
* SECTION:element-videomixer
+ * @title: videomixer
*
* Videomixer can accept AYUV, ARGB and BGRA video streams. For each of the requested
* sink pads it will compare the incoming geometry and framerate to define the
* biggest incoming video stream and the framerate of the fastest incoming one.
*
* Videomixer will do colorspace conversion.
- *
+ *
* Individual parameters for each input stream can be configured on the
* #GstVideoMixer2Pad.
*
- * <refsect2>
- * <title>Sample pipelines</title>
+ * ## Sample pipelines
* |[
* gst-launch-1.0 \
* videotestsrc pattern=1 ! \
* videomixer name=mix ! videoconvert ! ximagesink \
* videotestsrc ! \
* video/x-raw, framerate=\(fraction\)5/1, width=320, height=240 ! mix.
- * ]| A pipeline to demostrate bgra mixing. (This does not demonstrate alpha blending).
+ * ]| A pipeline to demonstrate bgra mixing. (This does not demonstrate alpha blending).
* |[
* gst-launch-1.0 videotestsrc pattern=1 ! \
* video/x-raw,format =I420, framerate=\(fraction\)10/1, width=100, height=100 ! \
* "video/x-raw,format=AYUV,width=800,height=600,framerate=(fraction)10/1" ! \
* timeoverlay ! queue2 ! mixer.
* ]| A pipeline to demonstrate synchronized mixing (the second stream starts after 3 seconds)
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-wavenc
+ * @title: wavenc
*
* Format an audio stream into the wav format.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 cdparanoiasrc mode=continuous ! queue ! audioconvert ! wavenc ! filesink location=cd.wav
* ]| Rip a whole audio CD into a single wav file, with the track table written into a CUE sheet inside the file
* |[
* gst-launch-1.0 cdparanoiasrc track=5 ! queue ! audioconvert ! wavenc ! filesink location=track5.wav
* ]| Rip track 5 of an audio CD into a single wav file containing unencoded raw audio samples.
- * </refsect2>
*
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-wavparse
+ * @title: wavparse
*
* Parse a .wav file into raw or compressed audio.
*
* Wavparse supports both push and pull mode operations, making it possible to
* stream from a network source.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 filesrc location=sine.wav ! wavparse ! audioconvert ! alsasink
* ]| Read a wav file and output to the soundcard using the ALSA element. The
* |[
* gst-launch-1.0 gnomevfssrc location=http://www.example.org/sine.wav ! queue ! wavparse ! audioconvert ! alsasink
* ]| Stream data from a network url.
- * </refsect2>
+ *
*/
/*
*/
/**
* SECTION:element-y4menc
+ * @title: y4menc
*
- * <refsect2>
- * <para>
* Creates a YU4MPEG2 raw video stream as defined by the mjpegtools project.
- * </para>
- * <title>Example launch line</title>
- * <para>
+ *
+ * ## Example launch line
+ *
* (write everything in one line, without the backslash characters)
- * <programlisting>
+ * |[
* gst-launch-1.0 videotestsrc num-buffers=250 \
* ! 'video/x-raw,format=(string)I420,width=320,height=240,framerate=(fraction)25/1' \
* ! y4menc ! filesink location=test.yuv
- * </programlisting>
- * </para>
- * </refsect2>
+ * ]|
*
*/
/**
* SECTION:element-directsoundsink
+ * @title: directsoundsink
*
* This element lets you output sound using the DirectSound API.
*
* your pipeline works under all circumstances (those conversion elements will
* act in passthrough-mode if no conversion is necessary).
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v audiotestsrc ! audioconvert ! volume volume=0.1 ! directsoundsink
* ]| will output a sine wave (continuous beep sound) to your sound card (with
* |[
* gst-launch-1.0 -v filesrc location=music.ogg ! decodebin ! audioconvert ! audioresample ! directsoundsink
* ]| will play an Ogg/Vorbis audio file and output it.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-osssink
+ * @title: osssink
*
* This element lets you output sound using the Open Sound System (OSS).
*
* your pipeline works under all circumstances (those conversion elements will
* act in passthrough-mode if no conversion is necessary).
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v audiotestsrc ! audioconvert ! volume volume=0.1 ! osssink
* ]| will output a sine wave (continuous beep sound) to your sound card (with
* |[
* gst-launch-1.0 -v filesrc location=music.ogg ! decodebin ! audioconvert ! audioresample ! osssink
* ]| will play an Ogg/Vorbis audio file and output it using the Open Sound System.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-osssrc
+ * @title: osssrc
*
* This element lets you record sound using the Open Sound System (OSS).
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v osssrc ! audioconvert ! vorbisenc ! oggmux ! filesink location=mymusic.ogg
* ]| will record sound from your sound card using OSS and encode it to an
* Ogg/Vorbis file (this will only work if your mixer settings are right
* and the right inputs enabled etc.)
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
*/
/**
* SECTION:element-oss4sink
+ * @title: oss4sink
*
* This element lets you output sound using the Open Sound System (OSS)
* version 4.
- *
+ *
* Note that you should almost always use generic audio conversion elements
* like audioconvert and audioresample in front of an audiosink to make sure
* your pipeline works under all circumstances (those conversion elements will
* act in passthrough-mode if no conversion is necessary).
- *
- * <refsect2>
- * <title>Example pipelines</title>
+ *
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v audiotestsrc ! audioconvert ! volume volume=0.1 ! oss4sink
* ]| will output a sine wave (continuous beep sound) to your sound card (with
* gst-launch-1.0 -v filesrc location=music.ogg ! decodebin ! audioconvert ! audioresample ! oss4sink
* ]| will play an Ogg/Vorbis audio file and output it using the Open Sound System
* version 4.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-oss4src
+ * @title: oss4src
*
* This element lets you record sound using the Open Sound System (OSS)
* version 4.
- *
- * <refsect2>
- * <title>Example pipelines</title>
+ *
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v oss4src ! queue ! audioconvert ! vorbisenc ! oggmux ! filesink location=mymusic.ogg
* ]| will record sound from your sound card using OSS4 and encode it to an
* Ogg/Vorbis file (this will only work if your mixer settings are right
* and the right inputs areenabled etc.)
- * </refsect2>
+ *
*/
/* FIXME: make sure we're not doing ioctls from the app thread (e.g. via the
/**
* SECTION:element-osxaudiosink
+ * @title: osxaudiosink
*
* This element renders raw audio samples using the CoreAudio api.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 filesrc location=sine.ogg ! oggdemux ! vorbisdec ! audioconvert ! audioresample ! osxaudiosink
* ]| Play an Ogg/Vorbis file.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-osxaudiosrc
+ * @title: osxaudiosrc
*
* This element captures raw audio samples using the CoreAudio api.
*
- * <refsect2>
- * <title>Example launch line</title>
+ * ## Example launch line
* |[
* gst-launch-1.0 osxaudiosrc ! wavenc ! filesink location=audio.wav
* ]|
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-v4l2radio
+ * @title: v4l2radio
*
* v4l2radio can be used to control radio device
* and to tune it to different radiostations.
*
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 v4l2radio device=/dev/radio0 frequency=101200000
* gst-launch-1.0 alsasrc device=hw:1 ! audioconvert ! audioresample ! alsasink
* ]|
* First pipeline tunes the radio device /dev/radio0 to station 101.2 MHz,
* second pipeline connects digital audio out (hw:1) to default sound card.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-v4l2sink
+ * @title: v4l2sink
*
* v4l2sink can be used to display video to v4l2 devices (screen overlays
* provided by the graphics hardware, tv-out, etc)
*
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 videotestsrc ! v4l2sink device=/dev/video1
* ]| This pipeline displays a test pattern on /dev/video1
* original video frame geometry so that the box can be drawn to the correct
* position. This also handles borders correctly, limiting coordinates to the
* image area
- * </refsect2>
+ *
*/
/**
* SECTION:element-v4l2src
+ * @title: v4l2src
*
* v4l2src can be used to capture video from v4l2 devices, like webcams and tv
* cards.
*
- * <refsect2>
- * <title>Example launch lines</title>
+ * ## Example launch lines
* |[
* gst-launch-1.0 v4l2src ! xvimagesink
* ]| This pipeline shows the video captured from /dev/video0 tv card and for
* gst-launch-1.0 v4l2src ! jpegdec ! xvimagesink
* ]| This pipeline shows the video captured from a webcam that delivers jpeg
* images.
- * </refsect2>
*
* Since 1.14, the use of libv4l2 has been disabled due to major bugs in the
* emulation layer. To enable usage of this library, set the environment
/**
* SECTION:gsttuner
+ * @title: GstTuner
* @short_description: Interface for elements providing tuner operations
- *
- * <refsect2>
- * <para>
+ *
* The GstTuner interface is provided by elements that have the ability to
* tune into multiple input signals, for example TV or radio capture cards.
- * </para><para>
+ *
* The interpretation of 'tuning into' an input stream depends on the element
* implementing the interface. For v4lsrc, it might imply selection of an
- * input source and/or frequency to be configured on a TV card. Another
+ * input source and/or frequency to be configured on a TV card. Another
* GstTuner implementation might be to allow selection of an active input pad
* from multiple input pads.
- * </para><para>
+ *
* That said, the GstTuner interface functions are biased toward the
* TV capture scenario.
- * </para><para>
+ *
* The general parameters provided are for configuration are:
- * <itemizedlist>
- * <listitem>Selection of a current #GstTunerChannel. The current channel
- * represents the input source (e.g. Composite, S-Video etc for TV capture).
- * </listitem>
- * <listitem>The #GstTunerNorm for the channel. The norm chooses the
- * interpretation of the incoming signal for the current channel. For example,
- * PAL or NTSC, or more specific variants there-of.
- * </listitem>
- * <listitem>Channel frequency. If the current channel has the ability to tune
- * between multiple frequencies (if it has the GST_TUNER_CHANNEL_FREQUENCY flag)
- * then the frequency can be changed/retrieved via the
- * gst_tuner_set_frequency() and gst_tuner_get_frequency() methods.
- * </listitem>
- * </itemizedlist>
- * </para>
- * <para>
+ *
+ * * Selection of a current #GstTunerChannel. The current channel
+ * represents the input source (e.g. Composite, S-Video etc for TV capture).
+ * * The #GstTunerNorm for the channel. The norm chooses the
+ * interpretation of the incoming signal for the current channel. For example,
+ * PAL or NTSC, or more specific variants there-of.
+ * * Channel frequency. If the current channel has the ability to tune
+ * between multiple frequencies (if it has the GST_TUNER_CHANNEL_FREQUENCY flag)
+ * then the frequency can be changed/retrieved via the
+ * gst_tuner_set_frequency() and gst_tuner_get_frequency() methods.
+ *
* Where applicable, the signal strength can be retrieved and/or monitored
* via a signal.
- * </para>
- * </refsect2>
+ *
*/
/* FIXME 0.11: check if we need to add API for sometimes-supportedness
* checked using GST_TUNER_CHANNEL_HAS_FLAG (), with the proper flag
* being GST_TUNER_CHANNEL_FREQUENCY.
*
- * The frequency is in Hz, with minimum steps indicated by the
+ * The frequency is in Hz, with minimum steps indicated by the
* frequency_multiplicator provided in the #GstTunerChannel. The
* valid range is provided in the min_frequency and max_frequency properties
* of the #GstTunerChannel.
* GST_TUNER_CHANNEL_HAS_FLAG (), and the appropriate flag to check
* for is GST_TUNER_CHANNEL_FREQUENCY.
*
- * The valid range of the signal strength is indicated in the
+ * The valid range of the signal strength is indicated in the
* min_signal and max_signal properties of the #GstTunerChannel.
*
* Returns: Signal strength, or 0 on error.
* gst_tuner_find_norm_by_name:
* @tuner: A #GstTuner instance
* @norm: A string containing the name of a #GstTunerNorm
- *
+ *
* Look up a #GstTunerNorm by name.
*
* Returns: A #GstTunerNorm, or NULL if no norm with the provided name
* gst_tuner_find_channel_by_name:
* @tuner: A #GstTuner instance
* @channel: A string containing the name of a #GstTunerChannel
- *
+ *
* Look up a #GstTunerChannel by name.
*
* Returns: A #GstTunerChannel, or NULL if no channel with the provided name
*
* Called by elements implementing the #GstTuner interface when the
* current norm changes. Fires the #GstTuner::norm-changed signal.
- *
+ *
*/
void
gst_tuner_norm_changed (GstTuner * tuner, GstTunerNorm * norm)
*
* Called by elements implementing the #GstTuner interface when the
* incoming signal strength changes. Fires the #GstTuner::signal-changed
- * signal on the tuner and the #GstTunerChannel::signal-changed signal on
+ * signal on the tuner and the #GstTunerChannel::signal-changed signal on
* the channel.
*/
void
/**
* SECTION:gsttunerchannel
+ * @title: GstTunerChannel
* @short_description: A channel from an element implementing the #GstTuner
* interface.
*
- * <refsect2>
- * <para>The #GstTunerChannel object is provided by an element implementing
+ * The #GstTunerChannel object is provided by an element implementing
* the #GstTuner interface.
- * </para>
- * <para>
+ *
* GstTunerChannel provides a name and flags to determine the type and
* capabilities of the channel. If the GST_TUNER_CHANNEL_FREQUENCY flag is
* set, then the channel also information about the minimum and maximum
* frequency, and range of the reported signal strength.
- * </para>
- * </refsect2>
*/
enum
/**
* SECTION:gsttunernorm
+ * @title: GstTunerNorm
* @short_description: Encapsulates information about the data format(s)
* for a #GstTunerChannel.
*
- * <refsect2>
- * <para>The #GstTunerNorm object is created by an element implementing the
+ * The #GstTunerNorm object is created by an element implementing the
* #GstTuner interface and encapsulates the selection of a capture/output format
* for a selected #GstTunerChannel.
- * </para>
- * </refsect2>
+ *
*/
enum
/**
* SECTION:element-waveformsink
+ * @title: waveformsink
*
* This element lets you output sound using the Windows WaveForm API.
*
* your pipeline works under all circumstances (those conversion elements will
* act in passthrough-mode if no conversion is necessary).
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 -v audiotestsrc ! audioconvert ! volume volume=0.1 ! waveformsink
* ]| will output a sine wave (continuous beep sound) to your sound card (with
* |[
* gst-launch-1.0 -v filesrc location=music.ogg ! decodebin ! audioconvert ! audioresample ! waveformsink
* ]| will play an Ogg/Vorbis audio file and output it.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H
/**
* SECTION:element-ximagesrc
+ * @title: ximagesrc
*
* This element captures your X Display and creates raw RGB video. It uses
* the XDamage extension if available to only capture areas of the screen that
* available to also capture your mouse pointer. By default it will fixate to
* 25 frames per second.
*
- * <refsect2>
- * <title>Example pipelines</title>
+ * ## Example pipelines
* |[
* gst-launch-1.0 ximagesrc ! video/x-raw,framerate=5/1 ! videoconvert ! theoraenc ! oggmux ! filesink location=desktop.ogg
* ]| Encodes your X display to an Ogg theora video at 5 frames per second.
- * </refsect2>
+ *
*/
#ifdef HAVE_CONFIG_H