* Boston, MA 02111-1307, USA.
*/
+/**
+ * SECTION:element-ffmpegcolorspace
+ *
+ * Converts video from one colorspace to another.
+ *
+ * <refsect2>
+ * <title>Example launch line</title>
+ * <para>
+ * <programlisting>
+ * gst-launch -v videotestsrc ! video/x-raw-yuv,format=\(fourcc\)YUY2 ! ffmpegcolorspace ! ximagesink
+ * </programlisting>
+ * </para>
+ * </refsect2>
+ */
+
#ifdef HAVE_CONFIG_H
-#include "config.h"
+# include "config.h"
#endif
-#include <gst/gst.h>
-#include <gst/base/gstbasetransform.h>
-#include <avcodec.h>
-
+#include "gstffmpegcolorspace.h"
#include "gstffmpegcodecmap.h"
-GST_DEBUG_CATEGORY (ffmpegcolorspace_debug);
+GST_DEBUG_CATEGORY_STATIC (ffmpegcolorspace_debug);
#define GST_CAT_DEFAULT ffmpegcolorspace_debug
-#define GST_TYPE_FFMPEGCSP \
- (gst_ffmpegcsp_get_type())
-#define GST_FFMPEGCSP(obj) \
- (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGCSP,GstFFMpegCsp))
-#define GST_FFMPEGCSP_CLASS(klass) \
- (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGCSP,GstFFMpegCsp))
-#define GST_IS_FFMPEGCSP(obj) \
- (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGCSP))
-#define GST_IS_FFMPEGCSP_CLASS(obj) \
- (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGCSP))
-
-typedef struct _GstFFMpegCsp GstFFMpegCsp;
-typedef struct _GstFFMpegCspClass GstFFMpegCspClass;
-
-struct _GstFFMpegCsp
-{
- GstBaseTransform element;
-
- gint width, height;
- gfloat fps;
- enum PixelFormat from_pixfmt, to_pixfmt;
- AVPicture from_frame, to_frame;
- AVPaletteControl *palette;
-};
-
-struct _GstFFMpegCspClass
-{
- GstBaseTransformClass parent_class;
-};
-
/* elementfactory information */
-static GstElementDetails ffmpegcsp_details = {
- "FFMPEG Colorspace converter",
- "Filter/Converter/Video",
- "Converts video from one colorspace to another",
- "Ronald Bultje <rbultje@ronald.bitfreak.net>",
-};
+static const GstElementDetails ffmpegcsp_details =
+GST_ELEMENT_DETAILS ("FFMPEG Colorspace converter",
+ "Filter/Converter/Video",
+ "Converts video from one colorspace to another",
+ "Ronald Bultje <rbultje@ronald.bitfreak.net>");
/* Stereo signals and args */
GstCaps * caps, guint * size);
static GstFlowReturn gst_ffmpegcsp_transform (GstBaseTransform * btrans,
GstBuffer * inbuf, GstBuffer * outbuf);
+#if 0
static GstFlowReturn gst_ffmpegcsp_transform_ip (GstBaseTransform * btrans,
GstBuffer * inbuf);
+#endif
static GstPadTemplate *sinktempl, *srctempl;
static GstElementClass *parent_class = NULL;
/*static guint gst_ffmpegcsp_signals[LAST_SIGNAL] = { 0 }; */
+/* returns a copy of the given caps with the format-specific fields
+ * (masks, palette_data, ...) removed */
static GstCaps *
gst_ffmpegcsp_caps_remove_format_info (GstCaps * caps)
{
int i;
GstStructure *structure;
GstCaps *rgbcaps;
+ GstCaps *graycaps;
caps = gst_caps_copy (caps);
gst_structure_remove_field (structure, "green_mask");
gst_structure_remove_field (structure, "blue_mask");
gst_structure_remove_field (structure, "alpha_mask");
+ gst_structure_remove_field (structure, "palette_data");
}
gst_caps_do_simplify (caps);
gst_structure_set_name (structure, "video/x-raw-rgb");
}
+ graycaps = gst_caps_copy (caps);
+ for (i = 0; i < gst_caps_get_size (graycaps); i++) {
+ structure = gst_caps_get_structure (graycaps, i);
+
+ gst_structure_set_name (structure, "video/x-raw-gray");
+ }
+
+ gst_caps_append (caps, graycaps);
gst_caps_append (caps, rgbcaps);
return caps;
}
+/* The caps can be transformed into any other caps with format info removed.
+ * However, we should prefer passthrough, so if passthrough is possible,
+ * put it first in the list. */
static GstCaps *
gst_ffmpegcsp_transform_caps (GstBaseTransform * btrans,
GstPadDirection direction, GstCaps * caps)
{
GstFFMpegCsp *space;
+  GstCaps *templ;
GstCaps *result;
space = GST_FFMPEGCSP (btrans);
-  result = gst_ffmpegcsp_caps_remove_format_info (caps);
+  /* formats we handle natively come first: this keeps passthrough
+   * (identical caps) as the preferred transform */
+  templ = gst_ffmpegcsp_codectype_to_caps (CODEC_TYPE_VIDEO, NULL);
+  result = gst_caps_intersect (caps, templ);
+  gst_caps_unref (templ);
+
+  /* then anything else, with the format-specific fields stripped */
+  gst_caps_append (result, gst_ffmpegcsp_caps_remove_format_info (caps));
+
+  GST_DEBUG_OBJECT (btrans, "transformed %" GST_PTR_FORMAT " into %"
+      GST_PTR_FORMAT, caps, result);
return result;
}
GstStructure *structure;
gint in_height, in_width;
gint out_height, out_width;
- gdouble in_framerate, out_framerate;
+ const GValue *in_framerate = NULL;
+ const GValue *out_framerate = NULL;
const GValue *in_par = NULL;
const GValue *out_par = NULL;
AVCodecContext *ctx;
+ gboolean res;
space = GST_FFMPEGCSP (btrans);
/* parse in and output values */
structure = gst_caps_get_structure (incaps, 0);
- gst_structure_get_int (structure, "width", &in_width);
- gst_structure_get_int (structure, "height", &in_height);
- gst_structure_get_double (structure, "framerate", &in_framerate);
+
+ /* we have to have width and height */
+ res = gst_structure_get_int (structure, "width", &in_width);
+ res &= gst_structure_get_int (structure, "height", &in_height);
+ if (!res)
+ goto no_width_height;
+
+ /* and framerate */
+ in_framerate = gst_structure_get_value (structure, "framerate");
+ if (in_framerate == NULL || !GST_VALUE_HOLDS_FRACTION (in_framerate))
+ goto no_framerate;
+
+ /* this is optional */
in_par = gst_structure_get_value (structure, "pixel-aspect-ratio");
structure = gst_caps_get_structure (outcaps, 0);
- gst_structure_get_int (structure, "width", &out_width);
- gst_structure_get_int (structure, "height", &out_height);
- gst_structure_get_double (structure, "framerate", &out_framerate);
+
+ /* we have to have width and height */
+ res = gst_structure_get_int (structure, "width", &out_width);
+ res &= gst_structure_get_int (structure, "height", &out_height);
+ if (!res)
+ goto no_width_height;
+
+ /* and framerate */
+ out_framerate = gst_structure_get_value (structure, "framerate");
+ if (out_framerate == NULL || !GST_VALUE_HOLDS_FRACTION (out_framerate))
+ goto no_framerate;
+
+ /* this is optional */
out_par = gst_structure_get_value (structure, "pixel-aspect-ratio");
+ /* these must match */
if (in_width != out_width || in_height != out_height ||
- in_framerate != out_framerate)
+ gst_value_compare (in_framerate, out_framerate) != GST_VALUE_EQUAL)
goto format_mismatch;
+ /* if present, these must match too */
if (in_par && out_par
&& gst_value_compare (in_par, out_par) != GST_VALUE_EQUAL)
goto format_mismatch;
if (space->palette)
av_free (space->palette);
space->palette = ctx->palctrl;
+ ctx->palctrl = NULL;
/* get to format */
ctx->pix_fmt = PIX_FMT_NB;
return TRUE;
/* ERRORS */
+no_width_height:
+ {
+ GST_DEBUG_OBJECT (space, "did not specify width or height");
+ space->from_pixfmt = PIX_FMT_NB;
+ space->to_pixfmt = PIX_FMT_NB;
+ return FALSE;
+ }
+no_framerate:
+ {
+ GST_DEBUG_OBJECT (space, "did not specify framerate");
+ space->from_pixfmt = PIX_FMT_NB;
+ space->to_pixfmt = PIX_FMT_NB;
+ return FALSE;
+ }
format_mismatch:
{
- GST_DEBUG ("input and output formats do not match");
+ GST_DEBUG_OBJECT (space, "input and output formats do not match");
space->from_pixfmt = PIX_FMT_NB;
space->to_pixfmt = PIX_FMT_NB;
return FALSE;
}
invalid_in_caps:
{
- GST_DEBUG ("could not configure context for input format");
+ GST_DEBUG_OBJECT (space, "could not configure context for input format");
av_free (ctx);
space->from_pixfmt = PIX_FMT_NB;
space->to_pixfmt = PIX_FMT_NB;
}
invalid_out_caps:
{
- GST_DEBUG ("could not configure context for output format");
+ GST_DEBUG_OBJECT (space, "could not configure context for output format");
av_free (ctx);
space->from_pixfmt = PIX_FMT_NB;
space->to_pixfmt = PIX_FMT_NB;
};
ffmpegcsp_type = g_type_register_static (GST_TYPE_BASE_TRANSFORM,
- "GstFFMpegColorspace", &ffmpegcsp_info, 0);
+ "GstFFMpegCsp", &ffmpegcsp_info, 0);
}
return ffmpegcsp_type;
}
static void
+gst_ffmpegcsp_finalize (GObject * obj)
+{
+  GstFFMpegCsp *space = GST_FFMPEGCSP (obj);
+
+  /* free the palette we took ownership of in set_caps
+   * (space->palette = ctx->palctrl); it is ffmpeg-allocated memory,
+   * hence av_free rather than g_free */
+  if (space->palette)
+    av_free (space->palette);
+
+  G_OBJECT_CLASS (parent_class)->finalize (obj);
+}
+
+static void
gst_ffmpegcsp_class_init (GstFFMpegCspClass * klass)
{
GObjectClass *gobject_class;
gstelement_class = (GstElementClass *) klass;
gstbasetransform_class = (GstBaseTransformClass *) klass;
- parent_class = g_type_class_ref (GST_TYPE_BASE_TRANSFORM);
+ parent_class = g_type_class_peek_parent (klass);
+
+ gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_ffmpegcsp_finalize);
gstbasetransform_class->transform_caps =
GST_DEBUG_FUNCPTR (gst_ffmpegcsp_transform_caps);
GST_DEBUG_FUNCPTR (gst_ffmpegcsp_get_unit_size);
gstbasetransform_class->transform =
GST_DEBUG_FUNCPTR (gst_ffmpegcsp_transform);
+#if 0
gstbasetransform_class->transform_ip =
GST_DEBUG_FUNCPTR (gst_ffmpegcsp_transform_ip);
+#endif
+
+ gstbasetransform_class->passthrough_on_same_caps = TRUE;
GST_DEBUG_CATEGORY_INIT (ffmpegcolorspace_debug, "ffmpegcolorspace", 0,
"FFMPEG-based colorspace converter");
static void
gst_ffmpegcsp_init (GstFFMpegCsp * space)
{
+  /* enable QoS handling in the base transform by default */
+  gst_base_transform_set_qos_enabled (GST_BASE_TRANSFORM (space), TRUE);
+  /* PIX_FMT_NB doubles as the "no format negotiated" sentinel,
+   * matching the error paths in set_caps */
space->from_pixfmt = space->to_pixfmt = PIX_FMT_NB;
space->palette = NULL;
}
gst_ffmpegcsp_get_unit_size (GstBaseTransform * btrans, GstCaps * caps,
guint * size)
{
- GstFFMpegCsp *space;
+ GstFFMpegCsp *space = NULL;
+ GstStructure *structure = NULL;
+ AVCodecContext *ctx = NULL;
+ gint width, height;
- g_return_val_if_fail (size, FALSE);
+ g_assert (size);
space = GST_FFMPEGCSP (btrans);
- if (gst_caps_is_equal (caps, GST_PAD_CAPS (btrans->srcpad))) {
- *size = avpicture_get_size (space->to_pixfmt, space->width, space->height);
- } else if (gst_caps_is_equal (caps, GST_PAD_CAPS (btrans->sinkpad))) {
- *size =
- avpicture_get_size (space->from_pixfmt, space->width, space->height);
+
+ structure = gst_caps_get_structure (caps, 0);
+ gst_structure_get_int (structure, "width", &width);
+ gst_structure_get_int (structure, "height", &height);
+
+ ctx = avcodec_alloc_context ();
+
+ g_assert (ctx != NULL);
+
+ gst_ffmpegcsp_caps_with_codectype (CODEC_TYPE_VIDEO, caps, ctx);
+
+ *size = avpicture_get_size (ctx->pix_fmt, width, height);
+
+ /* ffmpeg frames have the palette after the frame data, whereas
+ * GStreamer currently puts it into the caps as 'palette_data' field,
+ * so for paletted data the frame size avpicture_get_size() returns is
+ * 1024 bytes larger than what GStreamer expects. */
+ if (gst_structure_has_field (structure, "palette_data")) {
+ *size -= 4 * 256; /* = AVPALETTE_SIZE */
}
+ if (ctx->palctrl)
+ av_free (ctx->palctrl);
+ av_free (ctx);
+
return TRUE;
}
+#if 0
+/* FIXME: Could use transform_ip to implement endianness swap type operations */
static GstFlowReturn
gst_ffmpegcsp_transform_ip (GstBaseTransform * btrans, GstBuffer * inbuf)
{
/* do nothing */
return GST_FLOW_OK;
}
+#endif
static GstFlowReturn
gst_ffmpegcsp_transform (GstBaseTransform * btrans, GstBuffer * inbuf,
GstBuffer * outbuf)
{
GstFFMpegCsp *space;
+ gint result;
space = GST_FFMPEGCSP (btrans);
/* fill optional palette */
if (space->palette)
- space->from_frame.data[1] = (uint8_t *) space->palette;
+ space->from_frame.data[1] = (uint8_t *) space->palette->palette;
/* fill target frame */
gst_ffmpegcsp_avpicture_fill (&space->to_frame,
GST_BUFFER_DATA (outbuf), space->to_pixfmt, space->width, space->height);
/* and convert */
- img_convert (&space->to_frame, space->to_pixfmt,
+ result = img_convert (&space->to_frame, space->to_pixfmt,
&space->from_frame, space->from_pixfmt, space->width, space->height);
+ if (result == -1)
+ goto not_supported;
/* copy timestamps */
gst_buffer_stamp (outbuf, inbuf);
("attempting to convert colorspaces between unknown formats"));
return GST_FLOW_NOT_NEGOTIATED;
}
+not_supported:
+ {
+ GST_ELEMENT_ERROR (space, CORE, NOT_IMPLEMENTED, (NULL),
+ ("cannot convert between formats"));
+ return GST_FLOW_NOT_SUPPORTED;
+ }
}
gboolean