GST_STATIC_PAD_TEMPLATE ("src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
- GST_STATIC_CAPS (GST_VIDEO_CAPS_YUV ("I420") "; "
- GST_VIDEO_CAPS_RGB "; " GST_VIDEO_CAPS_BGR "; "
- GST_VIDEO_CAPS_RGBx "; " GST_VIDEO_CAPS_xRGB "; "
- GST_VIDEO_CAPS_BGRx "; " GST_VIDEO_CAPS_xBGR "; "
- GST_VIDEO_CAPS_GRAY8)
+ GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE
+ ("{ I420, RGB, BGR, RGBx, xRGB, BGRx, xBGR, GRAY8 }"))
);
+
/* *INDENT-ON* */
+ /* FIXME: sof-marker is for IJG libjpeg 8, should be different for 6.2 */
static GstStaticPadTemplate gst_jpeg_dec_sink_pad_template =
GST_STATIC_PAD_TEMPLATE ("sink",
GST_PAD_SINK,
GST_STATIC_CAPS ("image/jpeg, "
"width = (int) [ " G_STRINGIFY (MIN_WIDTH) ", " G_STRINGIFY (MAX_WIDTH)
" ], " "height = (int) [ " G_STRINGIFY (MIN_HEIGHT) ", "
- G_STRINGIFY (MAX_HEIGHT) " ], " "framerate = (fraction) [ 0/1, MAX ]")
+ G_STRINGIFY (MAX_HEIGHT) " ], framerate = (fraction) [ 0/1, MAX ], "
+ "sof-marker = (int) { 0, 1, 2, 5, 6, 7, 9, 10, 13, 14 }")
);
GST_DEBUG_CATEGORY_STATIC (jpeg_dec_debug);
#define GST_CAT_DEFAULT jpeg_dec_debug
GST_DEBUG_CATEGORY_STATIC (GST_CAT_PERFORMANCE);
-/* These macros are adapted from videotestsrc.c
- * and/or gst-plugins/gst/games/gstvideoimage.c */
-#define I420_Y_ROWSTRIDE(width) (GST_ROUND_UP_4(width))
-#define I420_U_ROWSTRIDE(width) (GST_ROUND_UP_8(width)/2)
-#define I420_V_ROWSTRIDE(width) ((GST_ROUND_UP_8(I420_Y_ROWSTRIDE(width)))/2)
-
-#define I420_Y_OFFSET(w,h) (0)
-#define I420_U_OFFSET(w,h) (I420_Y_OFFSET(w,h)+(I420_Y_ROWSTRIDE(w)*GST_ROUND_UP_2(h)))
-#define I420_V_OFFSET(w,h) (I420_U_OFFSET(w,h)+(I420_U_ROWSTRIDE(w)*GST_ROUND_UP_2(h)/2))
-
-#define I420_SIZE(w,h) (I420_V_OFFSET(w,h)+(I420_V_ROWSTRIDE(w)*GST_ROUND_UP_2(h)/2))
-
-static GstElementClass *parent_class; /* NULL */
-
-static void gst_jpeg_dec_base_init (gpointer g_class);
-static void gst_jpeg_dec_class_init (GstJpegDecClass * klass);
-static void gst_jpeg_dec_init (GstJpegDec * jpegdec);
-
static void gst_jpeg_dec_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec);
static void gst_jpeg_dec_get_property (GObject * object, guint prop_id,
GValue * value, GParamSpec * pspec);
static GstFlowReturn gst_jpeg_dec_chain (GstPad * pad, GstBuffer * buffer);
-static gboolean gst_jpeg_dec_setcaps (GstPad * pad, GstCaps * caps);
-static GstCaps *gst_jpeg_dec_getcaps (GstPad * pad);
+static GstCaps *gst_jpeg_dec_getcaps (GstPad * pad, GstCaps * filter);
static gboolean gst_jpeg_dec_sink_event (GstPad * pad, GstEvent * event);
static gboolean gst_jpeg_dec_src_event (GstPad * pad, GstEvent * event);
static GstStateChangeReturn gst_jpeg_dec_change_state (GstElement * element,
static void gst_jpeg_dec_read_qos (GstJpegDec * dec, gdouble * proportion,
GstClockTime * time);
-GType
-gst_jpeg_dec_get_type (void)
-{
- static GType type = 0;
-
- if (!type) {
- static const GTypeInfo jpeg_dec_info = {
- sizeof (GstJpegDecClass),
- (GBaseInitFunc) gst_jpeg_dec_base_init,
- NULL,
- (GClassInitFunc) gst_jpeg_dec_class_init,
- NULL,
- NULL,
- sizeof (GstJpegDec),
- 0,
- (GInstanceInitFunc) gst_jpeg_dec_init,
- };
-
- type = g_type_register_static (GST_TYPE_ELEMENT, "GstJpegDec",
- &jpeg_dec_info, 0);
- }
- return type;
-}
+#define gst_jpeg_dec_parent_class parent_class
+G_DEFINE_TYPE (GstJpegDec, gst_jpeg_dec, GST_TYPE_ELEMENT);
static void
gst_jpeg_dec_finalize (GObject * object)
}
static void
-gst_jpeg_dec_base_init (gpointer g_class)
-{
- GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
-
- gst_element_class_add_pad_template (element_class,
- gst_static_pad_template_get (&gst_jpeg_dec_src_pad_template));
- gst_element_class_add_pad_template (element_class,
- gst_static_pad_template_get (&gst_jpeg_dec_sink_pad_template));
- gst_element_class_set_details_simple (element_class, "JPEG image decoder",
- "Codec/Decoder/Image",
- "Decode images from JPEG format", "Wim Taymans <wim@fluendo.com>");
-}
-
-static void
gst_jpeg_dec_class_init (GstJpegDecClass * klass)
{
GstElementClass *gstelement_class;
-1, G_MAXINT, JPEG_DEFAULT_MAX_ERRORS,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+ gst_element_class_add_pad_template (gstelement_class,
+ gst_static_pad_template_get (&gst_jpeg_dec_src_pad_template));
+ gst_element_class_add_pad_template (gstelement_class,
+ gst_static_pad_template_get (&gst_jpeg_dec_sink_pad_template));
+ gst_element_class_set_details_simple (gstelement_class, "JPEG image decoder",
+ "Codec/Decoder/Image",
+ "Decode images from JPEG format", "Wim Taymans <wim@fluendo.com>");
+
gstelement_class->change_state =
GST_DEBUG_FUNCPTR (gst_jpeg_dec_change_state);
gst_pad_new_from_static_template (&gst_jpeg_dec_sink_pad_template,
"sink");
gst_element_add_pad (GST_ELEMENT (dec), dec->sinkpad);
- gst_pad_set_setcaps_function (dec->sinkpad,
- GST_DEBUG_FUNCPTR (gst_jpeg_dec_setcaps));
gst_pad_set_getcaps_function (dec->sinkpad,
GST_DEBUG_FUNCPTR (gst_jpeg_dec_getcaps));
gst_pad_set_chain_function (dec->sinkpad,
}
static gboolean
-gst_jpeg_dec_setcaps (GstPad * pad, GstCaps * caps)
+gst_jpeg_dec_setcaps (GstJpegDec * dec, GstCaps * caps)
{
GstStructure *s;
- GstJpegDec *dec;
const GValue *framerate;
- dec = GST_JPEG_DEC (GST_OBJECT_PARENT (pad));
s = gst_caps_get_structure (caps, 0);
if ((framerate = gst_structure_get_value (s, "framerate")) != NULL) {
- dec->framerate_numerator = gst_value_get_fraction_numerator (framerate);
- dec->framerate_denominator = gst_value_get_fraction_denominator (framerate);
+ dec->in_fps_n = gst_value_get_fraction_numerator (framerate);
+ dec->in_fps_d = gst_value_get_fraction_denominator (framerate);
dec->packetized = TRUE;
GST_DEBUG ("got framerate of %d/%d fps => packetized mode",
- dec->framerate_numerator, dec->framerate_denominator);
+ dec->in_fps_n, dec->in_fps_d);
}
/* do not extract width/height here. we do that in the chain
}
static GstCaps *
-gst_jpeg_dec_getcaps (GstPad * pad)
+gst_jpeg_dec_getcaps (GstPad * pad, GstCaps * filter)
{
GstJpegDec *dec;
GstCaps *caps;
dec = GST_JPEG_DEC (GST_OBJECT_PARENT (pad));
- if (GST_PAD_CAPS (pad))
- return gst_caps_ref (GST_PAD_CAPS (pad));
+ if (gst_pad_has_current_caps (pad))
+ return gst_pad_get_current_caps (pad);
peer = gst_pad_get_peer (dec->srcpad);
GstStructure *s;
guint i, n;
- peer_caps = gst_pad_get_caps (peer);
+ peer_caps = gst_pad_get_caps (peer, filter);
/* Translate peercaps to image/jpeg */
peer_caps = gst_caps_make_writable (peer_caps);
{
gint i;
- if (G_LIKELY (dec->idr_width_allocated == maxrowbytes))
+ if (G_LIKELY (dec->idr_width_allocated >= maxrowbytes))
return TRUE;
/* FIXME: maybe just alloc one or three blocks altogether? */
}
static void
-gst_jpeg_dec_decode_grayscale (GstJpegDec * dec, guchar * base[1],
- guint width, guint height, guint pstride, guint rstride)
+gst_jpeg_dec_decode_grayscale (GstJpegDec * dec, GstVideoFrame * frame)
{
guchar *rows[16];
guchar **scanarray[1] = { rows };
gint i, j, k;
gint lines;
+ guint8 *base[1];
+ gint width, height;
+ gint pstride, rstride;
GST_DEBUG_OBJECT (dec, "indirect decoding of grayscale");
+ width = GST_VIDEO_FRAME_WIDTH (frame);
+ height = GST_VIDEO_FRAME_HEIGHT (frame);
+
if (G_UNLIKELY (!gst_jpeg_dec_ensure_buffers (dec, GST_ROUND_UP_32 (width))))
return;
+ base[0] = GST_VIDEO_FRAME_COMP_DATA (frame, 0);
+ pstride = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 0);
+ rstride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
+
memcpy (rows, dec->idr_y, 16 * sizeof (gpointer));
i = 0;
}
static void
-gst_jpeg_dec_decode_rgb (GstJpegDec * dec, guchar * base[3],
- guint width, guint height, guint pstride, guint rstride)
+gst_jpeg_dec_decode_rgb (GstJpegDec * dec, GstVideoFrame * frame)
{
guchar *r_rows[16], *g_rows[16], *b_rows[16];
guchar **scanarray[3] = { r_rows, g_rows, b_rows };
gint i, j, k;
gint lines;
+ guint8 *base[3];
+ guint pstride, rstride;
+ gint width, height;
GST_DEBUG_OBJECT (dec, "indirect decoding of RGB");
+ width = GST_VIDEO_FRAME_WIDTH (frame);
+ height = GST_VIDEO_FRAME_HEIGHT (frame);
+
if (G_UNLIKELY (!gst_jpeg_dec_ensure_buffers (dec, GST_ROUND_UP_32 (width))))
return;
+ for (i = 0; i < 3; i++)
+ base[i] = GST_VIDEO_FRAME_COMP_DATA (frame, i);
+
+ pstride = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 0);
+ rstride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0);
+
memcpy (r_rows, dec->idr_y, 16 * sizeof (gpointer));
memcpy (g_rows, dec->idr_u, 16 * sizeof (gpointer));
memcpy (b_rows, dec->idr_v, 16 * sizeof (gpointer));
}
static void
-gst_jpeg_dec_decode_indirect (GstJpegDec * dec, guchar * base[3],
- guchar * last[3], guint width, guint height, gint r_v, gint r_h, gint comp)
+gst_jpeg_dec_decode_indirect (GstJpegDec * dec, GstVideoFrame * frame,
+ gint r_v, gint r_h, gint comp)
{
guchar *y_rows[16], *u_rows[16], *v_rows[16];
guchar **scanarray[3] = { y_rows, u_rows, v_rows };
gint i, j, k;
gint lines;
+ guchar *base[3], *last[3];
+ gint stride[3];
+ gint width, height;
GST_DEBUG_OBJECT (dec,
"unadvantageous width or r_h, taking slow route involving memcpy");
+ width = GST_VIDEO_FRAME_WIDTH (frame);
+ height = GST_VIDEO_FRAME_HEIGHT (frame);
+
if (G_UNLIKELY (!gst_jpeg_dec_ensure_buffers (dec, GST_ROUND_UP_32 (width))))
return;
+ for (i = 0; i < 3; i++) {
+ base[i] = GST_VIDEO_FRAME_COMP_DATA (frame, i);
+ stride[i] = GST_VIDEO_FRAME_COMP_STRIDE (frame, i);
+ /* make sure we don't make jpeglib write beyond our buffer,
+ * which might happen if (height % (r_v*DCTSIZE)) != 0 */
+ last[i] = base[i] + (GST_VIDEO_FRAME_COMP_STRIDE (frame, i) *
+ (GST_VIDEO_FRAME_COMP_HEIGHT (frame, i) - 1));
+ }
+
memcpy (y_rows, dec->idr_y, 16 * sizeof (gpointer));
memcpy (u_rows, dec->idr_u, 16 * sizeof (gpointer));
memcpy (v_rows, dec->idr_v, 16 * sizeof (gpointer));
if (G_LIKELY (lines > 0)) {
for (j = 0, k = 0; j < (r_v * DCTSIZE); j += r_v, k++) {
if (G_LIKELY (base[0] <= last[0])) {
- memcpy (base[0], y_rows[j], I420_Y_ROWSTRIDE (width));
- base[0] += I420_Y_ROWSTRIDE (width);
+ memcpy (base[0], y_rows[j], stride[0]);
+ base[0] += stride[0];
}
if (r_v == 2) {
if (G_LIKELY (base[0] <= last[0])) {
- memcpy (base[0], y_rows[j + 1], I420_Y_ROWSTRIDE (width));
- base[0] += I420_Y_ROWSTRIDE (width);
+ memcpy (base[0], y_rows[j + 1], stride[0]);
+ base[0] += stride[0];
}
}
if (G_LIKELY (base[1] <= last[1] && base[2] <= last[2])) {
if (r_h == 2) {
- memcpy (base[1], u_rows[k], I420_U_ROWSTRIDE (width));
- memcpy (base[2], v_rows[k], I420_V_ROWSTRIDE (width));
+ memcpy (base[1], u_rows[k], stride[1]);
+ memcpy (base[2], v_rows[k], stride[2]);
} else if (r_h == 1) {
- hresamplecpy1 (base[1], u_rows[k], I420_U_ROWSTRIDE (width));
- hresamplecpy1 (base[2], v_rows[k], I420_V_ROWSTRIDE (width));
+ hresamplecpy1 (base[1], u_rows[k], stride[1]);
+ hresamplecpy1 (base[2], v_rows[k], stride[2]);
} else {
/* FIXME: implement (at least we avoid crashing by doing nothing) */
}
}
if (r_v == 2 || (k & 1) != 0) {
- base[1] += I420_U_ROWSTRIDE (width);
- base[2] += I420_V_ROWSTRIDE (width);
+ base[1] += stride[1];
+ base[2] += stride[2];
}
}
} else {
}
}
-#ifndef GST_DISABLE_GST_DEBUG
-static inline void
-dump_lines (guchar * base[3], guchar ** line[3], int v_samp0, int width)
-{
- int j;
-
- for (j = 0; j < (v_samp0 * DCTSIZE); ++j) {
- GST_LOG ("[%02d] %5d %5d %5d", j,
- (line[0][j] >= base[0]) ?
- (int) (line[0][j] - base[0]) / I420_Y_ROWSTRIDE (width) : -1,
- (line[1][j] >= base[1]) ?
- (int) (line[1][j] - base[1]) / I420_U_ROWSTRIDE (width) : -1,
- (line[2][j] >= base[2]) ?
- (int) (line[2][j] - base[2]) / I420_V_ROWSTRIDE (width) : -1);
- }
-}
-#endif
-
static GstFlowReturn
-gst_jpeg_dec_decode_direct (GstJpegDec * dec, guchar * base[3],
- guchar * last[3], guint width, guint height)
+gst_jpeg_dec_decode_direct (GstJpegDec * dec, GstVideoFrame * frame)
{
guchar **line[3]; /* the jpeg line buffer */
guchar *y[4 * DCTSIZE] = { NULL, }; /* alloc enough for the lines */
guchar *v[4 * DCTSIZE] = { NULL, };
gint i, j;
gint lines, v_samp[3];
+ guchar *base[3], *last[3];
+ gint stride[3];
+ guint height;
line[0] = y;
line[1] = u;
if (G_UNLIKELY (v_samp[0] > 2 || v_samp[1] > 2 || v_samp[2] > 2))
goto format_not_supported;
+ height = GST_VIDEO_FRAME_HEIGHT (frame);
+
+ for (i = 0; i < 3; i++) {
+ base[i] = GST_VIDEO_FRAME_COMP_DATA (frame, i);
+ stride[i] = GST_VIDEO_FRAME_COMP_STRIDE (frame, i);
+ /* make sure we don't make jpeglib write beyond our buffer,
+ * which might happen if (height % (r_v*DCTSIZE)) != 0 */
+ last[i] = base[i] + (GST_VIDEO_FRAME_COMP_STRIDE (frame, i) *
+ (GST_VIDEO_FRAME_COMP_HEIGHT (frame, i) - 1));
+ }
+
/* let jpeglib decode directly into our final buffer */
GST_DEBUG_OBJECT (dec, "decoding directly into output buffer");
for (i = 0; i < height; i += v_samp[0] * DCTSIZE) {
for (j = 0; j < (v_samp[0] * DCTSIZE); ++j) {
/* Y */
- line[0][j] = base[0] + (i + j) * I420_Y_ROWSTRIDE (width);
+ line[0][j] = base[0] + (i + j) * stride[0];
if (G_UNLIKELY (line[0][j] > last[0]))
line[0][j] = last[0];
/* U */
if (v_samp[1] == v_samp[0]) {
- line[1][j] = base[1] + ((i + j) / 2) * I420_U_ROWSTRIDE (width);
+ line[1][j] = base[1] + ((i + j) / 2) * stride[1];
} else if (j < (v_samp[1] * DCTSIZE)) {
- line[1][j] = base[1] + ((i / 2) + j) * I420_U_ROWSTRIDE (width);
+ line[1][j] = base[1] + ((i / 2) + j) * stride[1];
}
if (G_UNLIKELY (line[1][j] > last[1]))
line[1][j] = last[1];
/* V */
if (v_samp[2] == v_samp[0]) {
- line[2][j] = base[2] + ((i + j) / 2) * I420_V_ROWSTRIDE (width);
+ line[2][j] = base[2] + ((i + j) / 2) * stride[2];
} else if (j < (v_samp[2] * DCTSIZE)) {
- line[2][j] = base[2] + ((i / 2) + j) * I420_V_ROWSTRIDE (width);
+ line[2][j] = base[2] + ((i / 2) + j) * stride[2];
}
if (G_UNLIKELY (line[2][j] > last[2]))
line[2][j] = last[2];
}
- /* dump_lines (base, line, v_samp[0], width); */
-
lines = jpeg_read_raw_data (&dec->cinfo, line, v_samp[0] * DCTSIZE);
if (G_UNLIKELY (!lines)) {
GST_INFO_OBJECT (dec, "jpeg_read_raw_data() returned 0");
GST_OBJECT_LOCK (dec);
dec->proportion = proportion;
if (G_LIKELY (ts != GST_CLOCK_TIME_NONE)) {
- if (G_UNLIKELY (diff > 0))
+ if (G_UNLIKELY (diff > dec->qos_duration))
dec->earliest_time = ts + 2 * diff + dec->qos_duration;
else
dec->earliest_time = ts + diff;
return TRUE;
}
-static void
+static gboolean
+gst_jpeg_dec_buffer_pool (GstJpegDec * dec, GstCaps * caps)
+{
+ GstQuery *query;
+ GstBufferPool *pool = NULL;
+ guint size, min, max, prefix, alignment;
+ GstStructure *config;
+
+ GST_DEBUG_OBJECT (dec, "setting up bufferpool");
+
+ /* find a pool for the negotiated caps now */
+ query = gst_query_new_allocation (caps, TRUE);
+
+ if (gst_pad_peer_query (dec->srcpad, query)) {
+ /* we got configuration from our peer, parse them */
+ gst_query_parse_allocation_params (query, &size, &min, &max, &prefix,
+ &alignment, &pool);
+ size = MAX (size, dec->info.size);
+ } else {
+ GST_DEBUG_OBJECT (dec, "peer query failed, using defaults");
+ size = dec->info.size;
+ min = max = 0;
+ prefix = 0;
+ alignment = 15;
+ }
+ gst_query_unref (query);
+
+ if (pool == NULL) {
+ /* we did not get a pool, make one ourselves then */
+ pool = gst_buffer_pool_new ();
+ }
+
+ config = gst_buffer_pool_get_config (pool);
+ gst_buffer_pool_config_set (config, caps, size, min, max, prefix,
+ alignment | 15);
+ /* and store */
+ gst_buffer_pool_set_config (pool, config);
+
+ if (dec->pool) {
+ gst_buffer_pool_set_active (dec->pool, FALSE);
+ gst_object_unref (dec->pool);
+ }
+ dec->pool = pool;
+
+ /* and activate */
+ gst_buffer_pool_set_active (pool, TRUE);
+
+ return TRUE;
+}
+
+static gboolean
gst_jpeg_dec_negotiate (GstJpegDec * dec, gint width, gint height, gint clrspc)
{
GstCaps *caps;
GstVideoFormat format;
+ GstVideoInfo info;
- if (G_UNLIKELY (width == dec->caps_width && height == dec->caps_height &&
- dec->framerate_numerator == dec->caps_framerate_numerator &&
- dec->framerate_denominator == dec->caps_framerate_denominator &&
- clrspc == dec->clrspc))
- return;
+ if (G_UNLIKELY (width == dec->info.width && height == dec->info.height &&
+ dec->in_fps_n == dec->info.fps_n && dec->in_fps_d == dec->info.fps_d
+ && clrspc == dec->clrspc))
+ return TRUE;
+
+ gst_video_info_init (&info);
/* framerate == 0/1 is a still frame */
- if (dec->framerate_denominator == 0) {
- dec->framerate_numerator = 0;
- dec->framerate_denominator = 1;
+ if (dec->in_fps_d == 0) {
+ info.fps_n = 0;
+ info.fps_d = 1;
+ } else {
+ info.fps_n = dec->in_fps_n;
+ info.fps_d = dec->in_fps_d;
}
/* calculate or assume an average frame duration for QoS purposes */
GST_OBJECT_LOCK (dec);
- if (dec->framerate_numerator != 0) {
- dec->qos_duration = gst_util_uint64_scale (GST_SECOND,
- dec->framerate_denominator, dec->framerate_numerator);
+ if (info.fps_n != 0) {
+ dec->qos_duration =
+ gst_util_uint64_scale (GST_SECOND, info.fps_d, info.fps_n);
+ dec->duration = dec->qos_duration;
} else {
/* if not set just use 25fps */
dec->qos_duration = gst_util_uint64_scale (GST_SECOND, 1, 25);
+ dec->duration = GST_CLOCK_TIME_NONE;
}
GST_OBJECT_UNLOCK (dec);
if (dec->cinfo.jpeg_color_space == JCS_RGB) {
gint i;
GstCaps *allowed_caps;
+ GstVideoInfo tmpinfo;
GST_DEBUG_OBJECT (dec, "selecting RGB format");
/* retrieve allowed caps, and find the first one that reasonably maps
* and get_pad_template_caps doesn't */
caps = gst_caps_copy (gst_pad_get_pad_template_caps (dec->srcpad));
}
- /* avoid lists of fourcc, etc */
+ /* avoid lists of formats, etc */
allowed_caps = gst_caps_normalize (caps);
gst_caps_unref (caps);
caps = NULL;
/* sigh, ds and _parse_caps need fixed caps for parsing, fixate */
gst_pad_fixate_caps (dec->srcpad, caps);
GST_LOG_OBJECT (dec, "checking caps %" GST_PTR_FORMAT, caps);
- if (!gst_video_format_parse_caps (caps, &format, NULL, NULL))
+
+ if (!gst_video_info_from_caps (&tmpinfo, caps))
continue;
/* we'll settle for the first (preferred) downstream rgb format */
- if (gst_video_format_is_rgb (format))
+ if (GST_VIDEO_INFO_IS_RGB (&tmpinfo))
break;
/* default fall-back */
format = GST_VIDEO_FORMAT_RGB;
if (caps)
gst_caps_unref (caps);
gst_caps_unref (allowed_caps);
- caps = gst_video_format_new_caps (format, width, height,
- dec->framerate_numerator, dec->framerate_denominator, 1, 1);
- dec->outsize = gst_video_format_get_size (format, width, height);
- /* some format info */
- dec->offset[0] =
- gst_video_format_get_component_offset (format, 0, width, height);
- dec->offset[1] =
- gst_video_format_get_component_offset (format, 1, width, height);
- dec->offset[2] =
- gst_video_format_get_component_offset (format, 2, width, height);
- /* equal for all components */
- dec->stride = gst_video_format_get_row_stride (format, 0, width);
- dec->inc = gst_video_format_get_pixel_stride (format, 0);
} else if (dec->cinfo.jpeg_color_space == JCS_GRAYSCALE) {
/* TODO is anything else then 8bit supported in jpeg? */
format = GST_VIDEO_FORMAT_GRAY8;
- caps = gst_video_format_new_caps (format, width, height,
- dec->framerate_numerator, dec->framerate_denominator, 1, 1);
- dec->outsize = gst_video_format_get_size (format, width, height);
- dec->offset[0] =
- gst_video_format_get_component_offset (format, 0, width, height);
- dec->stride = gst_video_format_get_row_stride (format, 0, width);
- dec->inc = gst_video_format_get_pixel_stride (format, 0);
} else {
/* go for plain and simple I420 */
/* TODO other YUV cases ? */
- caps = gst_caps_new_simple ("video/x-raw-yuv",
- "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('I', '4', '2', '0'),
- "width", G_TYPE_INT, width, "height", G_TYPE_INT, height,
- "framerate", GST_TYPE_FRACTION, dec->framerate_numerator,
- dec->framerate_denominator, NULL);
- dec->outsize = I420_SIZE (width, height);
+ format = GST_VIDEO_FORMAT_I420;
}
+ gst_video_info_set_format (&info, format, width, height);
+ caps = gst_video_info_to_caps (&info);
+
GST_DEBUG_OBJECT (dec, "setting caps %" GST_PTR_FORMAT, caps);
GST_DEBUG_OBJECT (dec, "max_v_samp_factor=%d", dec->cinfo.max_v_samp_factor);
GST_DEBUG_OBJECT (dec, "max_h_samp_factor=%d", dec->cinfo.max_h_samp_factor);
gst_pad_set_caps (dec->srcpad, caps);
+
+ dec->info = info;
+ dec->clrspc = clrspc;
+
+ gst_jpeg_dec_buffer_pool (dec, caps);
gst_caps_unref (caps);
- dec->caps_width = width;
- dec->caps_height = height;
- dec->caps_framerate_numerator = dec->framerate_numerator;
- dec->caps_framerate_denominator = dec->framerate_denominator;
+ return TRUE;
}
static GstFlowReturn
GstFlowReturn ret = GST_FLOW_OK;
GstJpegDec *dec;
GstBuffer *outbuf = NULL;
-#ifndef GST_DISABLE_GST_DEBUG
- guchar *data;
-#endif
- guchar *outdata;
- guchar *base[3], *last[3];
gint img_len;
- guint outsize;
gint width, height;
gint r_h, r_v;
guint code, hdr_ok;
GstClockTime timestamp, duration;
+ GstVideoFrame frame;
dec = GST_JPEG_DEC (GST_PAD_PARENT (pad));
goto skip_decoding;
#ifndef GST_DISABLE_GST_DEBUG
- data = (guint8 *) gst_adapter_peek (dec->adapter, 4);
- GST_LOG_OBJECT (dec, "reading header %02x %02x %02x %02x", data[0], data[1],
- data[2], data[3]);
+ {
+ guchar data[4];
+
+ gst_adapter_copy (dec->adapter, data, 0, 4);
+ GST_LOG_OBJECT (dec, "reading header %02x %02x %02x %02x", data[0], data[1],
+ data[2], data[3]);
+ }
#endif
gst_jpeg_dec_fill_input_buffer (&dec->cinfo);
gst_jpeg_dec_negotiate (dec, width, height, dec->cinfo.jpeg_color_space);
- ret = gst_pad_alloc_buffer_and_set_caps (dec->srcpad, GST_BUFFER_OFFSET_NONE,
- dec->outsize, GST_PAD_CAPS (dec->srcpad), &outbuf);
+ ret = gst_buffer_pool_acquire_buffer (dec->pool, &outbuf, NULL);
if (G_UNLIKELY (ret != GST_FLOW_OK))
goto alloc_failed;
- outdata = GST_BUFFER_DATA (outbuf);
- outsize = GST_BUFFER_SIZE (outbuf);
+ if (!gst_video_frame_map (&frame, &dec->info, outbuf, GST_MAP_READWRITE))
+ goto invalid_frame;
- GST_LOG_OBJECT (dec, "width %d, height %d, buffer size %d, required size %d",
- width, height, outsize, dec->outsize);
+ GST_LOG_OBJECT (dec, "width %d, height %d", width, height);
GST_BUFFER_TIMESTAMP (outbuf) = dec->next_ts;
if (GST_CLOCK_TIME_IS_VALID (duration)) {
/* use duration from incoming buffer for outgoing buffer */
dec->next_ts += duration;
- } else if (dec->framerate_numerator != 0) {
- duration = gst_util_uint64_scale (GST_SECOND,
- dec->framerate_denominator, dec->framerate_numerator);
- dec->next_ts += duration;
+ } else if (GST_CLOCK_TIME_IS_VALID (dec->duration)) {
+ duration = dec->duration;
+ dec->next_ts += dec->duration;
} else {
duration = GST_CLOCK_TIME_NONE;
dec->next_ts = GST_CLOCK_TIME_NONE;
GST_BUFFER_DURATION (outbuf) = duration;
if (dec->cinfo.jpeg_color_space == JCS_RGB) {
- base[0] = outdata + dec->offset[0];
- base[1] = outdata + dec->offset[1];
- base[2] = outdata + dec->offset[2];
- gst_jpeg_dec_decode_rgb (dec, base, width, height, dec->inc, dec->stride);
+ gst_jpeg_dec_decode_rgb (dec, &frame);
} else if (dec->cinfo.jpeg_color_space == JCS_GRAYSCALE) {
- base[0] = outdata + dec->offset[0];
- gst_jpeg_dec_decode_grayscale (dec, base, width, height, dec->inc,
- dec->stride);
+ gst_jpeg_dec_decode_grayscale (dec, &frame);
} else {
- /* mind the swap, jpeglib outputs blue chroma first
- * ensonic: I see no swap?
- */
- base[0] = outdata + I420_Y_OFFSET (width, height);
- base[1] = outdata + I420_U_OFFSET (width, height);
- base[2] = outdata + I420_V_OFFSET (width, height);
-
- /* make sure we don't make jpeglib write beyond our buffer,
- * which might happen if (height % (r_v*DCTSIZE)) != 0 */
- last[0] = base[0] + (I420_Y_ROWSTRIDE (width) * (height - 1));
- last[1] =
- base[1] + (I420_U_ROWSTRIDE (width) * ((GST_ROUND_UP_2 (height) / 2) -
- 1));
- last[2] =
- base[2] + (I420_V_ROWSTRIDE (width) * ((GST_ROUND_UP_2 (height) / 2) -
- 1));
-
GST_LOG_OBJECT (dec, "decompressing (reqired scanline buffer height = %u)",
dec->cinfo.rec_outbuf_height);
|| dec->cinfo.comp_info[2].h_samp_factor != 1)) {
GST_CAT_LOG_OBJECT (GST_CAT_PERFORMANCE, dec,
"indirect decoding using extra buffer copy");
- gst_jpeg_dec_decode_indirect (dec, base, last, width, height, r_v, r_h,
+ gst_jpeg_dec_decode_indirect (dec, &frame, r_v, r_h,
dec->cinfo.num_components);
} else {
- ret = gst_jpeg_dec_decode_direct (dec, base, last, width, height);
-
+ ret = gst_jpeg_dec_decode_direct (dec, &frame);
if (G_UNLIKELY (ret != GST_FLOW_OK))
goto decode_direct_failed;
}
GST_LOG_OBJECT (dec, "decompressing finished");
jpeg_finish_decompress (&dec->cinfo);
+ gst_video_frame_unmap (&frame);
+
/* Clipping */
if (dec->segment.format == GST_FORMAT_TIME) {
- gint64 start, stop, clip_start, clip_stop;
+ guint64 start, stop, clip_start, clip_stop;
GST_LOG_OBJECT (dec, "Attempting clipping");
}
goto exit;
}
+invalid_frame:
+ {
+ jpeg_abort_decompress (&dec->cinfo);
+ gst_buffer_unref (outbuf);
+ ret = GST_FLOW_OK;
+ goto exit;
+ }
drop_buffer:
{
GST_WARNING_OBJECT (dec, "Outgoing buffer is outside configured segment");
switch (GST_EVENT_TYPE (event)) {
case GST_EVENT_QOS:{
+ GstQOSType type;
GstClockTimeDiff diff;
GstClockTime timestamp;
gdouble proportion;
- gst_event_parse_qos (event, &proportion, &diff, ×tamp);
+ gst_event_parse_qos (event, &type, &proportion, &diff, ×tamp);
gst_jpeg_dec_update_qos (dec, proportion, diff, timestamp);
break;
}
static gboolean
gst_jpeg_dec_sink_event (GstPad * pad, GstEvent * event)
{
- gboolean ret = TRUE;
+ gboolean ret = TRUE, forward = TRUE;
GstJpegDec *dec = GST_JPEG_DEC (GST_OBJECT_PARENT (pad));
GST_DEBUG_OBJECT (dec, "event : %s", GST_EVENT_TYPE_NAME (event));
dec->parse_resync = FALSE;
gst_jpeg_dec_reset_qos (dec);
break;
- case GST_EVENT_NEWSEGMENT:{
- gboolean update;
- gdouble rate, applied_rate;
- GstFormat format;
- gint64 start, stop, position;
-
- gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate,
- &format, &start, &stop, &position);
-
- GST_DEBUG_OBJECT (dec, "Got NEWSEGMENT [%" GST_TIME_FORMAT
- " - %" GST_TIME_FORMAT " / %" GST_TIME_FORMAT "]",
- GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
- GST_TIME_ARGS (position));
-
- gst_segment_set_newsegment_full (&dec->segment, update, rate,
- applied_rate, format, start, stop, position);
+ case GST_EVENT_SEGMENT:
+ gst_event_copy_segment (event, &dec->segment);
+ GST_DEBUG_OBJECT (dec, "Got NEWSEGMENT %" GST_SEGMENT_FORMAT,
+ &dec->segment);
+ break;
+ case GST_EVENT_CAPS:
+ {
+ GstCaps *caps;
+ gst_event_parse_caps (event, &caps);
+ ret = gst_jpeg_dec_setcaps (dec, caps);
+ forward = FALSE;
break;
}
default:
break;
}
- ret = gst_pad_push_event (dec->srcpad, event);
+ if (forward)
+ ret = gst_pad_push_event (dec->srcpad, event);
+ else
+ gst_event_unref (event);
return ret;
}
case GST_STATE_CHANGE_READY_TO_PAUSED:
dec->error_count = 0;
dec->good_count = 0;
- dec->framerate_numerator = 0;
- dec->framerate_denominator = 1;
- dec->caps_framerate_numerator = dec->caps_framerate_denominator = 0;
- dec->caps_width = -1;
- dec->caps_height = -1;
+ dec->in_fps_n = 0;
+ dec->in_fps_d = 1;
+ gst_video_info_init (&dec->info);
dec->clrspc = -1;
dec->packetized = FALSE;
dec->next_ts = 0;
g_free (dec->cur_buf);
dec->cur_buf = NULL;
gst_jpeg_dec_free_buffers (dec);
+ if (dec->pool) {
+ gst_buffer_pool_set_active (dec->pool, FALSE);
+ gst_object_unref (dec->pool);
+ }
+ dec->pool = NULL;
break;
default:
break;
GST_DEBUG_CATEGORY_STATIC (flvdemux_debug);
#define GST_CAT_DEFAULT flvdemux_debug
-GST_BOILERPLATE (GstFlvDemux, gst_flv_demux, GstElement, GST_TYPE_ELEMENT);
+#define gst_flv_demux_parent_class parent_class
+G_DEFINE_TYPE (GstFlvDemux, gst_flv_demux, GST_TYPE_ELEMENT);
/* 9 bytes of header + 4 bytes of first previous tag size */
#define FLV_HEADER_SIZE 13
/* try harder to query upstream size if we didn't get it the first time */
if (demux->upstream_seekable && stop == -1) {
- GstFormat fmt = GST_FORMAT_BYTES;
-
GST_DEBUG_OBJECT (demux, "doing duration query to fix up unset stop");
- gst_pad_query_peer_duration (demux->sinkpad, &fmt, &stop);
+ gst_pad_query_peer_duration (demux->sinkpad, GST_FORMAT_BYTES, &stop);
}
/* if upstream doesn't know the size, it's likely that it's not seekable in
gst_flv_demux_parse_tag_script (GstFlvDemux * demux, GstBuffer * buffer)
{
GstFlowReturn ret = GST_FLOW_OK;
- GstByteReader reader = GST_BYTE_READER_INIT_FROM_BUFFER (buffer);
+ GstByteReader reader;
guint8 type = 0;
+ guint8 *data;
+ gsize size;
+
+ g_return_val_if_fail (gst_buffer_get_size (buffer) >= 7, GST_FLOW_ERROR);
- g_return_val_if_fail (GST_BUFFER_SIZE (buffer) >= 7, GST_FLOW_ERROR);
+ data = gst_buffer_map (buffer, &size, NULL, GST_MAP_READ);
+ gst_byte_reader_init (&reader, data, size);
gst_byte_reader_skip (&reader, 7);
GST_LOG_OBJECT (demux, "parsing a script tag");
if (!gst_byte_reader_get_uint8 (&reader, &type))
- return GST_FLOW_OK;
+ goto cleanup;
/* Must be string */
if (type == 2) {
if (!gst_byte_reader_get_uint8 (&reader, &type)) {
g_free (function_name);
- return GST_FLOW_OK;
+ goto cleanup;
}
switch (type) {
/* ECMA array */
if (!gst_byte_reader_get_uint32_be (&reader, &nb_elems)) {
g_free (function_name);
- return GST_FLOW_OK;
+ goto cleanup;
}
/* The number of elements is just a hint, some files have
default:
GST_DEBUG_OBJECT (demux, "Unhandled script data type : %d", type);
g_free (function_name);
- return GST_FLOW_OK;
+ goto cleanup;
}
demux->push_tags = TRUE;
}
}
+cleanup:
+ gst_buffer_unmap (buffer, data, -1);
+
return ret;
}
case 4:
case 5:
case 6:
- caps = gst_caps_new_simple ("audio/x-nellymoser", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-nellymoser");
break;
case 10:
{
+ guint8 *data = NULL;
+ gsize size;
+
+ if (demux->audio_codec_data)
+ data = gst_buffer_map (demux->audio_codec_data, &size, NULL,
+ GST_MAP_READ);
/* use codec-data to extract and verify samplerate */
- if (demux->audio_codec_data &&
- GST_BUFFER_SIZE (demux->audio_codec_data) >= 2) {
+ if (demux->audio_codec_data && size >= 2) {
gint freq_index;
- freq_index =
- ((GST_READ_UINT16_BE (GST_BUFFER_DATA (demux->audio_codec_data))));
+ freq_index = GST_READ_UINT16_BE (data);
freq_index = (freq_index & 0x0780) >> 7;
adjusted_rate =
gst_codec_utils_aac_get_sample_rate_from_index (freq_index);
adjusted_rate = rate;
}
}
+ if (data)
+ gst_buffer_unmap (demux->audio_codec_data, data, -1);
caps = gst_caps_new_simple ("audio/mpeg",
"mpegversion", G_TYPE_INT, 4, "framed", G_TYPE_BOOLEAN, TRUE,
"stream-format", G_TYPE_STRING, "raw", NULL);
break;
}
case 7:
- caps = gst_caps_new_simple ("audio/x-alaw", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-alaw");
break;
case 8:
- caps = gst_caps_new_simple ("audio/x-mulaw", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-mulaw");
break;
case 11:
- caps = gst_caps_new_simple ("audio/x-speex", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-speex");
break;
default:
GST_WARNING_OBJECT (demux, "unsupported audio codec tag %u", codec_tag);
guint32 pts = 0, codec_tag = 0, rate = 5512, width = 8, channels = 1;
guint32 codec_data = 0, pts_ext = 0;
guint8 flags = 0;
- guint8 *data = GST_BUFFER_DATA (buffer);
+ guint8 *data;
GstBuffer *outbuf;
+ gsize size;
GST_LOG_OBJECT (demux, "parsing an audio tag");
if (demux->no_more_pads && !demux->audio_pad) {
GST_WARNING_OBJECT (demux,
"Signaled no-more-pads already but had no audio pad -- ignoring");
- goto beach;
+ return GST_FLOW_OK;
}
- g_return_val_if_fail (GST_BUFFER_SIZE (buffer) == demux->tag_size,
+ g_return_val_if_fail (gst_buffer_get_size (buffer) == demux->tag_size,
GST_FLOW_ERROR);
+ /* Error out on tags with too small headers */
+ if (gst_buffer_get_size (buffer) < 11) {
+    GST_ERROR_OBJECT (demux, "Too small tag size (%" G_GSIZE_FORMAT ")",
+        gst_buffer_get_size (buffer));
+ return GST_FLOW_ERROR;
+ }
+
+ data = gst_buffer_map (buffer, &size, NULL, GST_MAP_READ);
+
/* Grab information about audio tag */
pts = GST_READ_UINT24_BE (data);
/* read the pts extension to 32 bits integer */
GST_LOG_OBJECT (demux, "pts bytes %02X %02X %02X %02X (%d)", data[0], data[1],
data[2], data[3], pts);
- /* Error out on tags with too small headers */
- if (GST_BUFFER_SIZE (buffer) < 11) {
- GST_ERROR_OBJECT (demux, "Too small tag size (%d)",
- GST_BUFFER_SIZE (buffer));
- return GST_FLOW_ERROR;
- }
-
- /* Silently skip buffers with no data */
- if (GST_BUFFER_SIZE (buffer) == 11)
- return GST_FLOW_OK;
-
/* Skip the stream id and go directly to the flags */
flags = GST_READ_UINT8 (data + 7);
+ /* Silently skip buffers with no data */
+ if (size == 11)
+ goto beach;
+
/* Channels */
if (flags & 0x01) {
channels = 2;
ret = GST_FLOW_ERROR;
goto beach;
}
+#ifndef GST_DISABLE_GST_DEBUG
+ {
+ GstCaps *caps;
- GST_DEBUG_OBJECT (demux, "created audio pad with caps %" GST_PTR_FORMAT,
- GST_PAD_CAPS (demux->audio_pad));
+ caps = gst_pad_get_current_caps (demux->audio_pad);
+ GST_DEBUG_OBJECT (demux, "created audio pad with caps %" GST_PTR_FORMAT,
+ caps);
+ if (caps)
+ gst_caps_unref (caps);
+ }
+#endif
/* Set functions on the pad */
gst_pad_set_query_type_function (demux->audio_pad,
}
/* Create buffer from pad */
- outbuf =
- gst_buffer_create_sub (buffer, 7 + codec_data,
- demux->tag_data_size - codec_data);
+ outbuf = gst_buffer_copy_region (buffer, GST_BUFFER_COPY_MEMORY,
+ 7 + codec_data, demux->tag_data_size - codec_data);
if (demux->audio_codec_tag == 10) {
guint8 aac_packet_type = GST_READ_UINT8 (data + 8);
GST_BUFFER_DURATION (outbuf) = GST_CLOCK_TIME_NONE;
GST_BUFFER_OFFSET (outbuf) = demux->audio_offset++;
GST_BUFFER_OFFSET_END (outbuf) = demux->audio_offset;
- gst_buffer_set_caps (outbuf, GST_PAD_CAPS (demux->audio_pad));
if (demux->duration == GST_CLOCK_TIME_NONE ||
demux->duration < GST_BUFFER_TIMESTAMP (outbuf))
demux->audio_need_discont = FALSE;
}
- gst_segment_set_last_stop (&demux->segment, GST_FORMAT_TIME,
- GST_BUFFER_TIMESTAMP (outbuf));
+ demux->segment.position = GST_BUFFER_TIMESTAMP (outbuf);
/* Do we need a newsegment event ? */
if (G_UNLIKELY (demux->audio_need_segment)) {
- if (demux->close_seg_event)
- gst_pad_push_event (demux->audio_pad,
- gst_event_ref (demux->close_seg_event));
-
+ /* FIXME need one segment sent for all stream to maintain a/v sync */
if (!demux->new_seg_event) {
GST_DEBUG_OBJECT (demux, "pushing newsegment from %"
GST_TIME_FORMAT " to %" GST_TIME_FORMAT,
- GST_TIME_ARGS (demux->segment.last_stop),
+ GST_TIME_ARGS (demux->segment.position),
GST_TIME_ARGS (demux->segment.stop));
- demux->new_seg_event =
- gst_event_new_new_segment (FALSE, demux->segment.rate,
- demux->segment.format, demux->segment.last_stop,
- demux->segment.stop, demux->segment.last_stop);
+ demux->segment.start = demux->segment.time = demux->segment.position;
+ demux->new_seg_event = gst_event_new_segment (&demux->segment);
} else {
GST_DEBUG_OBJECT (demux, "pushing pre-generated newsegment event");
}
GST_LOG_OBJECT (demux, "pushing %d bytes buffer at pts %" GST_TIME_FORMAT
" with duration %" GST_TIME_FORMAT ", offset %" G_GUINT64_FORMAT,
- GST_BUFFER_SIZE (outbuf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (outbuf)),
+      (gint) gst_buffer_get_size (outbuf),
+ GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (outbuf)),
GST_TIME_ARGS (GST_BUFFER_DURATION (outbuf)), GST_BUFFER_OFFSET (outbuf));
if (!GST_CLOCK_TIME_IS_VALID (demux->audio_start)) {
ret = gst_pad_push (demux->audio_pad, outbuf);
if (G_UNLIKELY (ret != GST_FLOW_OK)) {
if (demux->segment.rate < 0.0 && ret == GST_FLOW_UNEXPECTED &&
- demux->segment.last_stop > demux->segment.stop) {
+ demux->segment.position > demux->segment.stop) {
/* In reverse playback we can get a GST_FLOW_UNEXPECTED when
* we are at the end of the segment, so we just need to jump
* back to the previous section. */
demux->audio_linked = TRUE;
beach:
+ gst_buffer_unmap (buffer, data, -1);
+
return ret;
}
/* Generate caps for that pad */
switch (codec_tag) {
case 2:
- caps = gst_caps_new_simple ("video/x-flash-video", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-flash-video");
break;
case 3:
- caps = gst_caps_new_simple ("video/x-flash-screen", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-flash-screen");
break;
case 4:
- caps = gst_caps_new_simple ("video/x-vp6-flash", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-vp6-flash");
break;
case 5:
- caps = gst_caps_new_simple ("video/x-vp6-alpha", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-vp6-alpha");
break;
case 7:
caps =
guint32 pts = 0, codec_data = 1, pts_ext = 0;
gboolean keyframe = FALSE;
guint8 flags = 0, codec_tag = 0;
- guint8 *data = GST_BUFFER_DATA (buffer);
+ guint8 *data;
GstBuffer *outbuf;
+ gsize size;
- g_return_val_if_fail (GST_BUFFER_SIZE (buffer) == demux->tag_size,
+ g_return_val_if_fail (gst_buffer_get_size (buffer) == demux->tag_size,
GST_FLOW_ERROR);
GST_LOG_OBJECT (demux, "parsing a video tag");
-
if (demux->no_more_pads && !demux->video_pad) {
GST_WARNING_OBJECT (demux,
"Signaled no-more-pads already but had no audio pad -- ignoring");
- goto beach;
+ return GST_FLOW_OK;
+ }
+
+ if (gst_buffer_get_size (buffer) < 12) {
+ GST_ERROR_OBJECT (demux, "Too small tag size");
+ return GST_FLOW_ERROR;
}
+ data = gst_buffer_map (buffer, &size, NULL, GST_MAP_READ);
+
/* Grab information about video tag */
pts = GST_READ_UINT24_BE (data);
/* read the pts extension to 32 bits integer */
GST_LOG_OBJECT (demux, "pts bytes %02X %02X %02X %02X (%d)", data[0], data[1],
data[2], data[3], pts);
- if (GST_BUFFER_SIZE (buffer) < 12) {
- GST_ERROR_OBJECT (demux, "Too small tag size");
- return GST_FLOW_ERROR;
- }
-
/* Skip the stream id and go directly to the flags */
flags = GST_READ_UINT8 (data + 7);
GST_LOG_OBJECT (demux, "got cts %d", cts);
- pts = pts + cts;
+ /* avoid negative overflow */
+ if (cts >= 0 || pts >= -cts)
+ pts += cts;
}
GST_LOG_OBJECT (demux, "video tag with codec tag %u, keyframe (%d) "
* metadata tag that would come later and trigger a caps change */
demux->got_par = FALSE;
- GST_DEBUG_OBJECT (demux, "created video pad with caps %" GST_PTR_FORMAT,
- GST_PAD_CAPS (demux->video_pad));
+#ifndef GST_DISABLE_GST_DEBUG
+ {
+ GstCaps *caps;
+
+ caps = gst_pad_get_current_caps (demux->video_pad);
+ GST_DEBUG_OBJECT (demux, "created video pad with caps %" GST_PTR_FORMAT,
+ caps);
+ if (caps)
+ gst_caps_unref (caps);
+ }
+#endif
/* Set functions on the pad */
gst_pad_set_query_type_function (demux->video_pad,
}
/* Create buffer from pad */
- outbuf =
- gst_buffer_create_sub (buffer, 7 + codec_data,
- demux->tag_data_size - codec_data);
+ outbuf = gst_buffer_copy_region (buffer, GST_BUFFER_COPY_MEMORY,
+ 7 + codec_data, demux->tag_data_size - codec_data);
if (demux->video_codec_tag == 7) {
guint8 avc_packet_type = GST_READ_UINT8 (data + 8);
GST_BUFFER_DURATION (outbuf) = GST_CLOCK_TIME_NONE;
GST_BUFFER_OFFSET (outbuf) = demux->video_offset++;
GST_BUFFER_OFFSET_END (outbuf) = demux->video_offset;
- gst_buffer_set_caps (outbuf, GST_PAD_CAPS (demux->video_pad));
if (demux->duration == GST_CLOCK_TIME_NONE ||
demux->duration < GST_BUFFER_TIMESTAMP (outbuf))
demux->video_need_discont = FALSE;
}
- gst_segment_set_last_stop (&demux->segment, GST_FORMAT_TIME,
- GST_BUFFER_TIMESTAMP (outbuf));
+ demux->segment.position = GST_BUFFER_TIMESTAMP (outbuf);
/* Do we need a newsegment event ? */
if (G_UNLIKELY (demux->video_need_segment)) {
- if (demux->close_seg_event)
- gst_pad_push_event (demux->video_pad,
- gst_event_ref (demux->close_seg_event));
-
+ /* FIXME need one segment sent for all stream to maintain a/v sync */
if (!demux->new_seg_event) {
GST_DEBUG_OBJECT (demux, "pushing newsegment from %"
GST_TIME_FORMAT " to %" GST_TIME_FORMAT,
- GST_TIME_ARGS (demux->segment.last_stop),
+ GST_TIME_ARGS (demux->segment.position),
GST_TIME_ARGS (demux->segment.stop));
- demux->new_seg_event =
- gst_event_new_new_segment (FALSE, demux->segment.rate,
- demux->segment.format, demux->segment.last_stop,
- demux->segment.stop, demux->segment.last_stop);
+ demux->segment.start = demux->segment.time = demux->segment.position;
+ demux->new_seg_event = gst_event_new_segment (&demux->segment);
} else {
GST_DEBUG_OBJECT (demux, "pushing pre-generated newsegment event");
}
GST_LOG_OBJECT (demux, "pushing %d bytes buffer at pts %" GST_TIME_FORMAT
" with duration %" GST_TIME_FORMAT ", offset %" G_GUINT64_FORMAT
- ", keyframe (%d)", GST_BUFFER_SIZE (outbuf),
+      ", keyframe (%d)", (gint) gst_buffer_get_size (outbuf),
GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (outbuf)),
GST_TIME_ARGS (GST_BUFFER_DURATION (outbuf)), GST_BUFFER_OFFSET (outbuf),
keyframe);
if (G_UNLIKELY (ret != GST_FLOW_OK)) {
if (demux->segment.rate < 0.0 && ret == GST_FLOW_UNEXPECTED &&
- demux->segment.last_stop > demux->segment.stop) {
+ demux->segment.position > demux->segment.stop) {
/* In reverse playback we can get a GST_FLOW_UNEXPECTED when
* we are at the end of the segment, so we just need to jump
* back to the previous section. */
demux->video_linked = TRUE;
beach:
+ gst_buffer_unmap (buffer, data, -1);
return ret;
}
guint32 tag_data_size;
guint8 type;
gboolean keyframe = TRUE;
- GstClockTime ret;
- guint8 *data = GST_BUFFER_DATA (buffer);
+ GstClockTime ret = GST_CLOCK_TIME_NONE;
+ guint8 *data, *bdata;
+ gsize size;
- g_return_val_if_fail (GST_BUFFER_SIZE (buffer) >= 12, GST_CLOCK_TIME_NONE);
+ g_return_val_if_fail (gst_buffer_get_size (buffer) >= 12,
+ GST_CLOCK_TIME_NONE);
+
+ data = bdata = gst_buffer_map (buffer, &size, NULL, GST_MAP_READ);
type = data[0];
if (type != 9 && type != 8 && type != 18) {
GST_WARNING_OBJECT (demux, "Unsupported tag type %u", data[0]);
- return GST_CLOCK_TIME_NONE;
+ goto exit;
}
if (type == 9)
tag_data_size = GST_READ_UINT24_BE (data + 1);
- if (GST_BUFFER_SIZE (buffer) >= tag_data_size + 11 + 4) {
+ if (size >= tag_data_size + 11 + 4) {
if (GST_READ_UINT32_BE (data + tag_data_size + 11) != tag_data_size + 11) {
GST_WARNING_OBJECT (demux, "Invalid tag size");
- return GST_CLOCK_TIME_NONE;
+ goto exit;
}
}
if (demux->duration == GST_CLOCK_TIME_NONE || demux->duration < ret)
demux->duration = ret;
+exit:
+ gst_buffer_unmap (buffer, bdata, -1);
return ret;
}
{
GstFlowReturn ret = GST_FLOW_OK;
guint8 tag_type = 0;
- guint8 *data = GST_BUFFER_DATA (buffer);
+ guint8 *data;
+
+ g_return_val_if_fail (gst_buffer_get_size (buffer) >= 4, GST_FLOW_ERROR);
- g_return_val_if_fail (GST_BUFFER_SIZE (buffer) >= 4, GST_FLOW_ERROR);
+ data = gst_buffer_map (buffer, NULL, NULL, GST_MAP_READ);
tag_type = data[0];
GST_LOG_OBJECT (demux, "tag data size is %" G_GUINT64_FORMAT,
demux->tag_data_size);
+ gst_buffer_unmap (buffer, data, -1);
+
return ret;
}
gst_flv_demux_parse_header (GstFlvDemux * demux, GstBuffer * buffer)
{
GstFlowReturn ret = GST_FLOW_OK;
- guint8 *data = GST_BUFFER_DATA (buffer);
+ guint8 *data;
+
+ g_return_val_if_fail (gst_buffer_get_size (buffer) >= 9, GST_FLOW_ERROR);
- g_return_val_if_fail (GST_BUFFER_SIZE (buffer) >= 9, GST_FLOW_ERROR);
+ data = gst_buffer_map (buffer, NULL, NULL, GST_MAP_READ);
/* Check for the FLV tag */
if (data[0] == 'F' && data[1] == 'L' && data[2] == 'V') {
}
}
- /* Jump over the 4 first bytes */
- data += 4;
-
/* Now look at audio/video flags */
{
- guint8 flags = data[0];
+ guint8 flags = data[4];
demux->has_video = demux->has_audio = FALSE;
demux->need_header = FALSE;
beach:
+ gst_buffer_unmap (buffer, data, -1);
return ret;
}
demux->new_seg_event = NULL;
}
- if (demux->close_seg_event) {
- gst_event_unref (demux->close_seg_event);
- demux->close_seg_event = NULL;
- }
-
gst_adapter_clear (demux->adapter);
if (demux->audio_codec_data) {
demux = GST_FLV_DEMUX (gst_pad_get_parent (pad));
GST_LOG_OBJECT (demux, "received buffer of %d bytes at offset %"
- G_GUINT64_FORMAT, GST_BUFFER_SIZE (buffer), GST_BUFFER_OFFSET (buffer));
+      G_GUINT64_FORMAT, (gint) gst_buffer_get_size (buffer),
+ GST_BUFFER_OFFSET (buffer));
if (G_UNLIKELY (GST_BUFFER_OFFSET (buffer) == 0)) {
GST_DEBUG_OBJECT (demux, "beginning of file, expect header");
if (!demux->indexed) {
if (demux->offset == demux->file_size - sizeof (guint32)) {
- GstBuffer *buffer =
- gst_adapter_take_buffer (demux->adapter, sizeof (guint32));
- GstByteReader *reader = gst_byte_reader_new_from_buffer (buffer);
guint64 seek_offset;
+ guint8 *data;
- if (!gst_adapter_available (demux->adapter) >= sizeof (guint32)) {
- /* error */
- }
+ data = gst_adapter_take (demux->adapter, 4);
+ if (!data)
+ goto no_index;
- seek_offset =
- demux->file_size - sizeof (guint32) -
- gst_byte_reader_peek_uint32_be_unchecked (reader);
- gst_byte_reader_free (reader);
- gst_buffer_unref (buffer);
+ seek_offset = demux->file_size - sizeof (guint32) -
+ GST_READ_UINT32_BE (data);
+ g_free (data);
GST_INFO_OBJECT (demux,
"Seeking to beginning of last tag at %" G_GUINT64_FORMAT,
return ret;
}
- if (G_UNLIKELY (*buffer && GST_BUFFER_SIZE (*buffer) != size)) {
+ if (G_UNLIKELY (*buffer && gst_buffer_get_size (*buffer) != size)) {
GST_WARNING_OBJECT (demux,
"partial pull got %d when expecting %d from offset %" G_GUINT64_FORMAT,
- GST_BUFFER_SIZE (*buffer), size, offset);
+        (gint) gst_buffer_get_size (*buffer), size, offset);
gst_buffer_unref (*buffer);
ret = GST_FLOW_UNEXPECTED;
*buffer = NULL;
gst_flv_demux_create_index (GstFlvDemux * demux, gint64 pos, GstClockTime ts)
{
gint64 size;
- GstFormat fmt = GST_FORMAT_BYTES;
size_t tag_size;
guint64 old_offset;
GstBuffer *buffer;
GstClockTime tag_time;
GstFlowReturn ret = GST_FLOW_OK;
- if (G_UNLIKELY (!gst_pad_query_peer_duration (demux->sinkpad, &fmt, &size) ||
- fmt != GST_FORMAT_BYTES))
+ if (!gst_pad_query_peer_duration (demux->sinkpad, GST_FORMAT_BYTES, &size))
return GST_FLOW_OK;
GST_DEBUG_OBJECT (demux, "building index at %" G_GINT64_FORMAT
gst_flv_demux_get_metadata (GstFlvDemux * demux)
{
gint64 ret = 0, offset;
- GstFormat fmt = GST_FORMAT_BYTES;
size_t tag_size, size;
GstBuffer *buffer = NULL;
+ guint8 *data;
- if (G_UNLIKELY (!gst_pad_query_peer_duration (demux->sinkpad, &fmt, &offset)
- || fmt != GST_FORMAT_BYTES))
+ if (!gst_pad_query_peer_duration (demux->sinkpad, GST_FORMAT_BYTES, &offset))
goto exit;
ret = offset;
4, &buffer))
goto exit;
- tag_size = GST_READ_UINT32_BE (GST_BUFFER_DATA (buffer));
+ data = gst_buffer_map (buffer, NULL, NULL, GST_MAP_READ);
+ tag_size = GST_READ_UINT32_BE (data);
+ gst_buffer_unmap (buffer, data, -1);
GST_DEBUG_OBJECT (demux, "last tag size: %" G_GSIZE_FORMAT, tag_size);
gst_buffer_unref (buffer);
buffer = NULL;
goto exit;
/* a consistency check */
- size = GST_READ_UINT24_BE (GST_BUFFER_DATA (buffer) + 1);
+ data = gst_buffer_map (buffer, NULL, NULL, GST_MAP_READ);
+ size = GST_READ_UINT24_BE (data + 1);
if (size != tag_size - 11) {
+ gst_buffer_unmap (buffer, data, -1);
GST_DEBUG_OBJECT (demux,
"tag size %" G_GSIZE_FORMAT ", expected %" G_GSIZE_FORMAT
", corrupt or truncated file", size, tag_size - 11);
gst_flv_demux_parse_tag_timestamp (demux, FALSE, buffer, &size);
/* maybe get some more metadata */
- if (GST_BUFFER_DATA (buffer)[0] == 18) {
+ if (data[0] == 18) {
+ gst_buffer_unmap (buffer, data, -1);
gst_buffer_unref (buffer);
buffer = NULL;
GST_DEBUG_OBJECT (demux, "script tag, pulling it to parse");
if (GST_FLOW_OK == gst_flv_demux_pull_range (demux, demux->sinkpad, offset,
tag_size, &buffer))
gst_flv_demux_parse_tag_script (demux, buffer);
+ } else {
+ gst_buffer_unmap (buffer, data, -1);
}
exit:
if (demux->segment.rate < 0.0) {
/* check end of section */
if ((gint64) demux->offset >= demux->to_offset ||
- demux->segment.last_stop >= demux->segment.stop + 2 * GST_SECOND ||
+ demux->segment.position >= demux->segment.stop + 2 * GST_SECOND ||
(demux->audio_done && demux->video_done))
ret = gst_flv_demux_seek_to_prev_keyframe (demux);
} else {
/* check EOS condition */
if ((demux->segment.stop != -1) &&
- (demux->segment.last_stop >= demux->segment.stop)) {
+ (demux->segment.position >= demux->segment.stop)) {
ret = GST_FLOW_UNEXPECTED;
}
}
gst_pad_pause_task (pad);
if (ret == GST_FLOW_UNEXPECTED) {
+ /* handle end-of-stream/segment */
+ /* so align our position with the end of it, if there is one
+ * this ensures a subsequent will arrive at correct base/acc time */
+ if (demux->segment.rate > 0.0 &&
+ GST_CLOCK_TIME_IS_VALID (demux->segment.stop))
+ demux->segment.position = demux->segment.stop;
+ else if (demux->segment.rate < 0.0)
+ demux->segment.position = demux->segment.start;
+
/* perform EOS logic */
if (!demux->no_more_pads) {
gst_element_no_more_pads (GST_ELEMENT_CAST (demux));
g_return_val_if_fail (segment != NULL, 0);
- time = segment->last_stop;
+ time = segment->position;
if (demux->index) {
/* Let's check if we have an index entry for that seek time */
GST_DEBUG_OBJECT (demux, "found index entry for %" GST_TIME_FORMAT
" at %" GST_TIME_FORMAT ", seeking to %" G_GINT64_FORMAT,
- GST_TIME_ARGS (segment->last_stop), GST_TIME_ARGS (time), bytes);
+ GST_TIME_ARGS (segment->position), GST_TIME_ARGS (time), bytes);
/* Key frame seeking */
if (segment->flags & GST_SEEK_FLAG_KEY_UNIT) {
if (time < segment->start) {
segment->start = segment->time = time;
}
- segment->last_stop = time;
+ segment->position = time;
}
} else {
GST_DEBUG_OBJECT (demux, "no index entry found for %" GST_TIME_FORMAT,
&demux->segment);
/* Apply the seek to our segment */
- gst_segment_set_seek (&seeksegment, rate, format, flags,
+ gst_segment_do_seek (&seeksegment, rate, format, flags,
start_type, start, stop_type, stop, &update);
GST_DEBUG_OBJECT (demux, "segment configured %" GST_SEGMENT_FORMAT,
&seeksegment);
- if (flush || seeksegment.last_stop != demux->segment.last_stop) {
+ if (flush || seeksegment.position != demux->segment.position) {
/* Do the actual seeking */
guint64 offset = gst_flv_demux_find_offset (demux, &seeksegment);
if (!demux->indexed) {
guint64 seek_offset = 0;
gboolean building_index;
- GstFormat fmt;
GST_OBJECT_LOCK (demux);
/* handle the seek in the chain function */
building_index = demux->building_index;
if (!building_index) {
demux->building_index = TRUE;
- fmt = GST_FORMAT_BYTES;
if (!demux->file_size
- && !gst_pad_query_peer_duration (demux->sinkpad, &fmt,
+ && !gst_pad_query_peer_duration (demux->sinkpad, GST_FORMAT_BYTES,
&demux->file_size)) {
- GST_WARNING_OBJECT (demux,
- "Cannot obtain file size - %" G_GINT64_FORMAT ", format %u",
- demux->file_size, fmt);
+ GST_WARNING_OBJECT (demux, "Failed to query upstream file size");
GST_OBJECT_UNLOCK (demux);
return FALSE;
}
if (flush) {
/* Stop flushing upstream we need to pull */
- gst_pad_push_event (demux->sinkpad, gst_event_new_flush_stop ());
+ gst_pad_push_event (demux->sinkpad, gst_event_new_flush_stop (TRUE));
}
/* Work on a copy until we are sure the seek succeeded. */
&demux->segment);
/* Apply the seek to our segment */
- gst_segment_set_seek (&seeksegment, rate, format, flags,
+ gst_segment_do_seek (&seeksegment, rate, format, flags,
start_type, start, stop_type, stop, &update);
GST_DEBUG_OBJECT (demux, "segment configured %" GST_SEGMENT_FORMAT,
&seeksegment);
- if (flush || seeksegment.last_stop != demux->segment.last_stop) {
+ if (flush || seeksegment.position != demux->segment.position) {
/* Do the actual seeking */
/* index is reliable if it is complete or we do not go to far ahead */
if (seeking && !demux->indexed &&
- seeksegment.last_stop > demux->index_max_time + 10 * GST_SECOND) {
+ seeksegment.position > demux->index_max_time + 10 * GST_SECOND) {
GST_DEBUG_OBJECT (demux, "delaying seek to post-scan; "
" index only up to %" GST_TIME_FORMAT,
GST_TIME_ARGS (demux->index_max_time));
/* stop flushing for now */
if (flush)
- gst_flv_demux_push_src_event (demux, gst_event_new_flush_stop ());
+ gst_flv_demux_push_src_event (demux, gst_event_new_flush_stop (TRUE));
/* delegate scanning and index building to task thread to avoid
* occupying main (UI) loop */
if (demux->seek_event)
gst_event_unref (demux->seek_event);
demux->seek_event = gst_event_ref (event);
- demux->seek_time = seeksegment.last_stop;
+ demux->seek_time = seeksegment.position;
demux->state = FLV_STATE_SEEK;
/* do not know about succes yet, but we did care and handled it */
ret = TRUE;
ret = TRUE;
}
- if (G_UNLIKELY (demux->close_seg_event)) {
- gst_event_unref (demux->close_seg_event);
- demux->close_seg_event = NULL;
- }
-
if (flush) {
/* Stop flushing, the sinks are at time 0 now */
- gst_flv_demux_push_src_event (demux, gst_event_new_flush_stop ());
- } else {
- GST_DEBUG_OBJECT (demux, "closing running segment %" GST_SEGMENT_FORMAT,
- &demux->segment);
-
- /* Close the current segment for a linear playback */
- if (demux->segment.rate >= 0) {
- /* for forward playback, we played from start to last_stop */
- demux->close_seg_event = gst_event_new_new_segment (TRUE,
- demux->segment.rate, demux->segment.format,
- demux->segment.start, demux->segment.last_stop, demux->segment.time);
- } else {
- gint64 stop;
-
- if ((stop = demux->segment.stop) == -1)
- stop = demux->segment.duration;
-
- /* for reverse playback, we played from stop to last_stop. */
- demux->close_seg_event = gst_event_new_new_segment (TRUE,
- demux->segment.rate, demux->segment.format,
- demux->segment.last_stop, stop, demux->segment.last_stop);
- }
+ gst_flv_demux_push_src_event (demux, gst_event_new_flush_stop (TRUE));
}
if (ret) {
if (demux->segment.flags & GST_SEEK_FLAG_SEGMENT) {
gst_element_post_message (GST_ELEMENT (demux),
gst_message_new_segment_start (GST_OBJECT (demux),
- demux->segment.format, demux->segment.last_stop));
+ demux->segment.format, demux->segment.position));
}
/* Tell all the stream a new segment is needed */
GST_TIME_FORMAT " to %" GST_TIME_FORMAT,
GST_TIME_ARGS (demux->segment.start),
GST_TIME_ARGS (demux->segment.stop));
- demux->new_seg_event =
- gst_event_new_new_segment (FALSE, demux->segment.rate,
- demux->segment.format, demux->segment.start,
- demux->segment.stop, demux->segment.start);
+ demux->new_seg_event = gst_event_new_segment (&demux->segment);
}
}
static gboolean
gst_flv_demux_sink_activate (GstPad * sinkpad)
{
- if (gst_pad_check_pull_range (sinkpad)) {
- return gst_pad_activate_pull (sinkpad, TRUE);
- } else {
+ GstQuery *query;
+ gboolean pull_mode;
+
+ query = gst_query_new_scheduling ();
+
+ if (!gst_pad_peer_query (sinkpad, query)) {
+ gst_query_unref (query);
+ goto activate_push;
+ }
+
+ gst_query_parse_scheduling (query, &pull_mode, NULL, NULL, NULL, NULL, NULL);
+ gst_query_unref (query);
+
+ if (!pull_mode)
+ goto activate_push;
+
+ GST_DEBUG_OBJECT (sinkpad, "activating pull");
+ return gst_pad_activate_pull (sinkpad, TRUE);
+
+activate_push:
+ {
+ GST_DEBUG_OBJECT (sinkpad, "activating push");
return gst_pad_activate_push (sinkpad, TRUE);
}
}
GST_WARNING_OBJECT (demux, "failed pushing EOS on streams");
ret = TRUE;
break;
- case GST_EVENT_NEWSEGMENT:
+ case GST_EVENT_SEGMENT:
{
- GstFormat format;
- gdouble rate;
- gint64 start, stop, time;
- gboolean update;
+ GstSegment in_segment;
GST_DEBUG_OBJECT (demux, "received new segment");
- gst_event_parse_new_segment (event, &update, &rate, &format, &start,
- &stop, &time);
+ gst_event_copy_segment (event, &in_segment);
- if (format == GST_FORMAT_TIME) {
+ if (in_segment.format == GST_FORMAT_TIME) {
/* time segment, this is perfect, copy over the values. */
- gst_segment_set_newsegment (&demux->segment, update, rate, format,
- start, stop, time);
+ memcpy (&demux->segment, &in_segment, sizeof (in_segment));
GST_DEBUG_OBJECT (demux, "NEWSEGMENT: %" GST_SEGMENT_FORMAT,
&demux->segment);
}
GST_DEBUG_OBJECT (pad, "position query, replying %" GST_TIME_FORMAT,
- GST_TIME_ARGS (demux->segment.last_stop));
+ GST_TIME_ARGS (demux->segment.position));
- gst_query_set_position (query, GST_FORMAT_TIME, demux->segment.last_stop);
+ gst_query_set_position (query, GST_FORMAT_TIME, demux->segment.position);
break;
}
demux->new_seg_event = NULL;
}
- if (demux->close_seg_event) {
- gst_event_unref (demux->close_seg_event);
- demux->close_seg_event = NULL;
- }
-
if (demux->audio_codec_data) {
gst_buffer_unref (demux->audio_codec_data);
demux->audio_codec_data = NULL;
}
static void
-gst_flv_demux_base_init (gpointer g_class)
-{
- GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
-
- gst_element_class_add_pad_template (element_class,
- gst_static_pad_template_get (&flv_sink_template));
- gst_element_class_add_pad_template (element_class,
- gst_static_pad_template_get (&audio_src_template));
- gst_element_class_add_pad_template (element_class,
- gst_static_pad_template_get (&video_src_template));
- gst_element_class_set_details_simple (element_class, "FLV Demuxer",
- "Codec/Demuxer",
- "Demux FLV feeds into digital streams",
- "Julien Moutte <julien@moutte.net>");
-}
-
-static void
gst_flv_demux_class_init (GstFlvDemuxClass * klass)
{
GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
GST_DEBUG_FUNCPTR (gst_flv_demux_change_state);
gstelement_class->set_index = GST_DEBUG_FUNCPTR (gst_flv_demux_set_index);
gstelement_class->get_index = GST_DEBUG_FUNCPTR (gst_flv_demux_get_index);
+
+ gst_element_class_add_pad_template (gstelement_class,
+ gst_static_pad_template_get (&flv_sink_template));
+ gst_element_class_add_pad_template (gstelement_class,
+ gst_static_pad_template_get (&audio_src_template));
+ gst_element_class_add_pad_template (gstelement_class,
+ gst_static_pad_template_get (&video_src_template));
+ gst_element_class_set_details_simple (gstelement_class, "FLV Demuxer",
+ "Codec/Demuxer",
+ "Demux FLV feeds into digital streams",
+ "Julien Moutte <julien@moutte.net>");
}
static void
-gst_flv_demux_init (GstFlvDemux * demux, GstFlvDemuxClass * g_class)
+gst_flv_demux_init (GstFlvDemux * demux)
{
demux->sinkpad =
gst_pad_new_from_static_template (&flv_sink_template, "sink");
#include <glib/gprintf.h>
#include <gst/tag/tag.h>
+#include <gst/audio/audio.h>
#include "qtatomparser.h"
#include "qtdemux_types.h"
GST_PAD_SOMETIMES,
GST_STATIC_CAPS_ANY);
-GST_BOILERPLATE (GstQTDemux, gst_qtdemux, GstQTDemux, GST_TYPE_ELEMENT);
+#define gst_qtdemux_parent_class parent_class
+G_DEFINE_TYPE (GstQTDemux, gst_qtdemux, GST_TYPE_ELEMENT);
static void gst_qtdemux_dispose (GObject * object);
static GstFlowReturn qtdemux_expose_streams (GstQTDemux * qtdemux);
static void
-gst_qtdemux_base_init (gpointer klass)
-{
- GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
-
- gst_element_class_add_pad_template (element_class,
- gst_static_pad_template_get (&gst_qtdemux_sink_template));
- gst_element_class_add_pad_template (element_class,
- gst_static_pad_template_get (&gst_qtdemux_videosrc_template));
- gst_element_class_add_pad_template (element_class,
- gst_static_pad_template_get (&gst_qtdemux_audiosrc_template));
- gst_element_class_add_pad_template (element_class,
- gst_static_pad_template_get (&gst_qtdemux_subsrc_template));
- gst_element_class_set_details_simple (element_class, "QuickTime demuxer",
- "Codec/Demuxer",
- "Demultiplex a QuickTime file into audio and video streams",
- "David Schleef <ds@schleef.org>, Wim Taymans <wim@fluendo.com>");
-
- GST_DEBUG_CATEGORY_INIT (qtdemux_debug, "qtdemux", 0, "qtdemux plugin");
-}
-
-static void
gst_qtdemux_class_init (GstQTDemuxClass * klass)
{
GObjectClass *gobject_class;
gstelement_class->get_index = GST_DEBUG_FUNCPTR (gst_qtdemux_get_index);
gst_tag_register_musicbrainz_tags ();
+
+ gst_element_class_add_pad_template (gstelement_class,
+ gst_static_pad_template_get (&gst_qtdemux_sink_template));
+ gst_element_class_add_pad_template (gstelement_class,
+ gst_static_pad_template_get (&gst_qtdemux_videosrc_template));
+ gst_element_class_add_pad_template (gstelement_class,
+ gst_static_pad_template_get (&gst_qtdemux_audiosrc_template));
+ gst_element_class_add_pad_template (gstelement_class,
+ gst_static_pad_template_get (&gst_qtdemux_subsrc_template));
+ gst_element_class_set_details_simple (gstelement_class, "QuickTime demuxer",
+ "Codec/Demuxer",
+ "Demultiplex a QuickTime file into audio and video streams",
+ "David Schleef <ds@schleef.org>, Wim Taymans <wim@fluendo.com>");
+
+ GST_DEBUG_CATEGORY_INIT (qtdemux_debug, "qtdemux", 0, "qtdemux plugin");
+
}
static void
-gst_qtdemux_init (GstQTDemux * qtdemux, GstQTDemuxClass * klass)
+gst_qtdemux_init (GstQTDemux * qtdemux)
{
qtdemux->sinkpad =
gst_pad_new_from_static_template (&gst_qtdemux_sink_template, "sink");
}
}
+/* Copy @size bytes from @src into @dest starting at byte @offset.
+ * @dest must be writable and at least offset + size bytes large;
+ * violations trip g_return_if_fail() and leave @dest untouched.
+ * (0.11-API helper: replaces direct GST_BUFFER_DATA() memcpy.) */
+static void
+_gst_buffer_copy_into_mem (GstBuffer * dest, const guint8 * src,
+ gsize offset, gsize size)
+{
+ guint8 *bdata;
+ gsize bsize;
+
+ g_return_if_fail (gst_buffer_is_writable (dest));
+
+ bsize = gst_buffer_get_size (dest);
+ g_return_if_fail (bsize >= offset + size);
+
+ /* map for writing; bsize is re-filled with the mapped size here */
+ bdata = gst_buffer_map (dest, &bsize, NULL, GST_MAP_WRITE);
+ memcpy (bdata + offset, src, size);
+ gst_buffer_unmap (dest, bdata, bsize);
+}
+
+/* Wrap the existing memory region @mem (@size bytes) in a new GstBuffer
+ * without copying.  If @free_func is NULL the memory is flagged
+ * GST_MEMORY_FLAG_READONLY and is NOT freed with the buffer (caller
+ * keeps ownership); otherwise @free_func releases @mem when the buffer's
+ * memory is disposed.  Returns a new buffer owned by the caller. */
+static GstBuffer *
+_gst_buffer_new_wrapped (gpointer mem, gsize size, GFreeFunc free_func)
+{
+ GstBuffer *buf;
+
+ buf = gst_buffer_new ();
+ gst_buffer_take_memory (buf, -1,
+ gst_memory_new_wrapped (free_func ? 0 : GST_MEMORY_FLAG_READONLY,
+ mem, free_func, size, 0, size));
+
+ return buf;
+}
+
static GstFlowReturn
gst_qtdemux_pull_atom (GstQTDemux * qtdemux, guint64 offset, guint64 size,
GstBuffer ** buf)
{
GstFlowReturn flow;
+ guint8 *bdata;
+ gsize bsize;
if (G_UNLIKELY (size == 0)) {
GstFlowReturn ret;
if (ret != GST_FLOW_OK)
return ret;
- size = QT_UINT32 (GST_BUFFER_DATA (tmp));
+ bdata = gst_buffer_map (tmp, &bsize, NULL, GST_MAP_READ);
+ size = QT_UINT32 (bdata);
GST_DEBUG_OBJECT (qtdemux, "size 0x%08" G_GINT64_MODIFIER "x", size);
+ gst_buffer_unmap (tmp, bdata, bsize);
gst_buffer_unref (tmp);
}
if (G_UNLIKELY (flow != GST_FLOW_OK))
return flow;
+ bsize = gst_buffer_get_size (*buf);
/* Catch short reads - we don't want any partial atoms */
- if (G_UNLIKELY (GST_BUFFER_SIZE (*buf) < size)) {
+ if (G_UNLIKELY (bsize < size)) {
GST_WARNING_OBJECT (qtdemux, "short read: %u < %" G_GUINT64_FORMAT,
- GST_BUFFER_SIZE (*buf), size);
+ bsize, size);
gst_buffer_unref (*buf);
*buf = NULL;
return GST_FLOW_UNEXPECTED;
switch (GST_QUERY_TYPE (query)) {
case GST_QUERY_POSITION:
- if (GST_CLOCK_TIME_IS_VALID (qtdemux->segment.last_stop)) {
+ if (GST_CLOCK_TIME_IS_VALID (qtdemux->segment.position)) {
gst_query_set_position (query, GST_FORMAT_TIME,
- qtdemux->segment.last_stop);
+ qtdemux->segment.position);
res = TRUE;
}
break;
/* get the index of the sample with media time */
index = gst_qtdemux_find_index_linear (qtdemux, str, media_start);
- GST_DEBUG_OBJECT (qtdemux, "sample for %" GST_TIME_FORMAT " at %u",
- GST_TIME_ARGS (media_start), index);
+ GST_DEBUG_OBJECT (qtdemux, "sample for %" GST_TIME_FORMAT " at %u"
+ " at offset %" G_GUINT64_FORMAT,
+ GST_TIME_ARGS (media_start), index, str->samples[index].offset);
/* find previous keyframe */
kindex = gst_qtdemux_find_keyframe (qtdemux, str, index);
media_time =
gst_util_uint64_scale (str->samples[kindex].timestamp, GST_SECOND,
str->timescale);
- GST_DEBUG_OBJECT (qtdemux, "keyframe at %u with time %" GST_TIME_FORMAT,
- kindex, GST_TIME_ARGS (media_time));
+ GST_DEBUG_OBJECT (qtdemux, "keyframe at %u with time %" GST_TIME_FORMAT
+ " at offset %" G_GUINT64_FORMAT,
+ kindex, GST_TIME_ARGS (media_time), str->samples[kindex].offset);
/* keyframes in the segment get a chance to change the
* desired_offset. keyframes out of the segment are
GstSeekType cur_type, gint64 * cur, GstSeekType stop_type, gint64 * stop)
{
gboolean res;
- GstFormat fmt;
g_return_val_if_fail (format != NULL, FALSE);
g_return_val_if_fail (cur != NULL, FALSE);
if (*format == GST_FORMAT_TIME)
return TRUE;
- fmt = GST_FORMAT_TIME;
res = TRUE;
if (cur_type != GST_SEEK_TYPE_NONE)
- res = gst_pad_query_convert (pad, *format, *cur, &fmt, cur);
+ res = gst_pad_query_convert (pad, *format, *cur, GST_FORMAT_TIME, cur);
if (res && stop_type != GST_SEEK_TYPE_NONE)
- res = gst_pad_query_convert (pad, *format, *stop, &fmt, stop);
+ res = gst_pad_query_convert (pad, *format, *stop, GST_FORMAT_TIME, stop);
if (res)
*format = GST_FORMAT_TIME;
gint64 desired_offset;
gint n;
- desired_offset = segment->last_stop;
+ desired_offset = segment->position;
GST_DEBUG_OBJECT (qtdemux, "seeking to %" GST_TIME_FORMAT,
GST_TIME_ARGS (desired_offset));
stream->segment_index = -1;
stream->last_ret = GST_FLOW_OK;
stream->sent_eos = FALSE;
+
+ if (segment->flags & GST_SEEK_FLAG_FLUSH)
+ gst_segment_init (&stream->segment, GST_FORMAT_TIME);
}
- segment->last_stop = desired_offset;
+ segment->position = desired_offset;
segment->time = desired_offset;
/* we stop at the end */
if (event) {
/* configure the segment with the seek variables */
GST_DEBUG_OBJECT (qtdemux, "configuring seek");
- gst_segment_set_seek (&seeksegment, rate, format, flags,
+ gst_segment_do_seek (&seeksegment, rate, format, flags,
cur_type, cur, stop_type, stop, &update);
}
/* prepare for streaming again */
if (flush) {
- gst_pad_push_event (qtdemux->sinkpad, gst_event_new_flush_stop ());
- gst_qtdemux_push_event (qtdemux, gst_event_new_flush_stop ());
- } else if (qtdemux->segment_running) {
- /* we are running the current segment and doing a non-flushing seek,
- * close the segment first based on the last_stop. */
- GST_DEBUG_OBJECT (qtdemux, "closing running segment %" G_GINT64_FORMAT
- " to %" G_GINT64_FORMAT, qtdemux->segment.start,
- qtdemux->segment.last_stop);
-
- if (qtdemux->segment.rate >= 0) {
- /* FIXME, rate is the product of the global rate and the (quicktime)
- * segment rate. */
- qtdemux->pending_newsegment = gst_event_new_new_segment (TRUE,
- qtdemux->segment.rate, qtdemux->segment.format,
- qtdemux->segment.start, qtdemux->segment.last_stop,
- qtdemux->segment.time);
- } else { /* For Reverse Playback */
- guint64 stop;
-
- if ((stop = qtdemux->segment.stop) == -1)
- stop = qtdemux->segment.duration;
- /* for reverse playback, we played from stop to last_stop. */
- qtdemux->pending_newsegment = gst_event_new_new_segment (TRUE,
- qtdemux->segment.rate, qtdemux->segment.format,
- qtdemux->segment.last_stop, stop, qtdemux->segment.last_stop);
- }
+ gst_pad_push_event (qtdemux->sinkpad, gst_event_new_flush_stop (TRUE));
+ gst_qtdemux_push_event (qtdemux, gst_event_new_flush_stop (TRUE));
}
/* commit the new segment */
if (qtdemux->segment.flags & GST_SEEK_FLAG_SEGMENT) {
gst_element_post_message (GST_ELEMENT_CAST (qtdemux),
gst_message_new_segment_start (GST_OBJECT_CAST (qtdemux),
- qtdemux->segment.format, qtdemux->segment.last_stop));
+ qtdemux->segment.format, qtdemux->segment.position));
}
- /* restart streaming, NEWSEGMENT will be sent from the streaming
- * thread. */
- qtdemux->segment_running = TRUE;
+ /* restart streaming, NEWSEGMENT will be sent from the streaming thread. */
for (i = 0; i < qtdemux->n_streams; i++)
qtdemux->streams[i]->last_ret = GST_FLOW_OK;
GST_LOG_OBJECT (demux, "handling %s event", GST_EVENT_TYPE_NAME (event));
switch (GST_EVENT_TYPE (event)) {
- case GST_EVENT_NEWSEGMENT:
+ case GST_EVENT_SEGMENT:
{
- GstFormat format;
- gdouble rate, arate;
- gint64 start, stop, time, offset = 0;
+ gint64 offset = 0;
QtDemuxStream *stream;
gint idx;
- gboolean update;
GstSegment segment;
/* some debug output */
- gst_segment_init (&segment, GST_FORMAT_UNDEFINED);
- gst_event_parse_new_segment_full (event, &update, &rate, &arate, &format,
- &start, &stop, &time);
- gst_segment_set_newsegment_full (&segment, update, rate, arate, format,
- start, stop, time);
- GST_DEBUG_OBJECT (demux,
- "received format %d newsegment %" GST_SEGMENT_FORMAT, format,
+ gst_event_copy_segment (event, &segment);
+ GST_DEBUG_OBJECT (demux, "received newsegment %" GST_SEGMENT_FORMAT,
&segment);
/* chain will send initial newsegment after pads have been added */
}
/* we only expect a BYTE segment, e.g. following a seek */
- if (format == GST_FORMAT_BYTES) {
- if (start > 0) {
+ if (segment.format == GST_FORMAT_BYTES) {
+ if (GST_CLOCK_TIME_IS_VALID (segment.start)) {
gint64 requested_seek_time;
guint64 seek_offset;
- offset = start;
+ offset = segment.start;
GST_OBJECT_LOCK (demux);
requested_seek_time = demux->requested_seek_time;
GST_OBJECT_UNLOCK (demux);
if (offset == seek_offset) {
- start = requested_seek_time;
+ segment.start = requested_seek_time;
} else {
- gst_qtdemux_find_sample (demux, start, TRUE, FALSE, NULL, NULL,
- &start);
- start = MAX (start, 0);
+ gst_qtdemux_find_sample (demux, segment.start, TRUE, FALSE, NULL,
+ NULL, (gint64 *) & segment.start);
+ if ((gint64) segment.start < 0)
+ segment.start = 0;
}
}
- if (stop > 0) {
- gst_qtdemux_find_sample (demux, stop, FALSE, FALSE, NULL, NULL,
- &stop);
+ if (GST_CLOCK_TIME_IS_VALID (segment.stop)) {
+ gst_qtdemux_find_sample (demux, segment.stop, FALSE, FALSE, NULL,
+ NULL, (gint64 *) & segment.stop);
/* keyframe seeking should already arrange for start >= stop,
* but make sure in other rare cases */
- stop = MAX (stop, start);
+ segment.stop = MAX (segment.stop, segment.start);
}
} else {
GST_DEBUG_OBJECT (demux, "unsupported segment format, ignoring");
}
/* accept upstream's notion of segment and distribute along */
- gst_segment_set_newsegment_full (&demux->segment, update, rate, arate,
- GST_FORMAT_TIME, start, stop, start);
- GST_DEBUG_OBJECT (demux, "Pushing newseg update %d, rate %g, "
- "applied rate %g, format %d, start %" GST_TIME_FORMAT ", "
- "stop %" GST_TIME_FORMAT, update, rate, arate, GST_FORMAT_TIME,
- GST_TIME_ARGS (start), GST_TIME_ARGS (stop));
+ segment.time = segment.start;
+ segment.duration = demux->segment.duration;
+ segment.base = gst_segment_to_running_time (&demux->segment,
+ GST_FORMAT_TIME, demux->segment.position);
- gst_qtdemux_push_event (demux,
- gst_event_new_new_segment_full (update, rate, arate, GST_FORMAT_TIME,
- start, stop, start));
+ gst_segment_copy_into (&segment, &demux->segment);
+ GST_DEBUG_OBJECT (demux, "Pushing newseg %" GST_SEGMENT_FORMAT, &segment);
+ gst_qtdemux_push_event (demux, gst_event_new_segment (&segment));
/* clear leftover in current segment, if any */
gst_adapter_clear (demux->adapter);
case GST_EVENT_FLUSH_STOP:
{
gint i;
+ GstClockTime dur;
/* clean up, force EOS if no more info follows */
gst_adapter_clear (demux->adapter);
demux->streams[i]->last_ret = GST_FLOW_OK;
demux->streams[i]->sent_eos = FALSE;
}
+ dur = demux->segment.duration;
+ gst_segment_init (&demux->segment, GST_FORMAT_TIME);
+ demux->segment.duration = dur;
break;
}
case GST_EVENT_EOS:
GST_DEBUG_OBJECT (qtdemux, "major brand: %" GST_FOURCC_FORMAT,
GST_FOURCC_ARGS (qtdemux->major_brand));
buf = qtdemux->comp_brands = gst_buffer_new_and_alloc (length - 16);
- memcpy (GST_BUFFER_DATA (buf), buffer + 16, GST_BUFFER_SIZE (buf));
+ _gst_buffer_copy_into_mem (buf, buffer + 16, 0, length - 16);
}
}
GstBuffer *buf;
GstTagList *taglist;
- buf = gst_buffer_new ();
- GST_BUFFER_DATA (buf) = (guint8 *) buffer + offset + 16;
- GST_BUFFER_SIZE (buf) = length - offset - 16;
-
+ buf = _gst_buffer_new_wrapped ((guint8 *) buffer + offset + 16,
+ length - offset - 16, NULL);
taglist = gst_tag_list_from_xmp_buffer (buf);
gst_buffer_unref (buf);
GstBuffer *buf = NULL;
GstFlowReturn ret = GST_FLOW_OK;
guint64 cur_offset = qtdemux->offset;
+ guint8 *data;
+ gsize size;
ret = gst_pad_pull_range (qtdemux->sinkpad, cur_offset, 16, &buf);
if (G_UNLIKELY (ret != GST_FLOW_OK))
goto beach;
- if (G_LIKELY (GST_BUFFER_SIZE (buf) >= 8))
- extract_initial_length_and_fourcc (GST_BUFFER_DATA (buf),
- GST_BUFFER_SIZE (buf), &length, &fourcc);
+ data = gst_buffer_map (buf, &size, NULL, GST_MAP_READ);
+ if (G_LIKELY (size >= 8))
+ extract_initial_length_and_fourcc (data, size, &length, &fourcc);
+ gst_buffer_unmap (buf, data, size);
gst_buffer_unref (buf);
/* maybe we already got most we needed, so only consider this eof */
ret = gst_pad_pull_range (qtdemux->sinkpad, cur_offset, length, &moov);
if (ret != GST_FLOW_OK)
goto beach;
- if (length != GST_BUFFER_SIZE (moov)) {
+ data = gst_buffer_map (moov, &size, NULL, GST_MAP_READ);
+ if (length != size) {
/* Some files have a 'moov' atom at the end of the file which contains
* a terminal 'free' atom where the body of the atom is missing.
* Check for, and permit, this special case.
*/
- if (GST_BUFFER_SIZE (moov) >= 8) {
- guint8 *final_data = GST_BUFFER_DATA (moov) +
- (GST_BUFFER_SIZE (moov) - 8);
+ if (size >= 8) {
+ guint8 *final_data = data + (size - 8);
guint32 final_length = QT_UINT32 (final_data);
guint32 final_fourcc = QT_FOURCC (final_data + 4);
- if (final_fourcc == FOURCC_free &&
- GST_BUFFER_SIZE (moov) + final_length - 8 == length) {
+ gst_buffer_unmap (moov, data, size);
+ if (final_fourcc == FOURCC_free && size + final_length - 8 == length) {
/* Ok, we've found that special case. Allocate a new buffer with
* that free atom actually present. */
GstBuffer *newmoov = gst_buffer_new_and_alloc (length);
- gst_buffer_copy_metadata (newmoov, moov,
- GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS |
- GST_BUFFER_COPY_CAPS);
- memcpy (GST_BUFFER_DATA (newmoov), GST_BUFFER_DATA (moov),
- GST_BUFFER_SIZE (moov));
- memset (GST_BUFFER_DATA (newmoov) + GST_BUFFER_SIZE (moov), 0,
- final_length - 8);
+ gst_buffer_copy_into (newmoov, moov, 0, 0, size);
+ data = gst_buffer_map (newmoov, &size, NULL, GST_MAP_WRITE);
+ memset (data + length - final_length + 8, 0, final_length - 8);
gst_buffer_unref (moov);
moov = newmoov;
}
}
}
- if (length != GST_BUFFER_SIZE (moov)) {
+ if (length != size) {
GST_ELEMENT_ERROR (qtdemux, STREAM, DEMUX,
(_("This file is incomplete and cannot be played.")),
("We got less than expected (received %u, wanted %u, offset %"
- G_GUINT64_FORMAT ")",
- GST_BUFFER_SIZE (moov), (guint) length, cur_offset));
+ G_GUINT64_FORMAT ")", size, (guint) length, cur_offset));
+ gst_buffer_unmap (moov, data, size);
gst_buffer_unref (moov);
ret = GST_FLOW_ERROR;
goto beach;
}
qtdemux->offset += length;
- qtdemux_parse_moov (qtdemux, GST_BUFFER_DATA (moov), length);
+ qtdemux_parse_moov (qtdemux, data, length);
qtdemux_node_dump (qtdemux, qtdemux->moov_node);
qtdemux_parse_tree (qtdemux);
g_node_destroy (qtdemux->moov_node);
+ gst_buffer_unmap (moov, data, size);
gst_buffer_unref (moov);
qtdemux->moov_node = NULL;
qtdemux->got_moov = TRUE;
if (ret != GST_FLOW_OK)
goto beach;
qtdemux->offset += length;
- qtdemux_parse_ftyp (qtdemux, GST_BUFFER_DATA (ftyp),
- GST_BUFFER_SIZE (ftyp));
+ data = gst_buffer_map (ftyp, &size, NULL, GST_MAP_READ);
+ qtdemux_parse_ftyp (qtdemux, data, size);
+ gst_buffer_unmap (ftyp, data, size);
gst_buffer_unref (ftyp);
break;
}
if (ret != GST_FLOW_OK)
goto beach;
qtdemux->offset += length;
- qtdemux_parse_uuid (qtdemux, GST_BUFFER_DATA (uuid),
- GST_BUFFER_SIZE (uuid));
+ data = gst_buffer_map (uuid, &size, NULL, GST_MAP_READ);
+ qtdemux_parse_uuid (qtdemux, data, size);
+ gst_buffer_unmap (uuid, data, size);
gst_buffer_unref (uuid);
break;
}
ret = gst_qtdemux_pull_atom (qtdemux, cur_offset, length, &unknown);
if (ret != GST_FLOW_OK)
goto beach;
- GST_MEMDUMP ("Unknown tag", GST_BUFFER_DATA (unknown),
- GST_BUFFER_SIZE (unknown));
+ data = gst_buffer_map (unknown, &size, NULL, GST_MAP_READ);
+ GST_MEMDUMP ("Unknown tag", data, size);
+ gst_buffer_unmap (unknown, data, size);
gst_buffer_unref (unknown);
qtdemux->offset += length;
break;
QtDemuxStream *str = qtdemux->streams[n];
seg_idx = gst_qtdemux_find_segment (qtdemux, str,
- qtdemux->segment.last_stop);
+ qtdemux->segment.position);
/* segment not found, continue with normal flow */
if (seg_idx == -1)
k_index, GST_TIME_ARGS (k_pos));
/* Set last_stop with the keyframe timestamp we pushed of that stream */
- gst_segment_set_last_stop (&qtdemux->segment, GST_FORMAT_TIME, last_stop);
+ qtdemux->segment.position = last_stop;
GST_DEBUG_OBJECT (qtdemux, "last_stop now is %" GST_TIME_FORMAT,
GST_TIME_ARGS (last_stop));
/* update the segment values used for clipping */
gst_segment_init (&stream->segment, GST_FORMAT_TIME);
- gst_segment_set_newsegment (&stream->segment, FALSE, rate, GST_FORMAT_TIME,
- start, stop, time);
+ /* accumulate previous segments */
+ if (GST_CLOCK_TIME_IS_VALID (stream->segment.stop))
+ stream->segment.base += (stream->segment.stop - stream->segment.start) /
+ ABS (stream->segment.rate);
+ stream->segment.rate = rate;
+ stream->segment.start = start;
+ stream->segment.stop = stop;
+ stream->segment.time = time;
/* now prepare and send the segment */
if (stream->pad) {
- event = gst_event_new_new_segment (FALSE, rate, GST_FORMAT_TIME,
- start, stop, time);
+ event = gst_event_new_segment (&stream->segment);
gst_pad_push_event (stream->pad, event);
/* assume we can send more data now */
stream->last_ret = GST_FLOW_OK;
end_time = stream->segments[stream->n_segments - 1].stop_time;
GST_LOG_OBJECT (demux, "current position: %" GST_TIME_FORMAT
", stream end: %" GST_TIME_FORMAT,
- GST_TIME_ARGS (demux->segment.last_stop), GST_TIME_ARGS (end_time));
- if (end_time + 2 * GST_SECOND < demux->segment.last_stop) {
+ GST_TIME_ARGS (demux->segment.position), GST_TIME_ARGS (end_time));
+ if (end_time + 2 * GST_SECOND < demux->segment.position) {
GST_DEBUG_OBJECT (demux, "sending EOS for stream %s",
GST_PAD_NAME (stream->pad));
stream->sent_eos = TRUE;
gst_qtdemux_clip_buffer (GstQTDemux * qtdemux, QtDemuxStream * stream,
GstBuffer * buf)
{
- gint64 start, stop, cstart, cstop, diff;
+ guint64 start, stop, cstart, cstop, diff;
GstClockTime timestamp = GST_CLOCK_TIME_NONE, duration = GST_CLOCK_TIME_NONE;
- guint8 *data;
guint size;
gint num_rate, denom_rate;
gint frame_size;
gboolean clip_data;
+ guint offset;
- data = GST_BUFFER_DATA (buf);
- size = GST_BUFFER_SIZE (buf);
+ size = gst_buffer_get_size (buf);
+ offset = 0;
/* depending on the type, setup the clip parameters */
if (stream->subtype == FOURCC_soun) {
"clipping start to %" GST_TIME_FORMAT " %"
G_GUINT64_FORMAT " bytes", GST_TIME_ARGS (cstart), diff);
- data += diff;
+ offset = diff;
size -= diff;
}
}
}
}
+ gst_buffer_resize (buf, offset, size);
GST_BUFFER_TIMESTAMP (buf) = timestamp;
GST_BUFFER_DURATION (buf) = duration;
- GST_BUFFER_SIZE (buf) = size;
- GST_BUFFER_DATA (buf) = data;
return buf;
GstBuffer * buf)
{
guint8 *data;
- guint size, nsize = 0;
+ guint nsize = 0;
+ gsize size;
gchar *str;
- data = GST_BUFFER_DATA (buf);
- size = GST_BUFFER_SIZE (buf);
-
/* not many cases for now */
if (G_UNLIKELY (stream->fourcc == FOURCC_mp4s)) {
/* send a one time dvd clut event */
return buf;
}
+ data = gst_buffer_map (buf, &size, NULL, GST_MAP_READ);
+
if (G_LIKELY (size >= 2)) {
nsize = GST_READ_UINT16_BE (data);
nsize = MIN (nsize, size - 2);
/* takes care of UTF-8 validation or UTF-16 recognition,
* no other encoding expected */
str = gst_tag_freeform_string_to_utf8 ((gchar *) data + 2, nsize, NULL);
+ gst_buffer_unmap (buf, data, size);
if (str) {
gst_buffer_unref (buf);
- buf = gst_buffer_new ();
- GST_BUFFER_DATA (buf) = GST_BUFFER_MALLOCDATA (buf) = (guint8 *) str;
- GST_BUFFER_SIZE (buf) = strlen (str);
+ buf = _gst_buffer_new_wrapped (str, strlen (str), g_free);
} else {
/* may be 0-size subtitle, which is also sent to keep pipeline going */
- GST_BUFFER_DATA (buf) = data + 2;
- GST_BUFFER_SIZE (buf) = nsize;
+ gst_buffer_resize (buf, 2, nsize);
}
/* FIXME ? convert optional subsequent style info to markup */
if (G_UNLIKELY (stream->fourcc == FOURCC_rtsp)) {
gchar *url;
+ guint8 *bdata;
+ gsize bsize;
- url = g_strndup ((gchar *) GST_BUFFER_DATA (buf), GST_BUFFER_SIZE (buf));
+ bdata = gst_buffer_map (buf, &bsize, NULL, GST_MAP_READ);
+ url = g_strndup ((gchar *) bdata, bsize);
+ gst_buffer_unmap (buf, bdata, bsize);
if (url != NULL && strlen (url) != 0) {
/* we have RTSP redirect now */
gst_element_post_message (GST_ELEMENT_CAST (qtdemux),
/* position reporting */
if (qtdemux->segment.rate >= 0) {
- gst_segment_set_last_stop (&qtdemux->segment, GST_FORMAT_TIME, position);
+ qtdemux->segment.position = position;
gst_qtdemux_sync_streams (qtdemux);
}
GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DISCONT);
stream->discont = FALSE;
}
- gst_buffer_set_caps (buffer, stream->caps);
gst_pad_push (stream->pad, buffer);
}
/* we're going to modify the metadata */
- buf = gst_buffer_make_metadata_writable (buf);
+ buf = gst_buffer_make_writable (buf);
if (G_UNLIKELY (stream->need_process))
buf = gst_qtdemux_process_buffer (qtdemux, stream, buf);
GST_BUFFER_OFFSET_END (buf) = -1;
if (G_UNLIKELY (stream->padding)) {
- GST_BUFFER_DATA (buf) += stream->padding;
- GST_BUFFER_SIZE (buf) -= stream->padding;
+ gst_buffer_resize (buf, stream->padding, -1);
}
if (G_UNLIKELY (qtdemux->element_index)) {
if (!keyframe)
GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT);
- gst_buffer_set_caps (buf, stream->caps);
-
GST_LOG_OBJECT (qtdemux,
"Pushing buffer with time %" GST_TIME_FORMAT ", duration %"
GST_TIME_FORMAT " on pad %s", GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
{
GST_ELEMENT_ERROR (qtdemux, STREAM, FAILED,
(NULL), ("streaming stopped, invalid state"));
- qtdemux->segment_running = FALSE;
gst_pad_pause_task (pad);
gst_qtdemux_push_event (qtdemux, gst_event_new_eos ());
goto done;
GST_LOG_OBJECT (qtdemux, "pausing task, reason %s", reason);
- qtdemux->segment_running = FALSE;
gst_pad_pause_task (pad);
/* fatal errors need special actions */
if (qtdemux->segment.flags & GST_SEEK_FLAG_SEGMENT) {
gint64 stop;
- /* FIXME: I am not sure this is the right fix. If the sinks are
- * supposed to detect the segment is complete and accumulate
- * automatically, it does not seem to work here. Need more work */
- qtdemux->segment_running = TRUE;
-
if ((stop = qtdemux->segment.stop) == -1)
stop = qtdemux->segment.duration;
/* try harder to query upstream size if we didn't get it the first time */
if (seekable && stop == -1) {
- GstFormat fmt = GST_FORMAT_BYTES;
-
GST_DEBUG_OBJECT (demux, "doing duration query to fix up unset stop");
- gst_pad_query_peer_duration (demux->sinkpad, &fmt, &stop);
+ gst_pad_query_peer_duration (demux->sinkpad, GST_FORMAT_BYTES, &stop);
}
/* if upstream doesn't know the size, it's likely that it's not seekable in
gst_qtdemux_check_seekability (demux);
- data = gst_adapter_peek (demux->adapter, demux->neededbytes);
+ data = gst_adapter_map (demux->adapter, demux->neededbytes);
/* get fourcc/length, set neededbytes */
extract_initial_length_and_fourcc ((guint8 *) data, demux->neededbytes,
&size, &fourcc);
+ gst_adapter_unmap (demux->adapter, 0);
+ data = NULL;
GST_DEBUG_OBJECT (demux, "Peeking found [%" GST_FOURCC_FORMAT "] "
"size: %" G_GUINT64_FORMAT, GST_FOURCC_ARGS (fourcc), size);
if (size == 0) {
/* there may be multiple mdat (or alike) buffers */
/* sanity check */
if (demux->mdatbuffer)
- bs = GST_BUFFER_SIZE (demux->mdatbuffer);
+ bs = gst_buffer_get_size (demux->mdatbuffer);
else
bs = 0;
if (size + bs > 10 * (1 << 20))
GST_DEBUG_OBJECT (demux, "In header");
- data = gst_adapter_peek (demux->adapter, demux->neededbytes);
+ data = gst_adapter_map (demux->adapter, demux->neededbytes);
/* parse the header */
extract_initial_length_and_fourcc (data, demux->neededbytes, NULL,
demux->got_moov = TRUE;
/* prepare newsegment to send when streaming actually starts */
- if (!demux->pending_newsegment) {
- demux->pending_newsegment =
- gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME,
- 0, GST_CLOCK_TIME_NONE, 0);
- }
+ if (!demux->pending_newsegment)
+ demux->pending_newsegment = gst_event_new_segment (&demux->segment);
qtdemux_parse_moov (demux, data, demux->neededbytes);
qtdemux_node_dump (demux, demux->moov_node);
GST_DEBUG_OBJECT (demux, "Parsing [moof]");
if (!qtdemux_parse_moof (demux, data, demux->neededbytes,
demux->offset, NULL)) {
+ gst_adapter_unmap (demux->adapter, 0);
ret = GST_FLOW_ERROR;
goto done;
}
GST_FOURCC_ARGS (fourcc));
/* Let's jump that one and go back to initial state */
}
+ gst_adapter_unmap (demux->adapter, 0);
+ data = NULL;
if (demux->mdatbuffer && demux->n_streams) {
/* the mdat was before the header */
}
case QTDEMUX_STATE_BUFFER_MDAT:{
GstBuffer *buf;
+ guint8 fourcc[4];
GST_DEBUG_OBJECT (demux, "Got our buffer at offset %" G_GUINT64_FORMAT,
demux->offset);
buf = gst_adapter_take_buffer (demux->adapter, demux->neededbytes);
+ gst_buffer_extract (buf, 0, fourcc, 4);
GST_DEBUG_OBJECT (demux, "mdatbuffer starts with %" GST_FOURCC_FORMAT,
- GST_FOURCC_ARGS (QT_FOURCC (GST_BUFFER_DATA (buf) + 4)));
+ GST_FOURCC_ARGS (QT_FOURCC (fourcc)));
if (demux->mdatbuffer)
demux->mdatbuffer = gst_buffer_join (demux->mdatbuffer, buf);
else
static gboolean
qtdemux_sink_activate (GstPad * sinkpad)
{
- if (gst_pad_check_pull_range (sinkpad))
- return gst_pad_activate_pull (sinkpad, TRUE);
- else
+ GstQuery *query;
+ gboolean pull_mode;
+
+ /* 0.11 replaces gst_pad_check_pull_range() with a SCHEDULING query:
+ * ask the peer whether it supports pull mode and fall back to push
+ * mode when the query fails or pull is not offered. */
+ query = gst_query_new_scheduling ();
+
+ if (!gst_pad_peer_query (sinkpad, query)) {
+ gst_query_unref (query);
+ goto activate_push;
+ }
+
+ gst_query_parse_scheduling (query, &pull_mode, NULL, NULL, NULL, NULL, NULL);
+ gst_query_unref (query);
+
+ if (!pull_mode)
+ goto activate_push;
+
+ GST_DEBUG_OBJECT (sinkpad, "activating pull");
+ return gst_pad_activate_pull (sinkpad, TRUE);
+
+activate_push:
+ {
+ GST_DEBUG_OBJECT (sinkpad, "activating push");
return gst_pad_activate_push (sinkpad, TRUE);
+ }
}
static gboolean
if (active) {
demux->pullbased = TRUE;
- demux->segment_running = TRUE;
return gst_pad_start_task (sinkpad, (GstTaskFunction) gst_qtdemux_loop,
sinkpad);
} else {
- demux->segment_running = FALSE;
return gst_pad_stop_task (sinkpad);
}
}
switch (type) {
case FOURCC_tCtH:
buffer = gst_buffer_new_and_alloc (size);
- memcpy (GST_BUFFER_DATA (buffer), buf, size);
+ _gst_buffer_copy_into_mem (buffer, buf, 0, size);
stream->buffers = g_slist_append (stream->buffers, buffer);
GST_LOG_OBJECT (qtdemux, "parsing theora header");
break;
case FOURCC_tCt_:
buffer = gst_buffer_new_and_alloc (size);
- memcpy (GST_BUFFER_DATA (buffer), buf, size);
+ _gst_buffer_copy_into_mem (buffer, buf, 0, size);
stream->buffers = g_slist_append (stream->buffers, buffer);
GST_LOG_OBJECT (qtdemux, "parsing theora comment");
break;
case FOURCC_tCtC:
buffer = gst_buffer_new_and_alloc (size);
- memcpy (GST_BUFFER_DATA (buffer), buf, size);
+ _gst_buffer_copy_into_mem (buffer, buf, 0, size);
stream->buffers = g_slist_append (stream->buffers, buffer);
GST_LOG_OBJECT (qtdemux, "parsing theora codebook");
break;
{
/* consistent default for push based mode */
gst_segment_init (&stream->segment, GST_FORMAT_TIME);
- gst_segment_set_newsegment (&stream->segment, FALSE, 1.0, GST_FORMAT_TIME,
- 0, GST_CLOCK_TIME_NONE, 0);
if (stream->subtype == FOURCC_vide) {
gchar *name = g_strdup_printf ("video_%02d", qtdemux->n_video_streams);
/* make sure it's not writable. We leave MALLOCDATA to NULL so that we
* don't free any of the buffer data. */
- palette = gst_buffer_new ();
- GST_BUFFER_FLAG_SET (palette, GST_BUFFER_FLAG_READONLY);
- GST_BUFFER_DATA (palette) = (guint8 *) palette_data;
- GST_BUFFER_SIZE (palette) = sizeof (guint32) * palette_count;
+ palette = _gst_buffer_new_wrapped ((gpointer) palette_data,
+ palette_count, NULL);
gst_caps_set_simple (stream->caps, "palette_data",
GST_TYPE_BUFFER, palette, NULL);
if (stream->pending_tags)
gst_tag_list_free (stream->pending_tags);
stream->pending_tags = list;
- if (list) {
- /* post now, send event on pad later */
- GST_DEBUG_OBJECT (qtdemux, "Posting tags %" GST_PTR_FORMAT, list);
- gst_element_post_message (GST_ELEMENT (qtdemux),
- gst_message_new_tag_full (GST_OBJECT (qtdemux), stream->pad,
- gst_tag_list_copy (list)));
- }
/* global tags go on each pad anyway */
stream->send_global_tags = TRUE;
}
G_GUINT64_FORMAT, GST_FOURCC_ARGS (fourcc), *offset);
while (TRUE) {
+ guint8 *bdata;
+ gsize bsize;
+
ret = gst_pad_pull_range (qtdemux->sinkpad, *offset, 16, &buf);
if (G_UNLIKELY (ret != GST_FLOW_OK))
goto locate_failed;
- if (G_LIKELY (GST_BUFFER_SIZE (buf) != 16)) {
+ if (G_UNLIKELY (gst_buffer_get_size (buf) != 16)) {
/* likely EOF */
ret = GST_FLOW_UNEXPECTED;
gst_buffer_unref (buf);
goto locate_failed;
}
- extract_initial_length_and_fourcc (GST_BUFFER_DATA (buf), 16, length,
- &lfourcc);
+ bdata = gst_buffer_map (buf, &bsize, NULL, GST_MAP_READ);
+ extract_initial_length_and_fourcc (bdata, 16, length, &lfourcc);
+ gst_buffer_unmap (buf, bdata, bsize);
gst_buffer_unref (buf);
if (G_UNLIKELY (*length == 0)) {
GstBuffer *buf = NULL;
GstFlowReturn ret = GST_FLOW_OK;
GstFlowReturn res = GST_FLOW_OK;
+ guint8 *bdata;
+ gsize bsize;
offset = qtdemux->moof_offset;
GST_DEBUG_OBJECT (qtdemux, "next moof at offset %" G_GUINT64_FORMAT, offset);
ret = gst_qtdemux_pull_atom (qtdemux, offset, length, &buf);
if (G_UNLIKELY (ret != GST_FLOW_OK))
goto flow_failed;
- if (!qtdemux_parse_moof (qtdemux, GST_BUFFER_DATA (buf),
- GST_BUFFER_SIZE (buf), offset, NULL)) {
+ bdata = gst_buffer_map (buf, &bsize, NULL, GST_MAP_READ);
+ if (!qtdemux_parse_moof (qtdemux, bdata, bsize, offset, NULL)) {
+ gst_buffer_unmap (buf, bdata, bsize);
gst_buffer_unref (buf);
buf = NULL;
goto parse_failed;
}
+ gst_buffer_unmap (buf, bdata, bsize);
gst_buffer_unref (buf);
buf = NULL;
/* sync sample atom */
stream->stps_present = FALSE;
if ((stream->stss_present =
- ! !qtdemux_tree_get_child_by_type_full (stbl, FOURCC_stss,
+ !!qtdemux_tree_get_child_by_type_full (stbl, FOURCC_stss,
&stream->stss) ? TRUE : FALSE) == TRUE) {
/* copy atom data into a new buffer for later use */
stream->stss.data = g_memdup (stream->stss.data, stream->stss.size);
/* partial sync sample atom */
if ((stream->stps_present =
- ! !qtdemux_tree_get_child_by_type_full (stbl, FOURCC_stps,
+ !!qtdemux_tree_get_child_by_type_full (stbl, FOURCC_stps,
&stream->stps) ? TRUE : FALSE) == TRUE) {
/* copy atom data into a new buffer for later use */
stream->stps.data = g_memdup (stream->stps.data, stream->stps.size);
/* composition time-to-sample */
if ((stream->ctts_present =
- ! !qtdemux_tree_get_child_by_type_full (stbl, FOURCC_ctts,
+ !!qtdemux_tree_get_child_by_type_full (stbl, FOURCC_ctts,
&stream->ctts) ? TRUE : FALSE) == TRUE) {
/* copy atom data into a new buffer for later use */
stream->ctts.data = g_memdup (stream->ctts.data, stream->ctts.size);
seqh_size = QT_UINT32 (data + 4);
if (seqh_size > 0) {
_seqh = gst_buffer_new_and_alloc (seqh_size);
- memcpy (GST_BUFFER_DATA (_seqh), data + 8, seqh_size);
+ _gst_buffer_copy_into_mem (_seqh, data + 8, 0, seqh_size);
}
}
}
static const guint wb_bitrates[] = {
6600, 8850, 12650, 14250, 15850, 18250, 19850, 23050, 23850
};
- const guint8 *data = GST_BUFFER_DATA (buf);
- guint size = QT_UINT32 (data), max_mode;
+ guint8 *data;
+ gsize size, max_mode;
guint16 mode_set;
- if (GST_BUFFER_SIZE (buf) != 0x11) {
+ data = gst_buffer_map (buf, &size, NULL, GST_MAP_READ);
+
+ if (size != 0x11) {
GST_DEBUG ("Atom should have size 0x11, not %u", size);
goto bad_data;
}
goto bad_data;
}
+ gst_buffer_unmap (buf, data, size);
return wb ? wb_bitrates[max_mode] : nb_bitrates[max_mode];
bad_data:
+ gst_buffer_unmap (buf, data, size);
return 0;
}
avc_data + 8 + 1, size - 1);
buf = gst_buffer_new_and_alloc (size);
- memcpy (GST_BUFFER_DATA (buf), avc_data + 0x8, size);
+ _gst_buffer_copy_into_mem (buf, avc_data + 0x8, 0, size);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
if (len > 0x8) {
len -= 0x8;
buf = gst_buffer_new_and_alloc (len);
- memcpy (GST_BUFFER_DATA (buf), data + 8, len);
+ _gst_buffer_copy_into_mem (buf, data + 8, 0, len);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
/* see annex I of the jpeg2000 spec */
GNode *jp2h, *ihdr, *colr, *mjp2, *field, *prefix, *cmap, *cdef;
const guint8 *data;
- guint32 fourcc = 0;
+ const gchar *colorspace;
gint ncomp = 0;
guint32 ncomp_map = 0;
gint32 *comp_map = NULL;
if (QT_UINT8 ((guint8 *) colr->data + 8) == 1) {
switch (QT_UINT32 ((guint8 *) colr->data + 11)) {
case 16:
- fourcc = GST_MAKE_FOURCC ('s', 'R', 'G', 'B');
+ colorspace = "sRGB";
break;
case 17:
- fourcc = GST_MAKE_FOURCC ('G', 'R', 'A', 'Y');
+ colorspace = "GRAY";
break;
case 18:
- fourcc = GST_MAKE_FOURCC ('s', 'Y', 'U', 'V');
+ colorspace = "sYUV";
break;
default:
+ colorspace = NULL;
break;
}
}
- if (!fourcc)
+ if (!colorspace)
/* colr is required, and only values 16, 17, and 18 are specified,
- so error if we have no fourcc */
+ so error if we have no colorspace */
break;
/* extract component mapping */
gst_caps_set_simple (stream->caps,
"num-components", G_TYPE_INT, ncomp, NULL);
gst_caps_set_simple (stream->caps,
- "fourcc", GST_TYPE_FOURCC, fourcc, NULL);
+ "colorspace", G_TYPE_STRING, colorspace, NULL);
if (comp_map) {
GValue arr = { 0, };
if (len > 0x8) {
len -= 0x8;
buf = gst_buffer_new_and_alloc (len);
- memcpy (GST_BUFFER_DATA (buf), data + 8, len);
+ _gst_buffer_copy_into_mem (buf, data + 8, 0, len);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
GST_DEBUG_OBJECT (qtdemux, "found codec_data in stsd");
buf = gst_buffer_new_and_alloc (len);
- memcpy (GST_BUFFER_DATA (buf), stsd_data, len);
+ _gst_buffer_copy_into_mem (buf, stsd_data, 0, len);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
case FOURCC_ovc1:
{
GNode *ovc1;
- gchar *ovc1_data;
+ guint8 *ovc1_data;
guint ovc1_len;
GstBuffer *buf;
break;
}
buf = gst_buffer_new_and_alloc (ovc1_len - 198);
- memcpy (GST_BUFFER_DATA (buf), ovc1_data + 198, ovc1_len - 198);
+ _gst_buffer_copy_into_mem (buf, ovc1_data + 198, 0, ovc1_len - 198);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
}
if (enda) {
gst_caps_set_simple (stream->caps,
- "endianness", G_TYPE_INT, G_LITTLE_ENDIAN, NULL);
+ "format", G_TYPE_STRING, "S24_3LE", NULL);
}
break;
}
case FOURCC_owma:
{
GNode *owma;
- const gchar *owma_data, *codec_name = NULL;
+ const guint8 *owma_data;
+ const gchar *codec_name = NULL;
guint owma_len;
GstBuffer *buf;
gint version = 1;
}
wfex = (WAVEFORMATEX *) (owma_data + 36);
buf = gst_buffer_new_and_alloc (owma_len - 54);
- memcpy (GST_BUFFER_DATA (buf), owma_data + 54, owma_len - 54);
+ _gst_buffer_copy_into_mem (buf, owma_data + 54, 0, owma_len - 54);
if (wfex->wFormatTag == 0x0161) {
codec_name = "Windows Media Audio";
version = 2;
waveheader += 8;
headerlen -= 8;
- headerbuf = gst_buffer_new ();
- GST_BUFFER_DATA (headerbuf) = (guint8 *) waveheader;
- GST_BUFFER_SIZE (headerbuf) = headerlen;
+ headerbuf = gst_buffer_new_and_alloc (headerlen);
+ _gst_buffer_copy_into_mem (headerbuf, waveheader, 0, headerlen);
if (gst_riff_parse_strf_auds (GST_ELEMENT_CAST (qtdemux),
headerbuf, &header, &extra)) {
if (len > 0x4C) {
GstBuffer *buf = gst_buffer_new_and_alloc (len - 0x4C);
- memcpy (GST_BUFFER_DATA (buf), stsd_data + 0x4C, len - 0x4C);
+ _gst_buffer_copy_into_mem (buf, stsd_data + 0x4C, 0, len - 0x4C);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
/* codec-data contains alac atom size and prefix,
* ffmpeg likes it that way, not quite gst-ish though ...*/
buf = gst_buffer_new_and_alloc (len);
- memcpy (GST_BUFFER_DATA (buf), alac->data, len);
+ _gst_buffer_copy_into_mem (buf, alac->data, 0, len);
gst_caps_set_simple (stream->caps,
"codec_data", GST_TYPE_BUFFER, buf, NULL);
gst_buffer_unref (buf);
GstBuffer *buf = gst_buffer_new_and_alloc (len - 0x34);
guint bitrate;
- memcpy (GST_BUFFER_DATA (buf), stsd_data + 0x34, len - 0x34);
+ _gst_buffer_copy_into_mem (buf, stsd_data + 0x34, 0, len - 0x34);
/* If we have enough data, let's try to get the 'damr' atom. See
* the 3GPP container spec (26.244) for more details. */
static void
gst_qtdemux_guess_bitrate (GstQTDemux * qtdemux)
{
- GstFormat format = GST_FORMAT_BYTES;
QtDemuxStream *stream = NULL;
gint64 size, duration, sys_bitrate, sum_bitrate = 0;
gint i;
GST_DEBUG_OBJECT (qtdemux, "Looking for streams with unknown bitrate");
- if (!gst_pad_query_peer_duration (qtdemux->sinkpad, &format, &size) ||
- format != GST_FORMAT_BYTES) {
+ if (!gst_pad_query_peer_duration (qtdemux->sinkpad, GST_FORMAT_BYTES, &size)) {
GST_DEBUG_OBJECT (qtdemux,
"Size in bytes of the stream not known - bailing");
return;
return ((qtdemux->major_brand & GST_MAKE_FOURCC (255, 255, 0, 0)) ==
GST_MAKE_FOURCC ('3', 'g', 0, 0));
} else if (qtdemux->comp_brands != NULL) {
- guint8 *data = GST_BUFFER_DATA (qtdemux->comp_brands);
- guint size = GST_BUFFER_SIZE (qtdemux->comp_brands);
+ guint8 *data;
+ gsize size;
gboolean res = FALSE;
+ data = gst_buffer_map (qtdemux->comp_brands, &size, NULL, GST_MAP_READ);
while (size >= 4) {
res = res || ((QT_FOURCC (data) & GST_MAKE_FOURCC (255, 255, 0, 0)) ==
GST_MAKE_FOURCC ('3', 'g', 0, 0));
data += 4;
size -= 4;
}
+ gst_buffer_unmap (qtdemux->comp_brands, data, size);
return res;
} else {
return FALSE;
if (len < 12 + 2)
return;
- buf = gst_buffer_new ();
- GST_BUFFER_DATA (buf) = data + 14;
- GST_BUFFER_SIZE (buf) = len - 14;
+ buf = gst_buffer_new_allocate (NULL, len - 14, 0);
+ gst_buffer_fill (buf, 0, data + 14, len - 14);
taglist = gst_tag_list_from_id3v2_tag (buf);
if (taglist) {
data = node->data;
len = QT_UINT32 (data);
buf = gst_buffer_new_and_alloc (len);
- memcpy (GST_BUFFER_DATA (buf), data, len);
+ _gst_buffer_copy_into_mem (buf, data, 0, len);
/* heuristic to determine style of tag */
if (QT_FOURCC (data + 4) == FOURCC_____ ||
GST_DEBUG_OBJECT (demux, "media type %s", media_type);
caps = gst_caps_new_simple (media_type, "style", G_TYPE_STRING, style, NULL);
- gst_buffer_set_caps (buf, caps);
+ // TODO convert to metadata or ???
+// gst_buffer_set_caps (buf, caps);
gst_caps_unref (caps);
g_free (media_type);
GST_DEBUG_OBJECT (demux, "adding private tag; size %d, caps %" GST_PTR_FORMAT,
- GST_BUFFER_SIZE (buf), caps);
+ len, caps);
gst_tag_list_add (demux->tag_list, GST_TAG_MERGE_APPEND,
GST_QT_DEMUX_PRIVATE_TAG, buf, NULL);
GstBuffer *buf;
GstTagList *taglist;
- buf = gst_buffer_new ();
- GST_BUFFER_DATA (buf) = ((guint8 *) xmp_->data) + 8;
- GST_BUFFER_SIZE (buf) = QT_UINT32 ((guint8 *) xmp_->data) - 8;
-
+ buf = _gst_buffer_new_wrapped (((guint8 *) xmp_->data) + 8,
+ QT_UINT32 ((guint8 *) xmp_->data) - 8, NULL);
taglist = gst_tag_list_from_xmp_buffer (buf);
gst_buffer_unref (buf);
/* set duration in the segment info */
gst_qtdemux_get_duration (qtdemux, &duration);
if (duration)
- gst_segment_set_duration (&qtdemux->segment, GST_FORMAT_TIME, duration);
+ qtdemux->segment.duration = duration;
/* parse all traks */
trak = qtdemux_tree_get_child_by_type (qtdemux->moov_node, FOURCC_trak);
"systemstream", G_TYPE_BOOLEAN, FALSE, NULL);
break;
case 0x6C: /* MJPEG */
- caps = gst_caps_new_simple ("image/jpeg", NULL);
+ caps = gst_caps_new_empty_simple ("image/jpeg");
codec_name = "Motion-JPEG";
break;
case 0x6D: /* PNG */
- caps = gst_caps_new_simple ("image/png", NULL);
+ caps = gst_caps_new_empty_simple ("image/png");
codec_name = "PNG still images";
break;
case 0x6E: /* JPEG2000 */
break;
case 0xA4: /* Dirac */
codec_name = "Dirac";
- caps = gst_caps_new_simple ("video/x-dirac", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-dirac");
break;
case 0xA5: /* AC3 */
codec_name = "AC-3 audio";
case 0xE1: /* QCELP */
/* QCELP, the codec_data is a riff tag (little endian) with
* more info (http://ftp.3gpp2.org/TSGC/Working/2003/2003-05-SanDiego/TSG-C-2003-05-San%20Diego/WG1/SWG12/C12-20030512-006%20=%20C12-20030217-015_Draft_Baseline%20Text%20of%20FFMS_R2.doc). */
- caps = gst_caps_new_simple ("audio/qcelp", NULL);
+ caps = gst_caps_new_empty_simple ("audio/qcelp");
codec_name = "QCELP";
break;
default:
GstBuffer *buffer;
buffer = gst_buffer_new_and_alloc (data_len);
- memcpy (GST_BUFFER_DATA (buffer), data_ptr, data_len);
+ _gst_buffer_copy_into_mem (buffer, data_ptr, 0, data_len);
GST_DEBUG_OBJECT (qtdemux, "setting codec_data from esds");
GST_MEMDUMP_OBJECT (qtdemux, "codec_data from esds", data_ptr, data_len);
switch (fourcc) {
case GST_MAKE_FOURCC ('p', 'n', 'g', ' '):
_codec ("PNG still images");
- caps = gst_caps_new_simple ("image/png", NULL);
+ caps = gst_caps_new_empty_simple ("image/png");
break;
case GST_MAKE_FOURCC ('j', 'p', 'e', 'g'):
_codec ("JPEG still images");
- caps = gst_caps_new_simple ("image/jpeg", NULL);
+ caps = gst_caps_new_empty_simple ("image/jpeg");
break;
case GST_MAKE_FOURCC ('m', 'j', 'p', 'a'):
case GST_MAKE_FOURCC ('A', 'V', 'D', 'J'):
case GST_MAKE_FOURCC ('M', 'J', 'P', 'G'):
case GST_MAKE_FOURCC ('d', 'm', 'b', '1'):
_codec ("Motion-JPEG");
- caps = gst_caps_new_simple ("image/jpeg", NULL);
+ caps = gst_caps_new_empty_simple ("image/jpeg");
break;
case GST_MAKE_FOURCC ('m', 'j', 'p', 'b'):
_codec ("Motion-JPEG format B");
- caps = gst_caps_new_simple ("video/x-mjpeg-b", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-mjpeg-b");
break;
case GST_MAKE_FOURCC ('m', 'j', 'p', '2'):
_codec ("JPEG-2000");
_codec ("Raw RGB video");
bps = QT_UINT16 (stsd_data + 98);
/* set common stuff */
- caps = gst_caps_new_simple ("video/x-raw-rgb",
- "endianness", G_TYPE_INT, G_BYTE_ORDER, "depth", G_TYPE_INT, bps,
- NULL);
+ caps = gst_caps_new_empty_simple ("video/x-raw");
switch (bps) {
case 15:
- gst_caps_set_simple (caps,
- "bpp", G_TYPE_INT, 16,
- "endianness", G_TYPE_INT, G_BIG_ENDIAN,
- "red_mask", G_TYPE_INT, 0x7c00,
- "green_mask", G_TYPE_INT, 0x03e0,
- "blue_mask", G_TYPE_INT, 0x001f, NULL);
+ gst_caps_set_simple (caps, "format", G_TYPE_STRING, "RGB15", NULL);
break;
case 16:
- gst_caps_set_simple (caps,
- "bpp", G_TYPE_INT, 16,
- "endianness", G_TYPE_INT, G_BIG_ENDIAN,
- "red_mask", G_TYPE_INT, 0xf800,
- "green_mask", G_TYPE_INT, 0x07e0,
- "blue_mask", G_TYPE_INT, 0x001f, NULL);
+ gst_caps_set_simple (caps, "format", G_TYPE_STRING, "RGB16", NULL);
break;
case 24:
- gst_caps_set_simple (caps,
- "bpp", G_TYPE_INT, 24,
- "endianness", G_TYPE_INT, G_BIG_ENDIAN,
- "red_mask", G_TYPE_INT, 0xff0000,
- "green_mask", G_TYPE_INT, 0x00ff00,
- "blue_mask", G_TYPE_INT, 0x0000ff, NULL);
+ gst_caps_set_simple (caps, "format", G_TYPE_STRING, "RGB", NULL);
break;
case 32:
- gst_caps_set_simple (caps,
- "bpp", G_TYPE_INT, 32,
- "endianness", G_TYPE_INT, G_BIG_ENDIAN,
- "alpha_mask", G_TYPE_INT, 0xff000000,
- "red_mask", G_TYPE_INT, 0x00ff0000,
- "green_mask", G_TYPE_INT, 0x0000ff00,
- "blue_mask", G_TYPE_INT, 0x000000ff, NULL);
+ gst_caps_set_simple (caps, "format", G_TYPE_STRING, "ARGB", NULL);
break;
default:
/* unknown */
}
case GST_MAKE_FOURCC ('y', 'v', '1', '2'):
_codec ("Raw planar YUV 4:2:0");
- caps = gst_caps_new_simple ("video/x-raw-yuv",
- "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('I', '4', '2', '0'),
- NULL);
+ caps = gst_caps_new_simple ("video/x-raw",
+ "format", G_TYPE_STRING, "I420", NULL);
break;
case GST_MAKE_FOURCC ('y', 'u', 'v', '2'):
case GST_MAKE_FOURCC ('Y', 'u', 'v', '2'):
_codec ("Raw packed YUV 4:2:2");
- caps = gst_caps_new_simple ("video/x-raw-yuv",
- "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'),
- NULL);
+ caps = gst_caps_new_simple ("video/x-raw",
+ "format", G_TYPE_STRING, "YUY2", NULL);
break;
case GST_MAKE_FOURCC ('2', 'v', 'u', 'y'):
case GST_MAKE_FOURCC ('2', 'V', 'u', 'y'):
_codec ("Raw packed YUV 4:2:2");
- caps = gst_caps_new_simple ("video/x-raw-yuv",
- "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y'),
- NULL);
+ caps = gst_caps_new_simple ("video/x-raw",
+ "format", G_TYPE_STRING, "UYVY", NULL);
break;
case GST_MAKE_FOURCC ('v', '2', '1', '0'):
_codec ("Raw packed YUV 10-bit 4:2:2");
- caps = gst_caps_new_simple ("video/x-raw-yuv",
- "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('v', '2', '1', '0'),
- NULL);
+ caps = gst_caps_new_simple ("video/x-raw",
+ "format", G_TYPE_STRING, "v210", NULL);
break;
case GST_MAKE_FOURCC ('r', '2', '1', '0'):
_codec ("Raw packed RGB 10-bit 4:4:4");
- caps = gst_caps_new_simple ("video/x-raw-rgb",
- "endianness", G_TYPE_INT, G_BIG_ENDIAN, "depth", G_TYPE_INT, 30,
- "bpp", G_TYPE_INT, 32,
- "endianness", G_TYPE_INT, G_BIG_ENDIAN,
- "red_mask", G_TYPE_INT, 0x3ff00000,
- "green_mask", G_TYPE_INT, 0x000ffc00,
- "blue_mask", G_TYPE_INT, 0x000003ff, NULL);
+ caps = gst_caps_new_simple ("video/x-raw",
+ "format", G_TYPE_STRING, "r210", NULL);
break;
case GST_MAKE_FOURCC ('m', 'p', 'e', 'g'):
case GST_MAKE_FOURCC ('m', 'p', 'g', '1'):
break;
case GST_MAKE_FOURCC ('g', 'i', 'f', ' '):
_codec ("GIF still images");
- caps = gst_caps_new_simple ("image/gif", NULL);
+ caps = gst_caps_new_empty_simple ("image/gif");
break;
case GST_MAKE_FOURCC ('h', '2', '6', '3'):
case GST_MAKE_FOURCC ('H', '2', '6', '3'):
case GST_MAKE_FOURCC ('U', '2', '6', '3'):
_codec ("H.263");
/* ffmpeg uses the height/width props, don't know why */
- caps = gst_caps_new_simple ("video/x-h263", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-h263");
break;
case GST_MAKE_FOURCC ('m', 'p', '4', 'v'):
case GST_MAKE_FOURCC ('M', 'P', '4', 'V'):
case GST_MAKE_FOURCC ('3', 'I', 'V', '1'):
case GST_MAKE_FOURCC ('3', 'I', 'V', '2'):
_codec ("3ivX video");
- caps = gst_caps_new_simple ("video/x-3ivx", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-3ivx");
break;
case GST_MAKE_FOURCC ('D', 'I', 'V', '3'):
_codec ("DivX 3");
case GST_MAKE_FOURCC ('X', 'V', 'I', 'D'):
case GST_MAKE_FOURCC ('x', 'v', 'i', 'd'):
_codec ("XVID MPEG-4");
- caps = gst_caps_new_simple ("video/x-xvid", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-xvid");
break;
case GST_MAKE_FOURCC ('F', 'M', 'P', '4'):
case GST_MAKE_FOURCC ('c', 'v', 'i', 'd'):
_codec ("Cinepak");
- caps = gst_caps_new_simple ("video/x-cinepak", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-cinepak");
break;
case GST_MAKE_FOURCC ('q', 'd', 'r', 'w'):
_codec ("Apple QuickDraw");
- caps = gst_caps_new_simple ("video/x-qdrw", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-qdrw");
break;
case GST_MAKE_FOURCC ('r', 'p', 'z', 'a'):
_codec ("Apple video");
- caps = gst_caps_new_simple ("video/x-apple-video", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-apple-video");
break;
case GST_MAKE_FOURCC ('a', 'v', 'c', '1'):
_codec ("H.264 / AVC");
break;
case GST_MAKE_FOURCC ('s', 'm', 'c', ' '):
_codec ("Apple Graphics (SMC)");
- caps = gst_caps_new_simple ("video/x-smc", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-smc");
break;
case GST_MAKE_FOURCC ('V', 'P', '3', '1'):
_codec ("VP3");
- caps = gst_caps_new_simple ("video/x-vp3", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-vp3");
break;
case GST_MAKE_FOURCC ('X', 'i', 'T', 'h'):
_codec ("Theora");
- caps = gst_caps_new_simple ("video/x-theora", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-theora");
/* theora uses one byte of padding in the data stream because it does not
* allow 0 sized packets while theora does */
stream->padding = 1;
break;
case GST_MAKE_FOURCC ('d', 'r', 'a', 'c'):
_codec ("Dirac");
- caps = gst_caps_new_simple ("video/x-dirac", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-dirac");
break;
case GST_MAKE_FOURCC ('t', 'i', 'f', 'f'):
_codec ("TIFF still images");
- caps = gst_caps_new_simple ("image/tiff", NULL);
+ caps = gst_caps_new_empty_simple ("image/tiff");
break;
case GST_MAKE_FOURCC ('i', 'c', 'o', 'd'):
_codec ("Apple Intermediate Codec");
case FOURCC_ovc1:
_codec ("VC-1");
caps = gst_caps_new_simple ("video/x-wmv",
- "wmvversion", G_TYPE_INT, 3,
- "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('W', 'V', 'C', '1'),
- NULL);
+ "wmvversion", G_TYPE_INT, 3, "format", G_TYPE_STRING, "WVC1", NULL);
break;
case GST_MAKE_FOURCC ('k', 'p', 'c', 'd'):
default:
s = g_strdup_printf ("video/x-gst-fourcc-%" GST_FOURCC_FORMAT,
GST_FOURCC_ARGS (fourcc));
- caps = gst_caps_new_simple (s, NULL);
+ caps = gst_caps_new_empty_simple (s);
break;
}
}
/* enable clipping for raw video streams */
s = gst_caps_get_structure (caps, 0);
name = gst_structure_get_name (s);
- if (g_str_has_prefix (name, "video/x-raw-")) {
+ if (g_str_has_prefix (name, "video/x-raw")) {
stream->need_clip = TRUE;
}
return caps;
case GST_MAKE_FOURCC ('N', 'O', 'N', 'E'):
case GST_MAKE_FOURCC ('r', 'a', 'w', ' '):
_codec ("Raw 8-bit PCM audio");
- caps = gst_caps_new_simple ("audio/x-raw-int", "width", G_TYPE_INT, 8,
- "depth", G_TYPE_INT, 8, "signed", G_TYPE_BOOLEAN, FALSE, NULL);
+ caps = gst_caps_new_simple ("audio/x-raw",
+ "format", G_TYPE_STRING, "U8", NULL);
break;
case GST_MAKE_FOURCC ('t', 'w', 'o', 's'):
endian = G_BIG_ENDIAN;
{
gchar *str;
gint depth;
+ GstAudioFormat format;
if (!endian)
endian = G_LITTLE_ENDIAN;
depth = stream->bytes_per_packet * 8;
+ format = gst_audio_format_build_integer (TRUE, endian, depth, depth);
+
str = g_strdup_printf ("Raw %d-bit PCM audio", depth);
_codec (str);
g_free (str);
- caps = gst_caps_new_simple ("audio/x-raw-int",
- "width", G_TYPE_INT, depth, "depth", G_TYPE_INT, depth,
- "endianness", G_TYPE_INT, endian,
- "signed", G_TYPE_BOOLEAN, TRUE, NULL);
+
+ caps = gst_caps_new_simple ("audio/x-raw",
+ "format", G_TYPE_STRING, gst_audio_format_to_string (format), NULL);
break;
}
case GST_MAKE_FOURCC ('f', 'l', '6', '4'):
_codec ("Raw 64-bit floating-point audio");
- caps = gst_caps_new_simple ("audio/x-raw-float", "width", G_TYPE_INT, 64,
- "endianness", G_TYPE_INT, G_BIG_ENDIAN, NULL);
+ caps = gst_caps_new_simple ("audio/x-raw",
+ "format", G_TYPE_STRING, "F64BE", NULL);
break;
case GST_MAKE_FOURCC ('f', 'l', '3', '2'):
_codec ("Raw 32-bit floating-point audio");
- caps = gst_caps_new_simple ("audio/x-raw-float", "width", G_TYPE_INT, 32,
- "endianness", G_TYPE_INT, G_BIG_ENDIAN, NULL);
+ caps = gst_caps_new_simple ("audio/x-raw",
+ "format", G_TYPE_STRING, "F32BE", NULL);
break;
case FOURCC_in24:
_codec ("Raw 24-bit PCM audio");
/* we assume BIG ENDIAN, an enda box will tell us to change this to little
* endian later */
- caps = gst_caps_new_simple ("audio/x-raw-int", "width", G_TYPE_INT, 24,
- "depth", G_TYPE_INT, 24,
- "endianness", G_TYPE_INT, G_BIG_ENDIAN,
- "signed", G_TYPE_BOOLEAN, TRUE, NULL);
+ caps = gst_caps_new_simple ("audio/x-raw",
+ "format", G_TYPE_STRING, "S24BE", NULL);
break;
case GST_MAKE_FOURCC ('i', 'n', '3', '2'):
_codec ("Raw 32-bit PCM audio");
- caps = gst_caps_new_simple ("audio/x-raw-int", "width", G_TYPE_INT, 32,
- "depth", G_TYPE_INT, 32,
- "endianness", G_TYPE_INT, G_BIG_ENDIAN,
- "signed", G_TYPE_BOOLEAN, TRUE, NULL);
+ caps = gst_caps_new_simple ("audio/x-raw",
+ "format", G_TYPE_STRING, "S32BE", NULL);
break;
case GST_MAKE_FOURCC ('u', 'l', 'a', 'w'):
_codec ("Mu-law audio");
- caps = gst_caps_new_simple ("audio/x-mulaw", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-mulaw");
break;
case GST_MAKE_FOURCC ('a', 'l', 'a', 'w'):
_codec ("A-law audio");
- caps = gst_caps_new_simple ("audio/x-alaw", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-alaw");
break;
case 0x0200736d:
case 0x6d730002:
break;
case GST_MAKE_FOURCC ('O', 'g', 'g', 'V'):
/* ogg/vorbis */
- caps = gst_caps_new_simple ("application/ogg", NULL);
+ caps = gst_caps_new_empty_simple ("application/ogg");
break;
case GST_MAKE_FOURCC ('d', 'v', 'c', 'a'):
_codec ("DV audio");
- caps = gst_caps_new_simple ("audio/x-dv", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-dv");
break;
case GST_MAKE_FOURCC ('m', 'p', '4', 'a'):
_codec ("MPEG-4 AAC audio");
break;
case GST_MAKE_FOURCC ('Q', 'D', 'M', 'C'):
_codec ("QDesign Music");
- caps = gst_caps_new_simple ("audio/x-qdm", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-qdm");
break;
case GST_MAKE_FOURCC ('Q', 'D', 'M', '2'):
_codec ("QDesign Music v.2");
"bitrate", G_TYPE_INT, QT_UINT32 (data + 40),
"blocksize", G_TYPE_INT, QT_UINT32 (data + 44), NULL);
} else {
- caps = gst_caps_new_simple ("audio/x-qdm2", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-qdm2");
}
break;
case GST_MAKE_FOURCC ('a', 'g', 's', 'm'):
_codec ("GSM audio");
- caps = gst_caps_new_simple ("audio/x-gsm", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-gsm");
break;
case GST_MAKE_FOURCC ('s', 'a', 'm', 'r'):
_codec ("AMR audio");
- caps = gst_caps_new_simple ("audio/AMR", NULL);
+ caps = gst_caps_new_empty_simple ("audio/AMR");
break;
case GST_MAKE_FOURCC ('s', 'a', 'w', 'b'):
_codec ("AMR-WB audio");
- caps = gst_caps_new_simple ("audio/AMR-WB", NULL);
+ caps = gst_caps_new_empty_simple ("audio/AMR-WB");
break;
case GST_MAKE_FOURCC ('i', 'm', 'a', '4'):
_codec ("Quicktime IMA ADPCM");
break;
case GST_MAKE_FOURCC ('a', 'l', 'a', 'c'):
_codec ("Apple lossless audio");
- caps = gst_caps_new_simple ("audio/x-alac", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-alac");
break;
case GST_MAKE_FOURCC ('Q', 'c', 'l', 'p'):
_codec ("QualComm PureVoice");
break;
case FOURCC_owma:
_codec ("WMA");
- caps = gst_caps_new_simple ("audio/x-wma", NULL);
+ caps = gst_caps_new_empty_simple ("audio/x-wma");
break;
case GST_MAKE_FOURCC ('q', 't', 'v', 'r'):
/* ? */
s = g_strdup_printf ("audio/x-gst-fourcc-%" GST_FOURCC_FORMAT,
GST_FOURCC_ARGS (fourcc));
- caps = gst_caps_new_simple (s, NULL);
+ caps = gst_caps_new_empty_simple (s);
break;
}
}
/* enable clipping for raw audio streams */
s = gst_caps_get_structure (caps, 0);
name = gst_structure_get_name (s);
- if (g_str_has_prefix (name, "audio/x-raw-")) {
+ if (g_str_has_prefix (name, "audio/x-raw")) {
stream->need_clip = TRUE;
}
return caps;
switch (fourcc) {
case GST_MAKE_FOURCC ('m', 'p', '4', 's'):
_codec ("DVD subtitle");
- caps = gst_caps_new_simple ("video/x-dvd-subpicture", NULL);
+ caps = gst_caps_new_empty_simple ("video/x-dvd-subpicture");
break;
case GST_MAKE_FOURCC ('t', 'e', 'x', 't'):
_codec ("Quicktime timed text");
case GST_MAKE_FOURCC ('t', 'x', '3', 'g'):
_codec ("3GPP timed text");
text:
- caps = gst_caps_new_simple ("text/plain", NULL);
+ caps = gst_caps_new_empty_simple ("text/plain");
/* actual text piece needs to be extracted */
stream->need_process = TRUE;
break;
s = g_strdup_printf ("text/x-gst-fourcc-%" GST_FOURCC_FORMAT,
GST_FOURCC_ARGS (fourcc));
- caps = gst_caps_new_simple (s, NULL);
+ caps = gst_caps_new_empty_simple (s);
break;
}
}