From f8abc7e7340bc0307232298583d12590186f8434 Mon Sep 17 00:00:00 2001
From: Stefan Sauer
Date: Wed, 9 Nov 2011 20:09:01 +0100
Subject: [PATCH] audiovisualizer: port to 0.11

---
 configure.ac                                  |   2 +-
 gst/audiovisualizers/gstbaseaudiovisualizer.c | 496 ++++++++++++++++++++------
 gst/audiovisualizers/gstbaseaudiovisualizer.h |  10 +-
 gst/audiovisualizers/gstspacescope.c          |  38 +-
 gst/audiovisualizers/gstspectrascope.c        |  55 +--
 gst/audiovisualizers/gstsynaescope.c          |  44 +--
 gst/audiovisualizers/gstwavescope.c           |  38 +-
 7 files changed, 493 insertions(+), 190 deletions(-)

diff --git a/configure.ac b/configure.ac
index ce7fab4..6fac7bb 100644
--- a/configure.ac
+++ b/configure.ac
@@ -294,7 +294,7 @@ dnl *** plug-ins to include ***
 dnl Non ported plugins (non-dependant, then dependant)
 dnl Make sure you have a space before and after all plugins
 GST_PLUGINS_NONPORTED=" adpcmdec adpcmenc aiff asfmux \
-  audiovisualizers autoconvert camerabin cdxaparse coloreffects \
+  autoconvert camerabin cdxaparse coloreffects \
   dccp debugutils dtmf faceoverlay festival \
   fieldanalysis freeze frei0r gaudieffects geometrictransform h264parse \
   hdvparse hls id3tag inter interlace ivfparse jpegformat jp2kdecimator \
diff --git a/gst/audiovisualizers/gstbaseaudiovisualizer.c b/gst/audiovisualizers/gstbaseaudiovisualizer.c
index fb68297..108169b 100644
--- a/gst/audiovisualizers/gstbaseaudiovisualizer.c
+++ b/gst/audiovisualizers/gstbaseaudiovisualizer.c
@@ -33,7 +33,6 @@
 #include "config.h"
 #endif
 #include
-#include

 #include "gstbaseaudiovisualizer.h"

@@ -64,13 +63,24 @@ static void gst_base_audio_visualizer_dispose (GObject * object);

 static gboolean gst_base_audio_visualizer_src_negotiate (GstBaseAudioVisualizer *
     scope);
-static gboolean gst_base_audio_visualizer_src_setcaps (GstPad * pad,
-    GstCaps * caps);
-static gboolean gst_base_audio_visualizer_sink_setcaps (GstPad * pad,
-    GstCaps * caps);
+static gboolean gst_base_audio_visualizer_src_setcaps (GstBaseAudioVisualizer *
+    scope, GstCaps * caps);
+static gboolean gst_base_audio_visualizer_sink_setcaps (GstBaseAudioVisualizer *
+    scope, GstCaps * caps);
 static GstFlowReturn gst_base_audio_visualizer_chain (GstPad * pad,
     GstBuffer * buffer);
+
+static gboolean gst_base_audio_visualizer_src_event (GstPad * pad,
+    GstEvent * event);
+static gboolean gst_base_audio_visualizer_sink_event (GstPad * pad,
+    GstEvent * event);
+
+static gboolean gst_base_audio_visualizer_src_query (GstPad * pad,
+    GstQuery * query);
+static gboolean gst_base_audio_visualizer_sink_query (GstPad * pad,
+    GstQuery * query);
+
 static GstStateChangeReturn gst_base_audio_visualizer_change_state (GstElement *
     element, GstStateChange transition);

@@ -456,16 +466,20 @@ gst_base_audio_visualizer_init (GstBaseAudioVisualizer * scope,
   scope->sinkpad = gst_pad_new_from_template (pad_template, "sink");
   gst_pad_set_chain_function (scope->sinkpad,
       GST_DEBUG_FUNCPTR (gst_base_audio_visualizer_chain));
-  gst_pad_set_setcaps_function (scope->sinkpad,
-      GST_DEBUG_FUNCPTR (gst_base_audio_visualizer_sink_setcaps));
+  gst_pad_set_event_function (scope->sinkpad,
+      GST_DEBUG_FUNCPTR (gst_base_audio_visualizer_sink_event));
+  gst_pad_set_query_function (scope->sinkpad,
+      GST_DEBUG_FUNCPTR (gst_base_audio_visualizer_sink_query));
   gst_element_add_pad (GST_ELEMENT (scope), scope->sinkpad);

   pad_template =
       gst_element_class_get_pad_template (GST_ELEMENT_CLASS (g_class), "src");
   g_return_if_fail (pad_template != NULL);
   scope->srcpad = gst_pad_new_from_template (pad_template, "src");
-  gst_pad_set_setcaps_function (scope->srcpad,
-      GST_DEBUG_FUNCPTR (gst_base_audio_visualizer_src_setcaps));
+  gst_pad_set_event_function (scope->srcpad,
+      GST_DEBUG_FUNCPTR (gst_base_audio_visualizer_src_event));
+  gst_pad_set_query_function (scope->srcpad,
+      GST_DEBUG_FUNCPTR (gst_base_audio_visualizer_src_query));
   gst_element_add_pad (GST_ELEMENT (scope), scope->srcpad);

   scope->adapter = gst_adapter_new ();
@@ -487,8 +501,6 @@ gst_base_audio_visualizer_init (GstBaseAudioVisualizer * scope,
   scope->rate = GST_AUDIO_DEF_RATE;
   scope->channels = 2;

-  scope->next_ts = GST_CLOCK_TIME_NONE;
-
   scope->config_lock = g_mutex_new ();
 }

@@ -555,16 +567,27 @@ gst_base_audio_visualizer_dispose (GObject * object)
   G_OBJECT_CLASS (parent_class)->dispose (object);
 }

+static void
+gst_base_audio_visualizer_reset (GstBaseAudioVisualizer * scope)
+{
+  gst_adapter_clear (scope->adapter);
+  gst_segment_init (&scope->segment, GST_FORMAT_UNDEFINED);
+
+  GST_OBJECT_LOCK (scope);
+  scope->proportion = 1.0;
+  scope->earliest_time = -1;
+  GST_OBJECT_UNLOCK (scope);
+}
+
 static gboolean
-gst_base_audio_visualizer_sink_setcaps (GstPad * pad, GstCaps * caps)
+gst_base_audio_visualizer_sink_setcaps (GstBaseAudioVisualizer * scope,
+    GstCaps * caps)
 {
-  GstBaseAudioVisualizer *scope;
   GstStructure *structure;
   gint channels;
   gint rate;
   gboolean res = TRUE;

-  scope = GST_BASE_AUDIO_VISUALIZER (gst_pad_get_parent (pad));
   structure = gst_caps_get_structure (caps, 0);

   if (!gst_structure_get_int (structure, "channels", &channels) ||
@@ -584,7 +607,6 @@ gst_base_audio_visualizer_sink_setcaps (GstPad * pad, GstCaps * caps)
       scope->channels, scope->rate);

 done:
-  gst_object_unref (scope);
   return res;

   /* Errors */
@@ -610,27 +632,81 @@ wrong_rate:
 }

 static gboolean
+gst_base_audio_visualizer_src_setcaps (GstBaseAudioVisualizer * scope,
+    GstCaps * caps)
+{
+  GstBaseAudioVisualizerClass *klass;
+  GstStructure *structure;
+  gboolean res;
+
+  structure = gst_caps_get_structure (caps, 0);
+  if (!gst_structure_get_int (structure, "width", &scope->width) ||
+      !gst_structure_get_int (structure, "height", &scope->height) ||
+      !gst_structure_get_fraction (structure, "framerate", &scope->fps_n,
+          &scope->fps_d))
+    goto error;
+
+  klass = GST_BASE_AUDIO_VISUALIZER_CLASS (G_OBJECT_GET_CLASS (scope));
+
+  //scope->video_format = format; ??
+
+  scope->frame_duration = gst_util_uint64_scale_int (GST_SECOND,
+      scope->fps_d, scope->fps_n);
+  scope->spf = gst_util_uint64_scale_int (scope->rate,
+      scope->fps_d, scope->fps_n);
+  scope->req_spf = scope->spf;
+
+  scope->bpf = scope->width * scope->height * 4;
+
+  if (scope->pixelbuf)
+    g_free (scope->pixelbuf);
+  scope->pixelbuf = g_malloc0 (scope->bpf);
+
+  if (klass->setup)
+    res = klass->setup (scope);
+
+  GST_DEBUG_OBJECT (scope, "video: dimension %dx%d, framerate %d/%d",
+      scope->width, scope->height, scope->fps_n, scope->fps_d);
+  GST_DEBUG_OBJECT (scope, "blocks: spf %u, req_spf %u",
+      scope->spf, scope->req_spf);
+
+  res = gst_pad_push_event (scope->srcpad, gst_event_new_caps (caps));
+
+  return res;
+
+  /* ERRORS */
+error:
+  {
+    GST_DEBUG_OBJECT (scope, "error parsing caps");
+    return FALSE;
+  }
+}
+
+static gboolean
 gst_base_audio_visualizer_src_negotiate (GstBaseAudioVisualizer * scope)
 {
-  GstCaps *othercaps, *target, *intersect;
+  GstCaps *othercaps, *target;
   GstStructure *structure;
-  const GstCaps *templ;
+  GstCaps *templ;
+  GstQuery *query;
+  GstBufferPool *pool = NULL;
+  guint size, min, max, prefix, alignment;

   templ = gst_pad_get_pad_template_caps (scope->srcpad);

   GST_DEBUG_OBJECT (scope, "performing negotiation");

   /* see what the peer can do */
-  othercaps = gst_pad_peer_get_caps (scope->srcpad);
+  othercaps = gst_pad_peer_get_caps (scope->srcpad, NULL);
   if (othercaps) {
-    intersect = gst_caps_intersect (othercaps, templ);
+    target = gst_caps_intersect (othercaps, templ);
     gst_caps_unref (othercaps);
+    gst_caps_unref (templ);

-    if (gst_caps_is_empty (intersect))
+    if (gst_caps_is_empty (target))
       goto no_format;

-    target = gst_caps_copy_nth (intersect, 0);
-    gst_caps_unref (intersect);
+    gst_caps_truncate (target);
   } else {
     target = gst_caps_ref ((GstCaps *) templ);
   }
@@ -643,80 +719,70 @@ gst_base_audio_visualizer_src_negotiate (GstBaseAudioVisualizer * scope)

   GST_DEBUG_OBJECT (scope, "final caps are %" GST_PTR_FORMAT, target);

-  gst_pad_set_caps (scope->srcpad, target);
-  gst_caps_unref (target);
+  gst_base_audio_visualizer_src_setcaps (scope, target);

-  return TRUE;
+  /* try to get a bufferpool now */
+  /* find a pool for the negotiated caps now */
+  query = gst_query_new_allocation (target, TRUE);

-no_format:
-  {
-    gst_caps_unref (intersect);
-    return FALSE;
+  if (gst_pad_peer_query (scope->srcpad, query)) {
+    /* we got configuration from our peer, parse them */
+    gst_query_parse_allocation_params (query, &size, &min, &max, &prefix,
+        &alignment, &pool);
+  } else {
+    size = scope->bpf;
+    min = max = 0;
+    prefix = 0;
+    alignment = 0;
   }
-}

-static gboolean
-gst_base_audio_visualizer_src_setcaps (GstPad * pad, GstCaps * caps)
-{
-  GstBaseAudioVisualizer *scope;
-  GstBaseAudioVisualizerClass *klass;
-  gint w, h;
-  gint num, denom;
-  GstVideoFormat format;
-  gboolean res = TRUE;
+  if (pool == NULL) {
+    GstStructure *config;

-  scope = GST_BASE_AUDIO_VISUALIZER (gst_pad_get_parent (pad));
-  klass = GST_BASE_AUDIO_VISUALIZER_CLASS (G_OBJECT_GET_CLASS (scope));
+    /* we did not get a pool, make one ourselves then */
+    pool = gst_buffer_pool_new ();

-  if (!gst_video_format_parse_caps (caps, &format, &w, &h)) {
-    goto missing_caps_details;
-  }
-  if (!gst_video_parse_caps_framerate (caps, &num, &denom)) {
-    goto missing_caps_details;
+    config = gst_buffer_pool_get_config (pool);
+    gst_buffer_pool_config_set (config, target, size, min, max, prefix,
+        alignment);
+    gst_buffer_pool_set_config (pool, config);
   }

-  g_mutex_lock (scope->config_lock);
-
-  scope->width = w;
-  scope->height = h;
-  scope->fps_n = num;
-  scope->fps_d = denom;
-  scope->video_format = format;
-
-  scope->frame_duration = gst_util_uint64_scale_int (GST_SECOND,
-      scope->fps_d, scope->fps_n);
-  scope->spf = gst_util_uint64_scale_int (scope->rate,
-      scope->fps_d, scope->fps_n);
-  scope->req_spf = scope->spf;
+  if (scope->pool)
+    gst_object_unref (scope->pool);
+  scope->pool = pool;

-  scope->bpf = w * h * 4;
+  /* and activate */
+  gst_buffer_pool_set_active (pool, TRUE);

-  if (scope->pixelbuf)
-    g_free (scope->pixelbuf);
-  scope->pixelbuf = g_malloc0 (scope->bpf);
+  gst_caps_unref (target);

-  if (klass->setup)
-    res = klass->setup (scope);
+  return TRUE;

-  GST_DEBUG_OBJECT (scope, "video: dimension %dx%d, framerate %d/%d",
-      scope->width, scope->height, scope->fps_n, scope->fps_d);
-  GST_DEBUG_OBJECT (scope, "blocks: spf %u, req_spf %u",
-      scope->spf, scope->req_spf);
+no_format:
+  {
+    gst_caps_unref (target);
+    return FALSE;
+  }
+}

-  g_mutex_unlock (scope->config_lock);
+/* make sure we are negotiated */
+static GstFlowReturn
+gst_base_audio_visualizer_ensure_negotiated (GstBaseAudioVisualizer * scope)
+{
+  gboolean reconfigure;

-done:
-  gst_object_unref (scope);
-  return res;
+  GST_OBJECT_LOCK (scope->srcpad);
+  reconfigure = GST_PAD_NEEDS_RECONFIGURE (scope->srcpad);
+  GST_OBJECT_FLAG_UNSET (scope->srcpad, GST_PAD_NEED_RECONFIGURE);
+  GST_OBJECT_UNLOCK (scope->srcpad);

-  /* Errors */
-missing_caps_details:
-  {
-    GST_WARNING_OBJECT (scope,
-        "missing width, height or framerate in the caps");
-    res = FALSE;
-    goto done;
+  /* we don't know an output format yet, pick one */
+  if (reconfigure || !gst_pad_has_current_caps (scope->srcpad)) {
+    if (!gst_base_audio_visualizer_src_negotiate (scope))
+      return GST_FLOW_NOT_NEGOTIATED;
   }
+  return GST_FLOW_OK;
 }

 static GstFlowReturn
@@ -726,8 +792,9 @@ gst_base_audio_visualizer_chain (GstPad * pad, GstBuffer * buffer)
   GstBaseAudioVisualizer *scope;
   GstBaseAudioVisualizerClass *klass;
   GstBuffer *inbuf;
+  guint64 dist, ts;
   guint avail, sbpf;
-  guint8 *adata;
+  gpointer adata, vdata;
   gboolean (*render) (GstBaseAudioVisualizer * scope, GstBuffer * audio,
       GstBuffer * video);

@@ -740,18 +807,19 @@ gst_base_audio_visualizer_chain (GstPad * pad, GstBuffer * buffer)

   /* resync on DISCONT */
   if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT)) {
-    scope->next_ts = GST_CLOCK_TIME_NONE;
     gst_adapter_clear (scope->adapter);
   }

-  if (GST_PAD_CAPS (scope->srcpad) == NULL) {
-    if (!gst_base_audio_visualizer_src_negotiate (scope))
-      return GST_FLOW_NOT_NEGOTIATED;
+  if (scope->bps == 0) {
+    ret = GST_FLOW_NOT_NEGOTIATED;
+    goto beach;
+  }
+  /* Make sure have an output format */
+  ret = gst_base_audio_visualizer_ensure_negotiated (scope);
+  if (ret != GST_FLOW_OK) {
+    gst_buffer_unref (buffer);
+    goto beach;
   }
-
-  /* Match timestamps from the incoming audio */
-  if (GST_BUFFER_TIMESTAMP (buffer) != GST_CLOCK_TIME_NONE)
-    scope->next_ts = GST_BUFFER_TIMESTAMP (buffer);

   gst_adapter_push (scope->adapter, buffer);

@@ -762,7 +830,7 @@ gst_base_audio_visualizer_chain (GstPad * pad, GstBuffer * buffer)
   inbuf = scope->inbuf;

   /* FIXME: the timestamp in the adapter would be different */
-  gst_buffer_copy_metadata (inbuf, buffer, GST_BUFFER_COPY_ALL);
+  gst_buffer_copy_into (inbuf, buffer, GST_BUFFER_COPY_METADATA, 0, -1);

   /* this is what we have */
   avail = gst_adapter_available (scope->adapter);
@@ -770,10 +838,37 @@ gst_base_audio_visualizer_chain (GstPad * pad, GstBuffer * buffer)
   while (avail >= sbpf) {
     GstBuffer *outbuf;

+    /* get timestamp of the current adapter content */
+    ts = gst_adapter_prev_timestamp (scope->adapter, &dist);
+    if (GST_CLOCK_TIME_IS_VALID (ts)) {
+      /* convert bytes to time */
+      dist /= scope->bps;
+      ts += gst_util_uint64_scale_int (dist, GST_SECOND, scope->rate);
+    }
+
+    if (GST_CLOCK_TIME_IS_VALID (ts)) {
+      gint64 qostime;
+      gboolean need_skip;
+
+      qostime =
+          gst_segment_to_running_time (&scope->segment, GST_FORMAT_TIME, ts) +
+          scope->frame_duration;
+
+      GST_OBJECT_LOCK (scope);
+      /* check for QoS, don't compute buffers that are known to be late */
+      need_skip = scope->earliest_time != -1 && qostime <= scope->earliest_time;
+      GST_OBJECT_UNLOCK (scope);
+
+      if (need_skip) {
+        GST_WARNING_OBJECT (scope,
+            "QoS: skip ts: %" GST_TIME_FORMAT ", earliest: %" GST_TIME_FORMAT,
+            GST_TIME_ARGS (qostime), GST_TIME_ARGS (scope->earliest_time));
+        goto skip;
+      }
+    }
+
     g_mutex_unlock (scope->config_lock);
-    ret = gst_pad_alloc_buffer_and_set_caps (scope->srcpad,
-        GST_BUFFER_OFFSET_NONE,
-        scope->bpf, GST_PAD_CAPS (scope->srcpad), &outbuf);
+    ret = gst_buffer_pool_acquire_buffer (scope->pool, &outbuf, NULL);
     g_mutex_lock (scope->config_lock);
     /* recheck as the value could have changed */
     sbpf = scope->req_spf * scope->channels * sizeof (gint16);
@@ -783,22 +878,25 @@ gst_base_audio_visualizer_chain (GstPad * pad, GstBuffer * buffer)
       break;

     /* sync controlled properties */
-    gst_object_sync_values (GST_OBJECT (scope), scope->next_ts);
+    gst_object_sync_values (GST_OBJECT (scope), ts);

-    GST_BUFFER_TIMESTAMP (outbuf) = scope->next_ts;
+    GST_BUFFER_TIMESTAMP (outbuf) = ts;
     GST_BUFFER_DURATION (outbuf) = scope->frame_duration;
+
+    vdata = gst_buffer_map (outbuf, NULL, NULL, GST_MAP_WRITE);
     if (scope->shader) {
-      memcpy (GST_BUFFER_DATA (outbuf), scope->pixelbuf, scope->bpf);
+      memcpy (vdata, scope->pixelbuf, scope->bpf);
     } else {
-      memset (GST_BUFFER_DATA (outbuf), 0, scope->bpf);
+      memset (vdata, 0, scope->bpf);
     }

     /* this can fail as the data size we need could have changed */
-    if (!(adata = (guint8 *) gst_adapter_peek (scope->adapter, sbpf)))
+    if (!(adata = (gpointer) gst_adapter_map (scope->adapter, sbpf)))
       break;
-    GST_BUFFER_DATA (inbuf) = adata;
-    GST_BUFFER_SIZE (inbuf) = sbpf;
+    gst_buffer_take_memory (inbuf, -1,
+        gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY, adata, NULL, sbpf, 0,
+            sbpf));

     /* call class->render() vmethod */
     if (render) {
@@ -807,59 +905,243 @@ gst_base_audio_visualizer_chain (GstPad * pad, GstBuffer * buffer)
       } else {
         /* run various post processing (shading and geometri transformation */
         if (scope->shader) {
-          scope->shader (scope, GST_BUFFER_DATA (outbuf), scope->pixelbuf);
+          scope->shader (scope, vdata, scope->pixelbuf);
         }
       }
     }

+    gst_buffer_unmap (outbuf, vdata, scope->bpf);
+
     g_mutex_unlock (scope->config_lock);
     ret = gst_pad_push (scope->srcpad, outbuf);
     outbuf = NULL;
     g_mutex_lock (scope->config_lock);

+  skip:
     /* recheck as the value could have changed */
     sbpf = scope->req_spf * scope->channels * sizeof (gint16);
     GST_LOG_OBJECT (scope, "avail: %u, bpf: %u", avail, sbpf);
     /* we want to take less or more, depending on spf : req_spf */
     if (avail - sbpf >= sbpf) {
       gst_adapter_flush (scope->adapter, sbpf);
+      gst_adapter_unmap (scope->adapter);
     } else if (avail - sbpf >= 0) {
       /* just flush a bit and stop */
       gst_adapter_flush (scope->adapter, (avail - sbpf));
+      gst_adapter_unmap (scope->adapter);
       break;
     }
     avail = gst_adapter_available (scope->adapter);

     if (ret != GST_FLOW_OK)
       break;
-
-    if (scope->next_ts != GST_CLOCK_TIME_NONE)
-      scope->next_ts += scope->frame_duration;
   }

   g_mutex_unlock (scope->config_lock);
+beach:
   gst_object_unref (scope);

   return ret;
 }

+static gboolean
+gst_base_audio_visualizer_src_event (GstPad * pad, GstEvent * event)
+{
+  gboolean res;
+  GstBaseAudioVisualizer *scope;
+
+  scope = GST_BASE_AUDIO_VISUALIZER (gst_pad_get_parent (pad));
+
+  switch (GST_EVENT_TYPE (event)) {
+    case GST_EVENT_QOS:
+    {
+      gdouble proportion;
+      GstClockTimeDiff diff;
+      GstClockTime timestamp;
+
+      gst_event_parse_qos (event, NULL, &proportion, &diff, &timestamp);
+
+      /* save stuff for the _chain() function */
+      GST_OBJECT_LOCK (scope);
+      scope->proportion = proportion;
+      if (diff >= 0)
+        /* we're late, this is a good estimate for next displayable
+         * frame (see part-qos.txt) */
+        scope->earliest_time = timestamp + 2 * diff + scope->frame_duration;
+      else
+        scope->earliest_time = timestamp + diff;
+      GST_OBJECT_UNLOCK (scope);
+
+      res = gst_pad_push_event (scope->sinkpad, event);
+      break;
+    }
+    default:
+      res = gst_pad_push_event (scope->sinkpad, event);
+      break;
+  }
+  gst_object_unref (scope);
+
+  return res;
+}
+
+static gboolean
+gst_base_audio_visualizer_sink_event (GstPad * pad, GstEvent * event)
+{
+  gboolean res;
+  GstBaseAudioVisualizer *scope;
+
+  scope = GST_BASE_AUDIO_VISUALIZER (gst_pad_get_parent (pad));
+
+  switch (GST_EVENT_TYPE (event)) {
+    case GST_EVENT_CAPS:
+    {
+      GstCaps *caps;
+
+      gst_event_parse_caps (event, &caps);
+      res = gst_base_audio_visualizer_sink_setcaps (scope, caps);
+      break;
+    }
+    case GST_EVENT_FLUSH_START:
+      res = gst_pad_push_event (scope->srcpad, event);
+      break;
+    case GST_EVENT_FLUSH_STOP:
+      gst_base_audio_visualizer_reset (scope);
+      res = gst_pad_push_event (scope->srcpad, event);
+      break;
+    case GST_EVENT_SEGMENT:
+    {
+      /* the newsegment values are used to clip the input samples
+       * and to convert the incomming timestamps to running time so
+       * we can do QoS */
+      gst_event_copy_segment (event, &scope->segment);
+
+      res = gst_pad_push_event (scope->srcpad, event);
+      break;
+    }
+    default:
+      res = gst_pad_push_event (scope->srcpad, event);
+      break;
+  }
+  gst_object_unref (scope);
+
+  return res;
+}
+
+static gboolean
+gst_base_audio_visualizer_src_query (GstPad * pad, GstQuery * query)
+{
+  gboolean res = FALSE;
+  GstBaseAudioVisualizer *scope;
+
+  scope = GST_BASE_AUDIO_VISUALIZER (gst_pad_get_parent (pad));
+
+  switch (GST_QUERY_TYPE (query)) {
+    case GST_QUERY_LATENCY:
+    {
+      /* We need to send the query upstream and add the returned latency to our
+       * own */
+      GstClockTime min_latency, max_latency;
+      gboolean us_live;
+      GstClockTime our_latency;
+      guint max_samples;
+
+      if (scope->rate == 0)
+        break;
+
+      if ((res = gst_pad_peer_query (scope->sinkpad, query))) {
+        gst_query_parse_latency (query, &us_live, &min_latency, &max_latency);
+
+        GST_DEBUG_OBJECT (scope, "Peer latency: min %"
+            GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
+            GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
+
+        /* the max samples we must buffer buffer */
+        max_samples = MAX (scope->req_spf, scope->spf);
+        our_latency =
+            gst_util_uint64_scale_int (max_samples, GST_SECOND, scope->rate);
+
+        GST_DEBUG_OBJECT (scope, "Our latency: %" GST_TIME_FORMAT,
+            GST_TIME_ARGS (our_latency));
+
+        /* we add some latency but only if we need to buffer more than what
+         * upstream gives us */
+        min_latency += our_latency;
+        if (max_latency != -1)
+          max_latency += our_latency;
+
+        GST_DEBUG_OBJECT (scope, "Calculated total latency : min %"
+            GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
+            GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
+
+        gst_query_set_latency (query, TRUE, min_latency, max_latency);
+      }
+      break;
+    }
+    default:
+      res = gst_pad_peer_query (scope->sinkpad, query);
+      break;
+  }
+
+  gst_object_unref (scope);
+
+  return res;
+}
+
+static gboolean
+gst_base_audio_visualizer_sink_query (GstPad * pad, GstQuery * query)
+{
+  gboolean res = FALSE;
+  GstBaseAudioVisualizer *scope;
+
+  scope = GST_BASE_AUDIO_VISUALIZER (gst_pad_get_parent (pad));
+
+  switch (GST_QUERY_TYPE (query)) {
+    case GST_QUERY_ALLOCATION:
+      /* we convert audio to video, don't pass allocation queries for audio
+       * through */
+      break;
+    default:
+      res = gst_pad_peer_query (scope->srcpad, query);
+      break;
+  }
+
+  gst_object_unref (scope);
+
+  return res;
+}
+
 static GstStateChangeReturn
 gst_base_audio_visualizer_change_state (GstElement * element,
     GstStateChange transition)
 {
+  GstStateChangeReturn ret;
   GstBaseAudioVisualizer *scope;

   scope = GST_BASE_AUDIO_VISUALIZER (element);

   switch (transition) {
     case GST_STATE_CHANGE_READY_TO_PAUSED:
-      scope->next_ts = GST_CLOCK_TIME_NONE;
-      gst_adapter_clear (scope->adapter);
+      gst_base_audio_visualizer_reset (scope);
+      break;
+    default:
+      break;
+  }
+
+  ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
+
+  switch (transition) {
+    case GST_STATE_CHANGE_PAUSED_TO_READY:
+      if (scope->pool) {
+        gst_buffer_pool_set_active (scope->pool, FALSE);
+        gst_object_replace ((GstObject **) & scope->pool, NULL);
+      }
+      break;
+    case GST_STATE_CHANGE_READY_TO_NULL:
       break;
     default:
       break;
   }

-  return GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
+  return ret;
 }
diff --git a/gst/audiovisualizers/gstbaseaudiovisualizer.h b/gst/audiovisualizers/gstbaseaudiovisualizer.h
index 42a4c07..0bd7687 100644
--- a/gst/audiovisualizers/gstbaseaudiovisualizer.h
+++ b/gst/audiovisualizers/gstbaseaudiovisualizer.h
@@ -74,6 +74,7 @@ struct _GstBaseAudioVisualizer

   /* pads */
   GstPad *srcpad, *sinkpad;
+  GstBufferPool *pool;

   GstAdapter *adapter;
   GstBuffer *inbuf;
   guint8 *pixelbuf;
@@ -82,7 +83,6 @@ struct _GstBaseAudioVisualizer
   GstBaseAudioVisualizerShaderFunc shader;
   guint32 shade_amount;

-  guint64 next_ts;              /* the timestamp of the next frame */
   guint64 frame_duration;
   guint bpf;                    /* bytes per frame */
   guint bps;                    /* bytes per sample */
@@ -94,14 +94,20 @@ struct _GstBaseAudioVisualizer
   gint fps_n, fps_d;
   gint width;
   gint height;
-  gint channels;

   /* audio state */
   gint sample_rate;
+  gint channels;
   gint rate;

   /* configuration mutex */
   GMutex *config_lock;
+
+  /* QoS stuff *//* with LOCK */
+  gdouble proportion;
+  GstClockTime earliest_time;
+
+  GstSegment segment;
 };

 struct _GstBaseAudioVisualizerClass
diff --git a/gst/audiovisualizers/gstspacescope.c b/gst/audiovisualizers/gstspacescope.c
index fe8169d..85621f6 100644
--- a/gst/audiovisualizers/gstspacescope.c
+++ b/gst/audiovisualizers/gstspacescope.c
@@ -41,14 +41,20 @@ static GstStaticPadTemplate gst_space_scope_src_template =
 GST_STATIC_PAD_TEMPLATE ("src",
     GST_PAD_SRC,
     GST_PAD_ALWAYS,
-    GST_STATIC_CAPS (GST_VIDEO_CAPS_xRGB_HOST_ENDIAN)
+#if G_BYTE_ORDER == G_BIG_ENDIAN
+    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("xRGB"))
+#else
+    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("BGRx"))
+#endif
     );

 static GstStaticPadTemplate gst_space_scope_sink_template =
 GST_STATIC_PAD_TEMPLATE ("sink",
     GST_PAD_SINK,
     GST_PAD_ALWAYS,
-    GST_STATIC_CAPS (GST_AUDIO_INT_STANDARD_PAD_TEMPLATE_CAPS)
+    GST_STATIC_CAPS ("audio/x-raw, "
+        "format = (string) " GST_AUDIO_NE (S16) ", "
+        "rate = (int) [ 8000, 96000 ], " "channels = (int) 2")
     );

@@ -59,13 +65,14 @@ static gboolean gst_space_scope_render (GstBaseAudioVisualizer * scope,
     GstBuffer * audio, GstBuffer * video);

-GST_BOILERPLATE (GstSpaceScope, gst_space_scope, GstBaseAudioVisualizer,
-    GST_TYPE_BASE_AUDIO_VISUALIZER);
+G_DEFINE_TYPE (GstSpaceScope, gst_space_scope, GST_TYPE_BASE_AUDIO_VISUALIZER);

 static void
-gst_space_scope_base_init (gpointer g_class)
+gst_space_scope_class_init (GstSpaceScopeClass * g_class)
 {
-  GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
+  GstElementClass *element_class = (GstElementClass *) g_class;
+  GstBaseAudioVisualizerClass *scope_class =
+      (GstBaseAudioVisualizerClass *) g_class;

   gst_element_class_set_details_simple (element_class, "Stereo visualizer",
       "Visualization",
@@ -75,19 +82,12 @@ gst_space_scope_base_init (gpointer g_class)
       gst_static_pad_template_get (&gst_space_scope_src_template));
   gst_element_class_add_pad_template (element_class,
       gst_static_pad_template_get (&gst_space_scope_sink_template));
-}
-
-static void
-gst_space_scope_class_init (GstSpaceScopeClass * g_class)
-{
-  GstBaseAudioVisualizerClass *scope_class =
-      (GstBaseAudioVisualizerClass *) g_class;

   scope_class->render = GST_DEBUG_FUNCPTR (gst_space_scope_render);
 }

 static void
-gst_space_scope_init (GstSpaceScope * scope, GstSpaceScopeClass * g_class)
+gst_space_scope_init (GstSpaceScope * scope)
 {
   /* do nothing */
 }
@@ -96,15 +96,17 @@ static gboolean
 gst_space_scope_render (GstBaseAudioVisualizer * scope, GstBuffer * audio,
     GstBuffer * video)
 {
-  guint32 *vdata = (guint32 *) GST_BUFFER_DATA (video);
-  gint16 *adata = (gint16 *) GST_BUFFER_DATA (audio);
+  gsize asize;
+  guint32 *vdata =
+      (guint32 *) gst_buffer_map (video, NULL, NULL, GST_MAP_WRITE);
+  gint16 *adata = (gint16 *) gst_buffer_map (audio, &asize, NULL, GST_MAP_READ);
   guint i, s, x, y, off, ox, oy;
   guint num_samples;
   gfloat dx, dy;
   guint w = scope->width;

   /* draw dots 1st channel x, 2nd channel y */
-  num_samples = GST_BUFFER_SIZE (audio) / (scope->channels * sizeof (gint16));
+  num_samples = asize / (scope->channels * sizeof (gint16));
   dx = scope->width / 65536.0;
   ox = scope->width / 2;
   dy = scope->height / 65536.0;
@@ -116,6 +118,8 @@ gst_space_scope_render (GstBaseAudioVisualizer * scope, GstBuffer * audio,
     off = (y * w) + x;
     vdata[off] = 0x00FFFFFF;
   }
+  gst_buffer_unmap (video, vdata, -1);
+  gst_buffer_unmap (audio, adata, -1);
   return TRUE;
 }

diff --git a/gst/audiovisualizers/gstspectrascope.c b/gst/audiovisualizers/gstspectrascope.c
index 4ebc7a1..d79675a 100644
--- a/gst/audiovisualizers/gstspectrascope.c
+++ b/gst/audiovisualizers/gstspectrascope.c
@@ -42,14 +42,20 @@ static GstStaticPadTemplate gst_spectra_scope_src_template =
 GST_STATIC_PAD_TEMPLATE ("src",
     GST_PAD_SRC,
     GST_PAD_ALWAYS,
-    GST_STATIC_CAPS (GST_VIDEO_CAPS_xRGB_HOST_ENDIAN)
+#if G_BYTE_ORDER == G_BIG_ENDIAN
+    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("xRGB"))
+#else
+    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("BGRx"))
+#endif
     );

 static GstStaticPadTemplate gst_spectra_scope_sink_template =
 GST_STATIC_PAD_TEMPLATE ("sink",
     GST_PAD_SINK,
     GST_PAD_ALWAYS,
-    GST_STATIC_CAPS (GST_AUDIO_INT_STANDARD_PAD_TEMPLATE_CAPS)
+    GST_STATIC_CAPS ("audio/x-raw, "
+        "format = (string) " GST_AUDIO_NE (S16) ", "
+        "rate = (int) [ 8000, 96000 ], " "channels = (int) 2")
     );

@@ -63,13 +69,18 @@ static gboolean gst_spectra_scope_render (GstBaseAudioVisualizer * scope,
     GstBuffer * audio, GstBuffer * video);

-GST_BOILERPLATE (GstSpectraScope, gst_spectra_scope, GstBaseAudioVisualizer,
+G_DEFINE_TYPE (GstSpectraScope, gst_spectra_scope,
     GST_TYPE_BASE_AUDIO_VISUALIZER);

 static void
-gst_spectra_scope_base_init (gpointer g_class)
+gst_spectra_scope_class_init (GstSpectraScopeClass * g_class)
 {
-  GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
+  GObjectClass *gobject_class = (GObjectClass *) g_class;
+  GstElementClass *element_class = (GstElementClass *) g_class;
+  GstBaseAudioVisualizerClass *scope_class =
+      (GstBaseAudioVisualizerClass *) g_class;
+
+  gobject_class->finalize = gst_spectra_scope_finalize;

   gst_element_class_set_details_simple (element_class,
       "Frequency spectrum scope", "Visualization",
@@ -79,23 +90,13 @@ gst_spectra_scope_base_init (gpointer g_class)
       gst_static_pad_template_get (&gst_spectra_scope_src_template));
   gst_element_class_add_pad_template (element_class,
       gst_static_pad_template_get (&gst_spectra_scope_sink_template));
-}
-
-static void
-gst_spectra_scope_class_init (GstSpectraScopeClass * g_class)
-{
-  GObjectClass *gobject_class = (GObjectClass *) g_class;
-  GstBaseAudioVisualizerClass *scope_class =
-      (GstBaseAudioVisualizerClass *) g_class;
-
-  gobject_class->finalize = gst_spectra_scope_finalize;

   scope_class->setup = GST_DEBUG_FUNCPTR (gst_spectra_scope_setup);
   scope_class->render = GST_DEBUG_FUNCPTR (gst_spectra_scope_render);
 }

 static void
-gst_spectra_scope_init (GstSpectraScope * scope, GstSpectraScopeClass * g_class)
+gst_spectra_scope_init (GstSpectraScope * scope)
 {
   /* do nothing */
 }
@@ -114,7 +115,7 @@ gst_spectra_scope_finalize (GObject * object)
     scope->freq_data = NULL;
   }

-  G_OBJECT_CLASS (parent_class)->finalize (object);
+  G_OBJECT_CLASS (gst_spectra_scope_parent_class)->finalize (object);
 }

 static gboolean
@@ -164,9 +165,11 @@ gst_spectra_scope_render (GstBaseAudioVisualizer * bscope, GstBuffer * audio,
     GstBuffer * video)
 {
   GstSpectraScope *scope = GST_SPECTRA_SCOPE (bscope);
-  guint32 *vdata = (guint32 *) GST_BUFFER_DATA (video);
-  gint16 *adata = (gint16 *) g_memdup (GST_BUFFER_DATA (audio),
-      GST_BUFFER_SIZE (audio));
+  gsize asize;
+  guint32 *vdata =
+      (guint32 *) gst_buffer_map (video, NULL, NULL, GST_MAP_WRITE);
+  gint16 *adata = (gint16 *) gst_buffer_map (audio, &asize, NULL, GST_MAP_READ);
+  gint16 *mono_adata = (gint16 *) g_memdup (adata, asize);
   GstFFTS16Complex *fdata = scope->freq_data;
   guint x, y, off;
   guint l, h = bscope->height - 1;
@@ -175,22 +178,22 @@ gst_spectra_scope_render (GstBaseAudioVisualizer * bscope, GstBuffer * audio,

   if (bscope->channels > 1) {
     guint ch = bscope->channels;
-    guint num_samples = GST_BUFFER_SIZE (audio) / (ch * sizeof (gint16));
+    guint num_samples = asize / (ch * sizeof (gint16));
     guint i, c, v, s = 0;

     /* deinterleave and mixdown adata */
     for (i = 0; i < num_samples; i++) {
       v = 0;
       for (c = 0; c < ch; c++) {
-        v += adata[s++];
+        v += mono_adata[s++];
       }
-      adata[i] = v / ch;
+      mono_adata[i] = v / ch;
     }
   }

   /* run fft */
-  gst_fft_s16_window (scope->fft_ctx, adata, GST_FFT_WINDOW_HAMMING);
-  gst_fft_s16_fft (scope->fft_ctx, adata, fdata);
+  gst_fft_s16_window (scope->fft_ctx, mono_adata, GST_FFT_WINDOW_HAMMING);
+  gst_fft_s16_fft (scope->fft_ctx, mono_adata, fdata);
   g_free (adata);

   /* draw lines */
@@ -210,6 +213,8 @@ gst_spectra_scope_render (GstBaseAudioVisualizer * bscope, GstBuffer * audio,
       add_pixel (&vdata[off], 0x007F7F7F);
     }
   }
+  gst_buffer_unmap (video, vdata, -1);
+  gst_buffer_unmap (audio, adata, -1);

   return TRUE;
 }
diff --git a/gst/audiovisualizers/gstsynaescope.c b/gst/audiovisualizers/gstsynaescope.c
index 414e608..3754c00 100644
--- a/gst/audiovisualizers/gstsynaescope.c
+++ b/gst/audiovisualizers/gstsynaescope.c
@@ -41,14 +41,20 @@ static GstStaticPadTemplate gst_synae_scope_src_template =
 GST_STATIC_PAD_TEMPLATE ("src",
     GST_PAD_SRC,
     GST_PAD_ALWAYS,
-    GST_STATIC_CAPS (GST_VIDEO_CAPS_xRGB_HOST_ENDIAN)
+#if G_BYTE_ORDER == G_BIG_ENDIAN
+    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("xRGB"))
+#else
+    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("BGRx"))
+#endif
     );

 static GstStaticPadTemplate gst_synae_scope_sink_template =
 GST_STATIC_PAD_TEMPLATE ("sink",
     GST_PAD_SINK,
     GST_PAD_ALWAYS,
-    GST_STATIC_CAPS (GST_AUDIO_INT_STANDARD_PAD_TEMPLATE_CAPS)
+    GST_STATIC_CAPS ("audio/x-raw, "
+        "format = (string) " GST_AUDIO_NE (S16) ", "
+        "rate = (int) [ 8000, 96000 ], " "channels = (int) 2")
     );

@@ -62,13 +68,17 @@ static gboolean gst_synae_scope_render (GstBaseAudioVisualizer * scope,
     GstBuffer * audio, GstBuffer * video);

-GST_BOILERPLATE (GstSynaeScope, gst_synae_scope, GstBaseAudioVisualizer,
-    GST_TYPE_BASE_AUDIO_VISUALIZER);
+G_DEFINE_TYPE (GstSynaeScope, gst_synae_scope, GST_TYPE_BASE_AUDIO_VISUALIZER);

 static void
-gst_synae_scope_base_init (gpointer g_class)
+gst_synae_scope_class_init (GstSynaeScopeClass * g_class)
 {
-  GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
+  GObjectClass *gobject_class = (GObjectClass *) g_class;
+  GstElementClass *element_class = (GstElementClass *) g_class;
+  GstBaseAudioVisualizerClass *scope_class =
+      (GstBaseAudioVisualizerClass *) g_class;
+
+  gobject_class->finalize = gst_synae_scope_finalize;

   gst_element_class_set_details_simple (element_class, "Synaescope",
       "Visualization",
@@ -79,23 +89,13 @@ gst_synae_scope_base_init (gpointer g_class)
       gst_static_pad_template_get (&gst_synae_scope_src_template));
   gst_element_class_add_pad_template (element_class,
       gst_static_pad_template_get (&gst_synae_scope_sink_template));
-}
-
-static void
-gst_synae_scope_class_init (GstSynaeScopeClass * g_class)
-{
-  GObjectClass *gobject_class = (GObjectClass *) g_class;
-  GstBaseAudioVisualizerClass *scope_class =
-      (GstBaseAudioVisualizerClass *) g_class;
-
-  gobject_class->finalize = gst_synae_scope_finalize;

   scope_class->setup = GST_DEBUG_FUNCPTR (gst_synae_scope_setup);
   scope_class->render = GST_DEBUG_FUNCPTR (gst_synae_scope_render);
 }

 static void
-gst_synae_scope_init (GstSynaeScope * scope, GstSynaeScopeClass * g_class)
+gst_synae_scope_init (GstSynaeScope * scope)
 {
   guint32 *colors = scope->colors;
   guint *shade = scope->shade;
@@ -144,7 +144,7 @@ gst_synae_scope_finalize (GObject * object)
     scope->adata_r = NULL;
   }

-  G_OBJECT_CLASS (parent_class)->finalize (object);
+  G_OBJECT_CLASS (gst_synae_scope_parent_class)->finalize (object);
 }

 static gboolean
@@ -203,8 +203,10 @@ gst_synae_scope_render (GstBaseAudioVisualizer * bscope, GstBuffer * audio,
     GstBuffer * video)
 {
   GstSynaeScope *scope = GST_SYNAE_SCOPE (bscope);
-  guint32 *vdata = (guint32 *) GST_BUFFER_DATA (video);
-  gint16 *adata = (gint16 *) GST_BUFFER_DATA (audio);
+  gsize asize;
+  guint32 *vdata =
+      (guint32 *) gst_buffer_map (video, NULL, NULL, GST_MAP_WRITE);
+  gint16 *adata = (gint16 *) gst_buffer_map (audio, &asize, NULL, GST_MAP_READ);
   gint16 *adata_l = scope->adata_l;
   gint16 *adata_r = scope->adata_r;
   GstFFTS16Complex *fdata_l = scope->freq_data_l;
@@ -217,7 +219,7 @@ gst_synae_scope_render (GstBaseAudioVisualizer * bscope, GstBuffer * audio,
   guint *shade = scope->shade;
   //guint w2 = w /2;
   guint ch = bscope->channels;
-  guint num_samples = GST_BUFFER_SIZE (audio) / (ch * sizeof (gint16));
+  guint num_samples = asize / (ch * sizeof (gint16));
   gint i, j, b;
   gint br, br1, br2;
   gint clarity;
diff --git a/gst/audiovisualizers/gstwavescope.c b/gst/audiovisualizers/gstwavescope.c
index e735148..62ef816 100644
--- a/gst/audiovisualizers/gstwavescope.c
+++ b/gst/audiovisualizers/gstwavescope.c
@@ -41,14 +41,20 @@ static GstStaticPadTemplate gst_wave_scope_src_template =
 GST_STATIC_PAD_TEMPLATE ("src",
     GST_PAD_SRC,
     GST_PAD_ALWAYS,
-    GST_STATIC_CAPS (GST_VIDEO_CAPS_xRGB_HOST_ENDIAN)
+#if G_BYTE_ORDER == G_BIG_ENDIAN
+    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("xRGB"))
+#else
+    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("BGRx"))
+#endif
     );

 static GstStaticPadTemplate gst_wave_scope_sink_template =
 GST_STATIC_PAD_TEMPLATE ("sink",
     GST_PAD_SINK,
     GST_PAD_ALWAYS,
-    GST_STATIC_CAPS (GST_AUDIO_INT_STANDARD_PAD_TEMPLATE_CAPS)
+    GST_STATIC_CAPS ("audio/x-raw, "
+        "format = (string) " GST_AUDIO_NE (S16) ", "
+        "rate = (int) [ 8000, 96000 ], " "channels = (int) 2")
     );

@@ -59,13 +65,14 @@ static gboolean gst_wave_scope_render (GstBaseAudioVisualizer * scope,
     GstBuffer * audio, GstBuffer * video);

-GST_BOILERPLATE (GstWaveScope, gst_wave_scope, GstBaseAudioVisualizer,
-    GST_TYPE_BASE_AUDIO_VISUALIZER);
+G_DEFINE_TYPE (GstWaveScope, gst_wave_scope, GST_TYPE_BASE_AUDIO_VISUALIZER);

 static void
-gst_wave_scope_base_init (gpointer g_class)
+gst_wave_scope_class_init (GstWaveScopeClass * g_class)
 {
-  GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
+  GstElementClass *element_class = (GstElementClass *) g_class;
+  GstBaseAudioVisualizerClass *scope_class =
+      (GstBaseAudioVisualizerClass *) g_class;

   gst_element_class_set_details_simple (element_class, "Waveform oscilloscope",
       "Visualization",
@@ -75,19 +82,12 @@ gst_wave_scope_base_init (gpointer g_class)
       gst_static_pad_template_get (&gst_wave_scope_src_template));
   gst_element_class_add_pad_template (element_class,
       gst_static_pad_template_get (&gst_wave_scope_sink_template));
-}
-
-static void
-gst_wave_scope_class_init (GstWaveScopeClass * g_class)
-{
-  GstBaseAudioVisualizerClass *scope_class =
-      (GstBaseAudioVisualizerClass *) g_class;

   scope_class->render = GST_DEBUG_FUNCPTR (gst_wave_scope_render);
 }

 static void
-gst_wave_scope_init (GstWaveScope * scope, GstWaveScopeClass * g_class)
+gst_wave_scope_init (GstWaveScope * scope)
 {
   /* do nothing */
 }
@@ -96,15 +96,17 @@ static gboolean
 gst_wave_scope_render (GstBaseAudioVisualizer * scope, GstBuffer * audio,
     GstBuffer * video)
 {
-  guint32 *vdata = (guint32 *) GST_BUFFER_DATA (video);
-  gint16 *adata = (gint16 *) GST_BUFFER_DATA (audio);
+  gsize asize;
+  guint32 *vdata =
+      (guint32 *) gst_buffer_map (video, NULL, NULL, GST_MAP_WRITE);
+  gint16 *adata = (gint16 *) gst_buffer_map (audio, &asize, NULL, GST_MAP_READ);
   guint i, c, s, x, y, off, oy;
   guint num_samples;
   gfloat dx, dy;
   guint w = scope->width;

   /* draw dots */
-  num_samples = GST_BUFFER_SIZE (audio) / (scope->channels * sizeof (gint16));
+  num_samples = asize / (scope->channels * sizeof (gint16));
   dx = (gfloat) scope->width / (gfloat) num_samples;
   dy = scope->height / 65536.0;
   oy = scope->height / 2;
@@ -117,6 +119,8 @@ gst_wave_scope_render (GstBaseAudioVisualizer * scope, GstBuffer * audio,
       vdata[off] = 0x00FFFFFF;
     }
   }
+  gst_buffer_unmap (video, vdata, -1);
+  gst_buffer_unmap (audio, adata, -1);
   return TRUE;
 }
--
2.7.4
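
For readers following the port, here is a minimal sketch (not part of the patch) of how a subclass plugs into the ported GstBaseAudioVisualizer: it mirrors the 0.11 idioms the patch introduces in the scope elements — G_DEFINE_TYPE instead of GST_BOILERPLATE, a single class_init, and gst_buffer_map()/gst_buffer_unmap() instead of GST_BUFFER_DATA. The element name "dummyscope" and its drawing logic are invented for illustration only.

```c
/* Illustrative only: hypothetical "dummyscope" built on the ported base class,
 * assuming the gstbaseaudiovisualizer.h from this patch. Not part of the patch. */
#include <string.h>
#include "gstbaseaudiovisualizer.h"

typedef struct _GstDummyScope
{
  GstBaseAudioVisualizer parent;
} GstDummyScope;

typedef struct _GstDummyScopeClass
{
  GstBaseAudioVisualizerClass parent_class;
} GstDummyScopeClass;

G_DEFINE_TYPE (GstDummyScope, gst_dummy_scope, GST_TYPE_BASE_AUDIO_VISUALIZER);

/* render() gets one writable video frame plus the matching block of audio
 * samples; in 0.11 both are accessed via gst_buffer_map()/gst_buffer_unmap() */
static gboolean
gst_dummy_scope_render (GstBaseAudioVisualizer * scope, GstBuffer * audio,
    GstBuffer * video)
{
  gsize asize;
  guint32 *vdata =
      (guint32 *) gst_buffer_map (video, NULL, NULL, GST_MAP_WRITE);
  gint16 *adata = (gint16 *) gst_buffer_map (audio, &asize, NULL, GST_MAP_READ);
  guint i, x, y, num_samples;

  /* one white dot per sample of the first channel, wavescope-style */
  num_samples = asize / (scope->channels * sizeof (gint16));
  for (i = 0; i < num_samples; i++) {
    x = (i * scope->width) / num_samples;
    y = (scope->height / 2) +
        ((adata[i * scope->channels] * scope->height) / 65536);
    vdata[y * scope->width + x] = 0x00FFFFFF;
  }

  gst_buffer_unmap (video, vdata, -1);
  gst_buffer_unmap (audio, adata, -1);
  return TRUE;
}

static void
gst_dummy_scope_class_init (GstDummyScopeClass * g_class)
{
  GstBaseAudioVisualizerClass *scope_class =
      (GstBaseAudioVisualizerClass *) g_class;

  /* the base class handles caps, QoS and bufferpool setup added in this port;
   * the subclass only fills frames */
  scope_class->render = GST_DEBUG_FUNCPTR (gst_dummy_scope_render);
}

static void
gst_dummy_scope_init (GstDummyScope * scope)
{
  /* nothing to do */
}
```

The point of the sketch is the division of labour after the port: negotiation, the allocation query/bufferpool, timestamping from the adapter, and QoS skipping all live in the base class, so subclasses such as wavescope or spacescope only change how they paint the mapped frame.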