1f126cf0dd60c20e7903a67d6036e89fae0203c2
[framework/web/webkit-efl.git] / Source / WebCore / platform / audio / gstreamer / AudioDestinationGStreamer.cpp
1 /*
2  *  Copyright (C) 2011, 2012 Igalia S.L
3  *
4  *  This library is free software; you can redistribute it and/or
5  *  modify it under the terms of the GNU Lesser General Public
6  *  License as published by the Free Software Foundation; either
7  *  version 2 of the License, or (at your option) any later version.
8  *
9  *  This library is distributed in the hope that it will be useful,
10  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
11  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  *  Lesser General Public License for more details.
13  *
14  *  You should have received a copy of the GNU Lesser General Public
15  *  License along with this library; if not, write to the Free Software
16  *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18
19 #include "config.h"
20
21 #if ENABLE(WEB_AUDIO)
22
23 #include "AudioDestinationGStreamer.h"
24
25 #include "AudioChannel.h"
26 #include "AudioSourceProvider.h"
27 #include <wtf/gobject/GOwnPtr.h>
28 #include "GRefPtrGStreamer.h"
29 #include "GStreamerVersioning.h"
30 #include "Logging.h"
31 #include "WebKitWebAudioSourceGStreamer.h"
32 #include <gst/gst.h>
33 #include <gst/pbutils/pbutils.h>
34
35 namespace WebCore {
36
// Size of the AudioBus for playback. The webkitwebaudiosrc element
// needs to handle this number of frames per cycle as well.
// (128 is the Web Audio render quantum; see the "frames" property
// passed to the source element in the constructor below.)
const unsigned framesToPull = 128;

41 gboolean messageCallback(GstBus* bus, GstMessage* message, AudioDestinationGStreamer* destination)
42 {
43     return destination->handleMessage(message);
44 }
45
46 PassOwnPtr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate)
47 {
48     // FIXME: Add support for local/live audio input.
49     if (numberOfInputChannels)
50         LOG(Media, "AudioDestination::create(%u, %u, %f) - unhandled input channels", numberOfInputChannels, numberOfOutputChannels, sampleRate);
51
52     // FIXME: Add support for multi-channel (> stereo) output.
53     if (numberOfOutputChannels != 2)
54         LOG(Media, "AudioDestination::create(%u, %u, %f) - unhandled output channels", numberOfInputChannels, numberOfOutputChannels, sampleRate);
55
56     return adoptPtr(new AudioDestinationGStreamer(callback, sampleRate));
57 }
58
59 float AudioDestination::hardwareSampleRate()
60 {
61     return 44100;
62 }
63
#ifndef GST_API_VERSION_1
// GStreamer 0.10 only: wavparse exposes its source pad dynamically, so the
// tail of the pipeline is attached from this "pad-added" signal handler.
static void onGStreamerWavparsePadAddedCallback(GstElement*, GstPad* pad, AudioDestinationGStreamer* destination)
{
    destination->finishBuildingPipelineAfterWavParserPadReady(pad);
}
#endif
70
71 #if ENABLE(TIZEN_GSTREAMER_AUDIO)
72 static ASM_cb_result_t AudioDestinationAudioSessionEventSourcePause(ASM_event_sources_t eventSource, void* callbackData)
73 {
74     AudioDestinationGStreamer* pDestination = static_cast<AudioDestinationGStreamer*>(callbackData);
75     if (!pDestination)
76         return ASM_CB_RES_IGNORE;
77
78     switch (eventSource) {
79     case ASM_EVENT_SOURCE_CALL_START:
80     case ASM_EVENT_SOURCE_ALARM_START:
81     case ASM_EVENT_SOURCE_EARJACK_UNPLUG:
82     case ASM_EVENT_SOURCE_MEDIA:
83     case ASM_EVENT_SOURCE_EMERGENCY_START:
84     case ASM_EVENT_SOURCE_OTHER_PLAYER_APP:
85     case ASM_EVENT_SOURCE_RESOURCE_CONFLICT:
86         pDestination->stop();
87         return ASM_CB_RES_PAUSE;
88     default:
89         return ASM_CB_RES_NONE;
90     }
91 }
92
93 static ASM_cb_result_t AudioDestinationAudioSessionEventSourcePlay(ASM_event_sources_t eventSource, void* callbackData)
94 {
95     AudioDestinationGStreamer* pDestination = static_cast<AudioDestinationGStreamer*>(callbackData);
96     if (!pDestination)
97         return ASM_CB_RES_IGNORE;
98
99     switch (eventSource) {
100     case ASM_EVENT_SOURCE_ALARM_END:
101         pDestination->start();
102         return ASM_CB_RES_PLAYING;
103     default:
104         return ASM_CB_RES_NONE;
105     }
106 }
107
108 static ASM_cb_result_t AudioDestinationAudioSessionNotifyCallback(int, ASM_event_sources_t eventSource, ASM_sound_commands_t command, unsigned int, void* callbackData)
109 {
110     if (command == ASM_COMMAND_STOP || command == ASM_COMMAND_PAUSE)
111         return AudioDestinationAudioSessionEventSourcePause(eventSource, callbackData);
112     if (command == ASM_COMMAND_PLAY || command == ASM_COMMAND_RESUME)
113         return AudioDestinationAudioSessionEventSourcePlay(eventSource, callbackData);
114
115     return ASM_CB_RES_NONE;
116 }
117 #endif
118
// Builds the head of the playback pipeline: webkitwebaudiosrc -> wavparse.
// The tail (audioconvert -> sink) is attached by
// finishBuildingPipelineAfterWavParserPadReady(), either immediately
// (GStreamer 1.0) or from wavparse's pad-added signal (0.10).
AudioDestinationGStreamer::AudioDestinationGStreamer(AudioIOCallback& callback, float sampleRate)
    : m_callback(callback)
    , m_renderBus(2, framesToPull, false) // Stereo bus; the source element pulls framesToPull frames per cycle.
    , m_sampleRate(sampleRate)
    , m_isPlaying(false)
#if ENABLE(TIZEN_GSTREAMER_AUDIO)
    , m_audioSessionManager(AudioSessionManagerGStreamerTizen::createAudioSessionManager())
#endif
{
#if ENABLE(TIZEN_GSTREAMER_AUDIO)
    // Register with the Tizen audio session manager so system events
    // (calls, alarms, ...) can pause/resume this destination.
    if (m_audioSessionManager)
        m_audioSessionManager->registerAudioSessionManager(MM_SESSION_TYPE_SHARE, AudioDestinationAudioSessionNotifyCallback, this);
#endif
    m_pipeline = gst_pipeline_new("play");
    GRefPtr<GstBus> bus = webkitGstPipelineGetBus(GST_PIPELINE(m_pipeline));
    ASSERT(bus);
    // Route pipeline bus messages to handleMessage() via messageCallback.
    gst_bus_add_signal_watch(bus.get());
    g_signal_connect(bus.get(), "message", G_CALLBACK(messageCallback), this);

    // Source element that renders audio pulled from m_callback into m_renderBus.
    GstElement* webkitAudioSrc = reinterpret_cast<GstElement*>(g_object_new(WEBKIT_TYPE_WEB_AUDIO_SRC,
                                                                            "rate", sampleRate,
                                                                            "bus", &m_renderBus,
                                                                            "provider", &m_callback,
                                                                            "frames", framesToPull, NULL));

    GstElement* wavParser = gst_element_factory_make("wavparse", 0);

    m_wavParserAvailable = wavParser;
    ASSERT_WITH_MESSAGE(m_wavParserAvailable, "Failed to create GStreamer wavparse element");
    if (!m_wavParserAvailable)
        return;

#ifndef GST_API_VERSION_1
    // GStreamer 0.10: wavparse's src pad appears dynamically, so finish the
    // pipeline from the pad-added handler.
    g_signal_connect(wavParser, "pad-added", G_CALLBACK(onGStreamerWavparsePadAddedCallback), this);
#endif
    gst_bin_add_many(GST_BIN(m_pipeline), webkitAudioSrc, wavParser, NULL);
    gst_element_link_pads_full(webkitAudioSrc, "src", wavParser, "sink", GST_PAD_LINK_CHECK_NOTHING);

#ifdef GST_API_VERSION_1
    // GStreamer 1.0: the src pad is static, so finish the pipeline right away.
    GRefPtr<GstPad> srcPad = adoptGRef(gst_element_get_static_pad(wavParser, "src"));
    finishBuildingPipelineAfterWavParserPadReady(srcPad.get());
#endif
}
162
AudioDestinationGStreamer::~AudioDestinationGStreamer()
{
    // Disconnect the bus watch first so messageCallback cannot fire on a
    // half-destroyed object during teardown.
    GRefPtr<GstBus> bus = webkitGstPipelineGetBus(GST_PIPELINE(m_pipeline));
    ASSERT(bus);
    g_signal_handlers_disconnect_by_func(bus.get(), reinterpret_cast<gpointer>(messageCallback), this);
    gst_bus_remove_signal_watch(bus.get());

    gst_element_set_state(m_pipeline, GST_STATE_NULL);

#if ENABLE(TIZEN_GSTREAMER_AUDIO)
    // Tell the Tizen audio session manager playback has stopped.
    if (m_audioSessionManager)
        m_audioSessionManager->setSoundState(ASM_STATE_STOP);
#endif
    gst_object_unref(m_pipeline);
}
178
179 void AudioDestinationGStreamer::finishBuildingPipelineAfterWavParserPadReady(GstPad* pad)
180 {
181     ASSERT(m_wavParserAvailable);
182
183 #if ENABLE(TIZEN_GSTREAMER_AUDIO)
184     GRefPtr<GstElement> audioSink = adoptGRef(gst_element_factory_make("avsysaudiosink", 0));
185     m_audioSinkAvailable = audioSink;
186     g_object_set(audioSink.get(), "close-handle-on-prepare", 1, NULL);
187
188     if (!audioSink.get()) {
189         TIZEN_LOGE("GST_MESSAGE_ERROR : Failed to create GStreamer avsysaudiosink element");
190         LOG_ERROR("Failed to create GStreamer avsysaudiosink element");
191         return;
192     }
193 #else
194     GRefPtr<GstElement> audioSink = gst_element_factory_make("autoaudiosink", 0);
195     m_audioSinkAvailable = audioSink;
196
197     if (!audioSink) {
198 #if ENABLE(TIZEN_DLOG_SUPPORT)
199         TIZEN_LOGE("GST_MESSAGE_ERROR : Failed to create GStreamer autoaudiosink element");
200 #endif
201         LOG_ERROR("Failed to create GStreamer autoaudiosink element");
202         return;
203     }
204
205     // Autoaudiosink does the real sink detection in the GST_STATE_NULL->READY transition
206     // so it's best to roll it to READY as soon as possible to ensure the underlying platform
207     // audiosink was loaded correctly.
208     GstStateChangeReturn stateChangeReturn = gst_element_set_state(audioSink.get(), GST_STATE_READY);
209     if (stateChangeReturn == GST_STATE_CHANGE_FAILURE) {
210 #if ENABLE(TIZEN_DLOG_SUPPORT)
211         TIZEN_LOGE("GST_MESSAGE_ERROR : Failed to change autoaudiosink element state");
212 #endif
213         LOG_ERROR("Failed to change autoaudiosink element state");
214         gst_element_set_state(audioSink.get(), GST_STATE_NULL);
215         m_audioSinkAvailable = false;
216         return;
217     }
218 #endif
219
220     GstElement* audioConvert = gst_element_factory_make("audioconvert", 0);
221     gst_bin_add_many(GST_BIN(m_pipeline), audioConvert, audioSink.get(), NULL);
222
223     // Link wavparse's src pad to audioconvert sink pad.
224     GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(audioConvert, "sink"));
225     gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING);
226
227     // Link audioconvert to audiosink and roll states.
228     gst_element_link_pads_full(audioConvert, "src", audioSink.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);
229     gst_element_sync_state_with_parent(audioConvert);
230     gst_element_sync_state_with_parent(audioSink.leakRef());
231 }
232
// Bus message handler for the playback pipeline. Warnings are logged;
// fatal errors (and, on Tizen, EOS) roll the pipeline down to NULL and
// release the audio session. Always returns TRUE so the signal watch
// stays installed.
gboolean AudioDestinationGStreamer::handleMessage(GstMessage* message)
{
    GOwnPtr<GError> error;
    GOwnPtr<gchar> debug;

    switch (GST_MESSAGE_TYPE(message)) {
    case GST_MESSAGE_WARNING:
        gst_message_parse_warning(message, &error.outPtr(), &debug.outPtr());
#if ENABLE(TIZEN_DLOG_SUPPORT)
        TIZEN_LOGE("GST_MESSAGE_WARNING: %d %s. Debug output: %s", error->code,  error->message, debug.get());
#endif
        g_warning("Warning: %d, %s. Debug output: %s", error->code,  error->message, debug.get());
        break;
    case GST_MESSAGE_ERROR:
        gst_message_parse_error(message, &error.outPtr(), &debug.outPtr());
#if ENABLE(TIZEN_DLOG_SUPPORT)
        TIZEN_LOGE("GST_MESSAGE_ERROR: %d, %s. Debug output: %s", error->code,  error->message, debug.get());
#endif
        g_warning("Error: %d, %s. Debug output: %s", error->code,  error->message, debug.get());
        // Fatal pipeline error: stop playback entirely.
        gst_element_set_state(m_pipeline, GST_STATE_NULL);

#if ENABLE(TIZEN_GSTREAMER_AUDIO)
        if (m_audioSessionManager)
            m_audioSessionManager->setSoundState(ASM_STATE_STOP);
#endif
        m_isPlaying = false;
        break;

#if ENABLE(TIZEN_GSTREAMER_AUDIO)
    case GST_MESSAGE_EOS:
        // End of stream: release the pipeline and the audio session.
        gst_element_set_state(m_pipeline, GST_STATE_NULL);
        if (m_audioSessionManager)
            m_audioSessionManager->setSoundState(ASM_STATE_STOP);
        break;
#endif
    default:
        break;
    }
    return TRUE;
}
273
// Rolls the pipeline to PLAYING. No-op if the wavparse element was never
// created. On Tizen the audio session manager must first grant the
// PLAYING sound state, otherwise playback is not started.
void AudioDestinationGStreamer::start()
{
    ASSERT(m_wavParserAvailable);
    if (!m_wavParserAvailable)
        return;
#if ENABLE(TIZEN_GSTREAMER_AUDIO)
    if (m_audioSessionManager && !m_audioSessionManager->setSoundState(ASM_STATE_PLAYING))
        return;
#endif
    gst_element_set_state(m_pipeline, GST_STATE_PLAYING);
    m_isPlaying = true;
}
286
287 void AudioDestinationGStreamer::stop()
288 {
289     ASSERT(m_wavParserAvailable && m_audioSinkAvailable);
290 #if ENABLE(TIZEN_GSTREAMER_AUDIO)
291     if (!m_wavParserAvailable || !m_audioSinkAvailable)
292        return;
293     if (m_audioSessionManager && !m_audioSessionManager->setSoundState(ASM_STATE_PAUSE))
294        return;
295 #else
296     if (!m_wavParserAvailable || m_audioSinkAvailable)
297        return;
298 #endif
299     gst_element_set_state(m_pipeline, GST_STATE_PAUSED);
300     m_isPlaying = false;
301 }
302
303 } // namespace WebCore
304
305 #endif // ENABLE(WEB_AUDIO)