From 7e9ba6264d512cfe2b55e097ac998d5f0c6d805b Mon Sep 17 00:00:00 2001
From: "keonho07.kim"
Date: Thu, 11 Apr 2013 02:53:01 +0900
Subject: [PATCH] WebAudio task shouldn't keep running on suspend, to reduce CPU usage.

[Title] WebAudio task shouldn't keep running on suspend, to reduce CPU usage.
[Problem] webKitWebAudioSrcLoop keeps running even after the browser has been suspended.
[Cause] The GStreamer task is never stopped, so the pipeline keeps running.
[Solution] Implement pauseRendering() to pause the GStreamer task.

Change-Id: I05f60f415c8a10226a97885130afa0e005ea5ec8
---
 .../Modules/webaudio/AudioBufferSourceNode.cpp     |  3 ++
 Source/WebCore/Modules/webaudio/AudioContext.cpp   | 44 +++++++++++++++-
 Source/WebCore/Modules/webaudio/AudioContext.h     | 21 ++++++--
 .../Modules/webaudio/AudioDestinationNode.cpp      |  9 ++++
 .../Modules/webaudio/AudioDestinationNode.h        |  3 ++
 .../Modules/webaudio/AudioScheduledSourceNode.cpp  |  2 +
 .../webaudio/DefaultAudioDestinationNode.cpp       |  8 +++
 .../Modules/webaudio/DefaultAudioDestinationNode.h |  4 +-
 .../Modules/webaudio/OfflineAudioDestinationNode.h |  4 +-
 .../Modules/webaudio/ScriptProcessorNode.cpp       |  8 ++-
 .../audio/gstreamer/AudioDestinationGStreamer.cpp  | 60 +++++++++++++++++-----
 .../audio/gstreamer/AudioDestinationGStreamer.h    |  7 +++
 .../gstreamer/WebKitWebAudioSourceGStreamer.cpp    | 17 +++++-
 13 files changed, 166 insertions(+), 24 deletions(-)
 mode change 100644 => 100755 Source/WebCore/Modules/webaudio/AudioContext.cpp

diff --git a/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp b/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
index 32f43d3..d37807c 100644
--- a/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
@@ -433,6 +433,9 @@ void AudioBufferSourceNode::startGrain(double when, double grainOffset, double g
     m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset, buffer()->sampleRate());
 
     m_playbackState = SCHEDULED_STATE;
+#if ENABLE(TIZEN_WEB_AUDIO)
+    context()->destination()->startRendering();
+#endif
 }
 
 #if ENABLE(TIZEN_WEB_AUDIO)
diff --git a/Source/WebCore/Modules/webaudio/AudioContext.cpp b/Source/WebCore/Modules/webaudio/AudioContext.cpp
old mode 100644
new mode 100755
index fdcbd45..4bfd6a0
--- a/Source/WebCore/Modules/webaudio/AudioContext.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioContext.cpp
@@ -131,6 +131,9 @@ AudioContext::AudioContext(Document* document)
     , m_graphOwnerThread(UndefinedThreadIdentifier)
     , m_isOfflineContext(false)
     , m_activeSourceCount(0)
+#if ENABLE(TIZEN_WEB_AUDIO)
+    , m_activeScriptProcessorCount(0)
+#endif
 {
     constructCommon();
 
@@ -156,6 +159,9 @@ AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t
     , m_graphOwnerThread(UndefinedThreadIdentifier)
     , m_isOfflineContext(true)
     , m_activeSourceCount(0)
+#if ENABLE(TIZEN_WEB_AUDIO)
+    , m_activeScriptProcessorCount(0)
+#endif
 {
     constructCommon();
 
@@ -211,7 +217,9 @@ void AudioContext::lazyInitialize()
                     // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
                     // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
                     // We may want to consider requiring it for symmetry with OfflineAudioContext.
+#if !ENABLE(TIZEN_WEB_AUDIO)
                     m_destinationNode->startRendering();
+#endif
                     ++s_hardwareContextCount;
                 }
 
@@ -440,6 +448,11 @@ PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t buffe
 {
     ASSERT(isMainThread());
     lazyInitialize();
+#if ENABLE(TIZEN_WEB_AUDIO)
+    incrementActiveScriptProcessorCount();
+    destination()->startRendering();
+#endif
+
     RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
 
     if (!node.get()) {
@@ -829,7 +842,7 @@ void AudioContext::deleteMarkedNodes()
         AudioNode* node = m_nodesToDelete[n - 1];
         m_nodesToDelete.removeLast();
 
-        // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions. 
+        // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
         unsigned numberOfInputs = node->numberOfInputs();
         for (unsigned i = 0; i < numberOfInputs; ++i)
             m_dirtySummingJunctions.remove(node->input(i));
@@ -975,6 +988,35 @@ void AudioContext::decrementActiveSourceCount()
     atomicDecrement(&m_activeSourceCount);
 }
 
+#if ENABLE(TIZEN_WEB_AUDIO)
+void AudioContext::incrementActiveScriptProcessorCount()
+{
+    atomicIncrement(&m_activeScriptProcessorCount);
+}
+
+void AudioContext::decrementActiveScriptProcessorCount()
+{
+    atomicDecrement(&m_activeScriptProcessorCount);
+}
+
+void AudioContext::pauseDispatch(void* userData)
+{
+    ASSERT(isMainThread());
+    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
+    ASSERT(context);
+    if (!context)
+        return;
+
+    context->destination()->pauseRendering();
+}
+
+void AudioContext::pause()
+{
+    ASSERT(isAudioThread());
+
+    callOnMainThread(pauseDispatch, this);
+}
+#endif
 } // namespace WebCore
 
 #endif // ENABLE(WEB_AUDIO)
diff --git a/Source/WebCore/Modules/webaudio/AudioContext.h b/Source/WebCore/Modules/webaudio/AudioContext.h
index 215f1c6..34f2390 100644
--- a/Source/WebCore/Modules/webaudio/AudioContext.h
+++ b/Source/WebCore/Modules/webaudio/AudioContext.h
@@ -83,7 +83,7 @@ public:
     virtual ~AudioContext();
 
     bool isInitialized() const;
-    
+
     bool isOfflineContext() { return m_isOfflineContext; }
 
     // Returns true when initialize() was called AND all asynchronous initialization has completed.
@@ -91,7 +91,9 @@ public:
 
     // Document notification
     virtual void stop();
-
+#if ENABLE(TIZEN_WEB_AUDIO)
+    void pause();
+#endif
     Document* document() const; // ASSERTs if document no longer exists.
     bool hasDocument();
 
@@ -100,10 +102,15 @@ public:
     double currentTime() const { return m_destinationNode->currentTime(); }
     float sampleRate() const { return m_destinationNode->sampleRate(); }
    unsigned long activeSourceCount() const { return static_cast<unsigned long>(m_activeSourceCount); }
-
+#if ENABLE(TIZEN_WEB_AUDIO)
+    unsigned long activeScriptProcessorCount() const { return static_cast<unsigned long>(m_activeScriptProcessorCount); }
+#endif
     void incrementActiveSourceCount();
     void decrementActiveSourceCount();
-
+#if ENABLE(TIZEN_WEB_AUDIO)
+    void incrementActiveScriptProcessorCount();
+    void decrementActiveScriptProcessorCount();
+#endif
     PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode&);
     PassRefPtr<AudioBuffer> createBuffer(ArrayBuffer*, bool mixToMono, ExceptionCode&);
 
@@ -264,6 +271,9 @@ private:
     // We'd like to schedule only one stop action for them.
     bool m_isStopScheduled;
     static void uninitializeDispatch(void* userData);
+#if ENABLE(TIZEN_WEB_AUDIO)
+    static void pauseDispatch(void* userData);
+#endif
     void clear();
 
     void scheduleNodeDeletion();
@@ -348,6 +358,9 @@ private:
 
     // Number of AudioBufferSourceNodes that are active (playing).
     int m_activeSourceCount;
+#if ENABLE(TIZEN_WEB_AUDIO)
+    int m_activeScriptProcessorCount;
+#endif
 };
 
 } // WebCore
diff --git a/Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp b/Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp
index 9ca44b4..982c671 100644
--- a/Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp
@@ -87,6 +87,11 @@ void AudioDestinationNode::render(AudioBus* sourceBus, AudioBus* destinationBus,
         return;
     }
 
+#if ENABLE(TIZEN_WEB_AUDIO)
+    if (!context()->activeSourceCount() && !context()->activeScriptProcessorCount())
+        destinationBus->zero();
+#endif
+
     // Let the context take care of any business at the start of each render quantum.
     context()->handlePreRenderTasks();
 
@@ -113,6 +118,10 @@ void AudioDestinationNode::render(AudioBus* sourceBus, AudioBus* destinationBus,
 
     // Advance current sample-frame.
     m_currentSampleFrame += numberOfFrames;
+#if ENABLE(TIZEN_WEB_AUDIO)
+    if (!context()->activeSourceCount() && !context()->activeScriptProcessorCount())
+        context()->pause();
+#endif
 }
 
 } // namespace WebCore
diff --git a/Source/WebCore/Modules/webaudio/AudioDestinationNode.h b/Source/WebCore/Modules/webaudio/AudioDestinationNode.h
index 121403d..17093f1 100644
--- a/Source/WebCore/Modules/webaudio/AudioDestinationNode.h
+++ b/Source/WebCore/Modules/webaudio/AudioDestinationNode.h
@@ -67,6 +67,9 @@ public:
 
     virtual void enableInput() = 0;
     virtual void startRendering() = 0;
+#if ENABLE(TIZEN_WEB_AUDIO)
+    virtual void pauseRendering() = 0;
+#endif
 
     AudioSourceProvider* localAudioInputProvider() { return &m_localAudioInputProvider; }
 
diff --git a/Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.cpp b/Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.cpp
index 9a79e2b..4cfcc13 100644
--- a/Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.cpp
@@ -143,6 +143,8 @@ void AudioScheduledSourceNode::start(double when, ExceptionCode& ec)
     m_startTime = when;
 
     m_playbackState = SCHEDULED_STATE;
+
+    context()->destination()->startRendering();
 }
 
 void AudioScheduledSourceNode::stop(double when, ExceptionCode& ec)
diff --git a/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp b/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp
index 8c43329..369dd9e 100644
--- a/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp
+++ b/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp
@@ -97,6 +97,14 @@ void DefaultAudioDestinationNode::startRendering()
         m_destination->start();
 }
 
+#if ENABLE(TIZEN_WEB_AUDIO)
+void DefaultAudioDestinationNode::pauseRendering()
+{
+    ASSERT(isInitialized());
+    if (isInitialized())
+        m_destination->stop();
+}
+#endif
 } // namespace WebCore
 
 #endif // ENABLE(WEB_AUDIO)
diff --git a/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h b/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h
index 2b999ad..da8eb6a 100644
--- a/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h
+++ b/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h
@@ -49,7 +49,9 @@ public:
     // AudioDestinationNode
     virtual void enableInput() OVERRIDE;
     virtual void startRendering() OVERRIDE;
-
+#if ENABLE(TIZEN_WEB_AUDIO)
+    virtual void pauseRendering() OVERRIDE;
+#endif
 private:
     explicit DefaultAudioDestinationNode(AudioContext*);
     void createDestination();
diff --git a/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h b/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h
index cfc767c..b5fb236 100644
--- a/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h
+++ b/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h
@@ -52,7 +52,9 @@ public:
     // AudioDestinationNode
     virtual void enableInput() OVERRIDE { };
     virtual void startRendering() OVERRIDE;
-
+#if ENABLE(TIZEN_WEB_AUDIO)
+    virtual void pauseRendering() OVERRIDE { };
+#endif
     virtual float sampleRate() const { return m_renderTarget->sampleRate(); }
 
 private:
diff --git a/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp b/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp
index 1f2f13d..f71cb12 100644
--- a/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp
+++ b/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp
@@ -129,6 +129,10 @@ void ScriptProcessorNode::uninitialize()
     m_outputBuffers.clear();
 
     AudioNode::uninitialize();
+
+#if ENABLE(TIZEN_WEB_AUDIO)
+    context()->decrementActiveScriptProcessorCount();
+#endif
 }
 
 void ScriptProcessorNode::process(size_t framesToProcess)
@@ -138,7 +142,7 @@ void ScriptProcessorNode::process(size_t framesToProcess)
     // Additionally, there is a double-buffering for input and output which is exposed directly to JavaScript (see inputBuffer and outputBuffer below).
     // This node is the producer for inputBuffer and the consumer for outputBuffer.
     // The JavaScript code is the consumer of inputBuffer and the producer for outputBuffer.
-    
+
     // Get input and output busses.
     AudioBus* inputBus = this->input(0)->bus();
     AudioBus* outputBus = this->output(0)->bus();
@@ -149,7 +153,7 @@ void ScriptProcessorNode::process(size_t framesToProcess)
     ASSERT(isDoubleBufferIndexGood);
     if (!isDoubleBufferIndexGood)
         return;
-    
+
     AudioBuffer* inputBuffer = m_inputBuffers[doubleBufferIndex].get();
     AudioBuffer* outputBuffer = m_outputBuffers[doubleBufferIndex].get();
 
diff --git a/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp b/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp
index 1f126cf..9d987e7 100755
--- a/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp
+++ b/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp
@@ -121,6 +121,9 @@ AudioDestinationGStreamer::AudioDestinationGStreamer(AudioIOCallback& callback,
     , m_renderBus(2, framesToPull, false)
     , m_sampleRate(sampleRate)
     , m_isPlaying(false)
+#if ENABLE(TIZEN_WEB_AUDIO)
+    , m_timer(this, &AudioDestinationGStreamer::timerFired)
+#endif
 #if ENABLE(TIZEN_GSTREAMER_AUDIO)
     , m_audioSessionManager(AudioSessionManagerGStreamerTizen::createAudioSessionManager())
 #endif
@@ -167,6 +170,10 @@ AudioDestinationGStreamer::~AudioDestinationGStreamer()
     g_signal_handlers_disconnect_by_func(bus.get(), reinterpret_cast<gpointer>(messageCallback), this);
     gst_bus_remove_signal_watch(bus.get());
 
+#if ENABLE(TIZEN_WEB_AUDIO)
+    stopTimer();
+#endif
+
     gst_element_set_state(m_pipeline, GST_STATE_NULL);
 
 #if ENABLE(TIZEN_GSTREAMER_AUDIO)
@@ -257,14 +264,6 @@ gboolean AudioDestinationGStreamer::handleMessage(GstMessage* message)
 #endif
         m_isPlaying = false;
         break;
-
-#if ENABLE(TIZEN_GSTREAMER_AUDIO)
-    case GST_MESSAGE_EOS:
-        gst_element_set_state(m_pipeline, GST_STATE_NULL);
-        if (m_audioSessionManager)
-            m_audioSessionManager->setSoundState(ASM_STATE_STOP);
-        break;
-#endif
     default:
         break;
     }
@@ -276,30 +275,63 @@ void AudioDestinationGStreamer::start()
     ASSERT(m_wavParserAvailable);
     if (!m_wavParserAvailable)
         return;
+#if ENABLE(TIZEN_WEB_AUDIO)
+    stopTimer();
+    if (m_isPlaying)
+        return;
 #if ENABLE(TIZEN_GSTREAMER_AUDIO)
     if (m_audioSessionManager && !m_audioSessionManager->setSoundState(ASM_STATE_PLAYING))
         return;
 #endif
+#endif
     gst_element_set_state(m_pipeline, GST_STATE_PLAYING);
     m_isPlaying = true;
 }
 
 void AudioDestinationGStreamer::stop()
 {
+#if ENABLE(TIZEN_WEB_AUDIO)
     ASSERT(m_wavParserAvailable && m_audioSinkAvailable);
-#if ENABLE(TIZEN_GSTREAMER_AUDIO)
     if (!m_wavParserAvailable || !m_audioSinkAvailable)
-        return;
-    if (m_audioSessionManager && !m_audioSessionManager->setSoundState(ASM_STATE_PAUSE))
-        return;
+        return;
+    if (!m_isPlaying)
+        return;
+
+    startTimer(0.7);
 #else
+    ASSERT(m_wavParserAvailable && m_audioSinkAvailable);
     if (!m_wavParserAvailable || m_audioSinkAvailable)
-        return;
-#endif
+        return;
+
     gst_element_set_state(m_pipeline, GST_STATE_PAUSED);
     m_isPlaying = false;
+#endif
 }
 
+#if ENABLE(TIZEN_WEB_AUDIO)
+void AudioDestinationGStreamer::startTimer(double expiredTime)
+{
+    if (!m_timer.isActive())
+        m_timer.startOneShot(expiredTime);
+}
+
+void AudioDestinationGStreamer::stopTimer()
+{
+    if (m_timer.isActive())
+        m_timer.stop();
+}
+
+void AudioDestinationGStreamer::timerFired(Timer<AudioDestinationGStreamer>*)
+{
+    m_timer.stop();
+#if ENABLE(TIZEN_GSTREAMER_AUDIO)
+    if (m_audioSessionManager && !m_audioSessionManager->setSoundState(ASM_STATE_PAUSE))
+        return;
+#endif
+    gst_element_set_state(m_pipeline, GST_STATE_PAUSED);
+    m_isPlaying = false;
+}
+#endif
 } // namespace WebCore
 
 #endif // ENABLE(WEB_AUDIO)
diff --git a/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.h b/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.h
index a51ca27..5546f14 100644
--- a/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.h
+++ b/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.h
@@ -53,6 +53,13 @@ private:
     float m_sampleRate;
     bool m_isPlaying;
 
+#if ENABLE(TIZEN_WEB_AUDIO)
+    void startTimer(double);
+    void stopTimer();
+    void timerFired(Timer<AudioDestinationGStreamer>*);
+
+    Timer<AudioDestinationGStreamer> m_timer;
+#endif
     bool m_wavParserAvailable;
     bool m_audioSinkAvailable;
     GstElement* m_pipeline;
diff --git a/Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp b/Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp
index 72911b7..1c07a33 100755
--- a/Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp
+++ b/Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp
@@ -410,6 +410,13 @@ static GstStateChangeReturn webKitWebAudioSrcChangeState(GstElement* element, Gs
             return GST_STATE_CHANGE_FAILURE;
         }
         break;
+#if ENABLE(TIZEN_WEB_AUDIO)
+    case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
+        GST_DEBUG_OBJECT(src, "PAUSED->PLAYING");
+        if (!gst_task_start(src->priv->task.get()))
+            return GST_STATE_CHANGE_FAILURE;
+        break;
+#endif
     default:
         break;
     }
@@ -421,7 +428,14 @@ static GstStateChangeReturn webKitWebAudioSrcChangeState(GstElement* element, Gs
     }
 
     switch (transition) {
-    case GST_STATE_CHANGE_READY_TO_PAUSED:
+#if ENABLE(TIZEN_WEB_AUDIO)
+    case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
+        GST_DEBUG_OBJECT(src, "PLAYING->PAUSED");
+        if (!gst_task_stop(src->priv->task.get()))
+            returnValue = GST_STATE_CHANGE_FAILURE;
+        break;
+#else
+    case GST_STATE_CHANGE_READY_TO_PAUSED:
         GST_DEBUG_OBJECT(src, "READY->PAUSED");
         if (!gst_task_start(src->priv->task.get()))
            returnValue = GST_STATE_CHANGE_FAILURE;
@@ -431,6 +445,7 @@ static GstStateChangeReturn webKitWebAudioSrcChangeState(GstElement* element, Gs
         if (!gst_task_join(src->priv->task.get()))
             returnValue = GST_STATE_CHANGE_FAILURE;
         break;
+#endif
     default:
         break;
     }
-- 
2.7.4