WebAudio task shouldn't keep running on suspend, to reduce CPU usage.
author: keonho07.kim <keonho07.kim@samsung.com>
Wed, 10 Apr 2013 17:53:01 +0000 (02:53 +0900)
committer: Gerrit Code Review <gerrit2@kim11>
Fri, 12 Apr 2013 03:16:28 +0000 (12:16 +0900)
[Title] WebAudio task shouldn't keep running on suspend, to reduce CPU usage.
[Problem] webKitWebAudioSrcLoop keeps running even after the browser has been suspended.
[Cause] The GStreamer task is never stopped, because the pipeline is kept in the running state.
[Solution] Implement pauseRendering() to pause the GStreamer task.

Change-Id: I05f60f415c8a10226a97885130afa0e005ea5ec8

13 files changed:
Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
Source/WebCore/Modules/webaudio/AudioContext.cpp [changed mode: 0644->0755]
Source/WebCore/Modules/webaudio/AudioContext.h
Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp
Source/WebCore/Modules/webaudio/AudioDestinationNode.h
Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.cpp
Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp
Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h
Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h
Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp
Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp
Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.h
Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp

index 32f43d3..d37807c 100644 (file)
@@ -433,6 +433,9 @@ void AudioBufferSourceNode::startGrain(double when, double grainOffset, double g
     m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset, buffer()->sampleRate());
     
     m_playbackState = SCHEDULED_STATE;
+#if ENABLE(TIZEN_WEB_AUDIO)
+    context()->destination()->startRendering();
+#endif
 }
 
 #if ENABLE(TIZEN_WEB_AUDIO)
old mode 100644 (file)
new mode 100755 (executable)
index fdcbd45..4bfd6a0
@@ -131,6 +131,9 @@ AudioContext::AudioContext(Document* document)
     , m_graphOwnerThread(UndefinedThreadIdentifier)
     , m_isOfflineContext(false)
     , m_activeSourceCount(0)
+#if ENABLE(TIZEN_WEB_AUDIO)
+    , m_activeScriptProcessorCount(0)
+#endif
 {
     constructCommon();
 
@@ -156,6 +159,9 @@ AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t
     , m_graphOwnerThread(UndefinedThreadIdentifier)
     , m_isOfflineContext(true)
     , m_activeSourceCount(0)
+#if ENABLE(TIZEN_WEB_AUDIO)
+    , m_activeScriptProcessorCount(0)
+#endif
 {
     constructCommon();
 
@@ -211,7 +217,9 @@ void AudioContext::lazyInitialize()
                     // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
                     // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
                     // We may want to consider requiring it for symmetry with OfflineAudioContext.
+#if !ENABLE(TIZEN_WEB_AUDIO)
                     m_destinationNode->startRendering();                    
+#endif
                     ++s_hardwareContextCount;
                 }
 
@@ -440,6 +448,11 @@ PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t buffe
 {
     ASSERT(isMainThread());
     lazyInitialize();
+#if ENABLE(TIZEN_WEB_AUDIO)
+    incrementActiveScriptProcessorCount();
+    destination()->startRendering();
+#endif
+
     RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
 
     if (!node.get()) {
@@ -829,7 +842,7 @@ void AudioContext::deleteMarkedNodes()
             AudioNode* node = m_nodesToDelete[n - 1];
             m_nodesToDelete.removeLast();
 
-            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
+             // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
             unsigned numberOfInputs = node->numberOfInputs();
             for (unsigned i = 0; i < numberOfInputs; ++i)
                 m_dirtySummingJunctions.remove(node->input(i));
@@ -975,6 +988,35 @@ void AudioContext::decrementActiveSourceCount()
     atomicDecrement(&m_activeSourceCount);
 }
 
+#if ENABLE(TIZEN_WEB_AUDIO)
+void AudioContext::incrementActiveScriptProcessorCount()
+{
+    atomicIncrement(&m_activeScriptProcessorCount);
+}
+
+void AudioContext::decrementActiveScriptProcessorCount()
+{
+    atomicDecrement(&m_activeScriptProcessorCount);
+}
+
+void AudioContext::pauseDispatch(void* userData)
+{
+    ASSERT(isMainThread());
+    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
+    ASSERT(context);
+    if (!context)
+        return;
+
+    context->destination()->pauseRendering();
+}
+
+void AudioContext::pause()
+{
+    ASSERT(isAudioThread());
+
+    callOnMainThread(pauseDispatch, this);
+}
+#endif
 } // namespace WebCore
 
 #endif // ENABLE(WEB_AUDIO)
index 215f1c6..34f2390 100644 (file)
@@ -83,7 +83,7 @@ public:
     virtual ~AudioContext();
 
     bool isInitialized() const;
-    
+
     bool isOfflineContext() { return m_isOfflineContext; }
 
     // Returns true when initialize() was called AND all asynchronous initialization has completed.
@@ -91,7 +91,9 @@ public:
 
     // Document notification
     virtual void stop();
-
+#if ENABLE(TIZEN_WEB_AUDIO)
+    void pause();
+#endif
     Document* document() const; // ASSERTs if document no longer exists.
     bool hasDocument();
 
@@ -100,10 +102,15 @@ public:
     double currentTime() const { return m_destinationNode->currentTime(); }
     float sampleRate() const { return m_destinationNode->sampleRate(); }
     unsigned long activeSourceCount() const { return static_cast<unsigned long>(m_activeSourceCount); }
-
+#if ENABLE(TIZEN_WEB_AUDIO)
+    unsigned long activeScriptProcessorCount() const { return static_cast<unsigned long>(m_activeScriptProcessorCount); }
+#endif
     void incrementActiveSourceCount();
     void decrementActiveSourceCount();
-    
+#if ENABLE(TIZEN_WEB_AUDIO)
+    void incrementActiveScriptProcessorCount();
+    void decrementActiveScriptProcessorCount();
+#endif
     PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode&);
     PassRefPtr<AudioBuffer> createBuffer(ArrayBuffer*, bool mixToMono, ExceptionCode&);
 
@@ -264,6 +271,9 @@ private:
     // We'd like to schedule only one stop action for them.
     bool m_isStopScheduled;
     static void uninitializeDispatch(void* userData);
+#if ENABLE(TIZEN_WEB_AUDIO)
+    static void pauseDispatch(void* userData);
+#endif
     void clear();
 
     void scheduleNodeDeletion();
@@ -348,6 +358,9 @@ private:
 
     // Number of AudioBufferSourceNodes that are active (playing).
     int m_activeSourceCount;
+#if ENABLE(TIZEN_WEB_AUDIO)
+    int m_activeScriptProcessorCount;
+#endif
 };
 
 } // WebCore
index 9ca44b4..982c671 100644 (file)
@@ -87,6 +87,11 @@ void AudioDestinationNode::render(AudioBus* sourceBus, AudioBus* destinationBus,
         return;
     }
 
+#if ENABLE(TIZEN_WEB_AUDIO)
+    if (!context()->activeSourceCount() && !context()->activeScriptProcessorCount())
+        destinationBus->zero();
+#endif
+
     // Let the context take care of any business at the start of each render quantum.
     context()->handlePreRenderTasks();
 
@@ -113,6 +118,10 @@ void AudioDestinationNode::render(AudioBus* sourceBus, AudioBus* destinationBus,
     
     // Advance current sample-frame.
     m_currentSampleFrame += numberOfFrames;
+#if ENABLE(TIZEN_WEB_AUDIO)
+    if (!context()->activeSourceCount() && !context()->activeScriptProcessorCount())
+        context()->pause();
+#endif
 }
 
 } // namespace WebCore
index 121403d..17093f1 100644 (file)
@@ -67,6 +67,9 @@ public:
 
     virtual void enableInput() = 0;
     virtual void startRendering() = 0;
+#if ENABLE(TIZEN_WEB_AUDIO)
+    virtual void pauseRendering() = 0;
+#endif
 
     AudioSourceProvider* localAudioInputProvider() { return &m_localAudioInputProvider; }
     
index 9a79e2b..4cfcc13 100644 (file)
@@ -143,6 +143,8 @@ void AudioScheduledSourceNode::start(double when, ExceptionCode& ec)
 
     m_startTime = when;
     m_playbackState = SCHEDULED_STATE;
+
+    context()->destination()->startRendering();
 }
 
 void AudioScheduledSourceNode::stop(double when, ExceptionCode& ec)
index 8c43329..369dd9e 100644 (file)
@@ -97,6 +97,14 @@ void DefaultAudioDestinationNode::startRendering()
         m_destination->start();
 }
 
+#if ENABLE(TIZEN_WEB_AUDIO)
+void DefaultAudioDestinationNode::pauseRendering()
+{
+    ASSERT(isInitialized());
+    if (isInitialized())
+        m_destination->stop();
+}
+#endif
 } // namespace WebCore
 
 #endif // ENABLE(WEB_AUDIO)
index 2b999ad..da8eb6a 100644 (file)
@@ -49,7 +49,9 @@ public:
     // AudioDestinationNode
     virtual void enableInput() OVERRIDE;
     virtual void startRendering() OVERRIDE;
-    
+#if ENABLE(TIZEN_WEB_AUDIO)
+    virtual void pauseRendering() OVERRIDE;
+#endif
 private:
     explicit DefaultAudioDestinationNode(AudioContext*);
     void createDestination();
index cfc767c..b5fb236 100644 (file)
@@ -52,7 +52,9 @@ public:
     // AudioDestinationNode
     virtual void enableInput() OVERRIDE { };
     virtual void startRendering() OVERRIDE;
-
+#if ENABLE(TIZEN_WEB_AUDIO)
+    virtual void pauseRendering() OVERRIDE { };
+#endif
     virtual float sampleRate()  const { return m_renderTarget->sampleRate(); }
 
 private:
index 1f2f13d..f71cb12 100644 (file)
@@ -129,6 +129,10 @@ void ScriptProcessorNode::uninitialize()
     m_outputBuffers.clear();
 
     AudioNode::uninitialize();
+
+#if ENABLE(TIZEN_WEB_AUDIO)
+    context()->decrementActiveScriptProcessorCount();
+#endif
 }
 
 void ScriptProcessorNode::process(size_t framesToProcess)
@@ -138,7 +142,7 @@ void ScriptProcessorNode::process(size_t framesToProcess)
     // Additionally, there is a double-buffering for input and output which is exposed directly to JavaScript (see inputBuffer and outputBuffer below).
     // This node is the producer for inputBuffer and the consumer for outputBuffer.
     // The JavaScript code is the consumer of inputBuffer and the producer for outputBuffer.
-    
+
     // Get input and output busses.
     AudioBus* inputBus = this->input(0)->bus();
     AudioBus* outputBus = this->output(0)->bus();
@@ -149,7 +153,7 @@ void ScriptProcessorNode::process(size_t framesToProcess)
     ASSERT(isDoubleBufferIndexGood);
     if (!isDoubleBufferIndexGood)
         return;
-    
+
     AudioBuffer* inputBuffer = m_inputBuffers[doubleBufferIndex].get();
     AudioBuffer* outputBuffer = m_outputBuffers[doubleBufferIndex].get();
 
index 1f126cf..9d987e7 100755 (executable)
@@ -121,6 +121,9 @@ AudioDestinationGStreamer::AudioDestinationGStreamer(AudioIOCallback& callback,
     , m_renderBus(2, framesToPull, false)
     , m_sampleRate(sampleRate)
     , m_isPlaying(false)
+#if ENABLE(TIZEN_WEB_AUDIO)
+    , m_timer(this, &AudioDestinationGStreamer::timerFired)
+#endif
 #if ENABLE(TIZEN_GSTREAMER_AUDIO)
     , m_audioSessionManager(AudioSessionManagerGStreamerTizen::createAudioSessionManager())
 #endif
@@ -167,6 +170,10 @@ AudioDestinationGStreamer::~AudioDestinationGStreamer()
     g_signal_handlers_disconnect_by_func(bus.get(), reinterpret_cast<gpointer>(messageCallback), this);
     gst_bus_remove_signal_watch(bus.get());
 
+#if ENABLE(TIZEN_WEB_AUDIO)
+    stopTimer();
+#endif
+
     gst_element_set_state(m_pipeline, GST_STATE_NULL);
 
 #if ENABLE(TIZEN_GSTREAMER_AUDIO)
@@ -257,14 +264,6 @@ gboolean AudioDestinationGStreamer::handleMessage(GstMessage* message)
 #endif
         m_isPlaying = false;
         break;
-
-#if ENABLE(TIZEN_GSTREAMER_AUDIO)
-    case GST_MESSAGE_EOS:
-        gst_element_set_state(m_pipeline, GST_STATE_NULL);
-        if (m_audioSessionManager)
-            m_audioSessionManager->setSoundState(ASM_STATE_STOP);
-        break;
-#endif
     default:
         break;
     }
@@ -276,30 +275,63 @@ void AudioDestinationGStreamer::start()
     ASSERT(m_wavParserAvailable);
     if (!m_wavParserAvailable)
         return;
+#if ENABLE(TIZEN_WEB_AUDIO)
+    stopTimer();
+    if (m_isPlaying)
+        return;
 #if ENABLE(TIZEN_GSTREAMER_AUDIO)
     if (m_audioSessionManager && !m_audioSessionManager->setSoundState(ASM_STATE_PLAYING))
         return;
 #endif
+#endif
     gst_element_set_state(m_pipeline, GST_STATE_PLAYING);
     m_isPlaying = true;
 }
 
 void AudioDestinationGStreamer::stop()
 {
+#if ENABLE(TIZEN_WEB_AUDIO)
     ASSERT(m_wavParserAvailable && m_audioSinkAvailable);
-#if ENABLE(TIZEN_GSTREAMER_AUDIO)
     if (!m_wavParserAvailable || !m_audioSinkAvailable)
-       return;
-    if (m_audioSessionManager && !m_audioSessionManager->setSoundState(ASM_STATE_PAUSE))
-       return;
+        return;
+    if (!m_isPlaying)
+        return;
+
+    startTimer(0.7);
 #else
+    ASSERT(m_wavParserAvailable && m_audioSinkAvailable);
     if (!m_wavParserAvailable || m_audioSinkAvailable)
-       return;
-#endif
+        return;
+
     gst_element_set_state(m_pipeline, GST_STATE_PAUSED);
     m_isPlaying = false;
+#endif
+}
+
+#if ENABLE(TIZEN_WEB_AUDIO)
+void AudioDestinationGStreamer::startTimer(double expiredTime)
+{
+    if (!m_timer.isActive())
+        m_timer.startOneShot(expiredTime);
+}
+
+void AudioDestinationGStreamer::stopTimer()
+{
+    if (m_timer.isActive())
+        m_timer.stop();
 }
 
+void AudioDestinationGStreamer::timerFired(Timer<AudioDestinationGStreamer>*)
+{
+    m_timer.stop();
+#if ENABLE(TIZEN_GSTREAMER_AUDIO)
+    if (m_audioSessionManager && !m_audioSessionManager->setSoundState(ASM_STATE_PAUSE))
+        return;
+#endif
+    gst_element_set_state(m_pipeline, GST_STATE_PAUSED);
+    m_isPlaying = false;
+}
+#endif
 } // namespace WebCore
 
 #endif // ENABLE(WEB_AUDIO)
index a51ca27..5546f14 100644 (file)
@@ -53,6 +53,13 @@ private:
 
     float m_sampleRate;
     bool m_isPlaying;
+#if ENABLE(TIZEN_WEB_AUDIO)
+    void startTimer(double);
+    void stopTimer();
+    void timerFired(Timer<AudioDestinationGStreamer>*);
+
+    Timer<AudioDestinationGStreamer> m_timer;
+#endif
     bool m_wavParserAvailable;
     bool m_audioSinkAvailable;
     GstElement* m_pipeline;
index 72911b7..1c07a33 100755 (executable)
@@ -410,6 +410,13 @@ static GstStateChangeReturn webKitWebAudioSrcChangeState(GstElement* element, Gs
             return GST_STATE_CHANGE_FAILURE;
         }
         break;
+#if ENABLE(TIZEN_WEB_AUDIO)
+    case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
+        GST_DEBUG_OBJECT(src, "PAUSED->PLAYING");
+        if (!gst_task_start(src->priv->task.get()))
+            return GST_STATE_CHANGE_FAILURE;
+        break;
+#endif
     default:
         break;
     }
@@ -421,7 +428,14 @@ static GstStateChangeReturn webKitWebAudioSrcChangeState(GstElement* element, Gs
     }
 
     switch (transition) {
-    case GST_STATE_CHANGE_READY_TO_PAUSED:
+#if ENABLE(TIZEN_WEB_AUDIO)
+    case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
+        GST_DEBUG_OBJECT(src, "PLAYING->PAUSED");
+        if (!gst_task_stop(src->priv->task.get()))
+            returnValue = GST_STATE_CHANGE_FAILURE;
+        break;
+#else
+     case GST_STATE_CHANGE_READY_TO_PAUSED:
         GST_DEBUG_OBJECT(src, "READY->PAUSED");
         if (!gst_task_start(src->priv->task.get()))
             returnValue = GST_STATE_CHANGE_FAILURE;
@@ -431,6 +445,7 @@ static GstStateChangeReturn webKitWebAudioSrcChangeState(GstElement* element, Gs
         if (!gst_task_join(src->priv->task.get()))
             returnValue = GST_STATE_CHANGE_FAILURE;
         break;
+#endif
     default:
         break;
     }