Revert "WebAudio task shouldn't be running on suspend to reduce usage of CPU."
author: KeonHo Kim <keonho07.kim@samsung.com>
Mon, 15 Apr 2013 14:38:24 +0000 (23:38 +0900)
committer: Gerrit Code Review <gerrit2@kim11>
Mon, 15 Apr 2013 14:41:56 +0000 (23:41 +0900)
This reverts commit 7e9ba6264d512cfe2b55e097ac998d5f0c6d805b

13 files changed:
Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
Source/WebCore/Modules/webaudio/AudioContext.cpp [changed mode: 0755->0644]
Source/WebCore/Modules/webaudio/AudioContext.h
Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp
Source/WebCore/Modules/webaudio/AudioDestinationNode.h
Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.cpp
Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp
Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h
Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h
Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp
Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp
Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.h
Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp

index d37807c..32f43d3 100644 (file)
@@ -433,9 +433,6 @@ void AudioBufferSourceNode::startGrain(double when, double grainOffset, double g
     m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset, buffer()->sampleRate());
     
     m_playbackState = SCHEDULED_STATE;
-#if ENABLE(TIZEN_WEB_AUDIO)
-    context()->destination()->startRendering();
-#endif
 }
 
 #if ENABLE(TIZEN_WEB_AUDIO)
old mode 100755 (executable)
new mode 100644 (file)
index 4bfd6a0..fdcbd45
@@ -131,9 +131,6 @@ AudioContext::AudioContext(Document* document)
     , m_graphOwnerThread(UndefinedThreadIdentifier)
     , m_isOfflineContext(false)
     , m_activeSourceCount(0)
-#if ENABLE(TIZEN_WEB_AUDIO)
-    , m_activeScriptProcessorCount(0)
-#endif
 {
     constructCommon();
 
@@ -159,9 +156,6 @@ AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t
     , m_graphOwnerThread(UndefinedThreadIdentifier)
     , m_isOfflineContext(true)
     , m_activeSourceCount(0)
-#if ENABLE(TIZEN_WEB_AUDIO)
-    , m_activeScriptProcessorCount(0)
-#endif
 {
     constructCommon();
 
@@ -217,9 +211,7 @@ void AudioContext::lazyInitialize()
                     // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
                     // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
                     // We may want to consider requiring it for symmetry with OfflineAudioContext.
-#if !ENABLE(TIZEN_WEB_AUDIO)
                     m_destinationNode->startRendering();                    
-#endif
                     ++s_hardwareContextCount;
                 }
 
@@ -448,11 +440,6 @@ PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t buffe
 {
     ASSERT(isMainThread());
     lazyInitialize();
-#if ENABLE(TIZEN_WEB_AUDIO)
-    incrementActiveScriptProcessorCount();
-    destination()->startRendering();
-#endif
-
     RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
 
     if (!node.get()) {
@@ -842,7 +829,7 @@ void AudioContext::deleteMarkedNodes()
             AudioNode* node = m_nodesToDelete[n - 1];
             m_nodesToDelete.removeLast();
 
-             // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
+            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
             unsigned numberOfInputs = node->numberOfInputs();
             for (unsigned i = 0; i < numberOfInputs; ++i)
                 m_dirtySummingJunctions.remove(node->input(i));
@@ -988,35 +975,6 @@ void AudioContext::decrementActiveSourceCount()
     atomicDecrement(&m_activeSourceCount);
 }
 
-#if ENABLE(TIZEN_WEB_AUDIO)
-void AudioContext::incrementActiveScriptProcessorCount()
-{
-    atomicIncrement(&m_activeScriptProcessorCount);
-}
-
-void AudioContext::decrementActiveScriptProcessorCount()
-{
-    atomicDecrement(&m_activeScriptProcessorCount);
-}
-
-void AudioContext::pauseDispatch(void* userData)
-{
-    ASSERT(isMainThread());
-    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
-    ASSERT(context);
-    if (!context)
-        return;
-
-    context->destination()->pauseRendering();
-}
-
-void AudioContext::pause()
-{
-    ASSERT(isAudioThread());
-
-    callOnMainThread(pauseDispatch, this);
-}
-#endif
 } // namespace WebCore
 
 #endif // ENABLE(WEB_AUDIO)
index 34f2390..215f1c6 100644 (file)
@@ -83,7 +83,7 @@ public:
     virtual ~AudioContext();
 
     bool isInitialized() const;
-
+    
     bool isOfflineContext() { return m_isOfflineContext; }
 
     // Returns true when initialize() was called AND all asynchronous initialization has completed.
@@ -91,9 +91,7 @@ public:
 
     // Document notification
     virtual void stop();
-#if ENABLE(TIZEN_WEB_AUDIO)
-    void pause();
-#endif
+
     Document* document() const; // ASSERTs if document no longer exists.
     bool hasDocument();
 
@@ -102,15 +100,10 @@ public:
     double currentTime() const { return m_destinationNode->currentTime(); }
     float sampleRate() const { return m_destinationNode->sampleRate(); }
     unsigned long activeSourceCount() const { return static_cast<unsigned long>(m_activeSourceCount); }
-#if ENABLE(TIZEN_WEB_AUDIO)
-    unsigned long activeScriptProcessorCount() const { return static_cast<unsigned long>(m_activeScriptProcessorCount); }
-#endif
+
     void incrementActiveSourceCount();
     void decrementActiveSourceCount();
-#if ENABLE(TIZEN_WEB_AUDIO)
-    void incrementActiveScriptProcessorCount();
-    void decrementActiveScriptProcessorCount();
-#endif
+    
     PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode&);
     PassRefPtr<AudioBuffer> createBuffer(ArrayBuffer*, bool mixToMono, ExceptionCode&);
 
@@ -271,9 +264,6 @@ private:
     // We'd like to schedule only one stop action for them.
     bool m_isStopScheduled;
     static void uninitializeDispatch(void* userData);
-#if ENABLE(TIZEN_WEB_AUDIO)
-    static void pauseDispatch(void* userData);
-#endif
     void clear();
 
     void scheduleNodeDeletion();
@@ -358,9 +348,6 @@ private:
 
     // Number of AudioBufferSourceNodes that are active (playing).
     int m_activeSourceCount;
-#if ENABLE(TIZEN_WEB_AUDIO)
-    int m_activeScriptProcessorCount;
-#endif
 };
 
 } // WebCore
index 982c671..9ca44b4 100644 (file)
@@ -87,11 +87,6 @@ void AudioDestinationNode::render(AudioBus* sourceBus, AudioBus* destinationBus,
         return;
     }
 
-#if ENABLE(TIZEN_WEB_AUDIO)
-    if (!context()->activeSourceCount() && !context()->activeScriptProcessorCount())
-        destinationBus->zero();
-#endif
-
     // Let the context take care of any business at the start of each render quantum.
     context()->handlePreRenderTasks();
 
@@ -118,10 +113,6 @@ void AudioDestinationNode::render(AudioBus* sourceBus, AudioBus* destinationBus,
     
     // Advance current sample-frame.
     m_currentSampleFrame += numberOfFrames;
-#if ENABLE(TIZEN_WEB_AUDIO)
-    if (!context()->activeSourceCount() && !context()->activeScriptProcessorCount())
-        context()->pause();
-#endif
 }
 
 } // namespace WebCore
index 17093f1..121403d 100644 (file)
@@ -67,9 +67,6 @@ public:
 
     virtual void enableInput() = 0;
     virtual void startRendering() = 0;
-#if ENABLE(TIZEN_WEB_AUDIO)
-    virtual void pauseRendering() = 0;
-#endif
 
     AudioSourceProvider* localAudioInputProvider() { return &m_localAudioInputProvider; }
     
index 4cfcc13..9a79e2b 100644 (file)
@@ -143,8 +143,6 @@ void AudioScheduledSourceNode::start(double when, ExceptionCode& ec)
 
     m_startTime = when;
     m_playbackState = SCHEDULED_STATE;
-
-    context()->destination()->startRendering();
 }
 
 void AudioScheduledSourceNode::stop(double when, ExceptionCode& ec)
index 369dd9e..8c43329 100644 (file)
@@ -97,14 +97,6 @@ void DefaultAudioDestinationNode::startRendering()
         m_destination->start();
 }
 
-#if ENABLE(TIZEN_WEB_AUDIO)
-void DefaultAudioDestinationNode::pauseRendering()
-{
-    ASSERT(isInitialized());
-    if (isInitialized())
-        m_destination->stop();
-}
-#endif
 } // namespace WebCore
 
 #endif // ENABLE(WEB_AUDIO)
index da8eb6a..2b999ad 100644 (file)
@@ -49,9 +49,7 @@ public:
     // AudioDestinationNode
     virtual void enableInput() OVERRIDE;
     virtual void startRendering() OVERRIDE;
-#if ENABLE(TIZEN_WEB_AUDIO)
-    virtual void pauseRendering() OVERRIDE;
-#endif
+    
 private:
     explicit DefaultAudioDestinationNode(AudioContext*);
     void createDestination();
index b5fb236..cfc767c 100644 (file)
@@ -52,9 +52,7 @@ public:
     // AudioDestinationNode
     virtual void enableInput() OVERRIDE { };
     virtual void startRendering() OVERRIDE;
-#if ENABLE(TIZEN_WEB_AUDIO)
-    virtual void pauseRendering() OVERRIDE { };
-#endif
+
     virtual float sampleRate()  const { return m_renderTarget->sampleRate(); }
 
 private:
index f71cb12..1f2f13d 100644 (file)
@@ -129,10 +129,6 @@ void ScriptProcessorNode::uninitialize()
     m_outputBuffers.clear();
 
     AudioNode::uninitialize();
-
-#if ENABLE(TIZEN_WEB_AUDIO)
-    context()->decrementActiveScriptProcessorCount();
-#endif
 }
 
 void ScriptProcessorNode::process(size_t framesToProcess)
@@ -142,7 +138,7 @@ void ScriptProcessorNode::process(size_t framesToProcess)
     // Additionally, there is a double-buffering for input and output which is exposed directly to JavaScript (see inputBuffer and outputBuffer below).
     // This node is the producer for inputBuffer and the consumer for outputBuffer.
     // The JavaScript code is the consumer of inputBuffer and the producer for outputBuffer.
-
+    
     // Get input and output busses.
     AudioBus* inputBus = this->input(0)->bus();
     AudioBus* outputBus = this->output(0)->bus();
@@ -153,7 +149,7 @@ void ScriptProcessorNode::process(size_t framesToProcess)
     ASSERT(isDoubleBufferIndexGood);
     if (!isDoubleBufferIndexGood)
         return;
-
+    
     AudioBuffer* inputBuffer = m_inputBuffers[doubleBufferIndex].get();
     AudioBuffer* outputBuffer = m_outputBuffers[doubleBufferIndex].get();
 
index 9d987e7..1f126cf 100755 (executable)
@@ -121,9 +121,6 @@ AudioDestinationGStreamer::AudioDestinationGStreamer(AudioIOCallback& callback,
     , m_renderBus(2, framesToPull, false)
     , m_sampleRate(sampleRate)
     , m_isPlaying(false)
-#if ENABLE(TIZEN_WEB_AUDIO)
-    , m_timer(this, &AudioDestinationGStreamer::timerFired)
-#endif
 #if ENABLE(TIZEN_GSTREAMER_AUDIO)
     , m_audioSessionManager(AudioSessionManagerGStreamerTizen::createAudioSessionManager())
 #endif
@@ -170,10 +167,6 @@ AudioDestinationGStreamer::~AudioDestinationGStreamer()
     g_signal_handlers_disconnect_by_func(bus.get(), reinterpret_cast<gpointer>(messageCallback), this);
     gst_bus_remove_signal_watch(bus.get());
 
-#if ENABLE(TIZEN_WEB_AUDIO)
-    stopTimer();
-#endif
-
     gst_element_set_state(m_pipeline, GST_STATE_NULL);
 
 #if ENABLE(TIZEN_GSTREAMER_AUDIO)
@@ -264,6 +257,14 @@ gboolean AudioDestinationGStreamer::handleMessage(GstMessage* message)
 #endif
         m_isPlaying = false;
         break;
+
+#if ENABLE(TIZEN_GSTREAMER_AUDIO)
+    case GST_MESSAGE_EOS:
+        gst_element_set_state(m_pipeline, GST_STATE_NULL);
+        if (m_audioSessionManager)
+            m_audioSessionManager->setSoundState(ASM_STATE_STOP);
+        break;
+#endif
     default:
         break;
     }
@@ -275,63 +276,30 @@ void AudioDestinationGStreamer::start()
     ASSERT(m_wavParserAvailable);
     if (!m_wavParserAvailable)
         return;
-#if ENABLE(TIZEN_WEB_AUDIO)
-    stopTimer();
-    if (m_isPlaying)
-        return;
 #if ENABLE(TIZEN_GSTREAMER_AUDIO)
     if (m_audioSessionManager && !m_audioSessionManager->setSoundState(ASM_STATE_PLAYING))
         return;
 #endif
-#endif
     gst_element_set_state(m_pipeline, GST_STATE_PLAYING);
     m_isPlaying = true;
 }
 
 void AudioDestinationGStreamer::stop()
 {
-#if ENABLE(TIZEN_WEB_AUDIO)
     ASSERT(m_wavParserAvailable && m_audioSinkAvailable);
+#if ENABLE(TIZEN_GSTREAMER_AUDIO)
     if (!m_wavParserAvailable || !m_audioSinkAvailable)
-        return;
-    if (!m_isPlaying)
-        return;
-
-    startTimer(0.7);
+       return;
+    if (m_audioSessionManager && !m_audioSessionManager->setSoundState(ASM_STATE_PAUSE))
+       return;
 #else
-    ASSERT(m_wavParserAvailable && m_audioSinkAvailable);
     if (!m_wavParserAvailable || m_audioSinkAvailable)
-        return;
-
-    gst_element_set_state(m_pipeline, GST_STATE_PAUSED);
-    m_isPlaying = false;
-#endif
-}
-
-#if ENABLE(TIZEN_WEB_AUDIO)
-void AudioDestinationGStreamer::startTimer(double expiredTime)
-{
-    if (!m_timer.isActive())
-        m_timer.startOneShot(expiredTime);
-}
-
-void AudioDestinationGStreamer::stopTimer()
-{
-    if (m_timer.isActive())
-        m_timer.stop();
-}
-
-void AudioDestinationGStreamer::timerFired(Timer<AudioDestinationGStreamer>*)
-{
-    m_timer.stop();
-#if ENABLE(TIZEN_GSTREAMER_AUDIO)
-    if (m_audioSessionManager && !m_audioSessionManager->setSoundState(ASM_STATE_PAUSE))
-        return;
+       return;
 #endif
     gst_element_set_state(m_pipeline, GST_STATE_PAUSED);
     m_isPlaying = false;
 }
-#endif
+
 } // namespace WebCore
 
 #endif // ENABLE(WEB_AUDIO)
index 5546f14..a51ca27 100644 (file)
@@ -53,13 +53,6 @@ private:
 
     float m_sampleRate;
     bool m_isPlaying;
-#if ENABLE(TIZEN_WEB_AUDIO)
-    void startTimer(double);
-    void stopTimer();
-    void timerFired(Timer<AudioDestinationGStreamer>*);
-
-    Timer<AudioDestinationGStreamer> m_timer;
-#endif
     bool m_wavParserAvailable;
     bool m_audioSinkAvailable;
     GstElement* m_pipeline;
index 1c07a33..72911b7 100755 (executable)
@@ -410,13 +410,6 @@ static GstStateChangeReturn webKitWebAudioSrcChangeState(GstElement* element, Gs
             return GST_STATE_CHANGE_FAILURE;
         }
         break;
-#if ENABLE(TIZEN_WEB_AUDIO)
-    case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
-        GST_DEBUG_OBJECT(src, "PAUSED->PLAYING");
-        if (!gst_task_start(src->priv->task.get()))
-            return GST_STATE_CHANGE_FAILURE;
-        break;
-#endif
     default:
         break;
     }
@@ -428,14 +421,7 @@ static GstStateChangeReturn webKitWebAudioSrcChangeState(GstElement* element, Gs
     }
 
     switch (transition) {
-#if ENABLE(TIZEN_WEB_AUDIO)
-    case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
-        GST_DEBUG_OBJECT(src, "PLAYING->PAUSED");
-        if (!gst_task_stop(src->priv->task.get()))
-            returnValue = GST_STATE_CHANGE_FAILURE;
-        break;
-#else
-     case GST_STATE_CHANGE_READY_TO_PAUSED:
+    case GST_STATE_CHANGE_READY_TO_PAUSED:
         GST_DEBUG_OBJECT(src, "READY->PAUSED");
         if (!gst_task_start(src->priv->task.get()))
             returnValue = GST_STATE_CHANGE_FAILURE;
@@ -445,7 +431,6 @@ static GstStateChangeReturn webKitWebAudioSrcChangeState(GstElement* element, Gs
         if (!gst_task_join(src->priv->task.get()))
             returnValue = GST_STATE_CHANGE_FAILURE;
         break;
-#endif
     default:
         break;
     }