[Title] WebAudio task should not run while suspended, to reduce CPU usage.
[Problem] webKitWebAudioSrcLoop keeps running even after the browser is suspended.
[Cause] The GStreamer task is never stopped, so the pipeline keeps running.
[Solution] Implement pauseRendering() to pause the GStreamer task.
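
The flow, roughly: when a render quantum ends with no active source nodes and
no active ScriptProcessorNodes, AudioContext::pause() dispatches
pauseRendering() to the main thread; the GStreamer destination then waits
briefly (a 0.7 s one-shot timer, cancelled if playback restarts) before moving
the pipeline to GST_STATE_PAUSED, which stops the src task. Below is a minimal
standalone sketch of this idle-pause pattern; all names (FakeDestination etc.)
are illustrative only, not actual WebKit/Tizen code:

    // Illustrative only: mirrors the patch's idle-pause logic in plain C++.
    #include <atomic>
    #include <chrono>
    #include <iostream>
    #include <thread>

    struct FakeDestination {
        std::atomic<int> activeSources{0};
        std::atomic<bool> playing{true};
        std::chrono::steady_clock::time_point idleSince{};

        void renderQuantum() // called once per audio render quantum
        {
            if (!activeSources && playing) {
                if (idleSince == std::chrono::steady_clock::time_point{})
                    idleSince = std::chrono::steady_clock::now(); // start grace period
                else if (std::chrono::steady_clock::now() - idleSince > std::chrono::milliseconds(700)) {
                    playing = false; // ~ gst_element_set_state(pipeline, GST_STATE_PAUSED)
                    std::cout << "paused\n";
                }
            }
        }

        void startSource() // ~ AudioScheduledSourceNode::start() -> startRendering()
        {
            ++activeSources;
            idleSince = {}; // cancel the pending pause, like stopTimer() in start()
            if (!playing.exchange(true)) // PAUSED->PLAYING restarts the task
                std::cout << "resumed\n";
        }

        void stopSource() { --activeSources; }
    };

    int main()
    {
        FakeDestination dest;
        dest.startSource();
        dest.stopSource(); // idle from here on
        for (int i = 0; i < 12; ++i) { // ~1.2 s of 100 ms render quanta
            dest.renderQuantum();
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
        }
        // prints "paused" once the 0.7 s grace period elapses
    }
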
Change-Id: I05f60f415c8a10226a97885130afa0e005ea5ec8
m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset, buffer()->sampleRate());
m_playbackState = SCHEDULED_STATE;
+#if ENABLE(TIZEN_WEB_AUDIO)
+ context()->destination()->startRendering();
+#endif
}
#if ENABLE(TIZEN_WEB_AUDIO)
, m_graphOwnerThread(UndefinedThreadIdentifier)
, m_isOfflineContext(false)
, m_activeSourceCount(0)
+#if ENABLE(TIZEN_WEB_AUDIO)
+ , m_activeScriptProcessorCount(0)
+#endif
{
constructCommon();
, m_graphOwnerThread(UndefinedThreadIdentifier)
, m_isOfflineContext(true)
, m_activeSourceCount(0)
+#if ENABLE(TIZEN_WEB_AUDIO)
+ , m_activeScriptProcessorCount(0)
+#endif
{
constructCommon();
// Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
// NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
// We may want to consider requiring it for symmetry with OfflineAudioContext.
+#if !ENABLE(TIZEN_WEB_AUDIO)
m_destinationNode->startRendering();
+#endif
++s_hardwareContextCount;
}
{
ASSERT(isMainThread());
lazyInitialize();
+#if ENABLE(TIZEN_WEB_AUDIO)
+ incrementActiveScriptProcessorCount();
+ destination()->startRendering();
+#endif
+
RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
if (!node.get()) {
AudioNode* node = m_nodesToDelete[n - 1];
m_nodesToDelete.removeLast();
- // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
+ // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
unsigned numberOfInputs = node->numberOfInputs();
for (unsigned i = 0; i < numberOfInputs; ++i)
m_dirtySummingJunctions.remove(node->input(i));
atomicDecrement(&m_activeSourceCount);
}
+#if ENABLE(TIZEN_WEB_AUDIO)
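+// ScriptProcessorNodes can produce output even with no active source nodes, so
+// they are counted separately; both counts must reach zero before rendering pauses.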
+void AudioContext::incrementActiveScriptProcessorCount()
+{
+ atomicIncrement(&m_activeScriptProcessorCount);
+}
+
+void AudioContext::decrementActiveScriptProcessorCount()
+{
+ atomicDecrement(&m_activeScriptProcessorCount);
+}
+
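+// pause() runs on the audio thread at the end of a render quantum, but the
+// backend must be paused from the main thread, hence the dispatch below.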
+void AudioContext::pauseDispatch(void* userData)
+{
+ ASSERT(isMainThread());
+ AudioContext* context = reinterpret_cast<AudioContext*>(userData);
+ ASSERT(context);
+ if (!context)
+ return;
+
+ context->destination()->pauseRendering();
+}
+
+void AudioContext::pause()
+{
+ ASSERT(isAudioThread());
+
+ callOnMainThread(pauseDispatch, this);
+}
+#endif
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
virtual ~AudioContext();
bool isInitialized() const;
-
+
bool isOfflineContext() { return m_isOfflineContext; }
// Returns true when initialize() was called AND all asynchronous initialization has completed.
// Document notification
virtual void stop();
-
+#if ENABLE(TIZEN_WEB_AUDIO)
+ void pause();
+#endif
Document* document() const; // ASSERTs if document no longer exists.
bool hasDocument();
double currentTime() const { return m_destinationNode->currentTime(); }
float sampleRate() const { return m_destinationNode->sampleRate(); }
unsigned long activeSourceCount() const { return static_cast<unsigned long>(m_activeSourceCount); }
-
+#if ENABLE(TIZEN_WEB_AUDIO)
+ unsigned long activeScriptProcessorCount() const { return static_cast<unsigned long>(m_activeScriptProcessorCount); }
+#endif
void incrementActiveSourceCount();
void decrementActiveSourceCount();
-
+#if ENABLE(TIZEN_WEB_AUDIO)
+ void incrementActiveScriptProcessorCount();
+ void decrementActiveScriptProcessorCount();
+#endif
PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode&);
PassRefPtr<AudioBuffer> createBuffer(ArrayBuffer*, bool mixToMono, ExceptionCode&);
// We'd like to schedule only one stop action for them.
bool m_isStopScheduled;
static void uninitializeDispatch(void* userData);
+#if ENABLE(TIZEN_WEB_AUDIO)
+ static void pauseDispatch(void* userData);
+#endif
void clear();
void scheduleNodeDeletion();
// Number of AudioBufferSourceNodes that are active (playing).
int m_activeSourceCount;
+#if ENABLE(TIZEN_WEB_AUDIO)
+ int m_activeScriptProcessorCount;
+#endif
};
} // WebCore
return;
}
+#if ENABLE(TIZEN_WEB_AUDIO)
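+ // Nothing is producing audio: silence the output so stale samples are not
+ // replayed while the context winds down.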
+ if (!context()->activeSourceCount() && !context()->activeScriptProcessorCount())
+ destinationBus->zero();
+#endif
+
// Let the context take care of any business at the start of each render quantum.
context()->handlePreRenderTasks();
// Advance current sample-frame.
m_currentSampleFrame += numberOfFrames;
+#if ENABLE(TIZEN_WEB_AUDIO)
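+ // Still idle after advancing the sample frame: ask the context to pause the
+ // backend (marshalled to the main thread by AudioContext::pause()).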
+ if (!context()->activeSourceCount() && !context()->activeScriptProcessorCount())
+ context()->pause();
+#endif
}
} // namespace WebCore
virtual void enableInput() = 0;
virtual void startRendering() = 0;
+#if ENABLE(TIZEN_WEB_AUDIO)
+ virtual void pauseRendering() = 0;
+#endif
AudioSourceProvider* localAudioInputProvider() { return &m_localAudioInputProvider; }
m_startTime = when;
m_playbackState = SCHEDULED_STATE;
+#if ENABLE(TIZEN_WEB_AUDIO)
+ context()->destination()->startRendering();
+#endif
}
void AudioScheduledSourceNode::stop(double when, ExceptionCode& ec)
m_destination->start();
}
+#if ENABLE(TIZEN_WEB_AUDIO)
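+// Note: stop() does not pause immediately; AudioDestinationGStreamer defers the
+// GST_STATE_PAUSED transition via a short one-shot timer (see below).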
+void DefaultAudioDestinationNode::pauseRendering()
+{
+ ASSERT(isInitialized());
+ if (isInitialized())
+ m_destination->stop();
+}
+#endif
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
// AudioDestinationNode
virtual void enableInput() OVERRIDE;
virtual void startRendering() OVERRIDE;
-
+#if ENABLE(TIZEN_WEB_AUDIO)
+ virtual void pauseRendering() OVERRIDE;
+#endif
private:
explicit DefaultAudioDestinationNode(AudioContext*);
void createDestination();
// AudioDestinationNode
virtual void enableInput() OVERRIDE { };
virtual void startRendering() OVERRIDE;
-
+#if ENABLE(TIZEN_WEB_AUDIO)
+ virtual void pauseRendering() OVERRIDE { };
+#endif
virtual float sampleRate() const { return m_renderTarget->sampleRate(); }
private:
m_outputBuffers.clear();
AudioNode::uninitialize();
+
+#if ENABLE(TIZEN_WEB_AUDIO)
+ context()->decrementActiveScriptProcessorCount();
+#endif
}
void ScriptProcessorNode::process(size_t framesToProcess)
// Additionally, there is a double-buffering for input and output which is exposed directly to JavaScript (see inputBuffer and outputBuffer below).
// This node is the producer for inputBuffer and the consumer for outputBuffer.
// The JavaScript code is the consumer of inputBuffer and the producer for outputBuffer.
-
+
// Get input and output busses.
AudioBus* inputBus = this->input(0)->bus();
AudioBus* outputBus = this->output(0)->bus();
ASSERT(isDoubleBufferIndexGood);
if (!isDoubleBufferIndexGood)
return;
-
+
AudioBuffer* inputBuffer = m_inputBuffers[doubleBufferIndex].get();
AudioBuffer* outputBuffer = m_outputBuffers[doubleBufferIndex].get();
, m_renderBus(2, framesToPull, false)
, m_sampleRate(sampleRate)
, m_isPlaying(false)
+#if ENABLE(TIZEN_WEB_AUDIO)
+ , m_timer(this, &AudioDestinationGStreamer::timerFired)
+#endif
#if ENABLE(TIZEN_GSTREAMER_AUDIO)
, m_audioSessionManager(AudioSessionManagerGStreamerTizen::createAudioSessionManager())
#endif
g_signal_handlers_disconnect_by_func(bus.get(), reinterpret_cast<gpointer>(messageCallback), this);
gst_bus_remove_signal_watch(bus.get());
+#if ENABLE(TIZEN_WEB_AUDIO)
+ stopTimer();
+#endif
+
gst_element_set_state(m_pipeline, GST_STATE_NULL);
#if ENABLE(TIZEN_GSTREAMER_AUDIO)
#endif
m_isPlaying = false;
break;
-
-#if ENABLE(TIZEN_GSTREAMER_AUDIO)
- case GST_MESSAGE_EOS:
- gst_element_set_state(m_pipeline, GST_STATE_NULL);
- if (m_audioSessionManager)
- m_audioSessionManager->setSoundState(ASM_STATE_STOP);
- break;
-#endif
default:
break;
}
ASSERT(m_wavParserAvailable);
if (!m_wavParserAvailable)
return;
+#if ENABLE(TIZEN_WEB_AUDIO)
+ stopTimer();
+ if (m_isPlaying)
+ return;
#if ENABLE(TIZEN_GSTREAMER_AUDIO)
if (m_audioSessionManager && !m_audioSessionManager->setSoundState(ASM_STATE_PLAYING))
return;
#endif
+#endif
gst_element_set_state(m_pipeline, GST_STATE_PLAYING);
m_isPlaying = true;
}
void AudioDestinationGStreamer::stop()
{
+#if ENABLE(TIZEN_WEB_AUDIO)
ASSERT(m_wavParserAvailable && m_audioSinkAvailable);
-#if ENABLE(TIZEN_GSTREAMER_AUDIO)
if (!m_wavParserAvailable || !m_audioSinkAvailable)
- return;
- if (m_audioSessionManager && !m_audioSessionManager->setSoundState(ASM_STATE_PAUSE))
- return;
+ return;
+ if (!m_isPlaying)
+ return;
+
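+ // Defer the pause by 0.7 s; start() cancels this timer, so a quickly
+ // rescheduled source does not bounce the pipeline state.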
+ startTimer(0.7);
#else
+ ASSERT(m_wavParserAvailable && m_audioSinkAvailable);
if (!m_wavParserAvailable || !m_audioSinkAvailable)
- return;
-#endif
+ return;
+
gst_element_set_state(m_pipeline, GST_STATE_PAUSED);
m_isPlaying = false;
+#endif
+}
+
+#if ENABLE(TIZEN_WEB_AUDIO)
+void AudioDestinationGStreamer::startTimer(double expiredTime)
+{
+ if (!m_timer.isActive())
+ m_timer.startOneShot(expiredTime);
+}
+
+void AudioDestinationGStreamer::stopTimer()
+{
+ if (m_timer.isActive())
+ m_timer.stop();
+}
+
+void AudioDestinationGStreamer::timerFired(Timer<AudioDestinationGStreamer>*)
+{
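+ // The grace period elapsed without start() being called again; pause for real.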
+ m_timer.stop();
+#if ENABLE(TIZEN_GSTREAMER_AUDIO)
+ if (m_audioSessionManager && !m_audioSessionManager->setSoundState(ASM_STATE_PAUSE))
+ return;
+#endif
+ gst_element_set_state(m_pipeline, GST_STATE_PAUSED);
+ m_isPlaying = false;
+}
+#endif
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
float m_sampleRate;
bool m_isPlaying;
+#if ENABLE(TIZEN_WEB_AUDIO)
+ void startTimer(double);
+ void stopTimer();
+ void timerFired(Timer<AudioDestinationGStreamer>*);
+
+ Timer<AudioDestinationGStreamer> m_timer;
+#endif
bool m_wavParserAvailable;
bool m_audioSinkAvailable;
GstElement* m_pipeline;
return GST_STATE_CHANGE_FAILURE;
}
break;
+#if ENABLE(TIZEN_WEB_AUDIO)
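+ // Resuming: PAUSED->PLAYING must restart the src task that was stopped on suspend.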
+ case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
+ GST_DEBUG_OBJECT(src, "PAUSED->PLAYING");
+ if (!gst_task_start(src->priv->task.get()))
+ return GST_STATE_CHANGE_FAILURE;
+ break;
+#endif
default:
break;
}
}
switch (transition) {
- case GST_STATE_CHANGE_READY_TO_PAUSED:
+#if ENABLE(TIZEN_WEB_AUDIO)
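+ // Suspending: stopping the task here is what keeps webKitWebAudioSrcLoop from
+ // running (and burning CPU) while the pipeline is paused.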
+ case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
+ GST_DEBUG_OBJECT(src, "PLAYING->PAUSED");
+ if (!gst_task_stop(src->priv->task.get()))
+ returnValue = GST_STATE_CHANGE_FAILURE;
+ break;
+#else
+ case GST_STATE_CHANGE_READY_TO_PAUSED:
GST_DEBUG_OBJECT(src, "READY->PAUSED");
if (!gst_task_start(src->priv->task.get()))
returnValue = GST_STATE_CHANGE_FAILURE;
if (!gst_task_join(src->priv->task.get()))
returnValue = GST_STATE_CHANGE_FAILURE;
break;
+#endif
default:
break;
}