2 * Copyright (C) 2010, Google Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
17 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include "AudioContext.h"
31 #include "AnalyserNode.h"
32 #include "AsyncAudioDecoder.h"
33 #include "AudioBuffer.h"
34 #include "AudioBufferCallback.h"
35 #include "AudioBufferSourceNode.h"
36 #include "AudioListener.h"
37 #include "AudioNodeInput.h"
38 #include "AudioNodeOutput.h"
39 #include "BiquadFilterNode.h"
40 #include "ChannelMergerNode.h"
41 #include "ChannelSplitterNode.h"
42 #include "ConvolverNode.h"
43 #include "DefaultAudioDestinationNode.h"
44 #include "DelayNode.h"
46 #include "DynamicsCompressorNode.h"
47 #include "ExceptionCode.h"
50 #include "HRTFDatabaseLoader.h"
51 #include "HRTFPanner.h"
52 #include "OfflineAudioCompletionEvent.h"
53 #include "OfflineAudioDestinationNode.h"
54 #include "OscillatorNode.h"
55 #include "PannerNode.h"
56 #include "PlatformString.h"
57 #include "ScriptCallStack.h"
58 #include "ScriptProcessorNode.h"
59 #include "WaveShaperNode.h"
60 #include "WaveTable.h"
62 #if ENABLE(MEDIA_STREAM)
63 #include "MediaStream.h"
64 #include "MediaStreamAudioDestinationNode.h"
65 #include "MediaStreamAudioSourceNode.h"
69 #include "HTMLMediaElement.h"
70 #include "MediaElementAudioSourceNode.h"
73 #if DEBUG_AUDIONODE_REFERENCES
78 #include "GStreamerUtilities.h"
81 #include <wtf/ArrayBuffer.h>
82 #include <wtf/Atomics.h>
83 #include <wtf/MainThread.h>
84 #include <wtf/OwnPtr.h>
85 #include <wtf/PassOwnPtr.h>
86 #include <wtf/RefCounted.h>
// Sentinel stored in m_graphOwnerThread whenever no thread holds the context
// graph lock (see AudioContext::lock()/unlock() below).
88 // FIXME: check the proper way to reference an undefined thread ID
89 const int UndefinedThreadIdentifier = 0xffffffff;
// Per the name, caps how many marked nodes get deleted in a single pass so
// cleanup cost stays bounded per render quantum — TODO confirm against the
// (elided) deletion loop that consumes it.
91 const unsigned MaxNodesToDeletePerQuantum = 10;
// Returns true when |sampleRate| (Hz) is inside the range this implementation
// supports: [44100, 96000].
95 bool AudioContext::isSampleRateRangeGood(float sampleRate)
97 // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
98 // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
99 return sampleRate >= 44100 && sampleRate <= 96000;
102 // Don't allow more than this number of simultaneous AudioContexts talking to hardware.
103 const unsigned MaxHardwareContexts = 4;
// Count of live hardware-backed (non-offline) contexts; incremented in
// lazyInitialize() and decremented in uninitialize().
104 unsigned AudioContext::s_hardwareContextCount = 0;
// Factory for a realtime (hardware) AudioContext. Main-thread only. Refuses
// when MaxHardwareContexts contexts already exist — the failure path (setting
// |ec| and returning null, presumably) is elided from this excerpt; confirm
// against the full file.
106 PassRefPtr<AudioContext> AudioContext::create(Document* document, ExceptionCode& ec)
111 ASSERT(isMainThread());
112 if (s_hardwareContextCount >= MaxHardwareContexts)
115 RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document)));
// ActiveDOMObject bookkeeping: lets the context suspend with its page.
116 audioContext->suspendIfNeeded();
117 return audioContext.release();
120 // Constructor for rendering to the audio hardware.
121 AudioContext::AudioContext(Document* document)
122 : ActiveDOMObject(document, this)
123 , m_isStopScheduled(false)
124 , m_isInitialized(false)
125 , m_isAudioThreadFinished(false)
126 , m_destinationNode(0)
127 , m_isDeletionScheduled(false)
128 , m_automaticPullNodesNeedUpdating(false)
129 , m_connectionCount(0)
// No thread owns the graph lock yet.
131 , m_graphOwnerThread(UndefinedThreadIdentifier)
132 , m_isOfflineContext(false)
133 , m_activeSourceCount(0)
// Tizen-only member; guarded so the initializer list matches the header.
134 #if ENABLE(TIZEN_WEB_AUDIO)
135 , m_activeScriptProcessorCount(0)
// Realtime contexts render to the default (hardware) destination node.
140 m_destinationNode = DefaultAudioDestinationNode::create(this);
142 // This sets in motion an asynchronous loading mechanism on another thread.
143 // We can check m_hrtfDatabaseLoader->isLoaded() to find out whether or not it has been fully loaded.
144 // It's not that useful to have a callback function for this since the audio thread automatically starts rendering on the graph
145 // when this has finished (see AudioDestinationNode).
146 m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate());
149 // Constructor for offline (non-realtime) rendering.
150 AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
151 : ActiveDOMObject(document, this)
152 , m_isStopScheduled(false)
153 , m_isInitialized(false)
154 , m_isAudioThreadFinished(false)
155 , m_destinationNode(0)
156 , m_automaticPullNodesNeedUpdating(false)
157 , m_connectionCount(0)
// No thread owns the graph lock yet.
159 , m_graphOwnerThread(UndefinedThreadIdentifier)
160 , m_isOfflineContext(true)
161 , m_activeSourceCount(0)
162 #if ENABLE(TIZEN_WEB_AUDIO)
163 , m_activeScriptProcessorCount(0)
168 // FIXME: the passed in sampleRate MUST match the hardware sample-rate since HRTFDatabaseLoader is a singleton.
169 m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate);
171 // Create a new destination for offline rendering.
// m_renderTarget receives the rendered audio; fireCompletionEvent() later
// hands it to the OfflineAudioCompletionEvent.
172 m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
173 m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
// Shared initialization for both constructors: pins the object alive as an
// ActiveDOMObject, readies platform audio infrastructure, and creates the
// per-context AudioListener.
176 void AudioContext::constructCommon()
178 // According to spec AudioContext must die only after page navigate.
179 // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
180 setPendingActivity(this);
// GStreamer-backed ports need the media framework up before any rendering.
183 initializeGStreamer();
// One-time FFT table setup used by analysis/convolution nodes.
186 FFTFrame::initialize();
188 m_listener = AudioListener::create();
// Destructor — only sanity checks. All real teardown happens earlier in
// uninitialize()/clear(); by the time we get here every node list must be
// empty and stop() must have been scheduled.
191 AudioContext::~AudioContext()
193 #if DEBUG_AUDIONODE_REFERENCES
194 fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
196 // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
197 ASSERT(!m_isInitialized);
198 ASSERT(m_isStopScheduled);
199 ASSERT(!m_nodesToDelete.size());
200 ASSERT(!m_referencedNodes.size());
201 ASSERT(!m_finishedNodes.size());
202 ASSERT(!m_automaticPullNodes.size());
203 ASSERT(!m_renderingAutomaticPullNodes.size());
// Idempotent deferred initialization: brings up the destination node and, for
// realtime contexts, starts the audio thread and bumps the global hardware
// context count. Must never run again after uninitialize().
206 void AudioContext::lazyInitialize()
208 if (!m_isInitialized) {
209 // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
210 ASSERT(!m_isAudioThreadFinished);
211 if (!m_isAudioThreadFinished) {
212 if (m_destinationNode.get()) {
213 m_destinationNode->initialize();
215 if (!isOfflineContext()) {
216 // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
217 // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
218 // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
219 // We may want to consider requiring it for symmetry with OfflineAudioContext.
// On Tizen, rendering start is deferred (see createScriptProcessor, which
// calls destination()->startRendering() itself), so skip it here.
220 #if !ENABLE(TIZEN_WEB_AUDIO)
221 m_destinationNode->startRendering();
223 ++s_hardwareContextCount;
227 m_isInitialized = true;
// Final teardown after the audio thread is gone: drop the destination node
// (which holds a reference back to us), drain all deletion queues ourselves,
// then release the pending-activity ref taken in constructCommon().
232 void AudioContext::clear()
234 // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
235 if (m_destinationNode)
236 m_destinationNode.clear();
238 // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
// Loop (do/while — opening elided in this excerpt) until deleteMarkedNodes()
// has consumed everything, since deletions can mark further nodes.
241 m_nodesToDelete.append(m_nodesMarkedForDeletion);
242 m_nodesMarkedForDeletion.clear();
243 } while (m_nodesToDelete.size());
245 // It was set in constructCommon.
246 unsetPendingActivity(this);
// Stops the audio thread and all rendering; main-thread only, idempotent via
// the m_isInitialized guard. Note the Tizen variant clears m_isInitialized
// *before* tearing down while upstream clears it after — ordering matters for
// code observing m_isInitialized concurrently; TODO confirm intent.
249 void AudioContext::uninitialize()
251 ASSERT(isMainThread());
253 if (!m_isInitialized)
256 #if ENABLE(TIZEN_WEB_AUDIO)
257 m_isInitialized = false;
260 // This stops the audio thread and all audio rendering.
261 m_destinationNode->uninitialize();
263 // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
264 m_isAudioThreadFinished = true;
// Realtime contexts give back their slot in the global hardware-context quota.
266 if (!isOfflineContext()) {
267 ASSERT(s_hardwareContextCount);
268 --s_hardwareContextCount;
271 // Get rid of the sources which may still be playing.
272 derefUnfinishedSourceNodes();
274 #if !ENABLE(TIZEN_WEB_AUDIO)
275 m_isInitialized = false;
// Trivial accessor for the initialization flag.
279 bool AudioContext::isInitialized() const
281 return m_isInitialized;
// "Runnable" = initialized AND the async HRTF database has finished loading;
// rendering with spatialization cannot start before both hold.
284 bool AudioContext::isRunnable() const
286 if (!isInitialized())
289 // Check with the HRTF spatialization system to see if it's finished loading.
290 return m_hrtfDatabaseLoader->isLoaded();
// Main-thread trampoline for callOnMainThread(): recovers the context pointer
// and runs uninitialize().
293 void AudioContext::uninitializeDispatch(void* userData)
295 AudioContext* context = reinterpret_cast<AudioContext*>(userData);
300 context->uninitialize();
// ActiveDOMObject::stop() override. Guards against the double call the
// ScriptExecutionContext is known to make, then defers the real teardown.
304 void AudioContext::stop()
306 // Usually ScriptExecutionContext calls stop twice.
307 if (m_isStopScheduled)
309 m_isStopScheduled = true;
311 // Don't call uninitialize() immediately here because the ScriptExecutionContext is in the middle
312 // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
313 // ActiveDOMObjects so let's schedule uninitialize() to be called later.
314 // FIXME: see if there's a more direct way to handle this issue.
315 callOnMainThread(uninitializeDispatch, this);
// Creates an empty AudioBuffer of the given geometry. On allocation failure
// the elided branch presumably sets |ec| and returns null — confirm against
// the full file.
318 PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
320 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
321 if (!audioBuffer.get()) {
// Synchronously decodes encoded audio file data into an AudioBuffer at the
// context's sample rate, optionally mixing down to mono.
329 PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionCode& ec)
337 RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
338 if (!audioBuffer.get()) {
// Asynchronous decode: hands the work to m_audioDecoder, which invokes
// successCallback/errorCallback later.
346 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionCode& ec)
352 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
// Creates a one-shot buffer-playback source node at the destination's rate.
355 PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
357 ASSERT(isMainThread());
359 RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
361 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
362 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
// Wraps an HTMLMediaElement as a graph source. A media element may feed at
// most one source node; a second attempt fails with INVALID_STATE_ERR.
369 PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionCode& ec)
371 ASSERT(mediaElement);
373 ec = INVALID_STATE_ERR;
377 ASSERT(isMainThread());
380 // First check if this media element already has a source node.
381 if (mediaElement->audioSourceNode()) {
382 ec = INVALID_STATE_ERR;
386 RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
// Back-pointer so the element can route its audio into the node.
388 mediaElement->setAudioSourceNode(node.get());
390 refNode(node.get()); // context keeps reference until node is disconnected
395 #if ENABLE(MEDIA_STREAM)
// Wraps a MediaStream as a graph source. Only local streams with audio
// tracks get a live provider (the destination's local-input tap); remote
// streams currently get a null provider (see FIXME).
396 PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionCode& ec)
400 ec = INVALID_STATE_ERR;
404 ASSERT(isMainThread());
407 AudioSourceProvider* provider = 0;
409 if (mediaStream->isLocal() && mediaStream->audioTracks()->length()) {
410 provider = destination()->localAudioInputProvider();
// Make sure the hardware input is actually capturing.
411 destination()->enableInput();
413 // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
417 RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, provider);
419 // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
420 node->setFormat(2, sampleRate());
422 refNode(node.get()); // context keeps reference until node is disconnected
// Creates a destination node that exposes rendered audio as a MediaStream;
// currently hard-coded to a single (mono) channel.
426 PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
428 // FIXME: Add support for an optional argument which specifies the number of channels.
429 // FIXME: The default should probably be stereo instead of mono.
430 return MediaStreamAudioDestinationNode::create(this, 1);
// Convenience overload: stereo in, stereo out.
435 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionCode& ec)
437 // Set number of input/output channels to stereo by default.
438 return createScriptProcessor(bufferSize, 2, 2, ec);
// Convenience overload: caller-chosen input channels, stereo out.
441 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionCode& ec)
443 // Set number of output channels to stereo by default.
444 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, ec);
// Full overload. On Tizen this also bumps the active-script-processor count
// and kicks off rendering (compensating for the startRendering() skipped in
// lazyInitialize()).
447 PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode& ec)
449 ASSERT(isMainThread());
451 #if ENABLE(TIZEN_WEB_AUDIO)
452 incrementActiveScriptProcessorCount();
453 destination()->startRendering();
456 RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
463 refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
// --- Simple node factories -------------------------------------------------
// Each runs on the main thread and creates a node at the destination node's
// sample rate (WaveShaperNode takes no rate; WaveTable uses sampleRate()).
467 PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
469 ASSERT(isMainThread());
471 return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
474 PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper()
476 ASSERT(isMainThread());
478 return WaveShaperNode::create(this);
481 PassRefPtr<PannerNode> AudioContext::createPanner()
483 ASSERT(isMainThread());
485 return PannerNode::create(this, m_destinationNode->sampleRate());
488 PassRefPtr<ConvolverNode> AudioContext::createConvolver()
490 ASSERT(isMainThread());
492 return ConvolverNode::create(this, m_destinationNode->sampleRate());
495 PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
497 ASSERT(isMainThread());
499 return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
502 PassRefPtr<AnalyserNode> AudioContext::createAnalyser()
504 ASSERT(isMainThread());
506 return AnalyserNode::create(this, m_destinationNode->sampleRate());
509 PassRefPtr<GainNode> AudioContext::createGain()
511 ASSERT(isMainThread());
513 return GainNode::create(this, m_destinationNode->sampleRate());
// DelayNode: no-argument overload defaults the maximum delay to 1 second.
516 PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionCode& ec)
518 const double defaultMaxDelayTime = 1;
519 return createDelay(defaultMaxDelayTime, ec);
522 PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionCode& ec)
524 ASSERT(isMainThread());
526 RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, ec);
// ChannelSplitter/ChannelMerger: no-argument overloads default to 6 ports.
532 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionCode& ec)
534 const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
535 return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, ec);
538 PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionCode& ec)
540 ASSERT(isMainThread());
543 RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
553 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionCode& ec)
555 const unsigned ChannelMergerDefaultNumberOfInputs = 6;
556 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, ec);
559 PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionCode& ec)
561 ASSERT(isMainThread());
564 RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
574 PassRefPtr<OscillatorNode> AudioContext::createOscillator()
576 ASSERT(isMainThread());
579 RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
581 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
582 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
// WaveTable: real/imag arrays must both exist and have equal length; the
// elided branch presumably sets |ec| — confirm against the full file.
588 PassRefPtr<WaveTable> AudioContext::createWaveTable(Float32Array* real, Float32Array* imag, ExceptionCode& ec)
590 ASSERT(isMainThread());
592 if (!real || !imag || (real->length() != imag->length())) {
598 return WaveTable::create(sampleRate(), real, imag);
// Audio-thread notification that a scheduled source finished playing; the
// node is queued and actually deref'd later under the graph lock.
601 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
603 ASSERT(isAudioThread());
604 m_finishedNodes.append(node);
// Drops the connection reference on every queued finished node. Requires the
// graph lock; runs on the audio thread (or main thread after it has exited).
607 void AudioContext::derefFinishedSourceNodes()
609 ASSERT(isGraphOwner());
610 ASSERT(isAudioThread() || isAudioThreadFinished());
611 for (unsigned i = 0; i < m_finishedNodes.size(); i++)
612 derefNode(m_finishedNodes[i]);
614 m_finishedNodes.clear();
// Takes a connection reference on |node| and tracks it so the context keeps
// it alive while connected. Main-thread only; grabs the graph lock itself.
617 void AudioContext::refNode(AudioNode* node)
619 ASSERT(isMainThread());
620 AutoLocker locker(this);
622 node->ref(AudioNode::RefTypeConnection);
623 m_referencedNodes.append(node);
// Reverse of refNode(): caller must already hold the graph lock. Linear scan
// is acceptable — the referenced-node list stays small.
626 void AudioContext::derefNode(AudioNode* node)
628 ASSERT(isGraphOwner());
630 node->deref(AudioNode::RefTypeConnection);
632 for (unsigned i = 0; i < m_referencedNodes.size(); ++i) {
633 if (node == m_referencedNodes[i]) {
634 m_referencedNodes.remove(i);
// Teardown path: releases every remaining connection reference at once. Only
// valid on the main thread after the audio thread has finished.
640 void AudioContext::derefUnfinishedSourceNodes()
642 ASSERT(isMainThread() && isAudioThreadFinished());
643 for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
644 m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);
646 m_referencedNodes.clear();
// --- Graph lock ------------------------------------------------------------
// The graph mutex is recursive-by-hand: m_graphOwnerThread records the owner
// so re-entrant acquisition sets mustReleaseLock=false instead of deadlocking.
// Blocking lock() is reserved for the main thread; the realtime audio thread
// must only ever tryLock() so it can never stall on the mutex.
649 void AudioContext::lock(bool& mustReleaseLock)
651 // Don't allow regular lock in real-time audio thread.
652 ASSERT(isMainThread());
654 ThreadIdentifier thisThread = currentThread();
656 if (thisThread == m_graphOwnerThread) {
657 // We already have the lock.
658 mustReleaseLock = false;
661 m_contextGraphMutex.lock();
662 m_graphOwnerThread = thisThread;
663 mustReleaseLock = true;
// Non-blocking acquisition for the audio thread. Returns (via the elided
// return statements) whether the lock is held; on a non-audio thread it
// degrades to a blocking lock() in release builds.
667 bool AudioContext::tryLock(bool& mustReleaseLock)
669 ThreadIdentifier thisThread = currentThread();
670 bool isAudioThread = thisThread == audioThread();
672 // Try to catch cases of using try lock on main thread - it should use regular lock.
673 ASSERT(isAudioThread || isAudioThreadFinished());
675 if (!isAudioThread) {
676 // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
677 lock(mustReleaseLock);
683 if (thisThread == m_graphOwnerThread) {
684 // Thread already has the lock.
686 mustReleaseLock = false;
688 // Don't already have the lock - try to acquire it.
689 hasLock = m_contextGraphMutex.tryLock();
692 m_graphOwnerThread = thisThread;
694 mustReleaseLock = hasLock;
// Clear the owner *before* unlocking so no window exists where the mutex is
// free but still appears owned.
700 void AudioContext::unlock()
702 ASSERT(currentThread() == m_graphOwnerThread);
704 m_graphOwnerThread = UndefinedThreadIdentifier;
705 m_contextGraphMutex.unlock();
// Thread-identity predicates used throughout the locking asserts above.
708 bool AudioContext::isAudioThread() const
710 return currentThread() == m_audioThread;
713 bool AudioContext::isGraphOwner() const
715 return currentThread() == m_graphOwnerThread;
// Queues a node whose finishDeref must be retried later (audio thread only);
// drained by handleDeferredFinishDerefs() under the graph lock.
718 void AudioContext::addDeferredFinishDeref(AudioNode* node)
720 ASSERT(isAudioThread());
721 m_deferredFinishDerefList.append(node);
// Start-of-quantum housekeeping on the audio thread. All work is gated on a
// successful tryLock() so the realtime thread never blocks on the main thread.
724 void AudioContext::handlePreRenderTasks()
726 ASSERT(isAudioThread());
728 // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
729 // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
730 bool mustReleaseLock;
731 if (tryLock(mustReleaseLock)) {
732 // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
733 handleDirtyAudioSummingJunctions();
734 handleDirtyAudioNodeOutputs();
736 updateAutomaticPullNodes();
// End-of-quantum housekeeping: retry deferred derefs, release finished
// sources, hand deletion work to the main thread, and refresh dirty state.
743 void AudioContext::handlePostRenderTasks()
745 ASSERT(isAudioThread());
747 // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
748 // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
749 // from the render graph (in which case they'll render silence).
750 bool mustReleaseLock;
751 if (tryLock(mustReleaseLock)) {
752 // Take care of finishing any derefs where the tryLock() failed previously.
753 handleDeferredFinishDerefs();
755 // Dynamically clean up nodes which are no longer needed.
756 derefFinishedSourceNodes();
758 // Don't delete in the real-time thread. Let the main thread do it.
759 // Ref-counted objects held by certain AudioNodes may not be thread-safe.
760 scheduleNodeDeletion();
762 // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
763 handleDirtyAudioSummingJunctions();
764 handleDirtyAudioNodeOutputs();
766 updateAutomaticPullNodes();
// Completes the connection-deref for every node queued by
// addDeferredFinishDeref(). Audio thread, graph lock held.
773 void AudioContext::handleDeferredFinishDerefs()
775 ASSERT(isAudioThread() && isGraphOwner());
776 for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
777 AudioNode* node = m_deferredFinishDerefList[i];
778 node->finishDeref(AudioNode::RefTypeConnection);
781 m_deferredFinishDerefList.clear();
// Marks |node| for deletion. If the audio thread is gone it can go straight
// onto the main-thread delete queue; otherwise it waits in
// m_nodesMarkedForDeletion until scheduleNodeDeletion() picks it up.
784 void AudioContext::markForDeletion(AudioNode* node)
786 ASSERT(isGraphOwner());
788 if (isAudioThreadFinished())
789 m_nodesToDelete.append(node);
791 m_nodesMarkedForDeletion.append(node);
793 // This is probably the best time for us to remove the node from automatic pull list,
794 // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
795 // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
796 // modify m_renderingAutomaticPullNodes.
797 removeAutomaticPullNode(node);
// Audio-thread side of deferred deletion: moves marked nodes to
// m_nodesToDelete and schedules deleteMarkedNodes() on the main thread,
// coalescing via m_isDeletionScheduled so only one callback is in flight.
800 void AudioContext::scheduleNodeDeletion()
802 bool isGood = m_isInitialized && isGraphOwner();
807 // Make sure to call deleteMarkedNodes() on main thread.
808 if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
809 m_nodesToDelete.append(m_nodesMarkedForDeletion);
810 m_nodesMarkedForDeletion.clear();
812 m_isDeletionScheduled = true;
814 // Don't let ourself get deleted before the callback.
815 // See matching deref() in deleteMarkedNodesDispatch().
817 callOnMainThread(deleteMarkedNodesDispatch, this);
// Main-thread trampoline paired with the ref taken in scheduleNodeDeletion().
821 void AudioContext::deleteMarkedNodesDispatch(void* userData)
823 AudioContext* context = reinterpret_cast<AudioContext*>(userData);
828 context->deleteMarkedNodes();
// Actually deletes queued nodes on the main thread, first scrubbing their
// inputs/outputs out of the dirty sets so we never touch freed memory later.
832 void AudioContext::deleteMarkedNodes()
834 ASSERT(isMainThread());
836 // Protect this object from being deleted before we release the mutex locked by AutoLocker.
837 RefPtr<AudioContext> protect(this);
839 AutoLocker locker(this);
// Pop from the back to avoid shifting the vector on each removal.
841 while (size_t n = m_nodesToDelete.size()) {
842 AudioNode* node = m_nodesToDelete[n - 1];
843 m_nodesToDelete.removeLast();
845 // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
846 unsigned numberOfInputs = node->numberOfInputs();
847 for (unsigned i = 0; i < numberOfInputs; ++i)
848 m_dirtySummingJunctions.remove(node->input(i));
850 // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
851 unsigned numberOfOutputs = node->numberOfOutputs();
852 for (unsigned i = 0; i < numberOfOutputs; ++i)
853 m_dirtyAudioNodeOutputs.remove(node->output(i));
855 // Finally, delete it.
// Allow the next scheduleNodeDeletion() to post a fresh callback.
858 m_isDeletionScheduled = false;
// --- Dirty-state bookkeeping ----------------------------------------------
// Junctions/outputs whose connections changed are collected here and their
// rendering state is refreshed on the audio thread each quantum.
862 void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
864 ASSERT(isGraphOwner());
865 m_dirtySummingJunctions.add(summingJunction);
// Main-thread removal (e.g. when a junction dies before being handled);
// takes the graph lock itself.
868 void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
870 ASSERT(isMainThread());
871 AutoLocker locker(this);
872 m_dirtySummingJunctions.remove(summingJunction);
875 void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
877 ASSERT(isGraphOwner());
878 m_dirtyAudioNodeOutputs.add(output);
// Flushes the dirty-junction set: updates each junction's rendering state
// then clears the set. Graph lock required.
881 void AudioContext::handleDirtyAudioSummingJunctions()
883 ASSERT(isGraphOwner());
885 for (HashSet<AudioSummingJunction*>::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
886 (*i)->updateRenderingState();
888 m_dirtySummingJunctions.clear();
// Same flush for dirty node outputs.
891 void AudioContext::handleDirtyAudioNodeOutputs()
893 ASSERT(isGraphOwner());
895 for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
896 (*i)->updateRenderingState();
898 m_dirtyAudioNodeOutputs.clear();
// --- Automatic pull nodes --------------------------------------------------
// Nodes not connected to the destination but still needing processing (e.g.
// analysers) live in m_automaticPullNodes (mutated under the graph lock);
// a stable copy in m_renderingAutomaticPullNodes is what the audio thread
// iterates, refreshed only when the NeedUpdating flag is set.
901 void AudioContext::addAutomaticPullNode(AudioNode* node)
903 ASSERT(isGraphOwner());
905 if (!m_automaticPullNodes.contains(node)) {
906 m_automaticPullNodes.add(node);
907 m_automaticPullNodesNeedUpdating = true;
911 void AudioContext::removeAutomaticPullNode(AudioNode* node)
913 ASSERT(isGraphOwner());
915 if (m_automaticPullNodes.contains(node)) {
916 m_automaticPullNodes.remove(node);
917 m_automaticPullNodesNeedUpdating = true;
// Rebuilds the rendering copy from the authoritative set when flagged.
921 void AudioContext::updateAutomaticPullNodes()
923 ASSERT(isGraphOwner());
925 if (m_automaticPullNodesNeedUpdating) {
926 // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
927 m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
930 for (HashSet<AudioNode*>::iterator i = m_automaticPullNodes.begin(); i != m_automaticPullNodes.end(); ++i, ++j) {
931 AudioNode* output = *i;
932 m_renderingAutomaticPullNodes[j] = output;
935 m_automaticPullNodesNeedUpdating = false;
// Audio-thread per-quantum pull over the stable rendering copy.
939 void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
941 ASSERT(isAudioThread());
943 for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i)
944 m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
// EventTarget interface name for this object.
947 const AtomicString& AudioContext::interfaceName() const
949 return eventNames().interfaceForAudioContext;
// Returns null once stop has been scheduled so late event dispatch is
// suppressed during teardown.
952 ScriptExecutionContext* AudioContext::scriptExecutionContext() const
954 return m_isStopScheduled ? 0 : ActiveDOMObject::scriptExecutionContext();
// Public kick-off used by OfflineAudioContext (realtime contexts normally
// start implicitly in lazyInitialize()).
957 void AudioContext::startRendering()
959 destination()->startRendering();
// Offline rendering completed: dispatch OfflineAudioCompletionEvent carrying
// the render target, unless the document has already gone away.
962 void AudioContext::fireCompletionEvent()
964 ASSERT(isMainThread());
968 AudioBuffer* renderedBuffer = m_renderTarget.get();
970 ASSERT(renderedBuffer);
974 // Avoid firing the event if the document has already gone away.
975 if (scriptExecutionContext()) {
976 // Call the offline rendering completion event listener.
977 dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
// Lock-free counters (atomic ops) — m_activeSourceCount is read/written from
// multiple threads.
981 void AudioContext::incrementActiveSourceCount()
983 atomicIncrement(&m_activeSourceCount);
986 void AudioContext::decrementActiveSourceCount()
988 atomicDecrement(&m_activeSourceCount);
991 #if ENABLE(TIZEN_WEB_AUDIO)
// Tizen-only: atomic counters tracking live ScriptProcessorNodes (bumped in
// createScriptProcessor above); presumably used to gate rendering pause —
// the consuming logic is outside this excerpt.
992 void AudioContext::incrementActiveScriptProcessorCount()
994 atomicIncrement(&m_activeScriptProcessorCount);
997 void AudioContext::decrementActiveScriptProcessorCount()
999 atomicDecrement(&m_activeScriptProcessorCount);
// Main-thread trampoline: pauses rendering at the destination.
1002 void AudioContext::pauseDispatch(void* userData)
1004 ASSERT(isMainThread());
1005 AudioContext* context = reinterpret_cast<AudioContext*>(userData);
1010 context->destination()->pauseRendering();
// Called from the audio thread; hops to the main thread because
// pauseRendering() must not run on the realtime thread.
1013 void AudioContext::pause()
1015 ASSERT(isAudioThread());
1017 callOnMainThread(pauseDispatch, this);
1020 } // namespace WebCore
1022 #endif // ENABLE(WEB_AUDIO)