2 * Copyright (C) 2010, Google Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
17 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include "modules/webaudio/AudioContext.h"
31 #include "bindings/v8/ExceptionMessages.h"
32 #include "bindings/v8/ExceptionState.h"
33 #include "core/dom/Document.h"
34 #include "core/dom/ExceptionCode.h"
35 #include "core/html/HTMLMediaElement.h"
36 #include "core/inspector/ScriptCallStack.h"
37 #include "platform/audio/FFTFrame.h"
38 #include "platform/audio/HRTFPanner.h"
39 #include "modules/mediastream/MediaStream.h"
40 #include "modules/webaudio/AnalyserNode.h"
41 #include "modules/webaudio/AudioBuffer.h"
42 #include "modules/webaudio/AudioBufferCallback.h"
43 #include "modules/webaudio/AudioBufferSourceNode.h"
44 #include "modules/webaudio/AudioListener.h"
45 #include "modules/webaudio/AudioNodeInput.h"
46 #include "modules/webaudio/AudioNodeOutput.h"
47 #include "modules/webaudio/BiquadFilterNode.h"
48 #include "modules/webaudio/ChannelMergerNode.h"
49 #include "modules/webaudio/ChannelSplitterNode.h"
50 #include "modules/webaudio/ConvolverNode.h"
51 #include "modules/webaudio/DefaultAudioDestinationNode.h"
52 #include "modules/webaudio/DelayNode.h"
53 #include "modules/webaudio/DynamicsCompressorNode.h"
54 #include "modules/webaudio/GainNode.h"
55 #include "modules/webaudio/MediaElementAudioSourceNode.h"
56 #include "modules/webaudio/MediaStreamAudioDestinationNode.h"
57 #include "modules/webaudio/MediaStreamAudioSourceNode.h"
58 #include "modules/webaudio/OfflineAudioCompletionEvent.h"
59 #include "modules/webaudio/OfflineAudioContext.h"
60 #include "modules/webaudio/OfflineAudioDestinationNode.h"
61 #include "modules/webaudio/OscillatorNode.h"
62 #include "modules/webaudio/PannerNode.h"
63 #include "modules/webaudio/PeriodicWave.h"
64 #include "modules/webaudio/ScriptProcessorNode.h"
65 #include "modules/webaudio/WaveShaperNode.h"
67 #if DEBUG_AUDIONODE_REFERENCES
71 #include "wtf/ArrayBuffer.h"
72 #include "wtf/Atomics.h"
73 #include "wtf/PassOwnPtr.h"
74 #include "wtf/text/WTFString.h"
76 // FIXME: check the proper way to reference an undefined thread ID
// Sentinel meaning "no thread currently owns the graph lock"; compared
// against currentThread() in lock()/tryLock()/unlock() below.
77 const int UndefinedThreadIdentifier = 0xffffffff;

// Returns true only for sample rates this implementation can service.
// The 44.1kHz lower bound is tied to HRTFPanner's FFT-size assumptions
// (see the FIXME below).
81 bool AudioContext::isSampleRateRangeGood(float sampleRate)
83 // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
84 // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
85 return sampleRate >= 44100 && sampleRate <= 96000;

88 // Don't allow more than this number of simultaneous AudioContexts talking to hardware.
89 const unsigned MaxHardwareContexts = 6;
// Count of live realtime (hardware-backed) contexts; offline contexts are
// excluded (initialize() only increments when !isOfflineContext()).
90 unsigned AudioContext::s_hardwareContextCount = 0;
// Factory for a realtime AudioContext. Throws a DOMException (via
// exceptionState) when the hardware-context cap has been reached.
// NOTE(review): this excerpt elides some lines (e.g. the early return after
// the throw) — verify against the full file.
92 PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState)
94 ASSERT(isMainThread());
95 if (s_hardwareContextCount >= MaxHardwareContexts) {
96 exceptionState.throwDOMException(
98 "number of hardware contexts reached maximum (" + String::number(MaxHardwareContexts) + ").");
// suspendIfNeeded() hooks the context into ActiveDOMObject lifecycle
// notifications before it is handed back to the caller.
102 RefPtrWillBeRawPtr<AudioContext> audioContext(adoptRefWillBeThreadSafeRefCountedGarbageCollected(new AudioContext(&document)));
103 audioContext->suspendIfNeeded();
104 return audioContext.release();

107 // Constructor for rendering to the audio hardware.
// Builds the default (hardware) destination node; rendering starts later in
// initialize(), not here.
108 AudioContext::AudioContext(Document* document)
109 : ActiveDOMObject(document)
110 , m_isStopScheduled(false)
112 , m_isInitialized(false)
113 , m_destinationNode(nullptr)
114 , m_isDeletionScheduled(false)
115 , m_automaticPullNodesNeedUpdating(false)
116 , m_connectionCount(0)
118 , m_graphOwnerThread(UndefinedThreadIdentifier)
119 , m_isOfflineContext(false)
121 ScriptWrappable::init(this);
123 m_destinationNode = DefaultAudioDestinationNode::create(this);

128 // Constructor for offline (non-realtime) rendering.
// The offline variant renders into an AudioBuffer (m_renderTarget) instead of
// the hardware; note m_isOfflineContext(true) so this context never counts
// against MaxHardwareContexts.
129 AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
130 : ActiveDOMObject(document)
131 , m_isStopScheduled(false)
133 , m_isInitialized(false)
134 , m_destinationNode(nullptr)
135 , m_automaticPullNodesNeedUpdating(false)
136 , m_connectionCount(0)
138 , m_graphOwnerThread(UndefinedThreadIdentifier)
139 , m_isOfflineContext(true)
141 ScriptWrappable::init(this);
143 // Create a new destination for offline rendering.
// AudioBuffer::create() can fail (e.g. bad parameters), hence the null check
// before wiring up the destination node.
144 m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
145 if (m_renderTarget.get())
146 m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());

// Destructor: by this point uninitialize()/clear() must already have run —
// the ASSERTs below document the required empty/torn-down state.
151 AudioContext::~AudioContext()
153 #if DEBUG_AUDIONODE_REFERENCES
154 fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
156 // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
157 ASSERT(!m_isInitialized);
158 ASSERT(!m_nodesToDelete.size());
159 ASSERT(!m_referencedNodes.size());
160 ASSERT(!m_finishedNodes.size());
161 ASSERT(!m_automaticPullNodes.size());
// Flush any pending resize so the final ASSERT checks the up-to-date vector.
162 if (m_automaticPullNodesNeedUpdating)
163 m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
164 ASSERT(!m_renderingAutomaticPullNodes.size());
// One-time setup: FFT tables, listener, destination node. For realtime
// contexts this also starts the audio thread and bumps the hardware-context
// count (paired with the decrement in uninitialize()).
167 void AudioContext::initialize()
172 FFTFrame::initialize();
173 m_listener = AudioListener::create();
175 if (m_destinationNode.get()) {
176 m_destinationNode->initialize();
178 if (!isOfflineContext()) {
179 // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
180 // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
181 // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
182 // We may want to consider requiring it for symmetry with OfflineAudioContext.
183 m_destinationNode->startRendering();
184 ++s_hardwareContextCount;
187 m_isInitialized = true;

// Releases the destination node and drains the node-deletion queues on the
// main thread (the audio thread is gone by now, so nobody else will).
191 void AudioContext::clear()
193 // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
194 if (m_destinationNode)
195 m_destinationNode.clear();
197 // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
// NOTE(review): the loop header and the deletion call inside this do/while
// are elided in this excerpt — verify against the full file.
199 m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
200 m_nodesMarkedForDeletion.clear();
202 } while (m_nodesToDelete.size());

// Tears down rendering: stops the audio thread via the destination node,
// releases playing sources, and flips m_isInitialized off.
207 void AudioContext::uninitialize()
209 ASSERT(isMainThread());
211 if (!isInitialized())
214 // This stops the audio thread and all audio rendering.
215 m_destinationNode->uninitialize();
// Only realtime contexts were counted in s_hardwareContextCount.
217 if (!isOfflineContext()) {
218 ASSERT(s_hardwareContextCount);
219 --s_hardwareContextCount;
222 // Get rid of the sources which may still be playing.
223 derefUnfinishedSourceNodes();
225 m_isInitialized = false;

// Trampoline invoked on the main thread by callOnMainThread() from stop().
228 void AudioContext::stopDispatch(void* userData)
230 AudioContext* context = reinterpret_cast<AudioContext*>(userData);
235 context->uninitialize();

// ActiveDOMObject stop hook. Defers the actual teardown to the main-thread
// dispatcher above; m_isStopScheduled makes repeated calls a no-op.
239 void AudioContext::stop()
241 // Usually ExecutionContext calls stop twice.
242 if (m_isStopScheduled)
244 m_isStopScheduled = true;
246 // Don't call uninitialize() immediately here because the ExecutionContext is in the middle
247 // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
248 // ActiveDOMObjects so let's schedule uninitialize() to be called later.
249 // FIXME: see if there's a more direct way to handle this issue.
250 callOnMainThread(stopDispatch, this);
// Keeps the context alive for ActiveDOMObject purposes.
// NOTE(review): the return statement is elided in this excerpt.
253 bool AudioContext::hasPendingActivity() const
255 // According to spec AudioContext must die only after page navigates.

// Thin wrapper over AudioBuffer::create(); exceptionState carries any
// parameter-validation failure out to the bindings layer.
259 PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
261 RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);

// Asynchronously decodes audioData at this context's sample rate; success or
// failure is reported through the supplied callbacks, a null buffer throws.
266 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBufferCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, ExceptionState& exceptionState)
269 exceptionState.throwDOMException(
271 "invalid ArrayBuffer for audioData.");
274 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);

// Creates a source node running at the destination's sample rate.
277 PassRefPtrWillBeRawPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
279 ASSERT(isMainThread());
280 RefPtrWillBeRawPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
282 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
283 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().

// Wraps an HTMLMediaElement as a source node. Each media element may have at
// most one source node; a second attempt throws.
289 PassRefPtrWillBeRawPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
291 ASSERT(isMainThread());
293 exceptionState.throwDOMException(
295 "invalid HTMLMedialElement.");
299 // First check if this media element already has a source node.
300 if (mediaElement->audioSourceNode()) {
301 exceptionState.throwDOMException(
303 "invalid HTMLMediaElement.");
307 RefPtrWillBeRawPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
309 mediaElement->setAudioSourceNode(node.get());
311 refNode(node.get()); // context keeps reference until node is disconnected

// Wraps a MediaStream's first audio track as a source node; throws for a
// null stream or a stream with no audio tracks.
315 PassRefPtrWillBeRawPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
317 ASSERT(isMainThread());
319 exceptionState.throwDOMException(
321 "invalid MediaStream source");
325 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
326 if (audioTracks.isEmpty()) {
327 exceptionState.throwDOMException(
329 "MediaStream has no audio track");
333 // Use the first audio track in the media stream.
334 RefPtrWillBeRawPtr<MediaStreamTrack> audioTrack = audioTracks[0];
335 OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource();
336 RefPtrWillBeRawPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider.release());
338 // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
339 node->setFormat(2, sampleRate());
341 refNode(node.get()); // context keeps reference until node is disconnected
345 PassRefPtrWillBeRawPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
347 // Set number of output channels to stereo by default.
348 return MediaStreamAudioDestinationNode::create(this, 2);
351 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& exceptionState)
353 // Set number of input/output channels to stereo by default.
354 return createScriptProcessor(0, 2, 2, exceptionState);
357 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
359 // Set number of input/output channels to stereo by default.
360 return createScriptProcessor(bufferSize, 2, 2, exceptionState);
363 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
365 // Set number of output channels to stereo by default.
366 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState);
// Fully-specified ScriptProcessorNode factory. Validates channel counts and
// buffer size (validation failures throw via exceptionState) and keeps a
// context reference on the node so JS rendering callbacks stay alive.
// NOTE(review): the early returns after each throw and the final return are
// elided in this excerpt — verify against the full file.
369 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
371 ASSERT(isMainThread());
372 RefPtrWillBeRawPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
375 if (!numberOfInputChannels && !numberOfOutputChannels) {
376 exceptionState.throwDOMException(
378 "number of input channels and output channels cannot both be zero.");
379 } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels()) {
380 exceptionState.throwDOMException(
382 "number of input channels (" + String::number(numberOfInputChannels)
383 + ") exceeds maximum ("
384 + String::number(AudioContext::maxNumberOfChannels()) + ").");
385 } else if (numberOfOutputChannels > AudioContext::maxNumberOfChannels()) {
386 exceptionState.throwDOMException(
// NOTE(review): this message interpolates numberOfInputChannels while
// describing the *output* channel count — looks like a copy/paste slip;
// confirm against the full file before changing the string.
388 "number of output channels (" + String::number(numberOfInputChannels)
389 + ") exceeds maximum ("
390 + String::number(AudioContext::maxNumberOfChannels()) + ").");
392 exceptionState.throwDOMException(
394 "buffer size (" + String::number(bufferSize)
395 + ") must be a power of two between 256 and 16384.");
400 refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
404 PassRefPtrWillBeRawPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
406 ASSERT(isMainThread());
407 return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
410 PassRefPtrWillBeRawPtr<WaveShaperNode> AudioContext::createWaveShaper()
412 ASSERT(isMainThread());
413 return WaveShaperNode::create(this);
416 PassRefPtrWillBeRawPtr<PannerNode> AudioContext::createPanner()
418 ASSERT(isMainThread());
419 return PannerNode::create(this, m_destinationNode->sampleRate());
422 PassRefPtrWillBeRawPtr<ConvolverNode> AudioContext::createConvolver()
424 ASSERT(isMainThread());
425 return ConvolverNode::create(this, m_destinationNode->sampleRate());
428 PassRefPtrWillBeRawPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
430 ASSERT(isMainThread());
431 return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
434 PassRefPtrWillBeRawPtr<AnalyserNode> AudioContext::createAnalyser()
436 ASSERT(isMainThread());
437 return AnalyserNode::create(this, m_destinationNode->sampleRate());
440 PassRefPtrWillBeRawPtr<GainNode> AudioContext::createGain()
442 ASSERT(isMainThread());
443 return GainNode::create(this, m_destinationNode->sampleRate());
// DelayNode factory; the no-argument overload uses a 1 second maximum delay.
446 PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState)
448 const double defaultMaxDelayTime = 1;
449 return createDelay(defaultMaxDelayTime, exceptionState);

// DelayNode::create() validates maxDelayTime and may throw via
// exceptionState. NOTE(review): the return statements after the hadException
// check are elided in this excerpt — verify against the full file.
452 PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
454 ASSERT(isMainThread());
455 RefPtrWillBeRawPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState);
456 if (exceptionState.hadException())
// ChannelSplitterNode factory; default is 6 outputs.
461 PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState)
463 const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
464 return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState);

// Throws when ChannelSplitterNode::create() rejects numberOfOutputs (null
// node). NOTE(review): early returns elided in this excerpt.
467 PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
469 ASSERT(isMainThread());
471 RefPtrWillBeRawPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
474 exceptionState.throwDOMException(
476 "number of outputs (" + String::number(numberOfOutputs)
477 + ") must be between 1 and "
478 + String::number(AudioContext::maxNumberOfChannels()) + ".");

// ChannelMergerNode factory; default is 6 inputs. Mirrors the splitter above.
485 PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState)
487 const unsigned ChannelMergerDefaultNumberOfInputs = 6;
488 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState);

491 PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
493 ASSERT(isMainThread());
495 RefPtrWillBeRawPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
498 exceptionState.throwDOMException(
500 "number of inputs (" + String::number(numberOfInputs)
501 + ") must be between 1 and "
502 + String::number(AudioContext::maxNumberOfChannels()) + ".");

// OscillatorNode factory; like createBufferSource(), the context holds a
// reference until the scheduled source finishes playing.
509 PassRefPtrWillBeRawPtr<OscillatorNode> AudioContext::createOscillator()
511 ASSERT(isMainThread());
513 RefPtrWillBeRawPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
515 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
516 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().

// Builds a PeriodicWave from matched real/imaginary coefficient arrays.
// Validates: non-null arrays, equal lengths, and a 4096-element cap on each.
// NOTE(review): the null checks guarding the first two throws and the early
// returns are elided in this excerpt — verify against the full file.
522 PassRefPtrWillBeRawPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState)
524 ASSERT(isMainThread());
527 exceptionState.throwDOMException(
529 "invalid real array");
534 exceptionState.throwDOMException(
536 "invalid imaginary array");
540 if (real->length() != imag->length()) {
541 exceptionState.throwDOMException(
543 "length of real array (" + String::number(real->length())
544 + ") and length of imaginary array (" + String::number(imag->length())
549 if (real->length() > 4096) {
550 exceptionState.throwDOMException(
552 "length of real array (" + String::number(real->length())
553 + ") exceeds allowed maximum of 4096");
557 if (imag->length() > 4096) {
558 exceptionState.throwDOMException(
560 "length of imaginary array (" + String::number(imag->length())
561 + ") exceeds allowed maximum of 4096");
565 return PeriodicWave::create(sampleRate(), real, imag);
568 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
570 ASSERT(isAudioThread());
571 m_finishedNodes.append(node);
574 void AudioContext::derefFinishedSourceNodes()
576 ASSERT(isGraphOwner());
577 ASSERT(isAudioThread());
578 for (unsigned i = 0; i < m_finishedNodes.size(); i++)
579 derefNode(m_finishedNodes[i]);
581 m_finishedNodes.clear();
// Takes a connection reference on the node and remembers it in
// m_referencedNodes so the context can release it later (derefNode or
// derefUnfinishedSourceNodes). Main thread only; takes the graph lock.
584 void AudioContext::refNode(AudioNode* node)
586 ASSERT(isMainThread());
587 AutoLocker locker(this);
589 node->ref(AudioNode::RefTypeConnection);
590 m_referencedNodes.append(node);

// Inverse of refNode(): releases the connection reference and removes the
// first matching entry. NOTE(review): the loop-exit after remove(i) is
// elided in this excerpt — verify against the full file.
593 void AudioContext::derefNode(AudioNode* node)
595 ASSERT(isGraphOwner());
597 node->deref(AudioNode::RefTypeConnection);
599 for (unsigned i = 0; i < m_referencedNodes.size(); ++i) {
600 if (node == m_referencedNodes[i]) {
601 m_referencedNodes.remove(i);
607 void AudioContext::derefUnfinishedSourceNodes()
609 ASSERT(isMainThread());
610 for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
611 m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);
613 m_referencedNodes.clear();
// Recursive-ish graph lock for the main thread: if the calling thread
// already owns the graph, mustReleaseLock is false and nothing is acquired.
// NOTE(review): the early return in the already-owned branch is elided in
// this excerpt — verify against the full file.
616 void AudioContext::lock(bool& mustReleaseLock)
618 // Don't allow regular lock in real-time audio thread.
619 ASSERT(isMainThread());
621 ThreadIdentifier thisThread = currentThread();
623 if (thisThread == m_graphOwnerThread) {
624 // We already have the lock.
625 mustReleaseLock = false;
628 m_contextGraphMutex.lock();
629 m_graphOwnerThread = thisThread;
630 mustReleaseLock = true;

// Non-blocking acquire for the audio thread; returns whether the lock is
// held (either already owned or newly acquired). On a non-audio thread this
// degrades to a blocking lock(). NOTE(review): the declaration of hasLock,
// the assignments in the already-owned branch, and the returns are elided
// in this excerpt — verify against the full file.
634 bool AudioContext::tryLock(bool& mustReleaseLock)
636 ThreadIdentifier thisThread = currentThread();
637 bool isAudioThread = thisThread == audioThread();
639 // Try to catch cases of using try lock on main thread - it should use regular lock.
640 ASSERT(isAudioThread);
642 if (!isAudioThread) {
643 // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
644 lock(mustReleaseLock);
650 if (thisThread == m_graphOwnerThread) {
651 // Thread already has the lock.
653 mustReleaseLock = false;
655 // Don't already have the lock - try to acquire it.
656 hasLock = m_contextGraphMutex.tryLock();
659 m_graphOwnerThread = thisThread;
661 mustReleaseLock = hasLock;
667 void AudioContext::unlock()
669 ASSERT(currentThread() == m_graphOwnerThread);
671 m_graphOwnerThread = UndefinedThreadIdentifier;
672 m_contextGraphMutex.unlock();
675 bool AudioContext::isAudioThread() const
677 return currentThread() == m_audioThread;
680 bool AudioContext::isGraphOwner() const
682 return currentThread() == m_graphOwnerThread;
685 void AudioContext::addDeferredFinishDeref(AudioNode* node)
687 ASSERT(isAudioThread());
688 m_deferredFinishDerefList.append(node);
// Start-of-render-quantum housekeeping on the audio thread. Uses tryLock()
// so the realtime thread never blocks; missed updates are picked up on a
// later quantum. NOTE(review): the unlock() at the end of the if-block is
// elided in this excerpt — verify against the full file.
691 void AudioContext::handlePreRenderTasks()
693 ASSERT(isAudioThread());
695 // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
696 // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
697 bool mustReleaseLock;
698 if (tryLock(mustReleaseLock)) {
699 // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
700 handleDirtyAudioSummingJunctions();
701 handleDirtyAudioNodeOutputs();
703 updateAutomaticPullNodes();

// End-of-render-quantum housekeeping: finish deferred derefs, release
// finished sources, schedule main-thread deletion, and refresh dirty state.
// Same tryLock() discipline as handlePreRenderTasks().
710 void AudioContext::handlePostRenderTasks()
712 ASSERT(isAudioThread());
714 // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
715 // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
716 // from the render graph (in which case they'll render silence).
717 bool mustReleaseLock;
718 if (tryLock(mustReleaseLock)) {
719 // Take care of finishing any derefs where the tryLock() failed previously.
720 handleDeferredFinishDerefs();
722 // Dynamically clean up nodes which are no longer needed.
723 derefFinishedSourceNodes();
725 // Don't delete in the real-time thread. Let the main thread do it.
726 // Ref-counted objects held by certain AudioNodes may not be thread-safe.
727 scheduleNodeDeletion();
729 // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
730 handleDirtyAudioSummingJunctions();
731 handleDirtyAudioNodeOutputs();
733 updateAutomaticPullNodes();

// Drains m_deferredFinishDerefList, completing each node's pending
// connection deref. Requires the audio thread holding the graph lock.
740 void AudioContext::handleDeferredFinishDerefs()
742 ASSERT(isAudioThread() && isGraphOwner());
743 for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
744 AudioNode* node = m_deferredFinishDerefList[i];
745 node->finishDeref(AudioNode::RefTypeConnection);
748 m_deferredFinishDerefList.clear();

// Queues a node for deletion: straight into m_nodesToDelete when the context
// is already torn down, otherwise into m_nodesMarkedForDeletion for the
// audio thread to hand off. Also drops the node from the pull list now,
// while the graph lock is held. NOTE(review): the else between the two
// appends is elided in this excerpt.
751 void AudioContext::markForDeletion(AudioNode* node)
753 ASSERT(isGraphOwner());
755 if (!isInitialized())
756 m_nodesToDelete.append(node);
758 m_nodesMarkedForDeletion.append(node);
760 // This is probably the best time for us to remove the node from automatic pull list,
761 // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
762 // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
763 // modify m_renderingAutomaticPullNodes.
764 removeAutomaticPullNode(node);

// Audio-thread side of deletion: moves marked nodes into m_nodesToDelete and
// posts deleteMarkedNodesDispatch to the main thread, at most one dispatch
// in flight (m_isDeletionScheduled).
767 void AudioContext::scheduleNodeDeletion()
769 bool isGood = isInitialized() && isGraphOwner();
774 // Make sure to call deleteMarkedNodes() on main thread.
775 if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
776 m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
777 m_nodesMarkedForDeletion.clear();
779 m_isDeletionScheduled = true;
781 // Don't let ourself get deleted before the callback.
782 // See matching deref() in deleteMarkedNodesDispatch().
784 callOnMainThread(deleteMarkedNodesDispatch, this);

// Main-thread trampoline for the dispatch posted above.
788 void AudioContext::deleteMarkedNodesDispatch(void* userData)
790 AudioContext* context = reinterpret_cast<AudioContext*>(userData);
795 context->deleteMarkedNodes();

// Main-thread deletion of queued nodes. For each node, scrub its inputs and
// outputs out of the dirty sets (they are about to dangle) before releasing
// it. NOTE(review): the line that actually destroys/releases the node after
// the "Finally, delete it." comment is elided in this excerpt.
799 void AudioContext::deleteMarkedNodes()
801 ASSERT(isMainThread());
803 // Protect this object from being deleted before we release the mutex locked by AutoLocker.
804 RefPtrWillBeRawPtr<AudioContext> protect(this);
806 AutoLocker locker(this);
808 while (size_t n = m_nodesToDelete.size()) {
809 AudioNode* node = m_nodesToDelete[n - 1];
810 m_nodesToDelete.removeLast();
812 // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
813 unsigned numberOfInputs = node->numberOfInputs();
814 for (unsigned i = 0; i < numberOfInputs; ++i)
815 m_dirtySummingJunctions.remove(node->input(i));
817 // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
818 unsigned numberOfOutputs = node->numberOfOutputs();
819 for (unsigned i = 0; i < numberOfOutputs; ++i)
820 m_dirtyAudioNodeOutputs.remove(node->output(i));
822 // Finally, clear the keep alive handle that keeps this
823 // object from being collected.
824 node->clearKeepAlive();
826 // Finally, delete it.
830 m_isDeletionScheduled = false;
834 void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
836 ASSERT(isGraphOwner());
837 m_dirtySummingJunctions.add(summingJunction);
840 void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
842 ASSERT(isMainThread());
843 AutoLocker locker(this);
844 m_dirtySummingJunctions.remove(summingJunction);
847 void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
849 ASSERT(isGraphOwner());
850 m_dirtyAudioNodeOutputs.add(output);
853 void AudioContext::handleDirtyAudioSummingJunctions()
855 ASSERT(isGraphOwner());
857 for (HashSet<AudioSummingJunction* >::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
858 (*i)->updateRenderingState();
860 m_dirtySummingJunctions.clear();
863 void AudioContext::handleDirtyAudioNodeOutputs()
865 ASSERT(isGraphOwner());
867 for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
868 (*i)->updateRenderingState();
870 m_dirtyAudioNodeOutputs.clear();
873 void AudioContext::addAutomaticPullNode(AudioNode* node)
875 ASSERT(isGraphOwner());
877 if (!m_automaticPullNodes.contains(node)) {
878 m_automaticPullNodes.add(node);
879 m_automaticPullNodesNeedUpdating = true;
883 void AudioContext::removeAutomaticPullNode(AudioNode* node)
885 ASSERT(isGraphOwner());
887 if (m_automaticPullNodes.contains(node)) {
888 m_automaticPullNodes.remove(node);
889 m_automaticPullNodesNeedUpdating = true;
// Copies m_automaticPullNodes (main-thread-maintained set) into the vector
// the audio thread iterates during rendering, when flagged stale.
// NOTE(review): the declaration of the index j is elided in this excerpt —
// verify against the full file.
893 void AudioContext::updateAutomaticPullNodes()
895 ASSERT(isGraphOwner());
897 if (m_automaticPullNodesNeedUpdating) {
898 // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
899 m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
902 for (HashSet<AudioNode*>::iterator i = m_automaticPullNodes.begin(); i != m_automaticPullNodes.end(); ++i, ++j) {
903 AudioNode* output = *i;
904 m_renderingAutomaticPullNodes[j] = output;
907 m_automaticPullNodesNeedUpdating = false;
911 void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
913 ASSERT(isAudioThread());
915 for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i)
916 m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
919 const AtomicString& AudioContext::interfaceName() const
921 return EventTargetNames::AudioContext;
924 ExecutionContext* AudioContext::executionContext() const
926 return m_isStopScheduled ? 0 : ActiveDOMObject::executionContext();
929 void AudioContext::startRendering()
931 destination()->startRendering();
// Offline-rendering completion: dispatches OfflineAudioCompletionEvent with
// the rendered buffer, unless the document has already gone away.
// NOTE(review): the offline-context guard and the early return after the
// renderedBuffer ASSERT are elided in this excerpt — verify against the
// full file.
934 void AudioContext::fireCompletionEvent()
936 ASSERT(isMainThread());
940 AudioBuffer* renderedBuffer = m_renderTarget.get();
942 ASSERT(renderedBuffer);
946 // Avoid firing the event if the document has already gone away.
947 if (executionContext()) {
948 // Call the offline rendering completion event listener.
949 dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
// Oilpan tracing: report every traced member, then delegate to the base
// class so inherited members are traced too.
953 void AudioContext::trace(Visitor* visitor)
955 visitor->trace(m_renderTarget);
956 visitor->trace(m_destinationNode);
957 visitor->trace(m_listener);
958 visitor->trace(m_dirtySummingJunctions);
959 EventTargetWithInlineData::trace(visitor);
962 } // namespace WebCore
964 #endif // ENABLE(WEB_AUDIO)