6e4ddf65add102016812c4947f32b902ee144636
[platform/framework/web/crosswalk.git] / src / third_party / WebKit / Source / modules / webaudio / AudioContext.cpp
1 /*
2  * Copyright (C) 2010, Google Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1.  Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2.  Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16  * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
17  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
22  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23  */
24
25 #include "config.h"
26
27 #if ENABLE(WEB_AUDIO)
28
29 #include "modules/webaudio/AudioContext.h"
30
31 #include "bindings/v8/ExceptionMessages.h"
32 #include "bindings/v8/ExceptionState.h"
33 #include "core/dom/Document.h"
34 #include "core/dom/ExceptionCode.h"
35 #include "core/html/HTMLMediaElement.h"
36 #include "core/inspector/ScriptCallStack.h"
37 #include "platform/audio/FFTFrame.h"
38 #include "platform/audio/HRTFPanner.h"
39 #include "modules/mediastream/MediaStream.h"
40 #include "modules/webaudio/AnalyserNode.h"
41 #include "modules/webaudio/AudioBuffer.h"
42 #include "modules/webaudio/AudioBufferCallback.h"
43 #include "modules/webaudio/AudioBufferSourceNode.h"
44 #include "modules/webaudio/AudioListener.h"
45 #include "modules/webaudio/AudioNodeInput.h"
46 #include "modules/webaudio/AudioNodeOutput.h"
47 #include "modules/webaudio/BiquadFilterNode.h"
48 #include "modules/webaudio/ChannelMergerNode.h"
49 #include "modules/webaudio/ChannelSplitterNode.h"
50 #include "modules/webaudio/ConvolverNode.h"
51 #include "modules/webaudio/DefaultAudioDestinationNode.h"
52 #include "modules/webaudio/DelayNode.h"
53 #include "modules/webaudio/DynamicsCompressorNode.h"
54 #include "modules/webaudio/GainNode.h"
55 #include "modules/webaudio/MediaElementAudioSourceNode.h"
56 #include "modules/webaudio/MediaStreamAudioDestinationNode.h"
57 #include "modules/webaudio/MediaStreamAudioSourceNode.h"
58 #include "modules/webaudio/OfflineAudioCompletionEvent.h"
59 #include "modules/webaudio/OfflineAudioContext.h"
60 #include "modules/webaudio/OfflineAudioDestinationNode.h"
61 #include "modules/webaudio/OscillatorNode.h"
62 #include "modules/webaudio/PannerNode.h"
63 #include "modules/webaudio/PeriodicWave.h"
64 #include "modules/webaudio/ScriptProcessorNode.h"
65 #include "modules/webaudio/WaveShaperNode.h"
66
67 #if DEBUG_AUDIONODE_REFERENCES
68 #include <stdio.h>
69 #endif
70
71 #include "wtf/ArrayBuffer.h"
72 #include "wtf/Atomics.h"
73 #include "wtf/PassOwnPtr.h"
74 #include "wtf/text/WTFString.h"
75
// FIXME: check the proper way to reference an undefined thread ID
// Sentinel stored in m_graphOwnerThread meaning "no thread currently owns the graph lock".
const int UndefinedThreadIdentifier = 0xffffffff;
78
79 namespace WebCore {
80
81 bool AudioContext::isSampleRateRangeGood(float sampleRate)
82 {
83     // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
84     // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
85     return sampleRate >= 44100 && sampleRate <= 96000;
86 }
87
// Don't allow more than this number of simultaneous AudioContexts talking to hardware.
const unsigned MaxHardwareContexts = 6;
// Count of live realtime (hardware-backed) contexts; updated on the main thread
// in initialize()/uninitialize().
unsigned AudioContext::s_hardwareContextCount = 0;
91
92 PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState)
93 {
94     ASSERT(isMainThread());
95     if (s_hardwareContextCount >= MaxHardwareContexts) {
96         exceptionState.throwDOMException(
97             SyntaxError,
98             "number of hardware contexts reached maximum (" + String::number(MaxHardwareContexts) + ").");
99         return nullptr;
100     }
101
102     RefPtrWillBeRawPtr<AudioContext> audioContext(adoptRefWillBeThreadSafeRefCountedGarbageCollected(new AudioContext(&document)));
103     audioContext->suspendIfNeeded();
104     return audioContext.release();
105 }
106
// Constructor for rendering to the audio hardware.
AudioContext::AudioContext(Document* document)
    : ActiveDOMObject(document)
    , m_isStopScheduled(false)
    , m_isCleared(false)
    , m_isInitialized(false)
    , m_destinationNode(nullptr)
    , m_isDeletionScheduled(false)
    , m_automaticPullNodesNeedUpdating(false)
    , m_connectionCount(0)
    , m_audioThread(0)
    , m_graphOwnerThread(UndefinedThreadIdentifier)
    , m_isOfflineContext(false)
{
    ScriptWrappable::init(this);

    // The default destination renders directly to the audio hardware.
    m_destinationNode = DefaultAudioDestinationNode::create(this);

    initialize();
}
127
128 // Constructor for offline (non-realtime) rendering.
129 AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
130     : ActiveDOMObject(document)
131     , m_isStopScheduled(false)
132     , m_isCleared(false)
133     , m_isInitialized(false)
134     , m_destinationNode(nullptr)
135     , m_automaticPullNodesNeedUpdating(false)
136     , m_connectionCount(0)
137     , m_audioThread(0)
138     , m_graphOwnerThread(UndefinedThreadIdentifier)
139     , m_isOfflineContext(true)
140 {
141     ScriptWrappable::init(this);
142
143     // Create a new destination for offline rendering.
144     m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
145     if (m_renderTarget.get())
146         m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
147
148     initialize();
149 }
150
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
#endif
    // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
    ASSERT(!m_isInitialized);
    ASSERT(!m_nodesToDelete.size());
    ASSERT(!m_referencedNodes.size());
    ASSERT(!m_finishedNodes.size());
    ASSERT(!m_automaticPullNodes.size());
    // Flush any pending pull-node update so the assert below checks the
    // up-to-date rendering vector rather than a stale one.
    if (m_automaticPullNodesNeedUpdating)
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    ASSERT(!m_renderingAutomaticPullNodes.size());
}
166
167 void AudioContext::initialize()
168 {
169     if (isInitialized())
170         return;
171
172     FFTFrame::initialize();
173     m_listener = AudioListener::create();
174
175     if (m_destinationNode.get()) {
176         m_destinationNode->initialize();
177
178         if (!isOfflineContext()) {
179             // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
180             // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
181             // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
182             // We may want to consider requiring it for symmetry with OfflineAudioContext.
183             m_destinationNode->startRendering();
184             ++s_hardwareContextCount;
185         }
186
187         m_isInitialized = true;
188     }
189 }
190
191 void AudioContext::clear()
192 {
193     // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
194     if (m_destinationNode)
195         m_destinationNode.clear();
196
197     // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
198     do {
199         m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
200         m_nodesMarkedForDeletion.clear();
201         deleteMarkedNodes();
202     } while (m_nodesToDelete.size());
203
204     m_isCleared = true;
205 }
206
// Tears down rendering; the context object itself stays alive until clear().
void AudioContext::uninitialize()
{
    ASSERT(isMainThread());

    if (!isInitialized())
        return;

    // This stops the audio thread and all audio rendering.
    m_destinationNode->uninitialize();

    // Realtime contexts count against the hardware-context limit; release this
    // context's slot.
    if (!isOfflineContext()) {
        ASSERT(s_hardwareContextCount);
        --s_hardwareContextCount;
    }

    // Get rid of the sources which may still be playing.
    derefUnfinishedSourceNodes();

    m_isInitialized = false;
}
227
228 void AudioContext::stopDispatch(void* userData)
229 {
230     AudioContext* context = reinterpret_cast<AudioContext*>(userData);
231     ASSERT(context);
232     if (!context)
233         return;
234
235     context->uninitialize();
236     context->clear();
237 }
238
239 void AudioContext::stop()
240 {
241     // Usually ExecutionContext calls stop twice.
242     if (m_isStopScheduled)
243         return;
244     m_isStopScheduled = true;
245
246     // Don't call uninitialize() immediately here because the ExecutionContext is in the middle
247     // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
248     // ActiveDOMObjects so let's schedule uninitialize() to be called later.
249     // FIXME: see if there's a more direct way to handle this issue.
250     callOnMainThread(stopDispatch, this);
251 }
252
253 bool AudioContext::hasPendingActivity() const
254 {
255     // According to spec AudioContext must die only after page navigates.
256     return !m_isCleared;
257 }
258
259 PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
260 {
261     RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);
262
263     return audioBuffer;
264 }
265
266 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBufferCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, ExceptionState& exceptionState)
267 {
268     if (!audioData) {
269         exceptionState.throwDOMException(
270             SyntaxError,
271             "invalid ArrayBuffer for audioData.");
272         return;
273     }
274     m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
275 }
276
277 PassRefPtrWillBeRawPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
278 {
279     ASSERT(isMainThread());
280     RefPtrWillBeRawPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
281
282     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
283     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
284     refNode(node.get());
285
286     return node;
287 }
288
289 PassRefPtrWillBeRawPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
290 {
291     ASSERT(isMainThread());
292     if (!mediaElement) {
293         exceptionState.throwDOMException(
294             InvalidStateError,
295             "invalid HTMLMedialElement.");
296         return nullptr;
297     }
298
299     // First check if this media element already has a source node.
300     if (mediaElement->audioSourceNode()) {
301         exceptionState.throwDOMException(
302             InvalidStateError,
303             "invalid HTMLMediaElement.");
304         return nullptr;
305     }
306
307     RefPtrWillBeRawPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
308
309     mediaElement->setAudioSourceNode(node.get());
310
311     refNode(node.get()); // context keeps reference until node is disconnected
312     return node;
313 }
314
315 PassRefPtrWillBeRawPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
316 {
317     ASSERT(isMainThread());
318     if (!mediaStream) {
319         exceptionState.throwDOMException(
320             InvalidStateError,
321             "invalid MediaStream source");
322         return nullptr;
323     }
324
325     MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
326     if (audioTracks.isEmpty()) {
327         exceptionState.throwDOMException(
328             InvalidStateError,
329             "MediaStream has no audio track");
330         return nullptr;
331     }
332
333     // Use the first audio track in the media stream.
334     RefPtrWillBeRawPtr<MediaStreamTrack> audioTrack = audioTracks[0];
335     OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource();
336     RefPtrWillBeRawPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider.release());
337
338     // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
339     node->setFormat(2, sampleRate());
340
341     refNode(node.get()); // context keeps reference until node is disconnected
342     return node;
343 }
344
345 PassRefPtrWillBeRawPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
346 {
347     // Set number of output channels to stereo by default.
348     return MediaStreamAudioDestinationNode::create(this, 2);
349 }
350
351 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& exceptionState)
352 {
353     // Set number of input/output channels to stereo by default.
354     return createScriptProcessor(0, 2, 2, exceptionState);
355 }
356
357 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
358 {
359     // Set number of input/output channels to stereo by default.
360     return createScriptProcessor(bufferSize, 2, 2, exceptionState);
361 }
362
363 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
364 {
365     // Set number of output channels to stereo by default.
366     return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState);
367 }
368
369 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
370 {
371     ASSERT(isMainThread());
372     RefPtrWillBeRawPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
373
374     if (!node.get()) {
375         if (!numberOfInputChannels && !numberOfOutputChannels) {
376             exceptionState.throwDOMException(
377                 IndexSizeError,
378                 "number of input channels and output channels cannot both be zero.");
379         } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels()) {
380             exceptionState.throwDOMException(
381                 IndexSizeError,
382                 "number of input channels (" + String::number(numberOfInputChannels)
383                 + ") exceeds maximum ("
384                 + String::number(AudioContext::maxNumberOfChannels()) + ").");
385         } else if (numberOfOutputChannels > AudioContext::maxNumberOfChannels()) {
386             exceptionState.throwDOMException(
387                 IndexSizeError,
388                 "number of output channels (" + String::number(numberOfInputChannels)
389                 + ") exceeds maximum ("
390                 + String::number(AudioContext::maxNumberOfChannels()) + ").");
391         } else {
392             exceptionState.throwDOMException(
393                 IndexSizeError,
394                 "buffer size (" + String::number(bufferSize)
395                 + ") must be a power of two between 256 and 16384.");
396         }
397         return nullptr;
398     }
399
400     refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
401     return node;
402 }
403
404 PassRefPtrWillBeRawPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
405 {
406     ASSERT(isMainThread());
407     return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
408 }
409
410 PassRefPtrWillBeRawPtr<WaveShaperNode> AudioContext::createWaveShaper()
411 {
412     ASSERT(isMainThread());
413     return WaveShaperNode::create(this);
414 }
415
416 PassRefPtrWillBeRawPtr<PannerNode> AudioContext::createPanner()
417 {
418     ASSERT(isMainThread());
419     return PannerNode::create(this, m_destinationNode->sampleRate());
420 }
421
422 PassRefPtrWillBeRawPtr<ConvolverNode> AudioContext::createConvolver()
423 {
424     ASSERT(isMainThread());
425     return ConvolverNode::create(this, m_destinationNode->sampleRate());
426 }
427
428 PassRefPtrWillBeRawPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
429 {
430     ASSERT(isMainThread());
431     return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
432 }
433
434 PassRefPtrWillBeRawPtr<AnalyserNode> AudioContext::createAnalyser()
435 {
436     ASSERT(isMainThread());
437     return AnalyserNode::create(this, m_destinationNode->sampleRate());
438 }
439
440 PassRefPtrWillBeRawPtr<GainNode> AudioContext::createGain()
441 {
442     ASSERT(isMainThread());
443     return GainNode::create(this, m_destinationNode->sampleRate());
444 }
445
446 PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState)
447 {
448     const double defaultMaxDelayTime = 1;
449     return createDelay(defaultMaxDelayTime, exceptionState);
450 }
451
452 PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
453 {
454     ASSERT(isMainThread());
455     RefPtrWillBeRawPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState);
456     if (exceptionState.hadException())
457         return nullptr;
458     return node;
459 }
460
461 PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState)
462 {
463     const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
464     return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState);
465 }
466
467 PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
468 {
469     ASSERT(isMainThread());
470
471     RefPtrWillBeRawPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
472
473     if (!node.get()) {
474         exceptionState.throwDOMException(
475             IndexSizeError,
476             "number of outputs (" + String::number(numberOfOutputs)
477             + ") must be between 1 and "
478             + String::number(AudioContext::maxNumberOfChannels()) + ".");
479         return nullptr;
480     }
481
482     return node;
483 }
484
485 PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState)
486 {
487     const unsigned ChannelMergerDefaultNumberOfInputs = 6;
488     return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState);
489 }
490
491 PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
492 {
493     ASSERT(isMainThread());
494
495     RefPtrWillBeRawPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
496
497     if (!node.get()) {
498         exceptionState.throwDOMException(
499             IndexSizeError,
500             "number of inputs (" + String::number(numberOfInputs)
501             + ") must be between 1 and "
502             + String::number(AudioContext::maxNumberOfChannels()) + ".");
503         return nullptr;
504     }
505
506     return node;
507 }
508
509 PassRefPtrWillBeRawPtr<OscillatorNode> AudioContext::createOscillator()
510 {
511     ASSERT(isMainThread());
512
513     RefPtrWillBeRawPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
514
515     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
516     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
517     refNode(node.get());
518
519     return node;
520 }
521
522 PassRefPtrWillBeRawPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState)
523 {
524     ASSERT(isMainThread());
525
526     if (!real) {
527         exceptionState.throwDOMException(
528             SyntaxError,
529             "invalid real array");
530         return nullptr;
531     }
532
533     if (!imag) {
534         exceptionState.throwDOMException(
535             SyntaxError,
536             "invalid imaginary array");
537         return nullptr;
538     }
539
540     if (real->length() != imag->length()) {
541         exceptionState.throwDOMException(
542             IndexSizeError,
543             "length of real array (" + String::number(real->length())
544             + ") and length of imaginary array (" +  String::number(imag->length())
545             + ") must match.");
546         return nullptr;
547     }
548
549     if (real->length() > 4096) {
550         exceptionState.throwDOMException(
551             IndexSizeError,
552             "length of real array (" + String::number(real->length())
553             + ") exceeds allowed maximum of 4096");
554         return nullptr;
555     }
556
557     if (imag->length() > 4096) {
558         exceptionState.throwDOMException(
559             IndexSizeError,
560             "length of imaginary array (" + String::number(imag->length())
561             + ") exceeds allowed maximum of 4096");
562         return nullptr;
563     }
564
565     return PeriodicWave::create(sampleRate(), real, imag);
566 }
567
// Called on the audio thread when a scheduled source node finishes playback;
// the node is deref'd later under the graph lock in derefFinishedSourceNodes().
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_finishedNodes.append(node);
}
573
574 void AudioContext::derefFinishedSourceNodes()
575 {
576     ASSERT(isGraphOwner());
577     ASSERT(isAudioThread());
578     for (unsigned i = 0; i < m_finishedNodes.size(); i++)
579         derefNode(m_finishedNodes[i]);
580
581     m_finishedNodes.clear();
582 }
583
// Makes the context keep |node| alive (e.g. while a source node is playing).
void AudioContext::refNode(AudioNode* node)
{
    ASSERT(isMainThread());
    // Take the graph lock so the audio thread sees a consistent node list.
    AutoLocker locker(this);

    node->ref(AudioNode::RefTypeConnection);
    m_referencedNodes.append(node);
}
592
593 void AudioContext::derefNode(AudioNode* node)
594 {
595     ASSERT(isGraphOwner());
596
597     node->deref(AudioNode::RefTypeConnection);
598
599     for (unsigned i = 0; i < m_referencedNodes.size(); ++i) {
600         if (node == m_referencedNodes[i]) {
601             m_referencedNodes.remove(i);
602             break;
603         }
604     }
605 }
606
607 void AudioContext::derefUnfinishedSourceNodes()
608 {
609     ASSERT(isMainThread());
610     for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
611         m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);
612
613     m_referencedNodes.clear();
614 }
615
616 void AudioContext::lock(bool& mustReleaseLock)
617 {
618     // Don't allow regular lock in real-time audio thread.
619     ASSERT(isMainThread());
620
621     ThreadIdentifier thisThread = currentThread();
622
623     if (thisThread == m_graphOwnerThread) {
624         // We already have the lock.
625         mustReleaseLock = false;
626     } else {
627         // Acquire the lock.
628         m_contextGraphMutex.lock();
629         m_graphOwnerThread = thisThread;
630         mustReleaseLock = true;
631     }
632 }
633
// Non-blocking graph-lock acquisition for the real-time audio thread.
// Returns true when the lock is held on return; |mustReleaseLock| tells the
// caller whether it must call unlock() (false on recursive acquisition).
bool AudioContext::tryLock(bool& mustReleaseLock)
{
    ThreadIdentifier thisThread = currentThread();
    bool isAudioThread = thisThread == audioThread();

    // Try to catch cases of using try lock on main thread - it should use regular lock.
    ASSERT(isAudioThread);

    if (!isAudioThread) {
        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
        lock(mustReleaseLock);
        return true;
    }

    bool hasLock;

    if (thisThread == m_graphOwnerThread) {
        // Thread already has the lock.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // Don't already have the lock - try to acquire it.
        hasLock = m_contextGraphMutex.tryLock();

        if (hasLock)
            m_graphOwnerThread = thisThread;

        mustReleaseLock = hasLock;
    }

    return hasLock;
}
666
void AudioContext::unlock()
{
    ASSERT(currentThread() == m_graphOwnerThread);

    // Clear the owner BEFORE releasing the mutex: once unlocked, another
    // thread may immediately acquire it and set itself as owner.
    m_graphOwnerThread = UndefinedThreadIdentifier;
    m_contextGraphMutex.unlock();
}
674
// True when called from the real-time audio rendering thread.
bool AudioContext::isAudioThread() const
{
    return currentThread() == m_audioThread;
}
679
// True when the calling thread currently holds the graph lock.
bool AudioContext::isGraphOwner() const
{
    return currentThread() == m_graphOwnerThread;
}
684
// Queues a finish-deref to be completed later under the graph lock by
// handleDeferredFinishDerefs().
void AudioContext::addDeferredFinishDeref(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_deferredFinishDerefList.append(node);
}
690
void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
709
void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    // Must use a tryLock() here too.  Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Take care of finishing any derefs where the tryLock() failed previously.
        handleDeferredFinishDerefs();

        // Dynamically clean up nodes which are no longer needed.
        derefFinishedSourceNodes();

        // Don't delete in the real-time thread. Let the main thread do it.
        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
        scheduleNodeDeletion();

        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
739
740 void AudioContext::handleDeferredFinishDerefs()
741 {
742     ASSERT(isAudioThread() && isGraphOwner());
743     for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
744         AudioNode* node = m_deferredFinishDerefList[i];
745         node->finishDeref(AudioNode::RefTypeConnection);
746     }
747
748     m_deferredFinishDerefList.clear();
749 }
750
// Queues |node| for deletion on the main thread.
void AudioContext::markForDeletion(AudioNode* node)
{
    ASSERT(isGraphOwner());

    // Before initialization (or after teardown) there is no audio thread to
    // migrate the node through the "marked" list, so queue it directly.
    if (!isInitialized())
        m_nodesToDelete.append(node);
    else
        m_nodesMarkedForDeletion.append(node);

    // This is probably the best time for us to remove the node from automatic pull list,
    // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
    // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
    // modify m_renderingAutomaticPullNodes.
    removeAutomaticPullNode(node);
}
766
// Audio-thread side of node deletion: moves marked nodes to the delete queue
// and schedules deleteMarkedNodes() on the main thread.
void AudioContext::scheduleNodeDeletion()
{
    bool isGood = isInitialized() && isGraphOwner();
    ASSERT(isGood);
    if (!isGood)
        return;

    // Make sure to call deleteMarkedNodes() on main thread.
    // m_isDeletionScheduled coalesces repeated requests into one main-thread task.
    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();

        m_isDeletionScheduled = true;

        // Don't let ourself get deleted before the callback.
        // See matching deref() in deleteMarkedNodesDispatch().
        ref();
        callOnMainThread(deleteMarkedNodesDispatch, this);
    }
}
787
788 void AudioContext::deleteMarkedNodesDispatch(void* userData)
789 {
790     AudioContext* context = reinterpret_cast<AudioContext*>(userData);
791     ASSERT(context);
792     if (!context)
793         return;
794
795     context->deleteMarkedNodes();
796     context->deref();
797 }
798
// Main-thread deletion of nodes queued in m_nodesToDelete.
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isMainThread());

    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
    RefPtrWillBeRawPtr<AudioContext> protect(this);
    {
        AutoLocker locker(this);

        while (size_t n = m_nodesToDelete.size()) {
            AudioNode* node = m_nodesToDelete[n - 1];
            m_nodesToDelete.removeLast();

            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
            unsigned numberOfInputs = node->numberOfInputs();
            for (unsigned i = 0; i < numberOfInputs; ++i)
                m_dirtySummingJunctions.remove(node->input(i));

            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
            unsigned numberOfOutputs = node->numberOfOutputs();
            for (unsigned i = 0; i < numberOfOutputs; ++i)
                m_dirtyAudioNodeOutputs.remove(node->output(i));
#if ENABLE(OILPAN)
            // Finally, clear the keep alive handle that keeps this
            // object from being collected.
            node->clearKeepAlive();
#else
            // Finally, delete it.
            delete node;
#endif
        }
        m_isDeletionScheduled = false;
    }
}
833
// Records that |summingJunction| needs its rendering state rebuilt; processed
// in handleDirtyAudioSummingJunctions().
void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
{
    ASSERT(isGraphOwner());
    m_dirtySummingJunctions.add(summingJunction);
}
839
// Removes a junction from the dirty set (e.g. when it is being destroyed)
// before the audio thread gets a chance to process it.
void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);
    m_dirtySummingJunctions.remove(summingJunction);
}
846
// Records that |output| needs its rendering state rebuilt; processed in
// handleDirtyAudioNodeOutputs().
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());
    m_dirtyAudioNodeOutputs.add(output);
}
852
853 void AudioContext::handleDirtyAudioSummingJunctions()
854 {
855     ASSERT(isGraphOwner());
856
857     for (HashSet<AudioSummingJunction* >::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
858         (*i)->updateRenderingState();
859
860     m_dirtySummingJunctions.clear();
861 }
862
863 void AudioContext::handleDirtyAudioNodeOutputs()
864 {
865     ASSERT(isGraphOwner());
866
867     for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
868         (*i)->updateRenderingState();
869
870     m_dirtyAudioNodeOutputs.clear();
871 }
872
873 void AudioContext::addAutomaticPullNode(AudioNode* node)
874 {
875     ASSERT(isGraphOwner());
876
877     if (!m_automaticPullNodes.contains(node)) {
878         m_automaticPullNodes.add(node);
879         m_automaticPullNodesNeedUpdating = true;
880     }
881 }
882
883 void AudioContext::removeAutomaticPullNode(AudioNode* node)
884 {
885     ASSERT(isGraphOwner());
886
887     if (m_automaticPullNodes.contains(node)) {
888         m_automaticPullNodes.remove(node);
889         m_automaticPullNodesNeedUpdating = true;
890     }
891 }
892
893 void AudioContext::updateAutomaticPullNodes()
894 {
895     ASSERT(isGraphOwner());
896
897     if (m_automaticPullNodesNeedUpdating) {
898         // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
899         m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
900
901         unsigned j = 0;
902         for (HashSet<AudioNode*>::iterator i = m_automaticPullNodes.begin(); i != m_automaticPullNodes.end(); ++i, ++j) {
903             AudioNode* output = *i;
904             m_renderingAutomaticPullNodes[j] = output;
905         }
906
907         m_automaticPullNodesNeedUpdating = false;
908     }
909 }
910
911 void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
912 {
913     ASSERT(isAudioThread());
914
915     for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i)
916         m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
917 }
918
// EventTarget interface name exposed to the bindings layer.
const AtomicString& AudioContext::interfaceName() const
{
    return EventTargetNames::AudioContext;
}
923
// Once stop() has been scheduled the context behaves as if detached from its
// document, so report no ExecutionContext.
ExecutionContext* AudioContext::executionContext() const
{
    return m_isStopScheduled ? 0 : ActiveDOMObject::executionContext();
}
928
// Explicitly kicks off rendering on the destination node.
void AudioContext::startRendering()
{
    destination()->startRendering();
}
933
934 void AudioContext::fireCompletionEvent()
935 {
936     ASSERT(isMainThread());
937     if (!isMainThread())
938         return;
939
940     AudioBuffer* renderedBuffer = m_renderTarget.get();
941
942     ASSERT(renderedBuffer);
943     if (!renderedBuffer)
944         return;
945
946     // Avoid firing the event if the document has already gone away.
947     if (executionContext()) {
948         // Call the offline rendering completion event listener.
949         dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
950     }
951 }
952
// Oilpan tracing: visit all traced members so the garbage collector keeps
// them alive, then delegate to the base class.
void AudioContext::trace(Visitor* visitor)
{
    visitor->trace(m_renderTarget);
    visitor->trace(m_destinationNode);
    visitor->trace(m_listener);
    visitor->trace(m_dirtySummingJunctions);
    EventTargetWithInlineData::trace(visitor);
}
961
962 } // namespace WebCore
963
964 #endif // ENABLE(WEB_AUDIO)