#include "modules/webaudio/AudioContext.h"
-#include "bindings/v8/ExceptionMessages.h"
-#include "bindings/v8/ExceptionState.h"
+#include "bindings/core/v8/ExceptionMessages.h"
+#include "bindings/core/v8/ExceptionState.h"
#include "core/dom/Document.h"
#include "core/dom/ExceptionCode.h"
#include "core/html/HTMLMediaElement.h"
#include "core/inspector/ScriptCallStack.h"
#include "platform/audio/FFTFrame.h"
-#include "platform/audio/HRTFDatabaseLoader.h"
#include "platform/audio/HRTFPanner.h"
#include "modules/mediastream/MediaStream.h"
#include "modules/webaudio/AnalyserNode.h"
-#include "modules/webaudio/AsyncAudioDecoder.h"
#include "modules/webaudio/AudioBuffer.h"
#include "modules/webaudio/AudioBufferCallback.h"
#include "modules/webaudio/AudioBufferSourceNode.h"
#include "wtf/ArrayBuffer.h"
#include "wtf/Atomics.h"
-#include "wtf/MainThread.h"
-#include "wtf/OwnPtr.h"
#include "wtf/PassOwnPtr.h"
-#include "wtf/RefCounted.h"
#include "wtf/text/WTFString.h"
// FIXME: check the proper way to reference an undefined thread ID
-const int UndefinedThreadIdentifier = 0xffffffff;
+const WTF::ThreadIdentifier UndefinedThreadIdentifier = 0xffffffff;
-namespace WebCore {
+namespace blink {
bool AudioContext::isSampleRateRangeGood(float sampleRate)
{
}
// Don't allow more than this number of simultaneous AudioContexts talking to hardware.
-const unsigned MaxHardwareContexts = 4;
+const unsigned MaxHardwareContexts = 6;
unsigned AudioContext::s_hardwareContextCount = 0;
-PassRefPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& es)
+PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
if (s_hardwareContextCount >= MaxHardwareContexts) {
- es.throwDOMException(
+ exceptionState.throwDOMException(
SyntaxError,
- ExceptionMessages::failedToConstruct(
- "AudioContext",
- "number of hardware contexts reached maximum (" + String::number(MaxHardwareContexts) + ")."));
- return 0;
+ "number of hardware contexts reached maximum (" + String::number(MaxHardwareContexts) + ").");
+ return nullptr;
}
- RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(&document)));
+ RefPtrWillBeRawPtr<AudioContext> audioContext(adoptRefWillBeThreadSafeRefCountedGarbageCollected(new AudioContext(&document)));
audioContext->suspendIfNeeded();
return audioContext.release();
}
-PassRefPtr<AudioContext> AudioContext::create(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& es)
-{
- document.addConsoleMessage(JSMessageSource, WarningMessageLevel, "Deprecated AudioContext constructor: use OfflineAudioContext instead");
- return OfflineAudioContext::create(&document, numberOfChannels, numberOfFrames, sampleRate, es);
-}
-
// Constructor for rendering to the audio hardware.
AudioContext::AudioContext(Document* document)
: ActiveDOMObject(document)
, m_isStopScheduled(false)
+ , m_isCleared(false)
, m_isInitialized(false)
- , m_isAudioThreadFinished(false)
- , m_destinationNode(0)
+ , m_destinationNode(nullptr)
+#if !ENABLE(OILPAN)
, m_isDeletionScheduled(false)
+#endif
, m_automaticPullNodesNeedUpdating(false)
, m_connectionCount(0)
, m_audioThread(0)
, m_graphOwnerThread(UndefinedThreadIdentifier)
, m_isOfflineContext(false)
- , m_activeSourceCount(0)
{
- constructCommon();
+ ScriptWrappable::init(this);
m_destinationNode = DefaultAudioDestinationNode::create(this);
- // This sets in motion an asynchronous loading mechanism on another thread.
- // We can check m_hrtfDatabaseLoader->isLoaded() to find out whether or not it has been fully loaded.
- // It's not that useful to have a callback function for this since the audio thread automatically starts rendering on the graph
- // when this has finished (see AudioDestinationNode).
- m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate());
+ initialize();
+#if DEBUG_AUDIONODE_REFERENCES
+ fprintf(stderr, "%p: AudioContext::AudioContext() #%u\n", this, AudioContext::s_hardwareContextCount);
+#endif
}
// Constructor for offline (non-realtime) rendering.
AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
: ActiveDOMObject(document)
, m_isStopScheduled(false)
+ , m_isCleared(false)
, m_isInitialized(false)
- , m_isAudioThreadFinished(false)
- , m_destinationNode(0)
+ , m_destinationNode(nullptr)
+#if !ENABLE(OILPAN)
+ , m_isDeletionScheduled(false)
+#endif
, m_automaticPullNodesNeedUpdating(false)
, m_connectionCount(0)
, m_audioThread(0)
, m_graphOwnerThread(UndefinedThreadIdentifier)
, m_isOfflineContext(true)
- , m_activeSourceCount(0)
{
- constructCommon();
-
- // FIXME: the passed in sampleRate MUST match the hardware sample-rate since HRTFDatabaseLoader is a singleton.
- m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate);
+ ScriptWrappable::init(this);
// Create a new destination for offline rendering.
m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
- ASSERT(m_renderTarget);
- m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
- ASSERT(m_destinationNode);
-}
-
-void AudioContext::constructCommon()
-{
- ScriptWrappable::init(this);
- // According to spec AudioContext must die only after page navigate.
- // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
- setPendingActivity(this);
-
- FFTFrame::initialize();
+ if (m_renderTarget.get())
+ m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
- m_listener = AudioListener::create();
+ initialize();
}
AudioContext::~AudioContext()
#endif
// AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
ASSERT(!m_isInitialized);
- ASSERT(m_isStopScheduled);
+#if !ENABLE(OILPAN)
ASSERT(!m_nodesToDelete.size());
ASSERT(!m_referencedNodes.size());
+#endif
ASSERT(!m_finishedNodes.size());
ASSERT(!m_automaticPullNodes.size());
if (m_automaticPullNodesNeedUpdating)
ASSERT(!m_renderingAutomaticPullNodes.size());
}
-void AudioContext::lazyInitialize()
+void AudioContext::initialize()
{
- if (!m_isInitialized) {
- // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
- ASSERT(!m_isAudioThreadFinished);
- if (!m_isAudioThreadFinished) {
- if (m_destinationNode.get()) {
- m_destinationNode->initialize();
+ if (isInitialized())
+ return;
+
+ FFTFrame::initialize();
+ m_listener = AudioListener::create();
- if (!isOfflineContext()) {
- // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
- // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
- // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
- // We may want to consider requiring it for symmetry with OfflineAudioContext.
- m_destinationNode->startRendering();
- ++s_hardwareContextCount;
- }
+ if (m_destinationNode.get()) {
+ m_destinationNode->initialize();
- }
- m_isInitialized = true;
+ if (!isOfflineContext()) {
+ // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
+ // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
+ // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
+ // We may want to consider requiring it for symmetry with OfflineAudioContext.
+ m_destinationNode->startRendering();
+ ++s_hardwareContextCount;
}
+
+ m_isInitialized = true;
}
}
void AudioContext::clear()
{
+#if ENABLE(OILPAN)
+ // We need to run disposers before destructing m_contextGraphMutex.
+ m_liveAudioSummingJunctions.clear();
+ m_liveNodes.clear();
+#else
+
// We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
if (m_destinationNode)
m_destinationNode.clear();
// Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
do {
- deleteMarkedNodes();
- m_nodesToDelete.append(m_nodesMarkedForDeletion);
+ m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
m_nodesMarkedForDeletion.clear();
+ deleteMarkedNodes();
} while (m_nodesToDelete.size());
+#endif
- // It was set in constructCommon.
- unsetPendingActivity(this);
+ m_isCleared = true;
}
void AudioContext::uninitialize()
{
ASSERT(isMainThread());
- if (!m_isInitialized)
+ if (!isInitialized())
return;
// This stops the audio thread and all audio rendering.
m_destinationNode->uninitialize();
- // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
- m_isAudioThreadFinished = true;
-
if (!isOfflineContext()) {
ASSERT(s_hardwareContextCount);
--s_hardwareContextCount;
derefUnfinishedSourceNodes();
m_isInitialized = false;
-}
-
-bool AudioContext::isInitialized() const
-{
- return m_isInitialized;
-}
-
-bool AudioContext::isRunnable() const
-{
- if (!isInitialized())
- return false;
-
- // Check with the HRTF spatialization system to see if it's finished loading.
- return m_hrtfDatabaseLoader->isLoaded();
-}
-
-void AudioContext::stopDispatch(void* userData)
-{
- AudioContext* context = reinterpret_cast<AudioContext*>(userData);
- ASSERT(context);
- if (!context)
- return;
-
- context->uninitialize();
- context->clear();
+ clear();
}
void AudioContext::stop()
// of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
// ActiveDOMObjects so let's schedule uninitialize() to be called later.
// FIXME: see if there's a more direct way to handle this issue.
- callOnMainThread(stopDispatch, this);
-}
-
-PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& es)
-{
- RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
- if (!audioBuffer.get()) {
- if (numberOfChannels > AudioContext::maxNumberOfChannels()) {
- es.throwDOMException(
- NotSupportedError,
- ExceptionMessages::failedToConstruct(
- "AudioBuffer",
- "requested number of channels (" + String::number(numberOfChannels) + ") exceeds maximum (" + String::number(AudioContext::maxNumberOfChannels()) + ")"));
- } else if (sampleRate < AudioBuffer::minAllowedSampleRate() || sampleRate > AudioBuffer::maxAllowedSampleRate()) {
- es.throwDOMException(
- NotSupportedError,
- ExceptionMessages::failedToConstruct(
- "AudioBuffer",
- "requested sample rate (" + String::number(sampleRate)
- + ") does not lie in the allowed range of "
- + String::number(AudioBuffer::minAllowedSampleRate())
- + "-" + String::number(AudioBuffer::maxAllowedSampleRate()) + " Hz"));
- } else if (!numberOfFrames) {
- es.throwDOMException(
- NotSupportedError,
- ExceptionMessages::failedToConstruct(
- "AudioBuffer",
- "number of frames must be greater than 0."));
- } else {
- es.throwDOMException(
- NotSupportedError,
- ExceptionMessages::failedToConstruct(
- "AudioBuffer",
- "unable to create buffer of " + String::number(numberOfChannels)
- + " channel(s) of " + String::number(numberOfFrames)
- + " frames each."));
- }
- return 0;
- }
-
- return audioBuffer;
+ callOnMainThread(bind(&AudioContext::uninitialize, PassRefPtrWillBeRawPtr<AudioContext>(this)));
}
-PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionState& es)
+bool AudioContext::hasPendingActivity() const
{
- ASSERT(arrayBuffer);
- if (!arrayBuffer) {
- es.throwDOMException(
- SyntaxError,
- ExceptionMessages::failedToConstruct(
- "AudioBuffer",
- "invalid ArrayBuffer."));
- return 0;
- }
+ // According to spec AudioContext must die only after page navigates.
+ return !m_isCleared;
+}
- RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
- if (!audioBuffer.get()) {
- es.throwDOMException(
- SyntaxError,
- ExceptionMessages::failedToConstruct(
- "AudioBuffer",
- "invalid audio data in ArrayBuffer."));
- return 0;
- }
+PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
+{
+ RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);
return audioBuffer;
}
-void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionState& es)
+void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBufferCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, ExceptionState& exceptionState)
{
if (!audioData) {
- es.throwDOMException(
+ exceptionState.throwDOMException(
SyntaxError,
- ExceptionMessages::failedToExecute(
- "decodeAudioData",
- "AudioContext",
- "invalid ArrayBuffer for audioData."));
+ "invalid ArrayBuffer for audioData.");
return;
}
m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
}
-PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
+PassRefPtrWillBeRawPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
{
ASSERT(isMainThread());
- lazyInitialize();
- RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
+ RefPtrWillBeRawPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
// Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
// When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
return node;
}
-PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& es)
+PassRefPtrWillBeRawPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
{
+ ASSERT(isMainThread());
if (!mediaElement) {
- es.throwDOMException(
+ exceptionState.throwDOMException(
InvalidStateError,
- ExceptionMessages::failedToConstruct(
- "MediaElementAudioSourceNode",
- "invalid HTMLMedialElement."));
- return 0;
+            "invalid HTMLMediaElement.");
+ return nullptr;
}
- ASSERT(isMainThread());
- lazyInitialize();
-
// First check if this media element already has a source node.
if (mediaElement->audioSourceNode()) {
- es.throwDOMException(
+ exceptionState.throwDOMException(
InvalidStateError,
- ExceptionMessages::failedToConstruct(
- "MediaElementAudioSourceNode",
- "invalid HTMLMediaElement."));
- return 0;
+ "invalid HTMLMediaElement.");
+ return nullptr;
}
- RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
+ RefPtrWillBeRawPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
mediaElement->setAudioSourceNode(node.get());
return node;
}
-PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& es)
+PassRefPtrWillBeRawPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
{
+ ASSERT(isMainThread());
if (!mediaStream) {
- es.throwDOMException(
+ exceptionState.throwDOMException(
InvalidStateError,
- ExceptionMessages::failedToConstruct(
- "MediaStreamAudioSourceNode",
- "invalid MediaStream source"));
- return 0;
+ "invalid MediaStream source");
+ return nullptr;
}
- ASSERT(isMainThread());
- lazyInitialize();
-
- AudioSourceProvider* provider = 0;
-
MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
-
- // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
- for (size_t i = 0; i < audioTracks.size(); ++i) {
- RefPtr<MediaStreamTrack> localAudio = audioTracks[i];
- if (localAudio->component()->audioSourceProvider()) {
- provider = localAudio->component()->audioSourceProvider();
- break;
- }
+ if (audioTracks.isEmpty()) {
+ exceptionState.throwDOMException(
+ InvalidStateError,
+ "MediaStream has no audio track");
+ return nullptr;
}
- RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, provider);
+ // Use the first audio track in the media stream.
+ MediaStreamTrack* audioTrack = audioTracks[0];
+ OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource();
+ RefPtrWillBeRawPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack, provider.release());
// FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
node->setFormat(2, sampleRate());
return node;
}
-PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
+PassRefPtrWillBeRawPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
{
- // FIXME: Add support for an optional argument which specifies the number of channels.
- // FIXME: The default should probably be stereo instead of mono.
- return MediaStreamAudioDestinationNode::create(this, 1);
+ // Set number of output channels to stereo by default.
+ return MediaStreamAudioDestinationNode::create(this, 2);
}
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& es)
+PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& exceptionState)
{
// Set number of input/output channels to stereo by default.
- return createScriptProcessor(0, 2, 2, es);
+ return createScriptProcessor(0, 2, 2, exceptionState);
}
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& es)
+PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
{
// Set number of input/output channels to stereo by default.
- return createScriptProcessor(bufferSize, 2, 2, es);
+ return createScriptProcessor(bufferSize, 2, 2, exceptionState);
}
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& es)
+PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
{
// Set number of output channels to stereo by default.
- return createScriptProcessor(bufferSize, numberOfInputChannels, 2, es);
+ return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState);
}
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& es)
+PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
- lazyInitialize();
- RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
+ RefPtrWillBeRawPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
if (!node.get()) {
if (!numberOfInputChannels && !numberOfOutputChannels) {
- es.throwDOMException(
+ exceptionState.throwDOMException(
IndexSizeError,
- ExceptionMessages::failedToConstruct(
- "ScriptProcessorNode",
- "number of input channels and output channels cannot both be zero."));
+ "number of input channels and output channels cannot both be zero.");
} else if (numberOfInputChannels > AudioContext::maxNumberOfChannels()) {
- es.throwDOMException(
+ exceptionState.throwDOMException(
IndexSizeError,
- ExceptionMessages::failedToConstruct(
- "ScriptProcessorNode",
- "number of input channels (" + String::number(numberOfInputChannels)
- + ") exceeds maximum ("
- + String::number(AudioContext::maxNumberOfChannels()) + ")."));
+ "number of input channels (" + String::number(numberOfInputChannels)
+ + ") exceeds maximum ("
+ + String::number(AudioContext::maxNumberOfChannels()) + ").");
} else if (numberOfOutputChannels > AudioContext::maxNumberOfChannels()) {
- es.throwDOMException(
+ exceptionState.throwDOMException(
IndexSizeError,
- ExceptionMessages::failedToConstruct(
- "ScriptProcessorNode",
- "number of output channels (" + String::number(numberOfInputChannels)
- + ") exceeds maximum ("
- + String::number(AudioContext::maxNumberOfChannels()) + ")."));
+            "number of output channels (" + String::number(numberOfOutputChannels)
+ + ") exceeds maximum ("
+ + String::number(AudioContext::maxNumberOfChannels()) + ").");
} else {
- es.throwDOMException(
+ exceptionState.throwDOMException(
IndexSizeError,
- ExceptionMessages::failedToConstruct(
- "ScriptProcessorNode",
- "buffer size (" + String::number(bufferSize)
- + ") must be a power of two between 256 and 16384."));
+ "buffer size (" + String::number(bufferSize)
+ + ") must be a power of two between 256 and 16384.");
}
- return 0;
+ return nullptr;
}
refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
return node;
}
-PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
+PassRefPtrWillBeRawPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
{
ASSERT(isMainThread());
- lazyInitialize();
return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
}
-PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper()
+PassRefPtrWillBeRawPtr<WaveShaperNode> AudioContext::createWaveShaper()
{
ASSERT(isMainThread());
- lazyInitialize();
return WaveShaperNode::create(this);
}
-PassRefPtr<PannerNode> AudioContext::createPanner()
+PassRefPtrWillBeRawPtr<PannerNode> AudioContext::createPanner()
{
ASSERT(isMainThread());
- lazyInitialize();
return PannerNode::create(this, m_destinationNode->sampleRate());
}
-PassRefPtr<ConvolverNode> AudioContext::createConvolver()
+PassRefPtrWillBeRawPtr<ConvolverNode> AudioContext::createConvolver()
{
ASSERT(isMainThread());
- lazyInitialize();
return ConvolverNode::create(this, m_destinationNode->sampleRate());
}
-PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
+PassRefPtrWillBeRawPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
{
ASSERT(isMainThread());
- lazyInitialize();
return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
}
-PassRefPtr<AnalyserNode> AudioContext::createAnalyser()
+PassRefPtrWillBeRawPtr<AnalyserNode> AudioContext::createAnalyser()
{
ASSERT(isMainThread());
- lazyInitialize();
return AnalyserNode::create(this, m_destinationNode->sampleRate());
}
-PassRefPtr<GainNode> AudioContext::createGain()
+PassRefPtrWillBeRawPtr<GainNode> AudioContext::createGain()
{
ASSERT(isMainThread());
- lazyInitialize();
return GainNode::create(this, m_destinationNode->sampleRate());
}
-PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionState& es)
+PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState)
{
const double defaultMaxDelayTime = 1;
- return createDelay(defaultMaxDelayTime, es);
+ return createDelay(defaultMaxDelayTime, exceptionState);
}
-PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& es)
+PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
- lazyInitialize();
- RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, es);
- if (es.hadException())
- return 0;
+ RefPtrWillBeRawPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState);
+ if (exceptionState.hadException())
+ return nullptr;
return node;
}
-PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& es)
+PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState)
{
const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
- return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, es);
+ return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState);
}
-PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& es)
+PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
- lazyInitialize();
- RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
+ RefPtrWillBeRawPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
if (!node.get()) {
- es.throwDOMException(
+ exceptionState.throwDOMException(
IndexSizeError,
- ExceptionMessages::failedToConstruct(
- "ChannelSplitterNode",
- "number of outputs (" + String::number(numberOfOutputs)
- + ") must be between 1 and "
- + String::number(AudioContext::maxNumberOfChannels()) + "."));
- return 0;
+ "number of outputs (" + String::number(numberOfOutputs)
+ + ") must be between 1 and "
+ + String::number(AudioContext::maxNumberOfChannels()) + ".");
+ return nullptr;
}
return node;
}
-PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& es)
+PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState)
{
const unsigned ChannelMergerDefaultNumberOfInputs = 6;
- return createChannelMerger(ChannelMergerDefaultNumberOfInputs, es);
+ return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState);
}
-PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& es)
+PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
- lazyInitialize();
- RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
+ RefPtrWillBeRawPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
if (!node.get()) {
- es.throwDOMException(
+ exceptionState.throwDOMException(
IndexSizeError,
- ExceptionMessages::failedToConstruct(
- "ChannelMergerNode",
- "number of inputs (" + String::number(numberOfInputs)
- + ") must be between 1 and "
- + String::number(AudioContext::maxNumberOfChannels()) + "."));
- return 0;
+ "number of inputs (" + String::number(numberOfInputs)
+ + ") must be between 1 and "
+ + String::number(AudioContext::maxNumberOfChannels()) + ".");
+ return nullptr;
}
return node;
}
-PassRefPtr<OscillatorNode> AudioContext::createOscillator()
+PassRefPtrWillBeRawPtr<OscillatorNode> AudioContext::createOscillator()
{
ASSERT(isMainThread());
- lazyInitialize();
- RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
+ RefPtrWillBeRawPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
// Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
// When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
return node;
}
-PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& es)
+PassRefPtrWillBeRawPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState)
{
ASSERT(isMainThread());
if (!real) {
- es.throwDOMException(
+ exceptionState.throwDOMException(
SyntaxError,
- ExceptionMessages::failedToConstruct(
- "PeriodicWave",
- "invalid real array"));
- return 0;
+ "invalid real array");
+ return nullptr;
}
if (!imag) {
- es.throwDOMException(
+ exceptionState.throwDOMException(
SyntaxError,
- ExceptionMessages::failedToConstruct(
- "PeriodicWave",
- "invalid imaginary array"));
- return 0;
+ "invalid imaginary array");
+ return nullptr;
}
if (real->length() != imag->length()) {
- es.throwDOMException(
+ exceptionState.throwDOMException(
IndexSizeError,
- ExceptionMessages::failedToConstruct(
- "PeriodicWave",
- "length of real array (" + String::number(real->length())
- + ") and length of imaginary array (" + String::number(imag->length())
- + ") must match."));
- return 0;
+ "length of real array (" + String::number(real->length())
+ + ") and length of imaginary array (" + String::number(imag->length())
+ + ") must match.");
+ return nullptr;
}
if (real->length() > 4096) {
- es.throwDOMException(
+ exceptionState.throwDOMException(
IndexSizeError,
- ExceptionMessages::failedToConstruct(
- "PeriodicWave",
- "length of real array (" + String::number(real->length())
- + ") exceeds allowed maximum of 4096"));
- return 0;
+ "length of real array (" + String::number(real->length())
+ + ") exceeds allowed maximum of 4096");
+ return nullptr;
}
if (imag->length() > 4096) {
- es.throwDOMException(
+ exceptionState.throwDOMException(
IndexSizeError,
- ExceptionMessages::failedToConstruct(
- "PeriodicWave",
- "length of imaginary array (" + String::number(imag->length())
- + ") exceeds allowed maximum of 4096"));
- return 0;
+ "length of imaginary array (" + String::number(imag->length())
+ + ") exceeds allowed maximum of 4096");
+ return nullptr;
}
- lazyInitialize();
return PeriodicWave::create(sampleRate(), real, imag);
}
void AudioContext::derefFinishedSourceNodes()
{
ASSERT(isGraphOwner());
- ASSERT(isAudioThread() || isAudioThreadFinished());
+ ASSERT(isAudioThread());
for (unsigned i = 0; i < m_finishedNodes.size(); i++)
derefNode(m_finishedNodes[i]);
ASSERT(isMainThread());
AutoLocker locker(this);
- node->ref(AudioNode::RefTypeConnection);
m_referencedNodes.append(node);
+ node->makeConnection();
}
void AudioContext::derefNode(AudioNode* node)
{
ASSERT(isGraphOwner());
- node->deref(AudioNode::RefTypeConnection);
-
for (unsigned i = 0; i < m_referencedNodes.size(); ++i) {
- if (node == m_referencedNodes[i]) {
+ if (node == m_referencedNodes[i].get()) {
+ node->breakConnection();
m_referencedNodes.remove(i);
break;
}
void AudioContext::derefUnfinishedSourceNodes()
{
- ASSERT(isMainThread() && isAudioThreadFinished());
+ ASSERT(isMainThread());
for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
- m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);
+ m_referencedNodes[i]->breakConnection();
m_referencedNodes.clear();
}
bool isAudioThread = thisThread == audioThread();
// Try to catch cases of using try lock on main thread - it should use regular lock.
- ASSERT(isAudioThread || isAudioThreadFinished());
+ ASSERT(isAudioThread);
if (!isAudioThread) {
// In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
return currentThread() == m_graphOwnerThread;
}
+void AudioContext::addDeferredBreakConnection(AudioNode& node)
+{
+ ASSERT(isAudioThread());
+ m_deferredBreakConnectionList.append(&node);
+}
+
+#if !ENABLE(OILPAN)
void AudioContext::addDeferredFinishDeref(AudioNode* node)
{
ASSERT(isAudioThread());
m_deferredFinishDerefList.append(node);
}
+#endif
void AudioContext::handlePreRenderTasks()
{
// from the render graph (in which case they'll render silence).
bool mustReleaseLock;
if (tryLock(mustReleaseLock)) {
- // Take care of finishing any derefs where the tryLock() failed previously.
- handleDeferredFinishDerefs();
+ // Take care of AudioNode tasks where the tryLock() failed previously.
+ handleDeferredAudioNodeTasks();
// Dynamically clean up nodes which are no longer needed.
derefFinishedSourceNodes();
+ // Under Oilpan the garbage collector reclaims nodes, so explicit
+ // deletion scheduling exists only in the non-Oilpan build.
+#if !ENABLE(OILPAN)
// Don't delete in the real-time thread. Let the main thread do it.
// Ref-counted objects held by certain AudioNodes may not be thread-safe.
scheduleNodeDeletion();
+#endif
// Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
handleDirtyAudioSummingJunctions();
}
}
-void AudioContext::handleDeferredFinishDerefs()
+// Performs deferred AudioNode cleanup that could not run earlier because
+// tryLock() failed: breaks queued connections and, in non-Oilpan builds,
+// finishes queued derefs. Audio thread only, graph lock held.
+void AudioContext::handleDeferredAudioNodeTasks()
{
ASSERT(isAudioThread() && isGraphOwner());
- for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
- AudioNode* node = m_deferredFinishDerefList[i];
- node->finishDeref(AudioNode::RefTypeConnection);
- }
+ for (unsigned i = 0; i < m_deferredBreakConnectionList.size(); ++i)
+ m_deferredBreakConnectionList[i]->breakConnectionWithLock();
+ m_deferredBreakConnectionList.clear();
+
+#if !ENABLE(OILPAN)
+ // NOTE(review): finishDeref() is now called without a ref-type argument —
+ // confirm AudioNode::finishDeref()'s signature changed accordingly.
+ for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i)
+ m_deferredFinishDerefList[i]->finishDeref();
m_deferredFinishDerefList.clear();
+#endif
+}
+
+#if ENABLE(OILPAN)
+// With Oilpan, nodes are garbage collected; registration attaches a disposer
+// whose destructor performs pre-finalization cleanup on the main thread.
+void AudioContext::registerLiveNode(AudioNode& node)
+{
+ ASSERT(isMainThread());
+ m_liveNodes.add(&node, adoptPtr(new AudioNodeDisposer(node)));
+}
+
+// Runs when the node's disposer is destroyed: disposes the node while
+// holding the graph lock so the render thread never sees a half-dead node.
+AudioContext::AudioNodeDisposer::~AudioNodeDisposer()
+{
+ ASSERT(isMainThread());
+ AudioContext::AutoLocker locker(m_node.context());
+ m_node.dispose();
+}
+
+void AudioContext::registerLiveAudioSummingJunction(AudioSummingJunction& junction)
+{
+ ASSERT(isMainThread());
+ m_liveAudioSummingJunctions.add(&junction, adoptPtr(new AudioSummingJunctionDisposer(junction)));
}
+// Unregisters the junction from the context's marked-junction list before it
+// is finalized.
+AudioContext::AudioSummingJunctionDisposer::~AudioSummingJunctionDisposer()
+{
+ ASSERT(isMainThread());
+ m_junction.context()->removeMarkedSummingJunction(&m_junction);
+}
+#else
+
+// Non-Oilpan only: queues |node| for deletion — immediately deletable when
+// the context is no longer initialized, otherwise deferred until the next
+// scheduleNodeDeletion() pass. Caller must hold the graph lock.
void AudioContext::markForDeletion(AudioNode* node)
{
ASSERT(isGraphOwner());
- if (isAudioThreadFinished())
+ if (!isInitialized())
m_nodesToDelete.append(node);
else
m_nodesMarkedForDeletion.append(node);
-
- // This is probably the best time for us to remove the node from automatic pull list,
- // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
- // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
- // modify m_renderingAutomaticPullNodes.
- removeAutomaticPullNode(node);
+ // NOTE(review): removeAutomaticPullNode() is no longer called here —
+ // confirm it moved into the node disposal path, otherwise stale entries
+ // would remain in the automatic pull list.
}
+// Moves nodes marked for deletion onto m_nodesToDelete and arranges for the
+// main thread to delete them (this whole region is non-Oilpan only).
void AudioContext::scheduleNodeDeletion()
{
- bool isGood = m_isInitialized && isGraphOwner();
+ bool isGood = isInitialized() && isGraphOwner();
ASSERT(isGood);
if (!isGood)
return;
// Make sure to call deleteMarkedNodes() on main thread.
if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
- m_nodesToDelete.append(m_nodesMarkedForDeletion);
+ // WTF::Vector: appending another vector uses appendVector().
+ m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
m_nodesMarkedForDeletion.clear();
m_isDeletionScheduled = true;
ASSERT(isMainThread());
// Protect this object from being deleted before we release the mutex locked by AutoLocker.
- RefPtr<AudioContext> protect(this);
+ RefPtrWillBeRawPtr<AudioContext> protect(this);
{
AutoLocker locker(this);
AudioNode* node = m_nodesToDelete[n - 1];
m_nodesToDelete.removeLast();
- // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
- unsigned numberOfInputs = node->numberOfInputs();
- for (unsigned i = 0; i < numberOfInputs; ++i)
- m_dirtySummingJunctions.remove(node->input(i));
-
- // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
- unsigned numberOfOutputs = node->numberOfOutputs();
- for (unsigned i = 0; i < numberOfOutputs; ++i)
- m_dirtyAudioNodeOutputs.remove(node->output(i));
+ // Dirty-set cleanup moved out of this loop — presumably into
+ // AudioNode::dispose() via unmarkDirtyNode(); verify.
+ node->dispose();
// Finally, delete it.
delete node;
m_isDeletionScheduled = false;
}
}
+#endif
+
+// Removes |node|'s inputs/outputs from the dirty sets so a disposed node is
+// never touched during dirty-state fixup. Caller must hold the graph lock.
+void AudioContext::unmarkDirtyNode(AudioNode& node)
+{
+ ASSERT(isGraphOwner());
+#if !ENABLE(OILPAN)
+ // Before deleting the node, clear out any AudioNodeInputs from
+ // m_dirtySummingJunctions.
+ unsigned numberOfInputs = node.numberOfInputs();
+ for (unsigned i = 0; i < numberOfInputs; ++i)
+ m_dirtySummingJunctions.remove(node.input(i));
+#endif
+
+ // NOTE(review): only the summing-junction cleanup is skipped under Oilpan
+ // (junctions have their own disposer); the output cleanup below runs in
+ // both builds — confirm this asymmetry is intended.
+ // Before deleting the node, clear out any AudioNodeOutputs from
+ // m_dirtyAudioNodeOutputs.
+ unsigned numberOfOutputs = node.numberOfOutputs();
+ for (unsigned i = 0; i < numberOfOutputs; ++i)
+ m_dirtyAudioNodeOutputs.remove(node.output(i));
+}
void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
{
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
ASSERT(isGraphOwner());
+ // Dirty outputs may only be marked from the main thread.
+ ASSERT(isMainThread());
m_dirtyAudioNodeOutputs.add(output);
}
{
ASSERT(isGraphOwner());
for (HashSet<AudioSummingJunction*>::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
(*i)->updateRenderingState();
m_dirtySummingJunctions.clear();
}
}
}
-void AudioContext::incrementActiveSourceCount()
+// Oilpan tracing: marks all GC-managed members reachable from this context.
+// NOTE(review): increment/decrementActiveSourceCount() are deleted in this
+// hunk — confirm their callers were removed in the same patch.
+void AudioContext::trace(Visitor* visitor)
{
- atomicIncrement(&m_activeSourceCount);
-}
-
-void AudioContext::decrementActiveSourceCount()
-{
- atomicDecrement(&m_activeSourceCount);
+ visitor->trace(m_renderTarget);
+ visitor->trace(m_destinationNode);
+ visitor->trace(m_listener);
+#if ENABLE(OILPAN)
+ // These collections contain traced members only in the Oilpan build.
+ visitor->trace(m_referencedNodes);
+ visitor->trace(m_liveNodes);
+ visitor->trace(m_liveAudioSummingJunctions);
+#endif
+ EventTargetWithInlineData::trace(visitor);
}
-} // namespace WebCore
+} // namespace blink
#endif // ENABLE(WEB_AUDIO)