#include "modules/webaudio/ScriptProcessorNode.h"
-#include "core/dom/Document.h"
+#include "core/dom/CrossThreadTask.h"
+#include "core/dom/ExecutionContext.h"
#include "modules/webaudio/AudioBuffer.h"
#include "modules/webaudio/AudioContext.h"
#include "modules/webaudio/AudioNodeInput.h"
#include "modules/webaudio/AudioProcessingEvent.h"
#include "public/platform/Platform.h"
#include "wtf/Float32Array.h"
-#include "wtf/MainThread.h"
-namespace WebCore {
+namespace blink {
static size_t chooseBufferSize()
{
// Choose a buffer size based on the audio hardware buffer size. Arbitarily make it a power of
// two that is 4 times greater than the hardware buffer size.
// FIXME: What is the best way to choose this?
- size_t hardwareBufferSize = blink::Platform::current()->audioHardwareBufferSize();
+ size_t hardwareBufferSize = Platform::current()->audioHardwareBufferSize();
size_t bufferSize = 1 << static_cast<unsigned>(log2(4 * hardwareBufferSize) + 0.5);
if (bufferSize < 256)
return bufferSize;
}
-PassRefPtr<ScriptProcessorNode> ScriptProcessorNode::create(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
+ScriptProcessorNode* ScriptProcessorNode::create(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
{
// Check for valid buffer size.
switch (bufferSize) {
case 16384:
break;
default:
- return nullptr;
+ return 0;
}
if (!numberOfInputChannels && !numberOfOutputChannels)
- return nullptr;
+ return 0;
if (numberOfInputChannels > AudioContext::maxNumberOfChannels())
- return nullptr;
+ return 0;
if (numberOfOutputChannels > AudioContext::maxNumberOfChannels())
- return nullptr;
+ return 0;
- return adoptRef(new ScriptProcessorNode(context, sampleRate, bufferSize, numberOfInputChannels, numberOfOutputChannels));
+ return adoptRefCountedGarbageCollectedWillBeNoop(new ScriptProcessorNode(context, sampleRate, bufferSize, numberOfInputChannels, numberOfOutputChannels));
}
ScriptProcessorNode::ScriptProcessorNode(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
, m_doubleBufferIndexForEvent(0)
, m_bufferSize(bufferSize)
, m_bufferReadWriteIndex(0)
- , m_isRequestOutstanding(false)
, m_numberOfInputChannels(numberOfInputChannels)
, m_numberOfOutputChannels(numberOfOutputChannels)
, m_internalInputBus(AudioBus::create(numberOfInputChannels, AudioNode::ProcessingSizeInFrames, false))
{
- ScriptWrappable::init(this);
// Regardless of the allowed buffer sizes, we still need to process at the granularity of the AudioNode.
if (m_bufferSize < AudioNode::ProcessingSizeInFrames)
m_bufferSize = AudioNode::ProcessingSizeInFrames;
ASSERT(numberOfInputChannels <= AudioContext::maxNumberOfChannels());
- addInput(adoptPtr(new AudioNodeInput(this)));
- addOutput(adoptPtr(new AudioNodeOutput(this, numberOfOutputChannels)));
+ addInput();
+ addOutput(AudioNodeOutput::create(this, numberOfOutputChannels));
setNodeType(NodeTypeJavaScript);
ScriptProcessorNode::~ScriptProcessorNode()
{
+ ASSERT(!isInitialized());
+}
+
+void ScriptProcessorNode::dispose()
+{
uninitialize();
+ AudioNode::dispose();
}
void ScriptProcessorNode::initialize()
// Create double buffers on both the input and output sides.
// These AudioBuffers will be directly accessed in the main thread by JavaScript.
for (unsigned i = 0; i < 2; ++i) {
- RefPtr<AudioBuffer> inputBuffer = m_numberOfInputChannels ? AudioBuffer::create(m_numberOfInputChannels, bufferSize(), sampleRate) : nullptr;
- RefPtr<AudioBuffer> outputBuffer = m_numberOfOutputChannels ? AudioBuffer::create(m_numberOfOutputChannels, bufferSize(), sampleRate) : nullptr;
+ AudioBuffer* inputBuffer = m_numberOfInputChannels ? AudioBuffer::create(m_numberOfInputChannels, bufferSize(), sampleRate) : 0;
+ AudioBuffer* outputBuffer = m_numberOfOutputChannels ? AudioBuffer::create(m_numberOfOutputChannels, bufferSize(), sampleRate) : 0;
m_inputBuffers.append(inputBuffer);
m_outputBuffers.append(outputBuffer);
if (!m_bufferReadWriteIndex) {
// Avoid building up requests on the main thread to fire process events when they're not being handled.
// This could be a problem if the main thread is very busy doing other things and is being held up handling previous requests.
- if (m_isRequestOutstanding) {
+ // The audio thread can't block on this lock, so we call tryLock() instead.
+ MutexTryLocker tryLocker(m_processEventLock);
+ if (!tryLocker.locked()) {
// We're late in handling the previous request. The main thread must be very busy.
// The best we can do is clear out the buffer ourself here.
outputBuffer->zero();
- } else {
- // Reference ourself so we don't accidentally get deleted before fireProcessEvent() gets called.
- ref();
-
+ } else if (context()->executionContext()) {
// Fire the event on the main thread, not this one (which is the realtime audio thread).
m_doubleBufferIndexForEvent = m_doubleBufferIndex;
- m_isRequestOutstanding = true;
- callOnMainThread(fireProcessEventDispatch, this);
+ context()->executionContext()->postTask(createCrossThreadTask(&ScriptProcessorNode::fireProcessEvent, this));
}
swapBuffers();
}
}
-void ScriptProcessorNode::fireProcessEventDispatch(void* userData)
-{
- ScriptProcessorNode* jsAudioNode = static_cast<ScriptProcessorNode*>(userData);
- ASSERT(jsAudioNode);
- if (!jsAudioNode)
- return;
-
- jsAudioNode->fireProcessEvent();
-
- // De-reference to match the ref() call in process().
- jsAudioNode->deref();
-}
-
void ScriptProcessorNode::fireProcessEvent()
{
- ASSERT(isMainThread() && m_isRequestOutstanding);
+ ASSERT(isMainThread());
bool isIndexGood = m_doubleBufferIndexForEvent < 2;
ASSERT(isIndexGood);
// Avoid firing the event if the document has already gone away.
if (context()->executionContext()) {
- // Let the audio thread know we've gotten to the point where it's OK for it to make another request.
- m_isRequestOutstanding = false;
+ // This synchronizes with process().
+ MutexLocker processLocker(m_processEventLock);
+
+ // Calculate a playbackTime with the buffer size which needs to be processed each time onaudioprocess is called.
+ // The outputBuffer being passed to JS will be played after exhausting the previous outputBuffer by double-buffering.
+ double playbackTime = (context()->currentSampleFrame() + m_bufferSize) / static_cast<double>(context()->sampleRate());
// Call the JavaScript event handler which will do the audio processing.
- dispatchEvent(AudioProcessingEvent::create(inputBuffer, outputBuffer));
+ dispatchEvent(AudioProcessingEvent::create(inputBuffer, outputBuffer, playbackTime));
}
}
return std::numeric_limits<double>::infinity();
}
-} // namespace WebCore
+void ScriptProcessorNode::trace(Visitor* visitor)
+{
+ visitor->trace(m_inputBuffers);
+ visitor->trace(m_outputBuffers);
+ AudioNode::trace(visitor);
+}
+
+} // namespace blink
#endif // ENABLE(WEB_AUDIO)