Upstream version 10.39.225.0
[platform/framework/web/crosswalk.git] / src / third_party / WebKit / Source / modules / webaudio / ScriptProcessorNode.cpp
index 79c446c..7150ee8 100644 (file)
@@ -28,7 +28,8 @@
 
 #include "modules/webaudio/ScriptProcessorNode.h"
 
-#include "core/dom/Document.h"
+#include "core/dom/CrossThreadTask.h"
+#include "core/dom/ExecutionContext.h"
 #include "modules/webaudio/AudioBuffer.h"
 #include "modules/webaudio/AudioContext.h"
 #include "modules/webaudio/AudioNodeInput.h"
 #include "modules/webaudio/AudioProcessingEvent.h"
 #include "public/platform/Platform.h"
 #include "wtf/Float32Array.h"
-#include "wtf/MainThread.h"
 
-namespace WebCore {
+namespace blink {
 
 static size_t chooseBufferSize()
 {
     // Choose a buffer size based on the audio hardware buffer size. Arbitarily make it a power of
     // two that is 4 times greater than the hardware buffer size.
     // FIXME: What is the best way to choose this?
-    size_t hardwareBufferSize = blink::Platform::current()->audioHardwareBufferSize();
+    size_t hardwareBufferSize = Platform::current()->audioHardwareBufferSize();
     size_t bufferSize = 1 << static_cast<unsigned>(log2(4 * hardwareBufferSize) + 0.5);
 
     if (bufferSize < 256)
@@ -56,7 +56,7 @@ static size_t chooseBufferSize()
     return bufferSize;
 }
 
-PassRefPtr<ScriptProcessorNode> ScriptProcessorNode::create(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
+ScriptProcessorNode* ScriptProcessorNode::create(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
 {
     // Check for valid buffer size.
     switch (bufferSize) {
@@ -72,19 +72,19 @@ PassRefPtr<ScriptProcessorNode> ScriptProcessorNode::create(AudioContext* contex
     case 16384:
         break;
     default:
-        return nullptr;
+        return 0;
     }
 
     if (!numberOfInputChannels && !numberOfOutputChannels)
-        return nullptr;
+        return 0;
 
     if (numberOfInputChannels > AudioContext::maxNumberOfChannels())
-        return nullptr;
+        return 0;
 
     if (numberOfOutputChannels > AudioContext::maxNumberOfChannels())
-        return nullptr;
+        return 0;
 
-    return adoptRef(new ScriptProcessorNode(context, sampleRate, bufferSize, numberOfInputChannels, numberOfOutputChannels));
+    return adoptRefCountedGarbageCollectedWillBeNoop(new ScriptProcessorNode(context, sampleRate, bufferSize, numberOfInputChannels, numberOfOutputChannels));
 }
 
 ScriptProcessorNode::ScriptProcessorNode(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
@@ -93,20 +93,18 @@ ScriptProcessorNode::ScriptProcessorNode(AudioContext* context, float sampleRate
     , m_doubleBufferIndexForEvent(0)
     , m_bufferSize(bufferSize)
     , m_bufferReadWriteIndex(0)
-    , m_isRequestOutstanding(false)
     , m_numberOfInputChannels(numberOfInputChannels)
     , m_numberOfOutputChannels(numberOfOutputChannels)
     , m_internalInputBus(AudioBus::create(numberOfInputChannels, AudioNode::ProcessingSizeInFrames, false))
 {
-    ScriptWrappable::init(this);
     // Regardless of the allowed buffer sizes, we still need to process at the granularity of the AudioNode.
     if (m_bufferSize < AudioNode::ProcessingSizeInFrames)
         m_bufferSize = AudioNode::ProcessingSizeInFrames;
 
     ASSERT(numberOfInputChannels <= AudioContext::maxNumberOfChannels());
 
-    addInput(adoptPtr(new AudioNodeInput(this)));
-    addOutput(adoptPtr(new AudioNodeOutput(this, numberOfOutputChannels)));
+    addInput();
+    addOutput(AudioNodeOutput::create(this, numberOfOutputChannels));
 
     setNodeType(NodeTypeJavaScript);
 
@@ -115,7 +113,13 @@ ScriptProcessorNode::ScriptProcessorNode(AudioContext* context, float sampleRate
 
 ScriptProcessorNode::~ScriptProcessorNode()
 {
+    ASSERT(!isInitialized());
+}
+
+void ScriptProcessorNode::dispose()
+{
     uninitialize();
+    AudioNode::dispose();
 }
 
 void ScriptProcessorNode::initialize()
@@ -128,8 +132,8 @@ void ScriptProcessorNode::initialize()
     // Create double buffers on both the input and output sides.
     // These AudioBuffers will be directly accessed in the main thread by JavaScript.
     for (unsigned i = 0; i < 2; ++i) {
-        RefPtr<AudioBuffer> inputBuffer = m_numberOfInputChannels ? AudioBuffer::create(m_numberOfInputChannels, bufferSize(), sampleRate) : nullptr;
-        RefPtr<AudioBuffer> outputBuffer = m_numberOfOutputChannels ? AudioBuffer::create(m_numberOfOutputChannels, bufferSize(), sampleRate) : nullptr;
+        AudioBuffer* inputBuffer = m_numberOfInputChannels ? AudioBuffer::create(m_numberOfInputChannels, bufferSize(), sampleRate) : 0;
+        AudioBuffer* outputBuffer = m_numberOfOutputChannels ? AudioBuffer::create(m_numberOfOutputChannels, bufferSize(), sampleRate) : 0;
 
         m_inputBuffers.append(inputBuffer);
         m_outputBuffers.append(outputBuffer);
@@ -214,40 +218,25 @@ void ScriptProcessorNode::process(size_t framesToProcess)
     if (!m_bufferReadWriteIndex) {
         // Avoid building up requests on the main thread to fire process events when they're not being handled.
         // This could be a problem if the main thread is very busy doing other things and is being held up handling previous requests.
-        if (m_isRequestOutstanding) {
+        // The audio thread can't block on this lock, so we call tryLock() instead.
+        MutexTryLocker tryLocker(m_processEventLock);
+        if (!tryLocker.locked()) {
             // We're late in handling the previous request. The main thread must be very busy.
             // The best we can do is clear out the buffer ourself here.
             outputBuffer->zero();
-        } else {
-            // Reference ourself so we don't accidentally get deleted before fireProcessEvent() gets called.
-            ref();
-
+        } else if (context()->executionContext()) {
             // Fire the event on the main thread, not this one (which is the realtime audio thread).
             m_doubleBufferIndexForEvent = m_doubleBufferIndex;
-            m_isRequestOutstanding = true;
-            callOnMainThread(fireProcessEventDispatch, this);
+            context()->executionContext()->postTask(createCrossThreadTask(&ScriptProcessorNode::fireProcessEvent, this));
         }
 
         swapBuffers();
     }
 }
 
-void ScriptProcessorNode::fireProcessEventDispatch(void* userData)
-{
-    ScriptProcessorNode* jsAudioNode = static_cast<ScriptProcessorNode*>(userData);
-    ASSERT(jsAudioNode);
-    if (!jsAudioNode)
-        return;
-
-    jsAudioNode->fireProcessEvent();
-
-    // De-reference to match the ref() call in process().
-    jsAudioNode->deref();
-}
-
 void ScriptProcessorNode::fireProcessEvent()
 {
-    ASSERT(isMainThread() && m_isRequestOutstanding);
+    ASSERT(isMainThread());
 
     bool isIndexGood = m_doubleBufferIndexForEvent < 2;
     ASSERT(isIndexGood);
@@ -262,11 +251,15 @@ void ScriptProcessorNode::fireProcessEvent()
 
     // Avoid firing the event if the document has already gone away.
     if (context()->executionContext()) {
-        // Let the audio thread know we've gotten to the point where it's OK for it to make another request.
-        m_isRequestOutstanding = false;
+        // This synchronizes with process().
+        MutexLocker processLocker(m_processEventLock);
+
+        // Calculate a playbackTime with the buffer size which needs to be processed each time onaudioprocess is called.
+        // The outputBuffer being passed to JS will be played after exhausting the previous outputBuffer by double-buffering.
+        double playbackTime = (context()->currentSampleFrame() + m_bufferSize) / static_cast<double>(context()->sampleRate());
 
         // Call the JavaScript event handler which will do the audio processing.
-        dispatchEvent(AudioProcessingEvent::create(inputBuffer, outputBuffer));
+        dispatchEvent(AudioProcessingEvent::create(inputBuffer, outputBuffer, playbackTime));
     }
 }
 
@@ -280,6 +273,13 @@ double ScriptProcessorNode::latencyTime() const
     return std::numeric_limits<double>::infinity();
 }
 
-} // namespace WebCore
+void ScriptProcessorNode::trace(Visitor* visitor)
+{
+    visitor->trace(m_inputBuffers);
+    visitor->trace(m_outputBuffers);
+    AudioNode::trace(visitor);
+}
+
+} // namespace blink
 
 #endif // ENABLE(WEB_AUDIO)