diff --git a/src/third_party/WebKit/Source/modules/speech/SpeechSynthesis.cpp b/src/third_party/WebKit/Source/modules/speech/SpeechSynthesis.cpp
index 2a989a9..79cf909 100644
--- a/src/third_party/WebKit/Source/modules/speech/SpeechSynthesis.cpp
+++ b/src/third_party/WebKit/Source/modules/speech/SpeechSynthesis.cpp
 
 namespace WebCore {
 
-PassRefPtr<SpeechSynthesis> SpeechSynthesis::create(ExecutionContext* context)
+DEFINE_GC_INFO(SpeechSynthesis);
+
+PassRefPtrWillBeRawPtr<SpeechSynthesis> SpeechSynthesis::create(ExecutionContext* context)
 {
-    return adoptRef(new SpeechSynthesis(context));
+    return adoptRefCountedWillBeRefCountedGarbageCollected(new SpeechSynthesis(context));
 }
 
 SpeechSynthesis::SpeechSynthesis(ExecutionContext* context)
     : ContextLifecycleObserver(context)
     , m_platformSpeechSynthesizer(PlatformSpeechSynthesizer::create(this))
-    , m_currentSpeechUtterance(0)
     , m_isPaused(false)
 {
     ScriptWrappable::init(this);
@@ -65,7 +66,7 @@ void SpeechSynthesis::voicesDidChange()
         dispatchEvent(Event::create(EventTypeNames::voiceschanged));
 }
 
-const Vector<RefPtr<SpeechSynthesisVoice> >& SpeechSynthesis::getVoices()
+const WillBeHeapVector<RefPtrWillBeMember<SpeechSynthesisVoice> >& SpeechSynthesis::getVoices()
 {
     if (m_voiceList.size())
         return m_voiceList;
@@ -83,7 +84,7 @@ bool SpeechSynthesis::speaking() const
 {
     // If we have a current speech utterance, then that means we're assumed to be in a speaking state.
     // This state is independent of whether the utterance happens to be paused.
-    return m_currentSpeechUtterance;
+    return currentSpeechUtterance();
 }
 
 bool SpeechSynthesis::pending() const
@@ -98,11 +99,12 @@ bool SpeechSynthesis::paused() const
     return m_isPaused;
 }
 
-void SpeechSynthesis::startSpeakingImmediately(SpeechSynthesisUtterance* utterance)
+void SpeechSynthesis::startSpeakingImmediately()
 {
-    ASSERT(!m_currentSpeechUtterance);
+    SpeechSynthesisUtterance* utterance = currentSpeechUtterance();
+    ASSERT(utterance);
+
     utterance->setStartTime(monotonicallyIncreasingTime());
-    m_currentSpeechUtterance = utterance;
     m_isPaused = false;
     m_platformSpeechSynthesizer->speak(utterance->platformUtterance());
 }
@@ -116,22 +118,18 @@ void SpeechSynthesis::speak(SpeechSynthesisUtterance* utterance, ExceptionState&
 
     m_utteranceQueue.append(utterance);
 
-    // If the queue was empty, speak this immediately and add it to the queue.
+    // If the queue was empty, speak this immediately.
     if (m_utteranceQueue.size() == 1)
-        startSpeakingImmediately(utterance);
+        startSpeakingImmediately();
 }
 
 void SpeechSynthesis::cancel()
 {
-    // Remove all the items from the utterance queue.
-    // Hold on to the current utterance so the platform synthesizer can have a chance to clean up.
-    RefPtr<SpeechSynthesisUtterance> current = m_currentSpeechUtterance;
+    // Remove all the items from the utterance queue. The platform
+    // may still have references to some of these utterances and may
+    // fire events on them asynchronously.
     m_utteranceQueue.clear();
     m_platformSpeechSynthesizer->cancel();
-    current = 0;
-
-    // The platform should have called back immediately and cleared the current utterance.
-    ASSERT(!m_currentSpeechUtterance);
 }
 
 void SpeechSynthesis::pause()
@@ -142,7 +140,7 @@ void SpeechSynthesis::pause()
 
 void SpeechSynthesis::resume()
 {
-    if (!m_currentSpeechUtterance)
+    if (!currentSpeechUtterance())
         return;
     m_platformSpeechSynthesizer->resume();
 }
@@ -156,21 +154,24 @@ void SpeechSynthesis::fireEvent(const AtomicString& type, SpeechSynthesisUtteran
 void SpeechSynthesis::handleSpeakingCompleted(SpeechSynthesisUtterance* utterance, bool errorOccurred)
 {
     ASSERT(utterance);
-    ASSERT(m_currentSpeechUtterance);
-    m_currentSpeechUtterance = 0;
 
-    fireEvent(errorOccurred ? EventTypeNames::error : EventTypeNames::end, utterance, 0, String());
+    bool didJustFinishCurrentUtterance = false;
+    // If the utterance that completed was the one we're currently speaking,
+    // remove it from the queue and start speaking the next one.
+    if (utterance == currentSpeechUtterance()) {
+        m_utteranceQueue.removeFirst();
+        didJustFinishCurrentUtterance = true;
+    }
 
-    if (m_utteranceQueue.size()) {
-        RefPtr<SpeechSynthesisUtterance> firstUtterance = m_utteranceQueue.first();
-        ASSERT(firstUtterance == utterance);
-        if (firstUtterance == utterance)
-            m_utteranceQueue.removeFirst();
+    // Always fire the event, because the platform may have asynchronously
+    // sent an event on an utterance before it got the message that we
+    // canceled it, and we should always report to the user what actually
+    // happened.
+    fireEvent(errorOccurred ? EventTypeNames::error : EventTypeNames::end, utterance, 0, String());
 
-        // Start the next job if there is one pending.
-        if (!m_utteranceQueue.isEmpty())
-            startSpeakingImmediately(m_utteranceQueue.first().get());
-    }
+    // Start the next utterance if we just finished one and one was pending.
+    if (didJustFinishCurrentUtterance && !m_utteranceQueue.isEmpty())
+        startSpeakingImmediately();
 }
 
 void SpeechSynthesis::boundaryEventOccurred(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance, SpeechBoundary boundary, unsigned charIndex)
@@ -222,9 +223,22 @@ void SpeechSynthesis::speakingErrorOccurred(PassRefPtr<PlatformSpeechSynthesisUt
         handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), true);
 }
 
+SpeechSynthesisUtterance* SpeechSynthesis::currentSpeechUtterance() const
+{
+    if (!m_utteranceQueue.isEmpty())
+        return m_utteranceQueue.first().get();
+    return 0;
+}
+
 const AtomicString& SpeechSynthesis::interfaceName() const
 {
     return EventTargetNames::SpeechSynthesisUtterance;
 }
 
+void SpeechSynthesis::trace(Visitor* visitor)
+{
+    visitor->trace(m_voiceList);
+    visitor->trace(m_utteranceQueue);
+}
+
 } // namespace WebCore