namespace WebCore {
-PassRefPtr<SpeechSynthesis> SpeechSynthesis::create(ExecutionContext* context)
+DEFINE_GC_INFO(SpeechSynthesis);
+
+// Factory. During the Oilpan transition the return type and the adoption
+// helper expand to either the ref-counted or the garbage-collected form
+// depending on the build configuration.
+PassRefPtrWillBeRawPtr<SpeechSynthesis> SpeechSynthesis::create(ExecutionContext* context)
{
- return adoptRef(new SpeechSynthesis(context));
+ return adoptRefCountedWillBeRefCountedGarbageCollected(new SpeechSynthesis(context));
}
+// NOTE(review): the current utterance is no longer stored as a member; it is
+// now derived from the front of m_utteranceQueue (see currentSpeechUtterance()).
SpeechSynthesis::SpeechSynthesis(ExecutionContext* context)
: ContextLifecycleObserver(context)
, m_platformSpeechSynthesizer(PlatformSpeechSynthesizer::create(this))
- , m_currentSpeechUtterance(0)
, m_isPaused(false)
{
ScriptWrappable::init(this);
+ // NOTE(review): this event fires from the constructor, before script can
+ // have attached a voiceschanged listener — confirm this is intended.
dispatchEvent(Event::create(EventTypeNames::voiceschanged));
}
-const Vector<RefPtr<SpeechSynthesisVoice> >& SpeechSynthesis::getVoices()
+const WillBeHeapVector<RefPtrWillBeMember<SpeechSynthesisVoice> >& SpeechSynthesis::getVoices()
{
if (m_voiceList.size())
return m_voiceList;
{
// If we have a current speech utterance, then that means we're assumed to be in a speaking state.
// This state is independent of whether the utterance happens to be paused.
- return m_currentSpeechUtterance;
+ return currentSpeechUtterance();
}
bool SpeechSynthesis::pending() const
return m_isPaused;
}
-void SpeechSynthesis::startSpeakingImmediately(SpeechSynthesisUtterance* utterance)
+// Starts speaking the utterance at the front of m_utteranceQueue. Callers
+// must ensure the queue is non-empty. The utterance is NOT removed here; it
+// stays at the front of the queue until handleSpeakingCompleted removes it.
+void SpeechSynthesis::startSpeakingImmediately()
{
- ASSERT(!m_currentSpeechUtterance);
+ SpeechSynthesisUtterance* utterance = currentSpeechUtterance();
+ ASSERT(utterance);
+
utterance->setStartTime(monotonicallyIncreasingTime());
- m_currentSpeechUtterance = utterance;
m_isPaused = false;
m_platformSpeechSynthesizer->speak(utterance->platformUtterance());
}
m_utteranceQueue.append(utterance);
- // If the queue was empty, speak this immediately and add it to the queue.
+ // If the queue was empty, speak this immediately.
if (m_utteranceQueue.size() == 1)
- startSpeakingImmediately(utterance);
+ startSpeakingImmediately();
}
void SpeechSynthesis::cancel()
{
- // Remove all the items from the utterance queue.
- // Hold on to the current utterance so the platform synthesizer can have a chance to clean up.
- RefPtr<SpeechSynthesisUtterance> current = m_currentSpeechUtterance;
+ // Remove all the items from the utterance queue. The platform
+ // may still have references to some of these utterances and may
+ // fire events on them asynchronously.
m_utteranceQueue.clear();
m_platformSpeechSynthesizer->cancel();
- current = 0;
-
- // The platform should have called back immediately and cleared the current utterance.
- ASSERT(!m_currentSpeechUtterance);
+ // If the platform calls back (possibly synchronously) for an utterance
+ // that is no longer queued, handleSpeakingCompleted tolerates that: it
+ // still fires the end/error event without touching the queue.
}
void SpeechSynthesis::pause()
void SpeechSynthesis::resume()
{
- if (!m_currentSpeechUtterance)
+ // No-op when nothing is queued (and therefore nothing is being spoken).
+ if (!currentSpeechUtterance())
return;
m_platformSpeechSynthesizer->resume();
}
+// Platform callback: |utterance| finished speaking, or failed if
+// |errorOccurred| is true. The completed utterance may or may not still be
+// at the front of the queue (e.g. cancel() may already have cleared it).
void SpeechSynthesis::handleSpeakingCompleted(SpeechSynthesisUtterance* utterance, bool errorOccurred)
{
ASSERT(utterance);
- ASSERT(m_currentSpeechUtterance);
- m_currentSpeechUtterance = 0;
- fireEvent(errorOccurred ? EventTypeNames::error : EventTypeNames::end, utterance, 0, String());
+ bool didJustFinishCurrentUtterance = false;
+ // If the utterance that completed was the one we're currently speaking,
+ // remove it from the queue and start speaking the next one.
+ if (utterance == currentSpeechUtterance()) {
+ m_utteranceQueue.removeFirst();
+ didJustFinishCurrentUtterance = true;
+ }
- if (m_utteranceQueue.size()) {
- RefPtr<SpeechSynthesisUtterance> firstUtterance = m_utteranceQueue.first();
- ASSERT(firstUtterance == utterance);
- if (firstUtterance == utterance)
- m_utteranceQueue.removeFirst();
+ // Always fire the event, because the platform may have asynchronously
+ // sent an event on an utterance before it got the message that we
+ // canceled it, and we should always report to the user what actually
+ // happened.
+ fireEvent(errorOccurred ? EventTypeNames::error : EventTypeNames::end, utterance, 0, String());
- // Start the next job if there is one pending.
- if (!m_utteranceQueue.isEmpty())
- startSpeakingImmediately(m_utteranceQueue.first().get());
- }
+ // Start the next utterance if we just finished one and one was pending.
+ if (didJustFinishCurrentUtterance && !m_utteranceQueue.isEmpty())
+ startSpeakingImmediately();
}
void SpeechSynthesis::boundaryEventOccurred(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance, SpeechBoundary boundary, unsigned charIndex)
handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), true);
}
+// Returns the utterance currently being spoken — the front of the queue —
+// or 0 when the queue is empty. The front entry remains queued for the
+// whole time it is being spoken (see handleSpeakingCompleted).
+SpeechSynthesisUtterance* SpeechSynthesis::currentSpeechUtterance() const
+{
+ if (!m_utteranceQueue.isEmpty())
+ return m_utteranceQueue.first().get();
+ return 0;
+}
+
const AtomicString& SpeechSynthesis::interfaceName() const
{
return EventTargetNames::SpeechSynthesisUtterance;
}
+// Oilpan tracing: visit the members that hold traced references
+// (the voice list and the utterance queue).
+void SpeechSynthesis::trace(Visitor* visitor)
+{
+ visitor->trace(m_voiceList);
+ visitor->trace(m_utteranceQueue);
+}
+
} // namespace WebCore