| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Apple Inc. All rights reserved. | 2 * Copyright (C) 2013 Apple Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
| 8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
| 9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
| 10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
| (...skipping 24 matching lines...) |
| 35 namespace WebCore { | 35 namespace WebCore { |
| 36 | 36 |
| 37 PassRefPtrWillBeRawPtr<SpeechSynthesis> SpeechSynthesis::create(ExecutionContext* context) | 37 PassRefPtrWillBeRawPtr<SpeechSynthesis> SpeechSynthesis::create(ExecutionContext* context) |
| 38 { | 38 { |
| 39 return adoptRefWillBeRefCountedGarbageCollected(new SpeechSynthesis(context)); | 39 return adoptRefWillBeRefCountedGarbageCollected(new SpeechSynthesis(context)); |
| 40 } | 40 } |
| 41 | 41 |
| 42 SpeechSynthesis::SpeechSynthesis(ExecutionContext* context) | 42 SpeechSynthesis::SpeechSynthesis(ExecutionContext* context) |
| 43 : ContextLifecycleObserver(context) | 43 : ContextLifecycleObserver(context) |
| 44 , m_platformSpeechSynthesizer(PlatformSpeechSynthesizer::create(this)) | 44 , m_platformSpeechSynthesizer(PlatformSpeechSynthesizer::create(this)) |
| 45 , m_currentSpeechUtterance(nullptr) | |
| 46 , m_isPaused(false) | 45 , m_isPaused(false) |
| 47 { | 46 { |
| 48 ScriptWrappable::init(this); | 47 ScriptWrappable::init(this); |
| 49 } | 48 } |
| 50 | 49 |
| 51 void SpeechSynthesis::setPlatformSynthesizer(PassOwnPtr<PlatformSpeechSynthesizer> synthesizer) | 50 void SpeechSynthesis::setPlatformSynthesizer(PassOwnPtr<PlatformSpeechSynthesizer> synthesizer) |
| 52 { | 51 { |
| 53 m_platformSpeechSynthesizer = synthesizer; | 52 m_platformSpeechSynthesizer = synthesizer; |
| 54 } | 53 } |
| 55 | 54 |
| (...skipping 20 matching lines...) |
| 76 for (size_t k = 0; k < voiceCount; k++) | 75 for (size_t k = 0; k < voiceCount; k++) |
| 77 m_voiceList.append(SpeechSynthesisVoice::create(platformVoices[k])); | 76 m_voiceList.append(SpeechSynthesisVoice::create(platformVoices[k])); |
| 78 | 77 |
| 79 return m_voiceList; | 78 return m_voiceList; |
| 80 } | 79 } |
| 81 | 80 |
| 82 bool SpeechSynthesis::speaking() const | 81 bool SpeechSynthesis::speaking() const |
| 83 { | 82 { |
| 84 // If we have a current speech utterance, then that means we're assumed to be in a speaking state. | 83 // If we have a current speech utterance, then that means we're assumed to be in a speaking state. |
| 85 // This state is independent of whether the utterance happens to be paused. | 84 // This state is independent of whether the utterance happens to be paused. |
| 86 return m_currentSpeechUtterance; | 85 return currentSpeechUtterance(); |
| 87 } | 86 } |
| 88 | 87 |
| 89 bool SpeechSynthesis::pending() const | 88 bool SpeechSynthesis::pending() const |
| 90 { | 89 { |
| 91 // This is true if there are any utterances that have not started. | 90 // This is true if there are any utterances that have not started. |
| 92 // That means there will be more than one in the queue. | 91 // That means there will be more than one in the queue. |
| 93 return m_utteranceQueue.size() > 1; | 92 return m_utteranceQueue.size() > 1; |
| 94 } | 93 } |
| 95 | 94 |
| 96 bool SpeechSynthesis::paused() const | 95 bool SpeechSynthesis::paused() const |
| 97 { | 96 { |
| 98 return m_isPaused; | 97 return m_isPaused; |
| 99 } | 98 } |
| 100 | 99 |
| 101 void SpeechSynthesis::startSpeakingImmediately(SpeechSynthesisUtterance* utterance) | 100 void SpeechSynthesis::startSpeakingImmediately() |
| 102 { | 101 { |
| 103 ASSERT(!m_currentSpeechUtterance); | 102 SpeechSynthesisUtterance* utterance = currentSpeechUtterance(); |
| 103 ASSERT(utterance); |
| 104 |
| 104 utterance->setStartTime(monotonicallyIncreasingTime()); | 105 utterance->setStartTime(monotonicallyIncreasingTime()); |
| 105 m_currentSpeechUtterance = utterance; | |
| 106 m_isPaused = false; | 106 m_isPaused = false; |
| 107 m_platformSpeechSynthesizer->speak(utterance->platformUtterance()); | 107 m_platformSpeechSynthesizer->speak(utterance->platformUtterance()); |
| 108 } | 108 } |
| 109 | 109 |
| 110 void SpeechSynthesis::speak(SpeechSynthesisUtterance* utterance, ExceptionState& exceptionState) | 110 void SpeechSynthesis::speak(SpeechSynthesisUtterance* utterance, ExceptionState& exceptionState) |
| 111 { | 111 { |
| 112 if (!utterance) { | 112 if (!utterance) { |
| 113 exceptionState.throwTypeError("Invalid utterance argument"); | 113 exceptionState.throwTypeError("Invalid utterance argument"); |
| 114 return; | 114 return; |
| 115 } | 115 } |
| 116 | 116 |
| 117 m_utteranceQueue.append(utterance); | 117 m_utteranceQueue.append(utterance); |
| 118 | 118 |
| 119 // If the queue was empty, speak this immediately and add it to the queue. | 119 // If the queue was empty, speak this immediately. |
| 120 if (m_utteranceQueue.size() == 1) | 120 if (m_utteranceQueue.size() == 1) |
| 121 startSpeakingImmediately(utterance); | 121 startSpeakingImmediately(); |
| 122 } | 122 } |
| 123 | 123 |
| 124 void SpeechSynthesis::cancel() | 124 void SpeechSynthesis::cancel() |
| 125 { | 125 { |
| 126 // Remove all the items from the utterance queue. | 126 // Remove all the items from the utterance queue. The platform |
| 127 // Hold on to the current utterance so the platform synthesizer can have a chance to clean up. | 127 // may still have references to some of these utterances and may |
| 128 RefPtrWillBeMember<SpeechSynthesisUtterance> current = m_currentSpeechUtterance; | 128 // fire events on them asynchronously. |
| 129 m_utteranceQueue.clear(); | 129 m_utteranceQueue.clear(); |
| 130 m_platformSpeechSynthesizer->cancel(); | 130 m_platformSpeechSynthesizer->cancel(); |
| 131 current = nullptr; | |
| 132 | |
| 133 // The platform should have called back immediately and cleared the current utterance. | |
| 134 ASSERT(!m_currentSpeechUtterance); | |
| 135 } | 131 } |
| 136 | 132 |
| 137 void SpeechSynthesis::pause() | 133 void SpeechSynthesis::pause() |
| 138 { | 134 { |
| 139 if (!m_isPaused) | 135 if (!m_isPaused) |
| 140 m_platformSpeechSynthesizer->pause(); | 136 m_platformSpeechSynthesizer->pause(); |
| 141 } | 137 } |
| 142 | 138 |
| 143 void SpeechSynthesis::resume() | 139 void SpeechSynthesis::resume() |
| 144 { | 140 { |
| 145 if (!m_currentSpeechUtterance) | 141 if (!currentSpeechUtterance()) |
| 146 return; | 142 return; |
| 147 m_platformSpeechSynthesizer->resume(); | 143 m_platformSpeechSynthesizer->resume(); |
| 148 } | 144 } |
| 149 | 145 |
| 150 void SpeechSynthesis::fireEvent(const AtomicString& type, SpeechSynthesisUtterance* utterance, unsigned long charIndex, const String& name) | 146 void SpeechSynthesis::fireEvent(const AtomicString& type, SpeechSynthesisUtterance* utterance, unsigned long charIndex, const String& name) |
| 151 { | 147 { |
| 152 if (!executionContext()->activeDOMObjectsAreStopped()) | 148 if (!executionContext()->activeDOMObjectsAreStopped()) |
| 153 utterance->dispatchEvent(SpeechSynthesisEvent::create(type, charIndex, (currentTime() - utterance->startTime()), name)); | 149 utterance->dispatchEvent(SpeechSynthesisEvent::create(type, charIndex, (currentTime() - utterance->startTime()), name)); |
| 154 } | 150 } |
| 155 | 151 |
| 156 void SpeechSynthesis::handleSpeakingCompleted(SpeechSynthesisUtterance* utterance, bool errorOccurred) | 152 void SpeechSynthesis::handleSpeakingCompleted(SpeechSynthesisUtterance* utterance, bool errorOccurred) |
| 157 { | 153 { |
| 158 ASSERT(utterance); | 154 ASSERT(utterance); |
| 159 ASSERT(m_currentSpeechUtterance); | |
| 160 m_currentSpeechUtterance = nullptr; | |
| 161 | 155 |
| 156 bool didJustFinishCurrentUtterance = false; |
| 157 // If the utterance that completed was the one we're currently speaking, |
| 158 // remove it from the queue and start speaking the next one. |
| 159 if (utterance == currentSpeechUtterance()) { |
| 160 m_utteranceQueue.removeFirst(); |
| 161 didJustFinishCurrentUtterance = true; |
| 162 } |
| 163 |
| 164 // Always fire the event, because the platform may have asynchronously |
| 165 // sent an event on an utterance before it got the message that we |
| 166 // canceled it, and we should always report to the user what actually |
| 167 // happened. |
| 162 fireEvent(errorOccurred ? EventTypeNames::error : EventTypeNames::end, utterance, 0, String()); | 168 fireEvent(errorOccurred ? EventTypeNames::error : EventTypeNames::end, utterance, 0, String()); |
| 163 | 169 |
| 164 if (m_utteranceQueue.size()) { | 170 // Start the next utterance if we just finished one and one was pending. |
| 165 RefPtrWillBeMember<SpeechSynthesisUtterance> firstUtterance = m_utteranceQueue.first(); | 171 if (didJustFinishCurrentUtterance && !m_utteranceQueue.isEmpty()) |
| 166 ASSERT(firstUtterance == utterance); | 172 startSpeakingImmediately(); |
| 167 if (firstUtterance == utterance) | |
| 168 m_utteranceQueue.removeFirst(); | |
| 169 | |
| 170 // Start the next job if there is one pending. | |
| 171 if (!m_utteranceQueue.isEmpty()) | |
| 172 startSpeakingImmediately(m_utteranceQueue.first().get()); | |
| 173 } | |
| 174 } | 173 } |
| 175 | 174 |
| 176 void SpeechSynthesis::boundaryEventOccurred(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance, SpeechBoundary boundary, unsigned charIndex) | 175 void SpeechSynthesis::boundaryEventOccurred(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance, SpeechBoundary boundary, unsigned charIndex) |
| 177 { | 176 { |
| 178 DEFINE_STATIC_LOCAL(const String, wordBoundaryString, ("word")); | 177 DEFINE_STATIC_LOCAL(const String, wordBoundaryString, ("word")); |
| 179 DEFINE_STATIC_LOCAL(const String, sentenceBoundaryString, ("sentence")); | 178 DEFINE_STATIC_LOCAL(const String, sentenceBoundaryString, ("sentence")); |
| 180 | 179 |
| 181 switch (boundary) { | 180 switch (boundary) { |
| 182 case SpeechWordBoundary: | 181 case SpeechWordBoundary: |
| 183 fireEvent(EventTypeNames::boundary, static_cast<SpeechSynthesisUtterance*>(utterance->client()), charIndex, wordBoundaryString); | 182 fireEvent(EventTypeNames::boundary, static_cast<SpeechSynthesisUtterance*>(utterance->client()), charIndex, wordBoundaryString); |
| (...skipping 31 matching lines...) |
| 215 if (utterance->client()) | 214 if (utterance->client()) |
| 216 handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), false); | 215 handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), false); |
| 217 } | 216 } |
| 218 | 217 |
| 219 void SpeechSynthesis::speakingErrorOccurred(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance) | 218 void SpeechSynthesis::speakingErrorOccurred(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance) |
| 220 { | 219 { |
| 221 if (utterance->client()) | 220 if (utterance->client()) |
| 222 handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), true); | 221 handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), true); |
| 223 } | 222 } |
| 224 | 223 |
| 224 SpeechSynthesisUtterance* SpeechSynthesis::currentSpeechUtterance() const |
| 225 { |
| 226 if (!m_utteranceQueue.isEmpty()) |
| 227 return m_utteranceQueue.first().get(); |
| 228 return 0; |
| 229 } |
| 230 |
| 225 const AtomicString& SpeechSynthesis::interfaceName() const | 231 const AtomicString& SpeechSynthesis::interfaceName() const |
| 226 { | 232 { |
| 227 return EventTargetNames::SpeechSynthesisUtterance; | 233 return EventTargetNames::SpeechSynthesisUtterance; |
| 228 } | 234 } |
| 229 | 235 |
| 230 void SpeechSynthesis::trace(Visitor* visitor) | 236 void SpeechSynthesis::trace(Visitor* visitor) |
| 231 { | 237 { |
| 232 visitor->trace(m_voiceList); | 238 visitor->trace(m_voiceList); |
| 233 visitor->trace(m_currentSpeechUtterance); | |
| 234 visitor->trace(m_utteranceQueue); | 239 visitor->trace(m_utteranceQueue); |
| 235 } | 240 } |
| 236 | 241 |
| 237 } // namespace WebCore | 242 } // namespace WebCore |
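The heart of this change: instead of tracking the current utterance in a separate m_currentSpeechUtterance member that had to be kept in sync with m_utteranceQueue, the new code derives it from the queue head via currentSpeechUtterance(). That lets cancel() simply clear the queue, and lets handleSpeakingCompleted() tolerate a late platform callback for an utterance that was already canceled. Below is a minimal standalone sketch of that queue-head pattern; the Utterance type, the std::deque storage, and the method names are illustrative stand-ins, not the Blink classes.

    #include <deque>
    #include <memory>

    // Illustrative stand-in for SpeechSynthesisUtterance.
    struct Utterance { };

    class Synthesis {
    public:
        void enqueue(std::shared_ptr<Utterance> utterance)
        {
            m_queue.push_back(std::move(utterance));
            // If the queue was empty, the new entry is current: start it.
            if (m_queue.size() == 1)
                startSpeakingImmediately();
        }

        // The current utterance is simply the queue head, so no separate
        // member has to be kept in sync with the queue.
        Utterance* current() const
        {
            return m_queue.empty() ? nullptr : m_queue.front().get();
        }

        void handleCompleted(Utterance* utterance)
        {
            // Pop only if the completed utterance is still the head; the
            // platform may report completion for an utterance that was
            // already removed by cancel().
            bool finishedCurrent = false;
            if (utterance == current()) {
                m_queue.pop_front();
                finishedCurrent = true;
            }
            // An end/error event would be fired here unconditionally, so
            // the page always hears what actually happened.
            if (finishedCurrent && !m_queue.empty())
                startSpeakingImmediately();
        }

        void cancel()
        {
            // Clearing the queue implicitly clears the current utterance.
            m_queue.clear();
        }

    private:
        void startSpeakingImmediately() { /* drive the platform synthesizer */ }

        std::deque<std::shared_ptr<Utterance>> m_queue;
    };

The guard in handleCompleted() mirrors the new didJustFinishCurrentUtterance flag above: a completion report for an utterance that is no longer at the head still fires its event, but does not pop the queue or start the next utterance.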