| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Apple Inc. All rights reserved. | 2 * Copyright (C) 2013 Apple Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
| 8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
| 9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
| 10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
| (...skipping 135 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 146 void SpeechSynthesis::fireEvent(const AtomicString& type, SpeechSynthesisUtteran
ce* utterance, unsigned long charIndex, const String& name) | 146 void SpeechSynthesis::fireEvent(const AtomicString& type, SpeechSynthesisUtteran
ce* utterance, unsigned long charIndex, const String& name) |
| 147 { | 147 { |
| 148 if (executionContext() && !executionContext()->activeDOMObjectsAreStopped()) | 148 if (executionContext() && !executionContext()->activeDOMObjectsAreStopped()) |
| 149 utterance->dispatchEvent(SpeechSynthesisEvent::create(type, charIndex, (
currentTime() - utterance->startTime()), name)); | 149 utterance->dispatchEvent(SpeechSynthesisEvent::create(type, charIndex, (
currentTime() - utterance->startTime()), name)); |
| 150 } | 150 } |
| 151 | 151 |
| 152 void SpeechSynthesis::handleSpeakingCompleted(SpeechSynthesisUtterance* utteranc
e, bool errorOccurred) | 152 void SpeechSynthesis::handleSpeakingCompleted(SpeechSynthesisUtterance* utteranc
e, bool errorOccurred) |
| 153 { | 153 { |
| 154 ASSERT(utterance); | 154 ASSERT(utterance); |
| 155 | 155 |
| 156 bool didJustFinishCurrentUtterance = false; | 156 bool shouldStartSpeaking = false; |
| 157 // If the utterance that completed was the one we're currently speaking, | 157 // If the utterance that completed was the one we're currently speaking, |
| 158 // remove it from the queue and start speaking the next one. | 158 // remove it from the queue and start speaking the next one. |
| 159 if (utterance == currentSpeechUtterance()) { | 159 if (utterance == currentSpeechUtterance()) { |
| 160 m_utteranceQueue.removeFirst(); | 160 m_utteranceQueue.removeFirst(); |
| 161 didJustFinishCurrentUtterance = true; | 161 shouldStartSpeaking = !!m_utteranceQueue.size(); |
| 162 } | 162 } |
| 163 | 163 |
| 164 // Always fire the event, because the platform may have asynchronously | 164 // Always fire the event, because the platform may have asynchronously |
| 165 // sent an event on an utterance before it got the message that we | 165 // sent an event on an utterance before it got the message that we |
| 166 // canceled it, and we should always report to the user what actually | 166 // canceled it, and we should always report to the user what actually |
| 167 // happened. | 167 // happened. |
| 168 fireEvent(errorOccurred ? EventTypeNames::error : EventTypeNames::end, utter
ance, 0, String()); | 168 fireEvent(errorOccurred ? EventTypeNames::error : EventTypeNames::end, utter
ance, 0, String()); |
| 169 | 169 |
| 170 // Start the next utterance if we just finished one and one was pending. | 170 // Start the next utterance if we just finished one and one was pending. |
| 171 if (didJustFinishCurrentUtterance && !m_utteranceQueue.isEmpty() && !utteran
ce->startTime()) | 171 if (shouldStartSpeaking && !m_utteranceQueue.isEmpty()) |
| 172 startSpeakingImmediately(); | 172 startSpeakingImmediately(); |
| 173 } | 173 } |
| 174 | 174 |
| 175 void SpeechSynthesis::boundaryEventOccurred(PlatformSpeechSynthesisUtterance* ut
terance, SpeechBoundary boundary, unsigned charIndex) | 175 void SpeechSynthesis::boundaryEventOccurred(PlatformSpeechSynthesisUtterance* ut
terance, SpeechBoundary boundary, unsigned charIndex) |
| 176 { | 176 { |
| 177 DEFINE_STATIC_LOCAL(const String, wordBoundaryString, ("word")); | 177 DEFINE_STATIC_LOCAL(const String, wordBoundaryString, ("word")); |
| 178 DEFINE_STATIC_LOCAL(const String, sentenceBoundaryString, ("sentence")); | 178 DEFINE_STATIC_LOCAL(const String, sentenceBoundaryString, ("sentence")); |
| 179 | 179 |
| 180 switch (boundary) { | 180 switch (boundary) { |
| 181 case SpeechWordBoundary: | 181 case SpeechWordBoundary: |
| (...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
void SpeechSynthesis::trace(Visitor* visitor)
{
    // Blink GC (Oilpan) tracing: visit every traced member so the garbage
    // collector keeps them reachable.
    visitor->trace(m_platformSpeechSynthesizer);
    visitor->trace(m_voiceList);
    visitor->trace(m_utteranceQueue);
    // Chain to both base classes so their traced members are visited too.
    PlatformSpeechSynthesizerClient::trace(visitor);
    EventTargetWithInlineData::trace(visitor);
}
| 244 | 244 |
| 245 } // namespace blink | 245 } // namespace blink |
| OLD | NEW |