| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Apple Inc. All rights reserved. | 2 * Copyright (C) 2013 Apple Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
| 8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
| 9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
| 10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
| (...skipping 29 matching lines...) |
| 40 } | 40 } |
| 41 | 41 |
| 42 SpeechSynthesis::SpeechSynthesis(ExecutionContext* context) | 42 SpeechSynthesis::SpeechSynthesis(ExecutionContext* context) |
| 43 : ContextLifecycleObserver(context) | 43 : ContextLifecycleObserver(context) |
| 44 , m_platformSpeechSynthesizer(PlatformSpeechSynthesizer::create(this)) | 44 , m_platformSpeechSynthesizer(PlatformSpeechSynthesizer::create(this)) |
| 45 , m_isPaused(false) | 45 , m_isPaused(false) |
| 46 { | 46 { |
| 47 ScriptWrappable::init(this); | 47 ScriptWrappable::init(this); |
| 48 } | 48 } |
| 49 | 49 |
| 50 void SpeechSynthesis::setPlatformSynthesizer(PassOwnPtr<PlatformSpeechSynthesizer> synthesizer) | 50 void SpeechSynthesis::setPlatformSynthesizer(PlatformSpeechSynthesizer* synthesizer) |
| 51 { | 51 { |
| 52 m_platformSpeechSynthesizer = synthesizer; | 52 m_platformSpeechSynthesizer = synthesizer; |
| 53 } | 53 } |
| 54 | 54 |
| 55 ExecutionContext* SpeechSynthesis::executionContext() const | 55 ExecutionContext* SpeechSynthesis::executionContext() const |
| 56 { | 56 { |
| 57 return ContextLifecycleObserver::executionContext(); | 57 return ContextLifecycleObserver::executionContext(); |
| 58 } | 58 } |
| 59 | 59 |
| 60 void SpeechSynthesis::voicesDidChange() | 60 void SpeechSynthesis::voicesDidChange() |
| 61 { | 61 { |
| 62 m_voiceList.clear(); | 62 m_voiceList.clear(); |
| 63 if (executionContext() && !executionContext()->activeDOMObjectsAreStopped()) | 63 if (executionContext() && !executionContext()->activeDOMObjectsAreStopped()) |
| 64 dispatchEvent(Event::create(EventTypeNames::voiceschanged)); | 64 dispatchEvent(Event::create(EventTypeNames::voiceschanged)); |
| 65 } | 65 } |
| 66 | 66 |
| 67 const HeapVector<Member<SpeechSynthesisVoice> >& SpeechSynthesis::getVoices() | 67 const HeapVector<Member<SpeechSynthesisVoice> >& SpeechSynthesis::getVoices() |
| 68 { | 68 { |
| 69 if (m_voiceList.size()) | 69 if (m_voiceList.size()) |
| 70 return m_voiceList; | 70 return m_voiceList; |
| 71 | 71 |
| 72 // If the voiceList is empty, that's the cue to get the voices from the platform again. | 72 // If the voiceList is empty, that's the cue to get the voices from the platform again. |
| 73 const Vector<RefPtr<PlatformSpeechSynthesisVoice> >& platformVoices = m_platformSpeechSynthesizer->voiceList(); | 73 const HeapVector<Member<PlatformSpeechSynthesisVoice> >& platformVoices = m_platformSpeechSynthesizer->voiceList(); |
| 74 size_t voiceCount = platformVoices.size(); | 74 size_t voiceCount = platformVoices.size(); |
| 75 for (size_t k = 0; k < voiceCount; k++) | 75 for (size_t k = 0; k < voiceCount; k++) |
| 76 m_voiceList.append(SpeechSynthesisVoice::create(platformVoices[k])); | 76 m_voiceList.append(SpeechSynthesisVoice::create(platformVoices[k].get())); |
| 77 | 77 |
| 78 return m_voiceList; | 78 return m_voiceList; |
| 79 } | 79 } |
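
The getVoices() hunk above is representative of the whole patch: the platform voice list moves from reference-counted RefPtr handles to Oilpan's HeapVector<Member<...>>, so the call site now hands a raw pointer to the factory via .get(). Below is a minimal sketch of the factory shape that call site appears to expect; it is inferred from the diff, not code taken from this patch:

```cpp
// Hypothetical sketch, inferred from the call-site change above rather than
// taken from this patch. Once PlatformSpeechSynthesisVoice is allocated on
// the Oilpan heap, the wrapper factory would accept a raw pointer and keep
// the voice alive through a traced Member<> field instead of a RefPtr.
class SpeechSynthesisVoice : public GarbageCollected<SpeechSynthesisVoice> {
public:
    static SpeechSynthesisVoice* create(PlatformSpeechSynthesisVoice* voice)
    {
        return new SpeechSynthesisVoice(voice); // GC-allocated; no adoptRef()
    }

    void trace(Visitor* visitor) { visitor->trace(m_platformVoice); }

private:
    explicit SpeechSynthesisVoice(PlatformSpeechSynthesisVoice* voice)
        : m_platformVoice(voice) { }

    Member<PlatformSpeechSynthesisVoice> m_platformVoice;
};
```
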
| 80 | 80 |
| 81 bool SpeechSynthesis::speaking() const | 81 bool SpeechSynthesis::speaking() const |
| 82 { | 82 { |
| 83 // If we have a current speech utterance, then that means we're assumed to be in a speaking state. | 83 // If we have a current speech utterance, then that means we're assumed to be in a speaking state. |
| 84 // This state is independent of whether the utterance happens to be paused. | 84 // This state is independent of whether the utterance happens to be paused. |
| 85 return currentSpeechUtterance(); | 85 return currentSpeechUtterance(); |
| 86 } | 86 } |
| (...skipping 78 matching lines...) |
| 165 // sent an event on an utterance before it got the message that we | 165 // sent an event on an utterance before it got the message that we |
| 166 // canceled it, and we should always report to the user what actually | 166 // canceled it, and we should always report to the user what actually |
| 167 // happened. | 167 // happened. |
| 168 fireEvent(errorOccurred ? EventTypeNames::error : EventTypeNames::end, utterance, 0, String()); | 168 fireEvent(errorOccurred ? EventTypeNames::error : EventTypeNames::end, utterance, 0, String()); |
| 169 | 169 |
| 170 // Start the next utterance if we just finished one and one was pending. | 170 // Start the next utterance if we just finished one and one was pending. |
| 171 if (didJustFinishCurrentUtterance && !m_utteranceQueue.isEmpty()) | 171 if (didJustFinishCurrentUtterance && !m_utteranceQueue.isEmpty()) |
| 172 startSpeakingImmediately(); | 172 startSpeakingImmediately(); |
| 173 } | 173 } |
| 174 | 174 |
| 175 void SpeechSynthesis::boundaryEventOccurred(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance, SpeechBoundary boundary, unsigned charIndex) | 175 void SpeechSynthesis::boundaryEventOccurred(PlatformSpeechSynthesisUtterance* utterance, SpeechBoundary boundary, unsigned charIndex) |
| 176 { | 176 { |
| 177 DEFINE_STATIC_LOCAL(const String, wordBoundaryString, ("word")); | 177 DEFINE_STATIC_LOCAL(const String, wordBoundaryString, ("word")); |
| 178 DEFINE_STATIC_LOCAL(const String, sentenceBoundaryString, ("sentence")); | 178 DEFINE_STATIC_LOCAL(const String, sentenceBoundaryString, ("sentence")); |
| 179 | 179 |
| 180 switch (boundary) { | 180 switch (boundary) { |
| 181 case SpeechWordBoundary: | 181 case SpeechWordBoundary: |
| 182 fireEvent(EventTypeNames::boundary, static_cast<SpeechSynthesisUtterance*>(utterance->client()), charIndex, wordBoundaryString); | 182 fireEvent(EventTypeNames::boundary, static_cast<SpeechSynthesisUtterance*>(utterance->client()), charIndex, wordBoundaryString); |
| 183 break; | 183 break; |
| 184 case SpeechSentenceBoundary: | 184 case SpeechSentenceBoundary: |
| 185 fireEvent(EventTypeNames::boundary, static_cast<SpeechSynthesisUtterance*>(utterance->client()), charIndex, sentenceBoundaryString); | 185 fireEvent(EventTypeNames::boundary, static_cast<SpeechSynthesisUtterance*>(utterance->client()), charIndex, sentenceBoundaryString); |
| 186 break; | 186 break; |
| 187 default: | 187 default: |
| 188 ASSERT_NOT_REACHED(); | 188 ASSERT_NOT_REACHED(); |
| 189 } | 189 } |
| 190 } | 190 } |
| 191 | 191 |
| 192 void SpeechSynthesis::didStartSpeaking(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance) | 192 void SpeechSynthesis::didStartSpeaking(PlatformSpeechSynthesisUtterance* utterance) |
| 193 { | 193 { |
| 194 if (utterance->client()) | 194 if (utterance->client()) |
| 195 fireEvent(EventTypeNames::start, static_cast<SpeechSynthesisUtterance*>(utterance->client()), 0, String()); | 195 fireEvent(EventTypeNames::start, static_cast<SpeechSynthesisUtterance*>(utterance->client()), 0, String()); |
| 196 } | 196 } |
| 197 | 197 |
| 198 void SpeechSynthesis::didPauseSpeaking(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance) | 198 void SpeechSynthesis::didPauseSpeaking(PlatformSpeechSynthesisUtterance* utterance) |
| 199 { | 199 { |
| 200 m_isPaused = true; | 200 m_isPaused = true; |
| 201 if (utterance->client()) | 201 if (utterance->client()) |
| 202 fireEvent(EventTypeNames::pause, static_cast<SpeechSynthesisUtterance*>(utterance->client()), 0, String()); | 202 fireEvent(EventTypeNames::pause, static_cast<SpeechSynthesisUtterance*>(utterance->client()), 0, String()); |
| 203 } | 203 } |
| 204 | 204 |
| 205 void SpeechSynthesis::didResumeSpeaking(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance) | 205 void SpeechSynthesis::didResumeSpeaking(PlatformSpeechSynthesisUtterance* utterance) |
| 206 { | 206 { |
| 207 m_isPaused = false; | 207 m_isPaused = false; |
| 208 if (utterance->client()) | 208 if (utterance->client()) |
| 209 fireEvent(EventTypeNames::resume, static_cast<SpeechSynthesisUtterance*>(utterance->client()), 0, String()); | 209 fireEvent(EventTypeNames::resume, static_cast<SpeechSynthesisUtterance*>(utterance->client()), 0, String()); |
| 210 } | 210 } |
| 211 | 211 |
| 212 void SpeechSynthesis::didFinishSpeaking(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance) | 212 void SpeechSynthesis::didFinishSpeaking(PlatformSpeechSynthesisUtterance* utterance) |
| 213 { | 213 { |
| 214 if (utterance->client()) | 214 if (utterance->client()) |
| 215 handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), false); | 215 handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), false); |
| 216 } | 216 } |
| 217 | 217 |
| 218 void SpeechSynthesis::speakingErrorOccurred(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance) | 218 void SpeechSynthesis::speakingErrorOccurred(PlatformSpeechSynthesisUtterance* utterance) |
| 219 { | 219 { |
| 220 if (utterance->client()) | 220 if (utterance->client()) |
| 221 handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), true); | 221 handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), true); |
| 222 } | 222 } |
| 223 | 223 |
| 224 SpeechSynthesisUtterance* SpeechSynthesis::currentSpeechUtterance() const | 224 SpeechSynthesisUtterance* SpeechSynthesis::currentSpeechUtterance() const |
| 225 { | 225 { |
| 226 if (!m_utteranceQueue.isEmpty()) | 226 if (!m_utteranceQueue.isEmpty()) |
| 227 return m_utteranceQueue.first().get(); | 227 return m_utteranceQueue.first().get(); |
| 228 return 0; | 228 return 0; |
| 229 } | 229 } |
| 230 | 230 |
| 231 const AtomicString& SpeechSynthesis::interfaceName() const | 231 const AtomicString& SpeechSynthesis::interfaceName() const |
| 232 { | 232 { |
| 233 return EventTargetNames::SpeechSynthesisUtterance; | 233 return EventTargetNames::SpeechSynthesisUtterance; |
| 234 } | 234 } |
| 235 | 235 |
| 236 void SpeechSynthesis::trace(Visitor* visitor) | 236 void SpeechSynthesis::trace(Visitor* visitor) |
| 237 { | 237 { |
| 238 visitor->trace(m_platformSpeechSynthesizer); |
| 238 visitor->trace(m_voiceList); | 239 visitor->trace(m_voiceList); |
| 239 visitor->trace(m_utteranceQueue); | 240 visitor->trace(m_utteranceQueue); |
| 241 PlatformSpeechSynthesizerClient::trace(visitor); |
| 240 EventTargetWithInlineData::trace(visitor); | 242 EventTargetWithInlineData::trace(visitor); |
| 241 } | 243 } |
| 242 | 244 |
| 243 } // namespace WebCore | 245 } // namespace WebCore |
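
Taken together, the hunks show the standard Oilpan migration pattern for this class: the PlatformSpeechSynthesizerClient callbacks take raw pointers instead of PassRefPtr, the synthesizer and voice list become traced heap members, and the new trace() body chains up through the client mixin. A simplified sketch of the header-side declarations this implies follows; it is inferred from the .cpp diff above, not taken from the actual SpeechSynthesis.h:

```cpp
// Hypothetical, simplified sketch of SpeechSynthesis.h after this patch,
// inferred from the .cpp changes (the real header also involves
// ScriptWrappable, EventTargetWithInlineData, ContextLifecycleObserver,
// and transition-era macros, which are omitted here).
class SpeechSynthesis : public PlatformSpeechSynthesizerClient /* , ... */ {
public:
    virtual void trace(Visitor*);

private:
    // Was OwnPtr<PlatformSpeechSynthesizer>; now a traced heap reference,
    // which is why trace() gained visitor->trace(m_platformSpeechSynthesizer).
    Member<PlatformSpeechSynthesizer> m_platformSpeechSynthesizer;
    // Was Vector<RefPtr<PlatformSpeechSynthesisVoice> > per the getVoices() hunk.
    HeapVector<Member<SpeechSynthesisVoice> > m_voiceList;
    // Assumed HeapDeque; the diff only shows this member being traced
    // and queried via first().get().
    HeapDeque<Member<SpeechSynthesisUtterance> > m_utteranceQueue;
    bool m_isPaused;
};
```
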