| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Apple Inc. All rights reserved. | 2 * Copyright (C) 2013 Apple Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
| 8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
| 9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
| 10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
| (...skipping 19 matching lines...) |
| 30 #include "platform/speech/PlatformSpeechSynthesisVoice.h" | 30 #include "platform/speech/PlatformSpeechSynthesisVoice.h" |
| 31 #include "wtf/CurrentTime.h" | 31 #include "wtf/CurrentTime.h" |
| 32 | 32 |
| 33 namespace blink { | 33 namespace blink { |
| 34 | 34 |
| 35 SpeechSynthesis* SpeechSynthesis::create(ExecutionContext* context) { | 35 SpeechSynthesis* SpeechSynthesis::create(ExecutionContext* context) { |
| 36 return new SpeechSynthesis(context); | 36 return new SpeechSynthesis(context); |
| 37 } | 37 } |
| 38 | 38 |
| 39 SpeechSynthesis::SpeechSynthesis(ExecutionContext* context) | 39 SpeechSynthesis::SpeechSynthesis(ExecutionContext* context) |
| 40 : m_executionContext(context), | 40 : ContextClient(context), |
| 41 m_platformSpeechSynthesizer(PlatformSpeechSynthesizer::create(this)), | 41 m_platformSpeechSynthesizer(PlatformSpeechSynthesizer::create(this)), |
| 42 m_isPaused(false) {} | 42 m_isPaused(false) {} |
| 43 | 43 |
| 44 void SpeechSynthesis::setPlatformSynthesizer( | 44 void SpeechSynthesis::setPlatformSynthesizer( |
| 45 PlatformSpeechSynthesizer* synthesizer) { | 45 PlatformSpeechSynthesizer* synthesizer) { |
| 46 m_platformSpeechSynthesizer = synthesizer; | 46 m_platformSpeechSynthesizer = synthesizer; |
| 47 } | 47 } |
| 48 | 48 |
| 49 void SpeechSynthesis::voicesDidChange() { | 49 void SpeechSynthesis::voicesDidChange() { |
| 50 m_voiceList.clear(); | 50 m_voiceList.clear(); |
| 51 if (!m_executionContext->isContextDestroyed()) | 51 if (getExecutionContext()) |
| 52 dispatchEvent(Event::create(EventTypeNames::voiceschanged)); | 52 dispatchEvent(Event::create(EventTypeNames::voiceschanged)); |
| 53 } | 53 } |
| 54 | 54 |
| 55 const HeapVector<Member<SpeechSynthesisVoice>>& SpeechSynthesis::getVoices() { | 55 const HeapVector<Member<SpeechSynthesisVoice>>& SpeechSynthesis::getVoices() { |
| 56 if (m_voiceList.size()) | 56 if (m_voiceList.size()) |
| 57 return m_voiceList; | 57 return m_voiceList; |
| 58 | 58 |
| 59 // If the voiceList is empty, that's the cue to get the voices from the | 59 // If the voiceList is empty, that's the cue to get the voices from the |
| 60 // platform again. | 60 // platform again. |
| 61 const Vector<RefPtr<PlatformSpeechSynthesisVoice>>& platformVoices = | 61 const Vector<RefPtr<PlatformSpeechSynthesisVoice>>& platformVoices = |
| (...skipping 57 matching lines...) |
| 119 void SpeechSynthesis::resume() { | 119 void SpeechSynthesis::resume() { |
| 120 if (!currentSpeechUtterance()) | 120 if (!currentSpeechUtterance()) |
| 121 return; | 121 return; |
| 122 m_platformSpeechSynthesizer->resume(); | 122 m_platformSpeechSynthesizer->resume(); |
| 123 } | 123 } |
| 124 | 124 |
| 125 void SpeechSynthesis::fireEvent(const AtomicString& type, | 125 void SpeechSynthesis::fireEvent(const AtomicString& type, |
| 126 SpeechSynthesisUtterance* utterance, | 126 SpeechSynthesisUtterance* utterance, |
| 127 unsigned long charIndex, | 127 unsigned long charIndex, |
| 128 const String& name) { | 128 const String& name) { |
| 129 if (!m_executionContext->isContextDestroyed()) { | 129 if (!getExecutionContext()) |
| 130 double elapsedTimeMillis = | 130 return; |
| 131 (monotonicallyIncreasingTime() - utterance->startTime()) * 1000.0; | 131 |
| 132 utterance->dispatchEvent(SpeechSynthesisEvent::create( | 132 double elapsedTimeMillis = |
| 133 type, utterance, charIndex, elapsedTimeMillis, name)); | 133 (monotonicallyIncreasingTime() - utterance->startTime()) * 1000.0; |
| 134 } | 134 utterance->dispatchEvent(SpeechSynthesisEvent::create( |
| | 135 type, utterance, charIndex, elapsedTimeMillis, name)); |
| 135 } | 136 } |
| 136 | 137 |
| 137 void SpeechSynthesis::handleSpeakingCompleted( | 138 void SpeechSynthesis::handleSpeakingCompleted( |
| 138 SpeechSynthesisUtterance* utterance, | 139 SpeechSynthesisUtterance* utterance, |
| 139 bool errorOccurred) { | 140 bool errorOccurred) { |
| 140 ASSERT(utterance); | 141 ASSERT(utterance); |
| 141 | 142 |
| 142 bool shouldStartSpeaking = false; | 143 bool shouldStartSpeaking = false; |
| 143 // If the utterance that completed was the one we're currently speaking, | 144 // If the utterance that completed was the one we're currently speaking, |
| 144 // remove it from the queue and start speaking the next one. | 145 // remove it from the queue and start speaking the next one. |
| (...skipping 82 matching lines...) |
| 227 return nullptr; | 228 return nullptr; |
| 228 | 229 |
| 229 return m_utteranceQueue.first(); | 230 return m_utteranceQueue.first(); |
| 230 } | 231 } |
| 231 | 232 |
| 232 const AtomicString& SpeechSynthesis::interfaceName() const { | 233 const AtomicString& SpeechSynthesis::interfaceName() const { |
| 233 return EventTargetNames::SpeechSynthesis; | 234 return EventTargetNames::SpeechSynthesis; |
| 234 } | 235 } |
| 235 | 236 |
| 236 DEFINE_TRACE(SpeechSynthesis) { | 237 DEFINE_TRACE(SpeechSynthesis) { |
| 237 visitor->trace(m_executionContext); | |
| 238 visitor->trace(m_platformSpeechSynthesizer); | 238 visitor->trace(m_platformSpeechSynthesizer); |
| 239 visitor->trace(m_voiceList); | 239 visitor->trace(m_voiceList); |
| 240 visitor->trace(m_utteranceQueue); | 240 visitor->trace(m_utteranceQueue); |
| 241 PlatformSpeechSynthesizerClient::trace(visitor); | 241 PlatformSpeechSynthesizerClient::trace(visitor); |
| | 242 ContextClient::trace(visitor); |
| 242 EventTargetWithInlineData::trace(visitor); | 243 EventTargetWithInlineData::trace(visitor); |
| 243 } | 244 } |
| 244 | 245 |
| 245 } // namespace blink | 246 } // namespace blink |
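
For context, the change above replaces a raw m_executionContext member (queried with isContextDestroyed()) with the ContextClient base, whose getExecutionContext() reads back as null once the context is gone. Below is a minimal standalone sketch of that lifecycle pattern, assuming only standard C++; Context, ContextClient, and Synthesis here are illustrative stand-ins, and std::weak_ptr stands in for Blink's garbage-collected weak member, so this is not Blink code, just the shape of the null-check-instead-of-destroyed-flag idea.

#include <iostream>
#include <memory>

class Context {};  // stand-in for Blink's ExecutionContext

class ContextClient {
 public:
  explicit ContextClient(std::shared_ptr<Context> context)
      : m_context(context) {}

  // Returns null once the context has been destroyed, mirroring the
  // getExecutionContext() null checks in the NEW side of the diff.
  std::shared_ptr<Context> getExecutionContext() const {
    return m_context.lock();
  }

 private:
  std::weak_ptr<Context> m_context;  // weak: does not keep the context alive
};

class Synthesis : public ContextClient {
 public:
  using ContextClient::ContextClient;

  void fireEvent(const char* type) {
    // Early return when the context is gone, as in the rewritten fireEvent().
    if (!getExecutionContext())
      return;
    std::cout << "dispatching " << type << "\n";
  }
};

int main() {
  auto context = std::make_shared<Context>();
  Synthesis synthesis(context);
  synthesis.fireEvent("start");  // context alive: event dispatched
  context.reset();               // simulate context destruction
  synthesis.fireEvent("end");    // context gone: silently dropped
}

The design point the diff captures is that the client no longer needs to ask a possibly-dangling context whether it was destroyed; destruction simply nulls out the reference the base class hands back.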