| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Apple Inc. All rights reserved. | 2 * Copyright (C) 2013 Apple Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
| 8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
| 9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
| 10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
| (...skipping 16 matching lines...) |
| 27 #include "modules/speech/SpeechSynthesis.h" | 27 #include "modules/speech/SpeechSynthesis.h" |
| 28 | 28 |
| 29 #include "bindings/v8/ExceptionState.h" | 29 #include "bindings/v8/ExceptionState.h" |
| 30 #include "core/dom/ExecutionContext.h" | 30 #include "core/dom/ExecutionContext.h" |
| 31 #include "modules/speech/SpeechSynthesisEvent.h" | 31 #include "modules/speech/SpeechSynthesisEvent.h" |
| 32 #include "platform/speech/PlatformSpeechSynthesisVoice.h" | 32 #include "platform/speech/PlatformSpeechSynthesisVoice.h" |
| 33 #include "wtf/CurrentTime.h" | 33 #include "wtf/CurrentTime.h" |
| 34 | 34 |
| 35 namespace WebCore { | 35 namespace WebCore { |
| 36 | 36 |
| 37 PassRefPtrWillBeRawPtr<SpeechSynthesis> SpeechSynthesis::create(ExecutionContext* context) | 37 SpeechSynthesis* SpeechSynthesis::create(ExecutionContext* context) |
| 38 { | 38 { |
| 39 return adoptRefWillBeNoop(new SpeechSynthesis(context)); | 39 return adoptRefCountedGarbageCollectedWillBeNoop(new SpeechSynthesis(context)); |
| 40 } | 40 } |
| 41 | 41 |
| 42 SpeechSynthesis::SpeechSynthesis(ExecutionContext* context) | 42 SpeechSynthesis::SpeechSynthesis(ExecutionContext* context) |
| 43 : ContextLifecycleObserver(context) | 43 : ContextLifecycleObserver(context) |
| 44 , m_platformSpeechSynthesizer(PlatformSpeechSynthesizer::create(this)) | 44 , m_platformSpeechSynthesizer(PlatformSpeechSynthesizer::create(this)) |
| 45 , m_isPaused(false) | 45 , m_isPaused(false) |
| 46 { | 46 { |
| 47 ScriptWrappable::init(this); | 47 ScriptWrappable::init(this); |
| 48 } | 48 } |
| 49 | 49 |
| 50 void SpeechSynthesis::setPlatformSynthesizer(PassOwnPtr<PlatformSpeechSynthesizer> synthesizer) | 50 void SpeechSynthesis::setPlatformSynthesizer(PassOwnPtr<PlatformSpeechSynthesizer> synthesizer) |
| 51 { | 51 { |
| 52 m_platformSpeechSynthesizer = synthesizer; | 52 m_platformSpeechSynthesizer = synthesizer; |
| 53 } | 53 } |
| 54 | 54 |
| 55 ExecutionContext* SpeechSynthesis::executionContext() const | 55 ExecutionContext* SpeechSynthesis::executionContext() const |
| 56 { | 56 { |
| 57 return ContextLifecycleObserver::executionContext(); | 57 return ContextLifecycleObserver::executionContext(); |
| 58 } | 58 } |
| 59 | 59 |
| 60 void SpeechSynthesis::voicesDidChange() | 60 void SpeechSynthesis::voicesDidChange() |
| 61 { | 61 { |
| 62 m_voiceList.clear(); | 62 m_voiceList.clear(); |
| 63 if (executionContext() && !executionContext()->activeDOMObjectsAreStopped()) | 63 if (executionContext() && !executionContext()->activeDOMObjectsAreStopped()) |
| 64 dispatchEvent(Event::create(EventTypeNames::voiceschanged)); | 64 dispatchEvent(Event::create(EventTypeNames::voiceschanged)); |
| 65 } | 65 } |
| 66 | 66 |
| 67 const WillBeHeapVector<RefPtrWillBeMember<SpeechSynthesisVoice> >& SpeechSynthesis::getVoices() | 67 const HeapVector<Member<SpeechSynthesisVoice> >& SpeechSynthesis::getVoices() |
| 68 { | 68 { |
| 69 if (m_voiceList.size()) | 69 if (m_voiceList.size()) |
| 70 return m_voiceList; | 70 return m_voiceList; |
| 71 | 71 |
| 72 // If the voiceList is empty, that's the cue to get the voices from the platform again. | 72 // If the voiceList is empty, that's the cue to get the voices from the platform again. |
| 73 const Vector<RefPtr<PlatformSpeechSynthesisVoice> >& platformVoices = m_platformSpeechSynthesizer->voiceList(); | 73 const Vector<RefPtr<PlatformSpeechSynthesisVoice> >& platformVoices = m_platformSpeechSynthesizer->voiceList(); |
| 74 size_t voiceCount = platformVoices.size(); | 74 size_t voiceCount = platformVoices.size(); |
| 75 for (size_t k = 0; k < voiceCount; k++) | 75 for (size_t k = 0; k < voiceCount; k++) |
| 76 m_voiceList.append(SpeechSynthesisVoice::create(platformVoices[k])); | 76 m_voiceList.append(SpeechSynthesisVoice::create(platformVoices[k])); |
| 77 | 77 |
| (...skipping 68 matching lines...) |
| 146 void SpeechSynthesis::fireEvent(const AtomicString& type, SpeechSynthesisUtterance* utterance, unsigned long charIndex, const String& name) | 146 void SpeechSynthesis::fireEvent(const AtomicString& type, SpeechSynthesisUtterance* utterance, unsigned long charIndex, const String& name) |
| 147 { | 147 { |
| 148 if (executionContext() && !executionContext()->activeDOMObjectsAreStopped()) | 148 if (executionContext() && !executionContext()->activeDOMObjectsAreStopped()) |
| 149 utterance->dispatchEvent(SpeechSynthesisEvent::create(type, charIndex, (currentTime() - utterance->startTime()), name)); | 149 utterance->dispatchEvent(SpeechSynthesisEvent::create(type, charIndex, (currentTime() - utterance->startTime()), name)); |
| 150 } | 150 } |
| 151 | 151 |
| 152 void SpeechSynthesis::handleSpeakingCompleted(SpeechSynthesisUtterance* utterance, bool errorOccurred) | 152 void SpeechSynthesis::handleSpeakingCompleted(SpeechSynthesisUtterance* utterance, bool errorOccurred) |
| 153 { | 153 { |
| 154 ASSERT(utterance); | 154 ASSERT(utterance); |
| 155 | 155 |
| 156 // Keep the utterance around long enough to fire an event on it in case m_utteranceQueue | |
| 157 // is holding the last reference to it. | |
| 158 RefPtrWillBeRawPtr<SpeechSynthesisUtterance> protect(utterance); | |
| 159 | |
| 160 bool didJustFinishCurrentUtterance = false; | 156 bool didJustFinishCurrentUtterance = false; |
| 161 // If the utterance that completed was the one we're currently speaking, | 157 // If the utterance that completed was the one we're currently speaking, |
| 162 // remove it from the queue and start speaking the next one. | 158 // remove it from the queue and start speaking the next one. |
| 163 if (utterance == currentSpeechUtterance()) { | 159 if (utterance == currentSpeechUtterance()) { |
| 164 m_utteranceQueue.removeFirst(); | 160 m_utteranceQueue.removeFirst(); |
| 165 didJustFinishCurrentUtterance = true; | 161 didJustFinishCurrentUtterance = true; |
| 166 } | 162 } |
| 167 | 163 |
| 168 // Always fire the event, because the platform may have asynchronously | 164 // Always fire the event, because the platform may have asynchronously |
| 169 // sent an event on an utterance before it got the message that we | 165 // sent an event on an utterance before it got the message that we |
| (...skipping 68 matching lines...) |
| 238 } | 234 } |
| 239 | 235 |
| 240 void SpeechSynthesis::trace(Visitor* visitor) | 236 void SpeechSynthesis::trace(Visitor* visitor) |
| 241 { | 237 { |
| 242 visitor->trace(m_voiceList); | 238 visitor->trace(m_voiceList); |
| 243 visitor->trace(m_utteranceQueue); | 239 visitor->trace(m_utteranceQueue); |
| 244 EventTargetWithInlineData::trace(visitor); | 240 EventTargetWithInlineData::trace(visitor); |
| 245 } | 241 } |
| 246 | 242 |
| 247 } // namespace WebCore | 243 } // namespace WebCore |
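
Note on the pattern in the NEW column: this change moves SpeechSynthesis from reference counting toward Blink's garbage collector (Oilpan). create() now returns a raw pointer, the voice list becomes a HeapVector of Member handles, the manual protect RefPtr in handleSpeakingCompleted() is dropped, and trace() reports m_voiceList and m_utteranceQueue to the collector. The sketch below is a minimal, self-contained illustration of that tracing idea; the Traceable, Member, Visitor, and HeapVector stand-ins are simplified mocks written for this note, not Blink's real Oilpan API.

// Simplified stand-ins (not Blink's real types): a traced object keeps bare
// Member handles to other traced objects and reports them to a Visitor in
// trace(), instead of keeping them alive through RefPtr reference counting.
#include <cstdio>
#include <vector>

class Visitor;

// Base class for objects the collector can trace through.
class Traceable {
public:
    virtual void trace(Visitor* visitor) = 0;
    virtual ~Traceable() { }
};

// Stand-in for Member<T>: a bare pointer that does not keep its target
// alive by itself; liveness comes from being reported during tracing.
template <typename T>
class Member {
public:
    explicit Member(T* ptr) : m_ptr(ptr) { }
    T* get() const { return m_ptr; }
private:
    T* m_ptr;
};

// Stand-in visitor: a real collector would mark objects here; this one
// just counts how many objects it was shown.
class Visitor {
public:
    Visitor() : m_visited(0) { }

    template <typename T>
    void trace(const Member<T>& member)
    {
        if (!member.get())
            return;
        ++m_visited;
        member.get()->trace(this); // follow edges out of the pointee
    }

    template <typename T>
    void trace(const std::vector<Member<T> >& heapVector) // HeapVector stand-in
    {
        for (size_t i = 0; i < heapVector.size(); ++i)
            trace(heapVector[i]);
    }

    int visited() const { return m_visited; }

private:
    int m_visited;
};

// Leaf object with no outgoing edges, in the role of SpeechSynthesisVoice.
class Voice : public Traceable {
public:
    virtual void trace(Visitor*) { }
};

// Owner shaped like SpeechSynthesis: voices are held through Member handles
// and enumerated in trace(), mirroring the new visitor->trace(m_voiceList).
class Synthesis : public Traceable {
public:
    void addVoice(Voice* voice) { m_voiceList.push_back(Member<Voice>(voice)); }

    virtual void trace(Visitor* visitor)
    {
        visitor->trace(m_voiceList);
    }

private:
    std::vector<Member<Voice> > m_voiceList;
};

int main()
{
    Voice a, b;                // a real collector would allocate these on its heap
    Synthesis synthesis;
    synthesis.addVoice(&a);
    synthesis.addVoice(&b);

    Visitor visitor;
    synthesis.trace(&visitor); // what the collector does during marking
    std::printf("objects reachable from synthesis: %d\n", visitor.visited());
    return 0;
}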