OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Apple Inc. All rights reserved. | 2 * Copyright (C) 2013 Apple Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
6 * are met: | 6 * are met: |
7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
(...skipping 16 matching lines...) |
27 #include "modules/speech/SpeechSynthesis.h" | 27 #include "modules/speech/SpeechSynthesis.h" |
28 | 28 |
29 #include "bindings/v8/ExceptionState.h" | 29 #include "bindings/v8/ExceptionState.h" |
30 #include "core/dom/ExecutionContext.h" | 30 #include "core/dom/ExecutionContext.h" |
31 #include "modules/speech/SpeechSynthesisEvent.h" | 31 #include "modules/speech/SpeechSynthesisEvent.h" |
32 #include "platform/speech/PlatformSpeechSynthesisVoice.h" | 32 #include "platform/speech/PlatformSpeechSynthesisVoice.h" |
33 #include "wtf/CurrentTime.h" | 33 #include "wtf/CurrentTime.h" |
34 | 34 |
35 namespace WebCore { | 35 namespace WebCore { |
36 | 36 |
37 PassRefPtr<SpeechSynthesis> SpeechSynthesis::create(ExecutionContext* context) | 37 DEFINE_GC_INFO(SpeechSynthesis); |
| 38 |
| 39 PassRefPtrWillBeRawPtr<SpeechSynthesis> SpeechSynthesis::create(ExecutionContext* context) |
38 { | 40 { |
39 return adoptRef(new SpeechSynthesis(context)); | 41 return adoptRefWillBeNoop(new SpeechSynthesis(context)); |
40 } | 42 } |
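
The create() hunk above is the heart of Blink's transitional Oilpan idiom: DEFINE_GC_INFO registers the class with the garbage collector, and the WillBe aliases compile to GC types when ENABLE(OILPAN) is set and to the legacy ref-counted types otherwise. A minimal sketch of how the aliases behave, assuming a simplified form of what platform/heap/Handle.h provides (the real definitions are macro-based):

    #if ENABLE_OILPAN
    // With Oilpan the object lives on the managed heap: the factory hands
    // back a traced raw pointer and "adopting" the reference is a no-op.
    template <typename T> using PassRefPtrWillBeRawPtr = T*;
    template <typename T> T* adoptRefWillBeNoop(T* ptr) { return ptr; }
    #else
    // Without Oilpan the aliases fall back to plain WTF reference counting.
    template <typename T> using PassRefPtrWillBeRawPtr = PassRefPtr<T>;
    template <typename T> PassRefPtr<T> adoptRefWillBeNoop(T* ptr) { return adoptRef(ptr); }
    #endif

Either way, callers keep writing SpeechSynthesis::create(context) unchanged, which is what lets the migration land file by file.
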
41 | 43 |
42 SpeechSynthesis::SpeechSynthesis(ExecutionContext* context) | 44 SpeechSynthesis::SpeechSynthesis(ExecutionContext* context) |
43 : ContextLifecycleObserver(context) | 45 : ContextLifecycleObserver(context) |
44 , m_platformSpeechSynthesizer(PlatformSpeechSynthesizer::create(this)) | 46 , m_platformSpeechSynthesizer(PlatformSpeechSynthesizer::create(this)) |
45 , m_currentSpeechUtterance(0) | 47 , m_currentSpeechUtterance(0) |
46 , m_isPaused(false) | 48 , m_isPaused(false) |
47 { | 49 { |
48 ScriptWrappable::init(this); | 50 ScriptWrappable::init(this); |
49 } | 51 } |
50 | 52 |
51 void SpeechSynthesis::setPlatformSynthesizer(PassOwnPtr<PlatformSpeechSynthesizer> synthesizer) | 53 void SpeechSynthesis::setPlatformSynthesizer(PassOwnPtr<PlatformSpeechSynthesizer> synthesizer) |
52 { | 54 { |
53 m_platformSpeechSynthesizer = synthesizer; | 55 m_platformSpeechSynthesizer = synthesizer; |
54 } | 56 } |
55 | 57 |
56 ExecutionContext* SpeechSynthesis::executionContext() const | 58 ExecutionContext* SpeechSynthesis::executionContext() const |
57 { | 59 { |
58 return ContextLifecycleObserver::executionContext(); | 60 return ContextLifecycleObserver::executionContext(); |
59 } | 61 } |
60 | 62 |
61 void SpeechSynthesis::voicesDidChange() | 63 void SpeechSynthesis::voicesDidChange() |
62 { | 64 { |
63 m_voiceList.clear(); | 65 m_voiceList.clear(); |
64 if (!executionContext()->activeDOMObjectsAreStopped()) | 66 if (!executionContext()->activeDOMObjectsAreStopped()) |
65 dispatchEvent(Event::create(EventTypeNames::voiceschanged)); | 67 dispatchEvent(Event::create(EventTypeNames::voiceschanged)); |
66 } | 68 } |
67 | 69 |
68 const Vector<RefPtr<SpeechSynthesisVoice> >& SpeechSynthesis::getVoices() | 70 const WillBeHeapVector<RefPtrWillBeMember<SpeechSynthesisVoice> >& SpeechSynthesis::getVoices() |
69 { | 71 { |
70 if (m_voiceList.size()) | 72 if (m_voiceList.size()) |
71 return m_voiceList; | 73 return m_voiceList; |
72 | 74 |
73 // If the voiceList is empty, that's the cue to get the voices from the platform again. | 75 // If the voiceList is empty, that's the cue to get the voices from the platform again. |
74 const Vector<RefPtr<PlatformSpeechSynthesisVoice> >& platformVoices = m_platformSpeechSynthesizer->voiceList(); | 76 const Vector<RefPtr<PlatformSpeechSynthesisVoice> >& platformVoices = m_platformSpeechSynthesizer->voiceList(); |
75 size_t voiceCount = platformVoices.size(); | 77 size_t voiceCount = platformVoices.size(); |
76 for (size_t k = 0; k < voiceCount; k++) | 78 for (size_t k = 0; k < voiceCount; k++) |
77 m_voiceList.append(SpeechSynthesisVoice::create(platformVoices[k])); | 79 m_voiceList.append(SpeechSynthesisVoice::create(platformVoices[k])); |
78 | 80 |
(...skipping 39 matching lines...) |
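
The getVoices() hunk swaps the plain WTF collection for its transition twin: WillBeHeapVector<RefPtrWillBeMember<T> > resolves to HeapVector<Member<T> > under Oilpan, so the vector's backing store lives on the GC heap and its elements are traced strong references rather than refcounts. A rough sketch of the mapping, again assuming simplified alias forms rather than Blink's macro definitions:

    #if ENABLE_OILPAN
    // GC build: elements are Member<T> edges discovered via trace(), and
    // the vector's backing store is itself allocated on the managed heap.
    template <typename T> using RefPtrWillBeMember = Member<T>;
    template <typename T> using WillBeHeapVector = HeapVector<T>;
    #else
    // Legacy build: an ordinary WTF::Vector of RefPtrs.
    template <typename T> using RefPtrWillBeMember = RefPtr<T>;
    template <typename T> using WillBeHeapVector = Vector<T>;
    #endif

Note that the platform-voice list just below stays a plain Vector<RefPtr<...> >: PlatformSpeechSynthesisVoice belongs to the platform layer and is not moved to the GC heap in this patch.
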
118 | 120 |
119 // If the queue was empty, speak this immediately and add it to the queue. | 121 // If the queue was empty, speak this immediately and add it to the queue. |
120 if (m_utteranceQueue.size() == 1) | 122 if (m_utteranceQueue.size() == 1) |
121 startSpeakingImmediately(utterance); | 123 startSpeakingImmediately(utterance); |
122 } | 124 } |
123 | 125 |
124 void SpeechSynthesis::cancel() | 126 void SpeechSynthesis::cancel() |
125 { | 127 { |
126 // Remove all the items from the utterance queue. | 128 // Remove all the items from the utterance queue. |
127 // Hold on to the current utterance so the platform synthesizer can have a chance to clean up. | 129 // Hold on to the current utterance so the platform synthesizer can have a chance to clean up. |
128 RefPtr<SpeechSynthesisUtterance> current = m_currentSpeechUtterance; | 130 RefPtrWillBeMember<SpeechSynthesisUtterance> current = m_currentSpeechUtterance; |
129 m_utteranceQueue.clear(); | 131 m_utteranceQueue.clear(); |
130 m_platformSpeechSynthesizer->cancel(); | 132 m_platformSpeechSynthesizer->cancel(); |
131 current = 0; | 133 current = 0; |
132 | 134 |
133 // The platform should have called back immediately and cleared the current utterance. | 135 // The platform should have called back immediately and cleared the current utterance. |
134 ASSERT(!m_currentSpeechUtterance); | 136 ASSERT(!m_currentSpeechUtterance); |
135 } | 137 } |
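
The local |current| in cancel() is a protector: clearing the queue and cancelling the synthesizer may drop the last references to the in-flight utterance while platform callbacks still need it. Spelled as RefPtrWillBeMember, the same line pins the object in both builds, via the refcount without Oilpan and, presumably, via conservative stack scanning of the on-stack Member with it. A sketch of the pattern in isolation, with hypothetical names (cancelAll, Synthesizer, Job, currentJob are illustrative, not Blink API):

    void cancelAll(Synthesizer* synthesizer)
    {
        // Pin the active job for this scope: refcount bump in the
        // non-Oilpan build, on-stack traced reference under Oilpan.
        RefPtrWillBeMember<Job> protect = synthesizer->currentJob();
        synthesizer->clearQueue();    // may release the queue's references
        synthesizer->cancelCurrent(); // callbacks may still touch the job
    }   // |protect| dies here; the job becomes collectable/freeable
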
136 | 138 |
137 void SpeechSynthesis::pause() | 139 void SpeechSynthesis::pause() |
138 { | 140 { |
(...skipping 16 matching lines...) |
155 | 157 |
156 void SpeechSynthesis::handleSpeakingCompleted(SpeechSynthesisUtterance* utterance, bool errorOccurred) | 158 void SpeechSynthesis::handleSpeakingCompleted(SpeechSynthesisUtterance* utterance, bool errorOccurred) |
157 { | 159 { |
158 ASSERT(utterance); | 160 ASSERT(utterance); |
159 ASSERT(m_currentSpeechUtterance); | 161 ASSERT(m_currentSpeechUtterance); |
160 m_currentSpeechUtterance = 0; | 162 m_currentSpeechUtterance = 0; |
161 | 163 |
162 fireEvent(errorOccurred ? EventTypeNames::error : EventTypeNames::end, utterance, 0, String()); | 164 fireEvent(errorOccurred ? EventTypeNames::error : EventTypeNames::end, utterance, 0, String()); |
163 | 165 |
164 if (m_utteranceQueue.size()) { | 166 if (m_utteranceQueue.size()) { |
165 RefPtr<SpeechSynthesisUtterance> firstUtterance = m_utteranceQueue.first(); | 167 RefPtrWillBeMember<SpeechSynthesisUtterance> firstUtterance = m_utteranceQueue.first(); |
166 ASSERT(firstUtterance == utterance); | 168 ASSERT(firstUtterance == utterance); |
167 if (firstUtterance == utterance) | 169 if (firstUtterance == utterance) |
168 m_utteranceQueue.removeFirst(); | 170 m_utteranceQueue.removeFirst(); |
169 | 171 |
170 // Start the next job if there is one pending. | 172 // Start the next job if there is one pending. |
171 if (!m_utteranceQueue.isEmpty()) | 173 if (!m_utteranceQueue.isEmpty()) |
172 startSpeakingImmediately(m_utteranceQueue.first().get()); | 174 startSpeakingImmediately(m_utteranceQueue.first().get()); |
173 } | 175 } |
174 } | 176 } |
175 | 177 |
(...skipping 44 matching lines...) |
220 { | 222 { |
221 if (utterance->client()) | 223 if (utterance->client()) |
222 handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), true); | 224 handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), true); |
223 } | 225 } |
224 | 226 |
225 const AtomicString& SpeechSynthesis::interfaceName() const | 227 const AtomicString& SpeechSynthesis::interfaceName() const |
226 { | 228 { |
227 return EventTargetNames::SpeechSynthesisUtterance; | 229 return EventTargetNames::SpeechSynthesisUtterance; |
228 } | 230 } |
229 | 231 |
| 232 void SpeechSynthesis::trace(Visitor* visitor) |
| 233 { |
| 234 #if ENABLE(OILPAN) |
| 235 visitor->trace(m_voiceList); |
| 236 visitor->trace(m_currentSpeechUtterance); |
| 237 visitor->trace(m_utteranceQueue); |
| 238 #endif |
| 239 } |
| 240 |
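The new trace() method is the other half of the Oilpan contract: every traced field must be reported to the marking Visitor, or the collector can reclaim objects that are still reachable. The three visitor->trace() calls mirror the three fields whose types change in this patch. The header-side counterpart would look roughly like this (a sketch assuming the transitional field types; the real declaration in modules/speech/SpeechSynthesis.h has more base classes and methods):

    class SpeechSynthesis
        : public RefCountedWillBeGarbageCollectedFinalized<SpeechSynthesis>
        , public ContextLifecycleObserver {
    public:
        void trace(Visitor*); // must visit every traced field below
    private:
        // Under Oilpan each of these becomes a Member-based, traced type,
        // which is exactly why trace() visits all three.
        WillBeHeapVector<RefPtrWillBeMember<SpeechSynthesisVoice> > m_voiceList;
        RefPtrWillBeMember<SpeechSynthesisUtterance> m_currentSpeechUtterance;
        WillBeHeapDeque<RefPtrWillBeMember<SpeechSynthesisUtterance> > m_utteranceQueue;
        OwnPtr<PlatformSpeechSynthesizer> m_platformSpeechSynthesizer;
        bool m_isPaused;
    };
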
230 } // namespace WebCore | 241 } // namespace WebCore |