Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/modules/webaudio/BaseAudioContext.h

Issue 2389253002: reflow comments in modules/{webaudio,vr} (Closed)
Patch Set: . Created 4 years, 2 months ago
/*
 * Copyright (C) 2010, Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
(...skipping 62 matching lines...)
class PannerNode;
class PeriodicWave;
class PeriodicWaveConstraints;
class ScriptProcessorNode;
class ScriptPromiseResolver;
class ScriptState;
class SecurityOrigin;
class StereoPannerNode;
class WaveShaperNode;

// BaseAudioContext is the cornerstone of the web audio API and all AudioNodes
// are created from it. For thread safety between the audio thread and the main
// thread, it has a rendering graph locking mechanism.

class MODULES_EXPORT BaseAudioContext : public EventTargetWithInlineData,
                                        public ActiveScriptWrappable,
                                        public ActiveDOMObject {
  USING_GARBAGE_COLLECTED_MIXIN(BaseAudioContext);
  DEFINE_WRAPPERTYPEINFO();

 public:
  // The state of an audio context. On creation, the state is Suspended. The
  // state is Running if audio is being processed (audio graph is being pulled
  // for data). The state is Closed if the audio context has been closed. The
  // valid transitions are from Suspended to either Running or Closed; Running
  // to Suspended or Closed. Once Closed, there are no valid transitions.
  enum AudioContextState { Suspended, Running, Closed };
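
As an illustration, the transition rules documented in the comment above can be
captured in a small standalone sketch (plain C++, not part of this header;
isValidTransition is a hypothetical helper, not Blink API):

    #include <cassert>

    // Mirrors the documented BaseAudioContext states; standalone sketch only.
    enum AudioContextState { Suspended, Running, Closed };

    // Allowed transitions per the comment above: Suspended -> Running/Closed,
    // Running -> Suspended/Closed; Closed is terminal.
    bool isValidTransition(AudioContextState from, AudioContextState to) {
      switch (from) {
        case Suspended:
          return to == Running || to == Closed;
        case Running:
          return to == Suspended || to == Closed;
        case Closed:
          return false;
      }
      return false;
    }

    int main() {
      assert(isValidTransition(Suspended, Running));
      assert(isValidTransition(Running, Suspended));
      assert(!isValidTransition(Closed, Running));  // No way out of Closed.
      return 0;
    }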

  // Create an AudioContext for rendering to the audio hardware.
  static BaseAudioContext* create(Document&, ExceptionState&);

  ~BaseAudioContext() override;

  DECLARE_VIRTUAL_TRACE();

  // Is the destination node initialized and ready to handle audio?
  bool isDestinationInitialized() const {
    AudioDestinationNode* dest = destination();
    return dest ? dest->audioDestinationHandler().isInitialized() : false;
  }

  // Document notification
  void stop() final;
  bool hasPendingActivity() const final;

  // Cannot be called from the audio thread.
  AudioDestinationNode* destination() const;

  size_t currentSampleFrame() const {
    // TODO: What is the correct value for the current frame if the destination
    // node has gone away? 0 is a valid frame.
    return m_destinationNode
               ? m_destinationNode->audioDestinationHandler()
                     .currentSampleFrame()
               : 0;
  }

  double currentTime() const {
    // TODO: What is the correct value for the current time if the destination
    // node has gone away? 0 is a valid time.
    return m_destinationNode
               ? m_destinationNode->audioDestinationHandler().currentTime()
               : 0;
  }

  float sampleRate() const {
    return m_destinationNode ? m_destinationNode->handler().sampleRate() : 0;
  }

  String state() const;
  AudioContextState contextState() const { return m_contextState; }
  void throwExceptionForClosedState(ExceptionState&);

  AudioBuffer* createBuffer(unsigned numberOfChannels,
                            size_t numberOfFrames,
                            float sampleRate,
                            ExceptionState&);

  // Asynchronous audio file data decoding.
  ScriptPromise decodeAudioData(ScriptState*,
                                DOMArrayBuffer* audioData,
                                AudioBufferCallback* successCallback,
                                AudioBufferCallback* errorCallback,
                                ExceptionState&);

  // Handles the promise and callbacks when |decodeAudioData| is finished
  // decoding.
  void handleDecodeAudioData(AudioBuffer*,
                             ScriptPromiseResolver*,
                             AudioBufferCallback* successCallback,
                             AudioBufferCallback* errorCallback);

  AudioListener* listener() { return m_listener; }

  virtual bool hasRealtimeConstraint() = 0;

  // The AudioNode create methods are called on the main thread (from
  // JavaScript).
  AudioBufferSourceNode* createBufferSource(ExceptionState&);
  MediaElementAudioSourceNode* createMediaElementSource(HTMLMediaElement*,
                                                        ExceptionState&);
  MediaStreamAudioSourceNode* createMediaStreamSource(MediaStream*,
                                                      ExceptionState&);
  MediaStreamAudioDestinationNode* createMediaStreamDestination(
      ExceptionState&);
  GainNode* createGain(ExceptionState&);
  BiquadFilterNode* createBiquadFilter(ExceptionState&);
  WaveShaperNode* createWaveShaper(ExceptionState&);
(...skipping 105 matching lines...)
  void notifyStateChange();

  // A context is considered closed if:
  // - closeContext() has been called.
  // - it has been stopped by its execution context.
  virtual bool isContextClosed() const { return m_isCleared; }

  // Get the security origin for this audio context.
  SecurityOrigin* getSecurityOrigin() const;

  // Get the PeriodicWave for the specified oscillator type. The table is
  // initialized internally if necessary.
  PeriodicWave* periodicWave(int type);

 protected:
  explicit BaseAudioContext(Document*);
  BaseAudioContext(Document*,
                   unsigned numberOfChannels,
                   size_t numberOfFrames,
                   float sampleRate);

  void initialize();
  void uninitialize();

  void setContextState(AudioContextState);

  virtual void didClose() {}

  // Tries to handle AudioBufferSourceNodes that were started but became
  // disconnected or were never connected. Because these never get pulled
  // anymore, they will stay around forever. So if we can, try to stop them so
  // they can be collected.
  void handleStoppableSourceNodes();

  Member<AudioDestinationNode> m_destinationNode;

  // FIXME(dominicc): Move m_resumeResolvers to AudioContext, because only
  // it creates these Promises.
  // Vector of promises created by resume(). It takes time to handle them, so we
  // collect all of the promises here until they can be resolved or rejected.
  HeapVector<Member<ScriptPromiseResolver>> m_resumeResolvers;

  void setClosedContextSampleRate(float newSampleRate) {
    m_closedContextSampleRate = newSampleRate;
  }
  float closedContextSampleRate() const { return m_closedContextSampleRate; }

  void rejectPendingDecodeAudioDataResolvers();

  // If any, unlock user gesture requirements if a user gesture is being
(...skipping 34 matching lines...)
  // the nodes are added to |m_finishedSourceNodes| and scheduled for removal
  // from |m_activeSourceNodes| by the main thread.
  HashSet<UntracedMember<AudioNode>> m_finishedSourceNodes;

  // FIXME(dominicc): Move these to AudioContext because only
  // it creates these Promises.
  // Handle Promises for resume() and suspend()
  void resolvePromisesForResume();
  void resolvePromisesForResumeOnMainThread();

  // When the context is going away, reject any pending script promise
  // resolvers.
  virtual void rejectPendingResolvers();

  // True if we're in the process of resolving promises for resume(). Resolving
  // can take some time and the audio context process loop is very fast, so we
  // don't want to call resolve an excessive number of times.
  bool m_isResolvingResumePromises;

  // Whether a user gesture is required to start this AudioContext.
  bool m_userGestureRequired;

  unsigned m_connectionCount;

  // Graph locking.
  RefPtr<DeferredTaskHandler> m_deferredTaskHandler;

  // The state of the BaseAudioContext.
  AudioContextState m_contextState;

  AsyncAudioDecoder m_audioDecoder;

  // When a context is closed, the sample rate is cleared. But decodeAudioData
  // can be called after the context has been closed and it needs the sample
  // rate. When the context is closed, the sample rate is saved here.
  float m_closedContextSampleRate;

  // Set of promises created by decodeAudioData. This keeps the resolvers
  // alive until decodeAudioData finishes decoding and can tell the main thread
  // to resolve them.
  HeapHashSet<Member<ScriptPromiseResolver>> m_decodeAudioResolvers;
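
To make the lifetime pattern above concrete (keep each resolver alive until the
decoder finishes, then resolve and drop it), here is a rough standalone sketch
outside of Blink, using std::promise in place of ScriptPromiseResolver; the
names Decoder, DecodedBuffer, and decodeAsync are hypothetical:

    #include <algorithm>
    #include <cstddef>
    #include <future>
    #include <iostream>
    #include <memory>
    #include <mutex>
    #include <thread>
    #include <vector>

    struct DecodedBuffer { size_t frames; };  // Stand-in for a decoded buffer.

    class Decoder {
     public:
      // Starts an asynchronous decode. The promise is held in |pending_| so it
      // stays alive until the worker thread finishes and fulfills it.
      std::future<DecodedBuffer> decodeAsync(size_t encodedBytes) {
        auto promise = std::make_shared<std::promise<DecodedBuffer>>();
        std::future<DecodedBuffer> future = promise->get_future();
        {
          std::lock_guard<std::mutex> lock(mutex_);
          pending_.push_back(promise);
        }
        workers_.emplace_back([this, promise, encodedBytes] {
          DecodedBuffer buffer{encodedBytes / 4};  // Pretend decoding happened.
          promise->set_value(buffer);              // "Resolve" the promise.
          std::lock_guard<std::mutex> lock(mutex_);
          pending_.erase(
              std::remove(pending_.begin(), pending_.end(), promise),
              pending_.end());                     // Drop the finished resolver.
        });
        return future;
      }

      ~Decoder() {
        for (auto& worker : workers_) worker.join();
      }

     private:
      std::mutex mutex_;
      std::vector<std::shared_ptr<std::promise<DecodedBuffer>>> pending_;
      std::vector<std::thread> workers_;
    };

    int main() {
      Decoder decoder;
      std::cout << decoder.decodeAsync(1024).get().frames << " frames\n";
      return 0;
    }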

  // PeriodicWave's for the builtin oscillator types. These only depend on the
  // sample rate, so they can be shared with all OscillatorNodes in the context.
  // To conserve memory, these are lazily initialized on first use.
  Member<PeriodicWave> m_periodicWaveSine;
  Member<PeriodicWave> m_periodicWaveSquare;
  Member<PeriodicWave> m_periodicWaveSawtooth;
  Member<PeriodicWave> m_periodicWaveTriangle;
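
A minimal sketch of the lazy sharing scheme described above: one wave per
oscillator type, built only on first request and returned to every caller
afterwards. PeriodicWave and WaveCache here are stand-ins, not the Blink types:

    #include <array>
    #include <cassert>
    #include <memory>

    struct PeriodicWave {  // Stand-in for blink::PeriodicWave.
      explicit PeriodicWave(float sampleRate) : sampleRate(sampleRate) {}
      float sampleRate;
    };

    enum OscillatorType { Sine, Square, Sawtooth, Triangle };

    class WaveCache {
     public:
      explicit WaveCache(float sampleRate) : sampleRate_(sampleRate) {}

      // Returns the shared wave for |type|, building it only on first use.
      PeriodicWave* periodicWave(OscillatorType type) {
        std::unique_ptr<PeriodicWave>& slot = waves_[type];
        if (!slot)
          slot = std::make_unique<PeriodicWave>(sampleRate_);
        return slot.get();
      }

     private:
      float sampleRate_;
      std::array<std::unique_ptr<PeriodicWave>, 4> waves_;
    };

    int main() {
      WaveCache cache(44100.0f);
      assert(cache.periodicWave(Sine) == cache.periodicWave(Sine));    // Shared.
      assert(cache.periodicWave(Sine) != cache.periodicWave(Square));  // Per type.
      return 0;
    }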

  // 32 is assumed to be large enough for multichannel audio. It is somewhat
  // arbitrary and could be increased if necessary.
  enum { MaxNumberOfChannels = 32 };
};

}  // namespace blink

#endif  // BaseAudioContext_h
