OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/renderer_webaudiodevice_impl.h" | 5 #include "content/renderer/media/renderer_webaudiodevice_impl.h" |
6 | 6 |
7 #include <stddef.h> | 7 #include <stddef.h> |
8 | 8 |
9 #include <string> | 9 #include <string> |
10 | 10 |
| 11 #include "base/bind.h" |
11 #include "base/command_line.h" | 12 #include "base/command_line.h" |
12 #include "base/logging.h" | 13 #include "base/logging.h" |
13 #include "base/time/time.h" | 14 #include "base/time/time.h" |
14 #include "build/build_config.h" | 15 #include "build/build_config.h" |
15 #include "content/renderer/media/audio_device_factory.h" | 16 #include "content/renderer/media/audio_device_factory.h" |
16 #include "content/renderer/render_frame_impl.h" | 17 #include "content/renderer/render_frame_impl.h" |
17 #include "content/renderer/render_thread_impl.h" | 18 #include "content/renderer/render_thread_impl.h" |
18 #include "media/base/audio_timestamp_helper.h" | 19 #include "media/base/audio_timestamp_helper.h" |
19 #include "media/base/silent_sink_suspender.h" | 20 #include "media/base/silent_sink_suspender.h" |
20 #include "third_party/WebKit/public/web/WebLocalFrame.h" | 21 #include "third_party/WebKit/public/web/WebLocalFrame.h" |
21 #include "third_party/WebKit/public/web/WebView.h" | 22 #include "third_party/WebKit/public/web/WebView.h" |
22 | 23 |
23 using blink::WebAudioDevice; | 24 using blink::WebAudioDevice; |
| 25 using blink::WebAudioLatencyHint; |
24 using blink::WebLocalFrame; | 26 using blink::WebLocalFrame; |
25 using blink::WebVector; | 27 using blink::WebVector; |
26 using blink::WebView; | 28 using blink::WebView; |
27 | 29 |
28 namespace content { | 30 namespace content { |
29 | 31 |
30 RendererWebAudioDeviceImpl::RendererWebAudioDeviceImpl( | 32 namespace { |
31 const media::AudioParameters& params, | 33 |
| 34 AudioDeviceFactory::SourceType GetLatencyHintSourceType( |
| 35 WebAudioLatencyHint::AudioContextLatencyCategory latency_category) { |
| 36 switch (latency_category) { |
| 37 case WebAudioLatencyHint::kCategoryInteractive: |
| 38 return AudioDeviceFactory::kSourceWebAudioInteractive; |
| 39 case WebAudioLatencyHint::kCategoryBalanced: |
| 40 return AudioDeviceFactory::kSourceWebAudioBalanced; |
| 41 case WebAudioLatencyHint::kCategoryPlayback: |
| 42 return AudioDeviceFactory::kSourceWebAudioPlayback; |
| 43 case WebAudioLatencyHint::kCategoryExact: |
| 44       // TODO: Implement kCategoryExact. |
| 45 return AudioDeviceFactory::kSourceWebAudioInteractive; |
| 46 } |
| 47 NOTREACHED(); |
| 48 return AudioDeviceFactory::kSourceWebAudioInteractive; |
| 49 } |
| 50 |
| 51 int FrameIdFromCurrentContext() { |
| 52 // Assumption: This method is being invoked within a V8 call stack. CHECKs |
| 53 // will fail in the call to frameForCurrentContext() otherwise. |
| 54 // |
| 55 // Therefore, we can perform look-ups to determine which RenderView is |
| 56 // starting the audio device. The reason for all this is because the creator |
| 57 // of the WebAudio objects might not be the actual source of the audio (e.g., |
| 58 // an extension creates an object that is passed and used within a page). |
| 59 blink::WebLocalFrame* const web_frame = |
| 60 blink::WebLocalFrame::frameForCurrentContext(); |
| 61 RenderFrame* const render_frame = RenderFrame::FromWebFrame(web_frame); |
| 62 return render_frame ? render_frame->GetRoutingID() : MSG_ROUTING_NONE; |
| 63 } |
| 64 |
| 65 media::AudioParameters GetOutputDeviceParameters( |
| 66 int frame_id, |
| 67 int session_id, |
| 68 const std::string& device_id, |
| 69 const url::Origin& security_origin) { |
| 70 return AudioDeviceFactory::GetOutputDeviceInfo(frame_id, session_id, |
| 71 device_id, security_origin) |
| 72 .output_params(); |
| 73 } |
| 74 |
| 75 } // namespace |
| 76 |
| 77 RendererWebAudioDeviceImpl* RendererWebAudioDeviceImpl::Create( |
| 78 media::ChannelLayout layout, |
| 79 const blink::WebAudioLatencyHint& latency_hint, |
32 WebAudioDevice::RenderCallback* callback, | 80 WebAudioDevice::RenderCallback* callback, |
33 int session_id, | 81 int session_id, |
34 const url::Origin& security_origin) | 82 const url::Origin& security_origin) { |
35 : params_(params), | 83 return new RendererWebAudioDeviceImpl(layout, latency_hint, callback, |
| 84 session_id, security_origin, |
| 85 base::Bind(&GetOutputDeviceParameters), |
| 86 base::Bind(&FrameIdFromCurrentContext)); |
| 87 } |
| 88 |
| 89 RendererWebAudioDeviceImpl::RendererWebAudioDeviceImpl( |
| 90 media::ChannelLayout layout, |
| 91 const blink::WebAudioLatencyHint& latency_hint, |
| 92 WebAudioDevice::RenderCallback* callback, |
| 93 int session_id, |
| 94 const url::Origin& security_origin, |
| 95 const OutputDeviceParamsCallback& device_params_cb, |
| 96 const RenderFrameIdCallback& render_frame_id_cb) |
| 97 : latency_hint_(latency_hint), |
36 client_callback_(callback), | 98 client_callback_(callback), |
37 session_id_(session_id), | 99 session_id_(session_id), |
38 security_origin_(security_origin) { | 100 security_origin_(security_origin), |
| 101 frame_id_(render_frame_id_cb.Run()) { |
39 DCHECK(client_callback_); | 102 DCHECK(client_callback_); |
| 103 DCHECK_NE(frame_id_, MSG_ROUTING_NONE); |
| 104 |
| 105 media::AudioParameters hardware_params(device_params_cb.Run( |
| 106 frame_id_, session_id_, std::string(), security_origin_)); |
| 107 |
| 108 int output_buffer_size = 0; |
| 109 |
| 110 media::AudioLatency::LatencyType latency = |
| 111 AudioDeviceFactory::GetSourceLatencyType( |
| 112 GetLatencyHintSourceType(latency_hint_.category())); |
| 113 |
| 114 // Adjust output buffer size according to the latency requirement. |
| 115 switch (latency) { |
| 116 case media::AudioLatency::LATENCY_INTERACTIVE: |
| 117 output_buffer_size = media::AudioLatency::GetInteractiveBufferSize( |
| 118 hardware_params.frames_per_buffer()); |
| 119 break; |
| 120 case media::AudioLatency::LATENCY_RTC: |
| 121 output_buffer_size = media::AudioLatency::GetRtcBufferSize( |
| 122 hardware_params.sample_rate(), hardware_params.frames_per_buffer()); |
| 123 break; |
| 124 case media::AudioLatency::LATENCY_PLAYBACK: |
| 125 output_buffer_size = media::AudioLatency::GetHighLatencyBufferSize( |
| 126 hardware_params.sample_rate(), 0); |
| 127 break; |
| 128 case media::AudioLatency::LATENCY_EXACT_MS: |
| 129 // TODO(olka): add support when WebAudio requires it. |
| 130 default: |
| 131 NOTREACHED(); |
| 132 } |
| 133 |
| 134 DCHECK_NE(output_buffer_size, 0); |
| 135 |
| 136 sink_params_.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, layout, |
| 137 hardware_params.sample_rate(), 16, output_buffer_size); |
| 138 |
| 139 // Specify the latency info to be passed to the browser side. |
| 140 sink_params_.set_latency_tag(latency); |
40 } | 141 } |
41 | 142 |
42 RendererWebAudioDeviceImpl::~RendererWebAudioDeviceImpl() { | 143 RendererWebAudioDeviceImpl::~RendererWebAudioDeviceImpl() { |
43 DCHECK(!sink_); | 144 DCHECK(!sink_); |
44 } | 145 } |
45 | 146 |
46 void RendererWebAudioDeviceImpl::start() { | 147 void RendererWebAudioDeviceImpl::start() { |
47 DCHECK(thread_checker_.CalledOnValidThread()); | 148 DCHECK(thread_checker_.CalledOnValidThread()); |
48 | 149 |
49 if (sink_) | 150 if (sink_) |
50 return; // Already started. | 151 return; // Already started. |
51 | 152 |
52 // Assumption: This method is being invoked within a V8 call stack. CHECKs | |
53 // will fail in the call to frameForCurrentContext() otherwise. | |
54 // | |
55 // Therefore, we can perform look-ups to determine which RenderView is | |
56 // starting the audio device. The reason for all this is because the creator | |
57 // of the WebAudio objects might not be the actual source of the audio (e.g., | |
58 // an extension creates a object that is passed and used within a page). | |
59 WebLocalFrame* const web_frame = WebLocalFrame::frameForCurrentContext(); | |
60 RenderFrame* const render_frame = | |
61 web_frame ? RenderFrame::FromWebFrame(web_frame) : NULL; | |
62 sink_ = AudioDeviceFactory::NewAudioRendererSink( | 153 sink_ = AudioDeviceFactory::NewAudioRendererSink( |
63 AudioDeviceFactory::kSourceWebAudioInteractive, | 154 GetLatencyHintSourceType(latency_hint_.category()), frame_id_, |
64 render_frame ? render_frame->GetRoutingID() : MSG_ROUTING_NONE, | |
65 session_id_, std::string(), security_origin_); | 155 session_id_, std::string(), security_origin_); |
66 | 156 |
67 // Specify the latency info to be passed to the browser side. | |
68 media::AudioParameters sink_params(params_); | |
69 sink_params.set_latency_tag(AudioDeviceFactory::GetSourceLatencyType( | |
70 AudioDeviceFactory::kSourceWebAudioInteractive)); | |
71 | |
72 #if defined(OS_ANDROID) | 157 #if defined(OS_ANDROID) |
73 // Use the media thread instead of the render thread for fake Render() calls | 158 // Use the media thread instead of the render thread for fake Render() calls |
74 // since it has special connotations for Blink and garbage collection. Timeout | 159 // since it has special connotations for Blink and garbage collection. Timeout |
75 // value chosen to be highly unlikely in the normal case. | 160 // value chosen to be highly unlikely in the normal case. |
76 webaudio_suspender_.reset(new media::SilentSinkSuspender( | 161 webaudio_suspender_.reset(new media::SilentSinkSuspender( |
77 this, base::TimeDelta::FromSeconds(30), sink_params, sink_, | 162 this, base::TimeDelta::FromSeconds(30), sink_params_, sink_, |
78 RenderThreadImpl::current()->GetMediaThreadTaskRunner())); | 163 GetMediaTaskRunner())); |
79 sink_->Initialize(sink_params, webaudio_suspender_.get()); | 164 sink_->Initialize(sink_params_, webaudio_suspender_.get()); |
80 #else | 165 #else |
81 sink_->Initialize(sink_params, this); | 166 sink_->Initialize(sink_params_, this); |
82 #endif | 167 #endif |
83 | 168 |
84 sink_->Start(); | 169 sink_->Start(); |
85 sink_->Play(); | 170 sink_->Play(); |
86 } | 171 } |
87 | 172 |
88 void RendererWebAudioDeviceImpl::stop() { | 173 void RendererWebAudioDeviceImpl::stop() { |
89 DCHECK(thread_checker_.CalledOnValidThread()); | 174 DCHECK(thread_checker_.CalledOnValidThread()); |
90 if (sink_) { | 175 if (sink_) { |
91 sink_->Stop(); | 176 sink_->Stop(); |
92 sink_ = nullptr; | 177 sink_ = nullptr; |
93 } | 178 } |
94 | 179 |
95 #if defined(OS_ANDROID) | 180 #if defined(OS_ANDROID) |
96 webaudio_suspender_.reset(); | 181 webaudio_suspender_.reset(); |
97 #endif | 182 #endif |
98 } | 183 } |
99 | 184 |
100 double RendererWebAudioDeviceImpl::sampleRate() { | 185 double RendererWebAudioDeviceImpl::sampleRate() { |
101 return params_.sample_rate(); | 186 return sink_params_.sample_rate(); |
| 187 } |
| 188 |
| 189 int RendererWebAudioDeviceImpl::framesPerBuffer() { |
| 190 return sink_params_.frames_per_buffer(); |
102 } | 191 } |
103 | 192 |
104 int RendererWebAudioDeviceImpl::Render(base::TimeDelta delay, | 193 int RendererWebAudioDeviceImpl::Render(base::TimeDelta delay, |
105 base::TimeTicks delay_timestamp, | 194 base::TimeTicks delay_timestamp, |
106 int prior_frames_skipped, | 195 int prior_frames_skipped, |
107 media::AudioBus* dest) { | 196 media::AudioBus* dest) { |
108 // Wrap the output pointers using WebVector. | 197 // Wrap the output pointers using WebVector. |
109 WebVector<float*> web_audio_dest_data(static_cast<size_t>(dest->channels())); | 198 WebVector<float*> web_audio_dest_data(static_cast<size_t>(dest->channels())); |
110 for (int i = 0; i < dest->channels(); ++i) | 199 for (int i = 0; i < dest->channels(); ++i) |
111 web_audio_dest_data[i] = dest->channel(i); | 200 web_audio_dest_data[i] = dest->channel(i); |
112 | 201 |
113 if (!delay.is_zero()) { // Zero values are sent at the first call. | 202 if (!delay.is_zero()) { // Zero values are sent at the first call. |
114 // Subtract the bus duration to get hardware delay. | 203 // Subtract the bus duration to get hardware delay. |
115 delay -= media::AudioTimestampHelper::FramesToTime(dest->frames(), | 204 delay -= |
116 params_.sample_rate()); | 205 media::AudioTimestampHelper::FramesToTime(dest->frames(), sampleRate()); |
117 } | 206 } |
118 DCHECK_GE(delay, base::TimeDelta()); | 207 DCHECK_GE(delay, base::TimeDelta()); |
119 | 208 |
120 client_callback_->render( | 209 client_callback_->render( |
121 web_audio_dest_data, dest->frames(), delay.InSecondsF(), | 210 web_audio_dest_data, dest->frames(), delay.InSecondsF(), |
122 (delay_timestamp - base::TimeTicks()).InSecondsF(), prior_frames_skipped); | 211 (delay_timestamp - base::TimeTicks()).InSecondsF(), prior_frames_skipped); |
123 | 212 |
124 return dest->frames(); | 213 return dest->frames(); |
125 } | 214 } |
126 | 215 |
127 void RendererWebAudioDeviceImpl::OnRenderError() { | 216 void RendererWebAudioDeviceImpl::OnRenderError() { |
128 // TODO(crogers): implement error handling. | 217 // TODO(crogers): implement error handling. |
129 } | 218 } |
130 | 219 |
| 220 void RendererWebAudioDeviceImpl::SetMediaTaskRunnerForTesting( |
| 221 const scoped_refptr<base::SingleThreadTaskRunner>& media_task_runner) { |
| 222 media_task_runner_ = media_task_runner; |
| 223 } |
| 224 |
| 225 const scoped_refptr<base::SingleThreadTaskRunner>& |
| 226 RendererWebAudioDeviceImpl::GetMediaTaskRunner() { |
| 227 if (!media_task_runner_) { |
| 228 media_task_runner_ = |
| 229 RenderThreadImpl::current()->GetMediaThreadTaskRunner(); |
| 230 } |
| 231 return media_task_runner_; |
| 232 } |
| 233 |
131 } // namespace content | 234 } // namespace content |
OLD | NEW |