// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/media/renderer_gpu_video_accelerator_factories.h"

#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>

#include "base/bind.h"
#include "content/child/child_thread.h"
#include "content/common/gpu/client/gpu_channel_host.h"
#include "content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.h"
#include "gpu/command_buffer/client/gles2_implementation.h"
#include "gpu/ipc/command_buffer_proxy.h"
#include "third_party/skia/include/core/SkPixelRef.h"

namespace content {

RendererGpuVideoAcceleratorFactories::~RendererGpuVideoAcceleratorFactories() {}

RendererGpuVideoAcceleratorFactories::RendererGpuVideoAcceleratorFactories(
    GpuChannelHost* gpu_channel_host,
    const scoped_refptr<base::MessageLoopProxy>& message_loop,
    WebGraphicsContext3DCommandBufferImpl* context)
    : message_loop_(message_loop),
      main_message_loop_(base::MessageLoopProxy::current()),
      gpu_channel_host_(gpu_channel_host),
      aborted_waiter_(true, false),
      message_loop_async_waiter_(false, false),
      render_thread_async_waiter_(false, false) {
  // |context| is only required to support HW-accelerated decode.
  if (!context)
    return;

  if (message_loop_->BelongsToCurrentThread()) {
    AsyncGetContext(context);
    message_loop_async_waiter_.Reset();
    return;
  }
  // Wait for the context to be acquired.
  message_loop_->PostTask(
      FROM_HERE,
      base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncGetContext,
                 // Unretained to avoid ref/deref'ing |*this|, which is not yet
                 // stored in a scoped_refptr. Safe because the Wait() below
                 // keeps us alive until this task completes.
                 base::Unretained(this),
                 // OK to pass raw because the pointee is only deleted on the
                 // compositor thread, and only as the result of a PostTask from
                 // the render thread which can only happen after this function
                 // returns, so our PostTask will run first.
                 context));
  message_loop_async_waiter_.Wait();
}

RendererGpuVideoAcceleratorFactories::RendererGpuVideoAcceleratorFactories()
    : aborted_waiter_(true, false),
      message_loop_async_waiter_(false, false),
      render_thread_async_waiter_(false, false) {}

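// Runs on |message_loop_|. Caches a weak pointer to |context|, makes the
// context current if possible to emit the "GpuVDAContext3D" event marker,
// and signals |message_loop_async_waiter_| so a caller blocked in the
// constructor can proceed.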
void RendererGpuVideoAcceleratorFactories::AsyncGetContext(
    WebGraphicsContext3DCommandBufferImpl* context) {
  context_ = context->AsWeakPtr();
  if (context_.get()) {
    if (context_->makeContextCurrent()) {
      // Called once per media player, but is a no-op after the first one in
      // each renderer.
      context_->insertEventMarkerEXT("GpuVDAContext3D");
    }
  }
  message_loop_async_waiter_.Signal();
}

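// May be called from any thread. Off |message_loop_|, the creation is posted
// to AsyncCreateVideoDecodeAccelerator() and this call blocks until it
// signals or Abort() is signaled; on abort, any VDA created later is
// destroyed back on |message_loop_| and an empty scoped_ptr is returned.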
scoped_ptr<media::VideoDecodeAccelerator>
RendererGpuVideoAcceleratorFactories::CreateVideoDecodeAccelerator(
    media::VideoCodecProfile profile,
    media::VideoDecodeAccelerator::Client* client) {
  if (message_loop_->BelongsToCurrentThread()) {
    AsyncCreateVideoDecodeAccelerator(profile, client);
    message_loop_async_waiter_.Reset();
    return vda_.Pass();
  }
  // The VDA is returned in the |vda_| member variable by
  // AsyncCreateVideoDecodeAccelerator().
  message_loop_->PostTask(FROM_HERE,
                          base::Bind(&RendererGpuVideoAcceleratorFactories::
                                          AsyncCreateVideoDecodeAccelerator,
                                     this,
                                     profile,
                                     client));

  base::WaitableEvent* objects[] = {&aborted_waiter_,
                                    &message_loop_async_waiter_};
  if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) {
    // If we are aborting and the VDA is created by
    // AsyncCreateVideoDecodeAccelerator() later, we need to ensure that it is
    // destroyed on the same thread.
    message_loop_->PostTask(FROM_HERE,
                            base::Bind(&RendererGpuVideoAcceleratorFactories::
                                           AsyncDestroyVideoDecodeAccelerator,
                                       this));
    return scoped_ptr<media::VideoDecodeAccelerator>();
  }
  return vda_.Pass();
}

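// Mirrors CreateVideoDecodeAccelerator(): the VEA is created on
// |message_loop_| and the calling thread blocks until creation completes or
// Abort() is signaled.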
scoped_ptr<media::VideoEncodeAccelerator>
RendererGpuVideoAcceleratorFactories::CreateVideoEncodeAccelerator(
    media::VideoEncodeAccelerator::Client* client) {
  if (message_loop_->BelongsToCurrentThread()) {
    AsyncCreateVideoEncodeAccelerator(client);
    message_loop_async_waiter_.Reset();
    return vea_.Pass();
  }
  // The VEA is returned in the |vea_| member variable by
  // AsyncCreateVideoEncodeAccelerator().
  message_loop_->PostTask(FROM_HERE,
                          base::Bind(&RendererGpuVideoAcceleratorFactories::
                                          AsyncCreateVideoEncodeAccelerator,
                                     this,
                                     client));

  base::WaitableEvent* objects[] = {&aborted_waiter_,
                                    &message_loop_async_waiter_};
  if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) {
    // If we are aborting and the VEA is created by
    // AsyncCreateVideoEncodeAccelerator() later, we need to ensure that it is
    // destroyed on the same thread.
    message_loop_->PostTask(FROM_HERE,
                            base::Bind(&RendererGpuVideoAcceleratorFactories::
                                           AsyncDestroyVideoEncodeAccelerator,
                                       this));
    return scoped_ptr<media::VideoEncodeAccelerator>();
  }
  return vea_.Pass();
}

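// Runs on |message_loop_|. Creates the decoder over the GPU channel using the
// route ID of the context's command buffer, skipping creation if the context
// has been lost, then signals the waiter.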
void RendererGpuVideoAcceleratorFactories::AsyncCreateVideoDecodeAccelerator(
    media::VideoCodecProfile profile,
    media::VideoDecodeAccelerator::Client* client) {
  DCHECK(message_loop_->BelongsToCurrentThread());

  if (context_.get() && context_->GetCommandBufferProxy()) {
    vda_ = gpu_channel_host_->CreateVideoDecoder(
        context_->GetCommandBufferProxy()->GetRouteID(), profile, client);
  }
  message_loop_async_waiter_.Signal();
}

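// Runs on |message_loop_|. Unlike the decoder, the encoder is created on the
// GPU channel without a command buffer route ID.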
void RendererGpuVideoAcceleratorFactories::AsyncCreateVideoEncodeAccelerator(
    media::VideoEncodeAccelerator::Client* client) {
  DCHECK(message_loop_->BelongsToCurrentThread());

  vea_ = gpu_channel_host_->CreateVideoEncoder(client).Pass();
  message_loop_async_waiter_.Signal();
}

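// Allocates |count| textures of |size| on |message_loop_| and returns the
// sync point inserted after the allocation, or 0 if the wait was aborted.
// Texture ids and mailbox names are returned through the out parameters.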
uint32 RendererGpuVideoAcceleratorFactories::CreateTextures(
    int32 count,
    const gfx::Size& size,
    std::vector<uint32>* texture_ids,
    std::vector<gpu::Mailbox>* texture_mailboxes,
    uint32 texture_target) {
  uint32 sync_point = 0;

  if (message_loop_->BelongsToCurrentThread()) {
    AsyncCreateTextures(count, size, texture_target, &sync_point);
    texture_ids->swap(created_textures_);
    texture_mailboxes->swap(created_texture_mailboxes_);
    message_loop_async_waiter_.Reset();
    return sync_point;
  }
  message_loop_->PostTask(
      FROM_HERE,
      base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncCreateTextures,
                 this,
                 count,
                 size,
                 texture_target,
                 &sync_point));

  base::WaitableEvent* objects[] = {&aborted_waiter_,
                                    &message_loop_async_waiter_};
  if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0)
    return 0;
  texture_ids->swap(created_textures_);
  texture_mailboxes->swap(created_texture_mailboxes_);
  return sync_point;
}

void RendererGpuVideoAcceleratorFactories::AsyncCreateTextures(
    int32 count,
    const gfx::Size& size,
    uint32 texture_target,
    uint32* sync_point) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  DCHECK(texture_target);

  if (!context_.get()) {
    message_loop_async_waiter_.Signal();
    return;
  }
  gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();
  created_textures_.resize(count);
  created_texture_mailboxes_.resize(count);
  gles2->GenTextures(count, &created_textures_[0]);
  for (int i = 0; i < count; ++i) {
    gles2->ActiveTexture(GL_TEXTURE0);
    uint32 texture_id = created_textures_[i];
    gles2->BindTexture(texture_target, texture_id);
    gles2->TexParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    gles2->TexParameteri(texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    if (texture_target == GL_TEXTURE_2D) {
      gles2->TexImage2D(texture_target,
                        0,
                        GL_RGBA,
                        size.width(),
                        size.height(),
                        0,
                        GL_RGBA,
                        GL_UNSIGNED_BYTE,
                        NULL);
    }
    gles2->GenMailboxCHROMIUM(created_texture_mailboxes_[i].name);
    gles2->ProduceTextureCHROMIUM(texture_target,
                                  created_texture_mailboxes_[i].name);
  }

  // We need a glFlush here to guarantee the decoder (in the GPU process) can
  // use the texture ids we return here. Since textures are expected to be
  // reused, this should not be unacceptably expensive.
  gles2->Flush();
  DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));

  *sync_point = gles2->InsertSyncPointCHROMIUM();
  message_loop_async_waiter_.Signal();
}

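// Fire-and-forget: when called off |message_loop_|, the deletion is simply
// posted and not waited for.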
void RendererGpuVideoAcceleratorFactories::DeleteTexture(uint32 texture_id) {
  if (message_loop_->BelongsToCurrentThread()) {
    AsyncDeleteTexture(texture_id);
    return;
  }
  message_loop_->PostTask(
      FROM_HERE,
      base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncDeleteTexture,
                 this,
                 texture_id));
}

void RendererGpuVideoAcceleratorFactories::AsyncDeleteTexture(
    uint32 texture_id) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  if (!context_.get())
    return;

  gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();
  gles2->DeleteTextures(1, &texture_id);
  DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));
}

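// Blocks the calling thread until AsyncWaitSyncPoint() has issued the wait on
// |message_loop_| or the factories are aborted.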
void RendererGpuVideoAcceleratorFactories::WaitSyncPoint(uint32 sync_point) {
  if (message_loop_->BelongsToCurrentThread()) {
    AsyncWaitSyncPoint(sync_point);
    message_loop_async_waiter_.Reset();
    return;
  }

  message_loop_->PostTask(
      FROM_HERE,
      base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncWaitSyncPoint,
                 this,
                 sync_point));
  base::WaitableEvent* objects[] = {&aborted_waiter_,
                                    &message_loop_async_waiter_};
  base::WaitableEvent::WaitMany(objects, arraysize(objects));
}

void RendererGpuVideoAcceleratorFactories::AsyncWaitSyncPoint(
    uint32 sync_point) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  if (!context_) {
    message_loop_async_waiter_.Signal();
    return;
  }

  gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();
  gles2->WaitSyncPointCHROMIUM(sync_point);
  message_loop_async_waiter_.Signal();
}

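// Synchronously reads |texture_id| back into |pixels|. The actual readback
// happens on |message_loop_| in AsyncReadPixels(); this wrapper only manages
// the pixel-ref lifetime and the cross-thread wait.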
void RendererGpuVideoAcceleratorFactories::ReadPixels(uint32 texture_id,
                                                      uint32 texture_target,
                                                      const gfx::Size& size,
                                                      const SkBitmap& pixels) {
  // SkBitmaps use the SkPixelRef object to refcount the underlying pixels.
  // Multiple SkBitmaps can share a SkPixelRef instance. We use this to ensure
  // that the underlying pixels in the SkBitmap passed in remain valid until
  // the AsyncReadPixels() call completes.
  read_pixels_bitmap_.setPixelRef(pixels.pixelRef());

  if (!message_loop_->BelongsToCurrentThread()) {
    message_loop_->PostTask(
        FROM_HERE,
        base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncReadPixels,
                   this,
                   texture_id,
                   texture_target,
                   size));
    base::WaitableEvent* objects[] = {&aborted_waiter_,
                                      &message_loop_async_waiter_};
    if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0)
      return;
  } else {
    AsyncReadPixels(texture_id, texture_target, size);
    message_loop_async_waiter_.Reset();
  }
  read_pixels_bitmap_.setPixelRef(NULL);
}

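// Runs on |message_loop_|. Copies the source texture into a temporary
// GL_RGBA texture, attaches that to a framebuffer, and reads the pixels back
// as BGRA into the storage shared via |read_pixels_bitmap_|.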
void RendererGpuVideoAcceleratorFactories::AsyncReadPixels(
    uint32 texture_id,
    uint32 texture_target,
    const gfx::Size& size) {
  DCHECK(message_loop_->BelongsToCurrentThread());
  if (!context_.get()) {
    message_loop_async_waiter_.Signal();
    return;
  }

  gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();

  GLuint tmp_texture;
  gles2->GenTextures(1, &tmp_texture);
  gles2->BindTexture(texture_target, tmp_texture);
  gles2->TexParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
  gles2->TexParameteri(texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
  gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
  gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
  context_->copyTextureCHROMIUM(
      texture_target, texture_id, tmp_texture, 0, GL_RGBA, GL_UNSIGNED_BYTE);

  GLuint fb;
  gles2->GenFramebuffers(1, &fb);
  gles2->BindFramebuffer(GL_FRAMEBUFFER, fb);
  gles2->FramebufferTexture2D(
      GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, texture_target, tmp_texture, 0);
  gles2->PixelStorei(GL_PACK_ALIGNMENT, 4);
  gles2->ReadPixels(0,
                    0,
                    size.width(),
                    size.height(),
                    GL_BGRA_EXT,
                    GL_UNSIGNED_BYTE,
                    read_pixels_bitmap_.pixelRef()->pixels());
  gles2->DeleteFramebuffers(1, &fb);
  gles2->DeleteTextures(1, &tmp_texture);
  DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));
  message_loop_async_waiter_.Signal();
}

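// Shared memory is allocated through the ChildThread on the main (render)
// thread; calls from other threads post to |main_message_loop_| and wait on
// |render_thread_async_waiter_|.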
base::SharedMemory* RendererGpuVideoAcceleratorFactories::CreateSharedMemory(
    size_t size) {
  if (main_message_loop_->BelongsToCurrentThread()) {
    return ChildThread::current()->AllocateSharedMemory(size);
  }
  main_message_loop_->PostTask(
      FROM_HERE,
      base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncCreateSharedMemory,
                 this,
                 size));

  base::WaitableEvent* objects[] = {&aborted_waiter_,
                                    &render_thread_async_waiter_};
  if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0)
    return NULL;
  return shared_memory_segment_.release();
}

void RendererGpuVideoAcceleratorFactories::AsyncCreateSharedMemory(
    size_t size) {
  DCHECK_EQ(base::MessageLoop::current(),
            ChildThread::current()->message_loop());

  shared_memory_segment_.reset(
      ChildThread::current()->AllocateSharedMemory(size));
  render_thread_async_waiter_.Signal();
}

scoped_refptr<base::MessageLoopProxy>
RendererGpuVideoAcceleratorFactories::GetMessageLoop() {
  return message_loop_;
}

void RendererGpuVideoAcceleratorFactories::Abort() { aborted_waiter_.Signal(); }

bool RendererGpuVideoAcceleratorFactories::IsAborted() {
  return aborted_waiter_.IsSignaled();
}

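// Returns a shallow copy that shares the message loops, GPU channel and
// context with |this|; the copy gets its own waitable events via the default
// constructor.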
scoped_refptr<RendererGpuVideoAcceleratorFactories>
RendererGpuVideoAcceleratorFactories::Clone() {
  scoped_refptr<RendererGpuVideoAcceleratorFactories> factories =
      new RendererGpuVideoAcceleratorFactories();
  factories->message_loop_ = message_loop_;
  factories->main_message_loop_ = main_message_loop_;
  factories->gpu_channel_host_ = gpu_channel_host_;
  factories->context_ = context_;
  return factories;
}

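// Runs on |message_loop_|. The accelerators are torn down with Destroy()
// (which deletes them) rather than deleted directly; see the release() calls
// below.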
void
RendererGpuVideoAcceleratorFactories::AsyncDestroyVideoDecodeAccelerator() {
  // OK to release because Destroy() will delete the VDA instance.
  if (vda_)
    vda_.release()->Destroy();
}

void
RendererGpuVideoAcceleratorFactories::AsyncDestroyVideoEncodeAccelerator() {
  // OK to release because Destroy() will delete the VEA instance.
  if (vea_)
    vea_.release()->Destroy();
}

}  // namespace content