Index: content/renderer/media/renderer_gpu_video_decoder_factories.cc
diff --git a/content/renderer/media/renderer_gpu_video_decoder_factories.cc b/content/renderer/media/renderer_gpu_video_decoder_factories.cc
index 2a59e673e6df567325d02b2f86cc78dc7586e052..d14d67f9e42f793c0e03cf70db1953675fd7d5ad 100644
--- a/content/renderer/media/renderer_gpu_video_decoder_factories.cc
+++ b/content/renderer/media/renderer_gpu_video_decoder_factories.cc
@@ -96,25 +96,30 @@ void RendererGpuVideoDecoderFactories::AsyncCreateVideoDecodeAccelerator(
   compositor_loop_async_waiter_.Signal();
 }
 
-bool RendererGpuVideoDecoderFactories::CreateTextures(
+uint32 RendererGpuVideoDecoderFactories::CreateTextures(
     int32 count, const gfx::Size& size,
     std::vector<uint32>* texture_ids,
+    std::vector<gpu::Mailbox>* texture_mailboxes,
     uint32 texture_target) {
+  uint32 sync_point = 0;
+
   DCHECK(!message_loop_->BelongsToCurrentThread());
   message_loop_->PostTask(FROM_HERE, base::Bind(
       &RendererGpuVideoDecoderFactories::AsyncCreateTextures, this,
-      count, size, texture_target));
+      count, size, texture_target, &sync_point));
   base::WaitableEvent* objects[] = {&aborted_waiter_,
                                     &compositor_loop_async_waiter_};
   if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0)
-    return false;
+    return 0;
   texture_ids->swap(created_textures_);
-  return true;
+  texture_mailboxes->swap(created_texture_mailboxes_);
+  return sync_point;
 }
 
 void RendererGpuVideoDecoderFactories::AsyncCreateTextures(
-    int32 count, const gfx::Size& size, uint32 texture_target) {
+    int32 count, const gfx::Size& size, uint32 texture_target,
+    uint32* sync_point) {
   DCHECK(message_loop_->BelongsToCurrentThread());
   DCHECK(texture_target);
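
For illustration, a minimal sketch of how a caller might consume the new CreateTextures() contract; the caller object, count constant, size, and texture target below are assumptions, not code from this change:

  // Sketch only: hypothetical caller of the new CreateTextures() signature.
  std::vector<uint32> texture_ids;
  std::vector<gpu::Mailbox> texture_mailboxes;
  uint32 sync_point = factories->CreateTextures(
      kNumTextures, gfx::Size(320, 240),
      &texture_ids, &texture_mailboxes, GL_TEXTURE_2D);
  if (!sync_point)
    return;  // Creation was aborted; a zero sync point signals failure here.
  // Any other GL context should wait on |sync_point| before consuming the
  // returned mailboxes.
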
@@ -124,6 +129,7 @@ void RendererGpuVideoDecoderFactories::AsyncCreateTextures(
   }
   gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();
   created_textures_.resize(count);
+  created_texture_mailboxes_.resize(count);
   gles2->GenTextures(count, &created_textures_[0]);
   for (int i = 0; i < count; ++i) {
     gles2->ActiveTexture(GL_TEXTURE0);
@@ -137,7 +143,12 @@ void RendererGpuVideoDecoderFactories::AsyncCreateTextures(
       gles2->TexImage2D(texture_target, 0, GL_RGBA, size.width(), size.height(),
                         0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
     }
+    gles2->GenMailboxCHROMIUM(created_texture_mailboxes_[i].name);
+    gles2->ProduceTextureCHROMIUM(texture_target,
+                                  created_texture_mailboxes_[i].name);
   }
+  *sync_point = gles2->InsertSyncPointCHROMIUM();
Inline review comments on the line above:
  piman (2013/06/17 19:58:29): move the sync point after the flush (the other con
  danakj (2013/06/18 16:54:12): Done.
+
   // We need a glFlush here to guarantee the decoder (in the GPU process) can
   // use the texture ids we return here. Since textures are expected to be
   // reused, this should not be unacceptably expensive.
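
On the review exchange above: the request is to insert the sync point only after the flush that follows this comment, presumably so the texture-producing commands have reached the GPU process before another context waits on the sync point. A rough sketch of that ordering (the flush call name is an assumption standing in for the existing flush in this function):

  // Sketch only: requested ordering of flush and sync point insertion.
  gles2->Flush();  // assumed name for the existing flush this comment describes
  *sync_point = gles2->InsertSyncPointCHROMIUM();
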
@@ -162,6 +173,33 @@ void RendererGpuVideoDecoderFactories::AsyncDeleteTexture(uint32 texture_id) {
   DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));
 }
 
+void RendererGpuVideoDecoderFactories::WaitSyncPoint(uint32 sync_point) {
+  if (message_loop_->BelongsToCurrentThread()) {
+    AsyncWaitSyncPoint(sync_point);
+    return;
+  }
+
+  message_loop_->PostTask(FROM_HERE, base::Bind(
+      &RendererGpuVideoDecoderFactories::AsyncWaitSyncPoint,
+      this,
+      sync_point));
+  base::WaitableEvent* objects[] = {&aborted_waiter_,
+                                    &compositor_loop_async_waiter_};
+  base::WaitableEvent::WaitMany(objects, arraysize(objects));
+}
+
+void RendererGpuVideoDecoderFactories::AsyncWaitSyncPoint(uint32 sync_point) {
+  DCHECK(message_loop_->BelongsToCurrentThread());
+  if (!context_) {
+    compositor_loop_async_waiter_.Signal();
+    return;
+  }
+
+  gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();
+  gles2->WaitSyncPointCHROMIUM(sync_point);
+  compositor_loop_async_waiter_.Signal();
+}
+
 void RendererGpuVideoDecoderFactories::ReadPixels(
     uint32 texture_id, uint32 texture_target, const gfx::Size& size,
     const SkBitmap& pixels) {
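
For context, the general mailbox/sync point handshake these helpers plug into, seen from the consuming side. This is a generic sketch under assumed names (consumer_gles2, mailbox, sync_point), not code from this change:

  // Sketch only: consume a texture produced under a mailbox, after waiting
  // on the producer's sync point so the producing commands are visible.
  consumer_gles2->WaitSyncPointCHROMIUM(sync_point);
  GLuint local_id = 0;
  consumer_gles2->GenTextures(1, &local_id);
  consumer_gles2->BindTexture(GL_TEXTURE_2D, local_id);
  consumer_gles2->ConsumeTextureCHROMIUM(GL_TEXTURE_2D, mailbox.name);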