| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/media/renderer_gpu_video_decoder_factories.h" | 5 #include "content/renderer/media/renderer_gpu_video_accelerator_factories.h" |
| 6 | 6 |
| 7 #include <GLES2/gl2.h> | 7 #include <GLES2/gl2.h> |
| 8 #include <GLES2/gl2ext.h> | 8 #include <GLES2/gl2ext.h> |
| 9 | 9 |
| 10 #include "base/bind.h" | 10 #include "base/bind.h" |
| 11 #include "content/child/child_thread.h" | 11 #include "content/child/child_thread.h" |
| 12 #include "content/common/gpu/client/gpu_channel_host.h" | 12 #include "content/common/gpu/client/gpu_channel_host.h" |
| 13 #include "content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.h" | 13 #include "content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.h" |
| 14 #include "gpu/command_buffer/client/gles2_implementation.h" | 14 #include "gpu/command_buffer/client/gles2_implementation.h" |
| 15 #include "gpu/ipc/command_buffer_proxy.h" | 15 #include "gpu/ipc/command_buffer_proxy.h" |
| 16 #include "third_party/skia/include/core/SkPixelRef.h" | 16 #include "third_party/skia/include/core/SkPixelRef.h" |
| 17 | 17 |
| 18 namespace content { | 18 namespace content { |
| 19 | 19 |
| 20 RendererGpuVideoDecoderFactories::~RendererGpuVideoDecoderFactories() {} | 20 RendererGpuVideoAcceleratorFactories::~RendererGpuVideoAcceleratorFactories() {} |
| 21 RendererGpuVideoDecoderFactories::RendererGpuVideoDecoderFactories( | 21 RendererGpuVideoAcceleratorFactories::RendererGpuVideoAcceleratorFactories( |
| 22 GpuChannelHost* gpu_channel_host, | 22 GpuChannelHost* gpu_channel_host, |
| 23 const scoped_refptr<base::MessageLoopProxy>& message_loop, | 23 const scoped_refptr<base::MessageLoopProxy>& message_loop, |
| 24 WebGraphicsContext3DCommandBufferImpl* context) | 24 WebGraphicsContext3DCommandBufferImpl* context) |
| 25 : message_loop_(message_loop), | 25 : message_loop_(message_loop), |
| 26 main_message_loop_(base::MessageLoopProxy::current()), | 26 main_message_loop_(base::MessageLoopProxy::current()), |
| 27 gpu_channel_host_(gpu_channel_host), | 27 gpu_channel_host_(gpu_channel_host), |
| 28 aborted_waiter_(true, false), | 28 aborted_waiter_(true, false), |
| 29 message_loop_async_waiter_(false, false), | 29 message_loop_async_waiter_(false, false), |
| 30 render_thread_async_waiter_(false, false) { | 30 render_thread_async_waiter_(false, false) { |
| 31 // |context| is only required to support HW-accelerated decode. |
| 32 if (!context) |
| 33 return; |
| 34 |
| 31 if (message_loop_->BelongsToCurrentThread()) { | 35 if (message_loop_->BelongsToCurrentThread()) { |
| 32 AsyncGetContext(context); | 36 AsyncGetContext(context); |
| 33 message_loop_async_waiter_.Reset(); | 37 message_loop_async_waiter_.Reset(); |
| 34 return; | 38 return; |
| 35 } | 39 } |
| 36 // Wait for the context to be acquired. | 40 // Wait for the context to be acquired. |
| 37 message_loop_->PostTask(FROM_HERE, base::Bind( | 41 message_loop_->PostTask( |
| 38 &RendererGpuVideoDecoderFactories::AsyncGetContext, | 42 FROM_HERE, |
| 39 // Unretained to avoid ref/deref'ing |*this|, which is not yet stored in a | 43 base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncGetContext, |
| 40 // scoped_refptr. Safe because the Wait() below keeps us alive until this | 44 // Unretained to avoid ref/deref'ing |*this|, which is not yet |
| 41 // task completes. | 45 // stored in a scoped_refptr. Safe because the Wait() below |
| 42 base::Unretained(this), | 46 // keeps us alive until this task completes. |
| 43 // OK to pass raw because the pointee is only deleted on the compositor | 47 base::Unretained(this), |
| 44 // thread, and only as the result of a PostTask from the render thread | 48 // OK to pass raw because the pointee is only deleted on the |
| 45 // which can only happen after this function returns, so our PostTask will | 49 // compositor thread, and only as the result of a PostTask from |
| 46 // run first. | 50 // the render thread which can only happen after this function |
| 47 context)); | 51 // returns, so our PostTask will run first. |
| 52 context)); |
| 48 message_loop_async_waiter_.Wait(); | 53 message_loop_async_waiter_.Wait(); |
| 49 } | 54 } |
| 50 | 55 |
| 51 RendererGpuVideoDecoderFactories::RendererGpuVideoDecoderFactories() | 56 RendererGpuVideoAcceleratorFactories::RendererGpuVideoAcceleratorFactories() |
| 52 : aborted_waiter_(true, false), | 57 : aborted_waiter_(true, false), |
| 53 message_loop_async_waiter_(false, false), | 58 message_loop_async_waiter_(false, false), |
| 54 render_thread_async_waiter_(false, false) {} | 59 render_thread_async_waiter_(false, false) {} |
| 55 | 60 |
| 56 void RendererGpuVideoDecoderFactories::AsyncGetContext( | 61 void RendererGpuVideoAcceleratorFactories::AsyncGetContext( |
| 57 WebGraphicsContext3DCommandBufferImpl* context) { | 62 WebGraphicsContext3DCommandBufferImpl* context) { |
| 58 context_ = context->AsWeakPtr(); | 63 context_ = context->AsWeakPtr(); |
| 59 if (context_.get()) { | 64 if (context_.get()) { |
| 60 if (context_->makeContextCurrent()) { | 65 if (context_->makeContextCurrent()) { |
| 61 // Called once per media player, but is a no-op after the first one in | 66 // Called once per media player, but is a no-op after the first one in |
| 62 // each renderer. | 67 // each renderer. |
| 63 context_->insertEventMarkerEXT("GpuVDAContext3D"); | 68 context_->insertEventMarkerEXT("GpuVDAContext3D"); |
| 64 } | 69 } |
| 65 } | 70 } |
| 66 message_loop_async_waiter_.Signal(); | 71 message_loop_async_waiter_.Signal(); |
| 67 } | 72 } |
| 68 | 73 |
| 69 media::VideoDecodeAccelerator* | 74 scoped_ptr<media::VideoDecodeAccelerator> |
| 70 RendererGpuVideoDecoderFactories::CreateVideoDecodeAccelerator( | 75 RendererGpuVideoAcceleratorFactories::CreateVideoDecodeAccelerator( |
| 71 media::VideoCodecProfile profile, | 76 media::VideoCodecProfile profile, |
| 72 media::VideoDecodeAccelerator::Client* client) { | 77 media::VideoDecodeAccelerator::Client* client) { |
| 73 if (message_loop_->BelongsToCurrentThread()) { | 78 if (message_loop_->BelongsToCurrentThread()) { |
| 74 AsyncCreateVideoDecodeAccelerator(profile, client); | 79 AsyncCreateVideoDecodeAccelerator(profile, client); |
| 75 message_loop_async_waiter_.Reset(); | 80 message_loop_async_waiter_.Reset(); |
| 76 return vda_.release(); | 81 return vda_.Pass(); |
| 77 } | 82 } |
| 78 // The VDA is returned in the vda_ member variable by the | 83 // The VDA is returned in the vda_ member variable by the |
| 79 // AsyncCreateVideoDecodeAccelerator() function. | 84 // AsyncCreateVideoDecodeAccelerator() function. |
| 80 message_loop_->PostTask(FROM_HERE, base::Bind( | 85 message_loop_->PostTask(FROM_HERE, |
| 81 &RendererGpuVideoDecoderFactories::AsyncCreateVideoDecodeAccelerator, | 86 base::Bind(&RendererGpuVideoAcceleratorFactories:: |
| 82 this, profile, client)); | 87 AsyncCreateVideoDecodeAccelerator, |
| 88 this, |
| 89 profile, |
| 90 client)); |
| 83 | 91 |
| 84 base::WaitableEvent* objects[] = {&aborted_waiter_, | 92 base::WaitableEvent* objects[] = {&aborted_waiter_, |
| 85 &message_loop_async_waiter_}; | 93 &message_loop_async_waiter_}; |
| 86 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) { | 94 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) { |
| 87 // If we are aborting and the VDA is created by the | 95 // If we are aborting and the VDA is created by the |
| 88 // AsyncCreateVideoDecodeAccelerator() function later we need to ensure | 96 // AsyncCreateVideoDecodeAccelerator() function later we need to ensure |
| 89 // that it is destroyed on the same thread. | 97 // that it is destroyed on the same thread. |
| 90 message_loop_->PostTask(FROM_HERE, base::Bind( | 98 message_loop_->PostTask(FROM_HERE, |
| 91 &RendererGpuVideoDecoderFactories::AsyncDestroyVideoDecodeAccelerator, | 99 base::Bind(&RendererGpuVideoAcceleratorFactories:: |
| 92 this)); | 100 AsyncDestroyVideoDecodeAccelerator, |
| 93 return NULL; | 101 this)); |
| 102 return scoped_ptr<media::VideoDecodeAccelerator>(); |
| 94 } | 103 } |
| 95 return vda_.release(); | 104 return vda_.Pass(); |
| 96 } | 105 } |
| 97 | 106 |
| 98 void RendererGpuVideoDecoderFactories::AsyncCreateVideoDecodeAccelerator( | 107 scoped_ptr<media::VideoEncodeAccelerator> |
| 99 media::VideoCodecProfile profile, | 108 RendererGpuVideoAcceleratorFactories::CreateVideoEncodeAccelerator( |
| 100 media::VideoDecodeAccelerator::Client* client) { | 109 media::VideoEncodeAccelerator::Client* client) { |
| 110 if (message_loop_->BelongsToCurrentThread()) { |
| 111 AsyncCreateVideoEncodeAccelerator(client); |
| 112 message_loop_async_waiter_.Reset(); |
| 113 return vea_.Pass(); |
| 114 } |
| 115 // The VEA is returned in the vea_ member variable by the |
| 116 // AsyncCreateVideoEncodeAccelerator() function. |
| 117 message_loop_->PostTask(FROM_HERE, |
| 118 base::Bind(&RendererGpuVideoAcceleratorFactories:: |
| 119 AsyncCreateVideoEncodeAccelerator, |
| 120 this, |
| 121 client)); |
| 122 |
| 123 base::WaitableEvent* objects[] = {&aborted_waiter_, |
| 124 &message_loop_async_waiter_}; |
| 125 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) { |
| 126 // If we are aborting and the VEA is created by the
| 127 // AsyncCreateVideoEncodeAccelerator() function later we need to ensure |
| 128 // that it is destroyed on the same thread. |
| 129 message_loop_->PostTask(FROM_HERE, |
| 130 base::Bind(&RendererGpuVideoAcceleratorFactories:: |
| 131 AsyncDestroyVideoEncodeAccelerator, |
| 132 this)); |
| 133 return scoped_ptr<media::VideoEncodeAccelerator>(); |
| 134 } |
| 135 return vea_.Pass(); |
| 136 } |
| 137 |
| 138 void RendererGpuVideoAcceleratorFactories::AsyncCreateVideoDecodeAccelerator( |
| 139 media::VideoCodecProfile profile, |
| 140 media::VideoDecodeAccelerator::Client* client) { |
| 101 DCHECK(message_loop_->BelongsToCurrentThread()); | 141 DCHECK(message_loop_->BelongsToCurrentThread()); |
| 102 | 142 |
| 103 if (context_.get() && context_->GetCommandBufferProxy()) { | 143 if (context_.get() && context_->GetCommandBufferProxy()) { |
| 104 vda_ = gpu_channel_host_->CreateVideoDecoder( | 144 vda_ = gpu_channel_host_->CreateVideoDecoder( |
| 105 context_->GetCommandBufferProxy()->GetRouteID(), profile, client); | 145 context_->GetCommandBufferProxy()->GetRouteID(), profile, client); |
| 106 } | 146 } |
| 107 message_loop_async_waiter_.Signal(); | 147 message_loop_async_waiter_.Signal(); |
| 108 } | 148 } |
| 109 | 149 |
| 110 uint32 RendererGpuVideoDecoderFactories::CreateTextures( | 150 void RendererGpuVideoAcceleratorFactories::AsyncCreateVideoEncodeAccelerator( |
| 111 int32 count, const gfx::Size& size, | 151 media::VideoEncodeAccelerator::Client* client) { |
| 152 DCHECK(message_loop_->BelongsToCurrentThread()); |
| 153 |
| 154 vea_ = gpu_channel_host_->CreateVideoEncoder(client).Pass(); |
| 155 message_loop_async_waiter_.Signal(); |
| 156 } |
| 157 |
| 158 uint32 RendererGpuVideoAcceleratorFactories::CreateTextures( |
| 159 int32 count, |
| 160 const gfx::Size& size, |
| 112 std::vector<uint32>* texture_ids, | 161 std::vector<uint32>* texture_ids, |
| 113 std::vector<gpu::Mailbox>* texture_mailboxes, | 162 std::vector<gpu::Mailbox>* texture_mailboxes, |
| 114 uint32 texture_target) { | 163 uint32 texture_target) { |
| 115 uint32 sync_point = 0; | 164 uint32 sync_point = 0; |
| 116 | 165 |
| 117 if (message_loop_->BelongsToCurrentThread()) { | 166 if (message_loop_->BelongsToCurrentThread()) { |
| 118 AsyncCreateTextures(count, size, texture_target, &sync_point); | 167 AsyncCreateTextures(count, size, texture_target, &sync_point); |
| 119 texture_ids->swap(created_textures_); | 168 texture_ids->swap(created_textures_); |
| 120 texture_mailboxes->swap(created_texture_mailboxes_); | 169 texture_mailboxes->swap(created_texture_mailboxes_); |
| 121 message_loop_async_waiter_.Reset(); | 170 message_loop_async_waiter_.Reset(); |
| 122 return sync_point; | 171 return sync_point; |
| 123 } | 172 } |
| 124 message_loop_->PostTask(FROM_HERE, base::Bind( | 173 message_loop_->PostTask( |
| 125 &RendererGpuVideoDecoderFactories::AsyncCreateTextures, this, | 174 FROM_HERE, |
| 126 count, size, texture_target, &sync_point)); | 175 base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncCreateTextures, |
| 176 this, |
| 177 count, |
| 178 size, |
| 179 texture_target, |
| 180 &sync_point)); |
| 127 | 181 |
| 128 base::WaitableEvent* objects[] = {&aborted_waiter_, | 182 base::WaitableEvent* objects[] = {&aborted_waiter_, |
| 129 &message_loop_async_waiter_}; | 183 &message_loop_async_waiter_}; |
| 130 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) | 184 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) |
| 131 return 0; | 185 return 0; |
| 132 texture_ids->swap(created_textures_); | 186 texture_ids->swap(created_textures_); |
| 133 texture_mailboxes->swap(created_texture_mailboxes_); | 187 texture_mailboxes->swap(created_texture_mailboxes_); |
| 134 return sync_point; | 188 return sync_point; |
| 135 } | 189 } |
| 136 | 190 |
| 137 void RendererGpuVideoDecoderFactories::AsyncCreateTextures( | 191 void RendererGpuVideoAcceleratorFactories::AsyncCreateTextures( |
| 138 int32 count, const gfx::Size& size, uint32 texture_target, | 192 int32 count, |
| 193 const gfx::Size& size, |
| 194 uint32 texture_target, |
| 139 uint32* sync_point) { | 195 uint32* sync_point) { |
| 140 DCHECK(message_loop_->BelongsToCurrentThread()); | 196 DCHECK(message_loop_->BelongsToCurrentThread()); |
| 141 DCHECK(texture_target); | 197 DCHECK(texture_target); |
| 142 | 198 |
| 143 if (!context_.get()) { | 199 if (!context_.get()) { |
| 144 message_loop_async_waiter_.Signal(); | 200 message_loop_async_waiter_.Signal(); |
| 145 return; | 201 return; |
| 146 } | 202 } |
| 147 gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation(); | 203 gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation(); |
| 148 created_textures_.resize(count); | 204 created_textures_.resize(count); |
| 149 created_texture_mailboxes_.resize(count); | 205 created_texture_mailboxes_.resize(count); |
| 150 gles2->GenTextures(count, &created_textures_[0]); | 206 gles2->GenTextures(count, &created_textures_[0]); |
| 151 for (int i = 0; i < count; ++i) { | 207 for (int i = 0; i < count; ++i) { |
| 152 gles2->ActiveTexture(GL_TEXTURE0); | 208 gles2->ActiveTexture(GL_TEXTURE0); |
| 153 uint32 texture_id = created_textures_[i]; | 209 uint32 texture_id = created_textures_[i]; |
| 154 gles2->BindTexture(texture_target, texture_id); | 210 gles2->BindTexture(texture_target, texture_id); |
| 155 gles2->TexParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR); | 211 gles2->TexParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR); |
| 156 gles2->TexParameteri(texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR); | 212 gles2->TexParameteri(texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR); |
| 157 gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); | 213 gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); |
| 158 gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); | 214 gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); |
| 159 if (texture_target == GL_TEXTURE_2D) { | 215 if (texture_target == GL_TEXTURE_2D) { |
| 160 gles2->TexImage2D(texture_target, 0, GL_RGBA, size.width(), size.height(), | 216 gles2->TexImage2D(texture_target, |
| 161 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); | 217 0, |
| 218 GL_RGBA, |
| 219 size.width(), |
| 220 size.height(), |
| 221 0, |
| 222 GL_RGBA, |
| 223 GL_UNSIGNED_BYTE, |
| 224 NULL); |
| 162 } | 225 } |
| 163 gles2->GenMailboxCHROMIUM(created_texture_mailboxes_[i].name); | 226 gles2->GenMailboxCHROMIUM(created_texture_mailboxes_[i].name); |
| 164 gles2->ProduceTextureCHROMIUM(texture_target, | 227 gles2->ProduceTextureCHROMIUM(texture_target, |
| 165 created_texture_mailboxes_[i].name); | 228 created_texture_mailboxes_[i].name); |
| 166 } | 229 } |
| 167 | 230 |
| 168 // We need a glFlush here to guarantee the decoder (in the GPU process) can | 231 // We need a glFlush here to guarantee the decoder (in the GPU process) can |
| 169 // use the texture ids we return here. Since textures are expected to be | 232 // use the texture ids we return here. Since textures are expected to be |
| 170 // reused, this should not be unacceptably expensive. | 233 // reused, this should not be unacceptably expensive. |
| 171 gles2->Flush(); | 234 gles2->Flush(); |
| 172 DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR)); | 235 DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR)); |
| 173 | 236 |
| 174 *sync_point = gles2->InsertSyncPointCHROMIUM(); | 237 *sync_point = gles2->InsertSyncPointCHROMIUM(); |
| 175 message_loop_async_waiter_.Signal(); | 238 message_loop_async_waiter_.Signal(); |
| 176 } | 239 } |
| 177 | 240 |
| 178 void RendererGpuVideoDecoderFactories::DeleteTexture(uint32 texture_id) { | 241 void RendererGpuVideoAcceleratorFactories::DeleteTexture(uint32 texture_id) { |
| 179 if (message_loop_->BelongsToCurrentThread()) { | 242 if (message_loop_->BelongsToCurrentThread()) { |
| 180 AsyncDeleteTexture(texture_id); | 243 AsyncDeleteTexture(texture_id); |
| 181 return; | 244 return; |
| 182 } | 245 } |
| 183 message_loop_->PostTask(FROM_HERE, base::Bind( | 246 message_loop_->PostTask( |
| 184 &RendererGpuVideoDecoderFactories::AsyncDeleteTexture, this, texture_id)); | 247 FROM_HERE, |
| 248 base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncDeleteTexture, |
| 249 this, |
| 250 texture_id)); |
| 185 } | 251 } |
| 186 | 252 |
| 187 void RendererGpuVideoDecoderFactories::AsyncDeleteTexture(uint32 texture_id) { | 253 void RendererGpuVideoAcceleratorFactories::AsyncDeleteTexture( |
| 254 uint32 texture_id) { |
| 188 DCHECK(message_loop_->BelongsToCurrentThread()); | 255 DCHECK(message_loop_->BelongsToCurrentThread()); |
| 189 if (!context_.get()) | 256 if (!context_.get()) |
| 190 return; | 257 return; |
| 191 | 258 |
| 192 gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation(); | 259 gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation(); |
| 193 gles2->DeleteTextures(1, &texture_id); | 260 gles2->DeleteTextures(1, &texture_id); |
| 194 DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR)); | 261 DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR)); |
| 195 } | 262 } |
| 196 | 263 |
| 197 void RendererGpuVideoDecoderFactories::WaitSyncPoint(uint32 sync_point) { | 264 void RendererGpuVideoAcceleratorFactories::WaitSyncPoint(uint32 sync_point) { |
| 198 if (message_loop_->BelongsToCurrentThread()) { | 265 if (message_loop_->BelongsToCurrentThread()) { |
| 199 AsyncWaitSyncPoint(sync_point); | 266 AsyncWaitSyncPoint(sync_point); |
| 200 message_loop_async_waiter_.Reset(); | 267 message_loop_async_waiter_.Reset(); |
| 201 return; | 268 return; |
| 202 } | 269 } |
| 203 | 270 |
| 204 message_loop_->PostTask(FROM_HERE, base::Bind( | 271 message_loop_->PostTask( |
| 205 &RendererGpuVideoDecoderFactories::AsyncWaitSyncPoint, | 272 FROM_HERE, |
| 206 this, | 273 base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncWaitSyncPoint, |
| 207 sync_point)); | 274 this, |
| 275 sync_point)); |
| 208 base::WaitableEvent* objects[] = {&aborted_waiter_, | 276 base::WaitableEvent* objects[] = {&aborted_waiter_, |
| 209 &message_loop_async_waiter_}; | 277 &message_loop_async_waiter_}; |
| 210 base::WaitableEvent::WaitMany(objects, arraysize(objects)); | 278 base::WaitableEvent::WaitMany(objects, arraysize(objects)); |
| 211 } | 279 } |
| 212 | 280 |
| 213 void RendererGpuVideoDecoderFactories::AsyncWaitSyncPoint(uint32 sync_point) { | 281 void RendererGpuVideoAcceleratorFactories::AsyncWaitSyncPoint( |
| 282 uint32 sync_point) { |
| 214 DCHECK(message_loop_->BelongsToCurrentThread()); | 283 DCHECK(message_loop_->BelongsToCurrentThread()); |
| 215 if (!context_) { | 284 if (!context_) { |
| 216 message_loop_async_waiter_.Signal(); | 285 message_loop_async_waiter_.Signal(); |
| 217 return; | 286 return; |
| 218 } | 287 } |
| 219 | 288 |
| 220 gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation(); | 289 gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation(); |
| 221 gles2->WaitSyncPointCHROMIUM(sync_point); | 290 gles2->WaitSyncPointCHROMIUM(sync_point); |
| 222 message_loop_async_waiter_.Signal(); | 291 message_loop_async_waiter_.Signal(); |
| 223 } | 292 } |
| 224 | 293 |
| 225 void RendererGpuVideoDecoderFactories::ReadPixels( | 294 void RendererGpuVideoAcceleratorFactories::ReadPixels(uint32 texture_id, |
| 226 uint32 texture_id, uint32 texture_target, const gfx::Size& size, | 295 uint32 texture_target, |
| 227 const SkBitmap& pixels) { | 296 const gfx::Size& size, |
| 297 const SkBitmap& pixels) { |
| 228 // SkBitmaps use the SkPixelRef object to refcount the underlying pixels. | 298 // SkBitmaps use the SkPixelRef object to refcount the underlying pixels. |
| 229 // Multiple SkBitmaps can share a SkPixelRef instance. We use this to | 299 // Multiple SkBitmaps can share a SkPixelRef instance. We use this to |
| 230 // ensure that the underlying pixels in the SkBitmap passed in remain valid | 300 // ensure that the underlying pixels in the SkBitmap passed in remain valid |
| 231 // until the AsyncReadPixels() call completes. | 301 // until the AsyncReadPixels() call completes. |
| 232 read_pixels_bitmap_.setPixelRef(pixels.pixelRef()); | 302 read_pixels_bitmap_.setPixelRef(pixels.pixelRef()); |
| 233 | 303 |
| 234 if (!message_loop_->BelongsToCurrentThread()) { | 304 if (!message_loop_->BelongsToCurrentThread()) { |
| 235 message_loop_->PostTask(FROM_HERE, base::Bind( | 305 message_loop_->PostTask( |
| 236 &RendererGpuVideoDecoderFactories::AsyncReadPixels, this, | 306 FROM_HERE, |
| 237 texture_id, texture_target, size)); | 307 base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncReadPixels, |
| 308 this, |
| 309 texture_id, |
| 310 texture_target, |
| 311 size)); |
| 238 base::WaitableEvent* objects[] = {&aborted_waiter_, | 312 base::WaitableEvent* objects[] = {&aborted_waiter_, |
| 239 &message_loop_async_waiter_}; | 313 &message_loop_async_waiter_}; |
| 240 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) | 314 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) |
| 241 return; | 315 return; |
| 242 } else { | 316 } else { |
| 243 AsyncReadPixels(texture_id, texture_target, size); | 317 AsyncReadPixels(texture_id, texture_target, size); |
| 244 message_loop_async_waiter_.Reset(); | 318 message_loop_async_waiter_.Reset(); |
| 245 } | 319 } |
| 246 read_pixels_bitmap_.setPixelRef(NULL); | 320 read_pixels_bitmap_.setPixelRef(NULL); |
| 247 } | 321 } |
| 248 | 322 |
| 249 void RendererGpuVideoDecoderFactories::AsyncReadPixels( | 323 void RendererGpuVideoAcceleratorFactories::AsyncReadPixels( |
| 250 uint32 texture_id, uint32 texture_target, const gfx::Size& size) { | 324 uint32 texture_id, |
| 325 uint32 texture_target, |
| 326 const gfx::Size& size) { |
| 251 DCHECK(message_loop_->BelongsToCurrentThread()); | 327 DCHECK(message_loop_->BelongsToCurrentThread()); |
| 252 if (!context_.get()) { | 328 if (!context_.get()) { |
| 253 message_loop_async_waiter_.Signal(); | 329 message_loop_async_waiter_.Signal(); |
| 254 return; | 330 return; |
| 255 } | 331 } |
| 256 | 332 |
| 257 gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation(); | 333 gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation(); |
| 258 | 334 |
| 259 GLuint tmp_texture; | 335 GLuint tmp_texture; |
| 260 gles2->GenTextures(1, &tmp_texture); | 336 gles2->GenTextures(1, &tmp_texture); |
| 261 gles2->BindTexture(texture_target, tmp_texture); | 337 gles2->BindTexture(texture_target, tmp_texture); |
| 262 gles2->TexParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR); | 338 gles2->TexParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR); |
| 263 gles2->TexParameteri(texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR); | 339 gles2->TexParameteri(texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR); |
| 264 gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); | 340 gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); |
| 265 gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); | 341 gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); |
| 266 context_->copyTextureCHROMIUM( | 342 context_->copyTextureCHROMIUM( |
| 267 texture_target, texture_id, tmp_texture, 0, GL_RGBA, GL_UNSIGNED_BYTE); | 343 texture_target, texture_id, tmp_texture, 0, GL_RGBA, GL_UNSIGNED_BYTE); |
| 268 | 344 |
| 269 GLuint fb; | 345 GLuint fb; |
| 270 gles2->GenFramebuffers(1, &fb); | 346 gles2->GenFramebuffers(1, &fb); |
| 271 gles2->BindFramebuffer(GL_FRAMEBUFFER, fb); | 347 gles2->BindFramebuffer(GL_FRAMEBUFFER, fb); |
| 272 gles2->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, | 348 gles2->FramebufferTexture2D( |
| 273 texture_target, tmp_texture, 0); | 349 GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, texture_target, tmp_texture, 0); |
| 274 gles2->PixelStorei(GL_PACK_ALIGNMENT, 4); | 350 gles2->PixelStorei(GL_PACK_ALIGNMENT, 4); |
| 275 gles2->ReadPixels(0, 0, size.width(), size.height(), GL_BGRA_EXT, | 351 gles2->ReadPixels(0, |
| 276 GL_UNSIGNED_BYTE, read_pixels_bitmap_.pixelRef()->pixels()); | 352 0, |
| 353 size.width(), |
| 354 size.height(), |
| 355 GL_BGRA_EXT, |
| 356 GL_UNSIGNED_BYTE, |
| 357 read_pixels_bitmap_.pixelRef()->pixels()); |
| 277 gles2->DeleteFramebuffers(1, &fb); | 358 gles2->DeleteFramebuffers(1, &fb); |
| 278 gles2->DeleteTextures(1, &tmp_texture); | 359 gles2->DeleteTextures(1, &tmp_texture); |
| 279 DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR)); | 360 DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR)); |
| 280 message_loop_async_waiter_.Signal(); | 361 message_loop_async_waiter_.Signal(); |
| 281 } | 362 } |
| 282 | 363 |
| 283 base::SharedMemory* RendererGpuVideoDecoderFactories::CreateSharedMemory( | 364 base::SharedMemory* RendererGpuVideoAcceleratorFactories::CreateSharedMemory( |
| 284 size_t size) { | 365 size_t size) { |
| 285 if (main_message_loop_->BelongsToCurrentThread()) { | 366 if (main_message_loop_->BelongsToCurrentThread()) { |
| 286 return ChildThread::current()->AllocateSharedMemory(size); | 367 return ChildThread::current()->AllocateSharedMemory(size); |
| 287 } | 368 } |
| 288 main_message_loop_->PostTask(FROM_HERE, base::Bind( | 369 main_message_loop_->PostTask( |
| 289 &RendererGpuVideoDecoderFactories::AsyncCreateSharedMemory, this, | 370 FROM_HERE, |
| 290 size)); | 371 base::Bind(&RendererGpuVideoAcceleratorFactories::AsyncCreateSharedMemory, |
| 372 this, |
| 373 size)); |
| 291 | 374 |
| 292 base::WaitableEvent* objects[] = {&aborted_waiter_, | 375 base::WaitableEvent* objects[] = {&aborted_waiter_, |
| 293 &render_thread_async_waiter_}; | 376 &render_thread_async_waiter_}; |
| 294 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) | 377 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) |
| 295 return NULL; | 378 return NULL; |
| 296 return shared_memory_segment_.release(); | 379 return shared_memory_segment_.release(); |
| 297 } | 380 } |
| 298 | 381 |
| 299 void RendererGpuVideoDecoderFactories::AsyncCreateSharedMemory(size_t size) { | 382 void RendererGpuVideoAcceleratorFactories::AsyncCreateSharedMemory( |
| 383 size_t size) { |
| 300 DCHECK_EQ(base::MessageLoop::current(), | 384 DCHECK_EQ(base::MessageLoop::current(), |
| 301 ChildThread::current()->message_loop()); | 385 ChildThread::current()->message_loop()); |
| 302 | 386 |
| 303 shared_memory_segment_.reset( | 387 shared_memory_segment_.reset( |
| 304 ChildThread::current()->AllocateSharedMemory(size)); | 388 ChildThread::current()->AllocateSharedMemory(size)); |
| 305 render_thread_async_waiter_.Signal(); | 389 render_thread_async_waiter_.Signal(); |
| 306 } | 390 } |
| 307 | 391 |
| 308 scoped_refptr<base::MessageLoopProxy> | 392 scoped_refptr<base::MessageLoopProxy> |
| 309 RendererGpuVideoDecoderFactories::GetMessageLoop() { | 393 RendererGpuVideoAcceleratorFactories::GetMessageLoop() { |
| 310 return message_loop_; | 394 return message_loop_; |
| 311 } | 395 } |
| 312 | 396 |
| 313 void RendererGpuVideoDecoderFactories::Abort() { | 397 void RendererGpuVideoAcceleratorFactories::Abort() { aborted_waiter_.Signal(); } |
| 314 aborted_waiter_.Signal(); | |
| 315 } | |
| 316 | 398 |
| 317 bool RendererGpuVideoDecoderFactories::IsAborted() { | 399 bool RendererGpuVideoAcceleratorFactories::IsAborted() { |
| 318 return aborted_waiter_.IsSignaled(); | 400 return aborted_waiter_.IsSignaled(); |
| 319 } | 401 } |
| 320 | 402 |
| 321 scoped_refptr<media::GpuVideoDecoderFactories> | 403 scoped_refptr<RendererGpuVideoAcceleratorFactories> |
| 322 RendererGpuVideoDecoderFactories::Clone() { | 404 RendererGpuVideoAcceleratorFactories::Clone() { |
| 323 scoped_refptr<RendererGpuVideoDecoderFactories> factories = | 405 scoped_refptr<RendererGpuVideoAcceleratorFactories> factories = |
| 324 new RendererGpuVideoDecoderFactories(); | 406 new RendererGpuVideoAcceleratorFactories(); |
| 325 factories->message_loop_ = message_loop_; | 407 factories->message_loop_ = message_loop_; |
| 326 factories->main_message_loop_ = main_message_loop_; | 408 factories->main_message_loop_ = main_message_loop_; |
| 327 factories->gpu_channel_host_ = gpu_channel_host_; | 409 factories->gpu_channel_host_ = gpu_channel_host_; |
| 328 factories->context_ = context_; | 410 factories->context_ = context_; |
| 329 return factories; | 411 return factories; |
| 330 } | 412 } |
| 331 | 413 |
| 332 void RendererGpuVideoDecoderFactories::AsyncDestroyVideoDecodeAccelerator() { | 414 void |
| 415 RendererGpuVideoAcceleratorFactories::AsyncDestroyVideoDecodeAccelerator() { |
| 333 // OK to release because Destroy() will delete the VDA instance. | 416 // OK to release because Destroy() will delete the VDA instance. |
| 334 if (vda_) | 417 if (vda_) |
| 335 vda_.release()->Destroy(); | 418 vda_.release()->Destroy(); |
| 336 } | 419 } |
| 337 | 420 |
| 421 void |
| 422 RendererGpuVideoAcceleratorFactories::AsyncDestroyVideoEncodeAccelerator() { |
| 423 // OK to release because Destroy() will delete the VEA instance.
| 424 if (vea_) |
| 425 vea_.release()->Destroy(); |
| 426 } |
| 427 |
| 338 } // namespace content | 428 } // namespace content |
| OLD | NEW |