Chromium Code Reviews

Unified diff: content/renderer/media/renderer_gpu_video_decoder_factories.cc

Issue 19534002: Make RendererGpuVideoDecoderFactories live on arbitrary threads. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: rebase. Created 7 years, 4 months ago.
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/renderer/media/renderer_gpu_video_decoder_factories.h"

 #include <GLES2/gl2.h>
 #include <GLES2/gl2ext.h>

 #include "base/bind.h"
 #include "content/child/child_thread.h"
 #include "content/common/gpu/client/gpu_channel_host.h"
 #include "content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.h"
 #include "gpu/command_buffer/client/gles2_implementation.h"
 #include "gpu/ipc/command_buffer_proxy.h"
 #include "third_party/skia/include/core/SkPixelRef.h"

 namespace content {

 RendererGpuVideoDecoderFactories::~RendererGpuVideoDecoderFactories() {}
 RendererGpuVideoDecoderFactories::RendererGpuVideoDecoderFactories(
     GpuChannelHost* gpu_channel_host,
-    const scoped_refptr<base::MessageLoopProxy>& compositor_message_loop,
+    const scoped_refptr<base::MessageLoopProxy>& message_loop,
     WebGraphicsContext3DCommandBufferImpl* context)
-    : compositor_message_loop_(compositor_message_loop),
+    : message_loop_(message_loop),
       main_message_loop_(base::MessageLoopProxy::current()),
       gpu_channel_host_(gpu_channel_host),
       aborted_waiter_(true, false),
-      compositor_loop_async_waiter_(false, false),
+      message_loop_async_waiter_(false, false),
       render_thread_async_waiter_(false, false) {
-  if (compositor_message_loop_->BelongsToCurrentThread()) {
+  if (message_loop_->BelongsToCurrentThread()) {
     AsyncGetContext(context);
-    compositor_loop_async_waiter_.Reset();
+    message_loop_async_waiter_.Reset();
     return;
   }
-  // Threaded compositor requires us to wait for the context to be acquired.
-  compositor_message_loop_->PostTask(FROM_HERE, base::Bind(
+  // Wait for the context to be acquired.
+  message_loop_->PostTask(FROM_HERE, base::Bind(
       &RendererGpuVideoDecoderFactories::AsyncGetContext,
       // Unretained to avoid ref/deref'ing |*this|, which is not yet stored in a
       // scoped_refptr. Safe because the Wait() below keeps us alive until this
       // task completes.
       base::Unretained(this),
       // OK to pass raw because the pointee is only deleted on the compositor
       // thread, and only as the result of a PostTask from the render thread
       // which can only happen after this function returns, so our PostTask will
       // run first.
       context));
-  compositor_loop_async_waiter_.Wait();
+  message_loop_async_waiter_.Wait();
 }

+RendererGpuVideoDecoderFactories::RendererGpuVideoDecoderFactories()
+    : aborted_waiter_(true, false),
+      message_loop_async_waiter_(false, false),
+      render_thread_async_waiter_(false, false) {}
+
 void RendererGpuVideoDecoderFactories::AsyncGetContext(
     WebGraphicsContext3DCommandBufferImpl* context) {
   context_ = context->AsWeakPtr();
   if (context_.get()) {
     if (context_->makeContextCurrent()) {
       // Called once per media player, but is a no-op after the first one in
       // each renderer.
       context_->insertEventMarkerEXT("GpuVDAContext3D");
     }
   }
-  compositor_loop_async_waiter_.Signal();
+  message_loop_async_waiter_.Signal();
 }
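
Editor's note: the constructor above posts AsyncGetContext() to another loop with base::Unretained(this) and then blocks on a WaitableEvent, so the raw pointer stays valid exactly as long as the posted task can touch it. Below is a minimal sketch of that shape in plain C++11, with std::thread standing in for the target message loop and std::promise/std::future for the WaitableEvent; all names are illustrative, not Chromium API.

#include <future>
#include <thread>

class ContextHolder {
 public:
  ContextHolder() {
    std::promise<void> done;
    std::future<void> done_future = done.get_future();
    // Passing raw |this| is safe only because we block below until the posted
    // work has finished touching it (mirrors base::Unretained + Wait()).
    std::thread worker([this, &done] {
      AcquireContext();  // runs on the "other" thread
      done.set_value();  // analogous to message_loop_async_waiter_.Signal()
    });
    done_future.wait();  // analogous to message_loop_async_waiter_.Wait()
    worker.join();
  }

 private:
  void AcquireContext() { /* make the context current, etc. */ }
};

int main() { ContextHolder holder; }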

 media::VideoDecodeAccelerator*
 RendererGpuVideoDecoderFactories::CreateVideoDecodeAccelerator(
     media::VideoCodecProfile profile,
     media::VideoDecodeAccelerator::Client* client) {
-  if (compositor_message_loop_->BelongsToCurrentThread()) {
+  if (message_loop_->BelongsToCurrentThread()) {
     AsyncCreateVideoDecodeAccelerator(profile, client);
-    compositor_loop_async_waiter_.Reset();
+    message_loop_async_waiter_.Reset();
     return vda_.release();
   }
   // The VDA is returned in the vda_ member variable by the
   // AsyncCreateVideoDecodeAccelerator() function.
-  compositor_message_loop_->PostTask(FROM_HERE, base::Bind(
+  message_loop_->PostTask(FROM_HERE, base::Bind(
       &RendererGpuVideoDecoderFactories::AsyncCreateVideoDecodeAccelerator,
       this, profile, client));

   base::WaitableEvent* objects[] = {&aborted_waiter_,
-                                    &compositor_loop_async_waiter_};
+                                    &message_loop_async_waiter_};
   if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) {
     // If we are aborting and the VDA is created by the
     // AsyncCreateVideoDecodeAccelerator() function later we need to ensure
     // that it is destroyed on the same thread.
-    compositor_message_loop_->PostTask(FROM_HERE, base::Bind(
+    message_loop_->PostTask(FROM_HERE, base::Bind(
         &RendererGpuVideoDecoderFactories::AsyncDestroyVideoDecodeAccelerator,
         this));
     return NULL;
   }
   return vda_.release();
 }
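
Editor's note: CreateVideoDecodeAccelerator() above blocks with WaitableEvent::WaitMany on both the completion event and aborted_waiter_, so Abort() can unblock a caller during teardown instead of leaving it waiting forever; if the abort wins, destruction is re-posted to the loop that owns the VDA. A rough plain-C++11 analogue of that "wait for done or abort" shape follows (illustrative names only, not the Chromium base API).

#include <condition_variable>
#include <mutex>
#include <thread>

// Two "events" folded into one mutex/condition_variable pair: a completion
// flag and an abort flag. Whichever is set first unblocks the waiter.
class AbortableWaiter {
 public:
  // Returns true if the work completed; false if Abort() won the race.
  bool WaitForCompletionOrAbort() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return done_ || aborted_; });
    return !aborted_;
  }
  void SignalDone() {  // analogous to message_loop_async_waiter_.Signal()
    std::lock_guard<std::mutex> lock(mutex_);
    done_ = true;
    cv_.notify_all();
  }
  void Abort() {  // analogous to aborted_waiter_.Signal()
    std::lock_guard<std::mutex> lock(mutex_);
    aborted_ = true;
    cv_.notify_all();
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  bool done_ = false;
  bool aborted_ = false;
};

int main() {
  AbortableWaiter waiter;
  std::thread worker([&waiter] { waiter.SignalDone(); });
  bool completed = waiter.WaitForCompletionOrAbort();
  worker.join();
  return completed ? 0 : 1;
}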

 uint32 RendererGpuVideoDecoderFactories::CreateTextures(
     int32 count, const gfx::Size& size,
     std::vector<uint32>* texture_ids,
     std::vector<gpu::Mailbox>* texture_mailboxes,
     uint32 texture_target) {
   uint32 sync_point = 0;

-  if (compositor_message_loop_->BelongsToCurrentThread()) {
+  if (message_loop_->BelongsToCurrentThread()) {
     AsyncCreateTextures(count, size, texture_target, &sync_point);
     texture_ids->swap(created_textures_);
     texture_mailboxes->swap(created_texture_mailboxes_);
-    compositor_loop_async_waiter_.Reset();
+    message_loop_async_waiter_.Reset();
     return sync_point;
   }
-  compositor_message_loop_->PostTask(FROM_HERE, base::Bind(
+  message_loop_->PostTask(FROM_HERE, base::Bind(
       &RendererGpuVideoDecoderFactories::AsyncCreateTextures, this,
       count, size, texture_target, &sync_point));

   base::WaitableEvent* objects[] = {&aborted_waiter_,
-                                    &compositor_loop_async_waiter_};
+                                    &message_loop_async_waiter_};
   if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0)
     return 0;
   texture_ids->swap(created_textures_);
   texture_mailboxes->swap(created_texture_mailboxes_);
   return sync_point;
 }

 void RendererGpuVideoDecoderFactories::AsyncCreateTextures(
     int32 count, const gfx::Size& size, uint32 texture_target,
     uint32* sync_point) {
-  DCHECK(compositor_message_loop_->BelongsToCurrentThread());
+  DCHECK(message_loop_->BelongsToCurrentThread());
   DCHECK(texture_target);

   if (!context_.get()) {
-    compositor_loop_async_waiter_.Signal();
+    message_loop_async_waiter_.Signal();
     return;
   }
   gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();
   created_textures_.resize(count);
   created_texture_mailboxes_.resize(count);
   gles2->GenTextures(count, &created_textures_[0]);
   for (int i = 0; i < count; ++i) {
     gles2->ActiveTexture(GL_TEXTURE0);
     uint32 texture_id = created_textures_[i];
     gles2->BindTexture(texture_target, texture_id);
 (...skipping 10 matching lines...)
                                   created_texture_mailboxes_[i].name);
   }

   // We need a glFlush here to guarantee the decoder (in the GPU process) can
   // use the texture ids we return here. Since textures are expected to be
   // reused, this should not be unacceptably expensive.
   gles2->Flush();
   DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));

   *sync_point = gles2->InsertSyncPointCHROMIUM();
-  compositor_loop_async_waiter_.Signal();
+  message_loop_async_waiter_.Signal();
 }

 void RendererGpuVideoDecoderFactories::DeleteTexture(uint32 texture_id) {
-  if (compositor_message_loop_->BelongsToCurrentThread()) {
+  if (message_loop_->BelongsToCurrentThread()) {
     AsyncDeleteTexture(texture_id);
     return;
   }
-  compositor_message_loop_->PostTask(FROM_HERE, base::Bind(
+  message_loop_->PostTask(FROM_HERE, base::Bind(
       &RendererGpuVideoDecoderFactories::AsyncDeleteTexture, this, texture_id));
 }

 void RendererGpuVideoDecoderFactories::AsyncDeleteTexture(uint32 texture_id) {
-  DCHECK(compositor_message_loop_->BelongsToCurrentThread());
+  DCHECK(message_loop_->BelongsToCurrentThread());
   if (!context_.get())
     return;

   gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();
   gles2->DeleteTextures(1, &texture_id);
   DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));
 }

 void RendererGpuVideoDecoderFactories::WaitSyncPoint(uint32 sync_point) {
-  if (compositor_message_loop_->BelongsToCurrentThread()) {
+  if (message_loop_->BelongsToCurrentThread()) {
     AsyncWaitSyncPoint(sync_point);
+    message_loop_async_waiter_.Reset();
     return;
   }

-  compositor_message_loop_->PostTask(FROM_HERE, base::Bind(
+  message_loop_->PostTask(FROM_HERE, base::Bind(
       &RendererGpuVideoDecoderFactories::AsyncWaitSyncPoint,
       this,
       sync_point));
   base::WaitableEvent* objects[] = {&aborted_waiter_,
-                                    &compositor_loop_async_waiter_};
+                                    &message_loop_async_waiter_};
   base::WaitableEvent::WaitMany(objects, arraysize(objects));
 }

 void RendererGpuVideoDecoderFactories::AsyncWaitSyncPoint(uint32 sync_point) {
-  DCHECK(compositor_message_loop_->BelongsToCurrentThread());
+  DCHECK(message_loop_->BelongsToCurrentThread());
   if (!context_) {
-    compositor_loop_async_waiter_.Signal();
+    message_loop_async_waiter_.Signal();
     return;
   }

   gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();
   gles2->WaitSyncPointCHROMIUM(sync_point);
-  compositor_loop_async_waiter_.Signal();
+  message_loop_async_waiter_.Signal();
 }

 void RendererGpuVideoDecoderFactories::ReadPixels(
     uint32 texture_id, uint32 texture_target, const gfx::Size& size,
     const SkBitmap& pixels) {
   // SkBitmaps use the SkPixelRef object to refcount the underlying pixels.
   // Multiple SkBitmaps can share a SkPixelRef instance. We use this to
   // ensure that the underlying pixels in the SkBitmap passed in remain valid
   // until the AsyncReadPixels() call completes.
   read_pixels_bitmap_.setPixelRef(pixels.pixelRef());

-  if (!compositor_message_loop_->BelongsToCurrentThread()) {
-    compositor_message_loop_->PostTask(FROM_HERE, base::Bind(
+  if (!message_loop_->BelongsToCurrentThread()) {
+    message_loop_->PostTask(FROM_HERE, base::Bind(
         &RendererGpuVideoDecoderFactories::AsyncReadPixels, this,
         texture_id, texture_target, size));
     base::WaitableEvent* objects[] = {&aborted_waiter_,
-                                      &compositor_loop_async_waiter_};
+                                      &message_loop_async_waiter_};
     if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0)
       return;
   } else {
     AsyncReadPixels(texture_id, texture_target, size);
+    message_loop_async_waiter_.Reset();
   }
   read_pixels_bitmap_.setPixelRef(NULL);
 }

 void RendererGpuVideoDecoderFactories::AsyncReadPixels(
     uint32 texture_id, uint32 texture_target, const gfx::Size& size) {
-  DCHECK(compositor_message_loop_->BelongsToCurrentThread());
+  DCHECK(message_loop_->BelongsToCurrentThread());
   if (!context_.get()) {
-    compositor_loop_async_waiter_.Signal();
+    message_loop_async_waiter_.Signal();
     return;
   }

   gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();

   GLuint tmp_texture;
   gles2->GenTextures(1, &tmp_texture);
   gles2->BindTexture(texture_target, tmp_texture);
   gles2->TexParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
   gles2->TexParameteri(texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
   gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
   gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
   context_->copyTextureCHROMIUM(
       texture_target, texture_id, tmp_texture, 0, GL_RGBA, GL_UNSIGNED_BYTE);

   GLuint fb;
   gles2->GenFramebuffers(1, &fb);
   gles2->BindFramebuffer(GL_FRAMEBUFFER, fb);
   gles2->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                               texture_target, tmp_texture, 0);
   gles2->PixelStorei(GL_PACK_ALIGNMENT, 4);
   gles2->ReadPixels(0, 0, size.width(), size.height(), GL_BGRA_EXT,
                     GL_UNSIGNED_BYTE, read_pixels_bitmap_.pixelRef()->pixels());
   gles2->DeleteFramebuffers(1, &fb);
   gles2->DeleteTextures(1, &tmp_texture);
   DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));
-  compositor_loop_async_waiter_.Signal();
+  message_loop_async_waiter_.Signal();
 }
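
Editor's note: ReadPixels() above pins the caller's pixel memory by holding an extra reference to the SkPixelRef until AsyncReadPixels() has finished writing into it on the other thread. The same lifetime idea is sketched below with std::shared_ptr standing in for the refcounted pixel storage; this is an illustration only, the real code uses SkBitmap/SkPixelRef as shown.

#include <algorithm>
#include <cstdint>
#include <memory>
#include <thread>
#include <vector>

using PixelBuffer = std::vector<uint8_t>;

// The extra shared_ptr reference keeps the buffer alive even if the original
// owner drops it while the asynchronous task is still writing into it.
void AsyncFill(std::shared_ptr<PixelBuffer> pixels) {
  std::fill(pixels->begin(), pixels->end(), 0xFF);
}

int main() {
  auto pixels = std::make_shared<PixelBuffer>(640 * 480 * 4);
  std::thread worker(AsyncFill, pixels);  // the thread copies the shared_ptr
  pixels.reset();                         // caller releases its reference early
  worker.join();                          // buffer stays valid until the task ends
}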

 base::SharedMemory* RendererGpuVideoDecoderFactories::CreateSharedMemory(
     size_t size) {
   if (main_message_loop_->BelongsToCurrentThread()) {
     return ChildThread::current()->AllocateSharedMemory(size);
   }
   main_message_loop_->PostTask(FROM_HERE, base::Bind(
       &RendererGpuVideoDecoderFactories::AsyncCreateSharedMemory, this,
       size));

   base::WaitableEvent* objects[] = {&aborted_waiter_,
                                     &render_thread_async_waiter_};
   if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0)
     return NULL;
   return shared_memory_segment_.release();
 }

 void RendererGpuVideoDecoderFactories::AsyncCreateSharedMemory(size_t size) {
   DCHECK_EQ(base::MessageLoop::current(),
             ChildThread::current()->message_loop());

   shared_memory_segment_.reset(
       ChildThread::current()->AllocateSharedMemory(size));
   render_thread_async_waiter_.Signal();
 }

 scoped_refptr<base::MessageLoopProxy>
 RendererGpuVideoDecoderFactories::GetMessageLoop() {
-  return compositor_message_loop_;
+  return message_loop_;
 }

 void RendererGpuVideoDecoderFactories::Abort() {
   aborted_waiter_.Signal();
 }

 bool RendererGpuVideoDecoderFactories::IsAborted() {
   return aborted_waiter_.IsSignaled();
 }

+scoped_refptr<media::GpuVideoDecoderFactories>
+RendererGpuVideoDecoderFactories::Clone() {
+  scoped_refptr<RendererGpuVideoDecoderFactories> factories =
+      new RendererGpuVideoDecoderFactories();
+  factories->message_loop_ = message_loop_;
+  factories->main_message_loop_ = main_message_loop_;
+  factories->gpu_channel_host_ = gpu_channel_host_;
+  factories->context_ = context_;
+  return factories;
+}
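
Editor's note: the new Clone() above builds a second factories object that shares the message loops, channel and context with the original but, via the new default constructor, gets its own waitable events, so each user blocks on its own state. A minimal sketch of that "share the plumbing, keep per-instance sync state" shape in plain C++ (names are illustrative):

#include <memory>

// Shared plumbing (in the real class: the message loop proxies, the
// GpuChannelHost and the context weak pointer) lives behind a shared handle;
// per-instance synchronization state is recreated for every clone.
struct SharedGpuState {
  int channel_id = 0;  // placeholder for the shared plumbing
};

class DecoderFactories {
 public:
  explicit DecoderFactories(std::shared_ptr<SharedGpuState> state)
      : state_(std::move(state)) {}

  // Mirrors Clone() above: the copy shares |state_| but gets fresh
  // per-instance members (in the real class, its own WaitableEvents).
  DecoderFactories Clone() const { return DecoderFactories(state_); }

 private:
  std::shared_ptr<SharedGpuState> state_;
};

int main() {
  auto state = std::make_shared<SharedGpuState>();
  DecoderFactories original(state);
  DecoderFactories copy = original.Clone();
  (void)copy;
}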
+
 void RendererGpuVideoDecoderFactories::AsyncDestroyVideoDecodeAccelerator() {
   // OK to release because Destroy() will delete the VDA instance.
   if (vda_)
     vda_.release()->Destroy();
 }

 }  // namespace content