Chromium Code Reviews

Side by Side Diff: content/renderer/media/renderer_gpu_video_decoder_factories.cc

Issue 19534002: Make RendererGpuVideoDecoderFactories live on arbitrary threads. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: address review comments | Created 7 years, 5 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/renderer/media/renderer_gpu_video_decoder_factories.h" 5 #include "content/renderer/media/renderer_gpu_video_decoder_factories.h"
6 6
7 #include <GLES2/gl2.h> 7 #include <GLES2/gl2.h>
8 #include <GLES2/gl2ext.h> 8 #include <GLES2/gl2ext.h>
9 9
10 #include "base/bind.h" 10 #include "base/bind.h"
11 #include "content/child/child_thread.h" 11 #include "content/child/child_thread.h"
12 #include "content/common/gpu/client/gpu_channel_host.h" 12 #include "content/common/gpu/client/gpu_channel_host.h"
13 #include "content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.h" 13 #include "content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.h"
14 #include "gpu/command_buffer/client/gles2_implementation.h" 14 #include "gpu/command_buffer/client/gles2_implementation.h"
15 #include "gpu/ipc/command_buffer_proxy.h" 15 #include "gpu/ipc/command_buffer_proxy.h"
16 #include "third_party/skia/include/core/SkPixelRef.h" 16 #include "third_party/skia/include/core/SkPixelRef.h"
17 17
18 namespace content { 18 namespace content {
19 19
20 RendererGpuVideoDecoderFactories::~RendererGpuVideoDecoderFactories() {} 20 RendererGpuVideoDecoderFactories::~RendererGpuVideoDecoderFactories() {}
21 RendererGpuVideoDecoderFactories::RendererGpuVideoDecoderFactories( 21 RendererGpuVideoDecoderFactories::RendererGpuVideoDecoderFactories(
22 GpuChannelHost* gpu_channel_host, 22 GpuChannelHost* gpu_channel_host,
23 const scoped_refptr<base::MessageLoopProxy>& compositor_message_loop, 23 const scoped_refptr<base::MessageLoopProxy>& message_loop,
24 WebGraphicsContext3DCommandBufferImpl* context) 24 WebGraphicsContext3DCommandBufferImpl* context)
25 : compositor_message_loop_(compositor_message_loop), 25 : message_loop_(message_loop),
26 main_message_loop_(base::MessageLoopProxy::current()), 26 main_message_loop_(base::MessageLoopProxy::current()),
27 gpu_channel_host_(gpu_channel_host), 27 gpu_channel_host_(gpu_channel_host),
28 aborted_waiter_(true, false), 28 aborted_waiter_(true, false),
29 compositor_loop_async_waiter_(false, false), 29 message_loop_async_waiter_(false, false),
30 render_thread_async_waiter_(false, false) { 30 render_thread_async_waiter_(false, false) {
31 if (compositor_message_loop_->BelongsToCurrentThread()) { 31 if (message_loop_->BelongsToCurrentThread()) {
32 AsyncGetContext(context); 32 AsyncGetContext(context);
33 compositor_loop_async_waiter_.Reset(); 33 message_loop_async_waiter_.Reset();
34 return; 34 return;
35 } 35 }
36 // Threaded compositor requires us to wait for the context to be acquired. 36 // Wait for the context to be acquired.
37 compositor_message_loop_->PostTask(FROM_HERE, base::Bind( 37 message_loop_->PostTask(FROM_HERE, base::Bind(
38 &RendererGpuVideoDecoderFactories::AsyncGetContext, 38 &RendererGpuVideoDecoderFactories::AsyncGetContext,
39 // Unretained to avoid ref/deref'ing |*this|, which is not yet stored in a 39 // Unretained to avoid ref/deref'ing |*this|, which is not yet stored in a
40 // scoped_refptr. Safe because the Wait() below keeps us alive until this 40 // scoped_refptr. Safe because the Wait() below keeps us alive until this
41 // task completes. 41 // task completes.
42 base::Unretained(this), 42 base::Unretained(this),
43 // OK to pass raw because the pointee is only deleted on the compositor 43 // OK to pass raw because the pointee is only deleted on the compositor
44 // thread, and only as the result of a PostTask from the render thread 44 // thread, and only as the result of a PostTask from the render thread
45 // which can only happen after this function returns, so our PostTask will 45 // which can only happen after this function returns, so our PostTask will
46 // run first. 46 // run first.
47 context)); 47 context));
48 compositor_loop_async_waiter_.Wait(); 48 message_loop_async_waiter_.Wait();
49 } 49 }
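
Note on the constructor above: it hops to the factories' message loop and blocks until AsyncGetContext() signals completion. A minimal standalone sketch of that post-then-wait idiom, using std::promise/std::future instead of Chromium's base::MessageLoopProxy and base::WaitableEvent (all names below are illustrative stand-ins, not part of this CL):

// Sketch only: the post-then-wait pattern used by the ctor above.
#include <future>
#include <thread>

struct Context3D {
  bool MakeCurrent() { return true; }  // stand-in for makeContextCurrent()
};

// Runs on the "factories" thread; mirrors AsyncGetContext().
void AsyncGetContext(Context3D* context, std::promise<void>* done) {
  context->MakeCurrent();
  done->set_value();  // analogous to message_loop_async_waiter_.Signal()
}

int main() {
  Context3D context;
  std::promise<void> done;
  std::future<void> signaled = done.get_future();
  // Analogous to message_loop_->PostTask(): hand the work to another thread.
  // Raw pointers are safe here only because we block below until the task
  // has finished, just as the real ctor's comment explains for Unretained().
  std::thread worker(AsyncGetContext, &context, &done);
  signaled.wait();  // analogous to message_loop_async_waiter_.Wait()
  worker.join();
  return 0;
}
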
50 50
51 RendererGpuVideoDecoderFactories::RendererGpuVideoDecoderFactories()
52 : aborted_waiter_(true, false),
53 message_loop_async_waiter_(false, false),
54 render_thread_async_waiter_(false, false) {}
55
51 void RendererGpuVideoDecoderFactories::AsyncGetContext( 56 void RendererGpuVideoDecoderFactories::AsyncGetContext(
52 WebGraphicsContext3DCommandBufferImpl* context) { 57 WebGraphicsContext3DCommandBufferImpl* context) {
53 context_ = context->AsWeakPtr(); 58 context_ = context->AsWeakPtr();
54 if (context_.get()) { 59 if (context_.get()) {
55 if (context_->makeContextCurrent()) { 60 if (context_->makeContextCurrent()) {
56 // Called once per media player, but is a no-op after the first one in 61 // Called once per media player, but is a no-op after the first one in
57 // each renderer. 62 // each renderer.
58 context_->insertEventMarkerEXT("GpuVDAContext3D"); 63 context_->insertEventMarkerEXT("GpuVDAContext3D");
59 } 64 }
60 } 65 }
61 compositor_loop_async_waiter_.Signal(); 66 message_loop_async_waiter_.Signal();
62 } 67 }
63 68
64 media::VideoDecodeAccelerator* 69 media::VideoDecodeAccelerator*
65 RendererGpuVideoDecoderFactories::CreateVideoDecodeAccelerator( 70 RendererGpuVideoDecoderFactories::CreateVideoDecodeAccelerator(
66 media::VideoCodecProfile profile, 71 media::VideoCodecProfile profile,
67 media::VideoDecodeAccelerator::Client* client) { 72 media::VideoDecodeAccelerator::Client* client) {
68 if (compositor_message_loop_->BelongsToCurrentThread()) { 73 if (message_loop_->BelongsToCurrentThread()) {
69 AsyncCreateVideoDecodeAccelerator(profile, client); 74 AsyncCreateVideoDecodeAccelerator(profile, client);
70 compositor_loop_async_waiter_.Reset(); 75 message_loop_async_waiter_.Reset();
71 return vda_.release(); 76 return vda_.release();
72 } 77 }
73 // The VDA is returned in the vda_ member variable by the 78 // The VDA is returned in the vda_ member variable by the
74 // AsyncCreateVideoDecodeAccelerator() function. 79 // AsyncCreateVideoDecodeAccelerator() function.
75 compositor_message_loop_->PostTask(FROM_HERE, base::Bind( 80 message_loop_->PostTask(FROM_HERE, base::Bind(
76 &RendererGpuVideoDecoderFactories::AsyncCreateVideoDecodeAccelerator, 81 &RendererGpuVideoDecoderFactories::AsyncCreateVideoDecodeAccelerator,
77 this, profile, client)); 82 this, profile, client));
78 83
79 base::WaitableEvent* objects[] = {&aborted_waiter_, 84 base::WaitableEvent* objects[] = {&aborted_waiter_,
80 &compositor_loop_async_waiter_}; 85 &message_loop_async_waiter_};
81 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) { 86 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) {
82 // If we are aborting and the VDA is created by the 87 // If we are aborting and the VDA is created by the
83 // AsyncCreateVideoDecodeAccelerator() function later we need to ensure 88 // AsyncCreateVideoDecodeAccelerator() function later we need to ensure
84 // that it is destroyed on the same thread. 89 // that it is destroyed on the same thread.
85 compositor_message_loop_->PostTask(FROM_HERE, base::Bind( 90 message_loop_->PostTask(FROM_HERE, base::Bind(
86 &RendererGpuVideoDecoderFactories::AsyncDestroyVideoDecodeAccelerator, 91 &RendererGpuVideoDecoderFactories::AsyncDestroyVideoDecodeAccelerator,
87 this)); 92 this));
88 return NULL; 93 return NULL;
89 } 94 }
90 return vda_.release(); 95 return vda_.release();
91 } 96 }
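
The WaitMany() call above races completion of AsyncCreateVideoDecodeAccelerator() against aborted_waiter_, so a blocked caller can be released early by Abort(). A standalone sketch of that completion-or-abort wait (standard C++, illustrative names only, not part of this CL):

// Sketch only: wait until either the posted work completes or Abort() fires,
// mirroring WaitMany({&aborted_waiter_, &message_loop_async_waiter_}).
#include <condition_variable>
#include <mutex>

class CompletionOrAbort {
 public:
  // Returns true if the work completed, false if it was aborted first.
  bool Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return completed_ || aborted_; });
    return !aborted_;
  }
  void SignalCompleted() {
    { std::lock_guard<std::mutex> lock(mutex_); completed_ = true; }
    cv_.notify_all();
  }
  void Abort() {
    { std::lock_guard<std::mutex> lock(mutex_); aborted_ = true; }
    cv_.notify_all();
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  bool completed_ = false;
  bool aborted_ = false;
};

int main() {
  CompletionOrAbort waiter;
  waiter.SignalCompleted();
  return waiter.Wait() ? 0 : 1;
}

On the abort path the real code additionally posts AsyncDestroyVideoDecodeAccelerator(), so a VDA that is created after the abort is still destroyed on the factories' thread.
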
92 97
93 void RendererGpuVideoDecoderFactories::AsyncCreateVideoDecodeAccelerator( 98 void RendererGpuVideoDecoderFactories::AsyncCreateVideoDecodeAccelerator(
94 media::VideoCodecProfile profile, 99 media::VideoCodecProfile profile,
95 media::VideoDecodeAccelerator::Client* client) { 100 media::VideoDecodeAccelerator::Client* client) {
96 DCHECK(compositor_message_loop_->BelongsToCurrentThread()); 101 DCHECK(message_loop_->BelongsToCurrentThread());
97 102
98 if (context_.get() && context_->GetCommandBufferProxy()) { 103 if (context_.get() && context_->GetCommandBufferProxy()) {
99 vda_ = gpu_channel_host_->CreateVideoDecoder( 104 vda_ = gpu_channel_host_->CreateVideoDecoder(
100 context_->GetCommandBufferProxy()->GetRouteID(), profile, client); 105 context_->GetCommandBufferProxy()->GetRouteID(), profile, client);
101 } 106 }
102 compositor_loop_async_waiter_.Signal(); 107 message_loop_async_waiter_.Signal();
103 } 108 }
104 109
105 uint32 RendererGpuVideoDecoderFactories::CreateTextures( 110 uint32 RendererGpuVideoDecoderFactories::CreateTextures(
106 int32 count, const gfx::Size& size, 111 int32 count, const gfx::Size& size,
107 std::vector<uint32>* texture_ids, 112 std::vector<uint32>* texture_ids,
108 std::vector<gpu::Mailbox>* texture_mailboxes, 113 std::vector<gpu::Mailbox>* texture_mailboxes,
109 uint32 texture_target) { 114 uint32 texture_target) {
110 uint32 sync_point = 0; 115 uint32 sync_point = 0;
111 116
112 if (compositor_message_loop_->BelongsToCurrentThread()) { 117 if (message_loop_->BelongsToCurrentThread()) {
113 AsyncCreateTextures(count, size, texture_target, &sync_point); 118 AsyncCreateTextures(count, size, texture_target, &sync_point);
114 texture_ids->swap(created_textures_); 119 texture_ids->swap(created_textures_);
115 texture_mailboxes->swap(created_texture_mailboxes_); 120 texture_mailboxes->swap(created_texture_mailboxes_);
116 compositor_loop_async_waiter_.Reset(); 121 message_loop_async_waiter_.Reset();
117 return sync_point; 122 return sync_point;
118 } 123 }
119 compositor_message_loop_->PostTask(FROM_HERE, base::Bind( 124 message_loop_->PostTask(FROM_HERE, base::Bind(
120 &RendererGpuVideoDecoderFactories::AsyncCreateTextures, this, 125 &RendererGpuVideoDecoderFactories::AsyncCreateTextures, this,
121 count, size, texture_target, &sync_point)); 126 count, size, texture_target, &sync_point));
122 127
123 base::WaitableEvent* objects[] = {&aborted_waiter_, 128 base::WaitableEvent* objects[] = {&aborted_waiter_,
124 &compositor_loop_async_waiter_}; 129 &message_loop_async_waiter_};
125 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) 130 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0)
126 return 0; 131 return 0;
127 texture_ids->swap(created_textures_); 132 texture_ids->swap(created_textures_);
128 texture_mailboxes->swap(created_texture_mailboxes_); 133 texture_mailboxes->swap(created_texture_mailboxes_);
129 return sync_point; 134 return sync_point;
130 } 135 }
131 136
132 void RendererGpuVideoDecoderFactories::AsyncCreateTextures( 137 void RendererGpuVideoDecoderFactories::AsyncCreateTextures(
133 int32 count, const gfx::Size& size, uint32 texture_target, 138 int32 count, const gfx::Size& size, uint32 texture_target,
134 uint32* sync_point) { 139 uint32* sync_point) {
135 DCHECK(compositor_message_loop_->BelongsToCurrentThread()); 140 DCHECK(message_loop_->BelongsToCurrentThread());
136 DCHECK(texture_target); 141 DCHECK(texture_target);
137 142
138 if (!context_.get()) { 143 if (!context_.get()) {
139 compositor_loop_async_waiter_.Signal(); 144 message_loop_async_waiter_.Signal();
140 return; 145 return;
141 } 146 }
142 gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation(); 147 gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();
143 created_textures_.resize(count); 148 created_textures_.resize(count);
144 created_texture_mailboxes_.resize(count); 149 created_texture_mailboxes_.resize(count);
145 gles2->GenTextures(count, &created_textures_[0]); 150 gles2->GenTextures(count, &created_textures_[0]);
146 for (int i = 0; i < count; ++i) { 151 for (int i = 0; i < count; ++i) {
147 gles2->ActiveTexture(GL_TEXTURE0); 152 gles2->ActiveTexture(GL_TEXTURE0);
148 uint32 texture_id = created_textures_[i]; 153 uint32 texture_id = created_textures_[i];
149 gles2->BindTexture(texture_target, texture_id); 154 gles2->BindTexture(texture_target, texture_id);
(...skipping 10 matching lines...)
160 created_texture_mailboxes_[i].name); 165 created_texture_mailboxes_[i].name);
161 } 166 }
162 167
163 // We need a glFlush here to guarantee the decoder (in the GPU process) can 168 // We need a glFlush here to guarantee the decoder (in the GPU process) can
164 // use the texture ids we return here. Since textures are expected to be 169 // use the texture ids we return here. Since textures are expected to be
165 // reused, this should not be unacceptably expensive. 170 // reused, this should not be unacceptably expensive.
166 gles2->Flush(); 171 gles2->Flush();
167 DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR)); 172 DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));
168 173
169 *sync_point = gles2->InsertSyncPointCHROMIUM(); 174 *sync_point = gles2->InsertSyncPointCHROMIUM();
170 compositor_loop_async_waiter_.Signal(); 175 message_loop_async_waiter_.Signal();
171 } 176 }
172 177
173 void RendererGpuVideoDecoderFactories::DeleteTexture(uint32 texture_id) { 178 void RendererGpuVideoDecoderFactories::DeleteTexture(uint32 texture_id) {
174 if (compositor_message_loop_->BelongsToCurrentThread()) { 179 if (message_loop_->BelongsToCurrentThread()) {
175 AsyncDeleteTexture(texture_id); 180 AsyncDeleteTexture(texture_id);
176 return; 181 return;
177 } 182 }
178 compositor_message_loop_->PostTask(FROM_HERE, base::Bind( 183 message_loop_->PostTask(FROM_HERE, base::Bind(
179 &RendererGpuVideoDecoderFactories::AsyncDeleteTexture, this, texture_id)); 184 &RendererGpuVideoDecoderFactories::AsyncDeleteTexture, this, texture_id));
180 } 185 }
181 186
182 void RendererGpuVideoDecoderFactories::AsyncDeleteTexture(uint32 texture_id) { 187 void RendererGpuVideoDecoderFactories::AsyncDeleteTexture(uint32 texture_id) {
183 DCHECK(compositor_message_loop_->BelongsToCurrentThread()); 188 DCHECK(message_loop_->BelongsToCurrentThread());
184 if (!context_.get()) 189 if (!context_.get())
185 return; 190 return;
186 191
187 gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation(); 192 gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();
188 gles2->DeleteTextures(1, &texture_id); 193 gles2->DeleteTextures(1, &texture_id);
189 DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR)); 194 DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));
190 } 195 }
191 196
192 void RendererGpuVideoDecoderFactories::WaitSyncPoint(uint32 sync_point) { 197 void RendererGpuVideoDecoderFactories::WaitSyncPoint(uint32 sync_point) {
193 if (compositor_message_loop_->BelongsToCurrentThread()) { 198 if (message_loop_->BelongsToCurrentThread()) {
194 AsyncWaitSyncPoint(sync_point); 199 AsyncWaitSyncPoint(sync_point);
195 return; 200 return;
Ami GONE FROM CHROMIUM 2013/07/22 19:46:21 message_loop_async_waiter_.Reset();
wuchengli 2013/07/23 16:29:28 Done.
196 } 201 }
197 202
198 compositor_message_loop_->PostTask(FROM_HERE, base::Bind( 203 message_loop_->PostTask(FROM_HERE, base::Bind(
199 &RendererGpuVideoDecoderFactories::AsyncWaitSyncPoint, 204 &RendererGpuVideoDecoderFactories::AsyncWaitSyncPoint,
200 this, 205 this,
201 sync_point)); 206 sync_point));
202 base::WaitableEvent* objects[] = {&aborted_waiter_, 207 base::WaitableEvent* objects[] = {&aborted_waiter_,
203 &compositor_loop_async_waiter_}; 208 &message_loop_async_waiter_};
204 base::WaitableEvent::WaitMany(objects, arraysize(objects)); 209 base::WaitableEvent::WaitMany(objects, arraysize(objects));
205 } 210 }
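
The Reset() the reviewer asks for above matters because message_loop_async_waiter_ appears to be auto-reset (constructed with (false, false)): if the same-thread branch signals it with nobody waiting, the event stays signaled, and the next cross-thread WaitMany() returns immediately with stale state. A standalone sketch of that hazard (standard C++, illustrative only, not part of this CL):

// Sketch only: an auto-reset event signaled with no waiter stays signaled,
// so a later wait returns before the posted work has actually run.
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

class AutoResetEvent {
 public:
  void Signal() {
    { std::lock_guard<std::mutex> lock(m_); signaled_ = true; }
    cv_.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(m_);
    cv_.wait(lock, [this] { return signaled_; });
    signaled_ = false;  // auto-reset on a successful wait
  }
  void Reset() { std::lock_guard<std::mutex> lock(m_); signaled_ = false; }

 private:
  std::mutex m_;
  std::condition_variable cv_;
  bool signaled_ = false;
};

int main() {
  AutoResetEvent done;
  int result = 0;

  // Same-thread path: the helper runs inline and signals with no waiter.
  result = 1;
  done.Signal();
  // Without done.Reset() here, the event is still signaled...

  // Later, cross-thread path: post work and wait for it.
  std::thread worker([&] {
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    result = 2;
    done.Signal();
  });
  done.Wait();  // ...so this returns immediately, before result == 2.
  std::printf("result = %d (expected 2)\n", result);
  worker.join();
  return 0;
}
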
206 211
207 void RendererGpuVideoDecoderFactories::AsyncWaitSyncPoint(uint32 sync_point) { 212 void RendererGpuVideoDecoderFactories::AsyncWaitSyncPoint(uint32 sync_point) {
208 DCHECK(compositor_message_loop_->BelongsToCurrentThread()); 213 DCHECK(message_loop_->BelongsToCurrentThread());
209 if (!context_) { 214 if (!context_) {
210 compositor_loop_async_waiter_.Signal(); 215 message_loop_async_waiter_.Signal();
211 return; 216 return;
212 } 217 }
213 218
214 gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation(); 219 gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();
215 gles2->WaitSyncPointCHROMIUM(sync_point); 220 gles2->WaitSyncPointCHROMIUM(sync_point);
216 compositor_loop_async_waiter_.Signal(); 221 message_loop_async_waiter_.Signal();
217 } 222 }
218 223
219 void RendererGpuVideoDecoderFactories::ReadPixels( 224 void RendererGpuVideoDecoderFactories::ReadPixels(
220 uint32 texture_id, uint32 texture_target, const gfx::Size& size, 225 uint32 texture_id, uint32 texture_target, const gfx::Size& size,
221 const SkBitmap& pixels) { 226 const SkBitmap& pixels) {
222 // SkBitmaps use the SkPixelRef object to refcount the underlying pixels. 227 // SkBitmaps use the SkPixelRef object to refcount the underlying pixels.
223 // Multiple SkBitmaps can share a SkPixelRef instance. We use this to 228 // Multiple SkBitmaps can share a SkPixelRef instance. We use this to
224 // ensure that the underlying pixels in the SkBitmap passed in remain valid 229 // ensure that the underlying pixels in the SkBitmap passed in remain valid
225 // until the AsyncReadPixels() call completes. 230 // until the AsyncReadPixels() call completes.
226 read_pixels_bitmap_.setPixelRef(pixels.pixelRef()); 231 read_pixels_bitmap_.setPixelRef(pixels.pixelRef());
227 232
228 if (!compositor_message_loop_->BelongsToCurrentThread()) { 233 if (!message_loop_->BelongsToCurrentThread()) {
229 compositor_message_loop_->PostTask(FROM_HERE, base::Bind( 234 message_loop_->PostTask(FROM_HERE, base::Bind(
230 &RendererGpuVideoDecoderFactories::AsyncReadPixels, this, 235 &RendererGpuVideoDecoderFactories::AsyncReadPixels, this,
231 texture_id, texture_target, size)); 236 texture_id, texture_target, size));
232 base::WaitableEvent* objects[] = {&aborted_waiter_, 237 base::WaitableEvent* objects[] = {&aborted_waiter_,
233 &compositor_loop_async_waiter_}; 238 &message_loop_async_waiter_};
234 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) 239 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0)
235 return; 240 return;
236 } else { 241 } else {
237 AsyncReadPixels(texture_id, texture_target, size); 242 AsyncReadPixels(texture_id, texture_target, size);
Ami GONE FROM CHROMIUM 2013/07/22 19:46:21 message_loop_async_waiter_.Reset();
wuchengli 2013/07/23 16:29:28 Done.
238 } 243 }
239 read_pixels_bitmap_.setPixelRef(NULL); 244 read_pixels_bitmap_.setPixelRef(NULL);
240 } 245 }
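
The setPixelRef(pixels.pixelRef()) trick above keeps the caller's pixel storage alive until the asynchronous read has written into it. The same idea, shown with std::shared_ptr instead of Skia's refcounted SkPixelRef (illustrative names only, not part of this CL):

// Sketch only: hold a second reference so the shared storage outlives the
// asynchronous read, as ReadPixels()/AsyncReadPixels() do with SkPixelRef.
#include <algorithm>
#include <cstdint>
#include <future>
#include <memory>
#include <vector>

using Pixels = std::vector<uint32_t>;

// Runs on another thread and writes into the shared storage.
void AsyncReadPixels(std::shared_ptr<Pixels> pixels) {
  std::fill(pixels->begin(), pixels->end(), 0xff00ff00u);
}

void ReadPixels(const std::shared_ptr<Pixels>& caller_pixels) {
  std::shared_ptr<Pixels> keep_alive = caller_pixels;  // ~ setPixelRef(pixelRef())
  std::future<void> done =
      std::async(std::launch::async, AsyncReadPixels, keep_alive);
  done.wait();         // ~ the WaitMany() above
  keep_alive.reset();  // ~ setPixelRef(NULL)
}

int main() {
  auto pixels = std::make_shared<Pixels>(64 * 64);
  ReadPixels(pixels);
  return 0;
}
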
241 246
242 void RendererGpuVideoDecoderFactories::AsyncReadPixels( 247 void RendererGpuVideoDecoderFactories::AsyncReadPixels(
243 uint32 texture_id, uint32 texture_target, const gfx::Size& size) { 248 uint32 texture_id, uint32 texture_target, const gfx::Size& size) {
244 DCHECK(compositor_message_loop_->BelongsToCurrentThread()); 249 DCHECK(message_loop_->BelongsToCurrentThread());
245 if (!context_.get()) { 250 if (!context_.get()) {
246 compositor_loop_async_waiter_.Signal(); 251 message_loop_async_waiter_.Signal();
247 return; 252 return;
248 } 253 }
249 254
250 gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation(); 255 gpu::gles2::GLES2Implementation* gles2 = context_->GetImplementation();
251 256
252 GLuint tmp_texture; 257 GLuint tmp_texture;
253 gles2->GenTextures(1, &tmp_texture); 258 gles2->GenTextures(1, &tmp_texture);
254 gles2->BindTexture(texture_target, tmp_texture); 259 gles2->BindTexture(texture_target, tmp_texture);
255 gles2->TexParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR); 260 gles2->TexParameteri(texture_target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
256 gles2->TexParameteri(texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR); 261 gles2->TexParameteri(texture_target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
257 gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); 262 gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
258 gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); 263 gles2->TexParameteri(texture_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
259 context_->copyTextureCHROMIUM( 264 context_->copyTextureCHROMIUM(
260 texture_target, texture_id, tmp_texture, 0, GL_RGBA, GL_UNSIGNED_BYTE); 265 texture_target, texture_id, tmp_texture, 0, GL_RGBA, GL_UNSIGNED_BYTE);
261 266
262 GLuint fb; 267 GLuint fb;
263 gles2->GenFramebuffers(1, &fb); 268 gles2->GenFramebuffers(1, &fb);
264 gles2->BindFramebuffer(GL_FRAMEBUFFER, fb); 269 gles2->BindFramebuffer(GL_FRAMEBUFFER, fb);
265 gles2->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, 270 gles2->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
266 texture_target, tmp_texture, 0); 271 texture_target, tmp_texture, 0);
267 gles2->PixelStorei(GL_PACK_ALIGNMENT, 4); 272 gles2->PixelStorei(GL_PACK_ALIGNMENT, 4);
268 gles2->ReadPixels(0, 0, size.width(), size.height(), GL_BGRA_EXT, 273 gles2->ReadPixels(0, 0, size.width(), size.height(), GL_BGRA_EXT,
269 GL_UNSIGNED_BYTE, read_pixels_bitmap_.pixelRef()->pixels()); 274 GL_UNSIGNED_BYTE, read_pixels_bitmap_.pixelRef()->pixels());
270 gles2->DeleteFramebuffers(1, &fb); 275 gles2->DeleteFramebuffers(1, &fb);
271 gles2->DeleteTextures(1, &tmp_texture); 276 gles2->DeleteTextures(1, &tmp_texture);
272 DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR)); 277 DCHECK_EQ(gles2->GetError(), static_cast<GLenum>(GL_NO_ERROR));
273 compositor_loop_async_waiter_.Signal(); 278 message_loop_async_waiter_.Signal();
274 } 279 }
275 280
276 base::SharedMemory* RendererGpuVideoDecoderFactories::CreateSharedMemory( 281 base::SharedMemory* RendererGpuVideoDecoderFactories::CreateSharedMemory(
277 size_t size) { 282 size_t size) {
278 if (main_message_loop_->BelongsToCurrentThread()) { 283 if (main_message_loop_->BelongsToCurrentThread()) {
279 return ChildThread::current()->AllocateSharedMemory(size); 284 return ChildThread::current()->AllocateSharedMemory(size);
280 } 285 }
281 main_message_loop_->PostTask(FROM_HERE, base::Bind( 286 main_message_loop_->PostTask(FROM_HERE, base::Bind(
282 &RendererGpuVideoDecoderFactories::AsyncCreateSharedMemory, this, 287 &RendererGpuVideoDecoderFactories::AsyncCreateSharedMemory, this,
283 size)); 288 size));
284 289
285 base::WaitableEvent* objects[] = {&aborted_waiter_, 290 base::WaitableEvent* objects[] = {&aborted_waiter_,
286 &render_thread_async_waiter_}; 291 &render_thread_async_waiter_};
287 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0) 292 if (base::WaitableEvent::WaitMany(objects, arraysize(objects)) == 0)
288 return NULL; 293 return NULL;
289 return shared_memory_segment_.release(); 294 return shared_memory_segment_.release();
290 } 295 }
291 296
292 void RendererGpuVideoDecoderFactories::AsyncCreateSharedMemory(size_t size) { 297 void RendererGpuVideoDecoderFactories::AsyncCreateSharedMemory(size_t size) {
293 DCHECK_EQ(base::MessageLoop::current(), 298 DCHECK_EQ(base::MessageLoop::current(),
294 ChildThread::current()->message_loop()); 299 ChildThread::current()->message_loop());
295 300
296 shared_memory_segment_.reset( 301 shared_memory_segment_.reset(
297 ChildThread::current()->AllocateSharedMemory(size)); 302 ChildThread::current()->AllocateSharedMemory(size));
298 render_thread_async_waiter_.Signal(); 303 render_thread_async_waiter_.Signal();
299 } 304 }
300 305
301 scoped_refptr<base::MessageLoopProxy> 306 scoped_refptr<base::MessageLoopProxy>
302 RendererGpuVideoDecoderFactories::GetMessageLoop() { 307 RendererGpuVideoDecoderFactories::GetMessageLoop() {
303 return compositor_message_loop_; 308 return message_loop_;
304 } 309 }
305 310
306 void RendererGpuVideoDecoderFactories::Abort() { 311 void RendererGpuVideoDecoderFactories::Abort() {
307 aborted_waiter_.Signal(); 312 aborted_waiter_.Signal();
308 } 313 }
309 314
310 bool RendererGpuVideoDecoderFactories::IsAborted() { 315 bool RendererGpuVideoDecoderFactories::IsAborted() {
311 return aborted_waiter_.IsSignaled(); 316 return aborted_waiter_.IsSignaled();
312 } 317 }
313 318
319 scoped_refptr<media::GpuVideoDecoder::Factories>
320 RendererGpuVideoDecoderFactories::Clone() {
321 scoped_refptr<RendererGpuVideoDecoderFactories> factories =
322 new RendererGpuVideoDecoderFactories();
Ami GONE FROM CHROMIUM 2013/07/22 19:46:21 AFAICT you can reuse the non-trivial ctor and then
wuchengli 2013/07/23 16:29:28 Non-trivial ctor takes raw pointer of WebGraphicsContext3DCommandBufferImpl
323 factories->message_loop_ = message_loop_;
324 factories->main_message_loop_ = main_message_loop_;
325 factories->gpu_channel_host_ = gpu_channel_host_;
326 factories->context_ = context_;
327 return factories;
328 }
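
Clone() above pairs with the new default constructor: it builds an empty instance and copies in the already-acquired loops, channel host and (weak) context, so the clone can be handed to another decoder without repeating the blocking context acquisition that the non-trivial ctor performs (which, per the discussion above, also needs the raw WebGraphicsContext3DCommandBufferImpl pointer that Clone() does not have). A simplified sketch of that pattern (illustrative types only, not part of this CL):

// Sketch only: default-construct, then share the already-initialized handles.
#include <memory>

struct Loop {};     // stand-in for base::MessageLoopProxy
struct Channel {};  // stand-in for GpuChannelHost
struct Context {};  // stand-in for the weakly held 3D context

class Factories {
 public:
  Factories() = default;  // analogous to the new default ctor in this CL

  std::shared_ptr<Factories> Clone() const {
    auto clone = std::make_shared<Factories>();
    clone->loop_ = loop_;        // share the loops, don't re-create them
    clone->main_loop_ = main_loop_;
    clone->channel_ = channel_;  // share the GPU channel
    clone->context_ = context_;  // weak: may already be gone; callers check
    return clone;
  }

 private:
  std::shared_ptr<Loop> loop_;
  std::shared_ptr<Loop> main_loop_;
  std::shared_ptr<Channel> channel_;
  std::weak_ptr<Context> context_;
};

int main() {
  Factories factories;
  std::shared_ptr<Factories> clone = factories.Clone();
  return clone ? 0 : 1;
}
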
329
314 void RendererGpuVideoDecoderFactories::AsyncDestroyVideoDecodeAccelerator() { 330 void RendererGpuVideoDecoderFactories::AsyncDestroyVideoDecodeAccelerator() {
315 // OK to release because Destroy() will delete the VDA instance. 331 // OK to release because Destroy() will delete the VDA instance.
316 if (vda_) 332 if (vda_)
317 vda_.release()->Destroy(); 333 vda_.release()->Destroy();
318 } 334 }
319 335
320 } // namespace content 336 } // namespace content
