// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/media/gpu_video_decode_accelerator.h"

#include <vector>

#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/service/gpu_preferences.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "ipc/ipc_message_macros.h"
#include "ipc/ipc_message_utils.h"
#include "ipc/message_filter.h"
#include "media/base/limits.h"
#include "media/gpu/ipc/common/gpu_video_accelerator_util.h"
#include "media/gpu/ipc/common/media_messages.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"

namespace content {

namespace {
static gfx::GLContext* GetGLContext(
    const base::WeakPtr<gpu::GpuCommandBufferStub>& stub) {
  if (!stub) {
    DLOG(ERROR) << "Stub is gone; no GLContext.";
    return nullptr;
  }

  return stub->decoder()->GetGLContext();
}

static bool MakeDecoderContextCurrent(
    const base::WeakPtr<gpu::GpuCommandBufferStub>& stub) {
  if (!stub) {
    DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
    return false;
  }

  if (!stub->decoder()->MakeCurrent()) {
    DLOG(ERROR) << "Failed to MakeCurrent()";
    return false;
  }

  return true;
}

#if (defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)) || defined(OS_MACOSX)
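// Attaches |image| to level 0 of the client texture identified by
// |client_texture_id|. Returns false only if the stub has already been
// destroyed.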
static bool BindImage(const base::WeakPtr<gpu::GpuCommandBufferStub>& stub,
                      uint32_t client_texture_id,
                      uint32_t texture_target,
                      const scoped_refptr<gl::GLImage>& image,
                      bool can_bind_to_sampler) {
  if (!stub) {
    DLOG(ERROR) << "Stub is gone; won't BindImage().";
    return false;
  }

  gpu::gles2::GLES2Decoder* command_decoder = stub->decoder();
  gpu::gles2::TextureManager* texture_manager =
      command_decoder->GetContextGroup()->texture_manager();
  gpu::gles2::TextureRef* ref = texture_manager->GetTexture(client_texture_id);
  if (ref) {
    texture_manager->SetLevelImage(ref, texture_target, 0, image.get(),
                                   can_bind_to_sampler
                                       ? gpu::gles2::Texture::BOUND
                                       : gpu::gles2::Texture::UNBOUND);
  }

  return true;
}
#endif

static base::WeakPtr<gpu::gles2::GLES2Decoder> GetGLES2Decoder(
    const base::WeakPtr<gpu::GpuCommandBufferStub>& stub) {
  if (!stub) {
    DLOG(ERROR) << "Stub is gone; no GLES2Decoder.";
    return base::WeakPtr<gpu::gles2::GLES2Decoder>();
  }

  return stub->decoder()->AsWeakPtr();
}
}  // anonymous namespace

// DebugAutoLock works like AutoLock but only acquires the lock when
// DCHECK is on.
#if DCHECK_IS_ON()
typedef base::AutoLock DebugAutoLock;
#else
class DebugAutoLock {
 public:
  explicit DebugAutoLock(base::Lock&) {}
};
#endif

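// Filter that runs on the IO thread and handles
// AcceleratedVideoDecoderMsg_Decode for |owner_|'s route, so decode requests
// do not have to hop through the child thread. All other messages fall
// through to the regular routing path.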
class GpuVideoDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
 public:
  MessageFilter(GpuVideoDecodeAccelerator* owner, int32_t host_route_id)
      : owner_(owner), host_route_id_(host_route_id) {}

  void OnChannelError() override { sender_ = NULL; }

  void OnChannelClosing() override { sender_ = NULL; }

  void OnFilterAdded(IPC::Sender* sender) override { sender_ = sender; }

  void OnFilterRemoved() override {
    // This will delete |owner_| and |this|.
    owner_->OnFilterRemoved();
  }

  bool OnMessageReceived(const IPC::Message& msg) override {
    if (msg.routing_id() != host_route_id_)
      return false;

    IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg)
      IPC_MESSAGE_FORWARD(AcceleratedVideoDecoderMsg_Decode, owner_,
                          GpuVideoDecodeAccelerator::OnDecode)
      IPC_MESSAGE_UNHANDLED(return false)
    IPC_END_MESSAGE_MAP()
    return true;
  }

  bool SendOnIOThread(IPC::Message* message) {
    DCHECK(!message->is_sync());
    if (!sender_) {
      delete message;
      return false;
    }
    return sender_->Send(message);
  }

 protected:
  ~MessageFilter() override {}

 private:
  GpuVideoDecodeAccelerator* const owner_;
  const int32_t host_route_id_;
  // The sender to which this filter was added.
  IPC::Sender* sender_;
};

GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
    int32_t host_route_id,
    gpu::GpuCommandBufferStub* stub,
    const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
    : host_route_id_(host_route_id),
      stub_(stub),
      texture_target_(0),
      textures_per_buffer_(0),
      filter_removed_(true, false),
      child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
      io_task_runner_(io_task_runner),
      weak_factory_for_io_(this) {
  DCHECK(stub_);
  stub_->AddDestructionObserver(this);
  get_gl_context_cb_ = base::Bind(&GetGLContext, stub_->AsWeakPtr());
  make_context_current_cb_ =
      base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
#if (defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)) || defined(OS_MACOSX)
  bind_image_cb_ = base::Bind(&BindImage, stub_->AsWeakPtr());
#endif
  get_gles2_decoder_cb_ = base::Bind(&GetGLES2Decoder, stub_->AsWeakPtr());
}

GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
  // This class can only be self-deleted from OnWillDestroyStub(), which means
  // the VDA has already been destroyed in there.
  DCHECK(!video_decode_accelerator_);
}

// static
gpu::VideoDecodeAcceleratorCapabilities
GpuVideoDecodeAccelerator::GetCapabilities(
    const gpu::GpuPreferences& gpu_preferences) {
  return GpuVideoDecodeAcceleratorFactoryImpl::GetDecoderCapabilities(
      gpu_preferences);
}

bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
  if (!video_decode_accelerator_)
    return false;

  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_SetCdm, OnSetCdm)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
                        OnAssignPictureBuffers)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
                        OnReusePictureBuffer)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush, OnFlush)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Reset, OnReset)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Destroy, OnDestroy)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

void GpuVideoDecodeAccelerator::NotifyInitializationComplete(bool success) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_InitializationComplete(
          host_route_id_, success)))
    DLOG(ERROR)
        << "Send(AcceleratedVideoDecoderHostMsg_InitializationComplete) failed";
}

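// Asks the client for |requested_num_of_buffers| picture buffers and records
// the dimensions, textures-per-buffer count and texture target so that
// OnAssignPictureBuffers() can validate the textures it receives. Dimensions
// exceeding media::limits are reported as PLATFORM_FAILURE.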
void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
    uint32_t requested_num_of_buffers,
    uint32_t textures_per_buffer,
    const gfx::Size& dimensions,
    uint32_t texture_target) {
  if (dimensions.width() > media::limits::kMaxDimension ||
      dimensions.height() > media::limits::kMaxDimension ||
      dimensions.GetArea() > media::limits::kMaxCanvas) {
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
          host_route_id_, requested_num_of_buffers, textures_per_buffer,
          dimensions, texture_target))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
                << "failed";
  }
  texture_dimensions_ = dimensions;
  textures_per_buffer_ = textures_per_buffer;
  texture_target_ = texture_target;
}

void GpuVideoDecodeAccelerator::DismissPictureBuffer(
    int32_t picture_buffer_id) {
  // Notify client that picture buffer is now unused.
  if (!Send(new AcceleratedVideoDecoderHostMsg_DismissPictureBuffer(
          host_route_id_, picture_buffer_id))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer) "
                << "failed";
  }
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  uncleared_textures_.erase(picture_buffer_id);
}

void GpuVideoDecodeAccelerator::PictureReady(
    const media::Picture& picture) {
  // The VDA may call PictureReady() on the IO thread, but SetTextureCleared()
  // must run on the child thread. The VDA is responsible for calling
  // PictureReady() on the child thread the first time a picture buffer is
  // delivered.
  if (child_task_runner_->BelongsToCurrentThread()) {
    SetTextureCleared(picture);
  } else {
    DCHECK(io_task_runner_->BelongsToCurrentThread());
    DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
    DCHECK_EQ(0u, uncleared_textures_.count(picture.picture_buffer_id()));
  }

  if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
          host_route_id_, picture.picture_buffer_id(),
          picture.bitstream_buffer_id(), picture.visible_rect(),
          picture.allow_overlay(), picture.size_changed()))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
  }
}

void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
    int32_t bitstream_buffer_id) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed(
          host_route_id_, bitstream_buffer_id))) {
    DLOG(ERROR)
        << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) "
        << "failed";
  }
}

void GpuVideoDecodeAccelerator::NotifyFlushDone() {
  if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_)))
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed";
}

void GpuVideoDecodeAccelerator::NotifyResetDone() {
  if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_)))
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed";
}

void GpuVideoDecodeAccelerator::NotifyError(
    media::VideoDecodeAccelerator::Error error) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(
          host_route_id_, error))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
                << "failed";
  }
}

void GpuVideoDecodeAccelerator::OnWillDestroyStub() {
  // The stub is going away, so we have to stop and destroy the VDA here,
  // before returning, because the VDA may need the GL context to run and/or
  // do its cleanup. However, we cannot destroy the VDA before the IO thread
  // message filter is removed, since incoming messages cannot be serviced
  // once the VDA is gone. Nor can we simply check for the existence of the
  // VDA on the IO thread, because we do not want to synchronize the IO thread
  // with the ChildThread. So we wait here for the RemoveFilter callback and
  // destroy the VDA after it arrives, before returning.
  if (filter_) {
    stub_->channel()->RemoveFilter(filter_.get());
    filter_removed_.Wait();
  }

  stub_->channel()->RemoveRoute(host_route_id_);
  stub_->RemoveDestructionObserver(this);

  video_decode_accelerator_.reset();
  delete this;
}

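// Sends |message| through the IO-thread filter when called on the IO thread;
// otherwise it must be called on the child thread and is sent through the
// stub's channel.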
bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) {
  if (filter_ && io_task_runner_->BelongsToCurrentThread())
    return filter_->SendOnIOThread(message);
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  return stub_->channel()->Send(message);
}

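// Adds an IPC route for |host_route_id_|, creates the platform VDA through
// GpuVideoDecodeAcceleratorFactoryImpl and, if the VDA supports it, installs
// the IO-thread MessageFilter so Decode messages can be handled there.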
bool GpuVideoDecodeAccelerator::Initialize(
    const media::VideoDecodeAccelerator::Config& config) {
  DCHECK(!video_decode_accelerator_);

  if (!stub_->channel()->AddRoute(host_route_id_, stub_->stream_id(), this)) {
    DLOG(ERROR) << "Initialize(): failed to add route";
    return false;
  }

#if !defined(OS_WIN)
  // Ensure we will be able to get a GL context at all before initializing
  // non-Windows VDAs.
  if (!make_context_current_cb_.Run())
    return false;
#endif

  std::unique_ptr<GpuVideoDecodeAcceleratorFactoryImpl> vda_factory =
      GpuVideoDecodeAcceleratorFactoryImpl::CreateWithGLES2Decoder(
          get_gl_context_cb_, make_context_current_cb_, bind_image_cb_,
          get_gles2_decoder_cb_);

  if (!vda_factory) {
    LOG(ERROR) << "Failed creating the VDA factory";
    return false;
  }

  const gpu::GpuPreferences& gpu_preferences =
      stub_->channel()->gpu_channel_manager()->gpu_preferences();
  video_decode_accelerator_ =
      vda_factory->CreateVDA(this, config, gpu_preferences);
  if (!video_decode_accelerator_) {
    LOG(ERROR) << "HW video decode not available for profile " << config.profile
               << (config.is_encrypted ? " with encryption" : "");
    return false;
  }

  // Attempt to set up decoding on the IO thread, if the VDA supports it.
  if (video_decode_accelerator_->TryToSetupDecodeOnSeparateThread(
          weak_factory_for_io_.GetWeakPtr(), io_task_runner_)) {
    filter_ = new MessageFilter(this, host_route_id_);
    stub_->channel()->AddFilter(filter_.get());
  }

  return true;
}

void GpuVideoDecodeAccelerator::OnSetCdm(int cdm_id) {
  DCHECK(video_decode_accelerator_);
  video_decode_accelerator_->SetCdm(cdm_id);
}

// Runs on the IO thread if VDA::TryToSetupDecodeOnSeparateThread() succeeded,
// otherwise on the main thread.
void GpuVideoDecodeAccelerator::OnDecode(
    const media::BitstreamBuffer& bitstream_buffer) {
  DCHECK(video_decode_accelerator_);
  video_decode_accelerator_->Decode(bitstream_buffer);
}

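// Validates the client-supplied texture ids against the target, dimensions
// and textures-per-buffer requested in ProvidePictureBuffers(), then hands
// the resulting PictureBuffers to the VDA. The backing textures are tracked
// as uncleared until SetTextureCleared() runs for them.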
void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
    const std::vector<int32_t>& buffer_ids,
    const std::vector<media::PictureBuffer::TextureIds>& texture_ids) {
  if (buffer_ids.size() != texture_ids.size()) {
    NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    return;
  }

  gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
  gpu::gles2::TextureManager* texture_manager =
      command_decoder->GetContextGroup()->texture_manager();

  std::vector<media::PictureBuffer> buffers;
  std::vector<std::vector<scoped_refptr<gpu::gles2::TextureRef>>> textures;
  for (uint32_t i = 0; i < buffer_ids.size(); ++i) {
    if (buffer_ids[i] < 0) {
      DLOG(ERROR) << "Buffer id " << buffer_ids[i] << " out of range";
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    std::vector<scoped_refptr<gpu::gles2::TextureRef>> current_textures;
    media::PictureBuffer::TextureIds buffer_texture_ids = texture_ids[i];
    media::PictureBuffer::TextureIds service_ids;
    if (buffer_texture_ids.size() != textures_per_buffer_) {
      DLOG(ERROR) << "Requested " << textures_per_buffer_
                  << " textures per picture buffer, got "
                  << buffer_texture_ids.size();
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    for (size_t j = 0; j < textures_per_buffer_; j++) {
      gpu::gles2::TextureRef* texture_ref =
          texture_manager->GetTexture(buffer_texture_ids[j]);
      if (!texture_ref) {
        DLOG(ERROR) << "Failed to find texture id " << buffer_texture_ids[j];
        NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
        return;
      }
      gpu::gles2::Texture* info = texture_ref->texture();
      if (info->target() != texture_target_) {
        DLOG(ERROR) << "Texture target mismatch for texture id "
                    << buffer_texture_ids[j];
        NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
        return;
      }
      if (texture_target_ == GL_TEXTURE_EXTERNAL_OES ||
          texture_target_ == GL_TEXTURE_RECTANGLE_ARB) {
        // These textures have their dimensions defined by the underlying
        // storage; use |texture_dimensions_| for the size.
        texture_manager->SetLevelInfo(texture_ref, texture_target_, 0, GL_RGBA,
                                      texture_dimensions_.width(),
                                      texture_dimensions_.height(), 1, 0,
                                      GL_RGBA, 0, gfx::Rect());
      } else {
        // For other targets, texture dimensions should already be defined.
        GLsizei width = 0, height = 0;
        info->GetLevelSize(texture_target_, 0, &width, &height, nullptr);
        if (width != texture_dimensions_.width() ||
            height != texture_dimensions_.height()) {
          DLOG(ERROR) << "Size mismatch for texture id "
                      << buffer_texture_ids[j];
          NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
          return;
        }

        // TODO(dshwang): after moving to D3D11, remove this. crbug.com/438691
        GLenum format =
            video_decode_accelerator_.get()->GetSurfaceInternalFormat();
        if (format != GL_RGBA) {
          texture_manager->SetLevelInfo(texture_ref, texture_target_, 0,
                                        format, width, height, 1, 0, format, 0,
                                        gfx::Rect());
        }
      }
      service_ids.push_back(texture_ref->service_id());
      current_textures.push_back(texture_ref);
    }
    textures.push_back(current_textures);
    buffers.push_back(media::PictureBuffer(buffer_ids[i], texture_dimensions_,
                                           service_ids, buffer_texture_ids));
  }
  video_decode_accelerator_->AssignPictureBuffers(buffers);
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  for (uint32_t i = 0; i < buffer_ids.size(); ++i)
    uncleared_textures_[buffer_ids[i]] = textures[i];
}

void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
    int32_t picture_buffer_id) {
  DCHECK(video_decode_accelerator_);
  video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
}

void GpuVideoDecodeAccelerator::OnFlush() {
  DCHECK(video_decode_accelerator_);
  video_decode_accelerator_->Flush();
}

void GpuVideoDecodeAccelerator::OnReset() {
  DCHECK(video_decode_accelerator_);
  video_decode_accelerator_->Reset();
}

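// Handles an explicit destroy request from the host; tears down the VDA and
// deletes |this| via OnWillDestroyStub().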
void GpuVideoDecodeAccelerator::OnDestroy() {
  DCHECK(video_decode_accelerator_);
  OnWillDestroyStub();
}

void GpuVideoDecodeAccelerator::OnFilterRemoved() {
  // We're destroying; cancel all callbacks.
  weak_factory_for_io_.InvalidateWeakPtrs();
  filter_removed_.Signal();
}

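// Marks level 0 of every texture backing |picture|'s buffer as cleared the
// first time the buffer is delivered on the child thread, then stops
// tracking it.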
void GpuVideoDecodeAccelerator::SetTextureCleared(
    const media::Picture& picture) {
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  auto it = uncleared_textures_.find(picture.picture_buffer_id());
  if (it == uncleared_textures_.end())
    return;  // The texture has been cleared.

  for (auto texture_ref : it->second) {
    GLenum target = texture_ref->texture()->target();
    gpu::gles2::TextureManager* texture_manager =
        stub_->decoder()->GetContextGroup()->texture_manager();
    DCHECK(!texture_ref->texture()->IsLevelCleared(target, 0));
    texture_manager->SetLevelCleared(texture_ref.get(), target, 0, true);
  }
  uncleared_textures_.erase(it);
}

}  // namespace content