// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/media/gpu_video_decode_accelerator.h"

#include <memory>
#include <vector>

#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator_factory_impl.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/service/gpu_preferences.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "ipc/ipc_message_macros.h"
#include "ipc/ipc_message_utils.h"
#include "ipc/message_filter.h"
#include "media/base/limits.h"
#include "media/gpu/ipc/common/gpu_video_accelerator_util.h"
#include "media/gpu/ipc/common/media_messages.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"

namespace content {

namespace {
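// The helpers below are bound into callbacks that are handed to the VDA.
// Each takes a WeakPtr to the command buffer stub, so the callback degrades
// to a no-op (null/false result) if the stub has already been destroyed.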
static gfx::GLContext* GetGLContext(
    const base::WeakPtr<gpu::GpuCommandBufferStub>& stub) {
  if (!stub) {
    DLOG(ERROR) << "Stub is gone; no GLContext.";
    return nullptr;
  }

  return stub->decoder()->GetGLContext();
}

static bool MakeDecoderContextCurrent(
    const base::WeakPtr<gpu::GpuCommandBufferStub>& stub) {
  if (!stub) {
    DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
    return false;
  }

  if (!stub->decoder()->MakeCurrent()) {
    DLOG(ERROR) << "Failed to MakeCurrent()";
    return false;
  }

  return true;
}

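// BindImage() is only compiled in on platforms whose VDAs produce GLImages
// (Chrome OS on x86 and Mac). It attaches |image| to level 0 of the client's
// texture, marking it bound or unbound for sampling according to
// |can_bind_to_sampler|.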
#if (defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)) || defined(OS_MACOSX)
static bool BindImage(const base::WeakPtr<gpu::GpuCommandBufferStub>& stub,
                      uint32_t client_texture_id,
                      uint32_t texture_target,
                      const scoped_refptr<gl::GLImage>& image,
                      bool can_bind_to_sampler) {
  if (!stub) {
    DLOG(ERROR) << "Stub is gone; won't BindImage().";
    return false;
  }

  gpu::gles2::GLES2Decoder* command_decoder = stub->decoder();
  gpu::gles2::TextureManager* texture_manager =
      command_decoder->GetContextGroup()->texture_manager();
  gpu::gles2::TextureRef* ref = texture_manager->GetTexture(client_texture_id);
  if (ref) {
    texture_manager->SetLevelImage(ref, texture_target, 0, image.get(),
                                   can_bind_to_sampler
                                       ? gpu::gles2::Texture::BOUND
                                       : gpu::gles2::Texture::UNBOUND);
  }

  return true;
}
#endif

static base::WeakPtr<gpu::gles2::GLES2Decoder> GetGLES2Decoder(
    const base::WeakPtr<gpu::GpuCommandBufferStub>& stub) {
  if (!stub) {
    DLOG(ERROR) << "Stub is gone; no GLES2Decoder.";
    return base::WeakPtr<gpu::gles2::GLES2Decoder>();
  }

  return stub->decoder()->AsWeakPtr();
}
}  // anonymous namespace

// DebugAutoLock works like AutoLock but only acquires the lock when
// DCHECK is on.
#if DCHECK_IS_ON()
typedef base::AutoLock DebugAutoLock;
#else
class DebugAutoLock {
 public:
  explicit DebugAutoLock(base::Lock&) {}
};
#endif

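// MessageFilter is installed on the GPU channel when the VDA supports
// decoding on a separate thread. It intercepts Decode messages for this
// route on the IO thread and forwards them straight to the owner, so decode
// requests do not have to wait for the main (child) thread.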
class GpuVideoDecodeAccelerator::MessageFilter : public IPC::MessageFilter {
 public:
  MessageFilter(GpuVideoDecodeAccelerator* owner, int32_t host_route_id)
      : owner_(owner), host_route_id_(host_route_id) {}

  void OnChannelError() override { sender_ = nullptr; }

  void OnChannelClosing() override { sender_ = nullptr; }

  void OnFilterAdded(IPC::Sender* sender) override { sender_ = sender; }

  void OnFilterRemoved() override {
    // This will delete |owner_| and |this|.
    owner_->OnFilterRemoved();
  }

  bool OnMessageReceived(const IPC::Message& msg) override {
    if (msg.routing_id() != host_route_id_)
      return false;

    IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg)
      IPC_MESSAGE_FORWARD(AcceleratedVideoDecoderMsg_Decode, owner_,
                          GpuVideoDecodeAccelerator::OnDecode)
      IPC_MESSAGE_UNHANDLED(return false)
    IPC_END_MESSAGE_MAP()
    return true;
  }

  bool SendOnIOThread(IPC::Message* message) {
    DCHECK(!message->is_sync());
    if (!sender_) {
      delete message;
      return false;
    }
    return sender_->Send(message);
  }

 protected:
  ~MessageFilter() override {}

 private:
  GpuVideoDecodeAccelerator* const owner_;
  const int32_t host_route_id_;
  // The sender to which this filter was added. Null until OnFilterAdded() is
  // called and after the channel goes away.
  IPC::Sender* sender_ = nullptr;
};

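// The GL-related callbacks bound here are handed to the VDA factory in
// Initialize(). They are bound against stub_->AsWeakPtr() so they stop doing
// work once the stub has been destroyed.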
GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
    int32_t host_route_id,
    gpu::GpuCommandBufferStub* stub,
    const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
    : host_route_id_(host_route_id),
      stub_(stub),
      texture_target_(0),
      textures_per_buffer_(0),
      filter_removed_(true, false),
      child_task_runner_(base::ThreadTaskRunnerHandle::Get()),
      io_task_runner_(io_task_runner),
      weak_factory_for_io_(this) {
  DCHECK(stub_);
  stub_->AddDestructionObserver(this);
  get_gl_context_cb_ = base::Bind(&GetGLContext, stub_->AsWeakPtr());
  make_context_current_cb_ =
      base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
#if (defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)) || defined(OS_MACOSX)
  bind_image_cb_ = base::Bind(&BindImage, stub_->AsWeakPtr());
#endif
  get_gles2_decoder_cb_ = base::Bind(&GetGLES2Decoder, stub_->AsWeakPtr());
}

GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
  // This class can only be self-deleted from OnWillDestroyStub(), which means
  // the VDA has already been destroyed in there.
  DCHECK(!video_decode_accelerator_);
}

// static
gpu::VideoDecodeAcceleratorCapabilities
GpuVideoDecodeAccelerator::GetCapabilities(
    const gpu::GpuPreferences& gpu_preferences) {
  return GpuVideoDecodeAcceleratorFactoryImpl::GetDecoderCapabilities(
      gpu_preferences);
}

bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
  if (!video_decode_accelerator_)
    return false;

  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_SetCdm, OnSetCdm)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
                        OnAssignPictureBuffers)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
                        OnReusePictureBuffer)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush, OnFlush)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Reset, OnReset)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Destroy, OnDestroy)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

void GpuVideoDecodeAccelerator::NotifyInitializationComplete(bool success) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_InitializationComplete(
          host_route_id_, success)))
    DLOG(ERROR)
        << "Send(AcceleratedVideoDecoderHostMsg_InitializationComplete) failed";
}

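// Rejects overly large picture dimensions before asking the client to
// allocate buffers; anything beyond media::limits is treated as a platform
// failure.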
void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
    uint32_t requested_num_of_buffers,
    uint32_t textures_per_buffer,
    const gfx::Size& dimensions,
    uint32_t texture_target) {
  if (dimensions.width() > media::limits::kMaxDimension ||
      dimensions.height() > media::limits::kMaxDimension ||
      dimensions.GetArea() > media::limits::kMaxCanvas) {
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
          host_route_id_, requested_num_of_buffers, textures_per_buffer,
          dimensions, texture_target))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
                << "failed";
  }
  texture_dimensions_ = dimensions;
  textures_per_buffer_ = textures_per_buffer;
  texture_target_ = texture_target;
}

void GpuVideoDecodeAccelerator::DismissPictureBuffer(
    int32_t picture_buffer_id) {
  // Notify client that picture buffer is now unused.
  if (!Send(new AcceleratedVideoDecoderHostMsg_DismissPictureBuffer(
          host_route_id_, picture_buffer_id))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer) "
                << "failed";
  }
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  uncleared_textures_.erase(picture_buffer_id);
}

void GpuVideoDecodeAccelerator::PictureReady(
    const media::Picture& picture) {
  // The VDA may call PictureReady on the IO thread, but SetTextureCleared()
  // must run on the child thread. The VDA is responsible for calling
  // PictureReady on the child thread the first time a picture buffer is
  // delivered.
  if (child_task_runner_->BelongsToCurrentThread()) {
    SetTextureCleared(picture);
  } else {
    DCHECK(io_task_runner_->BelongsToCurrentThread());
    DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
    DCHECK_EQ(0u, uncleared_textures_.count(picture.picture_buffer_id()));
  }

  if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
          host_route_id_, picture.picture_buffer_id(),
          picture.bitstream_buffer_id(), picture.visible_rect(),
          picture.allow_overlay(), picture.size_changed()))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
  }
}

void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
    int32_t bitstream_buffer_id) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed(
          host_route_id_, bitstream_buffer_id))) {
    DLOG(ERROR)
        << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) "
        << "failed";
  }
}

void GpuVideoDecodeAccelerator::NotifyFlushDone() {
  if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_)))
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed";
}

void GpuVideoDecodeAccelerator::NotifyResetDone() {
  if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_)))
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed";
}

void GpuVideoDecodeAccelerator::NotifyError(
    media::VideoDecodeAccelerator::Error error) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(
          host_route_id_, error))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
                << "failed";
  }
}

void GpuVideoDecodeAccelerator::OnWillDestroyStub() {
  // The stub is going away, so we have to stop and destroy the VDA here,
  // before returning, because the VDA may need the GL context to run and/or
  // do its cleanup. However, we cannot destroy the VDA before the IO-thread
  // message filter is removed, since we cannot service incoming messages once
  // the VDA is gone. We also cannot simply check for the existence of the VDA
  // on the IO thread, because we don't want to synchronize the IO thread with
  // the ChildThread. So we wait here for the RemoveFilter callback and
  // destroy the VDA after it arrives, before returning.
  if (filter_) {
    stub_->channel()->RemoveFilter(filter_.get());
    filter_removed_.Wait();
  }

  stub_->channel()->RemoveRoute(host_route_id_);
  stub_->RemoveDestructionObserver(this);

  video_decode_accelerator_.reset();
  delete this;
}

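// Routes outgoing messages through the IO-thread filter when called on the
// IO thread; otherwise they must be sent from the child thread via the
// channel.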
bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) {
  if (filter_ && io_task_runner_->BelongsToCurrentThread())
    return filter_->SendOnIOThread(message);
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  return stub_->channel()->Send(message);
}

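// Adds the IPC route for this decoder, creates the VDA through the factory
// (passing it the GL callbacks bound in the constructor), and, if the VDA can
// decode on a separate thread, installs the IO-thread MessageFilter.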
bool GpuVideoDecodeAccelerator::Initialize(
    const media::VideoDecodeAccelerator::Config& config) {
  DCHECK(!video_decode_accelerator_);

  if (!stub_->channel()->AddRoute(host_route_id_, stub_->stream_id(), this)) {
    DLOG(ERROR) << "Initialize(): failed to add route";
    return false;
  }

#if !defined(OS_WIN)
  // Ensure we will be able to get a GL context at all before initializing
  // non-Windows VDAs.
  if (!make_context_current_cb_.Run())
    return false;
#endif

  std::unique_ptr<GpuVideoDecodeAcceleratorFactoryImpl> vda_factory =
      GpuVideoDecodeAcceleratorFactoryImpl::CreateWithGLES2Decoder(
          get_gl_context_cb_, make_context_current_cb_, bind_image_cb_,
          get_gles2_decoder_cb_);

  if (!vda_factory) {
    LOG(ERROR) << "Failed creating the VDA factory";
    return false;
  }

  const gpu::GpuPreferences& gpu_preferences =
      stub_->channel()->gpu_channel_manager()->gpu_preferences();
  video_decode_accelerator_ =
      vda_factory->CreateVDA(this, config, gpu_preferences);
  if (!video_decode_accelerator_) {
    LOG(ERROR) << "HW video decode not available for profile " << config.profile
               << (config.is_encrypted ? " with encryption" : "");
    return false;
  }

  // Attempt to set up decoding on the IO thread, if the VDA supports it.
  if (video_decode_accelerator_->TryToSetupDecodeOnSeparateThread(
          weak_factory_for_io_.GetWeakPtr(), io_task_runner_)) {
    filter_ = new MessageFilter(this, host_route_id_);
    stub_->channel()->AddFilter(filter_.get());
  }

  return true;
}

void GpuVideoDecodeAccelerator::OnSetCdm(int cdm_id) {
  DCHECK(video_decode_accelerator_);
  video_decode_accelerator_->SetCdm(cdm_id);
}

// Runs on IO thread if VDA::TryToSetupDecodeOnSeparateThread() succeeded,
// otherwise on the main thread.
void GpuVideoDecodeAccelerator::OnDecode(
    const media::BitstreamBuffer& bitstream_buffer) {
  DCHECK(video_decode_accelerator_);
  video_decode_accelerator_->Decode(bitstream_buffer);
}

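// Validates the textures supplied by the client (count per buffer, existence,
// target and size match with the earlier ProvidePictureBuffers() request)
// before wrapping them in media::PictureBuffers and handing them to the VDA.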
void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
    const std::vector<int32_t>& buffer_ids,
    const std::vector<media::PictureBuffer::TextureIds>& texture_ids) {
  if (buffer_ids.size() != texture_ids.size()) {
    NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    return;
  }

  gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
  gpu::gles2::TextureManager* texture_manager =
      command_decoder->GetContextGroup()->texture_manager();

  std::vector<media::PictureBuffer> buffers;
  std::vector<std::vector<scoped_refptr<gpu::gles2::TextureRef>>> textures;
  for (uint32_t i = 0; i < buffer_ids.size(); ++i) {
    if (buffer_ids[i] < 0) {
      DLOG(ERROR) << "Buffer id " << buffer_ids[i] << " out of range";
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    std::vector<scoped_refptr<gpu::gles2::TextureRef>> current_textures;
    media::PictureBuffer::TextureIds buffer_texture_ids = texture_ids[i];
    media::PictureBuffer::TextureIds service_ids;
    if (buffer_texture_ids.size() != textures_per_buffer_) {
      DLOG(ERROR) << "Requested " << textures_per_buffer_
                  << " textures per picture buffer, got "
                  << buffer_texture_ids.size();
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    for (size_t j = 0; j < textures_per_buffer_; j++) {
      gpu::gles2::TextureRef* texture_ref =
          texture_manager->GetTexture(buffer_texture_ids[j]);
      if (!texture_ref) {
        DLOG(ERROR) << "Failed to find texture id " << buffer_texture_ids[j];
        NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
        return;
      }
      gpu::gles2::Texture* info = texture_ref->texture();
      if (info->target() != texture_target_) {
        DLOG(ERROR) << "Texture target mismatch for texture id "
                    << buffer_texture_ids[j];
        NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
        return;
      }
      if (texture_target_ == GL_TEXTURE_EXTERNAL_OES ||
          texture_target_ == GL_TEXTURE_RECTANGLE_ARB) {
        // These textures have their dimensions defined by the underlying
        // storage, so use |texture_dimensions_| for their size.
        texture_manager->SetLevelInfo(texture_ref, texture_target_, 0, GL_RGBA,
                                      texture_dimensions_.width(),
                                      texture_dimensions_.height(), 1, 0,
                                      GL_RGBA, 0, gfx::Rect());
      } else {
        // For other targets, texture dimensions should already be defined.
        GLsizei width = 0, height = 0;
        info->GetLevelSize(texture_target_, 0, &width, &height, nullptr);
        if (width != texture_dimensions_.width() ||
            height != texture_dimensions_.height()) {
          DLOG(ERROR) << "Size mismatch for texture id "
                      << buffer_texture_ids[j];
          NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
          return;
        }

        // TODO(dshwang): after moving to D3D11, remove this. crbug.com/438691
        GLenum format =
            video_decode_accelerator_->GetSurfaceInternalFormat();
        if (format != GL_RGBA) {
          texture_manager->SetLevelInfo(texture_ref, texture_target_, 0,
                                        format, width, height, 1, 0, format,
                                        0, gfx::Rect());
        }
      }
      service_ids.push_back(texture_ref->service_id());
      current_textures.push_back(texture_ref);
    }
    textures.push_back(current_textures);
    buffers.push_back(media::PictureBuffer(buffer_ids[i], texture_dimensions_,
                                           service_ids, buffer_texture_ids));
  }
  {
    DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
    for (uint32_t i = 0; i < buffer_ids.size(); ++i)
      uncleared_textures_[buffer_ids[i]] = textures[i];
  }
  video_decode_accelerator_->AssignPictureBuffers(buffers);
}

void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
    int32_t picture_buffer_id) {
  DCHECK(video_decode_accelerator_);
  video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
}

void GpuVideoDecodeAccelerator::OnFlush() {
  DCHECK(video_decode_accelerator_);
  video_decode_accelerator_->Flush();
}

void GpuVideoDecodeAccelerator::OnReset() {
  DCHECK(video_decode_accelerator_);
  video_decode_accelerator_->Reset();
}

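// Tears down the decoder; OnWillDestroyStub() deletes |this|.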
void GpuVideoDecodeAccelerator::OnDestroy() {
  DCHECK(video_decode_accelerator_);
  OnWillDestroyStub();
}

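// Called once the MessageFilter has been removed from the channel; signals
// OnWillDestroyStub(), which is blocked waiting for it on the child thread.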
void GpuVideoDecodeAccelerator::OnFilterRemoved() {
  // We're destroying; cancel all callbacks.
  weak_factory_for_io_.InvalidateWeakPtrs();
  filter_removed_.Signal();
}

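// Marks level 0 of each texture backing |picture|'s buffer as cleared in the
// texture manager, then drops the buffer from the uncleared-textures
// bookkeeping map. Must run on the child thread.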
void GpuVideoDecodeAccelerator::SetTextureCleared(
    const media::Picture& picture) {
  DCHECK(child_task_runner_->BelongsToCurrentThread());
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  auto it = uncleared_textures_.find(picture.picture_buffer_id());
  if (it == uncleared_textures_.end())
    return;  // The texture has been cleared.

  for (const auto& texture_ref : it->second) {
    GLenum target = texture_ref->texture()->target();
    gpu::gles2::TextureManager* texture_manager =
        stub_->decoder()->GetContextGroup()->texture_manager();
    DCHECK(!texture_ref->texture()->IsLevelCleared(target, 0));
    texture_manager->SetLevelCleared(texture_ref.get(), target, 0, true);
  }
  uncleared_textures_.erase(it);
}

}  // namespace content