| OLD | NEW |
| (Empty) |
| 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "content/common/gpu/media/android_video_decode_accelerator.h" | |
| 6 | |
| 7 #include <stddef.h> | |
| 8 | |
| 9 #include <memory> | |
| 10 | |
| 11 #include "base/android/build_info.h" | |
| 12 #include "base/auto_reset.h" | |
| 13 #include "base/bind.h" | |
| 14 #include "base/bind_helpers.h" | |
| 15 #include "base/callback_helpers.h" | |
| 16 #include "base/command_line.h" | |
| 17 #include "base/lazy_instance.h" | |
| 18 #include "base/logging.h" | |
| 19 #include "base/message_loop/message_loop.h" | |
| 20 #include "base/metrics/histogram.h" | |
| 21 #include "base/task_runner_util.h" | |
| 22 #include "base/threading/thread_checker.h" | |
| 23 #include "base/trace_event/trace_event.h" | |
| 24 #include "content/common/gpu/media/android_copying_backing_strategy.h" | |
| 25 #include "content/common/gpu/media/android_deferred_rendering_backing_strategy.h
" | |
| 26 #include "content/common/gpu/media/avda_return_on_failure.h" | |
| 27 #include "content/common/gpu/media/shared_memory_region.h" | |
| 28 #include "gpu/command_buffer/service/gles2_cmd_decoder.h" | |
| 29 #include "gpu/command_buffer/service/mailbox_manager.h" | |
| 30 #include "gpu/ipc/service/gpu_channel.h" | |
| 31 #include "media/base/android/media_codec_bridge.h" | |
| 32 #include "media/base/android/media_codec_util.h" | |
| 33 #include "media/base/bind_to_current_loop.h" | |
| 34 #include "media/base/bitstream_buffer.h" | |
| 35 #include "media/base/limits.h" | |
| 36 #include "media/base/media.h" | |
| 37 #include "media/base/timestamp_constants.h" | |
| 38 #include "media/base/video_decoder_config.h" | |
| 39 #include "media/video/picture.h" | |
| 40 #include "ui/gl/android/scoped_java_surface.h" | |
| 41 #include "ui/gl/android/surface_texture.h" | |
| 42 #include "ui/gl/gl_bindings.h" | |
| 43 | |
| 44 #if defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS) | |
| 45 #include "media/mojo/services/mojo_cdm_service.h" | |
| 46 #endif | |
| 47 | |
// Logs |error_message| and reports |error_code| to the client via PostError().
// A macro (rather than a function) so DLOG records the call site's file/line.
#define POST_ERROR(error_code, error_message)                        \
  do {                                                               \
    DLOG(ERROR) << error_message;                                    \
    PostError(FROM_HERE, media::VideoDecodeAccelerator::error_code); \
  } while (0)

namespace content {

// NOTE(review): presumably one spare picture buffer beyond the pipeline's
// maximum in-flight frames — confirm against the backing strategies.
enum { kNumPictureBuffers = media::limits::kMaxVideoFrames + 1 };

// Max number of bitstreams notified to the client with
// NotifyEndOfBitstreamBuffer() before getting output from the bitstream.
enum { kMaxBitstreamsNotifiedInAdvance = 32 };
| 61 | |
// MediaCodec is only guaranteed to support baseline, but some devices may
// support others. Advertise support for all H264 profiles and let the
// MediaCodec fail when decoding if it's not actually supported. It's assumed
// that consumers won't have software fallback for H264 on Android anyway.
static const media::VideoCodecProfile kSupportedH264Profiles[] = {
  media::H264PROFILE_BASELINE,
  media::H264PROFILE_MAIN,
  media::H264PROFILE_EXTENDED,
  media::H264PROFILE_HIGH,
  media::H264PROFILE_HIGH10PROFILE,
  media::H264PROFILE_HIGH422PROFILE,
  media::H264PROFILE_HIGH444PREDICTIVEPROFILE,
  media::H264PROFILE_SCALABLEBASELINE,
  media::H264PROFILE_SCALABLEHIGH,
  media::H264PROFILE_STEREOHIGH,
  media::H264PROFILE_MULTIVIEWHIGH
};
| 79 | |
| 80 // Because MediaCodec is thread-hostile (must be poked on a single thread) and | |
| 81 // has no callback mechanism (b/11990118), we must drive it by polling for | |
| 82 // complete frames (and available input buffers, when the codec is fully | |
| 83 // saturated). This function defines the polling delay. The value used is an | |
| 84 // arbitrary choice that trades off CPU utilization (spinning) against latency. | |
| 85 // Mirrors android_video_encode_accelerator.cc:EncodePollDelay(). | |
| 86 static inline const base::TimeDelta DecodePollDelay() { | |
| 87 // An alternative to this polling scheme could be to dedicate a new thread | |
| 88 // (instead of using the ChildThread) to run the MediaCodec, and make that | |
| 89 // thread use the timeout-based flavor of MediaCodec's dequeue methods when it | |
| 90 // believes the codec should complete "soon" (e.g. waiting for an input | |
| 91 // buffer, or waiting for a picture when it knows enough complete input | |
| 92 // pictures have been fed to saturate any internal buffering). This is | |
| 93 // speculative and it's unclear that this would be a win (nor that there's a | |
| 94 // reasonably device-agnostic way to fill in the "believes" above). | |
| 95 return base::TimeDelta::FromMilliseconds(10); | |
| 96 } | |
| 97 | |
| 98 static inline const base::TimeDelta NoWaitTimeOut() { | |
| 99 return base::TimeDelta::FromMicroseconds(0); | |
| 100 } | |
| 101 | |
| 102 static inline const base::TimeDelta IdleTimerTimeOut() { | |
| 103 return base::TimeDelta::FromSeconds(1); | |
| 104 } | |
| 105 | |
| 106 // Time between when we notice an error, and when we actually notify somebody. | |
| 107 // This is to prevent codec errors caused by SurfaceView fullscreen transitions | |
| 108 // from breaking the pipeline, if we're about to be reset anyway. | |
| 109 static inline const base::TimeDelta ErrorPostingDelay() { | |
| 110 return base::TimeDelta::FromSeconds(2); | |
| 111 } | |
| 112 | |
// For RecordFormatChangedMetric. Encodes whether output arrived without a
// preceding OUTPUT_FORMAT_CHANGED notification from MediaCodec.
enum FormatChangedValue {
  CodecInitialized = false,
  MissingFormatChanged = true
};

// Records |value| into the Media.AVDA.MissingFormatChanged boolean histogram.
// |!!value| coerces the enum to a strict bool for the histogram macro.
static inline void RecordFormatChangedMetric(FormatChangedValue value) {
  UMA_HISTOGRAM_BOOLEAN("Media.AVDA.MissingFormatChanged", !!value);
}
| 122 | |
// Handle OnFrameAvailable callbacks safely. Since they occur asynchronously,
// we take care that the AVDA that wants them still exists. A WeakPtr to
// the AVDA would be preferable, except that OnFrameAvailable callbacks can
// occur off the gpu main thread. We also can't guarantee when the
// SurfaceTexture will quit sending callbacks to coordinate with the
// destruction of the AVDA, so we have a separate object that the cb can own.
class AndroidVideoDecodeAccelerator::OnFrameAvailableHandler
    : public base::RefCountedThreadSafe<OnFrameAvailableHandler> {
 public:
  // We do not retain ownership of |owner|. It must remain valid until
  // after ClearOwner() is called. This will register with
  // |surface_texture| to receive OnFrameAvailable callbacks.
  OnFrameAvailableHandler(
      AndroidVideoDecodeAccelerator* owner,
      const scoped_refptr<gfx::SurfaceTexture>& surface_texture)
      : owner_(owner) {
    // Note that the callback owns a strong ref to us.
    surface_texture->SetFrameAvailableCallbackOnAnyThread(
        base::Bind(&OnFrameAvailableHandler::OnFrameAvailable,
                   scoped_refptr<OnFrameAvailableHandler>(this)));
  }

  // Forget about our owner, which is required before one deletes it.
  // No further callbacks will happen once this completes.
  void ClearOwner() {
    base::AutoLock lock(lock_);
    // No callback can happen until we release the lock.
    owner_ = nullptr;
  }

  // Call back into our owner if it hasn't been deleted. May be invoked on
  // any thread, hence the lock around |owner_|.
  void OnFrameAvailable() {
    base::AutoLock auto_lock(lock_);
    // |owner_| can't be deleted while we have the lock.
    if (owner_)
      owner_->OnFrameAvailable();
  }

 private:
  friend class base::RefCountedThreadSafe<OnFrameAvailableHandler>;
  virtual ~OnFrameAvailableHandler() {}

  // Protects changes to owner_.
  base::Lock lock_;

  // AVDA that wants the OnFrameAvailable callback. Not owned; must be
  // cleared via ClearOwner() before the AVDA is destroyed.
  AndroidVideoDecodeAccelerator* owner_;

  DISALLOW_COPY_AND_ASSIGN(OnFrameAvailableHandler);
};
| 173 | |
// Helper class to share an IO timer for DoIOTask() execution; prevents each
// AVDA instance from starting its own high frequency timer. The intuition
// behind this is that, if we're waiting for long enough, then either (a)
// MediaCodec is broken or (b) MediaCodec is waiting on us to change state
// (e.g., get new demuxed data / get a free picture buffer / return an output
// buffer to MediaCodec). This is inherently a race, since we don't know if
// MediaCodec is broken or just slow. Since the MediaCodec API doesn't let
// us wait on MediaCodec state changes prior to L, we more or less have to
// time out or keep polling forever in some common cases.
//
// All methods are thread-checked to a single thread (see |thread_checker_|).
class AVDATimerManager {
 public:
  // Make sure that the construction thread is started for |avda_instance|.
  // Starts the shared thread lazily when the first instance registers.
  bool StartThread(AndroidVideoDecodeAccelerator* avda_instance) {
    DCHECK(thread_checker_.CalledOnValidThread());

    if (thread_avda_instances_.empty()) {
      if (!construction_thread_.Start()) {
        LOG(ERROR) << "Failed to start construction thread.";
        return false;
      }
    }

    thread_avda_instances_.insert(avda_instance);
    return true;
  }

  // |avda_instance| will no longer need the construction thread. Stop the
  // thread if this is the last instance.
  void StopThread(AndroidVideoDecodeAccelerator* avda_instance) {
    DCHECK(thread_checker_.CalledOnValidThread());

    thread_avda_instances_.erase(avda_instance);
    if (thread_avda_instances_.empty())
      construction_thread_.Stop();
  }

  // Request periodic callback of |avda_instance|->DoIOTask(). Does nothing if
  // the instance is already registered and the timer started. The first request
  // will start the repeating timer on an interval of DecodePollDelay().
  // May be called reentrantly from DoIOTask() while RunTimer() iterates;
  // std::set insertion does not invalidate the iteration.
  void StartTimer(AndroidVideoDecodeAccelerator* avda_instance) {
    DCHECK(thread_checker_.CalledOnValidThread());

    timer_avda_instances_.insert(avda_instance);

    // If the timer is running, StopTimer() might have been called earlier, if
    // so remove the instance from the pending erasures.
    if (timer_running_)
      pending_erase_.erase(avda_instance);

    if (io_timer_.IsRunning())
      return;
    io_timer_.Start(FROM_HERE, DecodePollDelay(), this,
                    &AVDATimerManager::RunTimer);
  }

  // Stop callbacks to |avda_instance|->DoIOTask(). Does nothing if the instance
  // is not registered. If there are no instances left, the repeating timer will
  // be stopped.
  void StopTimer(AndroidVideoDecodeAccelerator* avda_instance) {
    DCHECK(thread_checker_.CalledOnValidThread());

    // If the timer is running, defer erasures to avoid iterator invalidation.
    if (timer_running_) {
      pending_erase_.insert(avda_instance);
      return;
    }

    timer_avda_instances_.erase(avda_instance);
    if (timer_avda_instances_.empty())
      io_timer_.Stop();
  }

  // Eventually, we should run the timer on this thread. For now, we just keep
  // it as a convenience for construction.
  scoped_refptr<base::SingleThreadTaskRunner> ConstructionTaskRunner() {
    DCHECK(thread_checker_.CalledOnValidThread());
    return construction_thread_.task_runner();
  }

 private:
  friend struct base::DefaultLazyInstanceTraits<AVDATimerManager>;

  AVDATimerManager() : construction_thread_("AVDAThread") {}
  // Never destroyed: the manager lives in a leaky LazyInstance.
  ~AVDATimerManager() { NOTREACHED(); }

  // Timer tick: pumps DoIOTask() on every registered instance, then applies
  // any StopTimer() requests that arrived mid-iteration.
  void RunTimer() {
    {
      // Call out to all AVDA instances, some of which may attempt to remove
      // themselves from the list during this operation; those removals will be
      // deferred until after all iterations are complete.
      base::AutoReset<bool> scoper(&timer_running_, true);
      for (auto* avda : timer_avda_instances_)
        avda->DoIOTask(false);
    }

    // Take care of any deferred erasures. |timer_running_| is false here, so
    // StopTimer() erases directly (and may stop the timer on the last one).
    for (auto* avda : pending_erase_)
      StopTimer(avda);
    pending_erase_.clear();

    // TODO(dalecurtis): We may want to consider chunking this if task execution
    // takes too long for the combined timer.
  }

  // All AVDA instances that would like us to poll DoIOTask.
  std::set<AndroidVideoDecodeAccelerator*> timer_avda_instances_;

  // All AVDA instances that might like to use the construction thread.
  std::set<AndroidVideoDecodeAccelerator*> thread_avda_instances_;

  // Since we can't delete while iterating when using a set, defer erasure until
  // after iteration complete.
  bool timer_running_ = false;
  std::set<AndroidVideoDecodeAccelerator*> pending_erase_;

  // Repeating timer responsible for draining pending IO to the codecs.
  base::RepeatingTimer io_timer_;

  base::Thread construction_thread_;

  base::ThreadChecker thread_checker_;

  DISALLOW_COPY_AND_ASSIGN(AVDATimerManager);
};
| 298 | |
// Process-wide timer/thread manager shared by all AVDA instances. Leaky on
// purpose: the destructor is NOTREACHED() (see AVDATimerManager).
static base::LazyInstance<AVDATimerManager>::Leaky g_avda_timer =
    LAZY_INSTANCE_INITIALIZER;
| 301 | |
// Out-of-line empty ctor/dtor for the codec-configuration parameter bundle.
// NOTE(review): presumably ref-counted (codec_config_ is assigned from a raw
// `new` in Initialize()) — confirm against the header.
AndroidVideoDecodeAccelerator::CodecConfig::CodecConfig() {}

AndroidVideoDecodeAccelerator::CodecConfig::~CodecConfig() {}
| 305 | |
// Constructs an uninitialized decoder. |make_context_current_cb| and
// |get_gles2_decoder_cb| are required; Initialize() fails if either is null.
// No work happens here beyond member initialization.
AndroidVideoDecodeAccelerator::AndroidVideoDecodeAccelerator(
    const MakeGLContextCurrentCallback& make_context_current_cb,
    const GetGLES2DecoderCallback& get_gles2_decoder_cb)
    : client_(NULL),
      make_context_current_cb_(make_context_current_cb),
      get_gles2_decoder_cb_(get_gles2_decoder_cb),
      is_encrypted_(false),
      state_(NO_ERROR),
      picturebuffers_requested_(false),
      drain_type_(DRAIN_TYPE_NONE),
      media_drm_bridge_cdm_context_(nullptr),
      cdm_registration_id_(0),
      // -1 means no input buffer is held over from a MEDIA_CODEC_NO_KEY
      // failure (see QueueInput()).
      pending_input_buf_index_(-1),
      error_sequence_token_(0),
      defer_errors_(false),
      deferred_initialization_pending_(false),
      weak_this_factory_(this) {}
| 323 | |
// Unregisters this instance from the shared timer manager and, when built
// with mojo media, unregisters from the CDM it registered with in SetCdm().
AndroidVideoDecodeAccelerator::~AndroidVideoDecodeAccelerator() {
  DCHECK(thread_checker_.CalledOnValidThread());
  g_avda_timer.Pointer()->StopTimer(this);
  g_avda_timer.Pointer()->StopThread(this);

#if defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
  // No CDM was ever attached; nothing to unregister.
  if (!media_drm_bridge_cdm_context_)
    return;

  DCHECK(cdm_registration_id_);
  media_drm_bridge_cdm_context_->UnregisterPlayer(cdm_registration_id_);
#endif  // defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
}
| 337 | |
// Synchronous part of VDA initialization: validates |config| and the GL
// callbacks, selects a backing strategy, creates the output surface, starts
// the shared construction thread, and then either configures MediaCodec now
// (no deferred init) or schedules/defers configuration. Returns false on any
// validation or setup failure; |client| is notified asynchronously for the
// deferred paths.
bool AndroidVideoDecodeAccelerator::Initialize(const Config& config,
                                               Client* client) {
  DCHECK(!media_codec_);
  DCHECK(thread_checker_.CalledOnValidThread());
  TRACE_EVENT0("media", "AVDA::Initialize");

  DVLOG(1) << __FUNCTION__ << ": " << config.AsHumanReadableString();

  if (make_context_current_cb_.is_null() || get_gles2_decoder_cb_.is_null()) {
    NOTREACHED() << "GL callbacks are required for this VDA";
    return false;
  }

  if (config.output_mode != Config::OutputMode::ALLOCATE) {
    NOTREACHED() << "Only ALLOCATE OutputMode is supported by this VDA";
    return false;
  }

  DCHECK(client);
  client_ = client;
  codec_config_ = new CodecConfig();
  codec_config_->codec_ = VideoCodecProfileToVideoCodec(config.profile);
  codec_config_->initial_expected_coded_size_ =
      config.initial_expected_coded_size;
  is_encrypted_ = config.is_encrypted;

  bool profile_supported = codec_config_->codec_ == media::kCodecVP8 ||
                           codec_config_->codec_ == media::kCodecVP9 ||
                           codec_config_->codec_ == media::kCodecH264;

  // We signalled that we support deferred initialization, so see if the client
  // does also.
  deferred_initialization_pending_ = config.is_deferred_initialization_allowed;

  if (!profile_supported) {
    LOG(ERROR) << "Unsupported profile: " << config.profile;
    return false;
  }

  // For encrypted streams we postpone configuration until MediaCrypto is
  // available.
  DCHECK(!is_encrypted_ || deferred_initialization_pending_);

  // Only use MediaCodec for VP8/9 if it's likely backed by hardware
  // or if the stream is encrypted.
  if ((codec_config_->codec_ == media::kCodecVP8 ||
       codec_config_->codec_ == media::kCodecVP9) &&
      !is_encrypted_ &&
      media::VideoCodecBridge::IsKnownUnaccelerated(
          codec_config_->codec_, media::MEDIA_CODEC_DECODER)) {
    DVLOG(1) << "Initialization failed: "
             << (codec_config_->codec_ == media::kCodecVP8 ? "vp8" : "vp9")
             << " is not hardware accelerated";
    return false;
  }

  auto gles_decoder = get_gles2_decoder_cb_.Run();
  if (!gles_decoder) {
    LOG(ERROR) << "Failed to get gles2 decoder instance.";
    return false;
  }

  const gpu::GpuPreferences& gpu_preferences =
      gles_decoder->GetContextGroup()->gpu_preferences();

  if (UseDeferredRenderingStrategy(gpu_preferences)) {
    // TODO(liberato, watk): Figure out what we want to do about zero copy for
    // fullscreen external SurfaceView in WebView. http://crbug.com/582170.
    DCHECK(!gles_decoder->GetContextGroup()->mailbox_manager()->UsesSync());
    DVLOG(1) << __FUNCTION__ << ", using deferred rendering strategy.";
    strategy_.reset(new AndroidDeferredRenderingBackingStrategy(this));
  } else {
    DVLOG(1) << __FUNCTION__ << ", using copy back strategy.";
    strategy_.reset(new AndroidCopyingBackingStrategy(this));
  }

  if (!make_context_current_cb_.Run()) {
    LOG(ERROR) << "Failed to make this decoder's GL context current.";
    return false;
  }

  codec_config_->surface_ = strategy_->Initialize(config.surface_id);
  if (codec_config_->surface_.IsEmpty()) {
    LOG(ERROR) << "Failed to initialize the backing strategy. The returned "
                  "Java surface is empty.";
    return false;
  }

  // TODO(watk,liberato): move this into the strategy.
  // A SurfaceTexture is only present for some strategies; when present, hook
  // up the thread-safe OnFrameAvailable forwarding.
  scoped_refptr<gfx::SurfaceTexture> surface_texture =
      strategy_->GetSurfaceTexture();
  if (surface_texture) {
    on_frame_available_handler_ =
        new OnFrameAvailableHandler(this, surface_texture);
  }

  // Start the thread for async configuration, even if we don't need it now.
  // ResetCodecState might rebuild the codec later, for example.
  if (!g_avda_timer.Pointer()->StartThread(this)) {
    LOG(ERROR) << "Failed to start thread for AVDA timer";
    return false;
  }

  // If we are encrypted, then we aren't able to create the codec yet.
  // Codec creation resumes in OnMediaCryptoReady() after SetCdm().
  if (is_encrypted_)
    return true;

  if (deferred_initialization_pending_) {
    ConfigureMediaCodecAsynchronously();
    return true;
  }

  // If the client doesn't support deferred initialization (WebRTC), then we
  // should complete it now and return a meaningful result.
  return ConfigureMediaCodecSynchronously();
}
| 454 | |
// Attaches the CDM identified by |cdm_id| for encrypted playback. Registers
// key-added and media-crypto-ready callbacks; codec configuration (and the
// NotifyInitializationComplete() signal) is deferred until OnMediaCryptoReady()
// fires. Only functional when built with ENABLE_MOJO_MEDIA_IN_GPU_PROCESS;
// otherwise reports initialization failure immediately.
void AndroidVideoDecodeAccelerator::SetCdm(int cdm_id) {
  DVLOG(2) << __FUNCTION__ << ": " << cdm_id;

#if defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
  DCHECK(client_) << "SetCdm() must be called after Initialize().";

  // Replacing an already-attached CDM is unsupported; fail initialization.
  if (media_drm_bridge_cdm_context_) {
    NOTREACHED() << "We do not support resetting CDM.";
    NotifyInitializationComplete(false);
    return;
  }

  // Store the CDM to hold a reference to it.
  cdm_for_reference_holding_only_ = media::MojoCdmService::LegacyGetCdm(cdm_id);
  DCHECK(cdm_for_reference_holding_only_);

  // On Android platform the CdmContext must be a MediaDrmBridgeCdmContext.
  media_drm_bridge_cdm_context_ = static_cast<media::MediaDrmBridgeCdmContext*>(
      cdm_for_reference_holding_only_->GetCdmContext());
  DCHECK(media_drm_bridge_cdm_context_);

  // Register CDM callbacks. The callbacks registered will be posted back to
  // this thread via BindToCurrentLoop.

  // Since |this| holds a reference to the |cdm_|, by the time the CDM is
  // destructed, UnregisterPlayer() must have been called and |this| has been
  // destructed as well. So the |cdm_unset_cb| will never have a chance to be
  // called.
  // TODO(xhwang): Remove |cdm_unset_cb| after it's not used on all platforms.
  cdm_registration_id_ = media_drm_bridge_cdm_context_->RegisterPlayer(
      media::BindToCurrentLoop(
          base::Bind(&AndroidVideoDecodeAccelerator::OnKeyAdded,
                     weak_this_factory_.GetWeakPtr())),
      base::Bind(&base::DoNothing));

  media_drm_bridge_cdm_context_->SetMediaCryptoReadyCB(media::BindToCurrentLoop(
      base::Bind(&AndroidVideoDecodeAccelerator::OnMediaCryptoReady,
                 weak_this_factory_.GetWeakPtr())));

  // Postpone NotifyInitializationComplete() call till we create the MediaCodec
  // after OnMediaCryptoReady().
#else

  NOTIMPLEMENTED();
  NotifyInitializationComplete(false);

#endif  // !defined(ENABLE_MOJO_MEDIA_IN_GPU_PROCESS)
}
| 503 | |
| 504 void AndroidVideoDecodeAccelerator::DoIOTask(bool start_timer) { | |
| 505 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 506 TRACE_EVENT0("media", "AVDA::DoIOTask"); | |
| 507 if (state_ == ERROR || state_ == WAITING_FOR_CODEC) | |
| 508 return; | |
| 509 | |
| 510 strategy_->MaybeRenderEarly(); | |
| 511 bool did_work = QueueInput(); | |
| 512 while (DequeueOutput()) | |
| 513 did_work = true; | |
| 514 | |
| 515 ManageTimer(did_work || start_timer); | |
| 516 } | |
| 517 | |
// Feeds at most one pending bitstream buffer into MediaCodec. Returns true if
// any forward progress was made (a buffer or EOS was queued), false when
// throttled, out of input buffers, waiting for a key, or on error. Errors are
// deferred while this runs (|defer_errors_|) and posted afterwards.
bool AndroidVideoDecodeAccelerator::QueueInput() {
  DCHECK(thread_checker_.CalledOnValidThread());
  TRACE_EVENT0("media", "AVDA::QueueInput");
  base::AutoReset<bool> auto_reset(&defer_errors_, true);
  // Throttle: don't get too far ahead of the client in advance
  // NotifyEndOfBitstreamBuffer() calls (see comment near the bottom).
  if (bitstreams_notified_in_advance_.size() > kMaxBitstreamsNotifiedInAdvance)
    return false;
  if (pending_bitstream_buffers_.empty())
    return false;
  if (state_ == WAITING_FOR_KEY)
    return false;

  int input_buf_index = pending_input_buf_index_;

  // Do not dequeue a new input buffer if we failed with MEDIA_CODEC_NO_KEY.
  // That status does not return this buffer back to the pool of
  // available input buffers. We have to reuse it in QueueSecureInputBuffer().
  if (input_buf_index == -1) {
    media::MediaCodecStatus status =
        media_codec_->DequeueInputBuffer(NoWaitTimeOut(), &input_buf_index);
    switch (status) {
      case media::MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER:
        // Codec has no free input buffer right now; try again on next poll.
        return false;
      case media::MEDIA_CODEC_ERROR:
        POST_ERROR(PLATFORM_FAILURE, "Failed to DequeueInputBuffer");
        return false;
      case media::MEDIA_CODEC_OK:
        break;
      default:
        NOTREACHED() << "Unknown DequeueInputBuffer status " << status;
        return false;
    }
  }

  DCHECK_NE(input_buf_index, -1);

  media::BitstreamBuffer bitstream_buffer = pending_bitstream_buffers_.front();

  // A buffer id of -1 is the flush sentinel: queue EOS instead of data.
  if (bitstream_buffer.id() == -1) {
    pending_bitstream_buffers_.pop();
    TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount",
                   pending_bitstream_buffers_.size());

    media_codec_->QueueEOS(input_buf_index);
    return true;
  }

  std::unique_ptr<SharedMemoryRegion> shm;

  if (pending_input_buf_index_ == -1) {
    // When |pending_input_buf_index_| is not -1, the buffer is already dequeued
    // from MediaCodec, filled with data and bitstream_buffer.handle() is
    // closed.
    shm.reset(new SharedMemoryRegion(bitstream_buffer, true));

    if (!shm->Map()) {
      POST_ERROR(UNREADABLE_INPUT, "Failed to SharedMemoryRegion::Map()");
      return false;
    }
  }

  const base::TimeDelta presentation_timestamp =
      bitstream_buffer.presentation_timestamp();
  DCHECK(presentation_timestamp != media::kNoTimestamp())
      << "Bitstream buffers must have valid presentation timestamps";

  // There may already be a bitstream buffer with this timestamp, e.g., VP9 alt
  // ref frames, but it's OK to overwrite it because we only expect a single
  // output frame to have that timestamp. AVDA clients only use the bitstream
  // buffer id in the returned Pictures to map a bitstream buffer back to a
  // timestamp on their side, so either one of the bitstream buffer ids will
  // result in them finding the right timestamp.
  bitstream_buffers_in_decoder_[presentation_timestamp] = bitstream_buffer.id();

  // Notice that |memory| will be null if we repeatedly enqueue the same buffer,
  // this happens after MEDIA_CODEC_NO_KEY.
  const uint8_t* memory =
      shm ? static_cast<const uint8_t*>(shm->memory()) : nullptr;
  const std::string& key_id = bitstream_buffer.key_id();
  const std::string& iv = bitstream_buffer.iv();
  const std::vector<media::SubsampleEntry>& subsamples =
      bitstream_buffer.subsamples();

  media::MediaCodecStatus status;
  // Clear content (no key id / iv) goes through the plain queue path;
  // encrypted content goes through the secure path with subsample info.
  if (key_id.empty() || iv.empty()) {
    status = media_codec_->QueueInputBuffer(input_buf_index, memory,
                                            bitstream_buffer.size(),
                                            presentation_timestamp);
  } else {
    status = media_codec_->QueueSecureInputBuffer(
        input_buf_index, memory, bitstream_buffer.size(), key_id, iv,
        subsamples, presentation_timestamp);
  }

  DVLOG(2) << __FUNCTION__
           << ": Queue(Secure)InputBuffer: pts:" << presentation_timestamp
           << " status:" << status;

  if (status == media::MEDIA_CODEC_NO_KEY) {
    // Keep trying to enqueue the same input buffer.
    // The buffer is owned by us (not the MediaCodec) and is filled with data.
    DVLOG(1) << "QueueSecureInputBuffer failed: NO_KEY";
    pending_input_buf_index_ = input_buf_index;
    state_ = WAITING_FOR_KEY;
    return false;
  }

  // The held-over buffer (if any) has now been consumed.
  pending_input_buf_index_ = -1;
  pending_bitstream_buffers_.pop();
  TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount",
                 pending_bitstream_buffers_.size());
  // We should call NotifyEndOfBitstreamBuffer(), when no more decoded output
  // will be returned from the bitstream buffer. However, MediaCodec API is
  // not enough to guarantee it.
  // So, here, we calls NotifyEndOfBitstreamBuffer() in advance in order to
  // keep getting more bitstreams from the client, and throttle them by using
  // |bitstreams_notified_in_advance_|.
  // TODO(dwkang): check if there is a way to remove this workaround.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&AndroidVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer,
                 weak_this_factory_.GetWeakPtr(), bitstream_buffer.id()));
  bitstreams_notified_in_advance_.push_back(bitstream_buffer.id());

  if (status != media::MEDIA_CODEC_OK) {
    POST_ERROR(PLATFORM_FAILURE, "Failed to QueueInputBuffer: " << status);
    return false;
  }

  return true;
}
| 648 | |
| 649 bool AndroidVideoDecodeAccelerator::DequeueOutput() { | |
| 650 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 651 TRACE_EVENT0("media", "AVDA::DequeueOutput"); | |
| 652 base::AutoReset<bool> auto_reset(&defer_errors_, true); | |
| 653 if (picturebuffers_requested_ && output_picture_buffers_.empty()) | |
| 654 return false; | |
| 655 | |
| 656 if (!output_picture_buffers_.empty() && free_picture_ids_.empty()) { | |
| 657 // Don't have any picture buffer to send. Need to wait more. | |
| 658 return false; | |
| 659 } | |
| 660 | |
| 661 bool eos = false; | |
| 662 base::TimeDelta presentation_timestamp; | |
| 663 int32_t buf_index = 0; | |
| 664 do { | |
| 665 size_t offset = 0; | |
| 666 size_t size = 0; | |
| 667 | |
| 668 TRACE_EVENT_BEGIN0("media", "AVDA::DequeueOutput"); | |
| 669 media::MediaCodecStatus status = media_codec_->DequeueOutputBuffer( | |
| 670 NoWaitTimeOut(), &buf_index, &offset, &size, &presentation_timestamp, | |
| 671 &eos, NULL); | |
| 672 TRACE_EVENT_END2("media", "AVDA::DequeueOutput", "status", status, | |
| 673 "presentation_timestamp (ms)", | |
| 674 presentation_timestamp.InMilliseconds()); | |
| 675 | |
| 676 switch (status) { | |
| 677 case media::MEDIA_CODEC_ERROR: | |
| 678 // Do not post an error if we are draining for reset and destroy. | |
| 679 // Instead, run the drain completion task. | |
| 680 if (IsDrainingForResetOrDestroy()) { | |
| 681 DVLOG(1) << __FUNCTION__ << ": error while codec draining"; | |
| 682 state_ = ERROR; | |
| 683 OnDrainCompleted(); | |
| 684 } else { | |
| 685 POST_ERROR(PLATFORM_FAILURE, "DequeueOutputBuffer failed."); | |
| 686 } | |
| 687 return false; | |
| 688 | |
| 689 case media::MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER: | |
| 690 return false; | |
| 691 | |
| 692 case media::MEDIA_CODEC_OUTPUT_FORMAT_CHANGED: { | |
| 693 // An OUTPUT_FORMAT_CHANGED is not reported after flush() if the frame | |
| 694 // size does not change. Therefore we have to keep track on the format | |
| 695 // even if draining, unless we are draining for destroy. | |
| 696 if (drain_type_ == DRAIN_FOR_DESTROY) | |
| 697 return true; // ignore | |
| 698 | |
| 699 if (media_codec_->GetOutputSize(&size_) != media::MEDIA_CODEC_OK) { | |
| 700 POST_ERROR(PLATFORM_FAILURE, "GetOutputSize failed."); | |
| 701 return false; | |
| 702 } | |
| 703 | |
| 704 DVLOG(3) << __FUNCTION__ | |
| 705 << " OUTPUT_FORMAT_CHANGED, new size: " << size_.ToString(); | |
| 706 | |
| 707 // Don't request picture buffers if we already have some. This avoids | |
| 708 // having to dismiss the existing buffers which may actively reference | |
| 709 // decoded images. Breaking their connection to the decoded image will | |
| 710 // cause rendering of black frames. Instead, we let the existing | |
| 711 // PictureBuffers live on and we simply update their size the next time | |
| 712 // they're attachted to an image of the new resolution. See the | |
| 713 // size update in |SendDecodedFrameToClient| and https://crbug/587994. | |
| 714 if (output_picture_buffers_.empty() && !picturebuffers_requested_) { | |
| 715 picturebuffers_requested_ = true; | |
| 716 base::MessageLoop::current()->PostTask( | |
| 717 FROM_HERE, | |
| 718 base::Bind(&AndroidVideoDecodeAccelerator::RequestPictureBuffers, | |
| 719 weak_this_factory_.GetWeakPtr())); | |
| 720 return false; | |
| 721 } | |
| 722 | |
| 723 return true; | |
| 724 } | |
| 725 | |
| 726 case media::MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED: | |
| 727 break; | |
| 728 | |
| 729 case media::MEDIA_CODEC_OK: | |
| 730 DCHECK_GE(buf_index, 0); | |
| 731 DVLOG(3) << __FUNCTION__ << ": pts:" << presentation_timestamp | |
| 732 << " buf_index:" << buf_index << " offset:" << offset | |
| 733 << " size:" << size << " eos:" << eos; | |
| 734 break; | |
| 735 | |
| 736 default: | |
| 737 NOTREACHED(); | |
| 738 break; | |
| 739 } | |
| 740 } while (buf_index < 0); | |
| 741 | |
| 742 if (eos) { | |
| 743 OnDrainCompleted(); | |
| 744 return false; | |
| 745 } | |
| 746 | |
| 747 if (IsDrainingForResetOrDestroy()) { | |
| 748 media_codec_->ReleaseOutputBuffer(buf_index, false); | |
| 749 return true; | |
| 750 } | |
| 751 | |
| 752 if (!picturebuffers_requested_) { | |
| 753 // If, somehow, we get a decoded frame back before a FORMAT_CHANGED | |
| 754 // message, then we might not have any picture buffers to use. This | |
| 755 // isn't supposed to happen (see EncodeDecodeTest.java#617). | |
| 756 // Log a metric to see how common this is. | |
| 757 RecordFormatChangedMetric(FormatChangedValue::MissingFormatChanged); | |
| 758 media_codec_->ReleaseOutputBuffer(buf_index, false); | |
| 759 POST_ERROR(PLATFORM_FAILURE, "Dequeued buffers before FORMAT_CHANGED."); | |
| 760 return false; | |
| 761 } | |
| 762 | |
| 763 // Get the bitstream buffer id from the timestamp. | |
| 764 auto it = bitstream_buffers_in_decoder_.find(presentation_timestamp); | |
| 765 | |
| 766 if (it != bitstream_buffers_in_decoder_.end()) { | |
| 767 const int32_t bitstream_buffer_id = it->second; | |
| 768 bitstream_buffers_in_decoder_.erase(bitstream_buffers_in_decoder_.begin(), | |
| 769 ++it); | |
| 770 SendDecodedFrameToClient(buf_index, bitstream_buffer_id); | |
| 771 | |
| 772 // Removes ids former or equal than the id from decoder. Note that | |
| 773 // |bitstreams_notified_in_advance_| does not mean bitstream ids in decoder | |
| 774 // because of frame reordering issue. We just maintain this roughly and use | |
| 775 // it for throttling. | |
| 776 for (auto bitstream_it = bitstreams_notified_in_advance_.begin(); | |
| 777 bitstream_it != bitstreams_notified_in_advance_.end(); | |
| 778 ++bitstream_it) { | |
| 779 if (*bitstream_it == bitstream_buffer_id) { | |
| 780 bitstreams_notified_in_advance_.erase( | |
| 781 bitstreams_notified_in_advance_.begin(), ++bitstream_it); | |
| 782 break; | |
| 783 } | |
| 784 } | |
| 785 } else { | |
| 786 // Normally we assume that the decoder makes at most one output frame for | |
| 787 // each distinct input timestamp. However MediaCodecBridge uses timestamp | |
| 788 // correction and provides a non-decreasing timestamp sequence, which might | |
| 789 // result in timestamp duplicates. Discard the frame if we cannot get the | |
| 790 // corresponding buffer id. | |
| 791 DVLOG(3) << __FUNCTION__ << ": Releasing buffer with unexpected PTS: " | |
| 792 << presentation_timestamp; | |
| 793 media_codec_->ReleaseOutputBuffer(buf_index, false); | |
| 794 } | |
| 795 | |
| 796 // We got a decoded frame, so try for another. | |
| 797 return true; | |
| 798 } | |
| 799 | |
| 800 void AndroidVideoDecodeAccelerator::SendDecodedFrameToClient( | |
| 801 int32_t codec_buffer_index, | |
| 802 int32_t bitstream_id) { | |
| 803 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 804 DCHECK_NE(bitstream_id, -1); | |
| 805 DCHECK(!free_picture_ids_.empty()); | |
| 806 TRACE_EVENT0("media", "AVDA::SendDecodedFrameToClient"); | |
| 807 | |
| 808 if (!make_context_current_cb_.Run()) { | |
| 809 POST_ERROR(PLATFORM_FAILURE, "Failed to make the GL context current."); | |
| 810 return; | |
| 811 } | |
| 812 | |
| 813 int32_t picture_buffer_id = free_picture_ids_.front(); | |
| 814 free_picture_ids_.pop(); | |
| 815 TRACE_COUNTER1("media", "AVDA::FreePictureIds", free_picture_ids_.size()); | |
| 816 | |
| 817 const auto& i = output_picture_buffers_.find(picture_buffer_id); | |
| 818 if (i == output_picture_buffers_.end()) { | |
| 819 POST_ERROR(PLATFORM_FAILURE, | |
| 820 "Can't find PictureBuffer id: " << picture_buffer_id); | |
| 821 return; | |
| 822 } | |
| 823 | |
| 824 bool size_changed = false; | |
| 825 if (i->second.size() != size_) { | |
| 826 // Size may have changed due to resolution change since the last time this | |
| 827 // PictureBuffer was used. | |
| 828 strategy_->UpdatePictureBufferSize(&i->second, size_); | |
| 829 size_changed = true; | |
| 830 } | |
| 831 | |
| 832 const bool allow_overlay = strategy_->ArePicturesOverlayable(); | |
| 833 media::Picture picture(picture_buffer_id, bitstream_id, gfx::Rect(size_), | |
| 834 allow_overlay); | |
| 835 picture.set_size_changed(size_changed); | |
| 836 | |
| 837 // Notify picture ready before calling UseCodecBufferForPictureBuffer() since | |
| 838 // that process may be slow and shouldn't delay delivery of the frame to the | |
| 839 // renderer. The picture is only used on the same thread as this method is | |
| 840 // called, so it is safe to do this. | |
| 841 NotifyPictureReady(picture); | |
| 842 | |
| 843 // Connect the PictureBuffer to the decoded frame, via whatever mechanism the | |
| 844 // strategy likes. | |
| 845 strategy_->UseCodecBufferForPictureBuffer(codec_buffer_index, i->second); | |
| 846 } | |
| 847 | |
| 848 void AndroidVideoDecodeAccelerator::Decode( | |
| 849 const media::BitstreamBuffer& bitstream_buffer) { | |
| 850 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 851 | |
| 852 if (bitstream_buffer.id() >= 0 && bitstream_buffer.size() > 0) { | |
| 853 DecodeBuffer(bitstream_buffer); | |
| 854 return; | |
| 855 } | |
| 856 | |
| 857 if (base::SharedMemory::IsHandleValid(bitstream_buffer.handle())) | |
| 858 base::SharedMemory::CloseHandle(bitstream_buffer.handle()); | |
| 859 | |
| 860 if (bitstream_buffer.id() < 0) { | |
| 861 POST_ERROR(INVALID_ARGUMENT, | |
| 862 "Invalid bistream_buffer, id: " << bitstream_buffer.id()); | |
| 863 } else { | |
| 864 base::MessageLoop::current()->PostTask( | |
| 865 FROM_HERE, | |
| 866 base::Bind(&AndroidVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer, | |
| 867 weak_this_factory_.GetWeakPtr(), bitstream_buffer.id())); | |
| 868 } | |
| 869 } | |
| 870 | |
| 871 void AndroidVideoDecodeAccelerator::DecodeBuffer( | |
| 872 const media::BitstreamBuffer& bitstream_buffer) { | |
| 873 pending_bitstream_buffers_.push(bitstream_buffer); | |
| 874 TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount", | |
| 875 pending_bitstream_buffers_.size()); | |
| 876 | |
| 877 DoIOTask(true); | |
| 878 } | |
| 879 | |
| 880 void AndroidVideoDecodeAccelerator::RequestPictureBuffers() { | |
| 881 if (client_) { | |
| 882 client_->ProvidePictureBuffers(kNumPictureBuffers, 1, | |
| 883 strategy_->GetPictureBufferSize(), | |
| 884 strategy_->GetTextureTarget()); | |
| 885 } | |
| 886 } | |
| 887 | |
| 888 void AndroidVideoDecodeAccelerator::AssignPictureBuffers( | |
| 889 const std::vector<media::PictureBuffer>& buffers) { | |
| 890 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 891 DCHECK(output_picture_buffers_.empty()); | |
| 892 DCHECK(free_picture_ids_.empty()); | |
| 893 | |
| 894 if (buffers.size() < kNumPictureBuffers) { | |
| 895 POST_ERROR(INVALID_ARGUMENT, "Not enough picture buffers assigned."); | |
| 896 return; | |
| 897 } | |
| 898 | |
| 899 const bool have_context = make_context_current_cb_.Run(); | |
| 900 LOG_IF(WARNING, !have_context) | |
| 901 << "Failed to make GL context current for Assign, continuing."; | |
| 902 | |
| 903 for (size_t i = 0; i < buffers.size(); ++i) { | |
| 904 if (buffers[i].size() != strategy_->GetPictureBufferSize()) { | |
| 905 POST_ERROR(INVALID_ARGUMENT, | |
| 906 "Invalid picture buffer size assigned. Wanted " | |
| 907 << size_.ToString() << ", but got " | |
| 908 << buffers[i].size().ToString()); | |
| 909 return; | |
| 910 } | |
| 911 int32_t id = buffers[i].id(); | |
| 912 output_picture_buffers_.insert(std::make_pair(id, buffers[i])); | |
| 913 free_picture_ids_.push(id); | |
| 914 | |
| 915 strategy_->AssignOnePictureBuffer(buffers[i], have_context); | |
| 916 } | |
| 917 TRACE_COUNTER1("media", "AVDA::FreePictureIds", free_picture_ids_.size()); | |
| 918 DoIOTask(true); | |
| 919 } | |
| 920 | |
| 921 void AndroidVideoDecodeAccelerator::ReusePictureBuffer( | |
| 922 int32_t picture_buffer_id) { | |
| 923 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 924 | |
| 925 free_picture_ids_.push(picture_buffer_id); | |
| 926 TRACE_COUNTER1("media", "AVDA::FreePictureIds", free_picture_ids_.size()); | |
| 927 | |
| 928 OutputBufferMap::const_iterator i = | |
| 929 output_picture_buffers_.find(picture_buffer_id); | |
| 930 if (i == output_picture_buffers_.end()) { | |
| 931 POST_ERROR(PLATFORM_FAILURE, "Can't find PictureBuffer id " | |
| 932 << picture_buffer_id); | |
| 933 return; | |
| 934 } | |
| 935 | |
| 936 strategy_->ReuseOnePictureBuffer(i->second); | |
| 937 DoIOTask(true); | |
| 938 } | |
| 939 | |
// Client-requested flush. Implemented as a codec drain; NotifyFlushDone()
// fires from OnDrainCompleted() once the EOS emerges from the codec.
void AndroidVideoDecodeAccelerator::Flush() {
  DVLOG(1) << __FUNCTION__;
  DCHECK(thread_checker_.CalledOnValidThread());

  StartCodecDrain(DRAIN_FOR_FLUSH);
}
| 946 | |
// Drops the current codec (if any) and constructs a replacement off-thread,
// based on |codec_config_|. Completion hops back to this thread via
// OnCodecConfigured(). We stay in WAITING_FOR_CODEC in the meantime.
void AndroidVideoDecodeAccelerator::ConfigureMediaCodecAsynchronously() {
  DCHECK(thread_checker_.CalledOnValidThread());

  // It's probably okay just to return here, since the codec will be configured
  // asynchronously. It's unclear that any state for the new request could
  // be different, unless somebody modifies |codec_config_| while we're already
  // waiting for a codec. One shouldn't do that for thread safety.
  DCHECK_NE(state_, WAITING_FOR_CODEC);

  state_ = WAITING_FOR_CODEC;

  // Tell the strategy that we're changing codecs. The codec itself could be
  // used normally, since we don't replace it until we're back on the main
  // thread. However, if we're using an output surface, then the incoming codec
  // might access that surface while the main thread is drawing. Telling the
  // strategy to forget the codec avoids this.
  if (media_codec_) {
    media_codec_.reset();
    strategy_->CodecChanged(nullptr);
  }

  scoped_refptr<base::SingleThreadTaskRunner> task_runner =
      g_avda_timer.Pointer()->ConstructionTaskRunner();
  CHECK(task_runner);

  // Build the codec on the shared construction thread; the reply is bound to
  // a weak pointer, so if we are destroyed first the result is simply dropped
  // (see the note in ActualDestroy()).
  base::PostTaskAndReplyWithResult(
      task_runner.get(), FROM_HERE,
      base::Bind(&AndroidVideoDecodeAccelerator::ConfigureMediaCodecOnAnyThread,
                 codec_config_),
      base::Bind(&AndroidVideoDecodeAccelerator::OnCodecConfigured,
                 weak_this_factory_.GetWeakPtr()));
}
| 979 | |
| 980 bool AndroidVideoDecodeAccelerator::ConfigureMediaCodecSynchronously() { | |
| 981 state_ = WAITING_FOR_CODEC; | |
| 982 std::unique_ptr<media::VideoCodecBridge> media_codec = | |
| 983 ConfigureMediaCodecOnAnyThread(codec_config_); | |
| 984 OnCodecConfigured(std::move(media_codec)); | |
| 985 return !!media_codec_; | |
| 986 } | |
| 987 | |
| 988 std::unique_ptr<media::VideoCodecBridge> | |
| 989 AndroidVideoDecodeAccelerator::ConfigureMediaCodecOnAnyThread( | |
| 990 scoped_refptr<CodecConfig> codec_config) { | |
| 991 TRACE_EVENT0("media", "AVDA::ConfigureMediaCodec"); | |
| 992 | |
| 993 jobject media_crypto = codec_config->media_crypto_ | |
| 994 ? codec_config->media_crypto_->obj() | |
| 995 : nullptr; | |
| 996 | |
| 997 // |needs_protected_surface_| implies encrypted stream. | |
| 998 DCHECK(!codec_config->needs_protected_surface_ || media_crypto); | |
| 999 | |
| 1000 return std::unique_ptr<media::VideoCodecBridge>( | |
| 1001 media::VideoCodecBridge::CreateDecoder( | |
| 1002 codec_config->codec_, codec_config->needs_protected_surface_, | |
| 1003 codec_config->initial_expected_coded_size_, | |
| 1004 codec_config->surface_.j_surface().obj(), media_crypto, true)); | |
| 1005 } | |
| 1006 | |
// Completion step for codec configuration (shared by the sync and async
// paths). Installs |media_codec| — which is null when construction failed —
// and advances the state machine accordingly.
void AndroidVideoDecodeAccelerator::OnCodecConfigured(
    std::unique_ptr<media::VideoCodecBridge> media_codec) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, WAITING_FOR_CODEC);

  media_codec_ = std::move(media_codec);

  // Record one instance of the codec being initialized.
  RecordFormatChangedMetric(FormatChangedValue::CodecInitialized);

  // Let the backing strategy pick up the new codec (possibly null).
  strategy_->CodecChanged(media_codec_.get());

  // If we are supposed to notify that initialization is complete, then do so
  // now. Otherwise, this is a reconfiguration. Note that this is deliberately
  // done before the failure check below, so a failed deferred initialization
  // is still reported to the client.
  if (deferred_initialization_pending_) {
    NotifyInitializationComplete(!!media_codec_);
    deferred_initialization_pending_ = false;
  }

  if (!media_codec_) {
    POST_ERROR(PLATFORM_FAILURE, "Failed to create MediaCodec.");
    return;
  }

  state_ = NO_ERROR;

  // Restart the shared IO timer so decoding work resumes.
  ManageTimer(true);
}
| 1035 | |
| 1036 void AndroidVideoDecodeAccelerator::StartCodecDrain(DrainType drain_type) { | |
| 1037 DVLOG(2) << __FUNCTION__ << " drain_type:" << drain_type; | |
| 1038 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 1039 | |
| 1040 // We assume that DRAIN_FOR_FLUSH and DRAIN_FOR_RESET cannot come while | |
| 1041 // another drain request is present, but DRAIN_FOR_DESTROY can. | |
| 1042 DCHECK_NE(drain_type, DRAIN_TYPE_NONE); | |
| 1043 DCHECK(drain_type_ == DRAIN_TYPE_NONE || drain_type == DRAIN_FOR_DESTROY) | |
| 1044 << "Unexpected StartCodecDrain() with drain type " << drain_type | |
| 1045 << " while already draining with drain type " << drain_type_; | |
| 1046 | |
| 1047 const bool enqueue_eos = drain_type_ == DRAIN_TYPE_NONE; | |
| 1048 drain_type_ = drain_type; | |
| 1049 | |
| 1050 if (enqueue_eos) | |
| 1051 DecodeBuffer(media::BitstreamBuffer(-1, base::SharedMemoryHandle(), 0)); | |
| 1052 } | |
| 1053 | |
| 1054 bool AndroidVideoDecodeAccelerator::IsDrainingForResetOrDestroy() const { | |
| 1055 return drain_type_ == DRAIN_FOR_RESET || drain_type_ == DRAIN_FOR_DESTROY; | |
| 1056 } | |
| 1057 | |
// Called when the codec has emitted the EOS (or errored while draining).
// Dispatches on why the drain was started and clears |drain_type_|.
void AndroidVideoDecodeAccelerator::OnDrainCompleted() {
  DVLOG(2) << __FUNCTION__;
  DCHECK(thread_checker_.CalledOnValidThread());

  // If we were waiting for an EOS, clear the state and reset the MediaCodec
  // as normal. Otherwise, enter the ERROR state which will force destruction
  // of MediaCodec during ResetCodecState().
  //
  // Some Android platforms seem to send an EOS buffer even when we're not
  // expecting it. In this case, destroy and reset the codec but don't notify
  // flush done since it violates the state machine. http://crbug.com/585959.

  switch (drain_type_) {
    case DRAIN_TYPE_NONE:
      // Unexpected EOS.
      state_ = ERROR;
      ResetCodecState(base::Closure());
      break;
    case DRAIN_FOR_FLUSH:
      // Flush completes only after the codec state has been reset.
      ResetCodecState(media::BindToCurrentLoop(
          base::Bind(&AndroidVideoDecodeAccelerator::NotifyFlushDone,
                     weak_this_factory_.GetWeakPtr())));
      break;
    case DRAIN_FOR_RESET:
      ResetCodecState(media::BindToCurrentLoop(
          base::Bind(&AndroidVideoDecodeAccelerator::NotifyResetDone,
                     weak_this_factory_.GetWeakPtr())));
      break;
    case DRAIN_FOR_DESTROY:
      // ActualDestroy() deletes |this|, so it must run as a fresh task rather
      // than inline here.
      base::MessageLoop::current()->PostTask(
          FROM_HERE, base::Bind(&AndroidVideoDecodeAccelerator::ActualDestroy,
                                weak_this_factory_.GetWeakPtr()));
      break;
  }
  drain_type_ = DRAIN_TYPE_NONE;
}
| 1094 | |
// Returns the codec to a clean state, either by a fast flush (healthy codec,
// JB-MR2+) or by recreating it entirely. Runs |done_cb| when done (or
// immediately, if a codec reconfiguration is already in flight).
void AndroidVideoDecodeAccelerator::ResetCodecState(
    const base::Closure& done_cb) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // If there is already a reset in flight, then that counts. This can really
  // only happen if somebody calls Reset.
  if (state_ == WAITING_FOR_CODEC) {
    if (!done_cb.is_null())
      done_cb.Run();
    return;
  }

  bitstream_buffers_in_decoder_.clear();

  if (pending_input_buf_index_ != -1) {
    // The data for that index exists in the input buffer, but the
    // corresponding shm block has been deleted. Check that it is safe to
    // flush the codec, i.e. |pending_bitstream_buffers_| is empty.
    // TODO(timav): keep shm block for that buffer and remove this restriction.
    DCHECK(pending_bitstream_buffers_.empty());
    pending_input_buf_index_ = -1;
  }

  const bool did_codec_error_happen = state_ == ERROR;
  state_ = NO_ERROR;

  // We might increment error_sequence_token here to cancel any delayed errors,
  // but right now it's unclear that it's safe to do so. If we are in an error
  // state because of a codec error, then it would be okay. Otherwise, it's
  // less obvious that we are exiting the error state. Since deferred errors
  // are only intended for fullscreen transitions right now, we take the more
  // conservative approach and let the errors post.
  // TODO(liberato): revisit this once we sort out the error state a bit more.

  // When codec is not in error state we can quickly reset (internally calls
  // flush()) for JB-MR2 and beyond. Prior to JB-MR2, flush() had several bugs
  // (b/8125974, b/8347958) so we must delete the MediaCodec and create a new
  // one. The full reconfigure is much slower and may cause visible freezing if
  // done mid-stream.
  if (!did_codec_error_happen &&
      base::android::BuildInfo::GetInstance()->sdk_int() >= 18) {
    DVLOG(3) << __FUNCTION__ << " Doing fast MediaCodec reset (flush).";
    media_codec_->Reset();
    // Since we just flushed all the output buffers, make sure that nothing is
    // using them.
    strategy_->CodecChanged(media_codec_.get());
  } else {
    DVLOG(3) << __FUNCTION__
             << " Deleting the MediaCodec and creating a new one.";
    g_avda_timer.Pointer()->StopTimer(this);
    // Changing the codec will also notify the strategy to forget about any
    // output buffers it has currently.
    ConfigureMediaCodecAsynchronously();
  }

  if (!done_cb.is_null())
    done_cb.Run();
}
| 1153 | |
// Client-requested reset: returns all queued input to the client, cancels
// pending error notifications, and resets the codec (after a drain for VP8).
void AndroidVideoDecodeAccelerator::Reset() {
  DVLOG(1) << __FUNCTION__;
  DCHECK(thread_checker_.CalledOnValidThread());
  TRACE_EVENT0("media", "AVDA::Reset");

  // Hand back every queued-but-undecoded buffer as consumed. An id of -1 is
  // the EOS marker enqueued by StartCodecDrain() and gets no notification.
  while (!pending_bitstream_buffers_.empty()) {
    int32_t bitstream_buffer_id = pending_bitstream_buffers_.front().id();
    pending_bitstream_buffers_.pop();

    if (bitstream_buffer_id != -1) {
      base::MessageLoop::current()->PostTask(
          FROM_HERE,
          base::Bind(&AndroidVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer,
                     weak_this_factory_.GetWeakPtr(), bitstream_buffer_id));
    }
  }
  TRACE_COUNTER1("media", "AVDA::PendingBitstreamBufferCount", 0);
  bitstreams_notified_in_advance_.clear();

  // Any error that is waiting to post can be ignored. NotifyError() drops
  // notifications carrying a stale token.
  error_sequence_token_++;

  DCHECK(strategy_);
  strategy_->ReleaseCodecBuffers(output_picture_buffers_);

  // Some VP8 files require complete MediaCodec drain before we can call
  // MediaCodec.flush() or MediaCodec.reset(). http://crbug.com/598963.
  if (media_codec_ && codec_config_->codec_ == media::kCodecVP8) {
    // Postpone ResetCodecState() after the drain.
    StartCodecDrain(DRAIN_FOR_RESET);
  } else {
    ResetCodecState(media::BindToCurrentLoop(
        base::Bind(&AndroidVideoDecodeAccelerator::NotifyResetDone,
                   weak_this_factory_.GetWeakPtr())));
  }
}
| 1190 | |
// Begins teardown. Destruction may be deferred until a codec drain completes
// (VP8); either way ActualDestroy() ultimately runs `delete this`.
void AndroidVideoDecodeAccelerator::Destroy() {
  DVLOG(1) << __FUNCTION__;
  DCHECK(thread_checker_.CalledOnValidThread());

  bool have_context = make_context_current_cb_.Run();
  if (!have_context)
    LOG(WARNING) << "Failed make GL context current for Destroy, continuing.";

  if (strategy_)
    strategy_->Cleanup(have_context, output_picture_buffers_);

  // If we have an OnFrameAvailable handler, tell it that we're going away.
  if (on_frame_available_handler_) {
    on_frame_available_handler_->ClearOwner();
    on_frame_available_handler_ = nullptr;
  }

  // Prevent any further client callbacks; the Notify*() helpers all check
  // |client_| before calling out.
  client_ = nullptr;

  // Some VP8 files require complete MediaCodec drain before we can call
  // MediaCodec.flush() or MediaCodec.reset(). http://crbug.com/598963.
  if (media_codec_ && codec_config_->codec_ == media::kCodecVP8) {
    // Clear pending_bitstream_buffers_.
    while (!pending_bitstream_buffers_.empty())
      pending_bitstream_buffers_.pop();

    // Postpone ActualDestroy after the drain.
    StartCodecDrain(DRAIN_FOR_DESTROY);
  } else {
    ActualDestroy();
  }
}
| 1223 | |
// Final teardown step, reached from Destroy() directly or via the
// DRAIN_FOR_DESTROY completion path. Deletes |this|.
void AndroidVideoDecodeAccelerator::ActualDestroy() {
  DVLOG(1) << __FUNCTION__;
  DCHECK(thread_checker_.CalledOnValidThread());

  // Note that async codec construction might still be in progress. In that
  // case, the codec will be deleted when it completes once we invalidate all
  // our weak refs.
  weak_this_factory_.InvalidateWeakPtrs();
  if (media_codec_) {
    g_avda_timer.Pointer()->StopTimer(this);
    media_codec_.reset();
  }
  delete this;
}
| 1238 | |
// This accelerator does not support calling Decode() from a separate thread;
// always declines the setup.
bool AndroidVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
    const base::WeakPtr<Client>& decode_client,
    const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
  return false;
}
| 1244 | |
// Returns the most recent output size obtained from the codec.
const gfx::Size& AndroidVideoDecodeAccelerator::GetSize() const {
  return size_;
}
| 1248 | |
// Accessor for |thread_checker_|, the checker every public method here
// asserts against.
const base::ThreadChecker& AndroidVideoDecodeAccelerator::ThreadChecker()
    const {
  return thread_checker_;
}
| 1253 | |
// Resolves the GLES2 decoder through |get_gles2_decoder_cb_|; the returned
// weak pointer may be null if the decoder has gone away.
base::WeakPtr<gpu::gles2::GLES2Decoder>
AndroidVideoDecodeAccelerator::GetGlDecoder() const {
  return get_gles2_decoder_cb_.Run();
}
| 1258 | |
| 1259 gpu::gles2::TextureRef* AndroidVideoDecodeAccelerator::GetTextureForPicture( | |
| 1260 const media::PictureBuffer& picture_buffer) { | |
| 1261 auto gles_decoder = GetGlDecoder(); | |
| 1262 RETURN_ON_FAILURE(this, gles_decoder, "Failed to get GL decoder", | |
| 1263 ILLEGAL_STATE, nullptr); | |
| 1264 RETURN_ON_FAILURE(this, gles_decoder->GetContextGroup(), | |
| 1265 "Null gles_decoder->GetContextGroup()", ILLEGAL_STATE, | |
| 1266 nullptr); | |
| 1267 gpu::gles2::TextureManager* texture_manager = | |
| 1268 gles_decoder->GetContextGroup()->texture_manager(); | |
| 1269 RETURN_ON_FAILURE(this, texture_manager, "Null texture_manager", | |
| 1270 ILLEGAL_STATE, nullptr); | |
| 1271 | |
| 1272 DCHECK_LE(1u, picture_buffer.internal_texture_ids().size()); | |
| 1273 gpu::gles2::TextureRef* texture_ref = | |
| 1274 texture_manager->GetTexture(picture_buffer.internal_texture_ids()[0]); | |
| 1275 RETURN_ON_FAILURE(this, texture_manager, "Null texture_ref", ILLEGAL_STATE, | |
| 1276 nullptr); | |
| 1277 | |
| 1278 return texture_ref; | |
| 1279 } | |
| 1280 | |
// Frame-available signal, forwarded straight to the backing strategy.
void AndroidVideoDecodeAccelerator::OnFrameAvailable() {
  // Remember: this may be on any thread.
  DCHECK(strategy_);
  strategy_->OnFrameAvailable();
}
| 1286 | |
| 1287 void AndroidVideoDecodeAccelerator::PostError( | |
| 1288 const ::tracked_objects::Location& from_here, | |
| 1289 media::VideoDecodeAccelerator::Error error) { | |
| 1290 base::MessageLoop::current()->PostDelayedTask( | |
| 1291 from_here, | |
| 1292 base::Bind(&AndroidVideoDecodeAccelerator::NotifyError, | |
| 1293 weak_this_factory_.GetWeakPtr(), error, error_sequence_token_), | |
| 1294 (defer_errors_ ? ErrorPostingDelay() : base::TimeDelta())); | |
| 1295 state_ = ERROR; | |
| 1296 } | |
| 1297 | |
// CDM callback delivering the MediaCrypto object for an encrypted stream.
// On failure, drops the CDM references and fails initialization; on success,
// stores the crypto parameters and kicks off codec configuration.
void AndroidVideoDecodeAccelerator::OnMediaCryptoReady(
    media::MediaDrmBridgeCdmContext::JavaObjectPtr media_crypto,
    bool needs_protected_surface) {
  DVLOG(1) << __FUNCTION__;

  if (!media_crypto) {
    LOG(ERROR) << "MediaCrypto is not available, can't play encrypted stream.";
    cdm_for_reference_holding_only_ = nullptr;
    media_drm_bridge_cdm_context_ = nullptr;
    NotifyInitializationComplete(false);
    return;
  }

  DCHECK(!media_crypto->is_null());

  // We assume this is a part of the initialization process, thus MediaCodec
  // is not created yet.
  DCHECK(!media_codec_);

  codec_config_->media_crypto_ = std::move(media_crypto);
  codec_config_->needs_protected_surface_ = needs_protected_surface;

  // After receiving |media_crypto_| we can configure MediaCodec.
  ConfigureMediaCodecAsynchronously();
}
| 1323 | |
// CDM callback: a new decryption key is available. May unblock decoding if
// we stalled in WAITING_FOR_KEY.
void AndroidVideoDecodeAccelerator::OnKeyAdded() {
  DVLOG(1) << __FUNCTION__;

  if (state_ == WAITING_FOR_KEY)
    state_ = NO_ERROR;

  DoIOTask(true);
}
| 1332 | |
| 1333 void AndroidVideoDecodeAccelerator::NotifyInitializationComplete(bool success) { | |
| 1334 if (client_) | |
| 1335 client_->NotifyInitializationComplete(success); | |
| 1336 } | |
| 1337 | |
| 1338 void AndroidVideoDecodeAccelerator::NotifyPictureReady( | |
| 1339 const media::Picture& picture) { | |
| 1340 if (client_) | |
| 1341 client_->PictureReady(picture); | |
| 1342 } | |
| 1343 | |
| 1344 void AndroidVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer( | |
| 1345 int input_buffer_id) { | |
| 1346 if (client_) | |
| 1347 client_->NotifyEndOfBitstreamBuffer(input_buffer_id); | |
| 1348 } | |
| 1349 | |
| 1350 void AndroidVideoDecodeAccelerator::NotifyFlushDone() { | |
| 1351 if (client_) | |
| 1352 client_->NotifyFlushDone(); | |
| 1353 } | |
| 1354 | |
| 1355 void AndroidVideoDecodeAccelerator::NotifyResetDone() { | |
| 1356 if (client_) | |
| 1357 client_->NotifyResetDone(); | |
| 1358 } | |
| 1359 | |
| 1360 void AndroidVideoDecodeAccelerator::NotifyError( | |
| 1361 media::VideoDecodeAccelerator::Error error, | |
| 1362 int token) { | |
| 1363 DVLOG(1) << __FUNCTION__ << ": error: " << error << " token: " << token | |
| 1364 << " current: " << error_sequence_token_; | |
| 1365 if (token != error_sequence_token_) | |
| 1366 return; | |
| 1367 | |
| 1368 if (client_) | |
| 1369 client_->NotifyError(error); | |
| 1370 } | |
| 1371 | |
| 1372 void AndroidVideoDecodeAccelerator::ManageTimer(bool did_work) { | |
| 1373 bool should_be_running = true; | |
| 1374 | |
| 1375 base::TimeTicks now = base::TimeTicks::Now(); | |
| 1376 if (!did_work && !most_recent_work_.is_null()) { | |
| 1377 // Make sure that we have done work recently enough, else stop the timer. | |
| 1378 if (now - most_recent_work_ > IdleTimerTimeOut()) { | |
| 1379 most_recent_work_ = base::TimeTicks(); | |
| 1380 should_be_running = false; | |
| 1381 } | |
| 1382 } else { | |
| 1383 most_recent_work_ = now; | |
| 1384 } | |
| 1385 | |
| 1386 if (should_be_running) | |
| 1387 g_avda_timer.Pointer()->StartTimer(this); | |
| 1388 else | |
| 1389 g_avda_timer.Pointer()->StopTimer(this); | |
| 1390 } | |
| 1391 | |
// static
// Chooses between the deferred-rendering and copying backing strategies.
bool AndroidVideoDecodeAccelerator::UseDeferredRenderingStrategy(
    const gpu::GpuPreferences& gpu_preferences) {
  // TODO(liberato, watk): Figure out what we want to do about zero copy for
  // fullscreen external SurfaceView in WebView. http://crbug.com/582170.
  return !gpu_preferences.enable_threaded_texture_mailboxes;
}
| 1399 | |
// static
// Builds the advertised capability set: supported codec profiles with their
// resolution limits, plus capability flags that depend on the rendering
// strategy in use.
media::VideoDecodeAccelerator::Capabilities
AndroidVideoDecodeAccelerator::GetCapabilities(
    const gpu::GpuPreferences& gpu_preferences) {
  Capabilities capabilities;
  SupportedProfiles& profiles = capabilities.supported_profiles;

  SupportedProfile profile;

  if (media::MediaCodecUtil::IsVp8DecoderAvailable()) {
    profile.profile = media::VP8PROFILE_ANY;
    profile.min_resolution.SetSize(0, 0);
    profile.max_resolution.SetSize(1920, 1088);
    // If we know MediaCodec will just create a software codec, prefer our
    // internal software decoder instead. It's more up to date and secured
    // within the renderer sandbox. However if the content is encrypted, we
    // must use MediaCodec anyways since MediaDrm offers no way to decrypt
    // the buffers and let us use our internal software decoders.
    profile.encrypted_only = media::VideoCodecBridge::IsKnownUnaccelerated(
        media::kCodecVP8, media::MEDIA_CODEC_DECODER);
    profiles.push_back(profile);
  }

  if (media::MediaCodecUtil::IsVp9DecoderAvailable()) {
    // VP9 profiles 0-3 share the same resolution limits and encrypted_only
    // setting; only |profile.profile| changes between the push_back calls.
    profile.min_resolution.SetSize(0, 0);
    profile.max_resolution.SetSize(1920, 1088);
    // If we know MediaCodec will just create a software codec, prefer our
    // internal software decoder instead. It's more up to date and secured
    // within the renderer sandbox. However if the content is encrypted, we
    // must use MediaCodec anyways since MediaDrm offers no way to decrypt
    // the buffers and let us use our internal software decoders.
    profile.encrypted_only = media::VideoCodecBridge::IsKnownUnaccelerated(
        media::kCodecVP9, media::MEDIA_CODEC_DECODER);
    profile.profile = media::VP9PROFILE_PROFILE0;
    profiles.push_back(profile);
    profile.profile = media::VP9PROFILE_PROFILE1;
    profiles.push_back(profile);
    profile.profile = media::VP9PROFILE_PROFILE2;
    profiles.push_back(profile);
    profile.profile = media::VP9PROFILE_PROFILE3;
    profiles.push_back(profile);
  }

  for (const auto& supported_profile : kSupportedH264Profiles) {
    SupportedProfile profile;
    profile.profile = supported_profile;
    profile.min_resolution.SetSize(0, 0);
    // Advertise support for 4k and let the MediaCodec fail when decoding if it
    // doesn't support the resolution. It's assumed that consumers won't have
    // software fallback for H264 on Android anyway.
    profile.max_resolution.SetSize(3840, 2160);
    profiles.push_back(profile);
  }

  // Capability flags only apply to the deferred (zero-copy) strategy.
  if (UseDeferredRenderingStrategy(gpu_preferences)) {
    capabilities.flags = media::VideoDecodeAccelerator::Capabilities::
                             NEEDS_ALL_PICTURE_BUFFERS_TO_DECODE |
                         media::VideoDecodeAccelerator::Capabilities::
                             SUPPORTS_DEFERRED_INITIALIZATION;
    if (media::MediaCodecUtil::IsSurfaceViewOutputSupported()) {
      capabilities.flags |= media::VideoDecodeAccelerator::Capabilities::
          SUPPORTS_EXTERNAL_OUTPUT_SURFACE;
    }
  }

  return capabilities;
}
| 1467 | |
| 1468 } // namespace content | |
| OLD | NEW |