| Index: media/gpu/vt_video_decode_accelerator_mac.cc
|
| diff --git a/content/common/gpu/media/vt_video_decode_accelerator_mac.cc b/media/gpu/vt_video_decode_accelerator_mac.cc
|
| similarity index 85%
|
| rename from content/common/gpu/media/vt_video_decode_accelerator_mac.cc
|
| rename to media/gpu/vt_video_decode_accelerator_mac.cc
|
| index 1942577a4ec84f585bb0a37450d4d523496b9b5d..12f4e87ea66569cd16caba0b5dcd1e2537238ddf 100644
|
| --- a/content/common/gpu/media/vt_video_decode_accelerator_mac.cc
|
| +++ b/media/gpu/vt_video_decode_accelerator_mac.cc
|
| @@ -2,7 +2,7 @@
|
| // Use of this source code is governed by a BSD-style license that can be
|
| // found in the LICENSE file.
|
|
|
| -#include "content/common/gpu/media/vt_video_decode_accelerator_mac.h"
|
| +#include "media/gpu/vt_video_decode_accelerator_mac.h"
|
|
|
| #include <CoreVideo/CoreVideo.h>
|
| #include <OpenGL/CGLIOSurface.h>
|
| @@ -28,34 +28,30 @@
|
| #include "ui/gl/gl_implementation.h"
|
| #include "ui/gl/scoped_binders.h"
|
|
|
| -using content_common_gpu_media::kModuleVt;
|
| -using content_common_gpu_media::InitializeStubs;
|
| -using content_common_gpu_media::IsVtInitialized;
|
| -using content_common_gpu_media::StubPathMap;
|
| +using media_gpu::kModuleVt;
|
| +using media_gpu::InitializeStubs;
|
| +using media_gpu::IsVtInitialized;
|
| +using media_gpu::StubPathMap;
|
|
|
| -#define NOTIFY_STATUS(name, status, session_failure) \
|
| - do { \
|
| - OSSTATUS_DLOG(ERROR, status) << name; \
|
| - NotifyError(PLATFORM_FAILURE, session_failure); \
|
| - } while (0)
|
| +#define NOTIFY_STATUS(name, status, session_failure) \
|
| + do { \
|
| + OSSTATUS_DLOG(ERROR, status) << name; \
|
| + NotifyError(PLATFORM_FAILURE, session_failure); \
|
| + } while (0)
|
|
|
| -namespace content {
|
| +namespace media {
|
|
|
| // Only H.264 with 4:2:0 chroma sampling is supported.
|
| static const media::VideoCodecProfile kSupportedProfiles[] = {
|
| - media::H264PROFILE_BASELINE,
|
| - media::H264PROFILE_MAIN,
|
| - media::H264PROFILE_EXTENDED,
|
| - media::H264PROFILE_HIGH,
|
| - // TODO(hubbe): Try to re-enable this again somehow. Currently it seems
|
| - // that some codecs fail to check the profile during initialization and
|
| - // then fail on the first frame decode, which currently results in a
|
| - // pipeline failure.
|
| - // media::H264PROFILE_HIGH10PROFILE,
|
| - media::H264PROFILE_SCALABLEBASELINE,
|
| - media::H264PROFILE_SCALABLEHIGH,
|
| - media::H264PROFILE_STEREOHIGH,
|
| - media::H264PROFILE_MULTIVIEWHIGH,
|
| + media::H264PROFILE_BASELINE, media::H264PROFILE_MAIN,
|
| + media::H264PROFILE_EXTENDED, media::H264PROFILE_HIGH,
|
| + // TODO(hubbe): Try to re-enable this again somehow. Currently it seems
|
| + // that some codecs fail to check the profile during initialization and
|
| + // then fail on the first frame decode, which currently results in a
|
| + // pipeline failure.
|
| + // media::H264PROFILE_HIGH10PROFILE,
|
| + media::H264PROFILE_SCALABLEBASELINE, media::H264PROFILE_SCALABLEHIGH,
|
| + media::H264PROFILE_STEREOHIGH, media::H264PROFILE_MULTIVIEWHIGH,
|
| };
|
|
|
| // Size to use for NALU length headers in AVC format (can be 1, 2, or 4).
|
| @@ -73,8 +69,8 @@ static const int kNumPictureBuffers = media::limits::kMaxVideoFrames + 1;
|
| static const int kMaxReorderQueueSize = 16;
|
|
|
| // Build an |image_config| dictionary for VideoToolbox initialization.
|
| -static base::ScopedCFTypeRef<CFMutableDictionaryRef>
|
| -BuildImageConfig(CMVideoDimensions coded_dimensions) {
|
| +static base::ScopedCFTypeRef<CFMutableDictionaryRef> BuildImageConfig(
|
| + CMVideoDimensions coded_dimensions) {
|
| base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config;
|
|
|
| // Note that 4:2:0 textures cannot be used directly as RGBA in OpenGL, but are
|
| @@ -88,12 +84,10 @@ BuildImageConfig(CMVideoDimensions coded_dimensions) {
|
| if (!cf_pixel_format.get() || !cf_width.get() || !cf_height.get())
|
| return image_config;
|
|
|
| - image_config.reset(
|
| - CFDictionaryCreateMutable(
|
| - kCFAllocatorDefault,
|
| - 3, // capacity
|
| - &kCFTypeDictionaryKeyCallBacks,
|
| - &kCFTypeDictionaryValueCallBacks));
|
| + image_config.reset(CFDictionaryCreateMutable(
|
| + kCFAllocatorDefault,
|
| + 3, // capacity
|
| + &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks));
|
| if (!image_config.get())
|
| return image_config;
|
|
|
| @@ -111,8 +105,10 @@ BuildImageConfig(CMVideoDimensions coded_dimensions) {
|
| // successful.
|
| //
|
| // TODO(sandersd): Merge with ConfigureDecoder(), as the code is very similar.
|
| -static bool CreateVideoToolboxSession(const uint8_t* sps, size_t sps_size,
|
| - const uint8_t* pps, size_t pps_size,
|
| +static bool CreateVideoToolboxSession(const uint8_t* sps,
|
| + size_t sps_size,
|
| + const uint8_t* pps,
|
| + size_t pps_size,
|
| bool require_hardware) {
|
| const uint8_t* data_ptrs[] = {sps, pps};
|
| const size_t data_sizes[] = {sps_size, pps_size};
|
| @@ -120,10 +116,10 @@ static bool CreateVideoToolboxSession(const uint8_t* sps, size_t sps_size,
|
| base::ScopedCFTypeRef<CMFormatDescriptionRef> format;
|
| OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
|
| kCFAllocatorDefault,
|
| - 2, // parameter_set_count
|
| - data_ptrs, // &parameter_set_pointers
|
| - data_sizes, // &parameter_set_sizes
|
| - kNALUHeaderLength, // nal_unit_header_length
|
| + 2, // parameter_set_count
|
| + data_ptrs, // &parameter_set_pointers
|
| + data_sizes, // &parameter_set_sizes
|
| + kNALUHeaderLength, // nal_unit_header_length
|
| format.InitializeInto());
|
| if (status) {
|
| OSSTATUS_DLOG(WARNING, status)
|
| @@ -132,11 +128,10 @@ static bool CreateVideoToolboxSession(const uint8_t* sps, size_t sps_size,
|
| }
|
|
|
| base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config(
|
| - CFDictionaryCreateMutable(
|
| - kCFAllocatorDefault,
|
| - 1, // capacity
|
| - &kCFTypeDictionaryKeyCallBacks,
|
| - &kCFTypeDictionaryValueCallBacks));
|
| + CFDictionaryCreateMutable(kCFAllocatorDefault,
|
| + 1, // capacity
|
| + &kCFTypeDictionaryKeyCallBacks,
|
| + &kCFTypeDictionaryValueCallBacks));
|
| if (!decoder_config.get())
|
| return false;
|
|
|
| @@ -144,8 +139,7 @@ static bool CreateVideoToolboxSession(const uint8_t* sps, size_t sps_size,
|
| CFDictionarySetValue(
|
| decoder_config,
|
| // kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
|
| - CFSTR("RequireHardwareAcceleratedVideoDecoder"),
|
| - kCFBooleanTrue);
|
| + CFSTR("RequireHardwareAcceleratedVideoDecoder"), kCFBooleanTrue);
|
| }
|
|
|
| base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config(
|
| @@ -158,10 +152,10 @@ static bool CreateVideoToolboxSession(const uint8_t* sps, size_t sps_size,
|
| base::ScopedCFTypeRef<VTDecompressionSessionRef> session;
|
| status = VTDecompressionSessionCreate(
|
| kCFAllocatorDefault,
|
| - format, // video_format_description
|
| - decoder_config, // video_decoder_specification
|
| - image_config, // destination_image_buffer_attributes
|
| - &callback, // output_callback
|
| + format, // video_format_description
|
| + decoder_config, // video_decoder_specification
|
| + image_config, // destination_image_buffer_attributes
|
| + &callback, // output_callback
|
| session.InitializeInto());
|
| if (status) {
|
| OSSTATUS_DLOG(WARNING, status)
|
| @@ -235,36 +229,31 @@ bool InitializeVideoToolbox() {
|
| }
|
|
|
| // Route decoded frame callbacks back into the VTVideoDecodeAccelerator.
|
| -static void OutputThunk(
|
| - void* decompression_output_refcon,
|
| - void* source_frame_refcon,
|
| - OSStatus status,
|
| - VTDecodeInfoFlags info_flags,
|
| - CVImageBufferRef image_buffer,
|
| - CMTime presentation_time_stamp,
|
| - CMTime presentation_duration) {
|
| +static void OutputThunk(void* decompression_output_refcon,
|
| + void* source_frame_refcon,
|
| + OSStatus status,
|
| + VTDecodeInfoFlags info_flags,
|
| + CVImageBufferRef image_buffer,
|
| + CMTime presentation_time_stamp,
|
| + CMTime presentation_duration) {
|
| VTVideoDecodeAccelerator* vda =
|
| reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon);
|
| vda->Output(source_frame_refcon, status, image_buffer);
|
| }
|
|
|
| -VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) {
|
| -}
|
| +VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) {}
|
|
|
| VTVideoDecodeAccelerator::Task::Task(const Task& other) = default;
|
|
|
| -VTVideoDecodeAccelerator::Task::~Task() {
|
| -}
|
| +VTVideoDecodeAccelerator::Task::~Task() {}
|
|
|
| VTVideoDecodeAccelerator::Frame::Frame(int32_t bitstream_id)
|
| : bitstream_id(bitstream_id),
|
| pic_order_cnt(0),
|
| is_idr(false),
|
| - reorder_window(0) {
|
| -}
|
| + reorder_window(0) {}
|
|
|
| -VTVideoDecodeAccelerator::Frame::~Frame() {
|
| -}
|
| +VTVideoDecodeAccelerator::Frame::~Frame() {}
|
|
|
| VTVideoDecodeAccelerator::PictureInfo::PictureInfo(uint32_t client_texture_id,
|
| uint32_t service_texture_id)
|
| @@ -354,8 +343,7 @@ bool VTVideoDecodeAccelerator::Initialize(const Config& config,
|
|
|
| // Count the session as successfully initialized.
|
| UMA_HISTOGRAM_ENUMERATION("Media.VTVDA.SessionFailureReason",
|
| - SFT_SUCCESSFULLY_INITIALIZED,
|
| - SFT_MAX + 1);
|
| + SFT_SUCCESSFULLY_INITIALIZED, SFT_MAX + 1);
|
| return true;
|
| }
|
|
|
| @@ -364,8 +352,8 @@ bool VTVideoDecodeAccelerator::FinishDelayedFrames() {
|
| if (session_) {
|
| OSStatus status = VTDecompressionSessionWaitForAsynchronousFrames(session_);
|
| if (status) {
|
| - NOTIFY_STATUS("VTDecompressionSessionWaitForAsynchronousFrames()",
|
| - status, SFT_PLATFORM_ERROR);
|
| + NOTIFY_STATUS("VTDecompressionSessionWaitForAsynchronousFrames()", status,
|
| + SFT_PLATFORM_ERROR);
|
| return false;
|
| }
|
| }
|
| @@ -395,10 +383,10 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
|
| format_.reset();
|
| OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
|
| kCFAllocatorDefault,
|
| - nalu_data_ptrs.size(), // parameter_set_count
|
| - &nalu_data_ptrs.front(), // &parameter_set_pointers
|
| - &nalu_data_sizes.front(), // &parameter_set_sizes
|
| - kNALUHeaderLength, // nal_unit_header_length
|
| + nalu_data_ptrs.size(), // parameter_set_count
|
| + &nalu_data_ptrs.front(), // &parameter_set_pointers
|
| + &nalu_data_sizes.front(), // &parameter_set_sizes
|
| + kNALUHeaderLength, // nal_unit_header_length
|
| format_.InitializeInto());
|
| if (status) {
|
| NOTIFY_STATUS("CMVideoFormatDescriptionCreateFromH264ParameterSets()",
|
| @@ -416,11 +404,10 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
|
|
|
| // Prepare VideoToolbox configuration dictionaries.
|
| base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config(
|
| - CFDictionaryCreateMutable(
|
| - kCFAllocatorDefault,
|
| - 1, // capacity
|
| - &kCFTypeDictionaryKeyCallBacks,
|
| - &kCFTypeDictionaryValueCallBacks));
|
| + CFDictionaryCreateMutable(kCFAllocatorDefault,
|
| + 1, // capacity
|
| + &kCFTypeDictionaryKeyCallBacks,
|
| + &kCFTypeDictionaryValueCallBacks));
|
| if (!decoder_config.get()) {
|
| DLOG(ERROR) << "Failed to create CFMutableDictionary";
|
| NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
|
| @@ -430,8 +417,7 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
|
| CFDictionarySetValue(
|
| decoder_config,
|
| // kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
|
| - CFSTR("EnableHardwareAcceleratedVideoDecoder"),
|
| - kCFBooleanTrue);
|
| + CFSTR("EnableHardwareAcceleratedVideoDecoder"), kCFBooleanTrue);
|
|
|
| base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config(
|
| BuildImageConfig(coded_dimensions));
|
| @@ -449,10 +435,10 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
|
| session_.reset();
|
| status = VTDecompressionSessionCreate(
|
| kCFAllocatorDefault,
|
| - format_, // video_format_description
|
| - decoder_config, // video_decoder_specification
|
| - image_config, // destination_image_buffer_attributes
|
| - &callback_, // output_callback
|
| + format_, // video_format_description
|
| + decoder_config, // video_decoder_specification
|
| + image_config, // destination_image_buffer_attributes
|
| + &callback_, // output_callback
|
| session_.InitializeInto());
|
| if (status) {
|
| NOTIFY_STATUS("VTDecompressionSessionCreate()", status,
|
| @@ -466,8 +452,7 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
|
| if (VTSessionCopyProperty(
|
| session_,
|
| // kVTDecompressionPropertyKey_UsingHardwareAcceleratedVideoDecoder
|
| - CFSTR("UsingHardwareAcceleratedVideoDecoder"),
|
| - kCFAllocatorDefault,
|
| + CFSTR("UsingHardwareAcceleratedVideoDecoder"), kCFAllocatorDefault,
|
| cf_using_hardware.InitializeInto()) == 0) {
|
| using_hardware = CFBooleanGetValue(cf_using_hardware);
|
| }
|
| @@ -617,8 +602,8 @@ void VTVideoDecodeAccelerator::DecodeTask(
|
|
|
| if (sps->vui_parameters_present_flag &&
|
| sps->bitstream_restriction_flag) {
|
| - frame->reorder_window = std::min(sps->max_num_reorder_frames,
|
| - kMaxReorderQueueSize - 1);
|
| + frame->reorder_window =
|
| + std::min(sps->max_num_reorder_frames, kMaxReorderQueueSize - 1);
|
| }
|
| }
|
| has_slice = true;
|
| @@ -677,8 +662,9 @@ void VTVideoDecodeAccelerator::DecodeTask(
|
| // Keep everything in order by flushing first.
|
| if (!FinishDelayedFrames())
|
| return;
|
| - gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
|
| - &VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
|
| + gpu_task_runner_->PostTask(
|
| + FROM_HERE,
|
| + base::Bind(&VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
|
| return;
|
| }
|
|
|
| @@ -726,8 +712,8 @@ void VTVideoDecodeAccelerator::DecodeTask(
|
| for (size_t i = 0; i < nalus.size(); i++) {
|
| media::H264NALU& nalu = nalus[i];
|
| uint32_t header = base::HostToNet32(static_cast<uint32_t>(nalu.size));
|
| - status = CMBlockBufferReplaceDataBytes(
|
| - &header, data, offset, kNALUHeaderLength);
|
| + status =
|
| + CMBlockBufferReplaceDataBytes(&header, data, offset, kNALUHeaderLength);
|
| if (status) {
|
| NOTIFY_STATUS("CMBlockBufferReplaceDataBytes()", status,
|
| SFT_PLATFORM_ERROR);
|
| @@ -745,19 +731,18 @@ void VTVideoDecodeAccelerator::DecodeTask(
|
|
|
| // Package the data in a CMSampleBuffer.
|
| base::ScopedCFTypeRef<CMSampleBufferRef> sample;
|
| - status = CMSampleBufferCreate(
|
| - kCFAllocatorDefault,
|
| - data, // data_buffer
|
| - true, // data_ready
|
| - nullptr, // make_data_ready_callback
|
| - nullptr, // make_data_ready_refcon
|
| - format_, // format_description
|
| - 1, // num_samples
|
| - 0, // num_sample_timing_entries
|
| - nullptr, // &sample_timing_array
|
| - 1, // num_sample_size_entries
|
| - &data_size, // &sample_size_array
|
| - sample.InitializeInto());
|
| + status = CMSampleBufferCreate(kCFAllocatorDefault,
|
| + data, // data_buffer
|
| + true, // data_ready
|
| + nullptr, // make_data_ready_callback
|
| + nullptr, // make_data_ready_refcon
|
| + format_, // format_description
|
| + 1, // num_samples
|
| + 0, // num_sample_timing_entries
|
| + nullptr, // &sample_timing_array
|
| + 1, // num_sample_size_entries
|
| + &data_size, // &sample_size_array
|
| + sample.InitializeInto());
|
| if (status) {
|
| NOTIFY_STATUS("CMSampleBufferCreate()", status, SFT_PLATFORM_ERROR);
|
| return;
|
| @@ -772,10 +757,10 @@ void VTVideoDecodeAccelerator::DecodeTask(
|
| kVTDecodeFrame_EnableAsynchronousDecompression;
|
| status = VTDecompressionSessionDecodeFrame(
|
| session_,
|
| - sample, // sample_buffer
|
| - decode_flags, // decode_flags
|
| - reinterpret_cast<void*>(frame), // source_frame_refcon
|
| - nullptr); // &info_flags_out
|
| + sample, // sample_buffer
|
| + decode_flags, // decode_flags
|
| + reinterpret_cast<void*>(frame), // source_frame_refcon
|
| + nullptr); // &info_flags_out
|
| if (status) {
|
| NOTIFY_STATUS("VTDecompressionSessionDecodeFrame()", status,
|
| SFT_DECODE_ERROR);
|
| @@ -784,10 +769,9 @@ void VTVideoDecodeAccelerator::DecodeTask(
|
| }
|
|
|
| // This method may be called on any VideoToolbox thread.
|
| -void VTVideoDecodeAccelerator::Output(
|
| - void* source_frame_refcon,
|
| - OSStatus status,
|
| - CVImageBufferRef image_buffer) {
|
| +void VTVideoDecodeAccelerator::Output(void* source_frame_refcon,
|
| + OSStatus status,
|
| + CVImageBufferRef image_buffer) {
|
| if (status) {
|
| NOTIFY_STATUS("Decoding", status, SFT_DECODE_ERROR);
|
| return;
|
| @@ -809,8 +793,9 @@ void VTVideoDecodeAccelerator::Output(
|
|
|
| Frame* frame = reinterpret_cast<Frame*>(source_frame_refcon);
|
| frame->image.reset(image_buffer, base::scoped_policy::RETAIN);
|
| - gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
|
| - &VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
|
| + gpu_task_runner_->PostTask(
|
| + FROM_HERE,
|
| + base::Bind(&VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
|
| }
|
|
|
| void VTVideoDecodeAccelerator::DecodeDone(Frame* frame) {
|
| @@ -829,8 +814,9 @@ void VTVideoDecodeAccelerator::FlushTask(TaskType type) {
|
|
|
| // Always queue a task, even if FinishDelayedFrames() fails, so that
|
| // destruction always completes.
|
| - gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
|
| - &VTVideoDecodeAccelerator::FlushDone, weak_this_, type));
|
| + gpu_task_runner_->PostTask(
|
| + FROM_HERE,
|
| + base::Bind(&VTVideoDecodeAccelerator::FlushDone, weak_this_, type));
|
| }
|
|
|
| void VTVideoDecodeAccelerator::FlushDone(TaskType type) {
|
| @@ -876,8 +862,9 @@ void VTVideoDecodeAccelerator::AssignPictureBuffers(
|
| // Pictures are not marked as uncleared until after this method returns, and
|
| // they will be broken if they are used before that happens. So, schedule
|
| // future work after that happens.
|
| - gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
|
| - &VTVideoDecodeAccelerator::ProcessWorkQueues, weak_this_));
|
| + gpu_task_runner_->PostTask(
|
| + FROM_HERE,
|
| + base::Bind(&VTVideoDecodeAccelerator::ProcessWorkQueues, weak_this_));
|
| }
|
|
|
| void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) {
|
| @@ -982,9 +969,9 @@ bool VTVideoDecodeAccelerator::ProcessReorderQueue() {
|
| // If the next task is a flush (because there is a pending flush or because
|
| // the next frame is an IDR), then we don't need a full reorder buffer to send
|
| // the next frame.
|
| - bool flushing = !task_queue_.empty() &&
|
| - (task_queue_.front().type != TASK_FRAME ||
|
| - task_queue_.front().frame->is_idr);
|
| + bool flushing =
|
| + !task_queue_.empty() && (task_queue_.front().type != TASK_FRAME ||
|
| + task_queue_.front().frame->is_idr);
|
|
|
| size_t reorder_window = std::max(0, reorder_queue_.top()->reorder_window);
|
| if (flushing || reorder_queue_.size() > reorder_window) {
|
| @@ -1057,7 +1044,7 @@ bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
|
| frame.image.get(), gfx::GenericSharedMemoryId(),
|
| gfx::BufferFormat::YUV_420_BIPLANAR)) {
|
| NOTIFY_STATUS("Failed to initialize GLImageIOSurface", PLATFORM_FAILURE,
|
| - SFT_PLATFORM_ERROR);
|
| + SFT_PLATFORM_ERROR);
|
| }
|
|
|
| if (!bind_image_cb_.Run(picture_info->client_texture_id,
|
| @@ -1078,8 +1065,7 @@ bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
|
| // resolution changed. We should find the correct API to get the real
|
| // coded size and fix it.
|
| client_->PictureReady(media::Picture(picture_id, frame.bitstream_id,
|
| - gfx::Rect(frame.coded_size),
|
| - true));
|
| + gfx::Rect(frame.coded_size), true));
|
| return true;
|
| }
|
|
|
| @@ -1088,14 +1074,14 @@ void VTVideoDecodeAccelerator::NotifyError(
|
| VTVDASessionFailureType session_failure_type) {
|
| DCHECK_LT(session_failure_type, SFT_MAX + 1);
|
| if (!gpu_thread_checker_.CalledOnValidThread()) {
|
| - gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
|
| - &VTVideoDecodeAccelerator::NotifyError, weak_this_, vda_error_type,
|
| - session_failure_type));
|
| + gpu_task_runner_->PostTask(
|
| + FROM_HERE,
|
| + base::Bind(&VTVideoDecodeAccelerator::NotifyError, weak_this_,
|
| + vda_error_type, session_failure_type));
|
| } else if (state_ == STATE_DECODING) {
|
| state_ = STATE_ERROR;
|
| UMA_HISTOGRAM_ENUMERATION("Media.VTVDA.SessionFailureReason",
|
| - session_failure_type,
|
| - SFT_MAX + 1);
|
| + session_failure_type, SFT_MAX + 1);
|
| client_->NotifyError(vda_error_type);
|
| }
|
| }
|
| @@ -1162,4 +1148,4 @@ VTVideoDecodeAccelerator::GetSupportedProfiles() {
|
| return profiles;
|
| }
|
|
|
| -} // namespace content
|
| +} // namespace media
|
|
|