Chromium Code Reviews

Unified Diff: media/gpu/vt_video_decode_accelerator_mac.cc

Issue 1882373004: Migrate content/common/gpu/media code to media/gpu (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Fix several more bot-identified build issues (created 4 years, 8 months ago)
Index: media/gpu/vt_video_decode_accelerator_mac.cc
diff --git a/content/common/gpu/media/vt_video_decode_accelerator_mac.cc b/media/gpu/vt_video_decode_accelerator_mac.cc
similarity index 85%
rename from content/common/gpu/media/vt_video_decode_accelerator_mac.cc
rename to media/gpu/vt_video_decode_accelerator_mac.cc
index d832870f496248a462f37ec6899c8a1fc12577a2..8beef26b60c6b48952fafb0bf6ea5b4cc4a34083 100644
--- a/content/common/gpu/media/vt_video_decode_accelerator_mac.cc
+++ b/media/gpu/vt_video_decode_accelerator_mac.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "content/common/gpu/media/vt_video_decode_accelerator_mac.h"
+#include "media/gpu/vt_video_decode_accelerator_mac.h"
#include <CoreVideo/CoreVideo.h>
#include <OpenGL/CGLIOSurface.h>
@@ -27,34 +27,30 @@
#include "ui/gl/gl_implementation.h"
#include "ui/gl/scoped_binders.h"
-using content_common_gpu_media::kModuleVt;
-using content_common_gpu_media::InitializeStubs;
-using content_common_gpu_media::IsVtInitialized;
-using content_common_gpu_media::StubPathMap;
+using media_gpu::kModuleVt;
+using media_gpu::InitializeStubs;
+using media_gpu::IsVtInitialized;
+using media_gpu::StubPathMap;
-#define NOTIFY_STATUS(name, status, session_failure) \
- do { \
- OSSTATUS_DLOG(ERROR, status) << name; \
- NotifyError(PLATFORM_FAILURE, session_failure); \
- } while (0)
+#define NOTIFY_STATUS(name, status, session_failure) \
+ do { \
+ OSSTATUS_DLOG(ERROR, status) << name; \
+ NotifyError(PLATFORM_FAILURE, session_failure); \
+ } while (0)
-namespace content {
+namespace media {
// Only H.264 with 4:2:0 chroma sampling is supported.
static const media::VideoCodecProfile kSupportedProfiles[] = {
- media::H264PROFILE_BASELINE,
- media::H264PROFILE_MAIN,
- media::H264PROFILE_EXTENDED,
- media::H264PROFILE_HIGH,
- // TODO(hubbe): Try to re-enable this again somehow. Currently it seems
- // that some codecs fail to check the profile during initialization and
- // then fail on the first frame decode, which currently results in a
- // pipeline failure.
- // media::H264PROFILE_HIGH10PROFILE,
- media::H264PROFILE_SCALABLEBASELINE,
- media::H264PROFILE_SCALABLEHIGH,
- media::H264PROFILE_STEREOHIGH,
- media::H264PROFILE_MULTIVIEWHIGH,
+ media::H264PROFILE_BASELINE, media::H264PROFILE_MAIN,
+ media::H264PROFILE_EXTENDED, media::H264PROFILE_HIGH,
+ // TODO(hubbe): Try to re-enable this again somehow. Currently it seems
+ // that some codecs fail to check the profile during initialization and
+ // then fail on the first frame decode, which currently results in a
+ // pipeline failure.
+ // media::H264PROFILE_HIGH10PROFILE,
+ media::H264PROFILE_SCALABLEBASELINE, media::H264PROFILE_SCALABLEHIGH,
+ media::H264PROFILE_STEREOHIGH, media::H264PROFILE_MULTIVIEWHIGH,
};
// Size to use for NALU length headers in AVC format (can be 1, 2, or 4).
@@ -72,8 +68,8 @@ static const int kNumPictureBuffers = media::limits::kMaxVideoFrames + 1;
static const int kMaxReorderQueueSize = 16;
// Build an |image_config| dictionary for VideoToolbox initialization.
-static base::ScopedCFTypeRef<CFMutableDictionaryRef>
-BuildImageConfig(CMVideoDimensions coded_dimensions) {
+static base::ScopedCFTypeRef<CFMutableDictionaryRef> BuildImageConfig(
+ CMVideoDimensions coded_dimensions) {
base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config;
// Note that 4:2:0 textures cannot be used directly as RGBA in OpenGL, but are
@@ -87,12 +83,10 @@ BuildImageConfig(CMVideoDimensions coded_dimensions) {
if (!cf_pixel_format.get() || !cf_width.get() || !cf_height.get())
return image_config;
- image_config.reset(
- CFDictionaryCreateMutable(
- kCFAllocatorDefault,
- 3, // capacity
- &kCFTypeDictionaryKeyCallBacks,
- &kCFTypeDictionaryValueCallBacks));
+ image_config.reset(CFDictionaryCreateMutable(
+ kCFAllocatorDefault,
+ 3, // capacity
+ &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks));
if (!image_config.get())
return image_config;
@@ -110,8 +104,10 @@ BuildImageConfig(CMVideoDimensions coded_dimensions) {
// successful.
//
// TODO(sandersd): Merge with ConfigureDecoder(), as the code is very similar.
-static bool CreateVideoToolboxSession(const uint8_t* sps, size_t sps_size,
- const uint8_t* pps, size_t pps_size,
+static bool CreateVideoToolboxSession(const uint8_t* sps,
+ size_t sps_size,
+ const uint8_t* pps,
+ size_t pps_size,
bool require_hardware) {
const uint8_t* data_ptrs[] = {sps, pps};
const size_t data_sizes[] = {sps_size, pps_size};
@@ -119,10 +115,10 @@ static bool CreateVideoToolboxSession(const uint8_t* sps, size_t sps_size,
base::ScopedCFTypeRef<CMFormatDescriptionRef> format;
OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
kCFAllocatorDefault,
- 2, // parameter_set_count
- data_ptrs, // &parameter_set_pointers
- data_sizes, // &parameter_set_sizes
- kNALUHeaderLength, // nal_unit_header_length
+ 2, // parameter_set_count
+ data_ptrs, // &parameter_set_pointers
+ data_sizes, // &parameter_set_sizes
+ kNALUHeaderLength, // nal_unit_header_length
format.InitializeInto());
if (status) {
OSSTATUS_DLOG(WARNING, status)
@@ -131,11 +127,10 @@ static bool CreateVideoToolboxSession(const uint8_t* sps, size_t sps_size,
}
base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config(
- CFDictionaryCreateMutable(
- kCFAllocatorDefault,
- 1, // capacity
- &kCFTypeDictionaryKeyCallBacks,
- &kCFTypeDictionaryValueCallBacks));
+ CFDictionaryCreateMutable(kCFAllocatorDefault,
+ 1, // capacity
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
if (!decoder_config.get())
return false;
@@ -143,8 +138,7 @@ static bool CreateVideoToolboxSession(const uint8_t* sps, size_t sps_size,
CFDictionarySetValue(
decoder_config,
// kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
- CFSTR("RequireHardwareAcceleratedVideoDecoder"),
- kCFBooleanTrue);
+ CFSTR("RequireHardwareAcceleratedVideoDecoder"), kCFBooleanTrue);
}
base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config(
@@ -157,10 +151,10 @@ static bool CreateVideoToolboxSession(const uint8_t* sps, size_t sps_size,
base::ScopedCFTypeRef<VTDecompressionSessionRef> session;
status = VTDecompressionSessionCreate(
kCFAllocatorDefault,
- format, // video_format_description
- decoder_config, // video_decoder_specification
- image_config, // destination_image_buffer_attributes
- &callback, // output_callback
+ format, // video_format_description
+ decoder_config, // video_decoder_specification
+ image_config, // destination_image_buffer_attributes
+ &callback, // output_callback
session.InitializeInto());
if (status) {
OSSTATUS_DLOG(WARNING, status)
@@ -234,36 +228,31 @@ bool InitializeVideoToolbox() {
}
// Route decoded frame callbacks back into the VTVideoDecodeAccelerator.
-static void OutputThunk(
- void* decompression_output_refcon,
- void* source_frame_refcon,
- OSStatus status,
- VTDecodeInfoFlags info_flags,
- CVImageBufferRef image_buffer,
- CMTime presentation_time_stamp,
- CMTime presentation_duration) {
+static void OutputThunk(void* decompression_output_refcon,
+ void* source_frame_refcon,
+ OSStatus status,
+ VTDecodeInfoFlags info_flags,
+ CVImageBufferRef image_buffer,
+ CMTime presentation_time_stamp,
+ CMTime presentation_duration) {
VTVideoDecodeAccelerator* vda =
reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon);
vda->Output(source_frame_refcon, status, image_buffer);
}
-VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) {
-}
+VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) {}
VTVideoDecodeAccelerator::Task::Task(const Task& other) = default;
-VTVideoDecodeAccelerator::Task::~Task() {
-}
+VTVideoDecodeAccelerator::Task::~Task() {}
VTVideoDecodeAccelerator::Frame::Frame(int32_t bitstream_id)
: bitstream_id(bitstream_id),
pic_order_cnt(0),
is_idr(false),
- reorder_window(0) {
-}
+ reorder_window(0) {}
-VTVideoDecodeAccelerator::Frame::~Frame() {
-}
+VTVideoDecodeAccelerator::Frame::~Frame() {}
VTVideoDecodeAccelerator::PictureInfo::PictureInfo(uint32_t client_texture_id,
uint32_t service_texture_id)
@@ -348,8 +337,7 @@ bool VTVideoDecodeAccelerator::Initialize(const Config& config,
// Count the session as successfully initialized.
UMA_HISTOGRAM_ENUMERATION("Media.VTVDA.SessionFailureReason",
- SFT_SUCCESSFULLY_INITIALIZED,
- SFT_MAX + 1);
+ SFT_SUCCESSFULLY_INITIALIZED, SFT_MAX + 1);
return true;
}
@@ -358,8 +346,8 @@ bool VTVideoDecodeAccelerator::FinishDelayedFrames() {
if (session_) {
OSStatus status = VTDecompressionSessionWaitForAsynchronousFrames(session_);
if (status) {
- NOTIFY_STATUS("VTDecompressionSessionWaitForAsynchronousFrames()",
- status, SFT_PLATFORM_ERROR);
+ NOTIFY_STATUS("VTDecompressionSessionWaitForAsynchronousFrames()", status,
+ SFT_PLATFORM_ERROR);
return false;
}
}
@@ -389,10 +377,10 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
format_.reset();
OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
kCFAllocatorDefault,
- nalu_data_ptrs.size(), // parameter_set_count
- &nalu_data_ptrs.front(), // &parameter_set_pointers
- &nalu_data_sizes.front(), // &parameter_set_sizes
- kNALUHeaderLength, // nal_unit_header_length
+ nalu_data_ptrs.size(), // parameter_set_count
+ &nalu_data_ptrs.front(), // &parameter_set_pointers
+ &nalu_data_sizes.front(), // &parameter_set_sizes
+ kNALUHeaderLength, // nal_unit_header_length
format_.InitializeInto());
if (status) {
NOTIFY_STATUS("CMVideoFormatDescriptionCreateFromH264ParameterSets()",
@@ -410,11 +398,10 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
// Prepare VideoToolbox configuration dictionaries.
base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config(
- CFDictionaryCreateMutable(
- kCFAllocatorDefault,
- 1, // capacity
- &kCFTypeDictionaryKeyCallBacks,
- &kCFTypeDictionaryValueCallBacks));
+ CFDictionaryCreateMutable(kCFAllocatorDefault,
+ 1, // capacity
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
if (!decoder_config.get()) {
DLOG(ERROR) << "Failed to create CFMutableDictionary";
NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
@@ -424,8 +411,7 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
CFDictionarySetValue(
decoder_config,
// kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
- CFSTR("EnableHardwareAcceleratedVideoDecoder"),
- kCFBooleanTrue);
+ CFSTR("EnableHardwareAcceleratedVideoDecoder"), kCFBooleanTrue);
base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config(
BuildImageConfig(coded_dimensions));
@@ -443,10 +429,10 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
session_.reset();
status = VTDecompressionSessionCreate(
kCFAllocatorDefault,
- format_, // video_format_description
- decoder_config, // video_decoder_specification
- image_config, // destination_image_buffer_attributes
- &callback_, // output_callback
+ format_, // video_format_description
+ decoder_config, // video_decoder_specification
+ image_config, // destination_image_buffer_attributes
+ &callback_, // output_callback
session_.InitializeInto());
if (status) {
NOTIFY_STATUS("VTDecompressionSessionCreate()", status,
@@ -460,8 +446,7 @@ bool VTVideoDecodeAccelerator::ConfigureDecoder() {
if (VTSessionCopyProperty(
session_,
// kVTDecompressionPropertyKey_UsingHardwareAcceleratedVideoDecoder
- CFSTR("UsingHardwareAcceleratedVideoDecoder"),
- kCFAllocatorDefault,
+ CFSTR("UsingHardwareAcceleratedVideoDecoder"), kCFAllocatorDefault,
cf_using_hardware.InitializeInto()) == 0) {
using_hardware = CFBooleanGetValue(cf_using_hardware);
}
@@ -611,8 +596,8 @@ void VTVideoDecodeAccelerator::DecodeTask(
if (sps->vui_parameters_present_flag &&
sps->bitstream_restriction_flag) {
- frame->reorder_window = std::min(sps->max_num_reorder_frames,
- kMaxReorderQueueSize - 1);
+ frame->reorder_window =
+ std::min(sps->max_num_reorder_frames, kMaxReorderQueueSize - 1);
}
}
has_slice = true;
@@ -671,8 +656,9 @@ void VTVideoDecodeAccelerator::DecodeTask(
// Keep everything in order by flushing first.
if (!FinishDelayedFrames())
return;
- gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
- &VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
+ gpu_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
return;
}
@@ -720,8 +706,8 @@ void VTVideoDecodeAccelerator::DecodeTask(
for (size_t i = 0; i < nalus.size(); i++) {
media::H264NALU& nalu = nalus[i];
uint32_t header = base::HostToNet32(static_cast<uint32_t>(nalu.size));
- status = CMBlockBufferReplaceDataBytes(
- &header, data, offset, kNALUHeaderLength);
+ status =
+ CMBlockBufferReplaceDataBytes(&header, data, offset, kNALUHeaderLength);
if (status) {
NOTIFY_STATUS("CMBlockBufferReplaceDataBytes()", status,
SFT_PLATFORM_ERROR);
@@ -739,19 +725,18 @@ void VTVideoDecodeAccelerator::DecodeTask(
// Package the data in a CMSampleBuffer.
base::ScopedCFTypeRef<CMSampleBufferRef> sample;
- status = CMSampleBufferCreate(
- kCFAllocatorDefault,
- data, // data_buffer
- true, // data_ready
- nullptr, // make_data_ready_callback
- nullptr, // make_data_ready_refcon
- format_, // format_description
- 1, // num_samples
- 0, // num_sample_timing_entries
- nullptr, // &sample_timing_array
- 1, // num_sample_size_entries
- &data_size, // &sample_size_array
- sample.InitializeInto());
+ status = CMSampleBufferCreate(kCFAllocatorDefault,
+ data, // data_buffer
+ true, // data_ready
+ nullptr, // make_data_ready_callback
+ nullptr, // make_data_ready_refcon
+ format_, // format_description
+ 1, // num_samples
+ 0, // num_sample_timing_entries
+ nullptr, // &sample_timing_array
+ 1, // num_sample_size_entries
+ &data_size, // &sample_size_array
+ sample.InitializeInto());
if (status) {
NOTIFY_STATUS("CMSampleBufferCreate()", status, SFT_PLATFORM_ERROR);
return;
@@ -766,10 +751,10 @@ void VTVideoDecodeAccelerator::DecodeTask(
kVTDecodeFrame_EnableAsynchronousDecompression;
status = VTDecompressionSessionDecodeFrame(
session_,
- sample, // sample_buffer
- decode_flags, // decode_flags
- reinterpret_cast<void*>(frame), // source_frame_refcon
- nullptr); // &info_flags_out
+ sample, // sample_buffer
+ decode_flags, // decode_flags
+ reinterpret_cast<void*>(frame), // source_frame_refcon
+ nullptr); // &info_flags_out
if (status) {
NOTIFY_STATUS("VTDecompressionSessionDecodeFrame()", status,
SFT_DECODE_ERROR);
@@ -778,10 +763,9 @@ void VTVideoDecodeAccelerator::DecodeTask(
}
// This method may be called on any VideoToolbox thread.
-void VTVideoDecodeAccelerator::Output(
- void* source_frame_refcon,
- OSStatus status,
- CVImageBufferRef image_buffer) {
+void VTVideoDecodeAccelerator::Output(void* source_frame_refcon,
+ OSStatus status,
+ CVImageBufferRef image_buffer) {
if (status) {
NOTIFY_STATUS("Decoding", status, SFT_DECODE_ERROR);
return;
@@ -803,8 +787,9 @@ void VTVideoDecodeAccelerator::Output(
Frame* frame = reinterpret_cast<Frame*>(source_frame_refcon);
frame->image.reset(image_buffer, base::scoped_policy::RETAIN);
- gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
- &VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
+ gpu_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
}
void VTVideoDecodeAccelerator::DecodeDone(Frame* frame) {
@@ -823,8 +808,9 @@ void VTVideoDecodeAccelerator::FlushTask(TaskType type) {
// Always queue a task, even if FinishDelayedFrames() fails, so that
// destruction always completes.
- gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
- &VTVideoDecodeAccelerator::FlushDone, weak_this_, type));
+ gpu_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VTVideoDecodeAccelerator::FlushDone, weak_this_, type));
}
void VTVideoDecodeAccelerator::FlushDone(TaskType type) {
@@ -870,8 +856,9 @@ void VTVideoDecodeAccelerator::AssignPictureBuffers(
// Pictures are not marked as uncleared until after this method returns, and
// they will be broken if they are used before that happens. So, schedule
// future work after that happens.
- gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
- &VTVideoDecodeAccelerator::ProcessWorkQueues, weak_this_));
+ gpu_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VTVideoDecodeAccelerator::ProcessWorkQueues, weak_this_));
}
void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) {
@@ -976,9 +963,9 @@ bool VTVideoDecodeAccelerator::ProcessReorderQueue() {
// If the next task is a flush (because there is a pending flush or becuase
// the next frame is an IDR), then we don't need a full reorder buffer to send
// the next frame.
- bool flushing = !task_queue_.empty() &&
- (task_queue_.front().type != TASK_FRAME ||
- task_queue_.front().frame->is_idr);
+ bool flushing =
+ !task_queue_.empty() && (task_queue_.front().type != TASK_FRAME ||
+ task_queue_.front().frame->is_idr);
size_t reorder_window = std::max(0, reorder_queue_.top()->reorder_window);
if (flushing || reorder_queue_.size() > reorder_window) {
@@ -1051,7 +1038,7 @@ bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
frame.image.get(), gfx::GenericSharedMemoryId(),
gfx::BufferFormat::YUV_420_BIPLANAR)) {
NOTIFY_STATUS("Failed to initialize GLImageIOSurface", PLATFORM_FAILURE,
- SFT_PLATFORM_ERROR);
+ SFT_PLATFORM_ERROR);
}
if (!bind_image_cb_.Run(picture_info->client_texture_id,
@@ -1072,8 +1059,7 @@ bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
// resolution changed. We should find the correct API to get the real
// coded size and fix it.
client_->PictureReady(media::Picture(picture_id, frame.bitstream_id,
- gfx::Rect(frame.coded_size),
- true));
+ gfx::Rect(frame.coded_size), true));
return true;
}
@@ -1082,14 +1068,14 @@ void VTVideoDecodeAccelerator::NotifyError(
VTVDASessionFailureType session_failure_type) {
DCHECK_LT(session_failure_type, SFT_MAX + 1);
if (!gpu_thread_checker_.CalledOnValidThread()) {
- gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
- &VTVideoDecodeAccelerator::NotifyError, weak_this_, vda_error_type,
- session_failure_type));
+ gpu_task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&VTVideoDecodeAccelerator::NotifyError, weak_this_,
+ vda_error_type, session_failure_type));
} else if (state_ == STATE_DECODING) {
state_ = STATE_ERROR;
UMA_HISTOGRAM_ENUMERATION("Media.VTVDA.SessionFailureReason",
- session_failure_type,
- SFT_MAX + 1);
+ session_failure_type, SFT_MAX + 1);
client_->NotifyError(vda_error_type);
}
}
@@ -1156,4 +1142,4 @@ VTVideoDecodeAccelerator::GetSupportedProfiles() {
return profiles;
}
-} // namespace content
+} // namespace media
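
Note on the NOTIFY_STATUS reformatting near the top of this diff: the patch only re-indents the macro; the do { ... } while (0) wrapper is kept so the two-statement body expands as a single statement and remains safe inside an unbraced if/else. Below is a minimal standalone sketch of that idiom; LogAndFail() is a hypothetical stand-in for the OSSTATUS_DLOG + NotifyError pair and is not part of this change.

// Illustrative sketch only; not part of the patch.
#include <iostream>

// Hypothetical stand-in for OSSTATUS_DLOG(ERROR, status) << name and
// NotifyError(PLATFORM_FAILURE, ...).
static void LogAndFail(const char* name, int status) {
  std::cerr << name << " failed with status " << status << std::endl;
}

// Wrapping the body in do { ... } while (0) makes the macro expand to a
// single statement, so it composes safely with if/else without braces.
#define NOTIFY_STATUS_SKETCH(name, status) \
  do {                                     \
    LogAndFail(name, status);              \
  } while (0)

int main() {
  int status = -12909;  // arbitrary nonzero OSStatus-style value for illustration
  if (status)
    NOTIFY_STATUS_SKETCH("VTDecompressionSessionDecodeFrame()", status);
  else
    std::cout << "decode ok" << std::endl;
  return 0;
}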
