| OLD | NEW |
| (Empty) |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "content/common/gpu/media/gpu_video_encode_accelerator.h" | |
| 6 | |
| 7 #include "base/callback.h" | |
| 8 #include "base/command_line.h" | |
| 9 #include "base/logging.h" | |
| 10 #include "base/memory/ptr_util.h" | |
| 11 #include "base/memory/shared_memory.h" | |
| 12 #include "base/numerics/safe_math.h" | |
| 13 #include "base/sys_info.h" | |
| 14 #include "build/build_config.h" | |
| 15 #include "gpu/ipc/client/gpu_memory_buffer_impl.h" | |
| 16 #include "gpu/ipc/service/gpu_channel.h" | |
| 17 #include "gpu/ipc/service/gpu_channel_manager.h" | |
| 18 #include "ipc/ipc_message_macros.h" | |
| 19 #include "media/base/bind_to_current_loop.h" | |
| 20 #include "media/base/limits.h" | |
| 21 #include "media/base/video_frame.h" | |
| 22 #include "media/gpu/ipc/common/gpu_video_accelerator_util.h" | |
| 23 #include "media/gpu/ipc/common/media_messages.h" | |
| 24 | |
| 25 #if defined(OS_CHROMEOS) | |
| 26 #if defined(USE_V4L2_CODEC) | |
| 27 #include "content/common/gpu/media/v4l2_video_encode_accelerator.h" | |
| 28 #endif | |
| 29 #if defined(ARCH_CPU_X86_FAMILY) | |
| 30 #include "content/common/gpu/media/vaapi_video_encode_accelerator.h" | |
| 31 #endif | |
| 32 #elif defined(OS_ANDROID) && defined(ENABLE_WEBRTC) | |
| 33 #include "content/common/gpu/media/android_video_encode_accelerator.h" | |
| 34 #elif defined(OS_MACOSX) | |
| 35 #include "content/common/gpu/media/vt_video_encode_accelerator_mac.h" | |
| 36 #endif | |
| 37 | |
| 38 namespace content { | |
| 39 | |
| 40 static bool MakeDecoderContextCurrent( | |
| 41 const base::WeakPtr<gpu::GpuCommandBufferStub> stub) { | |
| 42 if (!stub) { | |
| 43 DLOG(ERROR) << "Stub is gone; won't MakeCurrent()."; | |
| 44 return false; | |
| 45 } | |
| 46 | |
| 47 if (!stub->decoder()->MakeCurrent()) { | |
| 48 DLOG(ERROR) << "Failed to MakeCurrent()"; | |
| 49 return false; | |
| 50 } | |
| 51 | |
| 52 return true; | |
| 53 } | |
| 54 | |
GpuVideoEncodeAccelerator::GpuVideoEncodeAccelerator(
    int32_t host_route_id,
    gpu::GpuCommandBufferStub* stub)
    : host_route_id_(host_route_id),
      stub_(stub),
      input_format_(media::PIXEL_FORMAT_UNKNOWN),
      output_buffer_size_(0),
      weak_this_factory_(this) {
  // Observe the stub's destruction so this object can tear down (and
  // self-delete) first; see OnWillDestroyStub().
  stub_->AddDestructionObserver(this);
  // Bind through a WeakPtr so the callback safely returns false if the stub
  // is destroyed before the encoder tries to make the context current.
  make_context_current_ =
      base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
}
| 67 | |
GpuVideoEncodeAccelerator::~GpuVideoEncodeAccelerator() {
  // This class can only be self-deleted from OnWillDestroyStub(), which means
  // the VEA has already been destroyed in there.
  DCHECK(!encoder_);
}
| 73 | |
| 74 bool GpuVideoEncodeAccelerator::Initialize( | |
| 75 media::VideoPixelFormat input_format, | |
| 76 const gfx::Size& input_visible_size, | |
| 77 media::VideoCodecProfile output_profile, | |
| 78 uint32_t initial_bitrate) { | |
| 79 DVLOG(2) << "GpuVideoEncodeAccelerator::Initialize(): " | |
| 80 "input_format=" << input_format | |
| 81 << ", input_visible_size=" << input_visible_size.ToString() | |
| 82 << ", output_profile=" << output_profile | |
| 83 << ", initial_bitrate=" << initial_bitrate; | |
| 84 DCHECK(!encoder_); | |
| 85 | |
| 86 if (!stub_->channel()->AddRoute(host_route_id_, stub_->stream_id(), this)) { | |
| 87 DLOG(ERROR) << "GpuVideoEncodeAccelerator::Initialize(): " | |
| 88 "failed to add route"; | |
| 89 return false; | |
| 90 } | |
| 91 | |
| 92 if (input_visible_size.width() > media::limits::kMaxDimension || | |
| 93 input_visible_size.height() > media::limits::kMaxDimension || | |
| 94 input_visible_size.GetArea() > media::limits::kMaxCanvas) { | |
| 95 DLOG(ERROR) << "GpuVideoEncodeAccelerator::Initialize(): " | |
| 96 "input_visible_size " << input_visible_size.ToString() | |
| 97 << " too large"; | |
| 98 return false; | |
| 99 } | |
| 100 | |
| 101 const gpu::GpuPreferences& gpu_preferences = | |
| 102 stub_->channel()->gpu_channel_manager()->gpu_preferences(); | |
| 103 | |
| 104 std::vector<GpuVideoEncodeAccelerator::CreateVEAFp> create_vea_fps = | |
| 105 CreateVEAFps(gpu_preferences); | |
| 106 // Try all possible encoders and use the first successful encoder. | |
| 107 for (size_t i = 0; i < create_vea_fps.size(); ++i) { | |
| 108 encoder_ = (*create_vea_fps[i])(); | |
| 109 if (encoder_ && encoder_->Initialize(input_format, | |
| 110 input_visible_size, | |
| 111 output_profile, | |
| 112 initial_bitrate, | |
| 113 this)) { | |
| 114 input_format_ = input_format; | |
| 115 input_visible_size_ = input_visible_size; | |
| 116 return true; | |
| 117 } | |
| 118 } | |
| 119 encoder_.reset(); | |
| 120 DLOG(ERROR) | |
| 121 << "GpuVideoEncodeAccelerator::Initialize(): VEA initialization failed"; | |
| 122 return false; | |
| 123 } | |
| 124 | |
// Dispatches incoming AcceleratedVideoEncoder IPC messages to the On*()
// handlers below. Returns true if the message was handled.
bool GpuVideoEncodeAccelerator::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuVideoEncodeAccelerator, message)
    IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderMsg_Encode, OnEncode)
    IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderMsg_Encode2, OnEncode2)
    IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderMsg_UseOutputBitstreamBuffer,
                        OnUseOutputBitstreamBuffer)
    IPC_MESSAGE_HANDLER(
        AcceleratedVideoEncoderMsg_RequestEncodingParametersChange,
        OnRequestEncodingParametersChange)
    IPC_MESSAGE_HANDLER(AcceleratedVideoEncoderMsg_Destroy, OnDestroy)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}
| 140 | |
| 141 void GpuVideoEncodeAccelerator::RequireBitstreamBuffers( | |
| 142 unsigned int input_count, | |
| 143 const gfx::Size& input_coded_size, | |
| 144 size_t output_buffer_size) { | |
| 145 Send(new AcceleratedVideoEncoderHostMsg_RequireBitstreamBuffers( | |
| 146 host_route_id_, input_count, input_coded_size, output_buffer_size)); | |
| 147 input_coded_size_ = input_coded_size; | |
| 148 output_buffer_size_ = output_buffer_size; | |
| 149 } | |
| 150 | |
// media::VideoEncodeAccelerator::Client implementation: forwards a finished
// output buffer (id, number of payload bytes, keyframe flag) to the host.
void GpuVideoEncodeAccelerator::BitstreamBufferReady(
    int32_t bitstream_buffer_id,
    size_t payload_size,
    bool key_frame) {
  Send(new AcceleratedVideoEncoderHostMsg_BitstreamBufferReady(
      host_route_id_, bitstream_buffer_id, payload_size, key_frame));
}
| 158 | |
// media::VideoEncodeAccelerator::Client implementation: forwards an encoder
// error to the host.
void GpuVideoEncodeAccelerator::NotifyError(
    media::VideoEncodeAccelerator::Error error) {
  Send(new AcceleratedVideoEncoderHostMsg_NotifyError(host_route_id_, error));
}
| 163 | |
// Called when the command buffer stub is about to go away (and directly from
// OnDestroy()). Tear-down order matters: stop receiving IPC for this route,
// stop observing the stub, destroy the encoder (so the destructor's
// DCHECK(!encoder_) holds), then self-delete.
void GpuVideoEncodeAccelerator::OnWillDestroyStub() {
  DCHECK(stub_);
  stub_->channel()->RemoveRoute(host_route_id_);
  stub_->RemoveDestructionObserver(this);
  encoder_.reset();
  delete this;
}
| 171 | |
| 172 // static | |
| 173 gpu::VideoEncodeAcceleratorSupportedProfiles | |
| 174 GpuVideoEncodeAccelerator::GetSupportedProfiles( | |
| 175 const gpu::GpuPreferences& gpu_preferences) { | |
| 176 media::VideoEncodeAccelerator::SupportedProfiles profiles; | |
| 177 std::vector<GpuVideoEncodeAccelerator::CreateVEAFp> create_vea_fps = | |
| 178 CreateVEAFps(gpu_preferences); | |
| 179 | |
| 180 for (size_t i = 0; i < create_vea_fps.size(); ++i) { | |
| 181 std::unique_ptr<media::VideoEncodeAccelerator> encoder = | |
| 182 (*create_vea_fps[i])(); | |
| 183 if (!encoder) | |
| 184 continue; | |
| 185 media::VideoEncodeAccelerator::SupportedProfiles vea_profiles = | |
| 186 encoder->GetSupportedProfiles(); | |
| 187 media::GpuVideoAcceleratorUtil::InsertUniqueEncodeProfiles(vea_profiles, | |
| 188 &profiles); | |
| 189 } | |
| 190 return media::GpuVideoAcceleratorUtil::ConvertMediaToGpuEncodeProfiles( | |
| 191 profiles); | |
| 192 } | |
| 193 | |
// static
// Builds the ordered list of platform VEA factory functions to try. The
// order matters: Initialize() keeps the first factory whose encoder
// initializes successfully.
std::vector<GpuVideoEncodeAccelerator::CreateVEAFp>
GpuVideoEncodeAccelerator::CreateVEAFps(
    const gpu::GpuPreferences& gpu_preferences) {
  std::vector<GpuVideoEncodeAccelerator::CreateVEAFp> create_vea_fps;
#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
  create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateV4L2VEA);
#endif
#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
  // VA-API encoding can be switched off via GPU preferences.
  if (!gpu_preferences.disable_vaapi_accelerated_video_encode)
    create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateVaapiVEA);
#endif
#if defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
  // Android hardware encoding can be switched off via GPU preferences.
  if (!gpu_preferences.disable_web_rtc_hw_encoding)
    create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateAndroidVEA);
#endif
#if defined(OS_MACOSX)
  create_vea_fps.push_back(&GpuVideoEncodeAccelerator::CreateVTVEA);
#endif
  return create_vea_fps;
}
| 215 | |
#if defined(OS_CHROMEOS) && defined(USE_V4L2_CODEC)
// static
// Factory for the V4L2-based encoder. Yields null when no encoder-capable
// V4L2 device can be opened.
std::unique_ptr<media::VideoEncodeAccelerator>
GpuVideoEncodeAccelerator::CreateV4L2VEA() {
  scoped_refptr<V4L2Device> device = V4L2Device::Create(V4L2Device::kEncoder);
  if (!device)
    return nullptr;
  return base::WrapUnique<media::VideoEncodeAccelerator>(
      new V4L2VideoEncodeAccelerator(device));
}
#endif
| 227 | |
#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
// static
// Factory for the VA-API-based encoder. Unlike the V4L2 case, no device
// lookup happens here before construction.
std::unique_ptr<media::VideoEncodeAccelerator>
GpuVideoEncodeAccelerator::CreateVaapiVEA() {
  std::unique_ptr<media::VideoEncodeAccelerator> vea(
      new VaapiVideoEncodeAccelerator());
  return vea;
}
#endif
| 236 | |
#if defined(OS_ANDROID) && defined(ENABLE_WEBRTC)
// static
// Factory for the Android MediaCodec-backed encoder; constructed
// unconditionally here.
std::unique_ptr<media::VideoEncodeAccelerator>
GpuVideoEncodeAccelerator::CreateAndroidVEA() {
  std::unique_ptr<media::VideoEncodeAccelerator> vea(
      new AndroidVideoEncodeAccelerator());
  return vea;
}
#endif
| 245 | |
#if defined(OS_MACOSX)
// static
// Factory for the VideoToolbox-backed encoder on Mac; constructed
// unconditionally here.
std::unique_ptr<media::VideoEncodeAccelerator>
GpuVideoEncodeAccelerator::CreateVTVEA() {
  std::unique_ptr<media::VideoEncodeAccelerator> vea(
      new VTVideoEncodeAccelerator());
  return vea;
}
#endif
| 254 | |
// Handles an Encode IPC from the renderer: wraps the shared-memory frame
// described by |params| into a media::VideoFrame and hands it to the
// encoder. All fields of |params| are untrusted; on any validation failure
// this reports kPlatformFailureError to the host and returns.
void GpuVideoEncodeAccelerator::OnEncode(
    const AcceleratedVideoEncoderMsg_Encode_Params& params) {
  DVLOG(3) << "GpuVideoEncodeAccelerator::OnEncode: frame_id = "
           << params.frame_id << ", buffer_size=" << params.buffer_size
           << ", force_keyframe=" << params.force_keyframe;
  // Only I420 frames are expected over this shared-memory path.
  DCHECK_EQ(media::PIXEL_FORMAT_I420, input_format_);

  // Wrap into a SharedMemory in the beginning, so that |params.buffer_handle|
  // is cleaned properly in case of an early return.
  std::unique_ptr<base::SharedMemory> shm(
      new base::SharedMemory(params.buffer_handle, true));

  if (!encoder_)
    return;

  if (params.frame_id < 0) {
    DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode(): invalid "
                "frame_id=" << params.frame_id;
    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
    return;
  }

  // MapAt() needs an offset aligned to the VM allocation granularity, so map
  // from the preceding aligned offset and index past the extra bytes below.
  // Checked arithmetic guards against overflow in the renderer-supplied
  // offset/size pair.
  const uint32_t aligned_offset =
      params.buffer_offset % base::SysInfo::VMAllocationGranularity();
  base::CheckedNumeric<off_t> map_offset = params.buffer_offset;
  map_offset -= aligned_offset;
  base::CheckedNumeric<size_t> map_size = params.buffer_size;
  map_size += aligned_offset;

  if (!map_offset.IsValid() || !map_size.IsValid()) {
    DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode():"
                << " invalid (buffer_offset,buffer_size)";
    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
    return;
  }

  if (!shm->MapAt(map_offset.ValueOrDie(), map_size.ValueOrDie())) {
    DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode(): "
                << "could not map frame_id=" << params.frame_id;
    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
    return;
  }

  // Skip the alignment padding to reach the actual frame data.
  uint8_t* shm_memory =
      reinterpret_cast<uint8_t*>(shm->memory()) + aligned_offset;
  scoped_refptr<media::VideoFrame> frame =
      media::VideoFrame::WrapExternalSharedMemory(
          input_format_,
          input_coded_size_,
          gfx::Rect(input_visible_size_),
          input_visible_size_,
          shm_memory,
          params.buffer_size,
          params.buffer_handle,
          params.buffer_offset,
          params.timestamp);
  if (!frame) {
    DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnEncode(): "
                << "could not create a frame";
    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
    return;
  }
  // Keep |shm| alive until the frame is destroyed, then notify the host the
  // input buffer is reusable. BindToCurrentLoop makes EncodeFrameFinished
  // run on this loop even if the frame dies on another thread; the WeakPtr
  // makes it a no-op if |this| is gone by then.
  frame->AddDestructionObserver(
      media::BindToCurrentLoop(
          base::Bind(&GpuVideoEncodeAccelerator::EncodeFrameFinished,
                     weak_this_factory_.GetWeakPtr(),
                     params.frame_id,
                     base::Passed(&shm))));
  encoder_->Encode(frame, params.force_keyframe);
}
| 325 | |
// Handles the GpuMemoryBuffer-based Encode IPC, which is intentionally
// unsupported here: the handler only logs and hits NOTREACHED().
// NOTE(review): the DVLOG indexes gpu_memory_buffer_handles[0] without a
// size check — presumably the sender always supplies at least one handle;
// verify against the message definition.
void GpuVideoEncodeAccelerator::OnEncode2(
    const AcceleratedVideoEncoderMsg_Encode_Params2& params) {
  DVLOG(3) << "GpuVideoEncodeAccelerator::OnEncode2: frame_id = "
           << params.frame_id << ", size=" << params.size.ToString()
           << ", force_keyframe=" << params.force_keyframe << ", handle type="
           << params.gpu_memory_buffer_handles[0].type;
  // Encoding GpuMemoryBuffer backed frames is not supported.
  NOTREACHED();
}
| 335 | |
// Handles a UseOutputBitstreamBuffer IPC: validates the renderer-supplied
// output buffer (non-negative id, at least the size the encoder asked for in
// RequireBitstreamBuffers()) and passes it to the encoder for filling.
void GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(
    int32_t buffer_id,
    base::SharedMemoryHandle buffer_handle,
    uint32_t buffer_size) {
  DVLOG(3) << "GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(): "
              "buffer_id=" << buffer_id
           << ", buffer_size=" << buffer_size;
  if (!encoder_)
    return;
  if (buffer_id < 0) {
    DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(): "
                   "invalid buffer_id=" << buffer_id;
    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
    return;
  }
  // |output_buffer_size_| was recorded in RequireBitstreamBuffers().
  if (buffer_size < output_buffer_size_) {
    DLOG(ERROR) << "GpuVideoEncodeAccelerator::OnUseOutputBitstreamBuffer(): "
                   "buffer too small for buffer_id=" << buffer_id;
    NotifyError(media::VideoEncodeAccelerator::kPlatformFailureError);
    return;
  }
  encoder_->UseOutputBitstreamBuffer(
      media::BitstreamBuffer(buffer_id, buffer_handle, buffer_size));
}
| 360 | |
// Handles the Destroy IPC. Reuses the stub tear-down path, which deletes
// |this|; nothing may touch the object afterwards.
void GpuVideoEncodeAccelerator::OnDestroy() {
  DVLOG(2) << "GpuVideoEncodeAccelerator::OnDestroy()";
  OnWillDestroyStub();
}
| 365 | |
// Handles a RequestEncodingParametersChange IPC: forwards the new bitrate
// and framerate to the encoder, if one is still alive.
void GpuVideoEncodeAccelerator::OnRequestEncodingParametersChange(
    uint32_t bitrate,
    uint32_t framerate) {
  DVLOG(2) << "GpuVideoEncodeAccelerator::OnRequestEncodingParametersChange(): "
              "bitrate=" << bitrate
           << ", framerate=" << framerate;
  if (!encoder_)
    return;
  encoder_->RequestEncodingParametersChange(bitrate, framerate);
}
| 376 | |
// Runs (on this loop, via BindToCurrentLoop in OnEncode()) once a wrapped
// input frame has been destroyed: tells the host that input buffer
// |frame_id| may be reused, and releases the shared memory it was backed by.
void GpuVideoEncodeAccelerator::EncodeFrameFinished(
    int32_t frame_id,
    std::unique_ptr<base::SharedMemory> shm) {
  Send(new AcceleratedVideoEncoderHostMsg_NotifyInputDone(host_route_id_,
                                                          frame_id));
  // Just let |shm| fall out of scope.
}
| 384 | |
// Sends |message| (ownership transferred) to the host over the stub's GPU
// channel.
void GpuVideoEncodeAccelerator::Send(IPC::Message* message) {
  stub_->channel()->Send(message);
}
| 388 | |
| 389 } // namespace content | |
| OLD | NEW |