Chromium Code Reviews| Index: remoting/codec/video_encoder_vpx.cc |
| diff --git a/remoting/codec/video_encoder_vpx.cc b/remoting/codec/video_encoder_vpx.cc |
| index 1d0f6eac2ba5e29c6e8bf26fd53bd219cdc8663e..20a7c4d85bdc1155ffaca9d1d6db9ae137831bcd 100644 |
| --- a/remoting/codec/video_encoder_vpx.cc |
| +++ b/remoting/codec/video_encoder_vpx.cc |
| @@ -7,9 +7,9 @@ |
| #include "base/bind.h" |
| #include "base/logging.h" |
| #include "base/sys_info.h" |
| -#include "media/base/yuv_convert.h" |
| #include "remoting/base/util.h" |
| #include "remoting/proto/video.pb.h" |
| +#include "third_party/libyuv/include/libyuv/convert_from_argb.h" |
| #include "third_party/webrtc/modules/desktop_capture/desktop_frame.h" |
| #include "third_party/webrtc/modules/desktop_capture/desktop_geometry.h" |
| #include "third_party/webrtc/modules/desktop_capture/desktop_region.h" |
| @@ -24,10 +24,17 @@ namespace remoting { |
| namespace { |
| +// Number of bytes in an RGBx pixel. |
| +const int kBytesPerRgbPixel = 4; |
| + |
| // Defines the dimension of a macro block. This is used to compute the active |
| // map for the encoder. |
| const int kMacroBlockSize = 16; |
| +// Magic encoder profile numbers for I420 and I444 input formats. |
| +const int kVp9I420ProfileNumber = 0; |
| +const int kVp9I444ProfileNumber = 1; |
| + |
| void SetCommonCodecParameters(const webrtc::DesktopSize& size, |
| vpx_codec_enc_cfg_t* config) { |
| config->g_w = size.width(); |
| @@ -90,7 +97,7 @@ ScopedVpxCodec CreateVP8Codec(const webrtc::DesktopSize& size) { |
| return codec.Pass(); |
| } |
| -ScopedVpxCodec CreateVP9Codec(const webrtc::DesktopSize& size) { |
| +ScopedVpxCodec CreateVP9Codec(bool use_i444, const webrtc::DesktopSize& size) { |
| ScopedVpxCodec codec(new vpx_codec_ctx_t); |
| // Configure the encoder. |
| @@ -103,8 +110,8 @@ ScopedVpxCodec CreateVP9Codec(const webrtc::DesktopSize& size) { |
| SetCommonCodecParameters(size, &config); |
| - // Configure VP9 for I420 source frames. |
| - config.g_profile = 0; |
| + // Configure VP9 for I420 or I444 source frames. |
| + config.g_profile = use_i444 ? kVp9I444ProfileNumber : kVp9I420ProfileNumber; |
| // Disable quantization entirely, putting the encoder in "lossless" mode. |
| config.rc_min_quantizer = 0; |
| @@ -128,18 +135,90 @@ ScopedVpxCodec CreateVP9Codec(const webrtc::DesktopSize& size) { |
| return codec.Pass(); |
| } |
| -} // namespace |
| +void CreateImage(bool use_i444, |
| + const webrtc::DesktopSize& size, |
| + scoped_ptr<vpx_image_t>* out_image, |
| + scoped_ptr<uint8[]>* out_image_buffer) { |
| + DCHECK(!size.is_empty()); |
| + |
| + scoped_ptr<vpx_image_t> image(new vpx_image_t()); |
| + memset(image.get(), 0, sizeof(vpx_image_t)); |
| + |
| + // libvpx seems to require both to be assigned. |
| + image->d_w = size.width(); |
| + image->w = size.width(); |
| + image->d_h = size.height(); |
| + image->h = size.height(); |
| + |
| + // libvpx should derive chroma shifts from |fmt| but currently has a bug: |
| + // https://code.google.com/p/webm/issues/detail?id=627 |
| + if (use_i444) { |
| + image->fmt = VPX_IMG_FMT_I444; |
| + image->x_chroma_shift = 0; |
| + image->y_chroma_shift = 0; |
| + } else { // I420 |
| + image->fmt = VPX_IMG_FMT_YV12; |
| + image->x_chroma_shift = 1; |
| + image->y_chroma_shift = 1; |
| + } |
| + |
| + // libyuv's fast-path requires 16-byte aligned pointers and strides, so pad |
| + // the Y, U and V planes' strides to multiples of 16 bytes. |
| + const int y_stride = ((image->w - 1) & ~15) + 16; |
| + const int uv_unaligned_stride = y_stride >> image->x_chroma_shift; |
| + const int uv_stride = ((uv_unaligned_stride - 1) & ~15) + 16; |
| + |
| + // libvpx accesses the source image in macro blocks, and will over-read |
| + // if the image is not padded out to the next macroblock: crbug.com/119633. |
| + // Pad the Y, U and V planes' height out to compensate. |
| + // Assuming macroblocks are 16x16, aligning the planes' strides above also |
| + // macroblock aligned them. |
| + DCHECK_EQ(16, kMacroBlockSize); |
| + const int y_rows = ((image->h - 1) & ~(kMacroBlockSize-1)) + kMacroBlockSize; |
| + const int uv_rows = y_rows >> image->y_chroma_shift; |
| + |
| + // Allocate a YUV buffer large enough for the aligned data & padding. |
| + const int buffer_size = y_stride * y_rows + 2*uv_stride * uv_rows; |
| + scoped_ptr<uint8[]> image_buffer(new uint8[buffer_size]); |
| + |
| + // Reset image value to 128 so we just need to fill in the y plane. |
| + memset(image_buffer.get(), 128, buffer_size); |
| + |
| + // Fill in the information for |image_|. |
| + unsigned char* uchar_buffer = |
| + reinterpret_cast<unsigned char*>(image_buffer.get()); |
| + image->planes[0] = uchar_buffer; |
| + image->planes[1] = image->planes[0] + y_stride * y_rows; |
| + image->planes[2] = image->planes[1] + uv_stride * uv_rows; |
| + image->stride[0] = y_stride; |
| + image->stride[1] = uv_stride; |
| + image->stride[2] = uv_stride; |
| + |
| + *out_image = image.Pass(); |
| + *out_image_buffer = image_buffer.Pass(); |
| +} |
| + |
| +} // namespace |
| // static |
| scoped_ptr<VideoEncoderVpx> VideoEncoderVpx::CreateForVP8() { |
| return scoped_ptr<VideoEncoderVpx>( |
| - new VideoEncoderVpx(base::Bind(&CreateVP8Codec))); |
| + new VideoEncoderVpx(base::Bind(&CreateVP8Codec), |
| + base::Bind(&CreateImage, false))); |
| +} |
| + |
| +// static |
| +scoped_ptr<VideoEncoderVpx> VideoEncoderVpx::CreateForVP9I420() { |
| + return scoped_ptr<VideoEncoderVpx>( |
| + new VideoEncoderVpx(base::Bind(&CreateVP9Codec, false), |
| + base::Bind(&CreateImage, false))); |
| } |
| // static |
| -scoped_ptr<VideoEncoderVpx> VideoEncoderVpx::CreateForVP9() { |
| +scoped_ptr<VideoEncoderVpx> VideoEncoderVpx::CreateForVP9I444() { |
| return scoped_ptr<VideoEncoderVpx>( |
| - new VideoEncoderVpx(base::Bind(&CreateVP9Codec))); |
| + new VideoEncoderVpx(base::Bind(&CreateVP9Codec, true), |
| + base::Bind(&CreateImage, true))); |
| } |
| VideoEncoderVpx::~VideoEncoderVpx() {} |
| @@ -233,8 +312,10 @@ scoped_ptr<VideoPacket> VideoEncoderVpx::Encode( |
| return packet.Pass(); |
| } |
| -VideoEncoderVpx::VideoEncoderVpx(const InitializeCodecCallback& init_codec) |
| - : init_codec_(init_codec), |
| +VideoEncoderVpx::VideoEncoderVpx(const CreateCodecCallback& create_codec, |
| + const CreateImageCallback& create_image) |
| + : create_codec_(create_codec), |
| + create_image_(create_image), |
| active_map_width_(0), |
| active_map_height_(0) { |
| } |
| @@ -242,60 +323,16 @@ VideoEncoderVpx::VideoEncoderVpx(const InitializeCodecCallback& init_codec) |
| bool VideoEncoderVpx::Initialize(const webrtc::DesktopSize& size) { |
| codec_.reset(); |
| - image_.reset(new vpx_image_t()); |
| - memset(image_.get(), 0, sizeof(vpx_image_t)); |
| - |
| - image_->fmt = VPX_IMG_FMT_YV12; |
| - |
| - // libvpx seems to require both to be assigned. |
| - image_->d_w = size.width(); |
| - image_->w = size.width(); |
| - image_->d_h = size.height(); |
| - image_->h = size.height(); |
| - |
| - // libvpx should derive this from|fmt| but currently has a bug: |
| - // https://code.google.com/p/webm/issues/detail?id=627 |
| - image_->x_chroma_shift = 1; |
| - image_->y_chroma_shift = 1; |
| + // (Re)Create the VPX image structure and pixel buffer. |
| + create_image_.Run(size, &image_, &image_buffer_); |
| // Initialize active map. |
| active_map_width_ = (image_->w + kMacroBlockSize - 1) / kMacroBlockSize; |
| active_map_height_ = (image_->h + kMacroBlockSize - 1) / kMacroBlockSize; |
| active_map_.reset(new uint8[active_map_width_ * active_map_height_]); |
| - // libyuv's fast-path requires 16-byte aligned pointers and strides, so pad |
| - // the Y, U and V planes' strides to multiples of 16 bytes. |
| - const int y_stride = ((image_->w - 1) & ~15) + 16; |
| - const int uv_unaligned_stride = y_stride / 2; |
| - const int uv_stride = ((uv_unaligned_stride - 1) & ~15) + 16; |
| - |
| - // libvpx accesses the source image in macro blocks, and will over-read |
| - // if the image is not padded out to the next macroblock: crbug.com/119633. |
| - // Pad the Y, U and V planes' height out to compensate. |
| - // Assuming macroblocks are 16x16, aligning the planes' strides above also |
| - // macroblock aligned them. |
| - DCHECK_EQ(16, kMacroBlockSize); |
| - const int y_rows = active_map_height_ * kMacroBlockSize; |
| - const int uv_rows = y_rows / 2; |
| - |
| - // Allocate a YUV buffer large enough for the aligned data & padding. |
| - const int buffer_size = y_stride * y_rows + 2*uv_stride * uv_rows; |
| - yuv_image_.reset(new uint8[buffer_size]); |
| - |
| - // Reset image value to 128 so we just need to fill in the y plane. |
| - memset(yuv_image_.get(), 128, buffer_size); |
| - |
| - // Fill in the information for |image_|. |
| - unsigned char* image = reinterpret_cast<unsigned char*>(yuv_image_.get()); |
| - image_->planes[0] = image; |
| - image_->planes[1] = image_->planes[0] + y_stride * y_rows; |
| - image_->planes[2] = image_->planes[1] + uv_stride * uv_rows; |
| - image_->stride[0] = y_stride; |
| - image_->stride[1] = uv_stride; |
| - image_->stride[2] = uv_stride; |
| - |
| - // Initialize the codec. |
| - codec_ = init_codec_.Run(size); |
| + // (Re)Initialize the codec. |
| + codec_ = create_codec_.Run(size); |
| return codec_; |
| } |
| @@ -336,13 +373,43 @@ void VideoEncoderVpx::PrepareImage(const webrtc::DesktopFrame& frame, |
| uint8* y_data = image_->planes[0]; |
| uint8* u_data = image_->planes[1]; |
| uint8* v_data = image_->planes[2]; |
| - for (webrtc::DesktopRegion::Iterator r(*updated_region); !r.IsAtEnd(); |
| - r.Advance()) { |
| - const webrtc::DesktopRect& rect = r.rect(); |
| - ConvertRGB32ToYUVWithRect( |
| - rgb_data, y_data, u_data, v_data, |
| - rect.left(), rect.top(), rect.width(), rect.height(), |
| - rgb_stride, y_stride, uv_stride); |
| + |
| + switch (image_->fmt) { |
| + |
|
Sergey Ulanov
2014/05/04 19:09:17
nit: remove this line
Wez
2014/05/09 14:47:33
Done.
|
| + case VPX_IMG_FMT_I444: |
|
Sergey Ulanov
2014/05/04 19:09:17
nit: indent 2 spaces
Wez
2014/05/09 14:47:33
Done.
|
| + for (webrtc::DesktopRegion::Iterator r(*updated_region); !r.IsAtEnd(); |
| + r.Advance()) { |
| + const webrtc::DesktopRect& rect = r.rect(); |
| + int rgb_offset = rgb_stride * rect.top() + |
| + rect.left() * kBytesPerRgbPixel; |
| + int yuv_offset = uv_stride * rect.top() + rect.left(); |
| + libyuv::ARGBToI444(rgb_data + rgb_offset, rgb_stride, |
| + y_data + yuv_offset, y_stride, |
| + u_data + yuv_offset, uv_stride, |
| + v_data + yuv_offset, uv_stride, |
| + rect.width(), rect.height()); |
| + } |
| + break; |
| + |
| + case VPX_IMG_FMT_YV12: |
| + for (webrtc::DesktopRegion::Iterator r(*updated_region); !r.IsAtEnd(); |
| + r.Advance()) { |
| + const webrtc::DesktopRect& rect = r.rect(); |
| + int rgb_offset = rgb_stride * rect.top() + |
| + rect.left() * kBytesPerRgbPixel; |
| + int y_offset = (y_stride * rect.top()) + rect.left(); |
|
Sergey Ulanov
2014/05/04 19:09:17
nit: remove extra parentheses or add them on the f
Wez
2014/05/09 14:47:33
Done.
|
| + int uv_offset = uv_stride * rect.top() / 2 + rect.left() / 2; |
| + libyuv::ARGBToI420(rgb_data + rgb_offset, rgb_stride, |
| + y_data + y_offset, y_stride, |
| + u_data + uv_offset, uv_stride, |
| + v_data + uv_offset, uv_stride, |
| + rect.width(), rect.height()); |
| + } |
| + break; |
| + |
| + default: |
| + NOTREACHED(); |
| + |
| } |
| } |