| Index: media/filters/vpx_video_decoder.cc
 | 
| diff --git a/media/filters/vpx_video_decoder.cc b/media/filters/vpx_video_decoder.cc
 | 
| index 2d60d917a43d1855ee1226ac0c74dcf726002e30..437187ba4d979c493d6056b539dea5ab833352fa 100644
 | 
| --- a/media/filters/vpx_video_decoder.cc
 | 
| +++ b/media/filters/vpx_video_decoder.cc
 | 
| @@ -33,6 +33,7 @@
 | 
|  #include "media/base/bind_to_current_loop.h"
 | 
|  #include "media/base/decoder_buffer.h"
 | 
|  #include "media/base/media_switches.h"
 | 
| +#include "media/video/gpu_memory_buffer_video_frame_pool.h"
 | 
|  
 | 
|  // Include libvpx header files.
 | 
|  // VPX_CODEC_DISABLE_COMPAT excludes parts of the libvpx API that provide
 | 
| @@ -358,7 +359,14 @@ void VpxVideoDecoder::MemoryPool::OnVideoFrameDestroyed(
 | 
|  }
 | 
|  
 | 
|  VpxVideoDecoder::VpxVideoDecoder()
 | 
| -    : state_(kUninitialized), vpx_codec_(nullptr), vpx_codec_alpha_(nullptr) {
 | 
| +    : VpxVideoDecoder(std::unique_ptr<GpuMemoryBufferVideoFramePool>()) {}
 | 
| +
 | 
| +VpxVideoDecoder::VpxVideoDecoder(
 | 
| +    std::unique_ptr<GpuMemoryBufferVideoFramePool> gpu_video_frame_pool)
 | 
| +    : state_(kUninitialized),
 | 
| +      vpx_codec_(nullptr),
 | 
| +      vpx_codec_alpha_(nullptr),
 | 
| +      gpu_video_frame_pool_(std::move(gpu_video_frame_pool)) {
 | 
|    thread_checker_.DetachFromThread();
 | 
|  }
 | 
|  
 | 
| @@ -570,9 +578,13 @@ bool VpxVideoDecoder::VpxDecode(const scoped_refptr<DecoderBuffer>& buffer,
 | 
|      return false;
 | 
|    }
 | 
|  
 | 
| -  if (!CopyVpxImageToVideoFrame(vpx_image, video_frame))
 | 
| +  if (!CopyVpxImageToVideoFrame(vpx_image, buffer, video_frame))
 | 
|      return false;
 | 
|  
 | 
| +  // VP8 alpha vpx_image_t is missing. Regard it as success.
 | 
| +  if (!(*video_frame))
 | 
| +    return true;
 | 
| +
 | 
|    (*video_frame)->set_timestamp(base::TimeDelta::FromMicroseconds(timestamp));
 | 
|  
 | 
|    // Default to the color space from the config, but if the bistream specifies
 | 
| @@ -586,74 +598,12 @@ bool VpxVideoDecoder::VpxDecode(const scoped_refptr<DecoderBuffer>& buffer,
 | 
|        ->metadata()
 | 
|        ->SetInteger(VideoFrameMetadata::COLOR_SPACE, color_space);
 | 
|  
 | 
| -  if (!vpx_codec_alpha_)
 | 
| -    return true;
 | 
| -
 | 
| -  if (buffer->side_data_size() < 8) {
 | 
| -    // TODO(mcasas): Is this a warning or an error?
 | 
| -    DLOG(WARNING) << "Making Alpha channel opaque due to missing input";
 | 
| -    const uint32_t kAlphaOpaqueValue = 255;
 | 
| -    libyuv::SetPlane((*video_frame)->visible_data(VideoFrame::kAPlane),
 | 
| -                     (*video_frame)->stride(VideoFrame::kAPlane),
 | 
| -                     (*video_frame)->visible_rect().width(),
 | 
| -                     (*video_frame)->visible_rect().height(),
 | 
| -                     kAlphaOpaqueValue);
 | 
| -    return true;
 | 
| -  }
 | 
| -
 | 
| -  // First 8 bytes of side data is |side_data_id| in big endian.
 | 
| -  const uint64_t side_data_id = base::NetToHost64(
 | 
| -      *(reinterpret_cast<const uint64_t*>(buffer->side_data())));
 | 
| -  if (side_data_id != 1)
 | 
| -    return true;
 | 
| -
 | 
| -  // Try and decode buffer->side_data() minus the first 8 bytes as a full frame.
 | 
| -  int64_t timestamp_alpha = buffer->timestamp().InMicroseconds();
 | 
| -  void* user_priv_alpha = reinterpret_cast<void*>(&timestamp_alpha);
 | 
| -  {
 | 
| -    TRACE_EVENT1("media", "vpx_codec_decode_alpha", "timestamp_alpha",
 | 
| -                 timestamp_alpha);
 | 
| -    vpx_codec_err_t status = vpx_codec_decode(
 | 
| -        vpx_codec_alpha_, buffer->side_data() + 8, buffer->side_data_size() - 8,
 | 
| -        user_priv_alpha, 0 /* deadline */);
 | 
| -    if (status != VPX_CODEC_OK) {
 | 
| -      DLOG(ERROR) << "vpx_codec_decode() failed for the alpha: "
 | 
| -                  << vpx_codec_error(vpx_codec_);
 | 
| -      return false;
 | 
| -    }
 | 
| -  }
 | 
| -
 | 
| -  vpx_codec_iter_t iter_alpha = NULL;
 | 
| -  const vpx_image_t* vpx_image_alpha =
 | 
| -      vpx_codec_get_frame(vpx_codec_alpha_, &iter_alpha);
 | 
| -  if (!vpx_image_alpha) {
 | 
| -    *video_frame = nullptr;
 | 
| -    return true;
 | 
| -  }
 | 
| -
 | 
| -  if (vpx_image_alpha->user_priv != user_priv_alpha) {
 | 
| -    DLOG(ERROR) << "Invalid output timestamp on alpha.";
 | 
| -    return false;
 | 
| -  }
 | 
| -
 | 
| -  if (vpx_image_alpha->d_h != vpx_image->d_h ||
 | 
| -      vpx_image_alpha->d_w != vpx_image->d_w) {
 | 
| -    DLOG(ERROR) << "The alpha plane dimensions are not the same as the "
 | 
| -                   "image dimensions.";
 | 
| -    return false;
 | 
| -  }
 | 
| -
 | 
| -  libyuv::CopyPlane(vpx_image_alpha->planes[VPX_PLANE_Y],
 | 
| -                    vpx_image_alpha->stride[VPX_PLANE_Y],
 | 
| -                    (*video_frame)->visible_data(VideoFrame::kAPlane),
 | 
| -                    (*video_frame)->stride(VideoFrame::kAPlane),
 | 
| -                    (*video_frame)->visible_rect().width(),
 | 
| -                    (*video_frame)->visible_rect().height());
 | 
|    return true;
 | 
|  }
 | 
|  
 | 
|  bool VpxVideoDecoder::CopyVpxImageToVideoFrame(
 | 
|      const struct vpx_image* vpx_image,
 | 
| +    const scoped_refptr<DecoderBuffer>& buffer,
 | 
|      scoped_refptr<VideoFrame>* video_frame) {
 | 
|    DCHECK(vpx_image);
 | 
|  
 | 
| @@ -710,24 +660,87 @@ bool VpxVideoDecoder::CopyVpxImageToVideoFrame(
 | 
|    DCHECK(codec_format == PIXEL_FORMAT_YV12 ||
 | 
|           codec_format == PIXEL_FORMAT_YV12A);
 | 
|  
 | 
| -  *video_frame = frame_pool_.CreateFrame(
 | 
| -      codec_format, visible_size, gfx::Rect(visible_size),
 | 
| -      config_.natural_size(), kNoTimestamp());
 | 
| -  if (!(*video_frame))
 | 
| -    return false;
 | 
| -
 | 
| +  std::unique_ptr<VideoFrameFuture> video_frame_future =
 | 
| +      gpu_video_frame_pool_->CreateFrame(
 | 
| +          codec_format, visible_size, gfx::Rect(visible_size),
 | 
| +          config_.natural_size(), kNoTimestamp());
 | 
|    libyuv::I420Copy(
 | 
|        vpx_image->planes[VPX_PLANE_Y], vpx_image->stride[VPX_PLANE_Y],
 | 
|        vpx_image->planes[VPX_PLANE_U], vpx_image->stride[VPX_PLANE_U],
 | 
|        vpx_image->planes[VPX_PLANE_V], vpx_image->stride[VPX_PLANE_V],
 | 
| -      (*video_frame)->visible_data(VideoFrame::kYPlane),
 | 
| -      (*video_frame)->stride(VideoFrame::kYPlane),
 | 
| -      (*video_frame)->visible_data(VideoFrame::kUPlane),
 | 
| -      (*video_frame)->stride(VideoFrame::kUPlane),
 | 
| -      (*video_frame)->visible_data(VideoFrame::kVPlane),
 | 
| -      (*video_frame)->stride(VideoFrame::kVPlane), coded_size.width(),
 | 
| +      video_frame_future->data(VideoFrame::kYPlane),
 | 
| +      video_frame_future->stride(VideoFrame::kYPlane),
 | 
| +      video_frame_future->data(VideoFrame::kUPlane),
 | 
| +      video_frame_future->stride(VideoFrame::kUPlane),
 | 
| +      video_frame_future->data(VideoFrame::kVPlane),
 | 
| +      video_frame_future->stride(VideoFrame::kVPlane), coded_size.width(),
 | 
|        coded_size.height());
 | 
|  
 | 
| +  for (int i = 0; i < 1 && vpx_codec_alpha_; i++) {
 | 
| +    if (buffer->side_data_size() < 8) {
 | 
| +      // TODO(mcasas): Is this a warning or an error?
 | 
| +      DLOG(WARNING) << "Making Alpha channel opaque due to missing input";
 | 
| +      const uint32_t kAlphaOpaqueValue = 255;
 | 
| +      libyuv::SetPlane(video_frame_future->data(VideoFrame::kAPlane),
 | 
| +                       video_frame_future->stride(VideoFrame::kAPlane),
 | 
| +                       coded_size.width(), coded_size.height(),
 | 
| +                       kAlphaOpaqueValue);
 | 
| +      break;
 | 
| +    }
 | 
| +
 | 
| +    // First 8 bytes of side data is |side_data_id| in big endian.
 | 
| +    const uint64_t side_data_id = base::NetToHost64(
 | 
| +        *(reinterpret_cast<const uint64_t*>(buffer->side_data())));
 | 
| +    if (side_data_id != 1)
 | 
| +      break;
 | 
| +
 | 
| +    // Try and decode buffer->side_data() minus the first 8 bytes as a full
 | 
| +    // frame.
 | 
| +    int64_t timestamp_alpha = buffer->timestamp().InMicroseconds();
 | 
| +    void* user_priv_alpha = reinterpret_cast<void*>(&timestamp_alpha);
 | 
| +    {
 | 
| +      TRACE_EVENT1("media", "vpx_codec_decode_alpha", "timestamp_alpha",
 | 
| +                   timestamp_alpha);
 | 
| +      vpx_codec_err_t status = vpx_codec_decode(
 | 
| +          vpx_codec_alpha_, buffer->side_data() + 8,
 | 
| +          buffer->side_data_size() - 8, user_priv_alpha, 0 /* deadline */);
 | 
| +      if (status != VPX_CODEC_OK) {
 | 
| +        DLOG(ERROR) << "vpx_codec_decode() failed for the alpha: "
 | 
| +                    << vpx_codec_error(vpx_codec_);
 | 
| +        return false;
 | 
| +      }
 | 
| +    }
 | 
| +
 | 
| +    vpx_codec_iter_t iter_alpha = NULL;
 | 
| +    const vpx_image_t* vpx_image_alpha =
 | 
| +        vpx_codec_get_frame(vpx_codec_alpha_, &iter_alpha);
 | 
| +    if (!vpx_image_alpha) {
 | 
| +      *video_frame = nullptr;
 | 
| +      return true;
 | 
| +    }
 | 
| +
 | 
| +    if (vpx_image_alpha->user_priv != user_priv_alpha) {
 | 
| +      DLOG(ERROR) << "Invalid output timestamp on alpha.";
 | 
| +      return false;
 | 
| +    }
 | 
| +
 | 
| +    if (vpx_image_alpha->d_h != vpx_image->d_h ||
 | 
| +        vpx_image_alpha->d_w != vpx_image->d_w) {
 | 
| +      DLOG(ERROR) << "The alpha plane dimensions are not the same as the "
 | 
| +                     "image dimensions.";
 | 
| +      return false;
 | 
| +    }
 | 
| +
 | 
| +    libyuv::CopyPlane(vpx_image_alpha->planes[VPX_PLANE_Y],
 | 
| +                      vpx_image_alpha->stride[VPX_PLANE_Y],
 | 
| +                      video_frame_future->data(VideoFrame::kAPlane),
 | 
| +                      video_frame_future->stride(VideoFrame::kAPlane),
 | 
| +                      coded_size.width(), coded_size.height());
 | 
| +  }
 | 
| +
 | 
| +  *video_frame = video_frame_future->Release();
 | 
| +  if (!(*video_frame))
 | 
| +    return false;
 | 
|    return true;
 | 
|  }
 | 
|  
 | 
| 
 |