Index: content/browser/renderer_host/media/video_capture_device_client.cc
diff --git a/content/browser/renderer_host/media/video_capture_device_client.cc b/content/browser/renderer_host/media/video_capture_device_client.cc
index f6f8cba1aa5ae165026c78a387bfb3508d2f8dc9..0c86a0eef2b4900ef902551824859833ca0ed794 100644
--- a/content/browser/renderer_host/media/video_capture_device_client.cc
+++ b/content/browser/renderer_host/media/video_capture_device_client.cc
@@ -28,6 +28,16 @@ using media::VideoCaptureFormat;
using media::VideoFrame;
using media::VideoFrameMetadata;

+namespace {
+
+bool IsFormatSupported(media::VideoPixelFormat pixel_format) {
+  // Currently, only I420, Y8 and Y16 pixel formats are supported.
+  return (pixel_format == media::PIXEL_FORMAT_I420 ||
+          pixel_format == media::PIXEL_FORMAT_Y8 ||
+          pixel_format == media::PIXEL_FORMAT_Y16);
+}
+}  // anonymous namespace
+
namespace content {

// Class combining a Client::Buffer interface implementation and a pool buffer
@@ -108,12 +118,26 @@ void VideoCaptureDeviceClient::OnIncomingCapturedData(
  if (!frame_format.IsValid())
    return;

+  // The input |length| can be greater than the required buffer size because of
+  // padding and/or alignment, but it cannot be smaller.
+  DCHECK_GE(static_cast<size_t>(length), frame_format.ImageAllocationSize());
+
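+  // Y8 and Y16 are single-plane formats with no chroma subsampling, so they
+  // can keep odd frame dimensions; the I420 path below crops to even sizes.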
+  const bool use_full_size =
+      frame_format.pixel_format == media::PIXEL_FORMAT_Y8 ||
+      frame_format.pixel_format == media::PIXEL_FORMAT_Y16;
+
  // |chopped_{width,height} and |new_unrotated_{width,height}| are the lowest
  // bit decomposition of {width, height}, grabbing the odd and even parts.
  const int chopped_width = frame_format.frame_size.width() & 1;
  const int chopped_height = frame_format.frame_size.height() & 1;
-  const int new_unrotated_width = frame_format.frame_size.width() & ~1;
-  const int new_unrotated_height = frame_format.frame_size.height() & ~1;
+  const int new_unrotated_width = (use_full_size)
+                                      ? frame_format.frame_size.width()
+                                      : (frame_format.frame_size.width() & ~1);
+  const int new_unrotated_height =
+      (use_full_size) ? frame_format.frame_size.height()
+                      : (frame_format.frame_size.height() & ~1);
  int destination_width = new_unrotated_width;
  int destination_height = new_unrotated_height;

@@ -135,6 +159,29 @@ void VideoCaptureDeviceClient::OnIncomingCapturedData(
      use_gpu_memory_buffers_ ? media::PIXEL_STORAGE_GPUMEMORYBUFFER
                              : media::PIXEL_STORAGE_CPU;
  uint8_t *y_plane_data, *u_plane_data, *v_plane_data;
+
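+  // Y8 and Y16 frames bypass the I420 conversion and rotation below; they
+  // are copied as-is into a single-plane buffer. Only the frame payload is
+  // copied, since |length| may include trailing padding.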
+  if (frame_format.pixel_format == media::PIXEL_FORMAT_Y8 ||
+      frame_format.pixel_format == media::PIXEL_FORMAT_Y16) {
+    std::unique_ptr<Buffer> buffer(ReserveOutputBuffer(
+        dimensions, frame_format.pixel_format, output_pixel_storage));
+    if (!buffer.get()) {
+      DLOG(WARNING) << "Failed to reserve output buffer: "
+                    << media::VideoPixelFormatToString(
+                           frame_format.pixel_format);
+      return;
+    }
+    memcpy(buffer->data(), data, frame_format.ImageAllocationSize());
+    const VideoCaptureFormat output_format =
+        VideoCaptureFormat(dimensions, frame_format.frame_rate,
+                           frame_format.pixel_format, output_pixel_storage);
+    OnIncomingCapturedBuffer(std::move(buffer), output_format, reference_time,
+                             timestamp);
+    return;
+  }
+
  std::unique_ptr<Buffer> buffer(
      ReserveI420OutputBuffer(dimensions, output_pixel_storage, &y_plane_data,
                              &u_plane_data, &v_plane_data));
@@ -213,10 +260,6 @@ void VideoCaptureDeviceClient::OnIncomingCapturedData(
      NOTREACHED();
  }

-  // The input |length| can be greater than the required buffer size because of
-  // paddings and/or alignments, but it cannot be smaller.
-  DCHECK_GE(static_cast<size_t>(length), frame_format.ImageAllocationSize());
-
  if (external_jpeg_decoder_) {
    const VideoCaptureGpuJpegDecoder::STATUS status =
        external_jpeg_decoder_->GetStatus();
@@ -267,8 +310,7 @@ VideoCaptureDeviceClient::ReserveOutputBuffer(
    media::VideoPixelStorage pixel_storage) {
  DCHECK_GT(frame_size.width(), 0);
  DCHECK_GT(frame_size.height(), 0);
-  // Currently, only I420 pixel format is supported.
-  DCHECK_EQ(media::PIXEL_FORMAT_I420, pixel_format);
+  DCHECK(IsFormatSupported(pixel_format));

  // TODO(mcasas): For PIXEL_STORAGE_GPUMEMORYBUFFER, find a way to indicate if
  // it's a ShMem GMB or a DmaBuf GMB.
@@ -292,12 +334,13 @@ void VideoCaptureDeviceClient::OnIncomingCapturedBuffer(
    const VideoCaptureFormat& frame_format,
    base::TimeTicks reference_time,
    base::TimeDelta timestamp) {
-  // Currently, only I420 pixel format is supported.
-  DCHECK_EQ(media::PIXEL_FORMAT_I420, frame_format.pixel_format);
+  DCHECK(IsFormatSupported(frame_format.pixel_format));

  scoped_refptr<VideoFrame> frame;
  switch (frame_format.pixel_storage) {
    case media::PIXEL_STORAGE_GPUMEMORYBUFFER: {
+      // TODO(astojilj): Check Y8 and Y16 support.
+      DCHECK_EQ(media::PIXEL_FORMAT_I420, frame_format.pixel_format);
      // Create a VideoFrame to set the correct storage_type and pixel_format.
      gfx::GpuMemoryBufferHandle handle;
      frame = VideoFrame::WrapExternalYuvGpuMemoryBuffers(
@@ -311,10 +354,10 @@ void VideoCaptureDeviceClient::OnIncomingCapturedBuffer(
    }
    case media::PIXEL_STORAGE_CPU:
      frame = VideoFrame::WrapExternalSharedMemory(
-          media::PIXEL_FORMAT_I420, frame_format.frame_size,
+          frame_format.pixel_format, frame_format.frame_size,
          gfx::Rect(frame_format.frame_size), frame_format.frame_size,
          reinterpret_cast<uint8_t*>(buffer->data()),
-          VideoFrame::AllocationSize(media::PIXEL_FORMAT_I420,
+          VideoFrame::AllocationSize(frame_format.pixel_format,
                                     frame_format.frame_size),
          base::SharedMemory::NULLHandle(), 0u, timestamp);
      break;