Chromium Code Reviews| Index: media/capture/video/fake_video_capture_device.cc |
| diff --git a/media/capture/video/fake_video_capture_device.cc b/media/capture/video/fake_video_capture_device.cc |
| index 4aa961cc42c4f94e6d4090d3a0a5d1ea1bac5931..db71c6c9e863360b9f25030eb636b87ecda5f1f8 100644 |
| --- a/media/capture/video/fake_video_capture_device.cc |
| +++ b/media/capture/video/fake_video_capture_device.cc |
| @@ -8,11 +8,16 @@ |
| #include <algorithm> |
| #include <utility> |
| +#include "base/atomicops.h" |
| #include "base/bind.h" |
| #include "base/location.h" |
| +#include "base/macros.h" |
| +#include "base/memory/weak_ptr.h" |
| #include "base/single_thread_task_runner.h" |
| #include "base/strings/stringprintf.h" |
| +#include "base/threading/thread_checker.h" |
| #include "base/threading/thread_task_runner_handle.h" |
| +#include "base/time/time.h" |
| #include "media/audio/fake_audio_input_stream.h" |
| #include "media/base/video_frame.h" |
| #include "third_party/skia/include/core/SkBitmap.h" |
| @@ -23,6 +28,7 @@ |
| namespace media { |
| +namespace { |
| // Sweep at 600 deg/sec. |
| static const float kPacmanAngularVelocity = 600; |
| // Beep every 500 ms. |
| @@ -33,18 +39,264 @@ static const float kGradientFrequency = 1.f / 5; |
| static const double kMinZoom = 100.0; |
| static const double kMaxZoom = 400.0; |
| static const double kZoomStep = 1.0; |
| +static const double kInitialZoom = 100.0; |
| + |
| +static const gfx::Size kSupportedSizes[] = { |
| + gfx::Size(96, 96), gfx::Size(320, 240), gfx::Size(640, 480), |
| + gfx::Size(1280, 720), gfx::Size(1920, 1080)}; |
| +static const int kSupportedSizesCount = |
| + sizeof(kSupportedSizes) / sizeof(gfx::Size); |
| + |
| +static const VideoPixelFormat kSupportedPixelFormats[] = { |
| + PIXEL_FORMAT_I420, PIXEL_FORMAT_Y16, PIXEL_FORMAT_ARGB}; |
| + |
| +static gfx::Size SnapToSupportedSize(const gfx::Size& requested_size) { |
| + for (int i = 0; i < kSupportedSizesCount; i++) { |
| + const gfx::Size& supported_size = kSupportedSizes[i]; |
| + if (requested_size.width() <= supported_size.width()) { |
| + return supported_size; |
| + } |
|
mcasas
2017/02/15 00:44:19
No {} in one-line bodies.
chfremer
2017/02/15 18:11:29
Done.
|
| + } |
|
mcasas
2017/02/15 00:44:19
for (const gfx::Size& supported_size : kSupportedSizes)
chfremer
2017/02/15 18:11:29
Done.
|
| + return kSupportedSizes[kSupportedSizesCount - 1]; |
| +} |
| + |
| +class FakeVideoCaptureDevice; |
|
mcasas
2017/02/15 00:44:19
Probably not needed? I don't see any refs to it
being used.
chfremer
2017/02/15 18:11:29
Done.
|
| + |
| +// Represents the current state of a FakeVideoCaptureDevice. |
| +// This is a separate struct because read-access to it is shared with several |
| +// collaborating classes. |
| +struct FakeDeviceState { |
| + FakeDeviceState(float zoom, float frame_rate, VideoPixelFormat pixel_format) |
| + : zoom(zoom), |
| + format(gfx::Size(), frame_rate, pixel_format, PIXEL_STORAGE_CPU) {} |
| + |
| + uint32_t zoom; |
| + VideoCaptureFormat format; |
| +}; |
| + |
| +// Paints a frame into the given |target_buffer|. |
| +class FramePainter { |
| + public: |
| + virtual void PaintFrame(base::TimeDelta elapsed_time, |
| + uint8_t* target_buffer) = 0; |
| +}; |
|
mcasas
2017/02/15 00:44:19
Why a base class if there's only one class
deriving from it?
chfremer
2017/02/15 18:11:29
Short answer: For abstraction and loose coupling.
mcasas
2017/02/15 18:23:47
That's all good, but in Chromium we only abstract
chfremer
2017/02/15 18:37:16
Done.
|
| + |
| +// Paints a "pacman-like" animated circle including textual information such |
| +// as a frame count and timer. |
| +class PacmanFramePainter : public FramePainter { |
| + public: |
| + // Currently, only the following values are supported for |pixel_format|: |
| + // PIXEL_FORMAT_I420 |
| + // PIXEL_FORMAT_Y16 |
| + // PIXEL_FORMAT_ARGB |
| + PacmanFramePainter(VideoPixelFormat pixel_format, |
| + const FakeDeviceState* fake_device_state); |
| + |
| + // Implementation of FramePainter |
| + void PaintFrame(base::TimeDelta elapsed_time, |
| + uint8_t* target_buffer) override; |
| + |
| + private: |
| + void DrawGradientSquares(base::TimeDelta elapsed_time, |
| + uint8_t* target_buffer); |
| + |
| + void DrawPacman(base::TimeDelta elapsed_time, uint8_t* target_buffer); |
| + |
| + const VideoPixelFormat pixel_format_; |
| + const FakeDeviceState* fake_device_state_ = nullptr; |
| +}; |
| + |
| +// Delivers frames to a client, which is set via Initialize(). |
| +class FrameDeliveryStrategy { |
| + public: |
| + virtual ~FrameDeliveryStrategy() {} |
| + virtual void Initialize(VideoPixelFormat pixel_format, |
| + std::unique_ptr<VideoCaptureDevice::Client> client, |
| + const FakeDeviceState* device_state) = 0; |
| + virtual void Uninitialize() = 0; |
| + virtual uint8_t* PrepareBufferForNextFrame() = 0; |
| + virtual void DeliverFrame() = 0; |
| + |
| + protected: |
| + const FakeDeviceState* device_state_ = nullptr; |
| + std::unique_ptr<VideoCaptureDevice::Client> client_; |
| + // The system time when we receive the first frame. |
| + base::TimeTicks first_ref_time_; |
| +}; |
| + |
| +// Delivers frames using its own buffers via OnIncomingCapturedData(). |
| +class OwnBufferFrameDeliveryStrategy : public FrameDeliveryStrategy { |
| + public: |
| + OwnBufferFrameDeliveryStrategy(); |
| + ~OwnBufferFrameDeliveryStrategy() override; |
| + |
| + // Implementation of FrameDeliveryStrategy |
| + void Initialize(VideoPixelFormat pixel_format, |
| + std::unique_ptr<VideoCaptureDevice::Client> client, |
| + const FakeDeviceState* device_state) override; |
| + void Uninitialize() override; |
| + uint8_t* PrepareBufferForNextFrame() override; |
| + void DeliverFrame() override; |
| + |
| + private: |
| + std::unique_ptr<uint8_t[]> buffer_; |
| +}; |
| + |
| +// Delivers frames using buffers provided by the client via |
| +// OnIncomingCapturedBuffer(). |
| +class ClientBufferFrameDeliveryStrategy : public FrameDeliveryStrategy { |
| + public: |
| + ClientBufferFrameDeliveryStrategy(); |
| + ~ClientBufferFrameDeliveryStrategy() override; |
| + |
| + // Implementation of FrameDeliveryStrategy |
| + void Initialize(VideoPixelFormat pixel_format, |
| + std::unique_ptr<VideoCaptureDevice::Client> client, |
| + const FakeDeviceState* device_state) override; |
| + void Uninitialize() override; |
| + uint8_t* PrepareBufferForNextFrame() override; |
| + void DeliverFrame() override; |
| + |
| + private: |
| + VideoCaptureDevice::Client::Buffer capture_buffer_; |
| +}; |
| + |
| +// Implements the photo functionality of a VideoCaptureDevice |
| +class FakePhotoDevice { |
| + public: |
| + FakePhotoDevice(std::unique_ptr<FramePainter> argb_painter, |
| + const FakeDeviceState* fake_device_state); |
| + ~FakePhotoDevice(); |
| + |
| + void GetPhotoCapabilities( |
| + VideoCaptureDevice::GetPhotoCapabilitiesCallback callback); |
| + void TakePhoto(VideoCaptureDevice::TakePhotoCallback callback, |
| + base::TimeDelta elapsed_time); |
| + |
| + private: |
| + const std::unique_ptr<FramePainter> argb_painter_; |
| + const FakeDeviceState* const fake_device_state_; |
| +}; |
| + |
| +// Implementation of VideoCaptureDevice that generates test frames. This is |
| +// useful for testing the video capture components without having to use real |
| +// devices. The implementation schedules delayed tasks to itself to generate and |
| +// deliver frames at the requested rate. |
| +class FakeVideoCaptureDevice : public VideoCaptureDevice { |
| + public: |
| + FakeVideoCaptureDevice( |
| + std::unique_ptr<FramePainter> frame_painter, |
| + std::unique_ptr<FrameDeliveryStrategy> frame_delivery_strategy, |
| + std::unique_ptr<FakePhotoDevice> photo_device, |
| + std::unique_ptr<FakeDeviceState> device_state); |
| + ~FakeVideoCaptureDevice() override; |
| + |
| + // VideoCaptureDevice implementation. |
| + void AllocateAndStart(const VideoCaptureParams& params, |
| + std::unique_ptr<Client> client) override; |
| + void StopAndDeAllocate() override; |
| + void GetPhotoCapabilities(GetPhotoCapabilitiesCallback callback) override; |
| + void SetPhotoOptions(mojom::PhotoSettingsPtr settings, |
| + SetPhotoOptionsCallback callback) override; |
| + void TakePhoto(TakePhotoCallback callback) override; |
| + |
| + private: |
| + void BeepAndScheduleNextCapture(base::TimeTicks expected_execution_time); |
| + void OnNextFrameDue(base::TimeTicks expected_execution_time, int session_id); |
| + |
| + const std::unique_ptr<FramePainter> frame_painter_; |
| + const std::unique_ptr<FrameDeliveryStrategy> frame_delivery_strategy_; |
| + const std::unique_ptr<FakePhotoDevice> photo_device_; |
| + const std::unique_ptr<FakeDeviceState> device_state_; |
| + int current_session_id_ = 0; |
| + |
| + // Time when the next beep occurs. |
| + base::TimeDelta beep_time_; |
| + // Time since the fake video started rendering frames. |
| + base::TimeDelta elapsed_time_; |
| + |
| + base::ThreadChecker thread_checker_; |
| + |
| + // FakeVideoCaptureDevice posts tasks to itself for frame construction and |
| + // needs to deal with asynchronous StopAndDeAllocate(). |
| + base::WeakPtrFactory<FakeVideoCaptureDevice> weak_factory_; |
| + |
| + DISALLOW_COPY_AND_ASSIGN(FakeVideoCaptureDevice); |
| +}; |
| + |
| +} // anonymous namespace |
| + |
| +// static |
| +void FakeVideoCaptureDeviceMaker::GetSupportedSizes( |
| + std::vector<gfx::Size>* supported_sizes) { |
| + for (int i = 0; i < kSupportedSizesCount; i++) |
| + supported_sizes->push_back(kSupportedSizes[i]); |
| +} |
| + |
| +// static |
| +std::unique_ptr<VideoCaptureDevice> FakeVideoCaptureDeviceMaker::MakeInstance( |
| + VideoPixelFormat pixel_format, |
| + DeliveryMode delivery_mode, |
| + float frame_rate) { |
| + bool pixel_format_supported = false; |
| + for (const auto& supported_pixel_format : kSupportedPixelFormats) { |
| + if (pixel_format == supported_pixel_format) { |
| + pixel_format_supported = true; |
| + break; |
| + } |
| + } |
| + if (!pixel_format_supported) { |
| + DLOG(ERROR) << "Requested an unsupported pixel format " |
| + << VideoPixelFormatToString(pixel_format); |
| + return nullptr; |
| + } |
| + |
| + auto device_state = |
| + base::MakeUnique<FakeDeviceState>(kInitialZoom, frame_rate, pixel_format); |
| + auto video_frame_painter = |
| + base::MakeUnique<PacmanFramePainter>(pixel_format, device_state.get()); |
| + std::unique_ptr<FrameDeliveryStrategy> frame_delivery_strategy; |
| + switch (delivery_mode) { |
| + case DeliveryMode::USE_OWN_BUFFERS: |
| + frame_delivery_strategy = |
| + base::MakeUnique<OwnBufferFrameDeliveryStrategy>(); |
| + break; |
| + case DeliveryMode::USE_CLIENT_BUFFERS: |
| + frame_delivery_strategy = |
| + base::MakeUnique<ClientBufferFrameDeliveryStrategy>(); |
| + break; |
| + } |
| + |
| + auto photo_frame_painter = base::MakeUnique<PacmanFramePainter>( |
| + PIXEL_FORMAT_ARGB, device_state.get()); |
| + auto photo_device = base::MakeUnique<FakePhotoDevice>( |
| + std::move(photo_frame_painter), device_state.get()); |
| + |
| + return base::MakeUnique<FakeVideoCaptureDevice>( |
| + std::move(video_frame_painter), std::move(frame_delivery_strategy), |
| + std::move(photo_device), std::move(device_state)); |
| +} |
| + |
| +PacmanFramePainter::PacmanFramePainter(VideoPixelFormat pixel_format, |
| + const FakeDeviceState* fake_device_state) |
| + : pixel_format_(pixel_format), fake_device_state_(fake_device_state) {} |
| + |
| +void PacmanFramePainter::PaintFrame(base::TimeDelta elapsed_time, |
| + uint8_t* target_buffer) { |
| + DrawPacman(elapsed_time, target_buffer); |
| + DrawGradientSquares(elapsed_time, target_buffer); |
| +} |
| // Starting from top left, -45 deg gradient. Value at point (row, column) is |
| // calculated as (top_left_value + (row + column) * step) % MAX_VALUE, where |
| // step is MAX_VALUE / (width + height). MAX_VALUE is 255 (for 8 bit per |
| // component) or 65535 for Y16. |
| // This is handy for pixel tests where we use the squares to verify rendering. |
| -void DrawGradientSquares(VideoPixelFormat frame_format, |
| - uint8_t* const pixels, |
| - base::TimeDelta elapsed_time, |
| - const gfx::Size& frame_size) { |
| - const int width = frame_size.width(); |
| - const int height = frame_size.height(); |
| +void PacmanFramePainter::DrawGradientSquares(base::TimeDelta elapsed_time, |
| + uint8_t* target_buffer) { |
| + const int width = fake_device_state_->format.frame_size.width(); |
| + const int height = fake_device_state_->format.frame_size.height(); |
| + |
| const int side = width / 16; // square side length. |
| DCHECK(side); |
| const gfx::Point squares[] = {{0, 0}, |
| @@ -60,18 +312,18 @@ void DrawGradientSquares(VideoPixelFormat frame_format, |
| const unsigned int value = |
| static_cast<unsigned int>(start + (x + y) * color_step) & 0xFFFF; |
| size_t offset = (y * width) + x; |
| - switch (frame_format) { |
| + switch (pixel_format_) { |
| case PIXEL_FORMAT_Y16: |
| - pixels[offset * sizeof(uint16_t)] = value & 0xFF; |
| - pixels[offset * sizeof(uint16_t) + 1] = value >> 8; |
| + target_buffer[offset * sizeof(uint16_t)] = value & 0xFF; |
| + target_buffer[offset * sizeof(uint16_t) + 1] = value >> 8; |
| break; |
| case PIXEL_FORMAT_ARGB: |
| - pixels[offset * sizeof(uint32_t) + 1] = value >> 8; |
| - pixels[offset * sizeof(uint32_t) + 2] = value >> 8; |
| - pixels[offset * sizeof(uint32_t) + 3] = value >> 8; |
| + target_buffer[offset * sizeof(uint32_t) + 1] = value >> 8; |
| + target_buffer[offset * sizeof(uint32_t) + 2] = value >> 8; |
| + target_buffer[offset * sizeof(uint32_t) + 3] = value >> 8; |
| break; |
| default: |
| - pixels[offset] = value >> 8; |
| + target_buffer[offset] = value >> 8; |
| break; |
| } |
| } |
| @@ -79,37 +331,34 @@ void DrawGradientSquares(VideoPixelFormat frame_format, |
| } |
| } |
| -void DrawPacman(VideoPixelFormat frame_format, |
| - uint8_t* const data, |
| - base::TimeDelta elapsed_time, |
| - float frame_rate, |
| - const gfx::Size& frame_size, |
| - double zoom) { |
| +void PacmanFramePainter::DrawPacman(base::TimeDelta elapsed_time, |
| + uint8_t* target_buffer) { |
| + const int width = fake_device_state_->format.frame_size.width(); |
| + const int height = fake_device_state_->format.frame_size.height(); |
| + |
| // |kN32_SkColorType| stands for the appropriate RGBA/BGRA format. |
| - const SkColorType colorspace = (frame_format == PIXEL_FORMAT_ARGB) |
| + const SkColorType colorspace = (pixel_format_ == PIXEL_FORMAT_ARGB) |
| ? kN32_SkColorType |
| : kAlpha_8_SkColorType; |
| // Skia doesn't support 16 bit alpha rendering, so we 8 bit alpha and then use |
| // this as high byte values in 16 bit pixels. |
| - const SkImageInfo info = SkImageInfo::Make( |
| - frame_size.width(), frame_size.height(), colorspace, kOpaque_SkAlphaType); |
| + const SkImageInfo info = |
| + SkImageInfo::Make(width, height, colorspace, kOpaque_SkAlphaType); |
| SkBitmap bitmap; |
| bitmap.setInfo(info); |
| - bitmap.setPixels(data); |
| + bitmap.setPixels(target_buffer); |
| SkPaint paint; |
| paint.setStyle(SkPaint::kFill_Style); |
| SkCanvas canvas(bitmap); |
| - const SkScalar unscaled_zoom = zoom / 100.f; |
| + const SkScalar unscaled_zoom = fake_device_state_->zoom / 100.f; |
| SkMatrix matrix; |
| - matrix.setScale(unscaled_zoom, unscaled_zoom, frame_size.width() / 2, |
| - frame_size.height() / 2); |
| + matrix.setScale(unscaled_zoom, unscaled_zoom, width / 2, height / 2); |
| canvas.setMatrix(matrix); |
| // Equalize Alpha_8 that has light green background while RGBA has white. |
| - if (frame_format == PIXEL_FORMAT_ARGB) { |
| - const SkRect full_frame = |
| - SkRect::MakeWH(frame_size.width(), frame_size.height()); |
| + if (pixel_format_ == PIXEL_FORMAT_ARGB) { |
| + const SkRect full_frame = SkRect::MakeWH(width, height); |
| paint.setARGB(255, 0, 127, 0); |
| canvas.drawRect(full_frame, paint); |
| } |
| @@ -118,9 +367,8 @@ void DrawPacman(VideoPixelFormat frame_format, |
| // Draw a sweeping circle to show an animation. |
| const float end_angle = |
| fmod(kPacmanAngularVelocity * elapsed_time.InSecondsF(), 361); |
| - const int radius = std::min(frame_size.width(), frame_size.height()) / 4; |
| - const SkRect rect = SkRect::MakeXYWH(frame_size.width() / 2 - radius, |
| - frame_size.height() / 2 - radius, |
| + const int radius = std::min(width, height) / 4; |
| + const SkRect rect = SkRect::MakeXYWH(width / 2 - radius, height / 2 - radius, |
| 2 * radius, 2 * radius); |
| canvas.drawArc(rect, 0, end_angle, true, paint); |
| @@ -129,7 +377,8 @@ void DrawPacman(VideoPixelFormat frame_format, |
| const int seconds = elapsed_time.InSeconds() % 60; |
| const int minutes = elapsed_time.InMinutes() % 60; |
| const int hours = elapsed_time.InHours(); |
| - const int frame_count = elapsed_time.InMilliseconds() * frame_rate / 1000; |
| + const int frame_count = elapsed_time.InMilliseconds() * |
| + fake_device_state_->format.frame_rate / 1000; |
| const std::string time_string = |
| base::StringPrintf("%d:%02d:%02d:%03d %d", hours, minutes, seconds, |
| @@ -137,46 +386,49 @@ void DrawPacman(VideoPixelFormat frame_format, |
| canvas.scale(3, 3); |
| canvas.drawText(time_string.data(), time_string.length(), 30, 20, paint); |
| - if (frame_format == PIXEL_FORMAT_Y16) { |
| + if (pixel_format_ == PIXEL_FORMAT_Y16) { |
| // Use 8 bit bitmap rendered to first half of the buffer as high byte values |
| // for the whole buffer. Low byte values are not important. |
| - for (int i = frame_size.GetArea() - 1; i >= 0; --i) |
| - data[i * 2 + 1] = data[i]; |
| + for (int i = (width * height) - 1; i >= 0; --i) |
| + target_buffer[i * 2 + 1] = target_buffer[i]; |
| } |
| - DrawGradientSquares(frame_format, data, elapsed_time, frame_size); |
| } |
| -// Creates a PNG-encoded frame and sends it back to |callback|. The other |
| -// parameters are used to replicate the PacMan rendering. |
| -void DoTakeFakePhoto(VideoCaptureDevice::TakePhotoCallback callback, |
| - const VideoCaptureFormat& capture_format, |
| - base::TimeDelta elapsed_time, |
| - float fake_capture_rate, |
| - uint32_t zoom) { |
| - std::unique_ptr<uint8_t[]> buffer(new uint8_t[VideoFrame::AllocationSize( |
| - PIXEL_FORMAT_ARGB, capture_format.frame_size)]); |
| +FakePhotoDevice::FakePhotoDevice(std::unique_ptr<FramePainter> argb_painter, |
| + const FakeDeviceState* fake_device_state) |
| + : argb_painter_(std::move(argb_painter)), |
| + fake_device_state_(fake_device_state) {} |
| - DrawPacman(PIXEL_FORMAT_ARGB, buffer.get(), elapsed_time, fake_capture_rate, |
| - capture_format.frame_size, zoom); |
| +FakePhotoDevice::~FakePhotoDevice() = default; |
| +void FakePhotoDevice::TakePhoto(VideoCaptureDevice::TakePhotoCallback callback, |
| + base::TimeDelta elapsed_time) { |
| + // Create a PNG-encoded frame and send it back to |callback|. |
| + std::unique_ptr<uint8_t[]> buffer(new uint8_t[VideoFrame::AllocationSize( |
| + PIXEL_FORMAT_ARGB, fake_device_state_->format.frame_size)]); |
| + argb_painter_->PaintFrame(elapsed_time, buffer.get()); |
| mojom::BlobPtr blob = mojom::Blob::New(); |
| - const bool result = gfx::PNGCodec::Encode( |
| - buffer.get(), gfx::PNGCodec::FORMAT_RGBA, capture_format.frame_size, |
| - capture_format.frame_size.width() * 4, true /* discard_transparency */, |
| - std::vector<gfx::PNGCodec::Comment>(), &blob->data); |
| + const bool result = |
| + gfx::PNGCodec::Encode(buffer.get(), gfx::PNGCodec::FORMAT_RGBA, |
| + fake_device_state_->format.frame_size, |
| + fake_device_state_->format.frame_size.width() * 4, |
| + true /* discard_transparency */, |
| + std::vector<gfx::PNGCodec::Comment>(), &blob->data); |
| DCHECK(result); |
| blob->mime_type = "image/png"; |
| callback.Run(std::move(blob)); |
| } |
| -FakeVideoCaptureDevice::FakeVideoCaptureDevice(BufferOwnership buffer_ownership, |
| - float fake_capture_rate, |
| - VideoPixelFormat pixel_format) |
| - : buffer_ownership_(buffer_ownership), |
| - fake_capture_rate_(fake_capture_rate), |
| - pixel_format_(pixel_format), |
| - current_zoom_(kMinZoom), |
| +FakeVideoCaptureDevice::FakeVideoCaptureDevice( |
| + std::unique_ptr<FramePainter> frame_painter, |
| + std::unique_ptr<FrameDeliveryStrategy> frame_delivery_strategy, |
| + std::unique_ptr<FakePhotoDevice> photo_device, |
| + std::unique_ptr<FakeDeviceState> device_state) |
| + : frame_painter_(std::move(frame_painter)), |
| + frame_delivery_strategy_(std::move(frame_delivery_strategy)), |
| + photo_device_(std::move(photo_device)), |
| + device_state_(std::move(device_state)), |
| weak_factory_(this) {} |
| FakeVideoCaptureDevice::~FakeVideoCaptureDevice() { |
| @@ -188,61 +440,32 @@ void FakeVideoCaptureDevice::AllocateAndStart( |
| std::unique_ptr<VideoCaptureDevice::Client> client) { |
| DCHECK(thread_checker_.CalledOnValidThread()); |
| - client_ = std::move(client); |
| - |
| - // Incoming |params| can be none of the supported formats, so we get the |
| - // closest thing rounded up. TODO(mcasas): Use the |params|, if they belong to |
| - // the supported ones, when http://crbug.com/309554 is verified. |
| - capture_format_.frame_rate = fake_capture_rate_; |
| - if (params.requested_format.frame_size.width() > 1280) |
| - capture_format_.frame_size.SetSize(1920, 1080); |
| - else if (params.requested_format.frame_size.width() > 640) |
| - capture_format_.frame_size.SetSize(1280, 720); |
| - else if (params.requested_format.frame_size.width() > 320) |
| - capture_format_.frame_size.SetSize(640, 480); |
| - else if (params.requested_format.frame_size.width() > 96) |
| - capture_format_.frame_size.SetSize(320, 240); |
| - else |
| - capture_format_.frame_size.SetSize(96, 96); |
| - |
| - capture_format_.pixel_format = pixel_format_; |
| - if (buffer_ownership_ == BufferOwnership::CLIENT_BUFFERS) { |
| - capture_format_.pixel_storage = PIXEL_STORAGE_CPU; |
| - capture_format_.pixel_format = PIXEL_FORMAT_ARGB; |
| - DVLOG(1) << "starting with client argb buffers"; |
| - } else if (buffer_ownership_ == BufferOwnership::OWN_BUFFERS) { |
| - capture_format_.pixel_storage = PIXEL_STORAGE_CPU; |
| - DVLOG(1) << "starting with own " << VideoPixelFormatToString(pixel_format_) |
| - << " buffers"; |
| - } |
| - |
| - if (buffer_ownership_ == BufferOwnership::OWN_BUFFERS) { |
| - fake_frame_.reset(new uint8_t[VideoFrame::AllocationSize( |
| - pixel_format_, capture_format_.frame_size)]); |
| - } |
| - |
| beep_time_ = base::TimeDelta(); |
| elapsed_time_ = base::TimeDelta(); |
| - |
| - if (buffer_ownership_ == BufferOwnership::CLIENT_BUFFERS) |
| - BeepAndScheduleNextCapture( |
| - base::TimeTicks::Now(), |
| - base::Bind(&FakeVideoCaptureDevice::CaptureUsingClientBuffers, |
| - weak_factory_.GetWeakPtr())); |
| - else if (buffer_ownership_ == BufferOwnership::OWN_BUFFERS) |
| - BeepAndScheduleNextCapture( |
| - base::TimeTicks::Now(), |
| - base::Bind(&FakeVideoCaptureDevice::CaptureUsingOwnBuffers, |
| - weak_factory_.GetWeakPtr())); |
| + device_state_->format.frame_size = |
| + SnapToSupportedSize(params.requested_format.frame_size); |
| + frame_delivery_strategy_->Initialize(device_state_->format.pixel_format, |
| + std::move(client), device_state_.get()); |
| + current_session_id_++; |
| + BeepAndScheduleNextCapture(base::TimeTicks::Now()); |
| } |
| void FakeVideoCaptureDevice::StopAndDeAllocate() { |
| DCHECK(thread_checker_.CalledOnValidThread()); |
| - client_.reset(); |
| + |
| + // Invalidate WeakPtr to stop the perpetual scheduling of tasks. |
| + weak_factory_.InvalidateWeakPtrs(); |
| + frame_delivery_strategy_->Uninitialize(); |
| } |
| void FakeVideoCaptureDevice::GetPhotoCapabilities( |
| GetPhotoCapabilitiesCallback callback) { |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + photo_device_->GetPhotoCapabilities(std::move(callback)); |
| +} |
| + |
| +void FakePhotoDevice::GetPhotoCapabilities( |
| + VideoCaptureDevice::GetPhotoCapabilitiesCallback callback) { |
| mojom::PhotoCapabilitiesPtr photo_capabilities = |
| mojom::PhotoCapabilities::New(); |
| photo_capabilities->iso = mojom::Range::New(); |
| @@ -251,17 +474,19 @@ void FakeVideoCaptureDevice::GetPhotoCapabilities( |
| photo_capabilities->iso->min = 100.0; |
| photo_capabilities->iso->step = 0.0; |
| photo_capabilities->height = mojom::Range::New(); |
| - photo_capabilities->height->current = capture_format_.frame_size.height(); |
| + photo_capabilities->height->current = |
| + fake_device_state_->format.frame_size.height(); |
| photo_capabilities->height->max = 1080.0; |
| photo_capabilities->height->min = 96.0; |
| photo_capabilities->height->step = 1.0; |
| photo_capabilities->width = mojom::Range::New(); |
| - photo_capabilities->width->current = capture_format_.frame_size.width(); |
| + photo_capabilities->width->current = |
| + fake_device_state_->format.frame_size.width(); |
| photo_capabilities->width->max = 1920.0; |
| photo_capabilities->width->min = 96.0; |
| - photo_capabilities->width->step = 1; |
| + photo_capabilities->width->step = 1.0; |
| photo_capabilities->zoom = mojom::Range::New(); |
| - photo_capabilities->zoom->current = current_zoom_; |
| + photo_capabilities->zoom->current = fake_device_state_->zoom; |
| photo_capabilities->zoom->max = kMaxZoom; |
| photo_capabilities->zoom->min = kMinZoom; |
| photo_capabilities->zoom->step = kZoomStep; |
| @@ -281,80 +506,123 @@ void FakeVideoCaptureDevice::GetPhotoCapabilities( |
| void FakeVideoCaptureDevice::SetPhotoOptions(mojom::PhotoSettingsPtr settings, |
| SetPhotoOptionsCallback callback) { |
| - if (settings->has_zoom) |
| - current_zoom_ = std::max(kMinZoom, std::min(settings->zoom, kMaxZoom)); |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + if (settings->has_zoom) { |
| + device_state_->zoom = |
| + std::max(kMinZoom, std::min(settings->zoom, kMaxZoom)); |
| + } |
| + |
| callback.Run(true); |
| } |
| void FakeVideoCaptureDevice::TakePhoto(TakePhotoCallback callback) { |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| base::ThreadTaskRunnerHandle::Get()->PostTask( |
| - FROM_HERE, |
| - base::Bind(&DoTakeFakePhoto, base::Passed(&callback), capture_format_, |
| - elapsed_time_, fake_capture_rate_, current_zoom_)); |
| + FROM_HERE, base::Bind(&FakePhotoDevice::TakePhoto, |
| + base::Unretained(photo_device_.get()), |
| + base::Passed(&callback), elapsed_time_)); |
| } |
| -void FakeVideoCaptureDevice::CaptureUsingOwnBuffers( |
| - base::TimeTicks expected_execution_time) { |
| - DCHECK(thread_checker_.CalledOnValidThread()); |
| - const size_t frame_size = capture_format_.ImageAllocationSize(); |
| +OwnBufferFrameDeliveryStrategy::OwnBufferFrameDeliveryStrategy() = default; |
| + |
| +OwnBufferFrameDeliveryStrategy::~OwnBufferFrameDeliveryStrategy() = default; |
| + |
| +void OwnBufferFrameDeliveryStrategy::Initialize( |
| + VideoPixelFormat pixel_format, |
| + std::unique_ptr<VideoCaptureDevice::Client> client, |
| + const FakeDeviceState* device_state) { |
| + client_ = std::move(client); |
| + device_state_ = device_state; |
| + buffer_.reset(new uint8_t[VideoFrame::AllocationSize( |
| + pixel_format, device_state_->format.frame_size)]); |
| +} |
| + |
| +void OwnBufferFrameDeliveryStrategy::Uninitialize() { |
| + client_.reset(); |
| + device_state_ = nullptr; |
| + buffer_.reset(); |
| +} |
| + |
| +uint8_t* OwnBufferFrameDeliveryStrategy::PrepareBufferForNextFrame() { |
| + if (client_ == nullptr) |
|
mcasas
2017/02/15 00:44:19
if (!client_)
here and in l.556
chfremer
2017/02/15 18:11:29
Done.
|
| + return nullptr; |
| - memset(fake_frame_.get(), 0, frame_size); |
| - DrawPacman(capture_format_.pixel_format, fake_frame_.get(), elapsed_time_, |
| - fake_capture_rate_, capture_format_.frame_size, current_zoom_); |
| - // Give the captured frame to the client. |
| + const size_t frame_size = device_state_->format.ImageAllocationSize(); |
| + memset(buffer_.get(), 0, frame_size); |
| + return buffer_.get(); |
| +} |
| + |
| +void OwnBufferFrameDeliveryStrategy::DeliverFrame() { |
| + if (client_ == nullptr) |
| + return; |
| + const size_t frame_size = device_state_->format.ImageAllocationSize(); |
| base::TimeTicks now = base::TimeTicks::Now(); |
| if (first_ref_time_.is_null()) |
| first_ref_time_ = now; |
| - client_->OnIncomingCapturedData(fake_frame_.get(), frame_size, |
| - capture_format_, 0 /* rotation */, now, |
| + client_->OnIncomingCapturedData(buffer_.get(), frame_size, |
| + device_state_->format, 0 /* rotation */, now, |
| now - first_ref_time_); |
| - BeepAndScheduleNextCapture( |
| - expected_execution_time, |
| - base::Bind(&FakeVideoCaptureDevice::CaptureUsingOwnBuffers, |
| - weak_factory_.GetWeakPtr())); |
| } |
| -void FakeVideoCaptureDevice::CaptureUsingClientBuffers( |
| - base::TimeTicks expected_execution_time) { |
| - DCHECK(thread_checker_.CalledOnValidThread()); |
| +ClientBufferFrameDeliveryStrategy::ClientBufferFrameDeliveryStrategy() = |
| + default; |
| + |
| +ClientBufferFrameDeliveryStrategy::~ClientBufferFrameDeliveryStrategy() = |
| + default; |
| + |
| +void ClientBufferFrameDeliveryStrategy::Initialize( |
| + VideoPixelFormat, |
| + std::unique_ptr<VideoCaptureDevice::Client> client, |
| + const FakeDeviceState* device_state) { |
| + client_ = std::move(client); |
| + device_state_ = device_state; |
| +} |
| + |
| +void ClientBufferFrameDeliveryStrategy::Uninitialize() { |
| + client_.reset(); |
| + device_state_ = nullptr; |
| +} |
| + |
| +uint8_t* ClientBufferFrameDeliveryStrategy::PrepareBufferForNextFrame() { |
| + if (client_ == nullptr) |
| + return nullptr; |
| const int arbitrary_frame_feedback_id = 0; |
| - VideoCaptureDevice::Client::Buffer capture_buffer = |
| - client_->ReserveOutputBuffer( |
| - capture_format_.frame_size, capture_format_.pixel_format, |
| - capture_format_.pixel_storage, arbitrary_frame_feedback_id); |
| - DLOG_IF(ERROR, !capture_buffer.is_valid()) |
| + capture_buffer_ = client_->ReserveOutputBuffer( |
| + device_state_->format.frame_size, device_state_->format.pixel_format, |
| + device_state_->format.pixel_storage, arbitrary_frame_feedback_id); |
| + DLOG_IF(ERROR, !capture_buffer_.is_valid()) |
| << "Couldn't allocate Capture Buffer"; |
| auto buffer_access = |
| - capture_buffer.handle_provider()->GetHandleForInProcessAccess(); |
| + capture_buffer_.handle_provider()->GetHandleForInProcessAccess(); |
| DCHECK(buffer_access->data()) << "Buffer has NO backing memory"; |
| - DCHECK_EQ(PIXEL_STORAGE_CPU, capture_format_.pixel_storage); |
| + DCHECK_EQ(device_state_->format.pixel_storage, PIXEL_STORAGE_CPU); |
| + |
| uint8_t* data_ptr = buffer_access->data(); |
| memset(data_ptr, 0, buffer_access->mapped_size()); |
| - DrawPacman(capture_format_.pixel_format, data_ptr, elapsed_time_, |
| - fake_capture_rate_, capture_format_.frame_size, current_zoom_); |
| + return data_ptr; |
| +} |
| + |
| +void ClientBufferFrameDeliveryStrategy::DeliverFrame() { |
| + if (client_ == nullptr) |
| + return; |
| - // Give the captured frame to the client. |
| base::TimeTicks now = base::TimeTicks::Now(); |
| if (first_ref_time_.is_null()) |
| first_ref_time_ = now; |
| - client_->OnIncomingCapturedBuffer(std::move(capture_buffer), capture_format_, |
| - now, now - first_ref_time_); |
| - |
| - BeepAndScheduleNextCapture( |
| - expected_execution_time, |
| - base::Bind(&FakeVideoCaptureDevice::CaptureUsingClientBuffers, |
| - weak_factory_.GetWeakPtr())); |
| + client_->OnIncomingCapturedBuffer(std::move(capture_buffer_), |
| + device_state_->format, now, |
| + now - first_ref_time_); |
| } |
| void FakeVideoCaptureDevice::BeepAndScheduleNextCapture( |
| - base::TimeTicks expected_execution_time, |
| - const base::Callback<void(base::TimeTicks)>& next_capture) { |
| + base::TimeTicks expected_execution_time) { |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| const base::TimeDelta beep_interval = |
| base::TimeDelta::FromMilliseconds(kBeepInterval); |
| const base::TimeDelta frame_interval = |
| - base::TimeDelta::FromMicroseconds(1e6 / fake_capture_rate_); |
| + base::TimeDelta::FromMicroseconds(1e6 / device_state_->format.frame_rate); |
| beep_time_ += frame_interval; |
| elapsed_time_ += frame_interval; |
| @@ -372,7 +640,24 @@ void FakeVideoCaptureDevice::BeepAndScheduleNextCapture( |
| std::max(current_time, expected_execution_time + frame_interval); |
| const base::TimeDelta delay = next_execution_time - current_time; |
| base::ThreadTaskRunnerHandle::Get()->PostDelayedTask( |
| - FROM_HERE, base::Bind(next_capture, next_execution_time), delay); |
| + FROM_HERE, base::Bind(&FakeVideoCaptureDevice::OnNextFrameDue, |
| + weak_factory_.GetWeakPtr(), next_execution_time, |
| + current_session_id_), |
| + delay); |
| +} |
| + |
| +void FakeVideoCaptureDevice::OnNextFrameDue( |
| + base::TimeTicks expected_execution_time, |
| + int session_id) { |
| + DCHECK(thread_checker_.CalledOnValidThread()); |
| + if (session_id != current_session_id_) |
| + return; |
| + |
| + uint8_t* const buffer = frame_delivery_strategy_->PrepareBufferForNextFrame(); |
| + frame_painter_->PaintFrame(elapsed_time_, buffer); |
| + frame_delivery_strategy_->DeliverFrame(); |
| + |
| + BeepAndScheduleNextCapture(expected_execution_time); |
| } |
| } // namespace media |