Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/media/webrtc/webrtc_video_capturer_adapter.h" | 5 #include "content/renderer/media/webrtc/webrtc_video_capturer_adapter.h" |
| 6 | 6 |
| 7 #include "base/bind.h" | 7 #include "base/bind.h" |
| 8 #include "base/memory/aligned_memory.h" | 8 #include "base/memory/aligned_memory.h" |
| 9 #include "base/memory/ref_counted.h" | |
| 10 #include "base/synchronization/waitable_event.h" | |
| 9 #include "base/trace_event/trace_event.h" | 11 #include "base/trace_event/trace_event.h" |
| 12 #include "content/common/gpu/client/context_provider_command_buffer.h" | |
| 10 #include "content/renderer/media/webrtc/webrtc_video_frame_adapter.h" | 13 #include "content/renderer/media/webrtc/webrtc_video_frame_adapter.h" |
| 14 #include "content/renderer/render_thread_impl.h" | |
| 11 #include "media/base/timestamp_constants.h" | 15 #include "media/base/timestamp_constants.h" |
| 12 #include "media/base/video_util.h" | 16 #include "media/base/video_util.h" |
| 17 #include "media/renderers/skcanvas_video_renderer.h" | |
| 18 #include "skia/ext/platform_canvas.h" | |
| 19 #include "third_party/libyuv/include/libyuv/convert.h" | |
| 13 #include "third_party/libyuv/include/libyuv/convert_from.h" | 20 #include "third_party/libyuv/include/libyuv/convert_from.h" |
| 14 #include "third_party/libyuv/include/libyuv/scale.h" | 21 #include "third_party/libyuv/include/libyuv/scale.h" |
| 22 #include "third_party/skia/include/core/SkSurface.h" | |
| 15 #include "third_party/webrtc/common_video/include/video_frame_buffer.h" | 23 #include "third_party/webrtc/common_video/include/video_frame_buffer.h" |
| 16 #include "third_party/webrtc/common_video/rotation.h" | 24 #include "third_party/webrtc/common_video/rotation.h" |
| 17 #include "third_party/webrtc/media/engine/webrtcvideoframe.h" | 25 #include "third_party/webrtc/media/engine/webrtcvideoframe.h" |
| 18 | 26 |
| 19 namespace content { | 27 namespace content { |
| 28 | |
| 20 namespace { | 29 namespace { |
| 21 | 30 |
| 22 // Empty method used for keeping a reference to the original media::VideoFrame. | 31 // Empty method used for keeping a reference to the original media::VideoFrame. |
| 23 // The reference to |frame| is kept in the closure that calls this method. | 32 // The reference to |frame| is kept in the closure that calls this method. |
| 24 void ReleaseOriginalFrame(const scoped_refptr<media::VideoFrame>& frame) { | 33 void ReleaseOriginalFrame(const scoped_refptr<media::VideoFrame>& frame) { |
| 25 } | 34 } |
| 26 | 35 |
| 36 // Helper class that signals a WaitableEvent when it goes out of scope. | |
| 37 class ScopedWaitableEvent { | |
| 38 public: | |
| 39 explicit ScopedWaitableEvent(base::WaitableEvent* event) : event_(event) {} | |
| 40 ~ScopedWaitableEvent() { | |
| 41 if (event_) | |
| 42 event_->Signal(); | |
| 43 } | |
| 44 | |
| 45 private: | |
| 46 base::WaitableEvent* const event_; | |
| 47 }; | |
| 48 | |
| 27 } // anonymous namespace | 49 } // anonymous namespace |
| 28 | 50 |
| 51 // Initializes the GL context environment and provides a method for copying | |
| 52 // texture backed frames into CPU mappable memory. | |
| 53 // The class is created and destroyed on the main render thread. | |
| 54 class WebRtcVideoCapturerAdapter::TextureFrameCopier | |
| 55 : public base::RefCounted<WebRtcVideoCapturerAdapter::TextureFrameCopier> { | |
| 56 public: | |
| 57 TextureFrameCopier() | |
| 58 : main_thread_task_runner_(base::ThreadTaskRunnerHandle::Get()), | |
| 59 canvas_video_renderer_(new media::SkCanvasVideoRenderer) { | |
| 60 RenderThreadImpl* const main_thread = RenderThreadImpl::current(); | |
| 61 if (main_thread) | |
| 62 provider_ = main_thread->SharedMainThreadContextProvider(); | |
| 63 } | |
| 64 | |
| 65 // Synchronous call to copy a texture backed |frame| into a CPU mappable | |
| 66 // |new_frame|. If it is not called on the main render thread, this call posts | |
| 67 // a task on main thread by calling CopyTextureFrameOnMainThread() and blocks | |
| 68 // until it is completed. | |
| 69 void CopyTextureFrame(const scoped_refptr<media::VideoFrame>& frame, | |
| 70 scoped_refptr<media::VideoFrame>* new_frame) { | |
| 71 if (main_thread_task_runner_->BelongsToCurrentThread()) { | |
| 72 CopyTextureFrameOnMainThread(frame, new_frame, nullptr); | |
| 73 return; | |
| 74 } | |
| 75 | |
| 76 base::WaitableEvent waiter(base::WaitableEvent::ResetPolicy::MANUAL, | |
| 77 base::WaitableEvent::InitialState::NOT_SIGNALED); | |
| 78 main_thread_task_runner_->PostTask( | |
| 79 FROM_HERE, base::Bind(&TextureFrameCopier::CopyTextureFrameOnMainThread, | |
| 80 this, frame, new_frame, &waiter)); | |
| 81 waiter.Wait(); | |
| 82 } | |
| 83 | |
| 84 private: | |
| 85 friend class base::RefCounted<TextureFrameCopier>; | |
| 86 ~TextureFrameCopier() { | |
| 87 // |canvas_video_renderer_| should be deleted on the thread it was created. | |
| 88 if (!main_thread_task_runner_->BelongsToCurrentThread()) { | |
| 89 main_thread_task_runner_->DeleteSoon(FROM_HERE, | |
| 90 canvas_video_renderer_.release()); | |
| 91 } | |
| 92 } | |
| 93 | |
| 94 void CopyTextureFrameOnMainThread( | |
| 95 const scoped_refptr<media::VideoFrame>& frame, | |
| 96 scoped_refptr<media::VideoFrame>* new_frame, | |
| 97 base::WaitableEvent* waiter) { | |
| 98 DCHECK(main_thread_task_runner_->BelongsToCurrentThread()); | |
| 99 DCHECK(frame->format() == media::PIXEL_FORMAT_ARGB || | |
| 100 frame->format() == media::PIXEL_FORMAT_XRGB || | |
| 101 frame->format() == media::PIXEL_FORMAT_I420 || | |
| 102 frame->format() == media::PIXEL_FORMAT_UYVY || | |
| 103 frame->format() == media::PIXEL_FORMAT_NV12); | |
| 104 ScopedWaitableEvent event(waiter); | |
| 105 sk_sp<SkSurface> surface = SkSurface::MakeRasterN32Premul( | |
| 106 frame->visible_rect().width(), frame->visible_rect().height()); | |
| 107 | |
| 108 if (!surface || !provider_) { | |
| 109 // Return a black frame (yuv = {0, 0x80, 0x80}). | |
| 110 *new_frame = media::VideoFrame::CreateColorFrame( | |
| 111 frame->visible_rect().size(), 0u, 0x80, 0x80, frame->timestamp()); | |
| 112 return; | |
| 113 } | |
| 114 | |
| 115 *new_frame = media::VideoFrame::CreateFrame( | |
| 116 media::PIXEL_FORMAT_I420, frame->coded_size(), frame->visible_rect(), | |
| 117 frame->natural_size(), frame->timestamp()); | |
| 118 DCHECK(provider_->ContextGL()); | |
| 119 canvas_video_renderer_->Copy( | |
| 120 frame.get(), surface->getCanvas(), | |
| 121 media::Context3D(provider_->ContextGL(), provider_->GrContext())); | |
| 122 | |
| 123 SkPixmap pixmap; | |
| 124 const bool result = surface->getCanvas()->peekPixels(&pixmap); | |
| 125 DCHECK(result) << "Error trying to access SkSurface's pixels"; | |
| 126 const uint32 source_pixel_format = | |
| 127 (kN32_SkColorType == kRGBA_8888_SkColorType) ? cricket::FOURCC_ABGR | |
| 128 : cricket::FOURCC_ARGB; | |
| 129 libyuv::ConvertToI420( | |
| 130 static_cast<const uint8*>(pixmap.addr(0, 0)), pixmap.getSafeSize64(), | |
| 131 (*new_frame)->visible_data(media::VideoFrame::kYPlane), | |
| 132 (*new_frame)->stride(media::VideoFrame::kYPlane), | |
| 133 (*new_frame)->visible_data(media::VideoFrame::kUPlane), | |
| 134 (*new_frame)->stride(media::VideoFrame::kUPlane), | |
| 135 (*new_frame)->visible_data(media::VideoFrame::kVPlane), | |
| 136 (*new_frame)->stride(media::VideoFrame::kVPlane), 0 /* crop_x */, | |
| 137 0 /* crop_y */, pixmap.width(), pixmap.height(), | |
| 138 (*new_frame)->visible_rect().width(), | |
| 139 (*new_frame)->visible_rect().height(), libyuv::kRotate0, | |
| 140 source_pixel_format); | |
| 141 } | |
| 142 | |
| 143 const scoped_refptr<base::SingleThreadTaskRunner> main_thread_task_runner_; | |
| 144 scoped_refptr<ContextProviderCommandBuffer> provider_; | |
| 145 std::unique_ptr<media::SkCanvasVideoRenderer> canvas_video_renderer_; | |
| 146 }; | |
| 147 | |
| 29 WebRtcVideoCapturerAdapter::WebRtcVideoCapturerAdapter(bool is_screencast) | 148 WebRtcVideoCapturerAdapter::WebRtcVideoCapturerAdapter(bool is_screencast) |
| 30 : is_screencast_(is_screencast), | 149 : texture_copier_(new WebRtcVideoCapturerAdapter::TextureFrameCopier()), |
| 150 is_screencast_(is_screencast), | |
| 31 running_(false) { | 151 running_(false) { |
| 32 thread_checker_.DetachFromThread(); | 152 thread_checker_.DetachFromThread(); |
| 33 } | 153 } |
| 34 | 154 |
| 35 WebRtcVideoCapturerAdapter::~WebRtcVideoCapturerAdapter() { | 155 WebRtcVideoCapturerAdapter::~WebRtcVideoCapturerAdapter() { |
| 36 DVLOG(3) << " WebRtcVideoCapturerAdapter::dtor"; | 156 DVLOG(3) << __func__; |
| 37 } | |
| 38 | |
| 39 cricket::CaptureState WebRtcVideoCapturerAdapter::Start( | |
| 40 const cricket::VideoFormat& capture_format) { | |
| 41 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 42 DCHECK(!running_); | |
| 43 DVLOG(3) << " WebRtcVideoCapturerAdapter::Start w = " << capture_format.width | |
| 44 << " h = " << capture_format.height; | |
| 45 | |
| 46 running_ = true; | |
| 47 return cricket::CS_RUNNING; | |
| 48 } | |
| 49 | |
| 50 void WebRtcVideoCapturerAdapter::Stop() { | |
| 51 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 52 DVLOG(3) << " WebRtcVideoCapturerAdapter::Stop "; | |
| 53 DCHECK(running_); | |
| 54 running_ = false; | |
| 55 SetCaptureFormat(NULL); | |
| 56 SignalStateChange(this, cricket::CS_STOPPED); | |
| 57 } | |
| 58 | |
| 59 bool WebRtcVideoCapturerAdapter::IsRunning() { | |
| 60 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 61 return running_; | |
| 62 } | |
| 63 | |
| 64 bool WebRtcVideoCapturerAdapter::GetPreferredFourccs( | |
| 65 std::vector<uint32_t>* fourccs) { | |
| 66 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 67 DCHECK(!fourccs || fourccs->empty()); | |
| 68 if (fourccs) | |
| 69 fourccs->push_back(cricket::FOURCC_I420); | |
| 70 return fourccs != NULL; | |
| 71 } | |
| 72 | |
| 73 bool WebRtcVideoCapturerAdapter::IsScreencast() const { | |
| 74 return is_screencast_; | |
| 75 } | |
| 76 | |
| 77 bool WebRtcVideoCapturerAdapter::GetBestCaptureFormat( | |
| 78 const cricket::VideoFormat& desired, | |
| 79 cricket::VideoFormat* best_format) { | |
| 80 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 81 DVLOG(3) << " GetBestCaptureFormat:: " | |
| 82 << " w = " << desired.width | |
| 83 << " h = " << desired.height; | |
| 84 | |
| 85 // Capability enumeration is done in MediaStreamVideoSource. The adapter can | |
| 86 // just use what is provided. | |
| 87 // Use the desired format as the best format. | |
| 88 best_format->width = desired.width; | |
| 89 best_format->height = desired.height; | |
| 90 best_format->fourcc = cricket::FOURCC_I420; | |
| 91 best_format->interval = desired.interval; | |
| 92 return true; | |
| 93 } | 157 } |
| 94 | 158 |
| 95 void WebRtcVideoCapturerAdapter::OnFrameCaptured( | 159 void WebRtcVideoCapturerAdapter::OnFrameCaptured( |
| 96 const scoped_refptr<media::VideoFrame>& input_frame) { | 160 const scoped_refptr<media::VideoFrame>& input_frame) { |
| 97 DCHECK(thread_checker_.CalledOnValidThread()); | 161 DCHECK(thread_checker_.CalledOnValidThread()); |
| 98 TRACE_EVENT0("video", "WebRtcVideoCapturerAdapter::OnFrameCaptured"); | 162 TRACE_EVENT0("video", "WebRtcVideoCapturerAdapter::OnFrameCaptured"); |
| 99 if (!(input_frame->IsMappable() && | 163 if (!(input_frame->IsMappable() && |
| 100 (input_frame->format() == media::PIXEL_FORMAT_I420 || | 164 (input_frame->format() == media::PIXEL_FORMAT_I420 || |
| 101 input_frame->format() == media::PIXEL_FORMAT_YV12 || | 165 input_frame->format() == media::PIXEL_FORMAT_YV12 || |
| 102 input_frame->format() == media::PIXEL_FORMAT_YV12A))) { | 166 input_frame->format() == media::PIXEL_FORMAT_YV12A)) && |
| 167 !input_frame->HasTextures()) { | |
| 103 // Since connecting sources and sinks do not check the format, we need to | 168 // Since connecting sources and sinks do not check the format, we need to |
| 104 // just ignore formats that we can not handle. | 169 // just ignore formats that we can not handle. |
| 170 LOG(ERROR) << "We cannot send frame with storage type: " | |
| 171 << input_frame->storage_type() << " format: " | |
| 172 << media::VideoPixelFormatToString(input_frame->format()); | |
|
mcasas
2016/10/28 19:49:35
nit: my suggestion was ToString() the whole thing,
emircan
2016/10/28 22:08:17
AsHumanReadableString() is awesome. Thanks.
| |
| 105 NOTREACHED(); | 173 NOTREACHED(); |
| 106 return; | 174 return; |
| 107 } | 175 } |
| 108 scoped_refptr<media::VideoFrame> frame = input_frame; | 176 scoped_refptr<media::VideoFrame> frame = input_frame; |
| 109 // Drop alpha channel since we do not support it yet. | 177 // Drop alpha channel since we do not support it yet. |
| 110 if (frame->format() == media::PIXEL_FORMAT_YV12A) | 178 if (frame->format() == media::PIXEL_FORMAT_YV12A) |
| 111 frame = media::WrapAsI420VideoFrame(input_frame); | 179 frame = media::WrapAsI420VideoFrame(input_frame); |
| 112 | 180 |
| 113 const int orig_width = frame->natural_size().width(); | 181 const int orig_width = frame->natural_size().width(); |
| 114 const int orig_height = frame->natural_size().height(); | 182 const int orig_height = frame->natural_size().height(); |
| 115 int adapted_width; | 183 int adapted_width; |
| 116 int adapted_height; | 184 int adapted_height; |
| 117 // The VideoAdapter is only used for cpu-adaptation downscaling, no | 185 // The VideoAdapter is only used for cpu-adaptation downscaling, no |
| 118 // aspect changes. So we ignore these crop-related outputs. | 186 // aspect changes. So we ignore these crop-related outputs. |
| 119 int crop_width; | 187 int crop_width; |
| 120 int crop_height; | 188 int crop_height; |
| 121 int crop_x; | 189 int crop_x; |
| 122 int crop_y; | 190 int crop_y; |
| 123 int64_t translated_camera_time_us; | 191 int64_t translated_camera_time_us; |
| 124 | 192 |
| 125 if (!AdaptFrame(orig_width, orig_height, | 193 if (!AdaptFrame(orig_width, orig_height, |
| 126 frame->timestamp().InMicroseconds(), | 194 frame->timestamp().InMicroseconds(), |
| 127 rtc::TimeMicros(), | 195 rtc::TimeMicros(), |
| 128 &adapted_width, &adapted_height, | 196 &adapted_width, &adapted_height, |
| 129 &crop_width, &crop_height, &crop_x, &crop_y, | 197 &crop_width, &crop_height, &crop_x, &crop_y, |
| 130 &translated_camera_time_us)) { | 198 &translated_camera_time_us)) { |
| 131 return; | 199 return; |
| 132 } | 200 } |
| 133 | 201 |
| 202 WebRtcVideoFrameAdapter::CopyTextureFrameCallback copy_texture_callback = | |
| 203 base::Bind(&TextureFrameCopier::CopyTextureFrame, texture_copier_); | |
| 134 // Return |frame| directly if it is texture backed, because there is no | 204 // Return |frame| directly if it is texture backed, because there is no |
| 135 // cropping support for texture yet. See http://crbug/503653. | 205 // cropping support for texture yet. See http://crbug/503653. |
| 136 // Return |frame| directly if it is GpuMemoryBuffer backed, as we want to | |
| 137 // keep the frame on native buffers. | |
| 138 if (frame->HasTextures()) { | 206 if (frame->HasTextures()) { |
| 139 OnFrame(cricket::WebRtcVideoFrame( | 207 OnFrame(cricket::WebRtcVideoFrame( |
| 140 new rtc::RefCountedObject<WebRtcVideoFrameAdapter>(frame), | 208 new rtc::RefCountedObject<WebRtcVideoFrameAdapter>( |
| 209 frame, copy_texture_callback), | |
| 141 webrtc::kVideoRotation_0, translated_camera_time_us), | 210 webrtc::kVideoRotation_0, translated_camera_time_us), |
| 142 orig_width, orig_height); | 211 orig_width, orig_height); |
| 143 return; | 212 return; |
| 144 } | 213 } |
| 145 | 214 |
| 146 // Translate crop rectangle from natural size to visible size. | 215 // Translate crop rectangle from natural size to visible size. |
| 147 gfx::Rect cropped_visible_rect( | 216 gfx::Rect cropped_visible_rect( |
| 148 frame->visible_rect().x() + | 217 frame->visible_rect().x() + |
| 149 crop_x * frame->visible_rect().width() / orig_width, | 218 crop_x * frame->visible_rect().width() / orig_width, |
| 150 frame->visible_rect().y() + | 219 frame->visible_rect().y() + |
| 151 crop_y * frame->visible_rect().height() / orig_height, | 220 crop_y * frame->visible_rect().height() / orig_height, |
| 152 crop_width * frame->visible_rect().width() / orig_width, | 221 crop_width * frame->visible_rect().width() / orig_width, |
| 153 crop_height * frame->visible_rect().height() / orig_height); | 222 crop_height * frame->visible_rect().height() / orig_height); |
| 154 | 223 |
| 155 const gfx::Size adapted_size(adapted_width, adapted_height); | 224 const gfx::Size adapted_size(adapted_width, adapted_height); |
| 156 scoped_refptr<media::VideoFrame> video_frame = | 225 scoped_refptr<media::VideoFrame> video_frame = |
| 157 media::VideoFrame::WrapVideoFrame(frame, frame->format(), | 226 media::VideoFrame::WrapVideoFrame(frame, frame->format(), |
| 158 cropped_visible_rect, adapted_size); | 227 cropped_visible_rect, adapted_size); |
| 159 if (!video_frame) | 228 if (!video_frame) |
| 160 return; | 229 return; |
| 161 | 230 |
| 162 video_frame->AddDestructionObserver(base::Bind(&ReleaseOriginalFrame, frame)); | 231 video_frame->AddDestructionObserver(base::Bind(&ReleaseOriginalFrame, frame)); |
| 163 | 232 |
| 164 // If no scaling is needed, return a wrapped version of |frame| directly. | 233 // If no scaling is needed, return a wrapped version of |frame| directly. |
| 165 if (video_frame->natural_size() == video_frame->visible_rect().size()) { | 234 if (video_frame->natural_size() == video_frame->visible_rect().size()) { |
| 166 OnFrame(cricket::WebRtcVideoFrame( | 235 OnFrame(cricket::WebRtcVideoFrame( |
| 167 new rtc::RefCountedObject<WebRtcVideoFrameAdapter>(video_frame), | 236 new rtc::RefCountedObject<WebRtcVideoFrameAdapter>( |
| 237 video_frame, copy_texture_callback), | |
| 168 webrtc::kVideoRotation_0, translated_camera_time_us), | 238 webrtc::kVideoRotation_0, translated_camera_time_us), |
| 169 orig_width, orig_height); | 239 orig_width, orig_height); |
| 170 return; | 240 return; |
| 171 } | 241 } |
| 172 | 242 |
| 173 // We need to scale the frame before we hand it over to webrtc. | 243 // We need to scale the frame before we hand it over to webrtc. |
| 174 scoped_refptr<media::VideoFrame> scaled_frame = | 244 scoped_refptr<media::VideoFrame> scaled_frame = |
| 175 scaled_frame_pool_.CreateFrame(media::PIXEL_FORMAT_I420, adapted_size, | 245 scaled_frame_pool_.CreateFrame(media::PIXEL_FORMAT_I420, adapted_size, |
| 176 gfx::Rect(adapted_size), adapted_size, | 246 gfx::Rect(adapted_size), adapted_size, |
| 177 frame->timestamp()); | 247 frame->timestamp()); |
| 178 libyuv::I420Scale(video_frame->visible_data(media::VideoFrame::kYPlane), | 248 libyuv::I420Scale(video_frame->visible_data(media::VideoFrame::kYPlane), |
| 179 video_frame->stride(media::VideoFrame::kYPlane), | 249 video_frame->stride(media::VideoFrame::kYPlane), |
| 180 video_frame->visible_data(media::VideoFrame::kUPlane), | 250 video_frame->visible_data(media::VideoFrame::kUPlane), |
| 181 video_frame->stride(media::VideoFrame::kUPlane), | 251 video_frame->stride(media::VideoFrame::kUPlane), |
| 182 video_frame->visible_data(media::VideoFrame::kVPlane), | 252 video_frame->visible_data(media::VideoFrame::kVPlane), |
| 183 video_frame->stride(media::VideoFrame::kVPlane), | 253 video_frame->stride(media::VideoFrame::kVPlane), |
| 184 video_frame->visible_rect().width(), | 254 video_frame->visible_rect().width(), |
| 185 video_frame->visible_rect().height(), | 255 video_frame->visible_rect().height(), |
| 186 scaled_frame->data(media::VideoFrame::kYPlane), | 256 scaled_frame->data(media::VideoFrame::kYPlane), |
| 187 scaled_frame->stride(media::VideoFrame::kYPlane), | 257 scaled_frame->stride(media::VideoFrame::kYPlane), |
| 188 scaled_frame->data(media::VideoFrame::kUPlane), | 258 scaled_frame->data(media::VideoFrame::kUPlane), |
| 189 scaled_frame->stride(media::VideoFrame::kUPlane), | 259 scaled_frame->stride(media::VideoFrame::kUPlane), |
| 190 scaled_frame->data(media::VideoFrame::kVPlane), | 260 scaled_frame->data(media::VideoFrame::kVPlane), |
| 191 scaled_frame->stride(media::VideoFrame::kVPlane), | 261 scaled_frame->stride(media::VideoFrame::kVPlane), |
| 192 adapted_width, adapted_height, libyuv::kFilterBilinear); | 262 adapted_width, adapted_height, libyuv::kFilterBilinear); |
| 193 | 263 |
| 194 OnFrame(cricket::WebRtcVideoFrame( | 264 OnFrame(cricket::WebRtcVideoFrame( |
| 195 new rtc::RefCountedObject<WebRtcVideoFrameAdapter>(scaled_frame), | 265 new rtc::RefCountedObject<WebRtcVideoFrameAdapter>( |
| 266 scaled_frame, copy_texture_callback), | |
| 196 webrtc::kVideoRotation_0, translated_camera_time_us), | 267 webrtc::kVideoRotation_0, translated_camera_time_us), |
|
mcasas
2016/10/28 19:49:35
nit: Since here and in l.237 we know that
frame->HasTextures()
emircan
2016/10/28 22:08:17
Empty callback makes more sense as reference is held
| |
| 197 orig_width, orig_height); | 268 orig_width, orig_height); |
| 198 } | 269 } |
| 199 | 270 |
| 271 cricket::CaptureState WebRtcVideoCapturerAdapter::Start( | |
| 272 const cricket::VideoFormat& capture_format) { | |
| 273 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 274 DCHECK(!running_); | |
| 275 DVLOG(3) << __func__ << " capture format: " << capture_format.ToString(); | |
| 276 | |
| 277 running_ = true; | |
| 278 return cricket::CS_RUNNING; | |
| 279 } | |
| 280 | |
| 281 void WebRtcVideoCapturerAdapter::Stop() { | |
| 282 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 283 DVLOG(3) << __func__; | |
| 284 DCHECK(running_); | |
| 285 running_ = false; | |
| 286 SetCaptureFormat(NULL); | |
| 287 SignalStateChange(this, cricket::CS_STOPPED); | |
| 288 } | |
| 289 | |
| 290 bool WebRtcVideoCapturerAdapter::IsRunning() { | |
| 291 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 292 return running_; | |
| 293 } | |
| 294 | |
| 295 bool WebRtcVideoCapturerAdapter::GetPreferredFourccs( | |
| 296 std::vector<uint32_t>* fourccs) { | |
| 297 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 298 if (!fourccs) | |
| 299 return false; | |
| 300 DCHECK(fourccs->empty()); | |
| 301 fourccs->push_back(cricket::FOURCC_I420); | |
| 302 return true; | |
| 303 } | |
| 304 | |
| 305 bool WebRtcVideoCapturerAdapter::IsScreencast() const { | |
| 306 return is_screencast_; | |
| 307 } | |
| 308 | |
| 309 bool WebRtcVideoCapturerAdapter::GetBestCaptureFormat( | |
| 310 const cricket::VideoFormat& desired, | |
| 311 cricket::VideoFormat* best_format) { | |
| 312 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 313 DVLOG(3) << __func__ << " desired: " << desired.ToString(); | |
| 314 | |
| 315 // Capability enumeration is done in MediaStreamVideoSource. The adapter can | |
| 316 // just use what is provided. | |
| 317 // Use the desired format as the best format. | |
| 318 best_format->width = desired.width; | |
| 319 best_format->height = desired.height; | |
| 320 best_format->fourcc = cricket::FOURCC_I420; | |
| 321 best_format->interval = desired.interval; | |
| 322 return true; | |
| 323 } | |
| 324 | |
| 200 } // namespace content | 325 } // namespace content |
| OLD | NEW |