OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "remoting/codec/video_encoder_vpx.h" | 5 #include "remoting/codec/video_encoder_vpx.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/logging.h" | 8 #include "base/logging.h" |
9 #include "base/sys_info.h" | 9 #include "base/sys_info.h" |
10 #include "media/base/yuv_convert.h" | |
11 #include "remoting/base/util.h" | 10 #include "remoting/base/util.h" |
12 #include "remoting/proto/video.pb.h" | 11 #include "remoting/proto/video.pb.h" |
| 12 #include "third_party/libyuv/include/libyuv/convert_from_argb.h" |
13 #include "third_party/webrtc/modules/desktop_capture/desktop_frame.h" | 13 #include "third_party/webrtc/modules/desktop_capture/desktop_frame.h" |
14 #include "third_party/webrtc/modules/desktop_capture/desktop_geometry.h" | 14 #include "third_party/webrtc/modules/desktop_capture/desktop_geometry.h" |
15 #include "third_party/webrtc/modules/desktop_capture/desktop_region.h" | 15 #include "third_party/webrtc/modules/desktop_capture/desktop_region.h" |
16 | 16 |
17 extern "C" { | 17 extern "C" { |
18 #define VPX_CODEC_DISABLE_COMPAT 1 | 18 #define VPX_CODEC_DISABLE_COMPAT 1 |
19 #include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h" | 19 #include "third_party/libvpx/source/libvpx/vpx/vpx_encoder.h" |
20 #include "third_party/libvpx/source/libvpx/vpx/vp8cx.h" | 20 #include "third_party/libvpx/source/libvpx/vpx/vp8cx.h" |
21 } | 21 } |
22 | 22 |
23 namespace remoting { | 23 namespace remoting { |
24 | 24 |
25 namespace { | 25 namespace { |
26 | 26 |
| 27 // Number of bytes in an RGBx pixel. |
| 28 const int kBytesPerRgbPixel = 4; |
| 29 |
27 // Defines the dimension of a macro block. This is used to compute the active | 30 // Defines the dimension of a macro block. This is used to compute the active |
28 // map for the encoder. | 31 // map for the encoder. |
29 const int kMacroBlockSize = 16; | 32 const int kMacroBlockSize = 16; |
30 | 33 |
| 34 // Magic encoder profile numbers for I420 and I444 input formats. |
| 35 const int kVp9I420ProfileNumber = 0; |
| 36 const int kVp9I444ProfileNumber = 1; |
| 37 |
31 void SetCommonCodecParameters(const webrtc::DesktopSize& size, | 38 void SetCommonCodecParameters(const webrtc::DesktopSize& size, |
32 vpx_codec_enc_cfg_t* config) { | 39 vpx_codec_enc_cfg_t* config) { |
33 // Use millisecond granularity time base. | 40 // Use millisecond granularity time base. |
34 config->g_timebase.num = 1; | 41 config->g_timebase.num = 1; |
35 config->g_timebase.den = 1000; | 42 config->g_timebase.den = 1000; |
36 | 43 |
37 // Adjust default target bit-rate to account for actual desktop size. | 44 // Adjust default target bit-rate to account for actual desktop size. |
38 config->rc_target_bitrate = size.width() * size.height() * | 45 config->rc_target_bitrate = size.width() * size.height() * |
39 config->rc_target_bitrate / config->g_w / config->g_h; | 46 config->rc_target_bitrate / config->g_w / config->g_h; |
40 | 47 |
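The rescaling above keeps libvpx's default bits-per-pixel ratio while substituting the real desktop area for the default g_w x g_h. A minimal sketch of the arithmetic, using assumed illustrative defaults (the real values come from vpx_codec_enc_config_default()):

    // Hypothetical defaults, for illustration only: a 320x240 base config
    // targeting 256 kbit/s.
    const unsigned int kDefaultW = 320, kDefaultH = 240, kDefaultKbps = 256;
    // A 1920x1200 desktop then gets
    //   1920 * 1200 * 256 / 320 / 240 = 7680 kbit/s.
    unsigned int scaled_kbps =
        1920 * 1200 * kDefaultKbps / kDefaultW / kDefaultH;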
(...skipping 42 matching lines...)
83 return ScopedVpxCodec(); | 90 return ScopedVpxCodec(); |
84 | 91 |
85 // Use the lowest level of noise sensitivity so as to spend less time | 92 // Use the lowest level of noise sensitivity so as to spend less time |
86 // on motion estimation and inter-prediction mode. | 93 // on motion estimation and inter-prediction mode. |
87 if (vpx_codec_control(codec.get(), VP8E_SET_NOISE_SENSITIVITY, 0)) | 94 if (vpx_codec_control(codec.get(), VP8E_SET_NOISE_SENSITIVITY, 0)) |
88 return ScopedVpxCodec(); | 95 return ScopedVpxCodec(); |
89 | 96 |
90 return codec.Pass(); | 97 return codec.Pass(); |
91 } | 98 } |
92 | 99 |
93 ScopedVpxCodec CreateVP9Codec(const webrtc::DesktopSize& size) { | 100 ScopedVpxCodec CreateVP9Codec(bool use_i444, const webrtc::DesktopSize& size) { |
94 ScopedVpxCodec codec(new vpx_codec_ctx_t); | 101 ScopedVpxCodec codec(new vpx_codec_ctx_t); |
95 | 102 |
96 // Configure the encoder. | 103 // Configure the encoder. |
97 vpx_codec_enc_cfg_t config; | 104 vpx_codec_enc_cfg_t config; |
98 const vpx_codec_iface_t* algo = vpx_codec_vp9_cx(); | 105 const vpx_codec_iface_t* algo = vpx_codec_vp9_cx(); |
99 CHECK(algo); | 106 CHECK(algo); |
100 vpx_codec_err_t ret = vpx_codec_enc_config_default(algo, &config, 0); | 107 vpx_codec_err_t ret = vpx_codec_enc_config_default(algo, &config, 0); |
101 if (ret != VPX_CODEC_OK) | 108 if (ret != VPX_CODEC_OK) |
102 return ScopedVpxCodec(); | 109 return ScopedVpxCodec(); |
103 | 110 |
104 SetCommonCodecParameters(size, &config); | 111 SetCommonCodecParameters(size, &config); |
105 | 112 |
106 // Configure VP9 for I420 source frames. | 113 // Configure VP9 for I420 or I444 source frames. |
107 config.g_profile = 0; | 114 config.g_profile = use_i444 ? kVp9I444ProfileNumber : kVp9I420ProfileNumber; |
108 | 115 |
109 // Disable quantization entirely, putting the encoder in "lossless" mode. | 116 // Disable quantization entirely, putting the encoder in "lossless" mode. |
110 config.rc_min_quantizer = 0; | 117 config.rc_min_quantizer = 0; |
111 config.rc_max_quantizer = 0; | 118 config.rc_max_quantizer = 0; |
112 | 119 |
113 if (vpx_codec_enc_init(codec.get(), algo, &config, 0)) | 120 if (vpx_codec_enc_init(codec.get(), algo, &config, 0)) |
114 return ScopedVpxCodec(); | 121 return ScopedVpxCodec(); |
115 | 122 |
116 // VP9 encode doesn't yet support Realtime, so falls back to Good quality, | 123 // VP9 encode doesn't yet support Realtime, so falls back to Good quality, |
117 // for which 4 is the lowest CPU usage. | 124 // for which 4 is the lowest CPU usage. |
118 // Note that this is configured via the same parameter as for VP8. | 125 // Note that this is configured via the same parameter as for VP8. |
119 if (vpx_codec_control(codec.get(), VP8E_SET_CPUUSED, 4)) | 126 if (vpx_codec_control(codec.get(), VP8E_SET_CPUUSED, 4)) |
120 return ScopedVpxCodec(); | 127 return ScopedVpxCodec(); |
121 | 128 |
122 // Use the lowest level of noise sensitivity so as to spend less time | 129 // Use the lowest level of noise sensitivity so as to spend less time |
123 // on motion estimation and inter-prediction mode. | 130 // on motion estimation and inter-prediction mode. |
124 // Note that this is configured via the same parameter as for VP8. | 131 // Note that this is configured via the same parameter as for VP8. |
125 if (vpx_codec_control(codec.get(), VP8E_SET_NOISE_SENSITIVITY, 0)) | 132 if (vpx_codec_control(codec.get(), VP8E_SET_NOISE_SENSITIVITY, 0)) |
126 return ScopedVpxCodec(); | 133 return ScopedVpxCodec(); |
127 | 134 |
128 return codec.Pass(); | 135 return codec.Pass(); |
129 } | 136 } |
130 | 137 |
131 } // namespace | 138 void CreateImage(bool use_i444, |
| 139 const webrtc::DesktopSize& size, |
| 140 scoped_ptr<vpx_image_t>* out_image, |
| 141 scoped_ptr<uint8[]>* out_image_buffer) { |
| 142 DCHECK(!size.is_empty()); |
| 143 |
| 144 scoped_ptr<vpx_image_t> image(new vpx_image_t()); |
| 145 memset(image.get(), 0, sizeof(vpx_image_t)); |
| 146 |
| 147 // libvpx seems to require both to be assigned. |
| 148 image->d_w = size.width(); |
| 149 image->w = size.width(); |
| 150 image->d_h = size.height(); |
| 151 image->h = size.height(); |
| 152 |
 | 153 // libvpx should derive chroma shifts from |fmt| but currently has a bug: |
| 154 // https://code.google.com/p/webm/issues/detail?id=627 |
| 155 if (use_i444) { |
| 156 image->fmt = VPX_IMG_FMT_I444; |
| 157 image->x_chroma_shift = 0; |
| 158 image->y_chroma_shift = 0; |
| 159 } else { // I420 |
| 160 image->fmt = VPX_IMG_FMT_YV12; |
| 161 image->x_chroma_shift = 1; |
| 162 image->y_chroma_shift = 1; |
| 163 } |
| 164 |
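The chroma shifts chosen above define how much smaller the U and V planes are than the Y plane: each chroma dimension is the luma dimension shifted right by the corresponding amount. A rough illustration, assuming a 1366x768 frame:

    // I444 (shift 0): U and V planes are 1366x768, the same size as Y.
    // I420 (shift 1): U and V planes are 683x384, a quarter of the Y plane,
    // so the buffer allocated below is roughly half the size of the I444 case.
    int chroma_w = 1366 >> image->x_chroma_shift;
    int chroma_h = 768 >> image->y_chroma_shift;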
| 165 // libyuv's fast-path requires 16-byte aligned pointers and strides, so pad |
| 166 // the Y, U and V planes' strides to multiples of 16 bytes. |
| 167 const int y_stride = ((image->w - 1) & ~15) + 16; |
| 168 const int uv_unaligned_stride = y_stride >> image->x_chroma_shift; |
| 169 const int uv_stride = ((uv_unaligned_stride - 1) & ~15) + 16; |
| 170 |
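The ((x - 1) & ~15) + 16 expression rounds a positive value up to the next multiple of 16, leaving already-aligned values unchanged. A small standalone sketch of the idiom (not part of the CL):

    // Round x up to a multiple of 16; assumes x >= 1.
    // e.g. 1366 -> (1365 & ~15) + 16 = 1360 + 16 = 1376, while 1376 -> 1376.
    int AlignUpTo16(int x) { return ((x - 1) & ~15) + 16; }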
| 171 // libvpx accesses the source image in macro blocks, and will over-read |
| 172 // if the image is not padded out to the next macroblock: crbug.com/119633. |
| 173 // Pad the Y, U and V planes' height out to compensate. |
 | 174 // Assuming macroblocks are 16x16, aligning the planes' strides above also |
 | 175 // macroblock-aligns them. |
| 176 DCHECK_EQ(16, kMacroBlockSize); |
| 177 const int y_rows = ((image->h - 1) & ~(kMacroBlockSize-1)) + kMacroBlockSize; |
| 178 const int uv_rows = y_rows >> image->y_chroma_shift; |
| 179 |
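The height is padded with the same round-up idiom, but in units of kMacroBlockSize, so both planes end on a macroblock boundary. For example, assuming a 770-row frame:

    // ((770 - 1) & ~15) + 16 = 768 + 16 = 784 luma rows,
    // and 784 >> 1 = 392 chroma rows in the I420 case.
    const int example_y_rows =
        ((770 - 1) & ~(kMacroBlockSize - 1)) + kMacroBlockSize;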
| 180 // Allocate a YUV buffer large enough for the aligned data & padding. |
| 181 const int buffer_size = y_stride * y_rows + 2*uv_stride * uv_rows; |
| 182 scoped_ptr<uint8[]> image_buffer(new uint8[buffer_size]); |
| 183 |
| 184 // Reset image value to 128 so we just need to fill in the y plane. |
| 185 memset(image_buffer.get(), 128, buffer_size); |
| 186 |
| 187 // Fill in the information for |image_|. |
| 188 unsigned char* uchar_buffer = |
| 189 reinterpret_cast<unsigned char*>(image_buffer.get()); |
| 190 image->planes[0] = uchar_buffer; |
| 191 image->planes[1] = image->planes[0] + y_stride * y_rows; |
| 192 image->planes[2] = image->planes[1] + uv_stride * uv_rows; |
| 193 image->stride[0] = y_stride; |
| 194 image->stride[1] = uv_stride; |
| 195 image->stride[2] = uv_stride; |
| 196 |
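Putting the strides and row counts together, the single allocation holds the Y, U and V planes back to back. A worked example under the assumptions used earlier (I420, 1366x768 input):

    // y_stride = 1376, y_rows = 768   -> Y plane  : 1376 * 768 = 1,056,768 bytes
    // uv_stride = 688, uv_rows = 384  -> U, V each:  688 * 384 =   264,192 bytes
    // buffer_size                     = 1,056,768 + 2 * 264,192 = 1,585,152 bytes
    // planes[0] = buffer, planes[1] = planes[0] + 1,056,768,
    // planes[2] = planes[1] + 264,192.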
| 197 *out_image = image.Pass(); |
| 198 *out_image_buffer = image_buffer.Pass(); |
| 199 } |
| 200 |
| 201 } // namespace |
132 | 202 |
133 // static | 203 // static |
134 scoped_ptr<VideoEncoderVpx> VideoEncoderVpx::CreateForVP8() { | 204 scoped_ptr<VideoEncoderVpx> VideoEncoderVpx::CreateForVP8() { |
135 return scoped_ptr<VideoEncoderVpx>( | 205 return scoped_ptr<VideoEncoderVpx>( |
136 new VideoEncoderVpx(base::Bind(&CreateVP8Codec))); | 206 new VideoEncoderVpx(base::Bind(&CreateVP8Codec), |
| 207 base::Bind(&CreateImage, false))); |
137 } | 208 } |
138 | 209 |
139 // static | 210 // static |
140 scoped_ptr<VideoEncoderVpx> VideoEncoderVpx::CreateForVP9() { | 211 scoped_ptr<VideoEncoderVpx> VideoEncoderVpx::CreateForVP9I420() { |
141 return scoped_ptr<VideoEncoderVpx>( | 212 return scoped_ptr<VideoEncoderVpx>( |
142 new VideoEncoderVpx(base::Bind(&CreateVP9Codec))); | 213 new VideoEncoderVpx(base::Bind(&CreateVP9Codec, false), |
| 214 base::Bind(&CreateImage, false))); |
| 215 } |
| 216 |
| 217 // static |
| 218 scoped_ptr<VideoEncoderVpx> VideoEncoderVpx::CreateForVP9I444() { |
| 219 return scoped_ptr<VideoEncoderVpx>( |
| 220 new VideoEncoderVpx(base::Bind(&CreateVP9Codec, true), |
| 221 base::Bind(&CreateImage, true))); |
143 } | 222 } |
144 | 223 |
145 VideoEncoderVpx::~VideoEncoderVpx() {} | 224 VideoEncoderVpx::~VideoEncoderVpx() {} |
146 | 225 |
147 scoped_ptr<VideoPacket> VideoEncoderVpx::Encode( | 226 scoped_ptr<VideoPacket> VideoEncoderVpx::Encode( |
148 const webrtc::DesktopFrame& frame) { | 227 const webrtc::DesktopFrame& frame) { |
149 DCHECK_LE(32, frame.size().width()); | 228 DCHECK_LE(32, frame.size().width()); |
150 DCHECK_LE(32, frame.size().height()); | 229 DCHECK_LE(32, frame.size().height()); |
151 | 230 |
152 base::TimeTicks encode_start_time = base::TimeTicks::Now(); | 231 base::TimeTicks encode_start_time = base::TimeTicks::Now(); |
(...skipping 73 matching lines...)
226 Rect* rect = packet->add_dirty_rects(); | 305 Rect* rect = packet->add_dirty_rects(); |
227 rect->set_x(r.rect().left()); | 306 rect->set_x(r.rect().left()); |
228 rect->set_y(r.rect().top()); | 307 rect->set_y(r.rect().top()); |
229 rect->set_width(r.rect().width()); | 308 rect->set_width(r.rect().width()); |
230 rect->set_height(r.rect().height()); | 309 rect->set_height(r.rect().height()); |
231 } | 310 } |
232 | 311 |
233 return packet.Pass(); | 312 return packet.Pass(); |
234 } | 313 } |
235 | 314 |
236 VideoEncoderVpx::VideoEncoderVpx(const InitializeCodecCallback& init_codec) | 315 VideoEncoderVpx::VideoEncoderVpx(const CreateCodecCallback& create_codec, |
237 : init_codec_(init_codec), | 316 const CreateImageCallback& create_image) |
| 317 : create_codec_(create_codec), |
| 318 create_image_(create_image), |
238 active_map_width_(0), | 319 active_map_width_(0), |
239 active_map_height_(0) { | 320 active_map_height_(0) { |
240 } | 321 } |
241 | 322 |
242 bool VideoEncoderVpx::Initialize(const webrtc::DesktopSize& size) { | 323 bool VideoEncoderVpx::Initialize(const webrtc::DesktopSize& size) { |
243 codec_.reset(); | 324 codec_.reset(); |
244 | 325 |
245 image_.reset(new vpx_image_t()); | 326 // (Re)Create the VPX image structure and pixel buffer. |
246 memset(image_.get(), 0, sizeof(vpx_image_t)); | 327 create_image_.Run(size, &image_, &image_buffer_); |
247 | |
248 image_->fmt = VPX_IMG_FMT_YV12; | |
249 | |
250 // libvpx seems to require both to be assigned. | |
251 image_->d_w = size.width(); | |
252 image_->w = size.width(); | |
253 image_->d_h = size.height(); | |
254 image_->h = size.height(); | |
255 | |
256 // libvpx should derive this from|fmt| but currently has a bug: | |
257 // https://code.google.com/p/webm/issues/detail?id=627 | |
258 image_->x_chroma_shift = 1; | |
259 image_->y_chroma_shift = 1; | |
260 | 328 |
261 // Initialize active map. | 329 // Initialize active map. |
262 active_map_width_ = (image_->w + kMacroBlockSize - 1) / kMacroBlockSize; | 330 active_map_width_ = (image_->w + kMacroBlockSize - 1) / kMacroBlockSize; |
263 active_map_height_ = (image_->h + kMacroBlockSize - 1) / kMacroBlockSize; | 331 active_map_height_ = (image_->h + kMacroBlockSize - 1) / kMacroBlockSize; |
264 active_map_.reset(new uint8[active_map_width_ * active_map_height_]); | 332 active_map_.reset(new uint8[active_map_width_ * active_map_height_]); |
265 | 333 |
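The active-map dimensions use the usual ceiling-division idiom, so a frame whose size is not an exact multiple of 16 still gets map cells for its partial edge blocks. With assumed numbers:

    // e.g. a 1366x768 frame:
    const int map_w = (1366 + kMacroBlockSize - 1) / kMacroBlockSize;  // 86
    const int map_h = (768 + kMacroBlockSize - 1) / kMacroBlockSize;   // 48
    // active_map_ then holds 86 * 48 = 4128 one-byte flags.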
266 // libyuv's fast-path requires 16-byte aligned pointers and strides, so pad | 334 // (Re)Initialize the codec. |
267 // the Y, U and V planes' strides to multiples of 16 bytes. | 335 codec_ = create_codec_.Run(size); |
268 const int y_stride = ((image_->w - 1) & ~15) + 16; | |
269 const int uv_unaligned_stride = y_stride / 2; | |
270 const int uv_stride = ((uv_unaligned_stride - 1) & ~15) + 16; | |
271 | |
272 // libvpx accesses the source image in macro blocks, and will over-read | |
273 // if the image is not padded out to the next macroblock: crbug.com/119633. | |
274 // Pad the Y, U and V planes' height out to compensate. | |
275 // Assuming macroblocks are 16x16, aligning the planes' strides above also | |
276 // macroblock aligned them. | |
277 DCHECK_EQ(16, kMacroBlockSize); | |
278 const int y_rows = active_map_height_ * kMacroBlockSize; | |
279 const int uv_rows = y_rows / 2; | |
280 | |
281 // Allocate a YUV buffer large enough for the aligned data & padding. | |
282 const int buffer_size = y_stride * y_rows + 2 * uv_stride * uv_rows; | |
283 yuv_image_.reset(new uint8[buffer_size]); | |
284 | |
285 // Reset image value to 128 so we just need to fill in the y plane. | |
286 memset(yuv_image_.get(), 128, buffer_size); | |
287 | |
288 // Fill in the information for |image_|. | |
289 unsigned char* image = reinterpret_cast<unsigned char*>(yuv_image_.get()); | |
290 image_->planes[0] = image; | |
291 image_->planes[1] = image_->planes[0] + y_stride * y_rows; | |
292 image_->planes[2] = image_->planes[1] + uv_stride * uv_rows; | |
293 image_->stride[0] = y_stride; | |
294 image_->stride[1] = uv_stride; | |
295 image_->stride[2] = uv_stride; | |
296 | |
297 // Initialize the codec. | |
298 codec_ = init_codec_.Run(size); | |
299 | 336 |
300 return codec_; | 337 return codec_; |
301 } | 338 } |
302 | 339 |
303 void VideoEncoderVpx::PrepareImage(const webrtc::DesktopFrame& frame, | 340 void VideoEncoderVpx::PrepareImage(const webrtc::DesktopFrame& frame, |
304 webrtc::DesktopRegion* updated_region) { | 341 webrtc::DesktopRegion* updated_region) { |
305 if (frame.updated_region().is_empty()) { | 342 if (frame.updated_region().is_empty()) { |
306 updated_region->Clear(); | 343 updated_region->Clear(); |
307 return; | 344 return; |
308 } | 345 } |
(...skipping 20 matching lines...)
329 | 366 |
330 // Convert the updated region to YUV ready for encoding. | 367 // Convert the updated region to YUV ready for encoding. |
331 const uint8* rgb_data = frame.data(); | 368 const uint8* rgb_data = frame.data(); |
332 const int rgb_stride = frame.stride(); | 369 const int rgb_stride = frame.stride(); |
333 const int y_stride = image_->stride[0]; | 370 const int y_stride = image_->stride[0]; |
334 DCHECK_EQ(image_->stride[1], image_->stride[2]); | 371 DCHECK_EQ(image_->stride[1], image_->stride[2]); |
335 const int uv_stride = image_->stride[1]; | 372 const int uv_stride = image_->stride[1]; |
336 uint8* y_data = image_->planes[0]; | 373 uint8* y_data = image_->planes[0]; |
337 uint8* u_data = image_->planes[1]; | 374 uint8* u_data = image_->planes[1]; |
338 uint8* v_data = image_->planes[2]; | 375 uint8* v_data = image_->planes[2]; |
339 for (webrtc::DesktopRegion::Iterator r(*updated_region); !r.IsAtEnd(); | 376 |
340 r.Advance()) { | 377 switch (image_->fmt) { |
341 const webrtc::DesktopRect& rect = r.rect(); | 378 case VPX_IMG_FMT_I444: |
342 ConvertRGB32ToYUVWithRect( | 379 for (webrtc::DesktopRegion::Iterator r(*updated_region); !r.IsAtEnd(); |
343 rgb_data, y_data, u_data, v_data, | 380 r.Advance()) { |
344 rect.left(), rect.top(), rect.width(), rect.height(), | 381 const webrtc::DesktopRect& rect = r.rect(); |
345 rgb_stride, y_stride, uv_stride); | 382 int rgb_offset = rgb_stride * rect.top() + |
| 383 rect.left() * kBytesPerRgbPixel; |
| 384 int yuv_offset = uv_stride * rect.top() + rect.left(); |
| 385 libyuv::ARGBToI444(rgb_data + rgb_offset, rgb_stride, |
| 386 y_data + yuv_offset, y_stride, |
| 387 u_data + yuv_offset, uv_stride, |
| 388 v_data + yuv_offset, uv_stride, |
| 389 rect.width(), rect.height()); |
| 390 } |
| 391 break; |
| 392 case VPX_IMG_FMT_YV12: |
| 393 for (webrtc::DesktopRegion::Iterator r(*updated_region); !r.IsAtEnd(); |
| 394 r.Advance()) { |
| 395 const webrtc::DesktopRect& rect = r.rect(); |
| 396 int rgb_offset = rgb_stride * rect.top() + |
| 397 rect.left() * kBytesPerRgbPixel; |
| 398 int y_offset = y_stride * rect.top() + rect.left(); |
| 399 int uv_offset = uv_stride * rect.top() / 2 + rect.left() / 2; |
| 400 libyuv::ARGBToI420(rgb_data + rgb_offset, rgb_stride, |
| 401 y_data + y_offset, y_stride, |
| 402 u_data + uv_offset, uv_stride, |
| 403 v_data + uv_offset, uv_stride, |
| 404 rect.width(), rect.height()); |
| 405 } |
| 406 break; |
| 407 default: |
| 408 NOTREACHED(); |
| 409 break; |
346 } | 410 } |
347 } | 411 } |
348 | 412 |
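In the I420 branch above, the chroma planes are quarter resolution, so the per-rectangle chroma offset divides both the row and the column by two; the update rectangles are presumably kept at even coordinates by the elided part of this function, which makes that division exact. A rough example with a hypothetical rectangle:

    // Rect with (left, top) = (64, 32) in a frame where rgb_stride = 5464
    // (1366 * 4), y_stride = 1376 and uv_stride = 688:
    //   rgb_offset = 5464 * 32 + 64 * 4    = 175,104 bytes
    //   y_offset   = 1376 * 32 + 64        =  44,096 bytes
    //   uv_offset  = 688 * 32 / 2 + 64 / 2 =  11,040 bytes into each of U and V.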
349 void VideoEncoderVpx::PrepareActiveMap( | 413 void VideoEncoderVpx::PrepareActiveMap( |
350 const webrtc::DesktopRegion& updated_region) { | 414 const webrtc::DesktopRegion& updated_region) { |
351 // Clear active map first. | 415 // Clear active map first. |
352 memset(active_map_.get(), 0, active_map_width_ * active_map_height_); | 416 memset(active_map_.get(), 0, active_map_width_ * active_map_height_); |
353 | 417 |
354 // Mark updated areas active. | 418 // Mark updated areas active. |
355 for (webrtc::DesktopRegion::Iterator r(updated_region); !r.IsAtEnd(); | 419 for (webrtc::DesktopRegion::Iterator r(updated_region); !r.IsAtEnd(); |
356 r.Advance()) { | 420 r.Advance()) { |
357 const webrtc::DesktopRect& rect = r.rect(); | 421 const webrtc::DesktopRect& rect = r.rect(); |
358 int left = rect.left() / kMacroBlockSize; | 422 int left = rect.left() / kMacroBlockSize; |
359 int right = (rect.right() - 1) / kMacroBlockSize; | 423 int right = (rect.right() - 1) / kMacroBlockSize; |
360 int top = rect.top() / kMacroBlockSize; | 424 int top = rect.top() / kMacroBlockSize; |
361 int bottom = (rect.bottom() - 1) / kMacroBlockSize; | 425 int bottom = (rect.bottom() - 1) / kMacroBlockSize; |
362 DCHECK_LT(right, active_map_width_); | 426 DCHECK_LT(right, active_map_width_); |
363 DCHECK_LT(bottom, active_map_height_); | 427 DCHECK_LT(bottom, active_map_height_); |
364 | 428 |
365 uint8* map = active_map_.get() + top * active_map_width_; | 429 uint8* map = active_map_.get() + top * active_map_width_; |
366 for (int y = top; y <= bottom; ++y) { | 430 for (int y = top; y <= bottom; ++y) { |
367 for (int x = left; x <= right; ++x) | 431 for (int x = left; x <= right; ++x) |
368 map[x] = 1; | 432 map[x] = 1; |
369 map += active_map_width_; | 433 map += active_map_width_; |
370 } | 434 } |
371 } | 435 } |
372 } | 436 } |
373 | 437 |
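PrepareActiveMap maps each updated rectangle from pixel coordinates onto an inclusive range of 16x16 macroblock indices and sets those bytes to 1, so the encoder can skip untouched blocks. A small worked example:

    // Hypothetical rect covering pixels x in [20, 70) and y in [8, 40):
    //   left  = 20 / 16       = 1    top    = 8 / 16        = 0
    //   right = (70 - 1) / 16 = 4    bottom = (40 - 1) / 16 = 2
    // so map cells (x, y) with x in 1..4 and y in 0..2 are marked active.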
374 } // namespace remoting | 438 } // namespace remoting |