| OLD | NEW |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "cc/resources/video_resource_updater.h" | 5 #include "cc/resources/video_resource_updater.h" |
| 6 | 6 |
| 7 #include <stddef.h> | 7 #include <stddef.h> |
| 8 #include <stdint.h> | 8 #include <stdint.h> |
| 9 | 9 |
| 10 #include <algorithm> | 10 #include <algorithm> |
| (...skipping 139 matching lines...) |
| 150 void VideoResourceUpdater::PlaneResource::SetUniqueId(int unique_frame_id, | 150 void VideoResourceUpdater::PlaneResource::SetUniqueId(int unique_frame_id, |
| 151 size_t plane_index) { | 151 size_t plane_index) { |
| 152 DCHECK_EQ(ref_count_, 1); | 152 DCHECK_EQ(ref_count_, 1); |
| 153 plane_index_ = plane_index; | 153 plane_index_ = plane_index; |
| 154 unique_frame_id_ = unique_frame_id; | 154 unique_frame_id_ = unique_frame_id; |
| 155 has_unique_frame_id_and_plane_index_ = true; | 155 has_unique_frame_id_and_plane_index_ = true; |
| 156 } | 156 } |
| 157 | 157 |
| 158 VideoFrameExternalResources::VideoFrameExternalResources() | 158 VideoFrameExternalResources::VideoFrameExternalResources() |
| 159 : type(NONE), | 159 : type(NONE), |
| 160 format(RGBA_8888), |
| 160 read_lock_fences_enabled(false), | 161 read_lock_fences_enabled(false), |
| 161 offset(0.0f), | 162 offset(0.0f), |
| 162 multiplier(1.0f), | 163 multiplier(1.0f), |
| 163 bits_per_channel(8) {} | 164 bits_per_channel(8) {} |
| 164 | 165 |
| 165 VideoFrameExternalResources::VideoFrameExternalResources( | 166 VideoFrameExternalResources::VideoFrameExternalResources( |
| 166 const VideoFrameExternalResources& other) = default; | 167 const VideoFrameExternalResources& other) = default; |
| 167 | 168 |
| 168 VideoFrameExternalResources::~VideoFrameExternalResources() {} | 169 VideoFrameExternalResources::~VideoFrameExternalResources() {} |
| 169 | 170 |
| (...skipping 137 matching lines...) |
| 307 int rows = 1; | 308 int rows = 1; |
| 308 libyuv::HalfFloatPlane(src, stride, dst, stride, 1.0f / max_value, num, rows); | 309 libyuv::HalfFloatPlane(src, stride, dst, stride, 1.0f / max_value, num, rows); |
| 309 } | 310 } |
| 310 | 311 |
| 311 VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( | 312 VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
| 312 scoped_refptr<media::VideoFrame> video_frame) { | 313 scoped_refptr<media::VideoFrame> video_frame) { |
| 313 TRACE_EVENT0("cc", "VideoResourceUpdater::CreateForSoftwarePlanes"); | 314 TRACE_EVENT0("cc", "VideoResourceUpdater::CreateForSoftwarePlanes"); |
| 314 const media::VideoPixelFormat input_frame_format = video_frame->format(); | 315 const media::VideoPixelFormat input_frame_format = video_frame->format(); |
| 315 | 316 |
| 316 // TODO(hubbe): Make this a video frame method. | 317 // TODO(hubbe): Make this a video frame method. |
| 318 // TODO(dshwang): handle YUV4XXPX formats in the GMB pool code. crbug.com/445071 |
| 317 int bits_per_channel = 0; | 319 int bits_per_channel = 0; |
| 318 switch (input_frame_format) { | 320 switch (input_frame_format) { |
| 319 case media::PIXEL_FORMAT_UNKNOWN: | 321 case media::PIXEL_FORMAT_UNKNOWN: |
| 320 NOTREACHED(); | 322 NOTREACHED(); |
| 321 // Fall through! | 323 // Fall through! |
| 322 case media::PIXEL_FORMAT_I420: | 324 case media::PIXEL_FORMAT_I420: |
| 323 case media::PIXEL_FORMAT_YV12: | 325 case media::PIXEL_FORMAT_YV12: |
| 324 case media::PIXEL_FORMAT_YV16: | 326 case media::PIXEL_FORMAT_YV16: |
| 325 case media::PIXEL_FORMAT_YV12A: | 327 case media::PIXEL_FORMAT_YV12A: |
| 326 case media::PIXEL_FORMAT_YV24: | 328 case media::PIXEL_FORMAT_YV24: |
| (...skipping 23 matching lines...) |
| 350 case media::PIXEL_FORMAT_YUV420P12: | 352 case media::PIXEL_FORMAT_YUV420P12: |
| 351 case media::PIXEL_FORMAT_YUV422P12: | 353 case media::PIXEL_FORMAT_YUV422P12: |
| 352 case media::PIXEL_FORMAT_YUV444P12: | 354 case media::PIXEL_FORMAT_YUV444P12: |
| 353 bits_per_channel = 12; | 355 bits_per_channel = 12; |
| 354 break; | 356 break; |
| 355 case media::PIXEL_FORMAT_Y16: | 357 case media::PIXEL_FORMAT_Y16: |
| 356 bits_per_channel = 16; | 358 bits_per_channel = 16; |
| 357 break; | 359 break; |
| 358 } | 360 } |
| 359 | 361 |
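The switch above is the logic the TODO suggests folding into media/; a hypothetical sketch of such a helper, limited to the cases visible in this diff (the elided 9- and 10-bit formats would slot into the same pattern):

    // Hypothetical helper mirroring the switch above; not part of this patch.
    #include "media/base/video_types.h"

    int BitsPerChannel(media::VideoPixelFormat format) {
      switch (format) {
        case media::PIXEL_FORMAT_YUV420P12:
        case media::PIXEL_FORMAT_YUV422P12:
        case media::PIXEL_FORMAT_YUV444P12:
          return 12;
        case media::PIXEL_FORMAT_Y16:
          return 16;
        default:
          return 8;  // 8-bit YUV formats (and the cases elided from this diff).
      }
    }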
| 360 // TODO(dshwang): support PIXEL_FORMAT_Y16. crbug.com/624436 | |
| 361 DCHECK_NE(bits_per_channel, 16); | |
| 362 | |
| 363 // Only YUV software video frames are supported. | 362 // Only YUV software video frames are supported. |
| 364 if (!media::IsYuvPlanar(input_frame_format)) { | 363 if (!media::IsYuvPlanar(input_frame_format)) { |
| 365 NOTREACHED() << media::VideoPixelFormatToString(input_frame_format); | 364 NOTREACHED() << media::VideoPixelFormatToString(input_frame_format); |
| 366 return VideoFrameExternalResources(); | 365 return VideoFrameExternalResources(); |
| 367 } | 366 } |
| 368 | 367 |
| 369 const bool software_compositor = context_provider_ == NULL; | 368 const bool software_compositor = context_provider_ == NULL; |
| 370 | 369 |
| 370 const bool disable_one_component_textures = |
| 371 !software_compositor && context_provider_->ContextCapabilities().disable_one_component_textures; |
| 372 |
| 371 ResourceFormat output_resource_format = | 373 ResourceFormat output_resource_format = |
| 372 resource_provider_->YuvResourceFormat(bits_per_channel); | 374 resource_provider_->YuvResourceFormat(bits_per_channel); |
| 373 | 375 |
| 374 // If GPU compositing is enabled, but the output resource format | 376 // If GPU compositing is enabled, but the output resource format |
| 375 // returned by the resource provider is RGBA_8888, then a GPU driver | 377 // returned by the resource provider is RGBA_8888, then a GPU driver |
| 376 // bug workaround requires that YUV frames must be converted to RGB | 378 // bug workaround requires that YUV frames must be converted to RGB |
| 377 // before texture upload. | 379 // before texture upload. |
| 378 bool texture_needs_rgb_conversion = | 380 bool texture_needs_rgb_conversion = |
| 379 !software_compositor && | 381 !software_compositor && disable_one_component_textures; |
| 380 output_resource_format == ResourceFormat::RGBA_8888; | |
| 381 size_t output_plane_count = media::VideoFrame::NumPlanes(input_frame_format); | 382 size_t output_plane_count = media::VideoFrame::NumPlanes(input_frame_format); |
| 382 | 383 |
| 383 // TODO(skaslev): If we're in software compositing mode, we do the YUV -> RGB | 384 // TODO(skaslev): If we're in software compositing mode, we do the YUV -> RGB |
| 384 // conversion here. That involves an extra copy of each frame to a bitmap. | 385 // conversion here. That involves an extra copy of each frame to a bitmap. |
| 385 // Obviously, this is suboptimal and should be addressed once ubercompositor | 386 // Obviously, this is suboptimal and should be addressed once ubercompositor |
| 386 // starts shaping up. | 387 // starts shaping up. |
| 387 if (software_compositor || texture_needs_rgb_conversion) { | 388 if (software_compositor || texture_needs_rgb_conversion) { |
| 388 output_resource_format = kRGBResourceFormat; | 389 output_resource_format = kRGBResourceFormat; |
| 389 output_plane_count = 1; | 390 output_plane_count = 1; |
| 390 bits_per_channel = 8; | 391 bits_per_channel = 8; |
| (...skipping 26 matching lines...) |
| 417 ResourceList::iterator resource_it = RecycleOrAllocateResource( | 418 ResourceList::iterator resource_it = RecycleOrAllocateResource( |
| 418 output_plane_resource_size, output_resource_format, | 419 output_plane_resource_size, output_resource_format, |
| 419 video_frame->ColorSpace(), software_compositor, is_immutable, | 420 video_frame->ColorSpace(), software_compositor, is_immutable, |
| 420 video_frame->unique_id(), i); | 421 video_frame->unique_id(), i); |
| 421 | 422 |
| 422 resource_it->add_ref(); | 423 resource_it->add_ref(); |
| 423 plane_resources.push_back(resource_it); | 424 plane_resources.push_back(resource_it); |
| 424 } | 425 } |
| 425 | 426 |
| 426 VideoFrameExternalResources external_resources; | 427 VideoFrameExternalResources external_resources; |
| 427 | |
| 428 external_resources.bits_per_channel = bits_per_channel; | 428 external_resources.bits_per_channel = bits_per_channel; |
| 429 external_resources.format = output_resource_format; |
| 429 | 430 |
| 430 if (software_compositor || texture_needs_rgb_conversion) { | 431 if (software_compositor || texture_needs_rgb_conversion) { |
| 431 DCHECK_EQ(plane_resources.size(), 1u); | 432 DCHECK_EQ(plane_resources.size(), 1u); |
| 432 PlaneResource& plane_resource = *plane_resources[0]; | 433 PlaneResource& plane_resource = *plane_resources[0]; |
| 433 DCHECK_EQ(plane_resource.resource_format(), kRGBResourceFormat); | 434 DCHECK_EQ(plane_resource.resource_format(), kRGBResourceFormat); |
| 434 DCHECK_EQ(software_compositor, plane_resource.mailbox().IsZero()); | 435 DCHECK_EQ(software_compositor, plane_resource.mailbox().IsZero()); |
| 435 | 436 |
| 436 if (!plane_resource.Matches(video_frame->unique_id(), 0)) { | 437 if (!plane_resource.Matches(video_frame->unique_id(), 0)) { |
| 437 // We need to transfer data from |video_frame| to the plane resource. | 438 // We need to transfer data from |video_frame| to the plane resource. |
| 438 if (software_compositor) { | 439 if (software_compositor) { |
| (...skipping 37 matching lines...) |
| 476 plane_resource.resource_id())); | 477 plane_resource.resource_id())); |
| 477 mailbox.set_color_space(video_frame->ColorSpace()); | 478 mailbox.set_color_space(video_frame->ColorSpace()); |
| 478 external_resources.mailboxes.push_back(mailbox); | 479 external_resources.mailboxes.push_back(mailbox); |
| 479 external_resources.release_callbacks.push_back(base::Bind( | 480 external_resources.release_callbacks.push_back(base::Bind( |
| 480 &RecycleResource, AsWeakPtr(), plane_resource.resource_id())); | 481 &RecycleResource, AsWeakPtr(), plane_resource.resource_id())); |
| 481 external_resources.type = VideoFrameExternalResources::RGBA_RESOURCE; | 482 external_resources.type = VideoFrameExternalResources::RGBA_RESOURCE; |
| 482 } | 483 } |
| 483 return external_resources; | 484 return external_resources; |
| 484 } | 485 } |
| 485 | 486 |
| 486 for (size_t i = 0; i < plane_resources.size(); ++i) { | 487 const bool highbit_rg_rgba_fallback = |
| 487 PlaneResource& plane_resource = *plane_resources[i]; | 488 bits_per_channel > 8 && output_resource_format == RGBA_8888; |
| 489 bool needs_conversion = false; |
| 490 int shift = 0; |
| 491 if (output_resource_format == LUMINANCE_F16) { |
| 492 // LUMINANCE_F16 uses half-floats, so we always need a conversion step. |
| 493 needs_conversion = true; |
| 494 |
| 495 // If the input data was 9 or 10 bit, and we output to half-floats, |
| 496 // then we used the OR path below, which means that we need to |
| 497 // adjust the resource offset and multiplier accordingly. If the |
| 498 // input data uses more than 10 bits, it will already be normalized |
| 499 // to 0.0..1.0, so there is no need to do anything. |
| 500 if (bits_per_channel <= 10) { |
| 501 // By OR-ing with 0x3800, 10-bit numbers become half-floats in the |
| 502 // range [0.5..1) and 9-bit numbers get the range [0.5..0.75). |
| 503 // |
| 504 // Half-floats are evaluated as: |
| 505 // float value = pow(2.0, exponent - 25) * (0x400 + fraction); |
| 506 // |
| 507 // In our case the exponent is 14 (since we or with 0x3800) and |
| 508 // pow(2.0, 14-25) * 0x400 evaluates to 0.5 (our offset) and |
| 509 // pow(2.0, 14-25) * fraction is [0..0.49951171875] for 10-bit and |
| 510 // [0..0.24951171875] for 9-bit. |
| 511 // |
| 512 // https://en.wikipedia.org/wiki/Half-precision_floating-point_format |
| 513 // |
| 514 // PLEASE NOTE: |
| 515 // All planes are assumed to use the same multiplier/offset. |
| 516 external_resources.offset = 0.5f; |
| 517 // Max value from input data. |
| 518 int max_input_value = (1 << bits_per_channel) - 1; |
| 519 // 1 << 11 = 2048 would be 1.0 with our exponent. |
| 520 external_resources.multiplier = 2048.0 / max_input_value; |
| 521 } |
| 522 } else if (output_resource_format == RG_88) { |
| 523 // RG_88 can represent a 16-bit int, so we don't need a conversion step. |
| 524 needs_conversion = false; |
| 525 } else if (highbit_rg_rgba_fallback) { |
| 526 // The RG channels are used to represent a 16-bit int. |
| 527 needs_conversion = true; |
| 528 } else if (bits_per_channel > 8) { |
| 529 // If bits_per_channel > 8 and we can't use RG_88, we need to |
| 530 // shift the data down and create an 8-bit texture. |
| 531 needs_conversion = true; |
| 532 shift = bits_per_channel - 8; |
| 533 external_resources.bits_per_channel = 8; |
| 534 } |
| 535 |
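As a sanity check on the numbers in the comment above, a standalone sketch (not part of this patch) showing that OR-ing 10-bit samples with 0x3800, together with the offset and multiplier chosen here, round-trips back to the normalized value, assuming the compositor remaps a sampled texel as (sample - offset) * multiplier:

    // Standalone check of the 0x3800 bias trick described above.
    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Decodes a half-float bit pattern using the formula quoted in the comment;
    // only valid here because the biased exponent is always 14 (no NaN/Inf).
    float HalfToFloat(uint16_t h) {
      const int exponent = (h >> 10) & 0x1f;
      const int fraction = h & 0x3ff;
      return std::ldexp(static_cast<float>(0x400 + fraction), exponent - 25);
    }

    int main() {
      const int bits_per_channel = 10;
      const int max_input = (1 << bits_per_channel) - 1;  // 1023
      const float offset = 0.5f;                     // as set above
      const float multiplier = 2048.0f / max_input;  // as set above

      for (int raw = 0; raw <= max_input; ++raw) {
        const uint16_t half = static_cast<uint16_t>(raw) | 0x3800;
        const float sampled = HalfToFloat(half);  // lands in [0.5, 1.0)
        const float recovered = (sampled - offset) * multiplier;
        assert(std::fabs(recovered - static_cast<float>(raw) / max_input) < 1e-3f);
      }
      return 0;
    }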
| 536 for (size_t plane = 0; plane < plane_resources.size(); ++plane) { |
| 537 PlaneResource& plane_resource = *plane_resources[plane]; |
| 488 // Update each plane's resource id with its content. | 538 // Update each plane's resource id with its content. |
| 489 DCHECK_EQ(plane_resource.resource_format(), | 539 DCHECK_EQ(plane_resource.resource_format(), output_resource_format); |
| 490 resource_provider_->YuvResourceFormat(bits_per_channel)); | |
| 491 | 540 |
| 492 if (!plane_resource.Matches(video_frame->unique_id(), i)) { | 541 if (!plane_resource.Matches(video_frame->unique_id(), plane)) { |
| 493 // TODO(hubbe): Move all conversion (and upload?) code to media/. | 542 // TODO(hubbe): Move all conversion (and upload?) code to media/. |
| 494 // We need to transfer data from |video_frame| to the plane resource. | 543 // We need to transfer data from |video_frame| to the plane resource. |
| 495 // TODO(reveman): Can use GpuMemoryBuffers here to improve performance. | 544 // TODO(reveman): Can use GpuMemoryBuffers here to improve performance. |
| 496 | 545 |
| 497 // The |resource_size_pixels| is the size of the resource we want to | 546 // The |resource_size_pixels| is the size of the resource we want to |
| 498 // upload to. | 547 // upload to. |
| 499 gfx::Size resource_size_pixels = plane_resource.resource_size(); | 548 gfx::Size resource_size_pixels = plane_resource.resource_size(); |
| 500 // The |video_stride_bytes| is the width of the video frame we are | 549 // The |video_stride_bytes| is the width of the video frame we are |
| 501 // uploading (including non-frame data to fill in the stride). | 550 // uploading (including non-frame data to fill in the stride). |
| 502 int video_stride_bytes = video_frame->stride(i); | 551 int video_stride_bytes = video_frame->stride(plane); |
| 503 | 552 |
| 504 size_t bytes_per_row = ResourceUtil::CheckedWidthInBytes<size_t>( | 553 size_t bytes_per_row = ResourceUtil::CheckedWidthInBytes<size_t>( |
| 505 resource_size_pixels.width(), plane_resource.resource_format()); | 554 resource_size_pixels.width(), output_resource_format); |
| 506 // Use 4-byte row alignment (OpenGL default) for upload performance. | 555 // Use 4-byte row alignment (OpenGL default) for upload performance. |
| 507 // Assuming that GL_UNPACK_ALIGNMENT has not changed from default. | 556 // Assuming that GL_UNPACK_ALIGNMENT has not changed from default. |
| 508 size_t upload_image_stride = | 557 size_t upload_image_stride = |
| 509 MathUtil::CheckedRoundUp<size_t>(bytes_per_row, 4u); | 558 MathUtil::CheckedRoundUp<size_t>(bytes_per_row, 4u); |
| 510 | 559 |
| 511 bool needs_conversion = false; | |
| 512 int shift = 0; | |
| 513 | |
| 514 // LUMINANCE_F16 uses half-floats, so we always need a conversion step. | |
| 515 if (plane_resource.resource_format() == LUMINANCE_F16) { | |
| 516 needs_conversion = true; | |
| 517 | |
| 518 // If the input data was 9 or 10 bit, and we output to half-floats, | |
| 519 // then we used the OR path below, which means that we need to | |
| 520 // adjust the resource offset and multiplier accordingly. If the | |
| 521 // input data uses more than 10 bits, it will already be normalized | |
| 522 // to 0.0..1.0, so there is no need to do anything. | |
| 523 if (bits_per_channel <= 10) { | |
| 524 // By OR-ing with 0x3800, 10-bit numbers become half-floats in the | |
| 525 // range [0.5..1) and 9-bit numbers get the range [0.5..0.75). | |
| 526 // | |
| 527 // Half-floats are evaluated as: | |
| 528 // float value = pow(2.0, exponent - 25) * (0x400 + fraction); | |
| 529 // | |
| 530 // In our case the exponent is 14 (since we or with 0x3800) and | |
| 531 // pow(2.0, 14-25) * 0x400 evaluates to 0.5 (our offset) and | |
| 532 // pow(2.0, 14-25) * fraction is [0..0.49951171875] for 10-bit and | |
| 533 // [0..0.24951171875] for 9-bit. | |
| 534 // | |
| 535 // https://en.wikipedia.org/wiki/Half-precision_floating-point_format | |
| 536 // | |
| 537 // PLEASE NOTE: | |
| 538 // All planes are assumed to use the same multiplier/offset. | |
| 539 external_resources.offset = 0.5f; | |
| 540 // Max value from input data. | |
| 541 int max_input_value = (1 << bits_per_channel) - 1; | |
| 542 // 2 << 11 = 2048 would be 1.0 with our exponent. | |
| 543 external_resources.multiplier = 2048.0 / max_input_value; | |
| 544 } | |
| 545 } else if (bits_per_channel > 8) { | |
| 546 // If bits_per_channel > 8 and we can't use LUMINANCE_F16, we need to | |
| 547 // shift the data down and create an 8-bit texture. | |
| 548 needs_conversion = true; | |
| 549 shift = bits_per_channel - 8; | |
| 550 } | |
| 551 const uint8_t* pixels; | 560 const uint8_t* pixels; |
| 552 if (static_cast<int>(upload_image_stride) == video_stride_bytes && | 561 if (static_cast<int>(upload_image_stride) == video_stride_bytes && |
| 553 !needs_conversion) { | 562 !needs_conversion) { |
| 554 pixels = video_frame->data(i); | 563 pixels = video_frame->data(plane); |
| 555 } else { | 564 } else { |
| 556 // Avoid malloc for each frame/plane if possible. | 565 // Avoid malloc for each frame/plane if possible. |
| 557 size_t needed_size = | 566 size_t needed_size = |
| 558 upload_image_stride * resource_size_pixels.height(); | 567 upload_image_stride * resource_size_pixels.height(); |
| 559 if (upload_pixels_.size() < needed_size) | 568 if (upload_pixels_.size() < needed_size) |
| 560 upload_pixels_.resize(needed_size); | 569 upload_pixels_.resize(needed_size); |
| 561 | 570 |
| 562 for (int row = 0; row < resource_size_pixels.height(); ++row) { | 571 for (int row = 0; row < resource_size_pixels.height(); ++row) { |
| 563 if (plane_resource.resource_format() == LUMINANCE_F16) { | 572 if (output_resource_format == LUMINANCE_F16) { |
| 564 uint16_t* dst = reinterpret_cast<uint16_t*>( | 573 uint16_t* dst = reinterpret_cast<uint16_t*>( |
| 565 &upload_pixels_[upload_image_stride * row]); | 574 &upload_pixels_[upload_image_stride * row]); |
| 566 const uint16_t* src = reinterpret_cast<uint16_t*>( | 575 const uint16_t* src = reinterpret_cast<uint16_t*>( |
| 567 video_frame->data(i) + (video_stride_bytes * row)); | 576 video_frame->data(plane) + (video_stride_bytes * row)); |
| 568 if (bits_per_channel <= 10) { | 577 if (bits_per_channel <= 10) { |
| 569 // Micro-benchmarking indicates that the compiler does | 578 // Micro-benchmarking indicates that the compiler does |
| 570 // a good enough job of optimizing this loop that trying | 579 // a good enough job of optimizing this loop that trying |
| 571 // to manually operate on one uint64 at a time is not | 580 // to manually operate on one uint64 at a time is not |
| 572 // actually helpful. | 581 // actually helpful. |
| 573 // Note to future optimizers: Benchmark your optimizations! | 582 // Note to future optimizers: Benchmark your optimizations! |
| 574 for (size_t i = 0; i < bytes_per_row / 2; i++) | 583 for (size_t i = 0; i < bytes_per_row / 2; i++) |
| 575 dst[i] = src[i] | 0x3800; | 584 dst[i] = src[i] | 0x3800; |
| 576 } else { | 585 } else { |
| 577 MakeHalfFloats(src, bits_per_channel, bytes_per_row / 2, dst); | 586 MakeHalfFloats(src, bits_per_channel, bytes_per_row / 2, dst); |
| 578 } | 587 } |
| 588 } else if (highbit_rg_rgba_fallback) { |
| 589 uint32_t* dst = reinterpret_cast<uint32_t*>( |
| 590 &upload_pixels_[upload_image_stride * row]); |
| 591 const uint16_t* src = reinterpret_cast<uint16_t*>( |
| 592 video_frame->data(plane) + (video_stride_bytes * row)); |
| 593 for (int i = 0; i < resource_size_pixels.width(); i++) |
| 594 dst[i] = src[i]; |
| 579 } else if (shift != 0) { | 595 } else if (shift != 0) { |
| 580 // We have more-than-8-bit input which we need to shift | 596 // We have more-than-8-bit input which we need to shift |
| 581 // down to fit it into an 8-bit texture. | 597 // down to fit it into an 8-bit texture. |
| 582 uint8_t* dst = &upload_pixels_[upload_image_stride * row]; | 598 uint8_t* dst = &upload_pixels_[upload_image_stride * row]; |
| 583 const uint16_t* src = reinterpret_cast<uint16_t*>( | 599 const uint16_t* src = reinterpret_cast<uint16_t*>( |
| 584 video_frame->data(i) + (video_stride_bytes * row)); | 600 video_frame->data(plane) + (video_stride_bytes * row)); |
| 585 for (size_t i = 0; i < bytes_per_row; i++) | 601 for (size_t i = 0; i < bytes_per_row; i++) |
| 586 dst[i] = src[i] >> shift; | 602 dst[i] = src[i] >> shift; |
| 587 } else { | 603 } else { |
| 588 // Input and output are the same size and format, but | 604 // Input and output are the same size and format, but |
| 589 // differ in stride, copy one row at a time. | 605 // differ in stride, copy one row at a time. |
| 590 uint8_t* dst = &upload_pixels_[upload_image_stride * row]; | 606 uint8_t* dst = &upload_pixels_[upload_image_stride * row]; |
| 591 const uint8_t* src = | 607 const uint8_t* src = |
| 592 video_frame->data(i) + (video_stride_bytes * row); | 608 video_frame->data(plane) + (video_stride_bytes * row); |
| 593 memcpy(dst, src, bytes_per_row); | 609 memcpy(dst, src, bytes_per_row); |
| 594 } | 610 } |
| 595 } | 611 } |
| 596 pixels = &upload_pixels_[0]; | 612 pixels = &upload_pixels_[0]; |
| 597 } | 613 } |
| 598 | 614 |
| 599 resource_provider_->CopyToResource(plane_resource.resource_id(), pixels, | 615 resource_provider_->CopyToResource(plane_resource.resource_id(), pixels, |
| 600 resource_size_pixels); | 616 resource_size_pixels); |
| 601 plane_resource.SetUniqueId(video_frame->unique_id(), i); | 617 plane_resource.SetUniqueId(video_frame->unique_id(), plane); |
| 602 } | 618 } |
| 603 | 619 |
| 604 | |
| 605 // VideoResourceUpdater shares a context with the compositor so a | 620 // VideoResourceUpdater shares a context with the compositor so a |
| 606 // sync token is not required. | 621 // sync token is not required. |
| 607 TextureMailbox mailbox(plane_resource.mailbox(), gpu::SyncToken(), | 622 TextureMailbox mailbox(plane_resource.mailbox(), gpu::SyncToken(), |
| 608 resource_provider_->GetResourceTextureTarget( | 623 resource_provider_->GetResourceTextureTarget( |
| 609 plane_resource.resource_id())); | 624 plane_resource.resource_id())); |
| 610 mailbox.set_color_space(video_frame->ColorSpace()); | 625 mailbox.set_color_space(video_frame->ColorSpace()); |
| 611 external_resources.mailboxes.push_back(mailbox); | 626 external_resources.mailboxes.push_back(mailbox); |
| 612 external_resources.release_callbacks.push_back(base::Bind( | 627 external_resources.release_callbacks.push_back(base::Bind( |
| 613 &RecycleResource, AsWeakPtr(), plane_resource.resource_id())); | 628 &RecycleResource, AsWeakPtr(), plane_resource.resource_id())); |
| 614 } | 629 } |
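For reference, a standalone sketch (not part of this patch) of what the uint16-to-uint32 store in the highbit_rg_rgba_fallback branch above produces per texel, assuming a little-endian upload buffer: the low byte of each 16-bit sample lands in the R channel and the high byte in G, with B and A left at zero.

    // Standalone illustration of the RGBA_8888 fallback store ("dst[i] = src[i]").
    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      const uint16_t sample = 0xABCD;  // an arbitrary 16-bit plane value
      const uint32_t texel = sample;   // widened exactly as in the loop above

      // Bytes as they sit in the upload buffer (little-endian assumed).
      uint8_t rgba[4];
      std::memcpy(rgba, &texel, sizeof(texel));
      assert(rgba[0] == 0xCD);  // R carries the low byte
      assert(rgba[1] == 0xAB);  // G carries the high byte
      assert(rgba[2] == 0x00);  // B unused
      assert(rgba[3] == 0x00);  // A unused
      return 0;
    }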
| (...skipping 146 matching lines...) |
| 761 if (lost_resource) { | 776 if (lost_resource) { |
| 762 resource_it->clear_refs(); | 777 resource_it->clear_refs(); |
| 763 updater->DeleteResource(resource_it); | 778 updater->DeleteResource(resource_it); |
| 764 return; | 779 return; |
| 765 } | 780 } |
| 766 | 781 |
| 767 resource_it->remove_ref(); | 782 resource_it->remove_ref(); |
| 768 } | 783 } |
| 769 | 784 |
| 770 } // namespace cc | 785 } // namespace cc |