| OLD | NEW |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "cc/resources/video_resource_updater.h" | 5 #include "cc/resources/video_resource_updater.h" |
| 6 | 6 |
| 7 #include <stddef.h> | 7 #include <stddef.h> |
| 8 #include <stdint.h> | 8 #include <stdint.h> |
| 9 | 9 |
| 10 #include <algorithm> | 10 #include <algorithm> |
| (...skipping 138 matching lines...) |
| 149 void VideoResourceUpdater::PlaneResource::SetUniqueId(int unique_frame_id, | 149 void VideoResourceUpdater::PlaneResource::SetUniqueId(int unique_frame_id, |
| 150 size_t plane_index) { | 150 size_t plane_index) { |
| 151 DCHECK_EQ(ref_count_, 1); | 151 DCHECK_EQ(ref_count_, 1); |
| 152 plane_index_ = plane_index; | 152 plane_index_ = plane_index; |
| 153 unique_frame_id_ = unique_frame_id; | 153 unique_frame_id_ = unique_frame_id; |
| 154 has_unique_frame_id_and_plane_index_ = true; | 154 has_unique_frame_id_and_plane_index_ = true; |
| 155 } | 155 } |
| 156 | 156 |
| 157 VideoFrameExternalResources::VideoFrameExternalResources() | 157 VideoFrameExternalResources::VideoFrameExternalResources() |
| 158 : type(NONE), | 158 : type(NONE), |
| 159 format(RGBA_8888), |
| 159 read_lock_fences_enabled(false), | 160 read_lock_fences_enabled(false), |
| 160 offset(0.0f), | 161 offset(0.0f), |
| 161 multiplier(1.0f), | 162 multiplier(1.0f), |
| 162 bits_per_channel(8) {} | 163 bits_per_channel(8) {} |
| 163 | 164 |
| 164 VideoFrameExternalResources::VideoFrameExternalResources( | 165 VideoFrameExternalResources::VideoFrameExternalResources( |
| 165 const VideoFrameExternalResources& other) = default; | 166 const VideoFrameExternalResources& other) = default; |
| 166 | 167 |
| 167 VideoFrameExternalResources::~VideoFrameExternalResources() {} | 168 VideoFrameExternalResources::~VideoFrameExternalResources() {} |
| 168 | 169 |
| (...skipping 143 matching lines...) |
| 312 dst[i] = (*(uint32_t*)&value) >> 13; | 313 dst[i] = (*(uint32_t*)&value) >> 13; |
| 313 } | 314 } |
| 314 } | 315 } |
| 315 | 316 |
| 316 VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( | 317 VideoFrameExternalResources VideoResourceUpdater::CreateForSoftwarePlanes( |
| 317 scoped_refptr<media::VideoFrame> video_frame) { | 318 scoped_refptr<media::VideoFrame> video_frame) { |
| 318 TRACE_EVENT0("cc", "VideoResourceUpdater::CreateForSoftwarePlanes"); | 319 TRACE_EVENT0("cc", "VideoResourceUpdater::CreateForSoftwarePlanes"); |
| 319 const media::VideoPixelFormat input_frame_format = video_frame->format(); | 320 const media::VideoPixelFormat input_frame_format = video_frame->format(); |
| 320 | 321 |
| 321 // TODO(hubbe): Make this a video frame method. | 322 // TODO(hubbe): Make this a video frame method. |
| 323 // TODO(dshwang): handle YUV4XXPX formats in the GpuMemoryBuffer pool code. crbug.com/445071 |
| 322 int bits_per_channel = 0; | 324 int bits_per_channel = 0; |
| 323 switch (input_frame_format) { | 325 switch (input_frame_format) { |
| 324 case media::PIXEL_FORMAT_UNKNOWN: | 326 case media::PIXEL_FORMAT_UNKNOWN: |
| 325 NOTREACHED(); | 327 NOTREACHED(); |
| 326 // Fall through! | 328 // Fall through! |
| 327 case media::PIXEL_FORMAT_I420: | 329 case media::PIXEL_FORMAT_I420: |
| 328 case media::PIXEL_FORMAT_YV12: | 330 case media::PIXEL_FORMAT_YV12: |
| 329 case media::PIXEL_FORMAT_YV16: | 331 case media::PIXEL_FORMAT_YV16: |
| 330 case media::PIXEL_FORMAT_YV12A: | 332 case media::PIXEL_FORMAT_YV12A: |
| 331 case media::PIXEL_FORMAT_YV24: | 333 case media::PIXEL_FORMAT_YV24: |
| (...skipping 23 matching lines...) |
| 355 case media::PIXEL_FORMAT_YUV420P12: | 357 case media::PIXEL_FORMAT_YUV420P12: |
| 356 case media::PIXEL_FORMAT_YUV422P12: | 358 case media::PIXEL_FORMAT_YUV422P12: |
| 357 case media::PIXEL_FORMAT_YUV444P12: | 359 case media::PIXEL_FORMAT_YUV444P12: |
| 358 bits_per_channel = 12; | 360 bits_per_channel = 12; |
| 359 break; | 361 break; |
| 360 case media::PIXEL_FORMAT_Y16: | 362 case media::PIXEL_FORMAT_Y16: |
| 361 bits_per_channel = 16; | 363 bits_per_channel = 16; |
| 362 break; | 364 break; |
| 363 } | 365 } |
| 364 | 366 |
| 365 // TODO(dshwang): support PIXEL_FORMAT_Y16. crbug.com/624436 | |
| 366 DCHECK_NE(bits_per_channel, 16); | |
| 367 | |
| 368 // Only YUV software video frames are supported. | 367 // Only YUV software video frames are supported. |
| 369 if (!media::IsYuvPlanar(input_frame_format)) { | 368 if (!media::IsYuvPlanar(input_frame_format)) { |
| 370 NOTREACHED() << media::VideoPixelFormatToString(input_frame_format); | 369 NOTREACHED() << media::VideoPixelFormatToString(input_frame_format); |
| 371 return VideoFrameExternalResources(); | 370 return VideoFrameExternalResources(); |
| 372 } | 371 } |
| 373 | 372 |
| 374 const bool software_compositor = context_provider_ == NULL; | 373 const bool software_compositor = context_provider_ == NULL; |
| 375 | 374 |
| 376 ResourceFormat output_resource_format = | 375 ResourceFormat output_resource_format = |
| 377 resource_provider_->YuvResourceFormat(bits_per_channel); | 376 resource_provider_->YuvResourceFormat(bits_per_channel); |
| (...skipping 44 matching lines...) |
| 422 ResourceList::iterator resource_it = RecycleOrAllocateResource( | 421 ResourceList::iterator resource_it = RecycleOrAllocateResource( |
| 423 output_plane_resource_size, output_resource_format, | 422 output_plane_resource_size, output_resource_format, |
| 424 video_frame->ColorSpace(), software_compositor, is_immutable, | 423 video_frame->ColorSpace(), software_compositor, is_immutable, |
| 425 video_frame->unique_id(), i); | 424 video_frame->unique_id(), i); |
| 426 | 425 |
| 427 resource_it->add_ref(); | 426 resource_it->add_ref(); |
| 428 plane_resources.push_back(resource_it); | 427 plane_resources.push_back(resource_it); |
| 429 } | 428 } |
| 430 | 429 |
| 431 VideoFrameExternalResources external_resources; | 430 VideoFrameExternalResources external_resources; |
| 432 | |
| 433 external_resources.bits_per_channel = bits_per_channel; | 431 external_resources.bits_per_channel = bits_per_channel; |
| 432 external_resources.format = output_resource_format; |
| 434 | 433 |
| 435 if (software_compositor || texture_needs_rgb_conversion) { | 434 if (software_compositor || texture_needs_rgb_conversion) { |
| 436 DCHECK_EQ(plane_resources.size(), 1u); | 435 DCHECK_EQ(plane_resources.size(), 1u); |
| 437 PlaneResource& plane_resource = *plane_resources[0]; | 436 PlaneResource& plane_resource = *plane_resources[0]; |
| 438 DCHECK_EQ(plane_resource.resource_format(), kRGBResourceFormat); | 437 DCHECK_EQ(plane_resource.resource_format(), kRGBResourceFormat); |
| 439 DCHECK_EQ(software_compositor, plane_resource.mailbox().IsZero()); | 438 DCHECK_EQ(software_compositor, plane_resource.mailbox().IsZero()); |
| 440 | 439 |
| 441 if (!plane_resource.Matches(video_frame->unique_id(), 0)) { | 440 if (!plane_resource.Matches(video_frame->unique_id(), 0)) { |
| 442 // We need to transfer data from |video_frame| to the plane resource. | 441 // We need to transfer data from |video_frame| to the plane resource. |
| 443 if (software_compositor) { | 442 if (software_compositor) { |
| (...skipping 37 matching lines...) |
| 481 plane_resource.resource_id())); | 480 plane_resource.resource_id())); |
| 482 mailbox.set_color_space(video_frame->ColorSpace()); | 481 mailbox.set_color_space(video_frame->ColorSpace()); |
| 483 external_resources.mailboxes.push_back(mailbox); | 482 external_resources.mailboxes.push_back(mailbox); |
| 484 external_resources.release_callbacks.push_back(base::Bind( | 483 external_resources.release_callbacks.push_back(base::Bind( |
| 485 &RecycleResource, AsWeakPtr(), plane_resource.resource_id())); | 484 &RecycleResource, AsWeakPtr(), plane_resource.resource_id())); |
| 486 external_resources.type = VideoFrameExternalResources::RGBA_RESOURCE; | 485 external_resources.type = VideoFrameExternalResources::RGBA_RESOURCE; |
| 487 } | 486 } |
| 488 return external_resources; | 487 return external_resources; |
| 489 } | 488 } |
| 490 | 489 |
| 491 for (size_t i = 0; i < plane_resources.size(); ++i) { | 490 bool needs_conversion = false; |
| 492 PlaneResource& plane_resource = *plane_resources[i]; | 491 int shift = 0; |
| 492 if (output_resource_format == LUMINANCE_F16) { |
| 493 // LUMINANCE_F16 uses half-floats, so we always need a conversion step. |
| 494 needs_conversion = true; |
| 495 |
| 496 // If the input data was 9 or 10 bit, and we output to half-floats, |
| 497 // then we used the OR path below, which means that we need to |
| 498 // adjust the resource offset and multiplier accordingly. If the |
| 499 // input data uses more than 10 bits, it will already be normalized |
| 500 // to 0.0..1.0, so there is no need to do anything. |
| 501 if (bits_per_channel <= 10) { |
| 502 // By OR-ing with 0x3800, 10-bit numbers become half-floats in the |
| 503 // range [0.5..1) and 9-bit numbers get the range [0.5..0.75). |
| 504 // |
| 505 // Half-floats are evaluated as: |
| 506 // float value = pow(2.0, exponent - 25) * (0x400 + fraction); |
| 507 // |
| 508 // In our case the exponent is 14 (since we or with 0x3800) and |
| 509 // pow(2.0, 14-25) * 0x400 evaluates to 0.5 (our offset) and |
| 510 // pow(2.0, 14-25) * fraction is [0..0.49951171875] for 10-bit and |
| 511 // [0..0.24951171875] for 9-bit. |
| 512 // |
| 513 // https://en.wikipedia.org/wiki/Half-precision_floating-point_format |
| 514 // |
| 515 // PLEASE NOTE: |
| 516 // All planes are assumed to use the same multiplier/offset. |
| 517 external_resources.offset = 0.5f; |
| 518 // Max value from input data. |
| 519 int max_input_value = (1 << bits_per_channel) - 1; |
| 520 // 2 << 11 = 2048 would be 1.0 with our exponent. |
| 521 external_resources.multiplier = 2048.0 / max_input_value; |
| 522 } |
| 523 } else if (output_resource_format == RG_88) { |
| 524 // RG_88 can represent 16bit int, so we don't need a conversion step. |
| 525 needs_conversion = false; |
| 526 } else if (bits_per_channel > 8) { |
| 527 // If bits_per_channel > 8 and we can't use RG_88, we need to |
| 528 // shift the data down and create an 8-bit texture. |
| 529 needs_conversion = true; |
| 530 shift = bits_per_channel - 8; |
| 531 external_resources.bits_per_channel = 8; |
| 532 } |
| 533 |
| 534 for (size_t plane = 0; plane < plane_resources.size(); ++plane) { |
| 535 PlaneResource& plane_resource = *plane_resources[plane]; |
| 493 // Update each plane's resource id with its content. | 536 // Update each plane's resource id with its content. |
| 494 DCHECK_EQ(plane_resource.resource_format(), | 537 DCHECK_EQ(plane_resource.resource_format(), output_resource_format); |
| 495 resource_provider_->YuvResourceFormat(bits_per_channel)); | |
| 496 | 538 |
| 497 if (!plane_resource.Matches(video_frame->unique_id(), i)) { | 539 if (!plane_resource.Matches(video_frame->unique_id(), plane)) { |
| 498 // We need to transfer data from |video_frame| to the plane resource. | 540 // We need to transfer data from |video_frame| to the plane resource. |
| 499 // TODO(reveman): Can use GpuMemoryBuffers here to improve performance. | 541 // TODO(reveman): Can use GpuMemoryBuffers here to improve performance. |
| 500 | 542 |
| 501 // The |resource_size_pixels| is the size of the resource we want to | 543 // The |resource_size_pixels| is the size of the resource we want to |
| 502 // upload to. | 544 // upload to. |
| 503 gfx::Size resource_size_pixels = plane_resource.resource_size(); | 545 gfx::Size resource_size_pixels = plane_resource.resource_size(); |
| 504 // The |video_stride_bytes| is the width of the video frame we are | 546 // The |video_stride_bytes| is the width of the video frame we are |
| 505 // uploading (including non-frame data to fill in the stride). | 547 // uploading (including non-frame data to fill in the stride). |
| 506 int video_stride_bytes = video_frame->stride(i); | 548 int video_stride_bytes = video_frame->stride(plane); |
| 507 | 549 |
| 508 size_t bytes_per_row = ResourceUtil::CheckedWidthInBytes<size_t>( | 550 size_t bytes_per_row = ResourceUtil::CheckedWidthInBytes<size_t>( |
| 509 resource_size_pixels.width(), plane_resource.resource_format()); | 551 resource_size_pixels.width(), plane_resource.resource_format()); |
| 510 // Use 4-byte row alignment (OpenGL default) for upload performance. | 552 // Use 4-byte row alignment (OpenGL default) for upload performance. |
| 511 // Assuming that GL_UNPACK_ALIGNMENT has not changed from default. | 553 // Assuming that GL_UNPACK_ALIGNMENT has not changed from default. |
| 512 size_t upload_image_stride = | 554 size_t upload_image_stride = |
| 513 MathUtil::CheckedRoundUp<size_t>(bytes_per_row, 4u); | 555 MathUtil::CheckedRoundUp<size_t>(bytes_per_row, 4u); |
| 514 | 556 |
| 515 bool needs_conversion = false; | |
| 516 int shift = 0; | |
| 517 | |
| 518 // LUMINANCE_F16 uses half-floats, so we always need a conversion step. | |
| 519 if (plane_resource.resource_format() == LUMINANCE_F16) { | |
| 520 needs_conversion = true; | |
| 521 | |
| 522 // If the input data was 9 or 10 bit, and we output to half-floats, | |
| 523 // then we used the OR path below, which means that we need to | |
| 524 // adjust the resource offset and multiplier accordingly. If the | |
| 525 // input data uses more than 10 bits, it will already be normalized | |
| 526 // to 0.0..1.0, so there is no need to do anything. | |
| 527 if (bits_per_channel <= 10) { | |
| 528 // By OR-ing with 0x3800, 10-bit numbers become half-floats in the | |
| 529 // range [0.5..1) and 9-bit numbers get the range [0.5..0.75). | |
| 530 // | |
| 531 // Half-floats are evaluated as: | |
| 532 // float value = pow(2.0, exponent - 25) * (0x400 + fraction); | |
| 533 // | |
| 534 // In our case the exponent is 14 (since we or with 0x3800) and | |
| 535 // pow(2.0, 14-25) * 0x400 evaluates to 0.5 (our offset) and | |
| 536 // pow(2.0, 14-25) * fraction is [0..0.49951171875] for 10-bit and | |
| 537 // [0..0.24951171875] for 9-bit. | |
| 538 // | |
| 539 // https://en.wikipedia.org/wiki/Half-precision_floating-point_format | |
| 540 // | |
| 541 // PLEASE NOTE: | |
| 542 // All planes are assumed to use the same multiplier/offset. | |
| 543 external_resources.offset = 0.5f; | |
| 544 // Max value from input data. | |
| 545 int max_input_value = (1 << bits_per_channel) - 1; | |
| 546 // 2 << 11 = 2048 would be 1.0 with our exponent. | |
| 547 external_resources.multiplier = 2048.0 / max_input_value; | |
| 548 } | |
| 549 } else if (bits_per_channel > 8) { | |
| 550 // If bits_per_channel > 8 and we can't use LUMINANCE_F16, we need to | |
| 551 // shift the data down and create an 8-bit texture. | |
| 552 needs_conversion = true; | |
| 553 shift = bits_per_channel - 8; | |
| 554 } | |
| 555 const uint8_t* pixels; | 557 const uint8_t* pixels; |
| 556 if (static_cast<int>(upload_image_stride) == video_stride_bytes && | 558 if (static_cast<int>(upload_image_stride) == video_stride_bytes && |
| 557 !needs_conversion) { | 559 !needs_conversion) { |
| 558 pixels = video_frame->data(i); | 560 pixels = video_frame->data(plane); |
| 559 } else { | 561 } else { |
| 560 // Avoid malloc for each frame/plane if possible. | 562 // Avoid malloc for each frame/plane if possible. |
| 561 size_t needed_size = | 563 size_t needed_size = |
| 562 upload_image_stride * resource_size_pixels.height(); | 564 upload_image_stride * resource_size_pixels.height(); |
| 563 if (upload_pixels_.size() < needed_size) | 565 if (upload_pixels_.size() < needed_size) |
| 564 upload_pixels_.resize(needed_size); | 566 upload_pixels_.resize(needed_size); |
| 565 | 567 |
| 566 for (int row = 0; row < resource_size_pixels.height(); ++row) { | 568 for (int row = 0; row < resource_size_pixels.height(); ++row) { |
| 567 if (plane_resource.resource_format() == LUMINANCE_F16) { | 569 if (plane_resource.resource_format() == LUMINANCE_F16) { |
| 568 uint16_t* dst = reinterpret_cast<uint16_t*>( | 570 uint16_t* dst = reinterpret_cast<uint16_t*>( |
| 569 &upload_pixels_[upload_image_stride * row]); | 571 &upload_pixels_[upload_image_stride * row]); |
| 570 const uint16_t* src = reinterpret_cast<uint16_t*>( | 572 const uint16_t* src = reinterpret_cast<uint16_t*>( |
| 571 video_frame->data(i) + (video_stride_bytes * row)); | 573 video_frame->data(plane) + (video_stride_bytes * row)); |
| 572 if (bits_per_channel <= 10) { | 574 if (bits_per_channel <= 10) { |
| 573 // Micro-benchmarking indicates that the compiler does | 575 // Micro-benchmarking indicates that the compiler does |
| 574 // a good enough job of optimizing this loop that trying | 576 // a good enough job of optimizing this loop that trying |
| 575 // to manually operate on one uint64 at a time is not | 577 // to manually operate on one uint64 at a time is not |
| 576 // actually helpful. | 578 // actually helpful. |
| 577 // Note to future optimizers: Benchmark your optimizations! | 579 // Note to future optimizers: Benchmark your optimizations! |
| 578 for (size_t i = 0; i < bytes_per_row / 2; i++) | 580 for (size_t i = 0; i < bytes_per_row / 2; i++) |
| 579 dst[i] = src[i] | 0x3800; | 581 dst[i] = src[i] | 0x3800; |
| 580 } else { | 582 } else { |
| 581 MakeHalfFloats(src, bits_per_channel, bytes_per_row / 2, dst); | 583 MakeHalfFloats(src, bits_per_channel, bytes_per_row / 2, dst); |
| 582 } | 584 } |
| 583 } else if (shift != 0) { | 585 } else if (shift != 0) { |
| 584 // We have more-than-8-bit input which we need to shift | 586 // We have more-than-8-bit input which we need to shift |
| 585 // down to fit it into an 8-bit texture. | 587 // down to fit it into an 8-bit texture. |
| 586 uint8_t* dst = &upload_pixels_[upload_image_stride * row]; | 588 uint8_t* dst = &upload_pixels_[upload_image_stride * row]; |
| 587 const uint16_t* src = reinterpret_cast<uint16_t*>( | 589 const uint16_t* src = reinterpret_cast<uint16_t*>( |
| 588 video_frame->data(i) + (video_stride_bytes * row)); | 590 video_frame->data(plane) + (video_stride_bytes * row)); |
| 589 for (size_t i = 0; i < bytes_per_row; i++) | 591 for (size_t i = 0; i < bytes_per_row; i++) |
| 590 dst[i] = src[i] >> shift; | 592 dst[i] = src[i] >> shift; |
| 591 } else { | 593 } else { |
| 592 // Input and output are the same size and format, but | 594 // Input and output are the same size and format, but |
| 593 // differ in stride, copy one row at a time. | 595 // differ in stride, copy one row at a time. |
| 594 uint8_t* dst = &upload_pixels_[upload_image_stride * row]; | 596 uint8_t* dst = &upload_pixels_[upload_image_stride * row]; |
| 595 const uint8_t* src = | 597 const uint8_t* src = |
| 596 video_frame->data(i) + (video_stride_bytes * row); | 598 video_frame->data(plane) + (video_stride_bytes * row); |
| 597 memcpy(dst, src, bytes_per_row); | 599 memcpy(dst, src, bytes_per_row); |
| 598 } | 600 } |
| 599 } | 601 } |
| 600 pixels = &upload_pixels_[0]; | 602 pixels = &upload_pixels_[0]; |
| 601 } | 603 } |
| 602 | 604 |
| 603 resource_provider_->CopyToResource(plane_resource.resource_id(), pixels, | 605 resource_provider_->CopyToResource(plane_resource.resource_id(), pixels, |
| 604 resource_size_pixels); | 606 resource_size_pixels); |
| 605 plane_resource.SetUniqueId(video_frame->unique_id(), i); | 607 plane_resource.SetUniqueId(video_frame->unique_id(), plane); |
| 606 } | 608 } |
| 607 | 609 |
| 608 | |
| 609 // VideoResourceUpdater shares a context with the compositor so a | 610 // VideoResourceUpdater shares a context with the compositor so a |
| 610 // sync token is not required. | 611 // sync token is not required. |
| 611 TextureMailbox mailbox(plane_resource.mailbox(), gpu::SyncToken(), | 612 TextureMailbox mailbox(plane_resource.mailbox(), gpu::SyncToken(), |
| 612 resource_provider_->GetResourceTextureTarget( | 613 resource_provider_->GetResourceTextureTarget( |
| 613 plane_resource.resource_id())); | 614 plane_resource.resource_id())); |
| 614 mailbox.set_color_space(video_frame->ColorSpace()); | 615 mailbox.set_color_space(video_frame->ColorSpace()); |
| 615 external_resources.mailboxes.push_back(mailbox); | 616 external_resources.mailboxes.push_back(mailbox); |
| 616 external_resources.release_callbacks.push_back(base::Bind( | 617 external_resources.release_callbacks.push_back(base::Bind( |
| 617 &RecycleResource, AsWeakPtr(), plane_resource.resource_id())); | 618 &RecycleResource, AsWeakPtr(), plane_resource.resource_id())); |
| 618 } | 619 } |
| (...skipping 146 matching lines...) |
| 765 if (lost_resource) { | 766 if (lost_resource) { |
| 766 resource_it->clear_refs(); | 767 resource_it->clear_refs(); |
| 767 updater->DeleteResource(resource_it); | 768 updater->DeleteResource(resource_it); |
| 768 return; | 769 return; |
| 769 } | 770 } |
| 770 | 771 |
| 771 resource_it->remove_ref(); | 772 resource_it->remove_ref(); |
| 772 } | 773 } |
| 773 | 774 |
| 774 } // namespace cc | 775 } // namespace cc |
| OLD | NEW |
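
The LUMINANCE_F16 comment block in this CL (OR-ing 9/10-bit samples with 0x3800, then exporting offset 0.5 and multiplier 2048 / max_input_value) is easy to sanity-check outside the compositor. Below is a minimal standalone sketch, not part of the patch, that encodes a 10-bit sample exactly as the upload loop does and then undoes the bias with the exported offset and multiplier. The DecodeHalf helper and the recovery expression are illustrative assumptions: in production the decoding happens in the GPU's half-float sampling and the draw-quad shader, which are outside this diff.

```cpp
// Standalone sketch (not part of the CL) checking the LUMINANCE_F16 bias math
// described in the review comments.
#include <cmath>
#include <cstdint>
#include <cstdio>

// Decode a normalized 16-bit half-float bit pattern using the formula quoted
// in the comment: value = pow(2.0, exponent - 25) * (0x400 + fraction).
double DecodeHalf(uint16_t bits) {
  const int exponent = (bits >> 10) & 0x1F;
  const int fraction = bits & 0x3FF;
  return std::pow(2.0, exponent - 25) * (0x400 + fraction);
}

int main() {
  const int bits_per_channel = 10;
  const int max_input_value = (1 << bits_per_channel) - 1;  // 1023
  const double offset = 0.5;                           // exported resource offset
  const double multiplier = 2048.0 / max_input_value;  // exported multiplier

  for (int sample : {0, 512, max_input_value}) {
    // What the upload loop does for <= 10-bit input: dst[i] = src[i] | 0x3800.
    const uint16_t half_bits = static_cast<uint16_t>(sample | 0x3800);
    const double sampled = DecodeHalf(half_bits);  // lands in [0.5, 1)
    // Undo the bias; this mirrors the intent of offset/multiplier.
    const double recovered = (sampled - offset) * multiplier;
    std::printf("sample %4d -> half %.8f -> recovered %.8f (expected %.8f)\n",
                sample, sampled, recovered,
                static_cast<double>(sample) / max_input_value);
  }
  return 0;
}
```

For 10-bit input this recovers 0, 512/1023, and 1.0 exactly, matching the claimed [0.5..1) encoded range and the "pow(2.0, 14 - 25) * 0x400 = 0.5" offset in the comment; exactly where the shader applies the offset relative to the multiplier is a detail of the draw-quad code and is not asserted here.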