| OLD | NEW |
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "cc/raster/one_copy_tile_task_worker_pool.h" | 5 #include "cc/raster/one_copy_tile_task_worker_pool.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <limits> | 8 #include <limits> |
| 9 | 9 |
| 10 #include "base/strings/stringprintf.h" | 10 #include "base/strings/stringprintf.h" |
| (...skipping 308 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 319 it != completed_tasks_.end(); ++it) { | 319 it != completed_tasks_.end(); ++it) { |
| 320 TileTask* task = static_cast<TileTask*>(it->get()); | 320 TileTask* task = static_cast<TileTask*>(it->get()); |
| 321 | 321 |
| 322 task->WillComplete(); | 322 task->WillComplete(); |
| 323 task->CompleteOnOriginThread(this); | 323 task->CompleteOnOriginThread(this); |
| 324 task->DidComplete(); | 324 task->DidComplete(); |
| 325 } | 325 } |
| 326 completed_tasks_.clear(); | 326 completed_tasks_.clear(); |
| 327 } | 327 } |
| 328 | 328 |
| 329 ResourceFormat OneCopyTileTaskWorkerPool::GetResourceFormat() const { | 329 ResourceFormat OneCopyTileTaskWorkerPool::GetResourceFormat( |
| 330 return resource_provider_->memory_efficient_texture_format(); | 330 bool must_support_alpha) const { |
| 331 return resource_provider_->memory_efficient_texture_format( |
| 332 must_support_alpha); |
| 331 } | 333 } |
| 332 | 334 |
| 333 bool OneCopyTileTaskWorkerPool::GetResourceRequiresSwizzle() const { | 335 bool OneCopyTileTaskWorkerPool::GetResourceRequiresSwizzle( |
| 334 return !PlatformColor::SameComponentOrder(GetResourceFormat()); | 336 bool must_support_alpha) const { |
| 337 return !PlatformColor::SameComponentOrder( |
| 338 GetResourceFormat(must_support_alpha)); |
| 335 } | 339 } |
| 336 | 340 |
| 337 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster( | 341 scoped_ptr<RasterBuffer> OneCopyTileTaskWorkerPool::AcquireBufferForRaster( |
| 338 const Resource* resource, | 342 const Resource* resource, |
| 339 uint64_t resource_content_id, | 343 uint64_t resource_content_id, |
| 340 uint64_t previous_content_id) { | 344 uint64_t previous_content_id) { |
| 341 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload | 345 // TODO(danakj): If resource_content_id != 0, we only need to copy/upload |
| 342 // the dirty rect. | 346 // the dirty rect. |
| 343 DCHECK_EQ(resource->format(), | 347 return make_scoped_ptr<RasterBuffer>( |
| 344 resource_provider_->memory_efficient_texture_format()); | 348 new RasterBufferImpl(this, resource_provider_, resource->format(), |
| 345 return make_scoped_ptr<RasterBuffer>(new RasterBufferImpl( | 349 resource, previous_content_id)); |
| 346 this, resource_provider_, | |
| 347 resource_provider_->memory_efficient_texture_format(), resource, | |
| 348 previous_content_id)); | |
| 349 } | 350 } |
| 350 | 351 |
| 351 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( | 352 void OneCopyTileTaskWorkerPool::ReleaseBufferForRaster( |
| 352 scoped_ptr<RasterBuffer> buffer) { | 353 scoped_ptr<RasterBuffer> buffer) { |
| 353 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. | 354 // Nothing to do here. RasterBufferImpl destructor cleans up after itself. |
| 354 } | 355 } |
| 355 | 356 |
| 356 void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread( | 357 void OneCopyTileTaskWorkerPool::PlaybackAndCopyOnWorkerThread( |
| 357 const Resource* resource, | 358 const Resource* resource, |
| 358 const ResourceProvider::ScopedWriteLockGL* resource_lock, | 359 const ResourceProvider::ScopedWriteLockGL* resource_lock, |
| (...skipping 10 matching lines...) Expand all Loading... |
| 369 AcquireStagingBuffer(resource, previous_content_id); | 370 AcquireStagingBuffer(resource, previous_content_id); |
| 370 DCHECK(staging_buffer); | 371 DCHECK(staging_buffer); |
| 371 | 372 |
| 372 { | 373 { |
| 373 base::AutoUnlock unlock(lock_); | 374 base::AutoUnlock unlock(lock_); |
| 374 | 375 |
| 375 // Allocate GpuMemoryBuffer if necessary. | 376 // Allocate GpuMemoryBuffer if necessary. |
| 376 if (!staging_buffer->gpu_memory_buffer) { | 377 if (!staging_buffer->gpu_memory_buffer) { |
| 377 staging_buffer->gpu_memory_buffer = | 378 staging_buffer->gpu_memory_buffer = |
| 378 resource_provider_->gpu_memory_buffer_manager() | 379 resource_provider_->gpu_memory_buffer_manager() |
| 379 ->AllocateGpuMemoryBuffer( | 380 ->AllocateGpuMemoryBuffer(staging_buffer->size, |
| 380 staging_buffer->size, | 381 BufferFormat(resource->format()), |
| 381 BufferFormat( | 382 use_persistent_gpu_memory_buffers_ |
| 382 resource_provider_->memory_efficient_texture_format()), | 383 ? gfx::BufferUsage::PERSISTENT_MAP |
| 383 use_persistent_gpu_memory_buffers_ | 384 : gfx::BufferUsage::MAP); |
| 384 ? gfx::BufferUsage::PERSISTENT_MAP | |
| 385 : gfx::BufferUsage::MAP); | |
| 386 DCHECK_EQ(gfx::NumberOfPlanesForBufferFormat( | 385 DCHECK_EQ(gfx::NumberOfPlanesForBufferFormat( |
| 387 staging_buffer->gpu_memory_buffer->GetFormat()), | 386 staging_buffer->gpu_memory_buffer->GetFormat()), |
| 388 1u); | 387 1u); |
| 389 } | 388 } |
| 390 | 389 |
| 391 gfx::Rect playback_rect = raster_full_rect; | 390 gfx::Rect playback_rect = raster_full_rect; |
| 392 if (use_persistent_gpu_memory_buffers_ && previous_content_id) { | 391 if (use_persistent_gpu_memory_buffers_ && previous_content_id) { |
| 393 // Reduce playback rect to dirty region if the content id of the staging | 392 // Reduce playback rect to dirty region if the content id of the staging |
| 394 // buffer matches the previous content id. | 393 // buffer matches the previous content id. |
| 395 if (previous_content_id == staging_buffer->content_id) | 394 if (previous_content_id == staging_buffer->content_id) |
| 396 playback_rect.Intersect(raster_dirty_rect); | 395 playback_rect.Intersect(raster_dirty_rect); |
| 397 } | 396 } |
| 398 | 397 |
| 399 if (staging_buffer->gpu_memory_buffer) { | 398 if (staging_buffer->gpu_memory_buffer) { |
| 400 void* data = nullptr; | 399 void* data = nullptr; |
| 401 bool rv = staging_buffer->gpu_memory_buffer->Map(&data); | 400 bool rv = staging_buffer->gpu_memory_buffer->Map(&data); |
| 402 DCHECK(rv); | 401 DCHECK(rv); |
| 403 int stride; | 402 int stride; |
| 404 staging_buffer->gpu_memory_buffer->GetStride(&stride); | 403 staging_buffer->gpu_memory_buffer->GetStride(&stride); |
| 405 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. | 404 // TileTaskWorkerPool::PlaybackToMemory only supports unsigned strides. |
| 406 DCHECK_GE(stride, 0); | 405 DCHECK_GE(stride, 0); |
| 407 | 406 |
| 408 DCHECK(!playback_rect.IsEmpty()) | 407 DCHECK(!playback_rect.IsEmpty()) |
| 409 << "Why are we rastering a tile that's not dirty?"; | 408 << "Why are we rastering a tile that's not dirty?"; |
| 410 TileTaskWorkerPool::PlaybackToMemory( | 409 TileTaskWorkerPool::PlaybackToMemory( |
| 411 data, resource_provider_->memory_efficient_texture_format(), | 410 data, resource->format(), staging_buffer->size, |
| 412 staging_buffer->size, static_cast<size_t>(stride), raster_source, | 411 static_cast<size_t>(stride), raster_source, raster_full_rect, |
| 413 raster_full_rect, playback_rect, scale, include_images); | 412 playback_rect, scale, include_images); |
| 414 staging_buffer->gpu_memory_buffer->Unmap(); | 413 staging_buffer->gpu_memory_buffer->Unmap(); |
| 415 staging_buffer->content_id = new_content_id; | 414 staging_buffer->content_id = new_content_id; |
| 416 } | 415 } |
| 417 } | 416 } |
| 418 | 417 |
| 419 ContextProvider* context_provider = | 418 ContextProvider* context_provider = |
| 420 resource_provider_->output_surface()->worker_context_provider(); | 419 resource_provider_->output_surface()->worker_context_provider(); |
| 421 DCHECK(context_provider); | 420 DCHECK(context_provider); |
| 422 | 421 |
| 423 { | 422 { |
| 424 ContextProvider::ScopedContextLock scoped_context(context_provider); | 423 ContextProvider::ScopedContextLock scoped_context(context_provider); |
| 425 | 424 |
| 426 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); | 425 gpu::gles2::GLES2Interface* gl = scoped_context.ContextGL(); |
| 427 DCHECK(gl); | 426 DCHECK(gl); |
| 428 | 427 |
| 429 unsigned image_target = resource_provider_->GetImageTextureTarget( | 428 unsigned image_target = |
| 430 resource_provider_->memory_efficient_texture_format()); | 429 resource_provider_->GetImageTextureTarget(resource->format()); |
| 431 | 430 |
| 432 // Create and bind staging texture. | 431 // Create and bind staging texture. |
| 433 if (!staging_buffer->texture_id) { | 432 if (!staging_buffer->texture_id) { |
| 434 gl->GenTextures(1, &staging_buffer->texture_id); | 433 gl->GenTextures(1, &staging_buffer->texture_id); |
| 435 gl->BindTexture(image_target, staging_buffer->texture_id); | 434 gl->BindTexture(image_target, staging_buffer->texture_id); |
| 436 gl->TexParameteri(image_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST); | 435 gl->TexParameteri(image_target, GL_TEXTURE_MIN_FILTER, GL_NEAREST); |
| 437 gl->TexParameteri(image_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST); | 436 gl->TexParameteri(image_target, GL_TEXTURE_MAG_FILTER, GL_NEAREST); |
| 438 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); | 437 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); |
| 439 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); | 438 gl->TexParameteri(image_target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); |
| 440 } else { | 439 } else { |
| 441 gl->BindTexture(image_target, staging_buffer->texture_id); | 440 gl->BindTexture(image_target, staging_buffer->texture_id); |
| 442 } | 441 } |
| 443 | 442 |
| 444 // Create and bind image. | 443 // Create and bind image. |
| 445 if (!staging_buffer->image_id) { | 444 if (!staging_buffer->image_id) { |
| 446 if (staging_buffer->gpu_memory_buffer) { | 445 if (staging_buffer->gpu_memory_buffer) { |
| 447 staging_buffer->image_id = gl->CreateImageCHROMIUM( | 446 staging_buffer->image_id = gl->CreateImageCHROMIUM( |
| 448 staging_buffer->gpu_memory_buffer->AsClientBuffer(), | 447 staging_buffer->gpu_memory_buffer->AsClientBuffer(), |
| 449 staging_buffer->size.width(), staging_buffer->size.height(), | 448 staging_buffer->size.width(), staging_buffer->size.height(), |
| 450 GLInternalFormat( | 449 GLInternalFormat(resource->format())); |
| 451 resource_provider_->memory_efficient_texture_format())); | |
| 452 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); | 450 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); |
| 453 } | 451 } |
| 454 } else { | 452 } else { |
| 455 gl->ReleaseTexImage2DCHROMIUM(image_target, staging_buffer->image_id); | 453 gl->ReleaseTexImage2DCHROMIUM(image_target, staging_buffer->image_id); |
| 456 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); | 454 gl->BindTexImage2DCHROMIUM(image_target, staging_buffer->image_id); |
| 457 } | 455 } |
| 458 | 456 |
| 459 // Unbind staging texture. | 457 // Unbind staging texture. |
| 460 gl->BindTexture(image_target, 0); | 458 gl->BindTexture(image_target, 0); |
| 461 | 459 |
| 462 if (resource_provider_->use_sync_query()) { | 460 if (resource_provider_->use_sync_query()) { |
| 463 if (!staging_buffer->query_id) | 461 if (!staging_buffer->query_id) |
| 464 gl->GenQueriesEXT(1, &staging_buffer->query_id); | 462 gl->GenQueriesEXT(1, &staging_buffer->query_id); |
| 465 | 463 |
| 466 #if defined(OS_CHROMEOS) | 464 #if defined(OS_CHROMEOS) |
| 467 // TODO(reveman): This avoids a performance problem on some ChromeOS | 465 // TODO(reveman): This avoids a performance problem on some ChromeOS |
| 468 // devices. This needs to be removed to support native GpuMemoryBuffer | 466 // devices. This needs to be removed to support native GpuMemoryBuffer |
| 469 // implementations. crbug.com/436314 | 467 // implementations. crbug.com/436314 |
| 470 gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id); | 468 gl->BeginQueryEXT(GL_COMMANDS_ISSUED_CHROMIUM, staging_buffer->query_id); |
| 471 #else | 469 #else |
| 472 gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, | 470 gl->BeginQueryEXT(GL_COMMANDS_COMPLETED_CHROMIUM, |
| 473 staging_buffer->query_id); | 471 staging_buffer->query_id); |
| 474 #endif | 472 #endif |
| 475 } | 473 } |
| 476 | 474 |
| 477 int bytes_per_row = | 475 int bytes_per_row = |
| 478 (BitsPerPixel(resource_provider_->memory_efficient_texture_format()) * | 476 (BitsPerPixel(resource->format()) * resource->size().width()) / 8; |
| 479 resource->size().width()) / | |
| 480 8; | |
| 481 int chunk_size_in_rows = | 477 int chunk_size_in_rows = |
| 482 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); | 478 std::max(1, max_bytes_per_copy_operation_ / bytes_per_row); |
| 483 // Align chunk size to 4. Required to support compressed texture formats. | 479 // Align chunk size to 4. Required to support compressed texture formats. |
| 484 chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4); | 480 chunk_size_in_rows = MathUtil::UncheckedRoundUp(chunk_size_in_rows, 4); |
| 485 int y = 0; | 481 int y = 0; |
| 486 int height = resource->size().height(); | 482 int height = resource->size().height(); |
| 487 while (y < height) { | 483 while (y < height) { |
| 488 // Copy at most |chunk_size_in_rows|. | 484 // Copy at most |chunk_size_in_rows|. |
| 489 int rows_to_copy = std::min(chunk_size_in_rows, height - y); | 485 int rows_to_copy = std::min(chunk_size_in_rows, height - y); |
| 490 DCHECK_GT(rows_to_copy, 0); | 486 DCHECK_GT(rows_to_copy, 0); |
| (...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 522 | 518 |
| 523 ScheduleReduceMemoryUsage(); | 519 ScheduleReduceMemoryUsage(); |
| 524 } | 520 } |
| 525 | 521 |
| 526 bool OneCopyTileTaskWorkerPool::OnMemoryDump( | 522 bool OneCopyTileTaskWorkerPool::OnMemoryDump( |
| 527 const base::trace_event::MemoryDumpArgs& args, | 523 const base::trace_event::MemoryDumpArgs& args, |
| 528 base::trace_event::ProcessMemoryDump* pmd) { | 524 base::trace_event::ProcessMemoryDump* pmd) { |
| 529 base::AutoLock lock(lock_); | 525 base::AutoLock lock(lock_); |
| 530 | 526 |
| 531 for (const auto& buffer : buffers_) { | 527 for (const auto& buffer : buffers_) { |
| 532 buffer->OnMemoryDump(pmd, | 528 buffer->OnMemoryDump( |
| 533 resource_provider_->memory_efficient_texture_format(), | 529 pmd, |
| 534 std::find(free_buffers_.begin(), free_buffers_.end(), | 530 BufferFormatToResourceFormat(buffer->gpu_memory_buffer->GetFormat()), |
| 535 buffer) != free_buffers_.end()); | 531 std::find(free_buffers_.begin(), free_buffers_.end(), buffer) != |
| 532 free_buffers_.end()); |
| 536 } | 533 } |
| 537 | 534 |
| 538 return true; | 535 return true; |
| 539 } | 536 } |
| 540 | 537 |
| 541 void OneCopyTileTaskWorkerPool::AddStagingBuffer( | 538 void OneCopyTileTaskWorkerPool::AddStagingBuffer( |
| 542 const StagingBuffer* staging_buffer) { | 539 const StagingBuffer* staging_buffer, |
| 540 ResourceFormat format) { |
| 543 lock_.AssertAcquired(); | 541 lock_.AssertAcquired(); |
| 544 | 542 |
| 545 DCHECK(buffers_.find(staging_buffer) == buffers_.end()); | 543 DCHECK(buffers_.find(staging_buffer) == buffers_.end()); |
| 546 buffers_.insert(staging_buffer); | 544 buffers_.insert(staging_buffer); |
| 547 int buffer_usage_in_bytes = ResourceUtil::UncheckedSizeInBytes<int>( | 545 int buffer_usage_in_bytes = |
| 548 staging_buffer->size, | 546 ResourceUtil::UncheckedSizeInBytes<int>(staging_buffer->size, format); |
| 549 resource_provider_->memory_efficient_texture_format()); | |
| 550 staging_buffer_usage_in_bytes_ += buffer_usage_in_bytes; | 547 staging_buffer_usage_in_bytes_ += buffer_usage_in_bytes; |
| 551 } | 548 } |
| 552 | 549 |
| 553 void OneCopyTileTaskWorkerPool::RemoveStagingBuffer( | 550 void OneCopyTileTaskWorkerPool::RemoveStagingBuffer( |
| 554 const StagingBuffer* staging_buffer) { | 551 const StagingBuffer* staging_buffer) { |
| 555 lock_.AssertAcquired(); | 552 lock_.AssertAcquired(); |
| 556 | 553 |
| 557 DCHECK(buffers_.find(staging_buffer) != buffers_.end()); | 554 DCHECK(buffers_.find(staging_buffer) != buffers_.end()); |
| 558 buffers_.erase(staging_buffer); | 555 buffers_.erase(staging_buffer); |
| 559 int buffer_usage_in_bytes = ResourceUtil::UncheckedSizeInBytes<int>( | 556 int buffer_usage_in_bytes = ResourceUtil::UncheckedSizeInBytes<int>( |
| 560 staging_buffer->size, | 557 staging_buffer->size, |
| 561 resource_provider_->memory_efficient_texture_format()); | 558 BufferFormatToResourceFormat( |
| 559 staging_buffer->gpu_memory_buffer->GetFormat())); |
| 562 DCHECK_GE(staging_buffer_usage_in_bytes_, buffer_usage_in_bytes); | 560 DCHECK_GE(staging_buffer_usage_in_bytes_, buffer_usage_in_bytes); |
| 563 staging_buffer_usage_in_bytes_ -= buffer_usage_in_bytes; | 561 staging_buffer_usage_in_bytes_ -= buffer_usage_in_bytes; |
| 564 } | 562 } |
| 565 | 563 |
| 566 void OneCopyTileTaskWorkerPool::MarkStagingBufferAsFree( | 564 void OneCopyTileTaskWorkerPool::MarkStagingBufferAsFree( |
| 567 const StagingBuffer* staging_buffer) { | 565 const StagingBuffer* staging_buffer) { |
| 568 lock_.AssertAcquired(); | 566 lock_.AssertAcquired(); |
| 569 | 567 |
| 570 int buffer_usage_in_bytes = ResourceUtil::UncheckedSizeInBytes<int>( | 568 int buffer_usage_in_bytes = ResourceUtil::UncheckedSizeInBytes<int>( |
| 571 staging_buffer->size, | 569 staging_buffer->size, |
| 572 resource_provider_->memory_efficient_texture_format()); | 570 BufferFormatToResourceFormat( |
| 571 staging_buffer->gpu_memory_buffer->GetFormat())); |
| 573 free_staging_buffer_usage_in_bytes_ += buffer_usage_in_bytes; | 572 free_staging_buffer_usage_in_bytes_ += buffer_usage_in_bytes; |
| 574 } | 573 } |
| 575 | 574 |
| 576 void OneCopyTileTaskWorkerPool::MarkStagingBufferAsBusy( | 575 void OneCopyTileTaskWorkerPool::MarkStagingBufferAsBusy( |
| 577 const StagingBuffer* staging_buffer) { | 576 const StagingBuffer* staging_buffer) { |
| 578 lock_.AssertAcquired(); | 577 lock_.AssertAcquired(); |
| 579 | 578 |
| 580 int buffer_usage_in_bytes = ResourceUtil::UncheckedSizeInBytes<int>( | 579 int buffer_usage_in_bytes = ResourceUtil::UncheckedSizeInBytes<int>( |
| 581 staging_buffer->size, | 580 staging_buffer->size, |
| 582 resource_provider_->memory_efficient_texture_format()); | 581 BufferFormatToResourceFormat( |
| 582 staging_buffer->gpu_memory_buffer->GetFormat())); |
| 583 DCHECK_GE(free_staging_buffer_usage_in_bytes_, buffer_usage_in_bytes); | 583 DCHECK_GE(free_staging_buffer_usage_in_bytes_, buffer_usage_in_bytes); |
| 584 free_staging_buffer_usage_in_bytes_ -= buffer_usage_in_bytes; | 584 free_staging_buffer_usage_in_bytes_ -= buffer_usage_in_bytes; |
| 585 } | 585 } |
| 586 | 586 |
| 587 scoped_ptr<OneCopyTileTaskWorkerPool::StagingBuffer> | 587 scoped_ptr<OneCopyTileTaskWorkerPool::StagingBuffer> |
| 588 OneCopyTileTaskWorkerPool::AcquireStagingBuffer(const Resource* resource, | 588 OneCopyTileTaskWorkerPool::AcquireStagingBuffer(const Resource* resource, |
| 589 uint64_t previous_content_id) { | 589 uint64_t previous_content_id) { |
| 590 lock_.AssertAcquired(); | 590 lock_.AssertAcquired(); |
| 591 | 591 |
| 592 scoped_ptr<StagingBuffer> staging_buffer; | 592 scoped_ptr<StagingBuffer> staging_buffer; |
| (...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 640 std::find_if(free_buffers_.begin(), free_buffers_.end(), | 640 std::find_if(free_buffers_.begin(), free_buffers_.end(), |
| 641 [previous_content_id](const StagingBuffer* buffer) { | 641 [previous_content_id](const StagingBuffer* buffer) { |
| 642 return buffer->content_id == previous_content_id; | 642 return buffer->content_id == previous_content_id; |
| 643 }); | 643 }); |
| 644 if (it != free_buffers_.end()) { | 644 if (it != free_buffers_.end()) { |
| 645 staging_buffer = free_buffers_.take(it); | 645 staging_buffer = free_buffers_.take(it); |
| 646 MarkStagingBufferAsBusy(staging_buffer.get()); | 646 MarkStagingBufferAsBusy(staging_buffer.get()); |
| 647 } | 647 } |
| 648 } | 648 } |
| 649 | 649 |
| 650 // Find staging buffer of correct size. | 650 // Find staging buffer of correct size and format. |
| 651 if (!staging_buffer) { | 651 if (!staging_buffer) { |
| 652 StagingBufferDeque::iterator it = | 652 StagingBufferDeque::iterator it = |
| 653 std::find_if(free_buffers_.begin(), free_buffers_.end(), | 653 std::find_if(free_buffers_.begin(), free_buffers_.end(), |
| 654 [resource](const StagingBuffer* buffer) { | 654 [resource](const StagingBuffer* buffer) { |
| 655 return buffer->size == resource->size(); | 655 return buffer->size == resource->size() && |
| 656 BufferFormatToResourceFormat( |
| 657 buffer->gpu_memory_buffer->GetFormat()) == |
| 658 resource->format(); |
| 656 }); | 659 }); |
| 657 if (it != free_buffers_.end()) { | 660 if (it != free_buffers_.end()) { |
| 658 staging_buffer = free_buffers_.take(it); | 661 staging_buffer = free_buffers_.take(it); |
| 659 MarkStagingBufferAsBusy(staging_buffer.get()); | 662 MarkStagingBufferAsBusy(staging_buffer.get()); |
| 660 } | 663 } |
| 661 } | 664 } |
| 662 | 665 |
| 663 // Create new staging buffer if necessary. | 666 // Create new staging buffer if necessary. |
| 664 if (!staging_buffer) { | 667 if (!staging_buffer) { |
| 665 staging_buffer = make_scoped_ptr(new StagingBuffer(resource->size())); | 668 staging_buffer = make_scoped_ptr(new StagingBuffer(resource->size())); |
| 666 AddStagingBuffer(staging_buffer.get()); | 669 AddStagingBuffer(staging_buffer.get(), resource->format()); |
| 667 } | 670 } |
| 668 | 671 |
| 669 // Release enough free buffers to stay within the limit. | 672 // Release enough free buffers to stay within the limit. |
| 670 while (staging_buffer_usage_in_bytes_ > max_staging_buffer_usage_in_bytes_) { | 673 while (staging_buffer_usage_in_bytes_ > max_staging_buffer_usage_in_bytes_) { |
| 671 if (free_buffers_.empty()) | 674 if (free_buffers_.empty()) |
| 672 break; | 675 break; |
| 673 | 676 |
| 674 free_buffers_.front()->DestroyGLResources(gl); | 677 free_buffers_.front()->DestroyGLResources(gl); |
| 675 MarkStagingBufferAsBusy(free_buffers_.front()); | 678 MarkStagingBufferAsBusy(free_buffers_.front()); |
| 676 RemoveStagingBuffer(free_buffers_.front()); | 679 RemoveStagingBuffer(free_buffers_.front()); |
| (...skipping 130 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 807 | 810 |
| 808 staging_state->SetInteger("staging_buffer_count", | 811 staging_state->SetInteger("staging_buffer_count", |
| 809 static_cast<int>(buffers_.size())); | 812 static_cast<int>(buffers_.size())); |
| 810 staging_state->SetInteger("busy_count", | 813 staging_state->SetInteger("busy_count", |
| 811 static_cast<int>(busy_buffers_.size())); | 814 static_cast<int>(busy_buffers_.size())); |
| 812 staging_state->SetInteger("free_count", | 815 staging_state->SetInteger("free_count", |
| 813 static_cast<int>(free_buffers_.size())); | 816 static_cast<int>(free_buffers_.size())); |
| 814 } | 817 } |
| 815 | 818 |
| 816 } // namespace cc | 819 } // namespace cc |
| OLD | NEW |