| OLD | NEW |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "gpu/command_buffer/service/in_process_command_buffer.h" | 5 #include "gpu/command_buffer/service/in_process_command_buffer.h" |
| 6 | 6 |
| 7 #include <queue> | 7 #include <queue> |
| 8 #include <set> | 8 #include <set> |
| 9 #include <utility> | 9 #include <utility> |
| 10 | 10 |
| (...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 58 base::StaticAtomicSequenceNumber g_next_command_buffer_id; | 58 base::StaticAtomicSequenceNumber g_next_command_buffer_id; |
| 59 | 59 |
| 60 template <typename T> | 60 template <typename T> |
| 61 static void RunTaskWithResult(base::Callback<T(void)> task, | 61 static void RunTaskWithResult(base::Callback<T(void)> task, |
| 62 T* result, | 62 T* result, |
| 63 base::WaitableEvent* completion) { | 63 base::WaitableEvent* completion) { |
| 64 *result = task.Run(); | 64 *result = task.Run(); |
| 65 completion->Signal(); | 65 completion->Signal(); |
| 66 } | 66 } |
| 67 | 67 |
| 68 struct ScopedOrderNumberProcessor { |
| 69 ScopedOrderNumberProcessor(SyncPointOrderData* order_data, uint32_t order_num) |
| 70 : order_data_(order_data), order_num_(order_num) { |
| 71 order_data_->BeginProcessingOrderNumber(order_num_); |
| 72 } |
| 73 |
| 74 ~ScopedOrderNumberProcessor() { |
| 75 order_data_->FinishProcessingOrderNumber(order_num_); |
| 76 } |
| 77 |
| 78 private: |
| 79 SyncPointOrderData* order_data_; |
| 80 uint32_t order_num_; |
| 81 }; |
| 82 |
| 68 struct GpuInProcessThreadHolder { | 83 struct GpuInProcessThreadHolder { |
| 69 GpuInProcessThreadHolder() | 84 GpuInProcessThreadHolder() |
| 70 : sync_point_manager(new SyncPointManager(false)), | 85 : sync_point_manager(new SyncPointManager(false)), |
| 71 gpu_thread(new GpuInProcessThread(sync_point_manager.get())) {} | 86 gpu_thread(new GpuInProcessThread(sync_point_manager.get())) {} |
| 72 scoped_ptr<SyncPointManager> sync_point_manager; | 87 scoped_ptr<SyncPointManager> sync_point_manager; |
| 73 scoped_refptr<InProcessCommandBuffer::Service> gpu_thread; | 88 scoped_refptr<InProcessCommandBuffer::Service> gpu_thread; |
| 74 }; | 89 }; |
| 75 | 90 |
| 76 base::LazyInstance<GpuInProcessThreadHolder> g_default_service = | 91 base::LazyInstance<GpuInProcessThreadHolder> g_default_service = |
| 77 LAZY_INSTANCE_INITIALIZER; | 92 LAZY_INSTANCE_INITIALIZER; |
| (...skipping 412 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 490 GetStateFast(); | 505 GetStateFast(); |
| 491 return last_state_.token; | 506 return last_state_.token; |
| 492 } | 507 } |
| 493 | 508 |
| 494 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset, | 509 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset, |
| 495 uint32_t order_num) { | 510 uint32_t order_num) { |
| 496 CheckSequencedThread(); | 511 CheckSequencedThread(); |
| 497 ScopedEvent handle_flush(&flush_event_); | 512 ScopedEvent handle_flush(&flush_event_); |
| 498 base::AutoLock lock(command_buffer_lock_); | 513 base::AutoLock lock(command_buffer_lock_); |
| 499 | 514 |
| 500 sync_point_order_data_->BeginProcessingOrderNumber(order_num); | |
| 501 command_buffer_->Flush(put_offset); | |
| 502 { | 515 { |
| 503 // Update state before signaling the flush event. | 516 ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(), |
| 504 base::AutoLock lock(state_after_last_flush_lock_); | 517 order_num); |
| 505 state_after_last_flush_ = command_buffer_->GetLastState(); | 518 command_buffer_->Flush(put_offset); |
| 519 { |
| 520 // Update state before signaling the flush event. |
| 521 base::AutoLock lock(state_after_last_flush_lock_); |
| 522 state_after_last_flush_ = command_buffer_->GetLastState(); |
| 523 } |
| 524 DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) || |
| 525 (error::IsError(state_after_last_flush_.error) && context_lost_)); |
| 526 |
| 527 // Currently the in process command buffer does not support being |
| 528 // descheduled; if it does we would need to back off on calling the finish |
| 529 // processing order number function until the message is rescheduled and finished |
| 530 // processing. This DCHECK is to enforce this. |
| 531 DCHECK(context_lost_ || put_offset == state_after_last_flush_.get_offset); |
| 506 } | 532 } |
| 507 DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) || | |
| 508 (error::IsError(state_after_last_flush_.error) && context_lost_)); | |
| 509 | |
| 510 // Currently the in process command buffer does not support being descheduled, | |
| 511 // if it does we would need to back off on calling the finish processing | |
| 512 // order number function until the message is rescheduled and finished | |
| 513 // processing. This DCHECK is to enforce this. | |
| 514 DCHECK(context_lost_ || put_offset == state_after_last_flush_.get_offset); | |
| 515 sync_point_order_data_->FinishProcessingOrderNumber(order_num); | |
| 516 | 533 |
| 517 // If we've processed all pending commands but still have pending queries, | 534 // If we've processed all pending commands but still have pending queries, |
| 518 // pump idle work until the query is passed. | 535 // pump idle work until the query is passed. |
| 519 if (put_offset == state_after_last_flush_.get_offset && | 536 if (put_offset == state_after_last_flush_.get_offset && |
| 520 (gpu_scheduler_->HasMoreIdleWork() || | 537 (gpu_scheduler_->HasMoreIdleWork() || |
| 521 gpu_scheduler_->HasPendingQueries())) { | 538 gpu_scheduler_->HasPendingQueries())) { |
| 522 ScheduleDelayedWorkOnGpuThread(); | 539 ScheduleDelayedWorkOnGpuThread(); |
| 523 } | 540 } |
| 524 } | 541 } |
| 525 | 542 |
| (...skipping 135 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 661 internalformat, gpu_memory_buffer->GetFormat())); | 678 internalformat, gpu_memory_buffer->GetFormat())); |
| 662 | 679 |
| 663 // This handle is owned by the GPU thread and must be passed to it or it | 680 // This handle is owned by the GPU thread and must be passed to it or it |
| 664 // will leak. In other words, do not early out on error between here and the | 681 // will leak. In other words, do not early out on error between here and the |
| 665 // queuing of the CreateImage task below. | 682 // queuing of the CreateImage task below. |
| 666 bool requires_sync_point = false; | 683 bool requires_sync_point = false; |
| 667 gfx::GpuMemoryBufferHandle handle = | 684 gfx::GpuMemoryBufferHandle handle = |
| 668 ShareGpuMemoryBufferToGpuThread(gpu_memory_buffer->GetHandle(), | 685 ShareGpuMemoryBufferToGpuThread(gpu_memory_buffer->GetHandle(), |
| 669 &requires_sync_point); | 686 &requires_sync_point); |
| 670 | 687 |
| 688 SyncPointManager* sync_manager = service_->sync_point_manager(); |
| 689 const uint32_t order_num = |
| 690 sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager); |
| 691 |
| 692 uint64_t fence_sync = 0; |
| 693 if (requires_sync_point) { |
| 694 fence_sync = GenerateFenceSyncRelease(); |
| 695 |
| 696 // Previous fence syncs should be flushed already. |
| 697 DCHECK_EQ(fence_sync - 1, flushed_fence_sync_release_); |
| 698 } |
| 699 |
| 671 QueueTask(base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread, | 700 QueueTask(base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread, |
| 672 base::Unretained(this), | 701 base::Unretained(this), new_id, handle, |
| 673 new_id, | 702 gfx::Size(width, height), gpu_memory_buffer->GetFormat(), |
| 674 handle, | 703 internalformat, order_num, fence_sync)); |
| 675 gfx::Size(width, height), | |
| 676 gpu_memory_buffer->GetFormat(), | |
| 677 internalformat)); | |
| 678 | 704 |
| 679 if (requires_sync_point) { | 705 if (fence_sync) { |
| 680 gpu_memory_buffer_manager_->SetDestructionSyncPoint(gpu_memory_buffer, | 706 flushed_fence_sync_release_ = fence_sync; |
| 681 InsertSyncPoint()); | 707 SyncToken sync_token(GetNamespaceID(), GetCommandBufferID(), fence_sync); |
| 708 sync_token.SetVerifyFlush(); |
| 709 gpu_memory_buffer_manager_->SetDestructionSyncToken(gpu_memory_buffer, |
| 710 sync_token); |
| 682 } | 711 } |
| 683 | 712 |
| 684 return new_id; | 713 return new_id; |
| 685 } | 714 } |
| 686 | 715 |
| 687 void InProcessCommandBuffer::CreateImageOnGpuThread( | 716 void InProcessCommandBuffer::CreateImageOnGpuThread( |
| 688 int32 id, | 717 int32 id, |
| 689 const gfx::GpuMemoryBufferHandle& handle, | 718 const gfx::GpuMemoryBufferHandle& handle, |
| 690 const gfx::Size& size, | 719 const gfx::Size& size, |
| 691 gfx::BufferFormat format, | 720 gfx::BufferFormat format, |
| 692 uint32 internalformat) { | 721 uint32 internalformat, |
| 722 uint32_t order_num, |
| 723 uint64_t fence_sync) { |
| 724 ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(), |
| 725 order_num); |
| 693 if (!decoder_) | 726 if (!decoder_) |
| 694 return; | 727 return; |
| 695 | 728 |
| 696 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); | 729 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); |
| 697 DCHECK(image_manager); | 730 DCHECK(image_manager); |
| 698 if (image_manager->LookupImage(id)) { | 731 if (image_manager->LookupImage(id)) { |
| 699 LOG(ERROR) << "Image already exists with same ID."; | 732 LOG(ERROR) << "Image already exists with same ID."; |
| 700 return; | 733 return; |
| 701 } | 734 } |
| 702 | 735 |
| (...skipping 23 matching lines...) Expand all Loading... |
| 726 handle, size, format, internalformat, kClientId); | 759 handle, size, format, internalformat, kClientId); |
| 727 if (!image.get()) { | 760 if (!image.get()) { |
| 728 LOG(ERROR) << "Failed to create image for buffer."; | 761 LOG(ERROR) << "Failed to create image for buffer."; |
| 729 return; | 762 return; |
| 730 } | 763 } |
| 731 | 764 |
| 732 image_manager->AddImage(image.get(), id); | 765 image_manager->AddImage(image.get(), id); |
| 733 break; | 766 break; |
| 734 } | 767 } |
| 735 } | 768 } |
| 769 |
| 770 if (fence_sync) { |
| 771 sync_point_client_->ReleaseFenceSync(fence_sync); |
| 772 } |
| 736 } | 773 } |
| 737 | 774 |
| 738 void InProcessCommandBuffer::DestroyImage(int32 id) { | 775 void InProcessCommandBuffer::DestroyImage(int32 id) { |
| 739 CheckSequencedThread(); | 776 CheckSequencedThread(); |
| 740 | 777 |
| 741 QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread, | 778 QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread, |
| 742 base::Unretained(this), | 779 base::Unretained(this), |
| 743 id)); | 780 id)); |
| 744 } | 781 } |
| 745 | 782 |
| (...skipping 337 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1083 framebuffer_completeness_cache_ = | 1120 framebuffer_completeness_cache_ = |
| 1084 new gpu::gles2::FramebufferCompletenessCache; | 1121 new gpu::gles2::FramebufferCompletenessCache; |
| 1085 return framebuffer_completeness_cache_; | 1122 return framebuffer_completeness_cache_; |
| 1086 } | 1123 } |
| 1087 | 1124 |
| 1088 SyncPointManager* GpuInProcessThread::sync_point_manager() { | 1125 SyncPointManager* GpuInProcessThread::sync_point_manager() { |
| 1089 return sync_point_manager_; | 1126 return sync_point_manager_; |
| 1090 } | 1127 } |
| 1091 | 1128 |
| 1092 } // namespace gpu | 1129 } // namespace gpu |
| OLD | NEW |