OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "gpu/command_buffer/service/in_process_command_buffer.h" | 5 #include "gpu/command_buffer/service/in_process_command_buffer.h" |
6 | 6 |
7 #include <queue> | 7 #include <queue> |
8 #include <set> | 8 #include <set> |
9 #include <utility> | 9 #include <utility> |
10 | 10 |
(...skipping 115 matching lines...)
126 class SyncPointManager { | 126 class SyncPointManager { |
127 public: | 127 public: |
128 SyncPointManager(); | 128 SyncPointManager(); |
129 ~SyncPointManager(); | 129 ~SyncPointManager(); |
130 | 130 |
131 uint32 GenerateSyncPoint(); | 131 uint32 GenerateSyncPoint(); |
132 void RetireSyncPoint(uint32 sync_point); | 132 void RetireSyncPoint(uint32 sync_point); |
133 | 133 |
134 bool IsSyncPointPassed(uint32 sync_point); | 134 bool IsSyncPointPassed(uint32 sync_point); |
135 void WaitSyncPoint(uint32 sync_point); | 135 void WaitSyncPoint(uint32 sync_point); |
| 136 bool WasSyncPointGenerated(uint32 sync_point); |
136 | 137 |
137 private: | 138 private: |
138 // This lock protects access to pending_sync_points_ and next_sync_point_ and | 139 // This lock protects access to pending_sync_points_ and next_sync_point_ and |
139 // is used with the ConditionVariable to signal when a sync point is retired. | 140 // is used with the ConditionVariable to signal when a sync point is retired. |
140 base::Lock lock_; | 141 base::Lock lock_; |
141 std::set<uint32> pending_sync_points_; | 142 std::set<uint32> pending_sync_points_; |
142 uint32 next_sync_point_; | 143 uint32 next_sync_point_; |
143 base::ConditionVariable cond_var_; | 144 base::ConditionVariable cond_var_; |
144 }; | 145 }; |
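For orientation, a minimal sketch of how this manager is intended to be driven across the client and GPU threads. The actual call sites are in the elided parts of this file; the thread split shown here is an assumption for illustration, not the author's code:

  // Client side: reserve a sync point number; it stays "pending" until retired.
  uint32 sync_point = g_sync_point_manager.Get().GenerateSyncPoint();

  // GPU thread of a consuming context: block until the point is retired.
  g_sync_point_manager.Get().WaitSyncPoint(sync_point);

  // GPU thread of the producing context, once its work has completed:
  g_sync_point_manager.Get().RetireSyncPoint(sync_point);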
145 | 146 |
(...skipping 23 matching lines...)
169 return pending_sync_points_.count(sync_point) == 0; | 170 return pending_sync_points_.count(sync_point) == 0; |
170 } | 171 } |
171 | 172 |
172 void SyncPointManager::WaitSyncPoint(uint32 sync_point) { | 173 void SyncPointManager::WaitSyncPoint(uint32 sync_point) { |
173 base::AutoLock lock(lock_); | 174 base::AutoLock lock(lock_); |
174 while (pending_sync_points_.count(sync_point)) { | 175 while (pending_sync_points_.count(sync_point)) { |
175 cond_var_.Wait(); | 176 cond_var_.Wait(); |
176 } | 177 } |
177 } | 178 } |
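The wait loop above only returns once another thread erases the sync point from pending_sync_points_ and signals cond_var_. RetireSyncPoint lives in the lines skipped above; presumably it looks roughly like the following counterpart (a sketch under that assumption, not the skipped code itself):

  void SyncPointManager::RetireSyncPoint(uint32 sync_point) {
    base::AutoLock lock(lock_);
    pending_sync_points_.erase(sync_point);
    // Wake all waiters; each one re-checks its own sync point in its while loop.
    cond_var_.Broadcast();
  }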
178 | 179 |
| 180 bool SyncPointManager::WasSyncPointGenerated(uint32 sync_point) { |
| 181 base::AutoLock lock(lock_); |
| 182 return next_sync_point_ - 1 - sync_point < 0x80000000; |
| 183 } |
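WasSyncPointGenerated leans on unsigned wrap-around: a sync point counts as already generated if it trails the most recently issued value (next_sync_point_ - 1) by fewer than 2^31 steps modulo 2^32. A self-contained illustration of the same comparison, assuming sync points start at 1 as GenerateSyncPoint presumably arranges:

  // Illustrative helper, not part of the CL.
  bool WasGenerated(uint32 next_sync_point, uint32 sync_point) {
    // Distance (mod 2^32) from the queried point back to the newest issued one.
    // Already-issued points give a small distance; future points give a huge one.
    uint32 distance = next_sync_point - 1u - sync_point;
    return distance < 0x80000000u;
  }

  // With next_sync_point == 5 (points 1..4 issued):
  //   WasGenerated(5, 4) -> distance 0          -> true
  //   WasGenerated(5, 1) -> distance 3          -> true
  //   WasGenerated(5, 6) -> distance 0xFFFFFFFE -> false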
| 184 |
179 base::LazyInstance<SyncPointManager> g_sync_point_manager = | 185 base::LazyInstance<SyncPointManager> g_sync_point_manager = |
180 LAZY_INSTANCE_INITIALIZER; | 186 LAZY_INSTANCE_INITIALIZER; |
181 | 187 |
182 } // anonymous namespace | 188 } // anonymous namespace |
183 | 189 |
184 InProcessCommandBuffer::Service::Service() {} | 190 InProcessCommandBuffer::Service::Service() {} |
185 | 191 |
186 InProcessCommandBuffer::Service::~Service() {} | 192 InProcessCommandBuffer::Service::~Service() {} |
187 | 193 |
188 scoped_refptr<gles2::MailboxManager> | 194 scoped_refptr<gles2::MailboxManager> |
(...skipping 229 matching lines...)
418 LOG(ERROR) << "Could not initialize decoder."; | 424 LOG(ERROR) << "Could not initialize decoder."; |
419 DestroyOnGpuThread(); | 425 DestroyOnGpuThread(); |
420 return false; | 426 return false; |
421 } | 427 } |
422 *params.capabilities = decoder_->GetCapabilities(); | 428 *params.capabilities = decoder_->GetCapabilities(); |
423 | 429 |
424 if (!params.is_offscreen) { | 430 if (!params.is_offscreen) { |
425 decoder_->SetResizeCallback(base::Bind( | 431 decoder_->SetResizeCallback(base::Bind( |
426 &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_)); | 432 &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_)); |
427 } | 433 } |
428 decoder_->SetWaitSyncPointCallback( | |
429 base::Bind(&InProcessCommandBuffer::WaitSyncPointOnGpuThread, | |
430 base::Unretained(this))); | |
431 | 434 |
432 image_factory_ = params.image_factory; | 435 image_factory_ = params.image_factory; |
433 params.capabilities->image = params.capabilities->image && image_factory_; | 436 params.capabilities->image = params.capabilities->image && image_factory_; |
434 | 437 |
435 return true; | 438 return true; |
436 } | 439 } |
437 | 440 |
438 void InProcessCommandBuffer::Destroy() { | 441 void InProcessCommandBuffer::Destroy() { |
439 CheckSequencedThread(); | 442 CheckSequencedThread(); |
440 | 443 |
(...skipping 53 matching lines...)
494 CheckSequencedThread(); | 497 CheckSequencedThread(); |
495 return last_state_; | 498 return last_state_; |
496 } | 499 } |
497 | 500 |
498 int32 InProcessCommandBuffer::GetLastToken() { | 501 int32 InProcessCommandBuffer::GetLastToken() { |
499 CheckSequencedThread(); | 502 CheckSequencedThread(); |
500 GetStateFast(); | 503 GetStateFast(); |
501 return last_state_.token; | 504 return last_state_.token; |
502 } | 505 } |
503 | 506 |
504 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) { | 507 void InProcessCommandBuffer::FlushOnGpuThread( |
| 508 int32 put_offset, |
| 509 const std::vector<uint32>& sync_points) { |
505 CheckSequencedThread(); | 510 CheckSequencedThread(); |
| 511 for (uint32 sync_point : sync_points) { |
| 512 WaitSyncPointOnGpuThread(sync_point); |
| 513 } |
506 ScopedEvent handle_flush(&flush_event_); | 514 ScopedEvent handle_flush(&flush_event_); |
507 base::AutoLock lock(command_buffer_lock_); | 515 base::AutoLock lock(command_buffer_lock_); |
508 command_buffer_->Flush(put_offset); | 516 command_buffer_->Flush(put_offset, sync_points); |
509 { | 517 { |
510 // Update state before signaling the flush event. | 518 // Update state before signaling the flush event. |
511 base::AutoLock lock(state_after_last_flush_lock_); | 519 base::AutoLock lock(state_after_last_flush_lock_); |
512 state_after_last_flush_ = command_buffer_->GetLastState(); | 520 state_after_last_flush_ = command_buffer_->GetLastState(); |
513 } | 521 } |
514 DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) || | 522 DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) || |
515 (error::IsError(state_after_last_flush_.error) && context_lost_)); | 523 (error::IsError(state_after_last_flush_.error) && context_lost_)); |
516 | 524 |
517 // If we've processed all pending commands but still have pending queries, | 525 // If we've processed all pending commands but still have pending queries, |
518 // pump idle work until the query is passed. | 526 // pump idle work until the query is passed. |
(...skipping 16 matching lines...)
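FlushOnGpuThread now drains every client-supplied sync point before taking command_buffer_lock_ and executing the flush, so the flushed commands cannot run until their prerequisites have been retired. WaitSyncPointOnGpuThread itself sits in the elided lines; presumably it simply forwards to the global manager, along these lines (an assumption, not the skipped code):

  void InProcessCommandBuffer::WaitSyncPointOnGpuThread(uint32 sync_point) {
    g_sync_point_manager.Get().WaitSyncPoint(sync_point);
  }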
535 void InProcessCommandBuffer::ScheduleIdleWorkOnGpuThread() { | 543 void InProcessCommandBuffer::ScheduleIdleWorkOnGpuThread() { |
536 CheckSequencedThread(); | 544 CheckSequencedThread(); |
537 if (idle_work_pending_) | 545 if (idle_work_pending_) |
538 return; | 546 return; |
539 idle_work_pending_ = true; | 547 idle_work_pending_ = true; |
540 service_->ScheduleIdleWork( | 548 service_->ScheduleIdleWork( |
541 base::Bind(&InProcessCommandBuffer::PerformIdleWork, | 549 base::Bind(&InProcessCommandBuffer::PerformIdleWork, |
542 gpu_thread_weak_ptr_)); | 550 gpu_thread_weak_ptr_)); |
543 } | 551 } |
544 | 552 |
545 void InProcessCommandBuffer::Flush(int32 put_offset) { | 553 void InProcessCommandBuffer::Flush(int32 put_offset, |
| 554 const std::vector<uint32>& sync_points) { |
546 CheckSequencedThread(); | 555 CheckSequencedThread(); |
547 if (last_state_.error != gpu::error::kNoError) | 556 if (last_state_.error != gpu::error::kNoError) |
548 return; | 557 return; |
549 | 558 |
550 if (last_put_offset_ == put_offset) | 559 if (last_put_offset_ == put_offset) |
551 return; | 560 return; |
552 | 561 |
553 last_put_offset_ = put_offset; | 562 last_put_offset_ = put_offset; |
554 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, | 563 std::vector<uint32> sanitized_sync_points; |
555 gpu_thread_weak_ptr_, | 564 for (uint32 sync_point : sync_points) { |
556 put_offset); | 565 if (g_sync_point_manager.Get().WasSyncPointGenerated(sync_point)) |
| 566 sanitized_sync_points.push_back(sync_point); |
| 567 } |
| 568 |
| 569 base::Closure task = |
| 570 base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, |
| 571 gpu_thread_weak_ptr_, put_offset, sanitized_sync_points); |
557 QueueTask(task); | 572 QueueTask(task); |
558 } | 573 } |
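The client-side filtering matters because a value that never came out of GenerateSyncPoint could still be generated later; if the GPU thread then waited on it while the flush that retires it sits behind this one in the same queue, the GPU thread would deadlock. Dropping unknown values up front avoids that. A hypothetical caller, where InsertSyncPoint and the variable names are illustrative and not part of this hunk:

  // Wait on work from another in-process context before executing this flush.
  uint32 wait_point = other_context->InsertSyncPoint();
  this_context->Flush(put_offset, std::vector<uint32>(1, wait_point));
  // A stale or garbage value is silently dropped by the sanitizing loop above,
  // so it never reaches WaitSyncPointOnGpuThread.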
559 | 574 |
560 void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) { | 575 void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) { |
561 CheckSequencedThread(); | 576 CheckSequencedThread(); |
562 while (!InRange(start, end, GetLastToken()) && | 577 while (!InRange(start, end, GetLastToken()) && |
563 last_state_.error == gpu::error::kNoError) | 578 last_state_.error == gpu::error::kNoError) |
564 flush_event_.Wait(); | 579 flush_event_.Wait(); |
565 } | 580 } |
566 | 581 |
(...skipping 301 matching lines...)
868 | 883 |
869 #if defined(OS_ANDROID) | 884 #if defined(OS_ANDROID) |
870 scoped_refptr<gfx::SurfaceTexture> | 885 scoped_refptr<gfx::SurfaceTexture> |
871 InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) { | 886 InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) { |
872 DCHECK(stream_texture_manager_); | 887 DCHECK(stream_texture_manager_); |
873 return stream_texture_manager_->GetSurfaceTexture(stream_id); | 888 return stream_texture_manager_->GetSurfaceTexture(stream_id); |
874 } | 889 } |
875 #endif | 890 #endif |
876 | 891 |
877 } // namespace gpu | 892 } // namespace gpu |
OLD | NEW |