OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "gpu/command_buffer/service/in_process_command_buffer.h" | 5 #include "gpu/command_buffer/service/in_process_command_buffer.h" |
6 | 6 |
7 #include <stddef.h> | 7 #include <stddef.h> |
8 #include <stdint.h> | 8 #include <stdint.h> |
9 | 9 |
10 #include <queue> | 10 #include <queue> |
11 #include <set> | 11 #include <set> |
12 #include <utility> | 12 #include <utility> |
13 | 13 |
14 #include "base/bind.h" | 14 #include "base/bind.h" |
15 #include "base/bind_helpers.h" | 15 #include "base/bind_helpers.h" |
16 #include "base/command_line.h" | 16 #include "base/command_line.h" |
17 #include "base/lazy_instance.h" | 17 #include "base/lazy_instance.h" |
18 #include "base/location.h" | 18 #include "base/location.h" |
19 #include "base/logging.h" | 19 #include "base/logging.h" |
20 #include "base/memory/weak_ptr.h" | 20 #include "base/memory/weak_ptr.h" |
21 #include "base/numerics/safe_conversions.h" | 21 #include "base/numerics/safe_conversions.h" |
22 #include "base/sequence_checker.h" | 22 #include "base/sequence_checker.h" |
23 #include "base/single_thread_task_runner.h" | 23 #include "base/single_thread_task_runner.h" |
24 #include "base/thread_task_runner_handle.h" | 24 #include "base/thread_task_runner_handle.h" |
| 25 #include "gpu/command_buffer/client/gpu_control_client.h" |
25 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" | 26 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" |
26 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h" | 27 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h" |
27 #include "gpu/command_buffer/common/sync_token.h" | 28 #include "gpu/command_buffer/common/sync_token.h" |
28 #include "gpu/command_buffer/common/value_state.h" | 29 #include "gpu/command_buffer/common/value_state.h" |
29 #include "gpu/command_buffer/service/command_buffer_service.h" | 30 #include "gpu/command_buffer/service/command_buffer_service.h" |
30 #include "gpu/command_buffer/service/command_executor.h" | 31 #include "gpu/command_buffer/service/command_executor.h" |
31 #include "gpu/command_buffer/service/context_group.h" | 32 #include "gpu/command_buffer/service/context_group.h" |
32 #include "gpu/command_buffer/service/gl_context_virtual.h" | 33 #include "gpu/command_buffer/service/gl_context_virtual.h" |
33 #include "gpu/command_buffer/service/gpu_preferences.h" | 34 #include "gpu/command_buffer/service/gpu_preferences.h" |
34 #include "gpu/command_buffer/service/image_factory.h" | 35 #include "gpu/command_buffer/service/image_factory.h" |
(...skipping 168 matching lines...) |
203 gpu_preferences().gpu_program_cache_size, | 204 gpu_preferences().gpu_program_cache_size, |
204 gpu_preferences().disable_gpu_shader_disk_cache)); | 205 gpu_preferences().disable_gpu_shader_disk_cache)); |
205 } | 206 } |
206 return program_cache_.get(); | 207 return program_cache_.get(); |
207 } | 208 } |
208 | 209 |
209 InProcessCommandBuffer::InProcessCommandBuffer( | 210 InProcessCommandBuffer::InProcessCommandBuffer( |
210 const scoped_refptr<Service>& service) | 211 const scoped_refptr<Service>& service) |
211 : command_buffer_id_( | 212 : command_buffer_id_( |
212 CommandBufferId::FromUnsafeValue(g_next_command_buffer_id.GetNext())), | 213 CommandBufferId::FromUnsafeValue(g_next_command_buffer_id.GetNext())), |
| 214 gpu_control_client_(nullptr), |
| 215 context_lost_(false), |
213 delayed_work_pending_(false), | 216 delayed_work_pending_(false), |
214 image_factory_(nullptr), | 217 image_factory_(nullptr), |
215 last_put_offset_(-1), | 218 last_put_offset_(-1), |
216 gpu_memory_buffer_manager_(nullptr), | 219 gpu_memory_buffer_manager_(nullptr), |
217 next_fence_sync_release_(1), | 220 next_fence_sync_release_(1), |
218 flushed_fence_sync_release_(0), | 221 flushed_fence_sync_release_(0), |
219 flush_event_(false, false), | 222 flush_event_(false, false), |
220 service_(GetInitialService(service)), | 223 service_(GetInitialService(service)), |
221 fence_sync_wait_event_(false, false), | 224 fence_sync_wait_event_(false, false), |
| 225 client_thread_weak_ptr_factory_(this), |
222 gpu_thread_weak_ptr_factory_(this) { | 226 gpu_thread_weak_ptr_factory_(this) { |
223 DCHECK(service_.get()); | 227 DCHECK(service_.get()); |
224 next_image_id_.GetNext(); | 228 next_image_id_.GetNext(); |
225 } | 229 } |
226 | 230 |
227 InProcessCommandBuffer::~InProcessCommandBuffer() { | 231 InProcessCommandBuffer::~InProcessCommandBuffer() { |
228 Destroy(); | 232 Destroy(); |
229 } | 233 } |
230 | 234 |
231 bool InProcessCommandBuffer::MakeCurrent() { | 235 bool InProcessCommandBuffer::MakeCurrent() { |
232 CheckSequencedThread(); | 236 CheckSequencedThread(); |
233 command_buffer_lock_.AssertAcquired(); | 237 command_buffer_lock_.AssertAcquired(); |
234 | 238 |
235 if (error::IsError(command_buffer_->GetLastState().error)) { | 239 if (error::IsError(command_buffer_->GetLastState().error)) { |
236 DLOG(ERROR) << "MakeCurrent failed because context lost."; | 240 DLOG(ERROR) << "MakeCurrent failed because context lost."; |
237 return false; | 241 return false; |
238 } | 242 } |
239 if (!decoder_->MakeCurrent()) { | 243 if (!decoder_->MakeCurrent()) { |
240 DLOG(ERROR) << "Context lost because MakeCurrent failed."; | 244 DLOG(ERROR) << "Context lost because MakeCurrent failed."; |
241 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); | 245 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); |
242 command_buffer_->SetParseError(gpu::error::kLostContext); | 246 command_buffer_->SetParseError(gpu::error::kLostContext); |
243 return false; | 247 return false; |
244 } | 248 } |
245 return true; | 249 return true; |
246 } | 250 } |
247 | 251 |
248 void InProcessCommandBuffer::PumpCommands() { | 252 void InProcessCommandBuffer::PumpCommandsOnGpuThread() { |
249 CheckSequencedThread(); | 253 CheckSequencedThread(); |
250 command_buffer_lock_.AssertAcquired(); | 254 command_buffer_lock_.AssertAcquired(); |
251 | 255 |
252 if (!MakeCurrent()) | 256 if (!MakeCurrent()) |
253 return; | 257 return; |
254 | 258 |
255 executor_->PutChanged(); | 259 executor_->PutChanged(); |
256 } | 260 } |
257 | 261 |
258 bool InProcessCommandBuffer::GetBufferChanged(int32_t transfer_buffer_id) { | |
259 CheckSequencedThread(); | |
260 command_buffer_lock_.AssertAcquired(); | |
261 command_buffer_->SetGetBuffer(transfer_buffer_id); | |
262 return true; | |
263 } | |
264 | |
265 bool InProcessCommandBuffer::Initialize( | 262 bool InProcessCommandBuffer::Initialize( |
266 scoped_refptr<gfx::GLSurface> surface, | 263 scoped_refptr<gfx::GLSurface> surface, |
267 bool is_offscreen, | 264 bool is_offscreen, |
268 gfx::AcceleratedWidget window, | 265 gfx::AcceleratedWidget window, |
269 const gfx::Size& size, | 266 const gfx::Size& size, |
270 const std::vector<int32_t>& attribs, | 267 const std::vector<int32_t>& attribs, |
271 gfx::GpuPreference gpu_preference, | 268 gfx::GpuPreference gpu_preference, |
272 const base::Closure& context_lost_callback, | |
273 InProcessCommandBuffer* share_group, | 269 InProcessCommandBuffer* share_group, |
274 GpuMemoryBufferManager* gpu_memory_buffer_manager, | 270 GpuMemoryBufferManager* gpu_memory_buffer_manager, |
275 ImageFactory* image_factory) { | 271 ImageFactory* image_factory) { |
276 DCHECK(!share_group || service_.get() == share_group->service_.get()); | 272 DCHECK(!share_group || service_.get() == share_group->service_.get()); |
277 context_lost_callback_ = WrapCallback(context_lost_callback); | |
278 | 273 |
279 if (surface.get()) { | 274 if (surface.get()) { |
280 // GPU thread must be the same as client thread due to GLSurface not being | 275 // GPU thread must be the same as client thread due to GLSurface not being |
281 // thread safe. | 276 // thread safe. |
282 sequence_checker_.reset(new base::SequenceChecker); | 277 sequence_checker_.reset(new base::SequenceChecker); |
283 surface_ = surface; | 278 surface_ = surface; |
284 } | 279 } |
285 | 280 |
| 281 origin_task_runner_ = base::ThreadTaskRunnerHandle::Get(); |
| 282 client_thread_weak_ptr_ = client_thread_weak_ptr_factory_.GetWeakPtr(); |
| 283 |
286 gpu::Capabilities capabilities; | 284 gpu::Capabilities capabilities; |
287 InitializeOnGpuThreadParams params(is_offscreen, | 285 InitializeOnGpuThreadParams params(is_offscreen, |
288 window, | 286 window, |
289 size, | 287 size, |
290 attribs, | 288 attribs, |
291 gpu_preference, | 289 gpu_preference, |
292 &capabilities, | 290 &capabilities, |
293 share_group, | 291 share_group, |
294 image_factory); | 292 image_factory); |
295 | 293 |
(...skipping 25 matching lines...) |
321 | 319 |
322 DCHECK(params.size.width() >= 0 && params.size.height() >= 0); | 320 DCHECK(params.size.width() >= 0 && params.size.height() >= 0); |
323 | 321 |
324 TransferBufferManager* manager = new TransferBufferManager(nullptr); | 322 TransferBufferManager* manager = new TransferBufferManager(nullptr); |
325 transfer_buffer_manager_ = manager; | 323 transfer_buffer_manager_ = manager; |
326 manager->Initialize(); | 324 manager->Initialize(); |
327 | 325 |
328 scoped_ptr<CommandBufferService> command_buffer( | 326 scoped_ptr<CommandBufferService> command_buffer( |
329 new CommandBufferService(transfer_buffer_manager_.get())); | 327 new CommandBufferService(transfer_buffer_manager_.get())); |
330 command_buffer->SetPutOffsetChangeCallback(base::Bind( | 328 command_buffer->SetPutOffsetChangeCallback(base::Bind( |
331 &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_)); | 329 &InProcessCommandBuffer::PumpCommandsOnGpuThread, gpu_thread_weak_ptr_)); |
332 command_buffer->SetParseErrorCallback(base::Bind( | 330 command_buffer->SetParseErrorCallback(base::Bind( |
333 &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_)); | 331 &InProcessCommandBuffer::OnContextLostOnGpuThread, gpu_thread_weak_ptr_)); |
334 | 332 |
335 if (!command_buffer->Initialize()) { | 333 if (!command_buffer->Initialize()) { |
336 LOG(ERROR) << "Could not initialize command buffer."; | 334 LOG(ERROR) << "Could not initialize command buffer."; |
337 DestroyOnGpuThread(); | 335 DestroyOnGpuThread(); |
338 return false; | 336 return false; |
339 } | 337 } |
340 | 338 |
341 gl_share_group_ = params.context_group | 339 gl_share_group_ = params.context_group |
342 ? params.context_group->gl_share_group_ | 340 ? params.context_group->gl_share_group_ |
343 : service_->share_group(); | 341 : service_->share_group(); |
(...skipping 104 matching lines...) |
448 base::Bind(&InProcessCommandBuffer::WaitFenceSyncOnGpuThread, | 446 base::Bind(&InProcessCommandBuffer::WaitFenceSyncOnGpuThread, |
449 base::Unretained(this))); | 447 base::Unretained(this))); |
450 | 448 |
451 image_factory_ = params.image_factory; | 449 image_factory_ = params.image_factory; |
452 | 450 |
453 return true; | 451 return true; |
454 } | 452 } |
455 | 453 |
456 void InProcessCommandBuffer::Destroy() { | 454 void InProcessCommandBuffer::Destroy() { |
457 CheckSequencedThread(); | 455 CheckSequencedThread(); |
458 | 456 client_thread_weak_ptr_factory_.InvalidateWeakPtrs(); |
459 base::WaitableEvent completion(true, false); | 457 base::WaitableEvent completion(true, false); |
460 bool result = false; | 458 bool result = false; |
461 base::Callback<bool(void)> destroy_task = base::Bind( | 459 base::Callback<bool(void)> destroy_task = base::Bind( |
462 &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this)); | 460 &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this)); |
463 QueueTask( | 461 QueueTask( |
464 base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion)); | 462 base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion)); |
465 completion.Wait(); | 463 completion.Wait(); |
466 } | 464 } |
467 | 465 |
468 bool InProcessCommandBuffer::DestroyOnGpuThread() { | 466 bool InProcessCommandBuffer::DestroyOnGpuThread() { |
469 CheckSequencedThread(); | 467 CheckSequencedThread(); |
470 gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs(); | 468 gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs(); |
471 command_buffer_.reset(); | 469 command_buffer_.reset(); |
472 // Clean up GL resources if possible. | 470 // Clean up GL resources if possible. |
473 bool have_context = context_.get() && context_->MakeCurrent(surface_.get()); | 471 bool have_context = context_.get() && context_->MakeCurrent(surface_.get()); |
474 if (decoder_) { | 472 if (decoder_) { |
475 decoder_->Destroy(have_context); | 473 decoder_->Destroy(have_context); |
476 decoder_.reset(); | 474 decoder_.reset(); |
477 } | 475 } |
478 context_ = NULL; | 476 context_ = nullptr; |
479 surface_ = NULL; | 477 surface_ = nullptr; |
480 sync_point_client_ = NULL; | 478 sync_point_client_ = nullptr; |
481 if (sync_point_order_data_) { | 479 if (sync_point_order_data_) { |
482 sync_point_order_data_->Destroy(); | 480 sync_point_order_data_->Destroy(); |
483 sync_point_order_data_ = nullptr; | 481 sync_point_order_data_ = nullptr; |
484 } | 482 } |
485 gl_share_group_ = NULL; | 483 gl_share_group_ = nullptr; |
486 #if defined(OS_ANDROID) | 484 #if defined(OS_ANDROID) |
487 stream_texture_manager_.reset(); | 485 stream_texture_manager_.reset(); |
488 #endif | 486 #endif |
489 | 487 |
490 return true; | 488 return true; |
491 } | 489 } |
492 | 490 |
493 void InProcessCommandBuffer::CheckSequencedThread() { | 491 void InProcessCommandBuffer::CheckSequencedThread() { |
494 DCHECK(!sequence_checker_ || | 492 DCHECK(!sequence_checker_ || |
495 sequence_checker_->CalledOnValidSequencedThread()); | 493 sequence_checker_->CalledOnValidSequencedThread()); |
496 } | 494 } |
497 | 495 |
| 496 void InProcessCommandBuffer::OnContextLostOnGpuThread() { |
| 497 if (context_lost_) |
| 498 return; |
| 499 if (!origin_task_runner_ || origin_task_runner_->BelongsToCurrentThread()) |
| 500 return OnContextLost(); |
| 501 origin_task_runner_->PostTask( |
| 502 FROM_HERE, base::Bind(&InProcessCommandBuffer::OnContextLost, |
| 503 client_thread_weak_ptr_)); |
| 504 } |
| 505 |
498 void InProcessCommandBuffer::OnContextLost() { | 506 void InProcessCommandBuffer::OnContextLost() { |
499 CheckSequencedThread(); | 507 CheckSequencedThread(); |
500 if (!context_lost_callback_.is_null()) { | 508 DCHECK(gpu_control_client_); |
501 context_lost_callback_.Run(); | 509 if (!context_lost_) |
502 context_lost_callback_.Reset(); | 510 gpu_control_client_->OnGpuControlLostContext(); |
503 } | 511 context_lost_ = true; |
504 } | 512 } |
505 | 513 |
506 CommandBuffer::State InProcessCommandBuffer::GetStateFast() { | 514 CommandBuffer::State InProcessCommandBuffer::GetStateFast() { |
507 CheckSequencedThread(); | 515 CheckSequencedThread(); |
508 base::AutoLock lock(state_after_last_flush_lock_); | 516 base::AutoLock lock(state_after_last_flush_lock_); |
509 if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U) | 517 if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U) |
510 last_state_ = state_after_last_flush_; | 518 last_state_ = state_after_last_flush_; |
511 return last_state_; | 519 return last_state_; |
512 } | 520 } |
513 | 521 |
(...skipping 33 matching lines...) |
547 } | 555 } |
548 | 556 |
549 // If we've processed all pending commands but still have pending queries, | 557 // If we've processed all pending commands but still have pending queries, |
550 // pump idle work until the query is passed. | 558 // pump idle work until the query is passed. |
551 if (put_offset == state_after_last_flush_.get_offset && | 559 if (put_offset == state_after_last_flush_.get_offset && |
552 (executor_->HasMoreIdleWork() || executor_->HasPendingQueries())) { | 560 (executor_->HasMoreIdleWork() || executor_->HasPendingQueries())) { |
553 ScheduleDelayedWorkOnGpuThread(); | 561 ScheduleDelayedWorkOnGpuThread(); |
554 } | 562 } |
555 } | 563 } |
556 | 564 |
557 void InProcessCommandBuffer::PerformDelayedWork() { | 565 void InProcessCommandBuffer::PerformDelayedWorkOnGpuThread() { |
558 CheckSequencedThread(); | 566 CheckSequencedThread(); |
559 delayed_work_pending_ = false; | 567 delayed_work_pending_ = false; |
560 base::AutoLock lock(command_buffer_lock_); | 568 base::AutoLock lock(command_buffer_lock_); |
561 if (MakeCurrent()) { | 569 if (MakeCurrent()) { |
562 executor_->PerformIdleWork(); | 570 executor_->PerformIdleWork(); |
563 executor_->ProcessPendingQueries(); | 571 executor_->ProcessPendingQueries(); |
564 if (executor_->HasMoreIdleWork() || executor_->HasPendingQueries()) { | 572 if (executor_->HasMoreIdleWork() || executor_->HasPendingQueries()) { |
565 ScheduleDelayedWorkOnGpuThread(); | 573 ScheduleDelayedWorkOnGpuThread(); |
566 } | 574 } |
567 } | 575 } |
568 } | 576 } |
569 | 577 |
570 void InProcessCommandBuffer::ScheduleDelayedWorkOnGpuThread() { | 578 void InProcessCommandBuffer::ScheduleDelayedWorkOnGpuThread() { |
571 CheckSequencedThread(); | 579 CheckSequencedThread(); |
572 if (delayed_work_pending_) | 580 if (delayed_work_pending_) |
573 return; | 581 return; |
574 delayed_work_pending_ = true; | 582 delayed_work_pending_ = true; |
575 service_->ScheduleDelayedWork(base::Bind( | 583 service_->ScheduleDelayedWork( |
576 &InProcessCommandBuffer::PerformDelayedWork, gpu_thread_weak_ptr_)); | 584 base::Bind(&InProcessCommandBuffer::PerformDelayedWorkOnGpuThread, |
| 585 gpu_thread_weak_ptr_)); |
577 } | 586 } |
578 | 587 |
579 void InProcessCommandBuffer::Flush(int32_t put_offset) { | 588 void InProcessCommandBuffer::Flush(int32_t put_offset) { |
580 CheckSequencedThread(); | 589 CheckSequencedThread(); |
581 if (last_state_.error != gpu::error::kNoError) | 590 if (last_state_.error != gpu::error::kNoError) |
582 return; | 591 return; |
583 | 592 |
584 if (last_put_offset_ == put_offset) | 593 if (last_put_offset_ == put_offset) |
585 return; | 594 return; |
586 | 595 |
(...skipping 76 matching lines...) |
663 id); | 672 id); |
664 | 673 |
665 QueueTask(task); | 674 QueueTask(task); |
666 } | 675 } |
667 | 676 |
668 void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32_t id) { | 677 void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32_t id) { |
669 base::AutoLock lock(command_buffer_lock_); | 678 base::AutoLock lock(command_buffer_lock_); |
670 command_buffer_->DestroyTransferBuffer(id); | 679 command_buffer_->DestroyTransferBuffer(id); |
671 } | 680 } |
672 | 681 |
| 682 void InProcessCommandBuffer::SetGpuControlClient(GpuControlClient* client) { |
| 683 gpu_control_client_ = client; |
| 684 } |
| 685 |
673 gpu::Capabilities InProcessCommandBuffer::GetCapabilities() { | 686 gpu::Capabilities InProcessCommandBuffer::GetCapabilities() { |
674 return capabilities_; | 687 return capabilities_; |
675 } | 688 } |
676 | 689 |
677 int32_t InProcessCommandBuffer::CreateImage(ClientBuffer buffer, | 690 int32_t InProcessCommandBuffer::CreateImage(ClientBuffer buffer, |
678 size_t width, | 691 size_t width, |
679 size_t height, | 692 size_t height, |
680 unsigned internalformat) { | 693 unsigned internalformat) { |
681 CheckSequencedThread(); | 694 CheckSequencedThread(); |
682 | 695 |
(...skipping 412 matching lines...) |
1095 framebuffer_completeness_cache_ = | 1108 framebuffer_completeness_cache_ = |
1096 new gpu::gles2::FramebufferCompletenessCache; | 1109 new gpu::gles2::FramebufferCompletenessCache; |
1097 return framebuffer_completeness_cache_; | 1110 return framebuffer_completeness_cache_; |
1098 } | 1111 } |
1099 | 1112 |
1100 SyncPointManager* GpuInProcessThread::sync_point_manager() { | 1113 SyncPointManager* GpuInProcessThread::sync_point_manager() { |
1101 return sync_point_manager_; | 1114 return sync_point_manager_; |
1102 } | 1115 } |
1103 | 1116 |
1104 } // namespace gpu | 1117 } // namespace gpu |
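
Note on the GpuControlClient change above: the context-lost base::Closure that Initialize() used to take is gone; a client now registers a GpuControlClient via SetGpuControlClient() and is told about context loss through OnGpuControlLostContext(), at most once (the context_lost_ flag latches after the first notification). The snippet below is a minimal, self-contained model of that pattern, not the real gpu/ API: the actual GpuControlClient interface in gpu/command_buffer/client/gpu_control_client.h may declare additional methods, and the FakeInProcessCommandBuffer and LoggingClient classes here exist only for illustration.

// Standalone sketch of the notify-once context-lost pattern from this CL.
#include <iostream>

class GpuControlClient {
 public:
  virtual ~GpuControlClient() = default;
  // Mirrors the call made in InProcessCommandBuffer::OnContextLost().
  virtual void OnGpuControlLostContext() = 0;
};

class FakeInProcessCommandBuffer {
 public:
  // Mirrors InProcessCommandBuffer::SetGpuControlClient().
  void SetGpuControlClient(GpuControlClient* client) {
    gpu_control_client_ = client;
  }

  // Models OnContextLost(): notify the client exactly once, then latch the
  // context_lost_ flag so later parse errors do not re-notify.
  void OnContextLost() {
    if (!context_lost_ && gpu_control_client_)
      gpu_control_client_->OnGpuControlLostContext();
    context_lost_ = true;
  }

 private:
  GpuControlClient* gpu_control_client_ = nullptr;
  bool context_lost_ = false;
};

class LoggingClient : public GpuControlClient {
 public:
  void OnGpuControlLostContext() override {
    std::cout << "GPU context lost; tearing down client GL state.\n";
  }
};

int main() {
  FakeInProcessCommandBuffer command_buffer;
  LoggingClient client;
  command_buffer.SetGpuControlClient(&client);
  command_buffer.OnContextLost();  // Client is notified.
  command_buffer.OnContextLost();  // Second loss is ignored: already latched.
  return 0;
}

Running the model prints the log line once even though OnContextLost() is invoked twice, mirroring the guard added in InProcessCommandBuffer::OnContextLost().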