OLD | NEW |
---|---|
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "gpu/command_buffer/service/in_process_command_buffer.h" | 5 #include "gpu/command_buffer/service/in_process_command_buffer.h" |
6 | 6 |
7 #include <queue> | 7 #include <queue> |
8 #include <set> | 8 #include <set> |
9 #include <utility> | 9 #include <utility> |
10 | 10 |
(...skipping 326 matching lines...)
337 else | 337 else |
338 surface_ = gfx::GLSurface::CreateViewGLSurface(params.window); | 338 surface_ = gfx::GLSurface::CreateViewGLSurface(params.window); |
339 } | 339 } |
340 | 340 |
341 if (!surface_.get()) { | 341 if (!surface_.get()) { |
342 LOG(ERROR) << "Could not create GLSurface."; | 342 LOG(ERROR) << "Could not create GLSurface."; |
343 DestroyOnGpuThread(); | 343 DestroyOnGpuThread(); |
344 return false; | 344 return false; |
345 } | 345 } |
346 | 346 |
347 sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient( | |
348 GetNamespaceID(), GetCommandBufferID()); | |
349 | |
347 if (service_->UseVirtualizedGLContexts() || | 350 if (service_->UseVirtualizedGLContexts() || |
348 decoder_->GetContextGroup() | 351 decoder_->GetContextGroup() |
349 ->feature_info() | 352 ->feature_info() |
350 ->workarounds() | 353 ->workarounds() |
351 .use_virtualized_gl_contexts) { | 354 .use_virtualized_gl_contexts) { |
352 context_ = gl_share_group_->GetSharedContext(); | 355 context_ = gl_share_group_->GetSharedContext(); |
353 if (!context_.get()) { | 356 if (!context_.get()) { |
354 context_ = gfx::GLContext::CreateGLContext( | 357 context_ = gfx::GLContext::CreateGLContext( |
355 gl_share_group_.get(), surface_.get(), params.gpu_preference); | 358 gl_share_group_.get(), surface_.get(), params.gpu_preference); |
356 gl_share_group_->SetSharedContext(context_.get()); | 359 gl_share_group_->SetSharedContext(context_.get()); |
(...skipping 75 matching lines...)
432 gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs(); | 435 gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs(); |
433 command_buffer_.reset(); | 436 command_buffer_.reset(); |
434 // Clean up GL resources if possible. | 437 // Clean up GL resources if possible. |
435 bool have_context = context_.get() && context_->MakeCurrent(surface_.get()); | 438 bool have_context = context_.get() && context_->MakeCurrent(surface_.get()); |
436 if (decoder_) { | 439 if (decoder_) { |
437 decoder_->Destroy(have_context); | 440 decoder_->Destroy(have_context); |
438 decoder_.reset(); | 441 decoder_.reset(); |
439 } | 442 } |
440 context_ = NULL; | 443 context_ = NULL; |
441 surface_ = NULL; | 444 surface_ = NULL; |
445 sync_point_client_ = NULL; | |
442 gl_share_group_ = NULL; | 446 gl_share_group_ = NULL; |
443 #if defined(OS_ANDROID) | 447 #if defined(OS_ANDROID) |
444 stream_texture_manager_.reset(); | 448 stream_texture_manager_.reset(); |
445 #endif | 449 #endif |
446 | 450 |
447 return true; | 451 return true; |
448 } | 452 } |
449 | 453 |
450 void InProcessCommandBuffer::CheckSequencedThread() { | 454 void InProcessCommandBuffer::CheckSequencedThread() { |
451 DCHECK(!sequence_checker_ || | 455 DCHECK(!sequence_checker_ || |
(...skipping 22 matching lines...)
474 CheckSequencedThread(); | 478 CheckSequencedThread(); |
475 return last_state_; | 479 return last_state_; |
476 } | 480 } |
477 | 481 |
478 int32 InProcessCommandBuffer::GetLastToken() { | 482 int32 InProcessCommandBuffer::GetLastToken() { |
479 CheckSequencedThread(); | 483 CheckSequencedThread(); |
480 GetStateFast(); | 484 GetStateFast(); |
481 return last_state_.token; | 485 return last_state_.token; |
482 } | 486 } |
483 | 487 |
484 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) { | 488 void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset, |
489 uint32_t order_num) { | |
485 CheckSequencedThread(); | 490 CheckSequencedThread(); |
486 ScopedEvent handle_flush(&flush_event_); | 491 ScopedEvent handle_flush(&flush_event_); |
487 base::AutoLock lock(command_buffer_lock_); | 492 base::AutoLock lock(command_buffer_lock_); |
493 | |
494 sync_point_client_->BeginProcessingOrderNumber(order_num); | |
488 command_buffer_->Flush(put_offset); | 495 command_buffer_->Flush(put_offset); |
489 { | 496 { |
490 // Update state before signaling the flush event. | 497 // Update state before signaling the flush event. |
491 base::AutoLock lock(state_after_last_flush_lock_); | 498 base::AutoLock lock(state_after_last_flush_lock_); |
492 state_after_last_flush_ = command_buffer_->GetLastState(); | 499 state_after_last_flush_ = command_buffer_->GetLastState(); |
493 } | 500 } |
494 DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) || | 501 DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) || |
495 (error::IsError(state_after_last_flush_.error) && context_lost_)); | 502 (error::IsError(state_after_last_flush_.error) && context_lost_)); |
503 DCHECK(context_lost_ || put_offset == state_after_last_flush_.get_offset); | |
piman (2015/09/18 21:39:15): I don't think this DCHECK is true, if the command …
David Yen (2015/09/18 22:39:58): Yeah, at least currently it can't happen. If it do …
504 sync_point_client_->FinishProcessingOrderNumber(order_num); | |
496 | 505 |
497 // If we've processed all pending commands but still have pending queries, | 506 // If we've processed all pending commands but still have pending queries, |
498 // pump idle work until the query is passed. | 507 // pump idle work until the query is passed. |
499 if (put_offset == state_after_last_flush_.get_offset && | 508 if (put_offset == state_after_last_flush_.get_offset && |
500 (gpu_scheduler_->HasMoreIdleWork() || | 509 (gpu_scheduler_->HasMoreIdleWork() || |
501 gpu_scheduler_->HasPendingQueries())) { | 510 gpu_scheduler_->HasPendingQueries())) { |
502 ScheduleDelayedWorkOnGpuThread(); | 511 ScheduleDelayedWorkOnGpuThread(); |
503 } | 512 } |
504 } | 513 } |
505 | 514 |
(...skipping 21 matching lines...)
527 } | 536 } |
528 | 537 |
529 void InProcessCommandBuffer::Flush(int32 put_offset) { | 538 void InProcessCommandBuffer::Flush(int32 put_offset) { |
530 CheckSequencedThread(); | 539 CheckSequencedThread(); |
531 if (last_state_.error != gpu::error::kNoError) | 540 if (last_state_.error != gpu::error::kNoError) |
532 return; | 541 return; |
533 | 542 |
534 if (last_put_offset_ == put_offset) | 543 if (last_put_offset_ == put_offset) |
535 return; | 544 return; |
536 | 545 |
546 uint32_t order_num = sync_point_client_->GenerateUnprocessedOrderNumber(); | |
537 last_put_offset_ = put_offset; | 547 last_put_offset_ = put_offset; |
538 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, | 548 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, |
539 gpu_thread_weak_ptr_, | 549 gpu_thread_weak_ptr_, |
540 put_offset); | 550 put_offset, |
551 order_num); | |
541 QueueTask(task); | 552 QueueTask(task); |
542 } | 553 } |
543 | 554 |
544 void InProcessCommandBuffer::OrderingBarrier(int32 put_offset) { | 555 void InProcessCommandBuffer::OrderingBarrier(int32 put_offset) { |
545 Flush(put_offset); | 556 Flush(put_offset); |
546 } | 557 } |
547 | 558 |
548 void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) { | 559 void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) { |
549 CheckSequencedThread(); | 560 CheckSequencedThread(); |
550 while (!InRange(start, end, GetLastToken()) && | 561 while (!InRange(start, end, GetLastToken()) && |
(...skipping 416 matching lines...)
967 framebuffer_completeness_cache_ = | 978 framebuffer_completeness_cache_ = |
968 new gpu::gles2::FramebufferCompletenessCache; | 979 new gpu::gles2::FramebufferCompletenessCache; |
969 return framebuffer_completeness_cache_; | 980 return framebuffer_completeness_cache_; |
970 } | 981 } |
971 | 982 |
972 SyncPointManager* GpuInProcessThread::sync_point_manager() { | 983 SyncPointManager* GpuInProcessThread::sync_point_manager() { |
973 return sync_point_manager_; | 984 return sync_point_manager_; |
974 } | 985 } |
975 | 986 |
976 } // namespace gpu | 987 } // namespace gpu |
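
For context on the pattern this patch introduces, the sketch below mirrors the three calls visible in the diff: Flush() reserves an order number with GenerateUnprocessedOrderNumber() before posting the flush task, and FlushOnGpuThread() brackets the actual flush with BeginProcessingOrderNumber() / FinishProcessingOrderNumber(). This is only a minimal toy written to show the call pattern; the real SyncPointClient comes from the service's SyncPointManager (see CreateSyncPointClient above), and the ToySyncPointClient class, its atomic counters, and main() are assumptions made purely for illustration.

// Toy illustration (not Chromium code) of the order-number bracketing
// added in this CL around FlushOnGpuThread. Everything here is a
// simplified stand-in for the real SyncPointClient.
#include <atomic>
#include <cstdint>
#include <iostream>

class ToySyncPointClient {
 public:
  // Client thread: reserve a globally ordered number for work it will queue.
  uint32_t GenerateUnprocessedOrderNumber() { return ++next_order_num_; }

  // GPU thread: bracket the actual processing of that work.
  void BeginProcessingOrderNumber(uint32_t n) { current_order_num_ = n; }
  void FinishProcessingOrderNumber(uint32_t n) { processed_order_num_ = n; }

  uint32_t processed_order_num() const { return processed_order_num_; }

 private:
  std::atomic<uint32_t> next_order_num_{0};
  std::atomic<uint32_t> current_order_num_{0};
  std::atomic<uint32_t> processed_order_num_{0};
};

int main() {
  ToySyncPointClient client;

  // "Client thread": generate the order number before queueing the flush,
  // mirroring InProcessCommandBuffer::Flush in the patch.
  uint32_t order_num = client.GenerateUnprocessedOrderNumber();

  // "GPU thread": mirror FlushOnGpuThread -- begin, do the flush work,
  // then finish so sync point waits ordered before this number can resolve.
  client.BeginProcessingOrderNumber(order_num);
  // ... command_buffer_->Flush(put_offset) would run here ...
  client.FinishProcessingOrderNumber(order_num);

  std::cout << "processed order number: " << client.processed_order_num()
            << "\n";
  return 0;
}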