Chromium Code Reviews

Unified Diff: gpu/command_buffer/service/in_process_command_buffer.cc

Issue 1339203002: Added global order numbers to in process command buffers. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: minor changes (created 5 years, 2 months ago)
 // Copyright 2013 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "gpu/command_buffer/service/in_process_command_buffer.h"

 #include <queue>
 #include <set>
 #include <utility>

(...skipping 326 matching lines...)
     else
       surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
   }

   if (!surface_.get()) {
     LOG(ERROR) << "Could not create GLSurface.";
     DestroyOnGpuThread();
     return false;
   }

+  sync_point_client_state_ = SyncPointClientState::Create();
+  sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient(
+      sync_point_client_state_,
+      GetNamespaceID(), GetCommandBufferID());
+
   if (service_->UseVirtualizedGLContexts() ||
       decoder_->GetContextGroup()
           ->feature_info()
           ->workarounds()
           .use_virtualized_gl_contexts) {
     context_ = gl_share_group_->GetSharedContext();
     if (!context_.get()) {
       context_ = gfx::GLContext::CreateGLContext(
           gl_share_group_.get(), surface_.get(), params.gpu_preference);
       gl_share_group_->SetSharedContext(context_.get());
(...skipping 75 matching lines...)
   gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
   command_buffer_.reset();
   // Clean up GL resources if possible.
   bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
   if (decoder_) {
     decoder_->Destroy(have_context);
     decoder_.reset();
   }
   context_ = NULL;
   surface_ = NULL;
+  sync_point_client_ = NULL;
+  sync_point_client_state_ = NULL;
   gl_share_group_ = NULL;
 #if defined(OS_ANDROID)
   stream_texture_manager_.reset();
 #endif

   return true;
 }

 void InProcessCommandBuffer::CheckSequencedThread() {
   DCHECK(!sequence_checker_ ||
(...skipping 22 matching lines...)
   CheckSequencedThread();
   return last_state_;
 }

 int32 InProcessCommandBuffer::GetLastToken() {
   CheckSequencedThread();
   GetStateFast();
   return last_state_.token;
 }

-void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
+void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset,
+                                              uint32_t order_num) {
   CheckSequencedThread();
   ScopedEvent handle_flush(&flush_event_);
   base::AutoLock lock(command_buffer_lock_);
+
+  sync_point_client_state_->BeginProcessingOrderNumber(order_num);
   command_buffer_->Flush(put_offset);
   {
     // Update state before signaling the flush event.
     base::AutoLock lock(state_after_last_flush_lock_);
     state_after_last_flush_ = command_buffer_->GetLastState();
   }
   DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
          (error::IsError(state_after_last_flush_.error) && context_lost_));

+  // Currently the in-process command buffer does not support being
+  // descheduled. If it did, we would need to defer the call to
+  // FinishProcessingOrderNumber() until the work is rescheduled and finishes
+  // processing. This DCHECK enforces that assumption.
+  DCHECK(context_lost_ || put_offset == state_after_last_flush_.get_offset);
+  sync_point_client_state_->FinishProcessingOrderNumber(order_num);
+
   // If we've processed all pending commands but still have pending queries,
   // pump idle work until the query is passed.
   if (put_offset == state_after_last_flush_.get_offset &&
       (gpu_scheduler_->HasMoreIdleWork() ||
        gpu_scheduler_->HasPendingQueries())) {
     ScheduleDelayedWorkOnGpuThread();
   }
 }

 void InProcessCommandBuffer::PerformDelayedWork() {
(...skipping 20 matching lines...)
 }

 void InProcessCommandBuffer::Flush(int32 put_offset) {
   CheckSequencedThread();
   if (last_state_.error != gpu::error::kNoError)
     return;

   if (last_put_offset_ == put_offset)
     return;

+  SyncPointManager* sync_manager = service_->sync_point_manager();
+  const uint32_t order_num =
+      sync_point_client_state_->GenerateUnprocessedOrderNumber(sync_manager);
   last_put_offset_ = put_offset;
   base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                   gpu_thread_weak_ptr_,
-                                  put_offset);
+                                  put_offset,
+                                  order_num);
   QueueTask(task);
 }

 void InProcessCommandBuffer::OrderingBarrier(int32 put_offset) {
   Flush(put_offset);
 }

 void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) {
   CheckSequencedThread();
   while (!InRange(start, end, GetLastToken()) &&
(...skipping 416 matching lines...)
   framebuffer_completeness_cache_ =
       new gpu::gles2::FramebufferCompletenessCache;
   return framebuffer_completeness_cache_;
 }

 SyncPointManager* GpuInProcessThread::sync_point_manager() {
   return sync_point_manager_;
 }

 }  // namespace gpu
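
Note for context (not part of the patch): the change threads a global order number from the client-side Flush() through to the GPU-thread FlushOnGpuThread(). A minimal standalone sketch of that lifecycle follows. The Toy* classes are hypothetical stand-ins invented for illustration, not the real SyncPointManager / SyncPointClientState API in gpu/command_buffer/service/sync_point_manager.h; only the GenerateUnprocessedOrderNumber / BeginProcessingOrderNumber / FinishProcessingOrderNumber call sequence mirrors what the patch actually adds.

// Standalone illustration only; Toy* classes are invented for this sketch.
#include <atomic>
#include <cassert>
#include <cstdint>
#include <iostream>

// Hands out monotonically increasing global order numbers.
class ToySyncPointManager {
 public:
  uint32_t GenerateOrderNumber() { return ++next_order_num_; }

 private:
  std::atomic<uint32_t> next_order_num_{0};
};

// Per-client record of generated vs. processed order numbers.
class ToySyncPointClientState {
 public:
  // Client thread: reserve the next global order number before queueing work.
  uint32_t GenerateUnprocessedOrderNumber(ToySyncPointManager* manager) {
    unprocessed_order_num_ = manager->GenerateOrderNumber();
    return unprocessed_order_num_;
  }

  // GPU thread: bracket the processing of that order number.
  void BeginProcessingOrderNumber(uint32_t order_num) {
    current_order_num_ = order_num;
  }
  void FinishProcessingOrderNumber(uint32_t order_num) {
    assert(order_num == current_order_num_);
    processed_order_num_ = order_num;
  }

  uint32_t processed_order_num() const { return processed_order_num_; }

 private:
  uint32_t unprocessed_order_num_ = 0;
  uint32_t current_order_num_ = 0;
  uint32_t processed_order_num_ = 0;
};

int main() {
  ToySyncPointManager manager;
  ToySyncPointClientState state;

  // Client-side Flush(): reserve an order number, then queue the GPU task.
  const uint32_t order_num = state.GenerateUnprocessedOrderNumber(&manager);

  // GPU-side FlushOnGpuThread(): begin, do the real flush, then finish.
  state.BeginProcessingOrderNumber(order_num);
  // ... command_buffer_->Flush(put_offset) would run here ...
  state.FinishProcessingOrderNumber(order_num);

  std::cout << "processed global order number: " << state.processed_order_num()
            << std::endl;
  return 0;
}

The key property is that the order number is reserved on the calling thread at Flush() time, so the global ordering across command buffers reflects submission order even though the actual processing happens later on the GPU thread.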
