Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(182)

Side by Side Diff: gpu/command_buffer/service/in_process_command_buffer.cc

Issue 1339203002: Added global order numbers to in process command buffers. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2013 The Chromium Authors. All rights reserved. 1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "gpu/command_buffer/service/in_process_command_buffer.h" 5 #include "gpu/command_buffer/service/in_process_command_buffer.h"
6 6
7 #include <queue> 7 #include <queue>
8 #include <set> 8 #include <set>
9 #include <utility> 9 #include <utility>
10 10
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
47 47
48 #if defined(OS_WIN) 48 #if defined(OS_WIN)
49 #include <windows.h> 49 #include <windows.h>
50 #include "base/process/process_handle.h" 50 #include "base/process/process_handle.h"
51 #endif 51 #endif
52 52
53 namespace gpu { 53 namespace gpu {
54 54
55 namespace { 55 namespace {
56 56
57 base::StaticAtomicSequenceNumber g_next_command_buffer_id;
58
// Runs |task| synchronously on the current thread, stores its return value
// in |result|, and then signals |completion| so a thread blocked on the
// event can proceed. |result| is written before Signal() is called, so the
// waiter is guaranteed to observe the value.
template <typename T>
static void RunTaskWithResult(base::Callback<T(void)> task,
                              T* result,
                              base::WaitableEvent* completion) {
  *result = task.Run();
  completion->Signal();
}
64 66
65 struct GpuInProcessThreadHolder { 67 struct GpuInProcessThreadHolder {
66 GpuInProcessThreadHolder() 68 GpuInProcessThreadHolder()
(...skipping 267 matching lines...) Expand 10 before | Expand all | Expand 10 after
334 else 336 else
335 surface_ = gfx::GLSurface::CreateViewGLSurface(params.window); 337 surface_ = gfx::GLSurface::CreateViewGLSurface(params.window);
336 } 338 }
337 339
338 if (!surface_.get()) { 340 if (!surface_.get()) {
339 LOG(ERROR) << "Could not create GLSurface."; 341 LOG(ERROR) << "Could not create GLSurface.";
340 DestroyOnGpuThread(); 342 DestroyOnGpuThread();
341 return false; 343 return false;
342 } 344 }
343 345
346 const int command_buffer_id = g_next_command_buffer_id.GetNext();
347 sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient(
348 kSyncPointNamespace_GpuIO,
349 static_cast<uint64_t>(command_buffer_id));
350
344 if (service_->UseVirtualizedGLContexts() || 351 if (service_->UseVirtualizedGLContexts() ||
345 decoder_->GetContextGroup() 352 decoder_->GetContextGroup()
346 ->feature_info() 353 ->feature_info()
347 ->workarounds() 354 ->workarounds()
348 .use_virtualized_gl_contexts) { 355 .use_virtualized_gl_contexts) {
349 context_ = gl_share_group_->GetSharedContext(); 356 context_ = gl_share_group_->GetSharedContext();
350 if (!context_.get()) { 357 if (!context_.get()) {
351 context_ = gfx::GLContext::CreateGLContext( 358 context_ = gfx::GLContext::CreateGLContext(
352 gl_share_group_.get(), surface_.get(), params.gpu_preference); 359 gl_share_group_.get(), surface_.get(), params.gpu_preference);
353 gl_share_group_->SetSharedContext(context_.get()); 360 gl_share_group_->SetSharedContext(context_.get());
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after
429 gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs(); 436 gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
430 command_buffer_.reset(); 437 command_buffer_.reset();
431 // Clean up GL resources if possible. 438 // Clean up GL resources if possible.
432 bool have_context = context_.get() && context_->MakeCurrent(surface_.get()); 439 bool have_context = context_.get() && context_->MakeCurrent(surface_.get());
433 if (decoder_) { 440 if (decoder_) {
434 decoder_->Destroy(have_context); 441 decoder_->Destroy(have_context);
435 decoder_.reset(); 442 decoder_.reset();
436 } 443 }
437 context_ = NULL; 444 context_ = NULL;
438 surface_ = NULL; 445 surface_ = NULL;
446 sync_point_client_ = NULL;
439 gl_share_group_ = NULL; 447 gl_share_group_ = NULL;
440 #if defined(OS_ANDROID) 448 #if defined(OS_ANDROID)
441 stream_texture_manager_.reset(); 449 stream_texture_manager_.reset();
442 #endif 450 #endif
443 451
444 return true; 452 return true;
445 } 453 }
446 454
447 void InProcessCommandBuffer::CheckSequencedThread() { 455 void InProcessCommandBuffer::CheckSequencedThread() {
448 DCHECK(!sequence_checker_ || 456 DCHECK(!sequence_checker_ ||
(...skipping 22 matching lines...) Expand all
471 CheckSequencedThread(); 479 CheckSequencedThread();
472 return last_state_; 480 return last_state_;
473 } 481 }
474 482
475 int32 InProcessCommandBuffer::GetLastToken() { 483 int32 InProcessCommandBuffer::GetLastToken() {
476 CheckSequencedThread(); 484 CheckSequencedThread();
477 GetStateFast(); 485 GetStateFast();
478 return last_state_.token; 486 return last_state_.token;
479 } 487 }
480 488
// Executes a flush on the GPU thread: processes commands up to |put_offset|
// under global order number |order_num| and publishes the resulting command
// buffer state for the client thread to read.
void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset,
                                              uint32_t order_num) {
  CheckSequencedThread();
  // Signals |flush_event_| when this scope exits, on every exit path.
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);

  // Bracket command processing with the global order number so sync point
  // machinery can observe which flush is currently being processed.
  sync_point_client_->BeginProcessingOrderNumber(order_num);
  command_buffer_->Flush(put_offset);
  {
    // Update state before signaling the flush event.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetLastState();
  }
  // The error state and |context_lost_| must agree with each other.
  DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
         (error::IsError(state_after_last_flush_.error) && context_lost_));
  // Unless the context was lost, all commands up to |put_offset| must have
  // been consumed before the order number is retired.
  DCHECK(context_lost_ || put_offset == state_after_last_flush_.get_offset);
  sync_point_client_->FinishProcessingOrderNumber(order_num);

  // If we've processed all pending commands but still have pending queries,
  // pump idle work until the query is passed.
  if (put_offset == state_after_last_flush_.get_offset &&
      gpu_scheduler_->HasMoreWork()) {
    ScheduleIdleWorkOnGpuThread();
  }
}
501 514
502 void InProcessCommandBuffer::PerformIdleWork() { 515 void InProcessCommandBuffer::PerformIdleWork() {
(...skipping 17 matching lines...) Expand all
520 } 533 }
521 534
522 void InProcessCommandBuffer::Flush(int32 put_offset) { 535 void InProcessCommandBuffer::Flush(int32 put_offset) {
523 CheckSequencedThread(); 536 CheckSequencedThread();
524 if (last_state_.error != gpu::error::kNoError) 537 if (last_state_.error != gpu::error::kNoError)
525 return; 538 return;
526 539
527 if (last_put_offset_ == put_offset) 540 if (last_put_offset_ == put_offset)
528 return; 541 return;
529 542
543 uint32_t order_num = service_->sync_point_manager()->GenerateOrderNumber();
530 last_put_offset_ = put_offset; 544 last_put_offset_ = put_offset;
531 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, 545 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
532 gpu_thread_weak_ptr_, 546 gpu_thread_weak_ptr_,
533 put_offset); 547 put_offset,
548 order_num);
534 QueueTask(task); 549 QueueTask(task);
535 } 550 }
536 551
// For the in-process command buffer an ordering barrier is implemented as a
// full flush of the commands up to |put_offset|.
void InProcessCommandBuffer::OrderingBarrier(int32 put_offset) {
  Flush(put_offset);
}
540 555
541 void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) { 556 void InProcessCommandBuffer::WaitForTokenInRange(int32 start, int32 end) {
542 CheckSequencedThread(); 557 CheckSequencedThread();
543 while (!InRange(start, end, GetLastToken()) && 558 while (!InRange(start, end, GetLastToken()) &&
(...skipping 408 matching lines...) Expand 10 before | Expand all | Expand 10 after
952 framebuffer_completeness_cache_ = 967 framebuffer_completeness_cache_ =
953 new gpu::gles2::FramebufferCompletenessCache; 968 new gpu::gles2::FramebufferCompletenessCache;
954 return framebuffer_completeness_cache_; 969 return framebuffer_completeness_cache_;
955 } 970 }
956 971
// Returns the SyncPointManager used by command buffers running on this
// in-process GPU thread. The returned pointer is not owned by the caller.
SyncPointManager* GpuInProcessThread::sync_point_manager() {
  return sync_point_manager_;
}
960 975
961 } // namespace gpu 976 } // namespace gpu
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698