| OLD | NEW |
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "gpu/command_buffer/service/gpu_scheduler.h" | 5 #include "gpu/command_buffer/service/gpu_scheduler.h" |
| 6 | 6 |
| 7 #include "base/bind.h" | 7 #include "base/bind.h" |
| 8 #include "base/command_line.h" | 8 #include "base/command_line.h" |
| 9 #include "base/compiler_specific.h" | 9 #include "base/compiler_specific.h" |
| 10 #include "base/debug/trace_event.h" | 10 #include "base/debug/trace_event.h" |
| 11 #include "base/message_loop.h" | 11 #include "base/message_loop.h" |
| 12 #include "base/time.h" | 12 #include "base/time.h" |
| 13 #include "ui/gfx/gl/gl_bindings.h" | 13 #include "ui/gfx/gl/gl_bindings.h" |
| 14 #include "ui/gfx/gl/gl_switches.h" | 14 #include "ui/gfx/gl/gl_switches.h" |
| 15 | 15 |
| 16 using ::base::SharedMemory; | 16 using ::base::SharedMemory; |
| 17 | 17 |
| 18 namespace gpu { | 18 namespace gpu { |
| 19 | 19 |
| 20 GpuScheduler::GpuScheduler(CommandBuffer* command_buffer, | 20 GpuScheduler::GpuScheduler(CommandBuffer* command_buffer, |
| 21 gles2::GLES2Decoder* decoder, | 21 gles2::GLES2Decoder* decoder, |
| 22 CommandParser* parser) | 22 CommandParser* parser) |
| 23 : command_buffer_(command_buffer), | 23 : command_buffer_(command_buffer), |
| 24 decoder_(decoder), | 24 decoder_(decoder), |
| 25 parser_(parser), | 25 parser_(parser), |
| 26 unscheduled_count_(0) { | 26 unscheduled_count_(0) { |
| 27 // Map the ring buffer and create the parser. | |
| 28 if (!parser) { | |
| 29 Buffer ring_buffer = command_buffer_->GetRingBuffer(); | |
| 30 if (ring_buffer.ptr) { | |
| 31 parser_.reset(new CommandParser(ring_buffer.ptr, | |
| 32 ring_buffer.size, | |
| 33 0, | |
| 34 ring_buffer.size, | |
| 35 0, | |
| 36 decoder_)); | |
| 37 } else { | |
| 38 parser_.reset(new CommandParser(NULL, 0, 0, 0, 0, | |
| 39 decoder_)); | |
| 40 } | |
| 41 } | |
| 42 } | 27 } |
| 43 | 28 |
| 44 GpuScheduler::~GpuScheduler() { | 29 GpuScheduler::~GpuScheduler() { |
| 45 } | 30 } |
| 46 | 31 |
| 47 void GpuScheduler::PutChanged() { | 32 void GpuScheduler::PutChanged() { |
| 48 TRACE_EVENT1("gpu", "GpuScheduler:PutChanged", "this", this); | 33 TRACE_EVENT1("gpu", "GpuScheduler:PutChanged", "this", this); |
| 49 | 34 |
| 50 CommandBuffer::State state = command_buffer_->GetState(); | 35 CommandBuffer::State state = command_buffer_->GetState(); |
| 36 |
| 37 // If there is no parser, exit. |
| 38 if (!parser_.get()) { |
| 39 DCHECK_EQ(state.get_offset, state.put_offset); |
| 40 return; |
| 41 } |
| 42 |
| 51 parser_->set_put(state.put_offset); | 43 parser_->set_put(state.put_offset); |
| 52 if (state.error != error::kNoError) | 44 if (state.error != error::kNoError) |
| 53 return; | 45 return; |
| 54 | 46 |
| 55 // Check that the GPU has passed all fences. | 47 // Check that the GPU has passed all fences. |
| 56 if (!PollUnscheduleFences()) | 48 if (!PollUnscheduleFences()) |
| 57 return; | 49 return; |
| 58 | 50 |
| 59 // One of the unschedule fence tasks might have unscheduled us. | 51 // One of the unschedule fence tasks might have unscheduled us. |
| 60 if (!IsScheduled()) | 52 if (!IsScheduled()) |
| (...skipping 54 matching lines...) |
| 115 } | 107 } |
| 116 | 108 |
| 117 Buffer GpuScheduler::GetSharedMemoryBuffer(int32 shm_id) { | 109 Buffer GpuScheduler::GetSharedMemoryBuffer(int32 shm_id) { |
| 118 return command_buffer_->GetTransferBuffer(shm_id); | 110 return command_buffer_->GetTransferBuffer(shm_id); |
| 119 } | 111 } |
| 120 | 112 |
| 121 void GpuScheduler::set_token(int32 token) { | 113 void GpuScheduler::set_token(int32 token) { |
| 122 command_buffer_->SetToken(token); | 114 command_buffer_->SetToken(token); |
| 123 } | 115 } |
| 124 | 116 |
| 117 bool GpuScheduler::SetGetBuffer(int32 transfer_buffer_id) { |
| 118 Buffer ring_buffer = command_buffer_->GetTransferBuffer(transfer_buffer_id); |
| 119 if (!ring_buffer.ptr) { |
| 120 return false; |
| 121 } |
| 122 |
| 123 if (!parser_.get()) { |
| 124 parser_.reset(new CommandParser(decoder_)); |
| 125 } |
| 126 |
| 127 parser_->SetBuffer( |
| 128 ring_buffer.ptr, |
| 129 ring_buffer.size, |
| 130 0, |
| 131 ring_buffer.size); |
| 132 |
| 133 SetGetOffset(0); |
| 134 return true; |
| 135 } |
| 136 |
| 125 bool GpuScheduler::SetGetOffset(int32 offset) { | 137 bool GpuScheduler::SetGetOffset(int32 offset) { |
| 126 if (parser_->set_get(offset)) { | 138 if (parser_->set_get(offset)) { |
| 127 command_buffer_->SetGetOffset(static_cast<int32>(parser_->get())); | 139 command_buffer_->SetGetOffset(static_cast<int32>(parser_->get())); |
| 128 return true; | 140 return true; |
| 129 } | 141 } |
| 130 return false; | 142 return false; |
| 131 } | 143 } |
| 132 | 144 |
| 133 int32 GpuScheduler::GetGetOffset() { | 145 int32 GpuScheduler::GetGetOffset() { |
| 134 return parser_->get(); | 146 return parser_->get(); |
| (...skipping 54 matching lines...) |
| 189 return true; | 201 return true; |
| 190 } | 202 } |
| 191 | 203 |
| 192 GpuScheduler::UnscheduleFence::UnscheduleFence() : fence(0) { | 204 GpuScheduler::UnscheduleFence::UnscheduleFence() : fence(0) { |
| 193 } | 205 } |
| 194 | 206 |
| 195 GpuScheduler::UnscheduleFence::~UnscheduleFence() { | 207 GpuScheduler::UnscheduleFence::~UnscheduleFence() { |
| 196 } | 208 } |
| 197 | 209 |
| 198 } // namespace gpu | 210 } // namespace gpu |
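
The change above removes the ring-buffer mapping and parser construction from the GpuScheduler constructor and defers them to the new SetGetBuffer() entry point, while PutChanged() now bails out if no parser is bound yet. The standalone sketch below illustrates that deferred-binding pattern only; MiniScheduler, MiniParser and RingBuffer are hypothetical stand-ins for illustration, not the Chromium classes, and the command format is invented for the example.

#include <cstdint>
#include <cstdio>
#include <memory>
#include <vector>

struct RingBuffer {
  int32_t* ptr = nullptr;  // Base address of the shared command buffer.
  size_t size = 0;         // Number of command entries it holds.
};

class MiniParser {
 public:
  // (Re)binds the parser to a buffer and rewinds both pointers.
  void SetBuffer(int32_t* ptr, size_t size) {
    ptr_ = ptr;
    size_ = size;
    get_ = 0;
    put_ = 0;
  }
  void set_put(size_t put) { put_ = put; }
  // Executes one command; returns false once get has caught up with put.
  bool ProcessCommand() {
    if (get_ == put_)
      return false;
    std::printf("command %d\n", static_cast<int>(ptr_[get_]));
    get_ = (get_ + 1) % size_;
    return true;
  }
 private:
  int32_t* ptr_ = nullptr;
  size_t size_ = 0;
  size_t get_ = 0;
  size_t put_ = 0;
};

class MiniScheduler {
 public:
  // Nothing buffer-related happens at construction; the parser stays unbound
  // until SetGetBuffer() is called, mirroring the new GpuScheduler flow.
  bool SetGetBuffer(RingBuffer buffer) {
    if (!buffer.ptr)
      return false;
    if (!parser_)
      parser_ = std::make_unique<MiniParser>();
    parser_->SetBuffer(buffer.ptr, buffer.size);
    return true;
  }
  void PutChanged(size_t put_offset) {
    if (!parser_)
      return;  // No buffer bound yet; nothing to process.
    parser_->set_put(put_offset);
    while (parser_->ProcessCommand()) {
    }
  }
 private:
  std::unique_ptr<MiniParser> parser_;
};

int main() {
  std::vector<int32_t> storage = {7, 8, 9, 0};
  MiniScheduler scheduler;
  scheduler.PutChanged(0);  // Safe to call before any buffer is bound.
  scheduler.SetGetBuffer({storage.data(), storage.size()});
  scheduler.PutChanged(3);  // Processes commands 7, 8 and 9.
  return 0;
}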