| OLD | NEW | 
|---|---|
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be | 
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. | 
| 4 | 4 | 
| 5 #include "gpu/command_buffer/service/gpu_scheduler.h" | 5 #include "gpu/command_buffer/service/gpu_scheduler.h" | 
| 6 | 6 | 
| 7 #include "base/callback.h" | 7 #include "base/callback.h" | 
| 8 #include "base/command_line.h" | 8 #include "base/command_line.h" | 
| 9 #include "base/compiler_specific.h" | 9 #include "base/compiler_specific.h" | 
| 10 #include "base/debug/trace_event.h" | 10 #include "base/debug/trace_event.h" | 
| 11 #include "base/message_loop.h" | 11 #include "base/message_loop.h" | 
|  | 12 #include "base/time.h" | 
| 12 #include "ui/gfx/gl/gl_context.h" | 13 #include "ui/gfx/gl/gl_context.h" | 
| 13 #include "ui/gfx/gl/gl_bindings.h" | 14 #include "ui/gfx/gl/gl_bindings.h" | 
| 14 #include "ui/gfx/gl/gl_surface.h" | 15 #include "ui/gfx/gl/gl_surface.h" | 
| 15 #include "ui/gfx/gl/gl_switches.h" | 16 #include "ui/gfx/gl/gl_switches.h" | 
| 16 | 17 | 
| 17 using ::base::SharedMemory; | 18 using ::base::SharedMemory; | 
| 18 | 19 | 
| 19 namespace gpu { | 20 namespace gpu { | 
| 20 | 21 | 
| 21 GpuScheduler::GpuScheduler(CommandBuffer* command_buffer, | 22 GpuScheduler::GpuScheduler(CommandBuffer* command_buffer, | 
| (...skipping 107 matching lines...) | (...skipping 107 matching lines...) |
| 129   parser_.reset(); | 130   parser_.reset(); | 
| 130 } | 131 } | 
| 131 | 132 | 
| 132 #if defined(OS_MACOSX) | 133 #if defined(OS_MACOSX) | 
| 133 namespace { | 134 namespace { | 
| 134 const unsigned int kMaxOutstandingSwapBuffersCallsPerOnscreenContext = 1; | 135 const unsigned int kMaxOutstandingSwapBuffersCallsPerOnscreenContext = 1; | 
| 135 } | 136 } | 
| 136 #endif | 137 #endif | 
| 137 | 138 | 
| 138 void GpuScheduler::PutChanged(bool sync) { | 139 void GpuScheduler::PutChanged(bool sync) { | 
|  | 140   TRACE_EVENT0("gpu", "GpuScheduler:PutChanged"); | 
| 139   CommandBuffer::State state = command_buffer_->GetState(); | 141   CommandBuffer::State state = command_buffer_->GetState(); | 
| 140   parser_->set_put(state.put_offset); | 142   parser_->set_put(state.put_offset); | 
| 141 | 143 | 
| 142   if (sync) | 144   if (sync) | 
| 143     ProcessCommands(); | 145     ProcessCommands(); | 
| 144   else | 146   else | 
| 145     ScheduleProcessCommands(); | 147     ScheduleProcessCommands(); | 
| 146 } | 148 } | 
| 147 | 149 | 
| 148 void GpuScheduler::ProcessCommands() { | 150 void GpuScheduler::ProcessCommands() { | 
| 149   TRACE_EVENT0("gpu", "GpuScheduler:ProcessCommands"); | 151   TRACE_EVENT0("gpu", "GpuScheduler:ProcessCommands"); | 
| 150   CommandBuffer::State state = command_buffer_->GetState(); | 152   CommandBuffer::State state = command_buffer_->GetState(); | 
| 151   if (state.error != error::kNoError) | 153   if (state.error != error::kNoError) | 
| 152     return; | 154     return; | 
| 153 | 155 | 
| 154   if (unscheduled_count_ > 0) | 156   if (unscheduled_count_ > 0) { | 
|  | 157     TRACE_EVENT1("gpu", "EarlyOut_Unscheduled", | 
|  | 158                  "unscheduled_count_", unscheduled_count_); | 
| 155     return; | 159     return; | 
|  | 160   } | 
| 156 | 161 | 
| 157   if (decoder_.get()) { | 162   if (decoder_.get()) { | 
| 158     if (!decoder_->MakeCurrent()) { | 163     if (!decoder_->MakeCurrent()) { | 
| 159       LOG(ERROR) << "Context lost because MakeCurrent failed."; | 164       LOG(ERROR) << "Context lost because MakeCurrent failed."; | 
| 160       command_buffer_->SetParseError(error::kLostContext); | 165       command_buffer_->SetParseError(error::kLostContext); | 
| 161       return; | 166       return; | 
| 162     } | 167     } | 
| 163   } | 168   } | 
| 164 | 169 | 
| 165 #if defined(OS_MACOSX) | 170 #if defined(OS_MACOSX) | 
| 166   bool do_rate_limiting = surface_.get() != NULL; | 171   bool do_rate_limiting = surface_.get() != NULL; | 
| 167   // Don't swamp the browser process with SwapBuffers calls it can't handle. | 172   // Don't swamp the browser process with SwapBuffers calls it can't handle. | 
| 168   if (do_rate_limiting && | 173   if (do_rate_limiting && | 
| 169       swap_buffers_count_ - acknowledged_swap_buffers_count_ >= | 174       swap_buffers_count_ - acknowledged_swap_buffers_count_ >= | 
| 170       kMaxOutstandingSwapBuffersCallsPerOnscreenContext) { | 175       kMaxOutstandingSwapBuffersCallsPerOnscreenContext) { | 
| 171     // Stop doing work on this command buffer. In the GPU process, | 176     // Stop doing work on this command buffer. In the GPU process, | 
| 172     // receipt of the GpuMsg_AcceleratedSurfaceBuffersSwappedACK | 177     // receipt of the GpuMsg_AcceleratedSurfaceBuffersSwappedACK | 
| 173     // message causes ProcessCommands to be scheduled again. | 178     // message causes ProcessCommands to be scheduled again. | 
| 174     return; | 179     return; | 
| 175   } | 180   } | 
| 176 #endif | 181 #endif | 
| 177 | 182 | 
|  | 183   base::TimeTicks start_time = base::TimeTicks::Now(); | 
|  | 184   base::TimeDelta elapsed; | 
|  | 185   bool is_break = false; | 
| 178   error::Error error = error::kNoError; | 186   error::Error error = error::kNoError; | 
| 179   int commands_processed = 0; | 187   do { | 
| 180   while (commands_processed < commands_per_update_ && | 188     int commands_processed = 0; | 
| 181          !parser_->IsEmpty()) { | 189     while (commands_processed < commands_per_update_ && | 
| 182     error = parser_->ProcessCommand(); | 190            !parser_->IsEmpty()) { | 
|  | 191       error = parser_->ProcessCommand(); | 
| 183 | 192 | 
| 184     // TODO(piman): various classes duplicate various pieces of state, leading | 193       // TODO(piman): various classes duplicate various pieces of state, leading | 
| 185     // to needlessly complex update logic. It should be possible to simply share | 194       // to needlessly complex update logic. It should be possible to simply | 
| 186     // the state across all of them. | 195       // share the state across all of them. | 
| 187     command_buffer_->SetGetOffset(static_cast<int32>(parser_->get())); | 196       command_buffer_->SetGetOffset(static_cast<int32>(parser_->get())); | 
| 188 | 197 | 
| 189     if (error == error::kWaiting || error == error::kYield) { | 198       if (error == error::kWaiting || error == error::kYield) { | 
| 190       break; | 199         is_break = true; | 
| 191     } else if (error::IsError(error)) { | 200         break; | 
| 192       command_buffer_->SetParseError(error); | 201       } else if (error::IsError(error)) { | 
| 193       return; | 202         command_buffer_->SetParseError(error); | 
|  | 203         return; | 
|  | 204       } | 
|  | 205 | 
|  | 206       if (unscheduled_count_ > 0) { | 
|  | 207         is_break = true; | 
|  | 208         break; | 
|  | 209       } | 
|  | 210 | 
|  | 211       ++commands_processed; | 
|  | 212       if (command_processed_callback_.get()) { | 
|  | 213         command_processed_callback_->Run(); | 
|  | 214       } | 
| 194     } | 215     } | 
| 195 | 216     elapsed = base::TimeTicks::Now() - start_time; | 
| 196     if (unscheduled_count_ > 0) | 217   } while(!is_break && | 
| 197       break; | 218           !parser_->IsEmpty() && | 
| 198 | 219           elapsed.InMicroseconds() < kMinimumSchedulerQuantumMicros); | 
| 199     ++commands_processed; |  | 
| 200     if (command_processed_callback_.get()) { |  | 
| 201       command_processed_callback_->Run(); |  | 
| 202     } |  | 
| 203   } |  | 
| 204 | 220 | 
| 205   if (unscheduled_count_ == 0 && | 221   if (unscheduled_count_ == 0 && | 
| 206       error != error::kWaiting && | 222       error != error::kWaiting && | 
| 207       !parser_->IsEmpty()) { | 223       !parser_->IsEmpty()) { | 
| 208     ScheduleProcessCommands(); | 224     ScheduleProcessCommands(); | 
| 209   } | 225   } | 
| 210 } | 226 } | 
| 211 | 227 | 
| 212 void GpuScheduler::SetScheduled(bool scheduled) { | 228 void GpuScheduler::SetScheduled(bool scheduled) { | 
|  | 229   TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "scheduled", scheduled, | 
|  | 230                "unscheduled_count_", unscheduled_count_); | 
| 213   if (scheduled) { | 231   if (scheduled) { | 
| 214     --unscheduled_count_; | 232     --unscheduled_count_; | 
| 215     DCHECK_GE(unscheduled_count_, 0); | 233     DCHECK_GE(unscheduled_count_, 0); | 
| 216 | 234 | 
| 217     if (unscheduled_count_ == 0) { | 235     if (unscheduled_count_ == 0) { | 
| 218       if (scheduled_callback_.get()) | 236       if (scheduled_callback_.get()) | 
| 219         scheduled_callback_->Run(); | 237         scheduled_callback_->Run(); | 
| 220 | 238 | 
| 221       ScheduleProcessCommands(); | 239       ScheduleProcessCommands(); | 
| 222     } | 240     } | 
| (...skipping 51 matching lines...) | (...skipping 51 matching lines...) |
| 274   command_processed_callback_.reset(callback); | 292   command_processed_callback_.reset(callback); | 
| 275 } | 293 } | 
| 276 | 294 | 
| 277 void GpuScheduler::ScheduleProcessCommands() { | 295 void GpuScheduler::ScheduleProcessCommands() { | 
| 278   MessageLoop::current()->PostTask( | 296   MessageLoop::current()->PostTask( | 
| 279       FROM_HERE, | 297       FROM_HERE, | 
| 280       method_factory_.NewRunnableMethod(&GpuScheduler::ProcessCommands)); | 298       method_factory_.NewRunnableMethod(&GpuScheduler::ProcessCommands)); | 
| 281 } | 299 } | 
| 282 | 300 | 
| 283 }  // namespace gpu | 301 }  // namespace gpu | 
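
The core change in this patch replaces the single bounded pass over the command buffer with a do-while loop that keeps processing batches until the parser is empty, a yield/wait or unscheduled condition is hit, or a minimum scheduler quantum has elapsed. Below is a minimal standalone sketch of that pattern, not the Chromium code itself: it uses `std::chrono` in place of `base::TimeTicks`, and the quantum value, the `kQuantumMicros`/`kCommandsPerUpdate` constants, and the deque-based stand-in parser are assumptions for illustration only (the real `kMinimumSchedulerQuantumMicros` is referenced by the diff but not defined in this excerpt).

```cpp
#include <chrono>
#include <cstdint>
#include <deque>
#include <iostream>

namespace {

// Assumed values, for illustration only; the real constants live elsewhere
// in the Chromium sources, not in this patch excerpt.
constexpr int64_t kQuantumMicros = 2000;
constexpr int kCommandsPerUpdate = 100;

// Trivial stand-in for CommandParser: each "command" is just an int.
std::deque<int> g_commands;

// Returns false to signal the analogue of error::kWaiting / error::kYield.
bool ProcessOneCommand() {
  g_commands.pop_front();
  return true;
}

// Mirrors the shape of the new GpuScheduler::ProcessCommands loop: process
// batches of up to kCommandsPerUpdate commands, and keep looping until the
// queue is empty, a break condition is hit, or the time quantum elapses.
void ProcessCommands() {
  const auto start = std::chrono::steady_clock::now();
  std::chrono::microseconds elapsed{0};
  bool is_break = false;
  do {
    int commands_processed = 0;
    while (commands_processed < kCommandsPerUpdate && !g_commands.empty()) {
      if (!ProcessOneCommand()) {
        is_break = true;  // yield/wait: stop now, let the caller reschedule
        break;
      }
      ++commands_processed;
    }
    elapsed = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now() - start);
  } while (!is_break && !g_commands.empty() &&
           elapsed.count() < kQuantumMicros);

  if (!is_break && !g_commands.empty()) {
    // In the real scheduler this is where ScheduleProcessCommands() posts a
    // task so other work on the message loop gets a turn.
    std::cout << "quantum exhausted, would reschedule\n";
  }
}

}  // namespace

int main() {
  for (int i = 0; i < 500; ++i)
    g_commands.push_back(i);
  ProcessCommands();
  std::cout << "remaining commands: " << g_commands.size() << "\n";
  return 0;
}
```

The apparent intent matches the diff: capping each pass at `commands_per_update_` alone forces a round trip through the message loop after every small batch, while looping without a bound would starve other tasks; time-slicing on a quantum bounds both costs, and the outer `if` at the end of `ProcessCommands` reschedules whatever work remains.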