OLD | NEW |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "gpu/command_buffer/service/gpu_scheduler.h" | 5 #include "gpu/command_buffer/service/gpu_scheduler.h" |
6 | 6 |
7 #include "base/callback.h" | 7 #include "base/callback.h" |
8 #include "base/command_line.h" | 8 #include "base/command_line.h" |
9 #include "base/compiler_specific.h" | 9 #include "base/compiler_specific.h" |
10 #include "base/debug/trace_event.h" | 10 #include "base/debug/trace_event.h" |
11 #include "base/message_loop.h" | 11 #include "base/message_loop.h" |
12 #include "base/time.h" | 12 #include "base/time.h" |
13 #include "ui/gfx/gl/gl_context.h" | 13 #include "ui/gfx/gl/gl_context.h" |
14 #include "ui/gfx/gl/gl_bindings.h" | 14 #include "ui/gfx/gl/gl_bindings.h" |
15 #include "ui/gfx/gl/gl_surface.h" | 15 #include "ui/gfx/gl/gl_surface.h" |
16 #include "ui/gfx/gl/gl_switches.h" | 16 #include "ui/gfx/gl/gl_switches.h" |
17 | 17 |
18 using ::base::SharedMemory; | 18 using ::base::SharedMemory; |
19 | 19 |
20 namespace gpu { | 20 namespace gpu { |
21 | 21 |
22 GpuScheduler* GpuScheduler::Create(CommandBuffer* command_buffer, | 22 GpuScheduler::GpuScheduler(CommandBuffer* command_buffer, |
23 SurfaceManager* surface_manager, | 23 SurfaceManager* surface_manager, |
24 gles2::ContextGroup* group) { | 24 gles2::ContextGroup* group) |
| 25 : command_buffer_(command_buffer), |
| 26 commands_per_update_(100), |
| 27 unscheduled_count_(0), |
| 28 #if defined(OS_MACOSX) || defined(TOUCH_UI) |
| 29 swap_buffers_count_(0), |
| 30 acknowledged_swap_buffers_count_(0), |
| 31 #endif |
| 32 method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { |
25 DCHECK(command_buffer); | 33 DCHECK(command_buffer); |
26 | 34 decoder_.reset(gles2::GLES2Decoder::Create(surface_manager, group)); |
27 gles2::GLES2Decoder* decoder = | 35 decoder_->set_engine(this); |
28 gles2::GLES2Decoder::Create(surface_manager, group); | |
29 | |
30 GpuScheduler* scheduler = new GpuScheduler(command_buffer, | |
31 decoder, | |
32 NULL); | |
33 | |
34 decoder->set_engine(scheduler); | |
35 | |
36 if (CommandLine::ForCurrentProcess()->HasSwitch( | 36 if (CommandLine::ForCurrentProcess()->HasSwitch( |
37 switches::kEnableGPUServiceLogging)) { | 37 switches::kEnableGPUServiceLogging)) { |
38 decoder->set_debug(true); | 38 decoder_->set_debug(true); |
39 } | 39 } |
40 | |
41 return scheduler; | |
42 } | 40 } |
43 | 41 |
44 GpuScheduler* GpuScheduler::CreateForTests(CommandBuffer* command_buffer, | 42 GpuScheduler::GpuScheduler(CommandBuffer* command_buffer, |
45 gles2::GLES2Decoder* decoder, | 43 gles2::GLES2Decoder* decoder, |
46 CommandParser* parser) { | 44 CommandParser* parser, |
| 45 int commands_per_update) |
| 46 : command_buffer_(command_buffer), |
| 47 commands_per_update_(commands_per_update), |
| 48 unscheduled_count_(0), |
| 49 #if defined(OS_MACOSX) || defined(TOUCH_UI) |
| 50 swap_buffers_count_(0), |
| 51 acknowledged_swap_buffers_count_(0), |
| 52 #endif |
| 53 method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { |
47 DCHECK(command_buffer); | 54 DCHECK(command_buffer); |
48 GpuScheduler* scheduler = new GpuScheduler(command_buffer, | 55 decoder_.reset(decoder); |
49 decoder, | 56 parser_.reset(parser); |
50 parser); | |
51 | |
52 return scheduler; | |
53 } | 57 } |
54 | 58 |
55 GpuScheduler::~GpuScheduler() { | 59 GpuScheduler::~GpuScheduler() { |
56 Destroy(); | 60 Destroy(); |
57 } | 61 } |
58 | 62 |
59 bool GpuScheduler::InitializeCommon( | 63 bool GpuScheduler::InitializeCommon( |
60 const scoped_refptr<gfx::GLSurface>& surface, | 64 const scoped_refptr<gfx::GLSurface>& surface, |
61 const scoped_refptr<gfx::GLContext>& context, | 65 const scoped_refptr<gfx::GLContext>& context, |
62 const gfx::Size& size, | 66 const gfx::Size& size, |
63 const gles2::DisallowedExtensions& disallowed_extensions, | 67 const gles2::DisallowedExtensions& disallowed_extensions, |
64 const char* allowed_extensions, | 68 const char* allowed_extensions, |
65 const std::vector<int32>& attribs) { | 69 const std::vector<int32>& attribs) { |
66 DCHECK(context); | 70 DCHECK(context); |
67 | 71 |
68 if (!context->MakeCurrent(surface)) | 72 if (!context->MakeCurrent(surface)) |
69 return false; | 73 return false; |
70 | 74 |
71 #if !defined(OS_MACOSX) | 75 #if !defined(OS_MACOSX) |
72 // Set up swap interval for onscreen contexts. | 76 // Set up swap interval for onscreen contexts. |
73 if (!surface->IsOffscreen()) { | 77 if (!surface->IsOffscreen()) { |
74 if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kDisableGpuVsync)) | 78 if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kDisableGpuVsync)) |
75 context->SetSwapInterval(0); | 79 context->SetSwapInterval(0); |
76 else | 80 else |
77 context->SetSwapInterval(1); | 81 context->SetSwapInterval(1); |
78 } | 82 } |
79 #endif | 83 #endif |
80 | 84 |
| 85 // Do not limit to a certain number of commands before scheduling another |
| 86 // update when rendering onscreen. |
| 87 if (!surface->IsOffscreen()) |
| 88 commands_per_update_ = INT_MAX; |
| 89 |
81 // Map the ring buffer and create the parser. | 90 // Map the ring buffer and create the parser. |
82 Buffer ring_buffer = command_buffer_->GetRingBuffer(); | 91 Buffer ring_buffer = command_buffer_->GetRingBuffer(); |
83 if (ring_buffer.ptr) { | 92 if (ring_buffer.ptr) { |
84 parser_.reset(new CommandParser(ring_buffer.ptr, | 93 parser_.reset(new CommandParser(ring_buffer.ptr, |
85 ring_buffer.size, | 94 ring_buffer.size, |
86 0, | 95 0, |
87 ring_buffer.size, | 96 ring_buffer.size, |
88 0, | 97 0, |
89 decoder_.get())); | 98 decoder_.get())); |
90 } else { | 99 } else { |
(...skipping 37 matching lines...) |
128 else | 137 else |
129 return decoder_->SetParent(NULL, 0); | 138 return decoder_->SetParent(NULL, 0); |
130 } | 139 } |
131 | 140 |
132 #if defined(OS_MACOSX) || defined(TOUCH_UI) | 141 #if defined(OS_MACOSX) || defined(TOUCH_UI) |
133 namespace { | 142 namespace { |
134 const unsigned int kMaxOutstandingSwapBuffersCallsPerOnscreenContext = 1; | 143 const unsigned int kMaxOutstandingSwapBuffersCallsPerOnscreenContext = 1; |
135 } | 144 } |
136 #endif | 145 #endif |
137 | 146 |
138 void GpuScheduler::PutChanged() { | 147 void GpuScheduler::PutChanged(bool sync) { |
139 TRACE_EVENT1("gpu", "GpuScheduler:PutChanged", "this", this); | 148 TRACE_EVENT1("gpu", "GpuScheduler:PutChanged", "this", this); |
140 | |
141 DCHECK(IsScheduled()); | |
142 | |
143 CommandBuffer::State state = command_buffer_->GetState(); | 149 CommandBuffer::State state = command_buffer_->GetState(); |
144 parser_->set_put(state.put_offset); | 150 parser_->set_put(state.put_offset); |
| 151 |
| 152 if (sync) |
| 153 ProcessCommands(); |
| 154 else |
| 155 ScheduleProcessCommands(); |
| 156 } |
| 157 |
| 158 void GpuScheduler::ProcessCommands() { |
| 159 TRACE_EVENT1("gpu", "GpuScheduler:ProcessCommands", "this", this); |
| 160 CommandBuffer::State state = command_buffer_->GetState(); |
145 if (state.error != error::kNoError) | 161 if (state.error != error::kNoError) |
146 return; | 162 return; |
147 | 163 |
| 164 if (unscheduled_count_ > 0) { |
| 165 TRACE_EVENT1("gpu", "EarlyOut_Unscheduled", |
| 166 "unscheduled_count_", unscheduled_count_); |
| 167 return; |
| 168 } |
| 169 |
148 if (decoder_.get()) { | 170 if (decoder_.get()) { |
149 if (!decoder_->MakeCurrent()) { | 171 if (!decoder_->MakeCurrent()) { |
150 LOG(ERROR) << "Context lost because MakeCurrent failed."; | 172 LOG(ERROR) << "Context lost because MakeCurrent failed."; |
151 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); | 173 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); |
152 command_buffer_->SetParseError(error::kLostContext); | 174 command_buffer_->SetParseError(error::kLostContext); |
153 return; | 175 return; |
154 } | 176 } |
155 } | 177 } |
156 | 178 |
157 #if defined(OS_MACOSX) | 179 #if defined(OS_MACOSX) |
158 bool do_rate_limiting = surface_.get() != NULL; | 180 bool do_rate_limiting = surface_.get() != NULL; |
159 #elif defined(TOUCH_UI) | 181 #elif defined(TOUCH_UI) |
160 bool do_rate_limiting = back_surface_.get() != NULL; | 182 bool do_rate_limiting = back_surface_.get() != NULL; |
161 #endif | 183 #endif |
162 | 184 |
163 #if defined(OS_MACOSX) || defined(TOUCH_UI) | 185 #if defined(OS_MACOSX) || defined(TOUCH_UI) |
164 // Don't swamp the browser process with SwapBuffers calls it can't handle. | 186 // Don't swamp the browser process with SwapBuffers calls it can't handle. |
165 DCHECK(!do_rate_limiting || | 187 if (do_rate_limiting && |
166 swap_buffers_count_ - acknowledged_swap_buffers_count_ == 0); | 188 swap_buffers_count_ - acknowledged_swap_buffers_count_ >= |
| 189 kMaxOutstandingSwapBuffersCallsPerOnscreenContext) { |
| 190 TRACE_EVENT0("gpu", "EarlyOut_OSX_Throttle"); |
| 191 // Stop doing work on this command buffer. In the GPU process, |
| 192 // receipt of the GpuMsg_AcceleratedSurfaceBuffersSwappedACK |
| 193 // message causes ProcessCommands to be scheduled again. |
| 194 return; |
| 195 } |
167 #endif | 196 #endif |
168 | 197 |
| 198 base::TimeTicks start_time = base::TimeTicks::Now(); |
| 199 base::TimeDelta elapsed; |
| 200 bool is_break = false; |
169 error::Error error = error::kNoError; | 201 error::Error error = error::kNoError; |
170 while (!parser_->IsEmpty()) { | 202 do { |
171 error = parser_->ProcessCommand(); | 203 int commands_processed = 0; |
| 204 while (commands_processed < commands_per_update_ && |
| 205 !parser_->IsEmpty()) { |
| 206 error = parser_->ProcessCommand(); |
172 | 207 |
173 // TODO(piman): various classes duplicate various pieces of state, leading | 208 // TODO(piman): various classes duplicate various pieces of state, leading |
174 // to needlessly complex update logic. It should be possible to simply | 209 // to needlessly complex update logic. It should be possible to simply |
175 // share the state across all of them. | 210 // share the state across all of them. |
176 command_buffer_->SetGetOffset(static_cast<int32>(parser_->get())); | 211 command_buffer_->SetGetOffset(static_cast<int32>(parser_->get())); |
177 | 212 |
178 if (error::IsError(error)) { | 213 if (error == error::kWaiting || error == error::kYield) { |
179 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); | 214 is_break = true; |
180 command_buffer_->SetParseError(error); | 215 break; |
181 return; | 216 } else if (error::IsError(error)) { |
| 217 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); |
| 218 command_buffer_->SetParseError(error); |
| 219 return; |
| 220 } |
| 221 |
| 222 if (unscheduled_count_ > 0) { |
| 223 is_break = true; |
| 224 break; |
| 225 } |
| 226 |
| 227 ++commands_processed; |
| 228 if (command_processed_callback_.get()) { |
| 229 command_processed_callback_->Run(); |
| 230 } |
182 } | 231 } |
| 232 elapsed = base::TimeTicks::Now() - start_time; |
| 233 } while(!is_break && |
| 234 !parser_->IsEmpty() && |
| 235 elapsed.InMicroseconds() < kMinimumSchedulerQuantumMicros); |
183 | 236 |
184 if (command_processed_callback_.get()) | 237 if (unscheduled_count_ == 0 && |
185 command_processed_callback_->Run(); | 238 error != error::kWaiting && |
186 | 239 !parser_->IsEmpty()) { |
187 if (unscheduled_count_ > 0) | 240 ScheduleProcessCommands(); |
188 return; | |
189 } | 241 } |
190 } | 242 } |
191 | 243 |
192 void GpuScheduler::SetScheduled(bool scheduled) { | 244 void GpuScheduler::SetScheduled(bool scheduled) { |
193 TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this, | 245 TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this, |
194 "new unscheduled_count_", | 246 "new unscheduled_count_", |
195 unscheduled_count_ + (scheduled? -1 : 1)); | 247 unscheduled_count_ + (scheduled? -1 : 1)); |
196 if (scheduled) { | 248 if (scheduled) { |
197 --unscheduled_count_; | 249 --unscheduled_count_; |
198 DCHECK_GE(unscheduled_count_, 0); | 250 DCHECK_GE(unscheduled_count_, 0); |
199 | 251 |
200 if (unscheduled_count_ == 0 && scheduled_callback_.get()) | 252 if (unscheduled_count_ == 0) { |
201 scheduled_callback_->Run(); | 253 if (scheduled_callback_.get()) |
| 254 scheduled_callback_->Run(); |
| 255 |
| 256 ScheduleProcessCommands(); |
| 257 } |
202 } else { | 258 } else { |
203 ++unscheduled_count_; | 259 ++unscheduled_count_; |
204 } | 260 } |
205 } | 261 } |
206 | 262 |
207 bool GpuScheduler::IsScheduled() { | 263 bool GpuScheduler::IsScheduled() { |
208 return unscheduled_count_ == 0; | 264 return unscheduled_count_ == 0; |
209 } | 265 } |
210 | 266 |
211 void GpuScheduler::SetScheduledCallback(Callback0::Type* scheduled_callback) { | 267 void GpuScheduler::SetScheduledCallback(Callback0::Type* scheduled_callback) { |
(...skipping 45 matching lines...) |
257 Callback0::Type* callback) { | 313 Callback0::Type* callback) { |
258 command_processed_callback_.reset(callback); | 314 command_processed_callback_.reset(callback); |
259 } | 315 } |
260 | 316 |
261 void GpuScheduler::SetTokenCallback( | 317 void GpuScheduler::SetTokenCallback( |
262 const base::Callback<void(int32)>& callback) { | 318 const base::Callback<void(int32)>& callback) { |
263 DCHECK(set_token_callback_.is_null()); | 319 DCHECK(set_token_callback_.is_null()); |
264 set_token_callback_ = callback; | 320 set_token_callback_ = callback; |
265 } | 321 } |
266 | 322 |
267 GpuScheduler::GpuScheduler(CommandBuffer* command_buffer, | 323 void GpuScheduler::ScheduleProcessCommands() { |
268 gles2::GLES2Decoder* decoder, | 324 MessageLoop::current()->PostTask( |
269 CommandParser* parser) | 325 FROM_HERE, |
270 : command_buffer_(command_buffer), | 326 method_factory_.NewRunnableMethod(&GpuScheduler::ProcessCommands)); |
271 decoder_(decoder), | |
272 parser_(parser), | |
273 unscheduled_count_(0), | |
274 #if defined(OS_MACOSX) || defined(TOUCH_UI) | |
275 swap_buffers_count_(0), | |
276 acknowledged_swap_buffers_count_(0), | |
277 #endif | |
278 method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { | |
279 } | 327 } |
280 | 328 |
281 void GpuScheduler::WillResize(gfx::Size size) { | 329 void GpuScheduler::WillResize(gfx::Size size) { |
282 if (wrapped_resize_callback_.get()) { | 330 if (wrapped_resize_callback_.get()) { |
283 wrapped_resize_callback_->Run(size); | 331 wrapped_resize_callback_->Run(size); |
284 } | 332 } |
285 } | 333 } |
286 | 334 |
287 } // namespace gpu | 335 } // namespace gpu |
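
A minimal standalone sketch of the SwapBuffers throttling rule the new ProcessCommands applies on OS_MACOSX / TOUCH_UI: work on the command buffer stops while the number of unacknowledged SwapBuffers calls reaches kMaxOutstandingSwapBuffersCallsPerOnscreenContext, and resumes once the browser process ACKs. The SwapThrottle struct and the main() harness below are illustrative assumptions for this sketch only; the real logic lives in GpuScheduler::ProcessCommands and the counters in the GpuScheduler constructor.

// Standalone sketch, not part of the patch.
#include <cstdio>

namespace {

const unsigned int kMaxOutstandingSwapBuffersCallsPerOnscreenContext = 1;

struct SwapThrottle {
  unsigned int swap_buffers_count = 0;
  unsigned int acknowledged_swap_buffers_count = 0;

  // Called when the decoder issues a SwapBuffers for an onscreen surface.
  void OnSwapBuffers() { ++swap_buffers_count; }

  // Called when the browser acknowledges the swap (the patch's
  // GpuMsg_AcceleratedSurfaceBuffersSwappedACK path).
  void OnSwapBuffersAck() { ++acknowledged_swap_buffers_count; }

  // Mirrors the early-out in ProcessCommands: stop doing work while too many
  // swaps are still unacknowledged by the browser process.
  bool ShouldThrottle() const {
    return swap_buffers_count - acknowledged_swap_buffers_count >=
           kMaxOutstandingSwapBuffersCallsPerOnscreenContext;
  }
};

}  // namespace

int main() {
  SwapThrottle throttle;
  throttle.OnSwapBuffers();
  std::printf("throttled: %d\n", throttle.ShouldThrottle());  // 1: wait for ACK
  throttle.OnSwapBuffersAck();
  std::printf("throttled: %d\n", throttle.ShouldThrottle());  // 0: resume work
  return 0;
}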