OLD | NEW |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "gpu/command_buffer/service/gpu_scheduler.h" | 5 #include "gpu/command_buffer/service/gpu_scheduler.h" |
6 | 6 |
7 #include "base/callback.h" | 7 #include "base/callback.h" |
8 #include "base/command_line.h" | 8 #include "base/command_line.h" |
9 #include "base/compiler_specific.h" | 9 #include "base/compiler_specific.h" |
10 #include "base/debug/trace_event.h" | 10 #include "base/debug/trace_event.h" |
11 #include "base/message_loop.h" | 11 #include "base/message_loop.h" |
12 #include "base/time.h" | 12 #include "base/time.h" |
13 #include "ui/gfx/gl/gl_context.h" | 13 #include "ui/gfx/gl/gl_context.h" |
14 #include "ui/gfx/gl/gl_bindings.h" | 14 #include "ui/gfx/gl/gl_bindings.h" |
15 #include "ui/gfx/gl/gl_surface.h" | 15 #include "ui/gfx/gl/gl_surface.h" |
16 #include "ui/gfx/gl/gl_switches.h" | 16 #include "ui/gfx/gl/gl_switches.h" |
17 | 17 |
18 using ::base::SharedMemory; | 18 using ::base::SharedMemory; |
19 | 19 |
20 namespace gpu { | 20 namespace gpu { |
21 | 21 |
22 GpuScheduler::GpuScheduler(CommandBuffer* command_buffer, | 22 GpuScheduler* GpuScheduler::Create(CommandBuffer* command_buffer, |
23 SurfaceManager* surface_manager, | 23 SurfaceManager* surface_manager, |
24 gles2::ContextGroup* group) | 24 gles2::ContextGroup* group) { |
25 : command_buffer_(command_buffer), | |
26 commands_per_update_(100), | |
27 unscheduled_count_(0), | |
28 #if defined(OS_MACOSX) || defined(TOUCH_UI) | |
29 swap_buffers_count_(0), | |
30 acknowledged_swap_buffers_count_(0), | |
31 #endif | |
32 method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { | |
33 DCHECK(command_buffer); | 25 DCHECK(command_buffer); |
34 decoder_.reset(gles2::GLES2Decoder::Create(surface_manager, group)); | 26 |
35 decoder_->set_engine(this); | 27 gles2::GLES2Decoder* decoder = |
| 28 gles2::GLES2Decoder::Create(surface_manager, group); |
| 29 |
| 30 GpuScheduler* scheduler = new GpuScheduler(command_buffer, |
| 31 decoder, |
| 32 NULL); |
| 33 |
| 34 decoder->set_engine(scheduler); |
| 35 |
36 if (CommandLine::ForCurrentProcess()->HasSwitch( | 36 if (CommandLine::ForCurrentProcess()->HasSwitch( |
37 switches::kEnableGPUServiceLogging)) { | 37 switches::kEnableGPUServiceLogging)) { |
38 decoder_->set_debug(true); | 38 decoder->set_debug(true); |
39 } | 39 } |
| 40 |
| 41 return scheduler; |
40 } | 42 } |
41 | 43 |
42 GpuScheduler::GpuScheduler(CommandBuffer* command_buffer, | 44 GpuScheduler* GpuScheduler::CreateForTests(CommandBuffer* command_buffer, |
43 gles2::GLES2Decoder* decoder, | 45 gles2::GLES2Decoder* decoder, |
44 CommandParser* parser, | 46 CommandParser* parser) { |
45 int commands_per_update) | |
46 : command_buffer_(command_buffer), | |
47 commands_per_update_(commands_per_update), | |
48 unscheduled_count_(0), | |
49 #if defined(OS_MACOSX) || defined(TOUCH_UI) | |
50 swap_buffers_count_(0), | |
51 acknowledged_swap_buffers_count_(0), | |
52 #endif | |
53 method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { | |
54 DCHECK(command_buffer); | 47 DCHECK(command_buffer); |
55 decoder_.reset(decoder); | 48 GpuScheduler* scheduler = new GpuScheduler(command_buffer, |
56 parser_.reset(parser); | 49 decoder, |
| 50 parser); |
| 51 |
| 52 return scheduler; |
57 } | 53 } |
58 | 54 |
59 GpuScheduler::~GpuScheduler() { | 55 GpuScheduler::~GpuScheduler() { |
60 Destroy(); | 56 Destroy(); |
61 } | 57 } |
62 | 58 |
63 bool GpuScheduler::InitializeCommon( | 59 bool GpuScheduler::InitializeCommon( |
64 const scoped_refptr<gfx::GLSurface>& surface, | 60 const scoped_refptr<gfx::GLSurface>& surface, |
65 const scoped_refptr<gfx::GLContext>& context, | 61 const scoped_refptr<gfx::GLContext>& context, |
66 const gfx::Size& size, | 62 const gfx::Size& size, |
67 const gles2::DisallowedExtensions& disallowed_extensions, | 63 const gles2::DisallowedExtensions& disallowed_extensions, |
68 const char* allowed_extensions, | 64 const char* allowed_extensions, |
69 const std::vector<int32>& attribs) { | 65 const std::vector<int32>& attribs) { |
70 DCHECK(context); | 66 DCHECK(context); |
71 | 67 |
72 if (!context->MakeCurrent(surface)) | 68 if (!context->MakeCurrent(surface)) |
73 return false; | 69 return false; |
74 | 70 |
75 #if !defined(OS_MACOSX) | 71 #if !defined(OS_MACOSX) |
76 // Set up swap interval for onscreen contexts. | 72 // Set up swap interval for onscreen contexts. |
77 if (!surface->IsOffscreen()) { | 73 if (!surface->IsOffscreen()) { |
78 if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kDisableGpuVsync)) | 74 if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kDisableGpuVsync)) |
79 context->SetSwapInterval(0); | 75 context->SetSwapInterval(0); |
80 else | 76 else |
81 context->SetSwapInterval(1); | 77 context->SetSwapInterval(1); |
82 } | 78 } |
83 #endif | 79 #endif |
84 | 80 |
85 // Do not limit to a certain number of commands before scheduling another | |
86 // update when rendering onscreen. | |
87 if (!surface->IsOffscreen()) | |
88 commands_per_update_ = INT_MAX; | |
89 | |
90 // Map the ring buffer and create the parser. | 81 // Map the ring buffer and create the parser. |
91 Buffer ring_buffer = command_buffer_->GetRingBuffer(); | 82 Buffer ring_buffer = command_buffer_->GetRingBuffer(); |
92 if (ring_buffer.ptr) { | 83 if (ring_buffer.ptr) { |
93 parser_.reset(new CommandParser(ring_buffer.ptr, | 84 parser_.reset(new CommandParser(ring_buffer.ptr, |
94 ring_buffer.size, | 85 ring_buffer.size, |
95 0, | 86 0, |
96 ring_buffer.size, | 87 ring_buffer.size, |
97 0, | 88 0, |
98 decoder_.get())); | 89 decoder_.get())); |
99 } else { | 90 } else { |
(...skipping 37 matching lines...) |
137 else | 128 else |
138 return decoder_->SetParent(NULL, 0); | 129 return decoder_->SetParent(NULL, 0); |
139 } | 130 } |
140 | 131 |
141 #if defined(OS_MACOSX) || defined(TOUCH_UI) | 132 #if defined(OS_MACOSX) || defined(TOUCH_UI) |
142 namespace { | 133 namespace { |
143 const unsigned int kMaxOutstandingSwapBuffersCallsPerOnscreenContext = 1; | 134 const unsigned int kMaxOutstandingSwapBuffersCallsPerOnscreenContext = 1; |
144 } | 135 } |
145 #endif | 136 #endif |
146 | 137 |
147 void GpuScheduler::PutChanged(bool sync) { | 138 void GpuScheduler::PutChanged() { |
148 TRACE_EVENT1("gpu", "GpuScheduler:PutChanged", "this", this); | 139 TRACE_EVENT1("gpu", "GpuScheduler:PutChanged", "this", this); |
| 140 |
| 141 DCHECK(IsScheduled()); |
| 142 |
149 CommandBuffer::State state = command_buffer_->GetState(); | 143 CommandBuffer::State state = command_buffer_->GetState(); |
150 parser_->set_put(state.put_offset); | 144 parser_->set_put(state.put_offset); |
151 | |
152 if (sync) | |
153 ProcessCommands(); | |
154 else | |
155 ScheduleProcessCommands(); | |
156 } | |
157 | |
158 void GpuScheduler::ProcessCommands() { | |
159 TRACE_EVENT1("gpu", "GpuScheduler:ProcessCommands", "this", this); | |
160 CommandBuffer::State state = command_buffer_->GetState(); | |
161 if (state.error != error::kNoError) | 145 if (state.error != error::kNoError) |
162 return; | 146 return; |
163 | 147 |
164 if (unscheduled_count_ > 0) { | |
165 TRACE_EVENT1("gpu", "EarlyOut_Unscheduled", | |
166 "unscheduled_count_", unscheduled_count_); | |
167 return; | |
168 } | |
169 | |
170 if (decoder_.get()) { | 148 if (decoder_.get()) { |
171 if (!decoder_->MakeCurrent()) { | 149 if (!decoder_->MakeCurrent()) { |
172 LOG(ERROR) << "Context lost because MakeCurrent failed."; | 150 LOG(ERROR) << "Context lost because MakeCurrent failed."; |
173 command_buffer_->SetParseError(error::kLostContext); | 151 command_buffer_->SetParseError(error::kLostContext); |
174 return; | 152 return; |
175 } | 153 } |
176 } | 154 } |
177 | 155 |
178 #if defined(OS_MACOSX) | 156 #if defined(OS_MACOSX) |
179 bool do_rate_limiting = surface_.get() != NULL; | 157 bool do_rate_limiting = surface_.get() != NULL; |
180 #elif defined(TOUCH_UI) | 158 #elif defined(TOUCH_UI) |
181 bool do_rate_limiting = back_surface_.get() != NULL; | 159 bool do_rate_limiting = back_surface_.get() != NULL; |
182 #endif | 160 #endif |
183 | 161 |
184 #if defined(OS_MACOSX) || defined(TOUCH_UI) | 162 #if defined(OS_MACOSX) || defined(TOUCH_UI) |
185 // Don't swamp the browser process with SwapBuffers calls it can't handle. | 163 // Don't swamp the browser process with SwapBuffers calls it can't handle. |
186 if (do_rate_limiting && | 164 DCHECK(!do_rate_limiting || |
187 swap_buffers_count_ - acknowledged_swap_buffers_count_ >= | 165 swap_buffers_count_ - acknowledged_swap_buffers_count_ == 0); |
188 kMaxOutstandingSwapBuffersCallsPerOnscreenContext) { | |
189 TRACE_EVENT0("gpu", "EarlyOut_OSX_Throttle"); | |
190 // Stop doing work on this command buffer. In the GPU process, | |
191 // receipt of the GpuMsg_AcceleratedSurfaceBuffersSwappedACK | |
192 // message causes ProcessCommands to be scheduled again. | |
193 return; | |
194 } | |
195 #endif | 166 #endif |
196 | 167 |
197 base::TimeTicks start_time = base::TimeTicks::Now(); | |
198 base::TimeDelta elapsed; | |
199 bool is_break = false; | |
200 error::Error error = error::kNoError; | 168 error::Error error = error::kNoError; |
201 do { | 169 while (!parser_->IsEmpty()) { |
202 int commands_processed = 0; | 170 error = parser_->ProcessCommand(); |
203 while (commands_processed < commands_per_update_ && | |
204 !parser_->IsEmpty()) { | |
205 error = parser_->ProcessCommand(); | |
206 | 171 |
207 // TODO(piman): various classes duplicate various pieces of state, leading | 172 // TODO(piman): various classes duplicate various pieces of state, leading |
208 // to needlessly complex update logic. It should be possible to simply | 173 // to needlessly complex update logic. It should be possible to simply |
209 // share the state across all of them. | 174 // share the state across all of them. |
210 command_buffer_->SetGetOffset(static_cast<int32>(parser_->get())); | 175 command_buffer_->SetGetOffset(static_cast<int32>(parser_->get())); |
211 | 176 |
212 if (error == error::kWaiting || error == error::kYield) { | 177 if (error::IsError(error)) { |
213 is_break = true; | 178 command_buffer_->SetParseError(error); |
214 break; | 179 return; |
215 } else if (error::IsError(error)) { | 180 } |
216 command_buffer_->SetParseError(error); | |
217 return; | |
218 } | |
219 | 181 |
220 if (unscheduled_count_ > 0) { | 182 if (command_processed_callback_.get()) |
221 is_break = true; | 183 command_processed_callback_->Run(); |
222 break; | |
223 } | |
224 | 184 |
225 ++commands_processed; | 185 if (unscheduled_count_ > 0) |
226 if (command_processed_callback_.get()) { | 186 return; |
227 command_processed_callback_->Run(); | |
228 } | |
229 } | |
230 elapsed = base::TimeTicks::Now() - start_time; | |
231 } while(!is_break && | |
232 !parser_->IsEmpty() && | |
233 elapsed.InMicroseconds() < kMinimumSchedulerQuantumMicros); | |
234 | |
235 if (unscheduled_count_ == 0 && | |
236 error != error::kWaiting && | |
237 !parser_->IsEmpty()) { | |
238 ScheduleProcessCommands(); | |
239 } | 187 } |
240 } | 188 } |
241 | 189 |
242 void GpuScheduler::SetScheduled(bool scheduled) { | 190 void GpuScheduler::SetScheduled(bool scheduled) { |
243 TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this, | 191 TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this, |
244 "new unscheduled_count_", | 192 "new unscheduled_count_", |
245 unscheduled_count_ + (scheduled? -1 : 1)); | 193 unscheduled_count_ + (scheduled? -1 : 1)); |
246 if (scheduled) { | 194 if (scheduled) { |
247 --unscheduled_count_; | 195 --unscheduled_count_; |
248 DCHECK_GE(unscheduled_count_, 0); | 196 DCHECK_GE(unscheduled_count_, 0); |
249 | 197 |
250 if (unscheduled_count_ == 0) { | 198 if (unscheduled_count_ == 0 && scheduled_callback_.get()) |
251 if (scheduled_callback_.get()) | 199 scheduled_callback_->Run(); |
252 scheduled_callback_->Run(); | |
253 | |
254 ScheduleProcessCommands(); | |
255 } | |
256 } else { | 200 } else { |
257 ++unscheduled_count_; | 201 ++unscheduled_count_; |
258 } | 202 } |
259 } | 203 } |
260 | 204 |
261 bool GpuScheduler::IsScheduled() { | 205 bool GpuScheduler::IsScheduled() { |
262 return unscheduled_count_ == 0; | 206 return unscheduled_count_ == 0; |
263 } | 207 } |
264 | 208 |
265 void GpuScheduler::SetScheduledCallback(Callback0::Type* scheduled_callback) { | 209 void GpuScheduler::SetScheduledCallback(Callback0::Type* scheduled_callback) { |
(...skipping 45 matching lines...) |
311 Callback0::Type* callback) { | 255 Callback0::Type* callback) { |
312 command_processed_callback_.reset(callback); | 256 command_processed_callback_.reset(callback); |
313 } | 257 } |
314 | 258 |
315 void GpuScheduler::SetTokenCallback( | 259 void GpuScheduler::SetTokenCallback( |
316 const base::Callback<void(int32)>& callback) { | 260 const base::Callback<void(int32)>& callback) { |
317 DCHECK(set_token_callback_.is_null()); | 261 DCHECK(set_token_callback_.is_null()); |
318 set_token_callback_ = callback; | 262 set_token_callback_ = callback; |
319 } | 263 } |
320 | 264 |
321 void GpuScheduler::ScheduleProcessCommands() { | 265 GpuScheduler::GpuScheduler(CommandBuffer* command_buffer, |
322 MessageLoop::current()->PostTask( | 266 gles2::GLES2Decoder* decoder, |
323 FROM_HERE, | 267 CommandParser* parser) |
324 method_factory_.NewRunnableMethod(&GpuScheduler::ProcessCommands)); | 268 : command_buffer_(command_buffer), |
| 269 decoder_(decoder), |
| 270 parser_(parser), |
| 271 unscheduled_count_(0), |
| 272 #if defined(OS_MACOSX) || defined(TOUCH_UI) |
| 273 swap_buffers_count_(0), |
| 274 acknowledged_swap_buffers_count_(0), |
| 275 #endif |
| 276 method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { |
325 } | 277 } |
326 | 278 |
327 void GpuScheduler::WillResize(gfx::Size size) { | 279 void GpuScheduler::WillResize(gfx::Size size) { |
328 if (wrapped_resize_callback_.get()) { | 280 if (wrapped_resize_callback_.get()) { |
329 wrapped_resize_callback_->Run(size); | 281 wrapped_resize_callback_->Run(size); |
330 } | 282 } |
331 } | 283 } |
332 | 284 |
333 } // namespace gpu | 285 } // namespace gpu |
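
Note on the change above: the public GpuScheduler constructors are replaced by static Create()/CreateForTests() factories, and PutChanged(bool sync)/ProcessCommands() collapse into a single synchronous PutChanged() that requires the scheduler to already be scheduled. A minimal caller-side sketch, assuming only the signatures visible in the NEW column; the variable names (command_buffer, surface_manager, context_group, mock_decoder, mock_parser) are hypothetical:

    // Production path: the factory creates the GLES2 decoder internally and
    // wires it back to the scheduler (decoder->set_engine) before returning.
    gpu::GpuScheduler* scheduler =
        gpu::GpuScheduler::Create(command_buffer, surface_manager, context_group);

    // Test path: a test injects its own decoder and command parser instead.
    gpu::GpuScheduler* scheduler_for_tests =
        gpu::GpuScheduler::CreateForTests(command_buffer, mock_decoder, mock_parser);

    // With the new contract, PutChanged() DCHECKs IsScheduled() and processes
    // commands synchronously rather than posting ProcessCommands() to the
    // message loop, so the caller only pumps while the scheduler is scheduled.
    if (scheduler->IsScheduled())
      scheduler->PutChanged();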