OLD | NEW |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "gpu/command_buffer/service/gpu_scheduler.h" | 5 #include "gpu/command_buffer/service/gpu_scheduler.h" |
6 | 6 |
7 #include "base/callback.h" | 7 #include "base/callback.h" |
8 #include "base/command_line.h" | 8 #include "base/command_line.h" |
9 #include "base/compiler_specific.h" | 9 #include "base/compiler_specific.h" |
10 #include "base/debug/trace_event.h" | 10 #include "base/debug/trace_event.h" |
11 #include "base/message_loop.h" | 11 #include "base/message_loop.h" |
12 #include "base/time.h" | 12 #include "base/time.h" |
13 #include "ui/gfx/gl/gl_context.h" | 13 #include "ui/gfx/gl/gl_context.h" |
14 #include "ui/gfx/gl/gl_bindings.h" | 14 #include "ui/gfx/gl/gl_bindings.h" |
15 #include "ui/gfx/gl/gl_surface.h" | 15 #include "ui/gfx/gl/gl_surface.h" |
16 #include "ui/gfx/gl/gl_switches.h" | 16 #include "ui/gfx/gl/gl_switches.h" |
17 | 17 |
18 using ::base::SharedMemory; | 18 using ::base::SharedMemory; |
19 | 19 |
20 namespace gpu { | 20 namespace gpu { |
21 | 21 |
22 GpuScheduler::GpuScheduler(CommandBuffer* command_buffer, | 22 GpuScheduler* GpuScheduler::Create(CommandBuffer* command_buffer, |
23 SurfaceManager* surface_manager, | 23 SurfaceManager* surface_manager, |
24 gles2::ContextGroup* group) | 24 gles2::ContextGroup* group) { |
25 : command_buffer_(command_buffer), | |
26 commands_per_update_(100), | |
27 unscheduled_count_(0), | |
28 #if defined(OS_MACOSX) || defined(TOUCH_UI) | |
29 swap_buffers_count_(0), | |
30 acknowledged_swap_buffers_count_(0), | |
31 #endif | |
32 method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { | |
33 DCHECK(command_buffer); | 25 DCHECK(command_buffer); |
34 decoder_.reset(gles2::GLES2Decoder::Create(surface_manager, group)); | 26 |
35 decoder_->set_engine(this); | 27 gles2::GLES2Decoder* decoder = |
| 28 gles2::GLES2Decoder::Create(surface_manager, group); |
| 29 |
| 30 GpuScheduler* scheduler = new GpuScheduler(command_buffer, |
| 31 decoder, |
| 32 NULL); |
| 33 |
| 34 decoder->set_engine(scheduler); |
| 35 |
36 if (CommandLine::ForCurrentProcess()->HasSwitch( | 36 if (CommandLine::ForCurrentProcess()->HasSwitch( |
37 switches::kEnableGPUServiceLogging)) { | 37 switches::kEnableGPUServiceLogging)) { |
38 decoder_->set_debug(true); | 38 decoder->set_debug(true); |
39 } | 39 } |
| 40 |
| 41 return scheduler; |
40 } | 42 } |
41 | 43 |
42 GpuScheduler::GpuScheduler(CommandBuffer* command_buffer, | 44 GpuScheduler* GpuScheduler::CreateForTests(CommandBuffer* command_buffer, |
43 gles2::GLES2Decoder* decoder, | 45 gles2::GLES2Decoder* decoder, |
44 CommandParser* parser, | 46 CommandParser* parser) { |
45 int commands_per_update) | |
46 : command_buffer_(command_buffer), | |
47 commands_per_update_(commands_per_update), | |
48 unscheduled_count_(0), | |
49 #if defined(OS_MACOSX) || defined(TOUCH_UI) | |
50 swap_buffers_count_(0), | |
51 acknowledged_swap_buffers_count_(0), | |
52 #endif | |
53 method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { | |
54 DCHECK(command_buffer); | 47 DCHECK(command_buffer); |
55 decoder_.reset(decoder); | 48 GpuScheduler* scheduler = new GpuScheduler(command_buffer, |
56 parser_.reset(parser); | 49 decoder, |
| 50 parser); |
| 51 |
| 52 return scheduler; |
57 } | 53 } |
58 | 54 |
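Note on the hunk above: the two public constructors become static Create()/CreateForTests() factories so the decoder can be pointed back at the scheduler (decoder->set_engine(scheduler)) before the object is returned; Create() passes NULL for the parser because InitializeCommon() builds it once the ring buffer is mapped. A minimal caller-side sketch, assuming the surrounding stub plumbing that is not part of this file (variable names and the scoped_ptr ownership are illustrative):

    // Production path: the scheduler creates and owns its own decoder; the
    // parser is created later in InitializeCommon().
    scoped_ptr<gpu::GpuScheduler> scheduler(
        gpu::GpuScheduler::Create(command_buffer, surface_manager, group));

    // Test path: inject a decoder and parser (e.g. mocks) directly.
    scoped_ptr<gpu::GpuScheduler> scheduler_for_tests(
        gpu::GpuScheduler::CreateForTests(command_buffer, decoder, parser));
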
59 GpuScheduler::~GpuScheduler() { | 55 GpuScheduler::~GpuScheduler() { |
60 Destroy(); | 56 Destroy(); |
61 } | 57 } |
62 | 58 |
63 bool GpuScheduler::InitializeCommon( | 59 bool GpuScheduler::InitializeCommon( |
64 const scoped_refptr<gfx::GLSurface>& surface, | 60 const scoped_refptr<gfx::GLSurface>& surface, |
65 const scoped_refptr<gfx::GLContext>& context, | 61 const scoped_refptr<gfx::GLContext>& context, |
66 const gfx::Size& size, | 62 const gfx::Size& size, |
67 const gles2::DisallowedExtensions& disallowed_extensions, | 63 const gles2::DisallowedExtensions& disallowed_extensions, |
68 const char* allowed_extensions, | 64 const char* allowed_extensions, |
69 const std::vector<int32>& attribs) { | 65 const std::vector<int32>& attribs) { |
70 DCHECK(context); | 66 DCHECK(context); |
71 | 67 |
72 if (!context->MakeCurrent(surface)) | 68 if (!context->MakeCurrent(surface)) |
73 return false; | 69 return false; |
74 | 70 |
75 #if !defined(OS_MACOSX) | 71 #if !defined(OS_MACOSX) |
76 // Set up swap interval for onscreen contexts. | 72 // Set up swap interval for onscreen contexts. |
77 if (!surface->IsOffscreen()) { | 73 if (!surface->IsOffscreen()) { |
78 if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kDisableGpuVsync)) | 74 if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kDisableGpuVsync)) |
79 context->SetSwapInterval(0); | 75 context->SetSwapInterval(0); |
80 else | 76 else |
81 context->SetSwapInterval(1); | 77 context->SetSwapInterval(1); |
82 } | 78 } |
83 #endif | 79 #endif |
84 | 80 |
85 // Do not limit to a certain number of commands before scheduling another | |
86 // update when rendering onscreen. | |
87 if (!surface->IsOffscreen()) | |
88 commands_per_update_ = INT_MAX; | |
89 | |
90 // Map the ring buffer and create the parser. | 81 // Map the ring buffer and create the parser. |
91 Buffer ring_buffer = command_buffer_->GetRingBuffer(); | 82 Buffer ring_buffer = command_buffer_->GetRingBuffer(); |
92 if (ring_buffer.ptr) { | 83 if (ring_buffer.ptr) { |
93 parser_.reset(new CommandParser(ring_buffer.ptr, | 84 parser_.reset(new CommandParser(ring_buffer.ptr, |
94 ring_buffer.size, | 85 ring_buffer.size, |
95 0, | 86 0, |
96 ring_buffer.size, | 87 ring_buffer.size, |
97 0, | 88 0, |
98 decoder_.get())); | 89 decoder_.get())); |
99 } else { | 90 } else { |
(...skipping 37 matching lines...) |
137 else | 128 else |
138 return decoder_->SetParent(NULL, 0); | 129 return decoder_->SetParent(NULL, 0); |
139 } | 130 } |
140 | 131 |
141 #if defined(OS_MACOSX) || defined(TOUCH_UI) | 132 #if defined(OS_MACOSX) || defined(TOUCH_UI) |
142 namespace { | 133 namespace { |
143 const unsigned int kMaxOutstandingSwapBuffersCallsPerOnscreenContext = 1; | 134 const unsigned int kMaxOutstandingSwapBuffersCallsPerOnscreenContext = 1; |
144 } | 135 } |
145 #endif | 136 #endif |
146 | 137 |
147 void GpuScheduler::PutChanged(bool sync) { | 138 void GpuScheduler::PutChanged() { |
148 TRACE_EVENT1("gpu", "GpuScheduler:PutChanged", "this", this); | 139 TRACE_EVENT1("gpu", "GpuScheduler:PutChanged", "this", this); |
| 140 |
| 141 DCHECK(IsScheduled()); |
| 142 |
149 CommandBuffer::State state = command_buffer_->GetState(); | 143 CommandBuffer::State state = command_buffer_->GetState(); |
150 parser_->set_put(state.put_offset); | 144 parser_->set_put(state.put_offset); |
151 | |
152 if (sync) | |
153 ProcessCommands(); | |
154 else | |
155 ScheduleProcessCommands(); | |
156 } | |
157 | |
158 void GpuScheduler::ProcessCommands() { | |
159 TRACE_EVENT1("gpu", "GpuScheduler:ProcessCommands", "this", this); | |
160 CommandBuffer::State state = command_buffer_->GetState(); | |
161 if (state.error != error::kNoError) | 145 if (state.error != error::kNoError) |
162 return; | 146 return; |
163 | 147 |
164 if (unscheduled_count_ > 0) { | |
165 TRACE_EVENT1("gpu", "EarlyOut_Unscheduled", | |
166 "unscheduled_count_", unscheduled_count_); | |
167 return; | |
168 } | |
169 | |
170 if (decoder_.get()) { | 148 if (decoder_.get()) { |
171 if (!decoder_->MakeCurrent()) { | 149 if (!decoder_->MakeCurrent()) { |
172 LOG(ERROR) << "Context lost because MakeCurrent failed."; | 150 LOG(ERROR) << "Context lost because MakeCurrent failed."; |
173 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); | 151 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); |
174 command_buffer_->SetParseError(error::kLostContext); | 152 command_buffer_->SetParseError(error::kLostContext); |
175 return; | 153 return; |
176 } | 154 } |
177 } | 155 } |
178 | 156 |
179 #if defined(OS_MACOSX) | 157 #if defined(OS_MACOSX) |
180 bool do_rate_limiting = surface_.get() != NULL; | 158 bool do_rate_limiting = surface_.get() != NULL; |
181 #elif defined(TOUCH_UI) | 159 #elif defined(TOUCH_UI) |
182 bool do_rate_limiting = back_surface_.get() != NULL; | 160 bool do_rate_limiting = back_surface_.get() != NULL; |
183 #endif | 161 #endif |
184 | 162 |
185 #if defined(OS_MACOSX) || defined(TOUCH_UI) | 163 #if defined(OS_MACOSX) || defined(TOUCH_UI) |
186 // Don't swamp the browser process with SwapBuffers calls it can't handle. | 164 // Don't swamp the browser process with SwapBuffers calls it can't handle. |
187 if (do_rate_limiting && | 165 DCHECK(!do_rate_limiting || |
188 swap_buffers_count_ - acknowledged_swap_buffers_count_ >= | 166 swap_buffers_count_ - acknowledged_swap_buffers_count_ == 0); |
189 kMaxOutstandingSwapBuffersCallsPerOnscreenContext) { | |
190 TRACE_EVENT0("gpu", "EarlyOut_OSX_Throttle"); | |
191 // Stop doing work on this command buffer. In the GPU process, | |
192 // receipt of the GpuMsg_AcceleratedSurfaceBuffersSwappedACK | |
193 // message causes ProcessCommands to be scheduled again. | |
194 return; | |
195 } | |
196 #endif | 167 #endif |
197 | 168 |
198 base::TimeTicks start_time = base::TimeTicks::Now(); | |
199 base::TimeDelta elapsed; | |
200 bool is_break = false; | |
201 error::Error error = error::kNoError; | 169 error::Error error = error::kNoError; |
202 do { | 170 while (!parser_->IsEmpty()) { |
203 int commands_processed = 0; | 171 error = parser_->ProcessCommand(); |
204 while (commands_processed < commands_per_update_ && | |
205 !parser_->IsEmpty()) { | |
206 error = parser_->ProcessCommand(); | |
207 | 172 |
208 // TODO(piman): various classes duplicate various pieces of state, leading | 173 // TODO(piman): various classes duplicate various pieces of state, leading |
209 // to needlessly complex update logic. It should be possible to simply | 174 // to needlessly complex update logic. It should be possible to simply |
210 // share the state across all of them. | 175 // share the state across all of them. |
211 command_buffer_->SetGetOffset(static_cast<int32>(parser_->get())); | 176 command_buffer_->SetGetOffset(static_cast<int32>(parser_->get())); |
212 | 177 |
213 if (error == error::kWaiting || error == error::kYield) { | 178 if (error::IsError(error)) { |
214 is_break = true; | 179 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); |
215 break; | 180 command_buffer_->SetParseError(error); |
216 } else if (error::IsError(error)) { | 181 return; |
217 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); | 182 } |
218 command_buffer_->SetParseError(error); | |
219 return; | |
220 } | |
221 | 183 |
222 if (unscheduled_count_ > 0) { | 184 if (command_processed_callback_.get()) |
223 is_break = true; | 185 command_processed_callback_->Run(); |
224 break; | |
225 } | |
226 | 186 |
227 ++commands_processed; | 187 if (unscheduled_count_ > 0) |
228 if (command_processed_callback_.get()) { | 188 return; |
229 command_processed_callback_->Run(); | |
230 } | |
231 } | |
232 elapsed = base::TimeTicks::Now() - start_time; | |
233 } while(!is_break && | |
234 !parser_->IsEmpty() && | |
235 elapsed.InMicroseconds() < kMinimumSchedulerQuantumMicros); | |
236 | |
237 if (unscheduled_count_ == 0 && | |
238 error != error::kWaiting && | |
239 !parser_->IsEmpty()) { | |
240 ScheduleProcessCommands(); | |
241 } | 189 } |
242 } | 190 } |
243 | 191 |
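The reworked PutChanged() above drops the sync flag, the per-update command limit, and the self-reposting via ScheduleProcessCommands(): it simply drains the parser until it is empty, an error is set, or the scheduler is descheduled, and it DCHECKs that it is only entered while scheduled and (on Mac/touch builds) that no SwapBuffers call is still outstanding. A sketch of the caller-side contract this implies, with a hypothetical name for the owning stub (the real call sites are not in this file):

    void StubSketch::OnPutOffsetChanged() {
      // The new put offset has already been written to the command buffer's
      // shared state; the owner only kicks the scheduler while it is scheduled
      // and kicks it again once it becomes scheduled.
      if (scheduler_->IsScheduled())
        scheduler_->PutChanged();  // runs until empty, error, or descheduled
    }
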
244 void GpuScheduler::SetScheduled(bool scheduled) { | 192 void GpuScheduler::SetScheduled(bool scheduled) { |
245 TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this, | 193 TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this, |
246 "new unscheduled_count_", | 194 "new unscheduled_count_", |
247 unscheduled_count_ + (scheduled? -1 : 1)); | 195 unscheduled_count_ + (scheduled? -1 : 1)); |
248 if (scheduled) { | 196 if (scheduled) { |
249 --unscheduled_count_; | 197 --unscheduled_count_; |
250 DCHECK_GE(unscheduled_count_, 0); | 198 DCHECK_GE(unscheduled_count_, 0); |
251 | 199 |
252 if (unscheduled_count_ == 0) { | 200 if (unscheduled_count_ == 0 && scheduled_callback_.get()) |
253 if (scheduled_callback_.get()) | 201 scheduled_callback_->Run(); |
254 scheduled_callback_->Run(); | |
255 | |
256 ScheduleProcessCommands(); | |
257 } | |
258 } else { | 202 } else { |
259 ++unscheduled_count_; | 203 ++unscheduled_count_; |
260 } | 204 } |
261 } | 205 } |
262 | 206 |
263 bool GpuScheduler::IsScheduled() { | 207 bool GpuScheduler::IsScheduled() { |
264 return unscheduled_count_ == 0; | 208 return unscheduled_count_ == 0; |
265 } | 209 } |
266 | 210 |
267 void GpuScheduler::SetScheduledCallback(Callback0::Type* scheduled_callback) { | 211 void GpuScheduler::SetScheduledCallback(Callback0::Type* scheduled_callback) { |
(...skipping 45 matching lines...) |
313 Callback0::Type* callback) { | 257 Callback0::Type* callback) { |
314 command_processed_callback_.reset(callback); | 258 command_processed_callback_.reset(callback); |
315 } | 259 } |
316 | 260 |
317 void GpuScheduler::SetTokenCallback( | 261 void GpuScheduler::SetTokenCallback( |
318 const base::Callback<void(int32)>& callback) { | 262 const base::Callback<void(int32)>& callback) { |
319 DCHECK(set_token_callback_.is_null()); | 263 DCHECK(set_token_callback_.is_null()); |
320 set_token_callback_ = callback; | 264 set_token_callback_ = callback; |
321 } | 265 } |
322 | 266 |
323 void GpuScheduler::ScheduleProcessCommands() { | 267 GpuScheduler::GpuScheduler(CommandBuffer* command_buffer, |
324 MessageLoop::current()->PostTask( | 268 gles2::GLES2Decoder* decoder, |
325 FROM_HERE, | 269 CommandParser* parser) |
326 method_factory_.NewRunnableMethod(&GpuScheduler::ProcessCommands)); | 270 : command_buffer_(command_buffer), |
| 271 decoder_(decoder), |
| 272 parser_(parser), |
| 273 unscheduled_count_(0), |
| 274 #if defined(OS_MACOSX) || defined(TOUCH_UI) |
| 275 swap_buffers_count_(0), |
| 276 acknowledged_swap_buffers_count_(0), |
| 277 #endif |
| 278 method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { |
327 } | 279 } |
328 | 280 |
329 void GpuScheduler::WillResize(gfx::Size size) { | 281 void GpuScheduler::WillResize(gfx::Size size) { |
330 if (wrapped_resize_callback_.get()) { | 282 if (wrapped_resize_callback_.get()) { |
331 wrapped_resize_callback_->Run(size); | 283 wrapped_resize_callback_->Run(size); |
332 } | 284 } |
333 } | 285 } |
334 | 286 |
335 } // namespace gpu | 287 } // namespace gpu |