Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(781)

Side by Side Diff: content/common/gpu/gpu_command_buffer_stub.cc

Issue 1308913004: GPU Channels now maintain a global order number for each processed IPC. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: GPU Channel message queue placed in own class, applied suggestions Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/bind.h" 5 #include "base/bind.h"
6 #include "base/bind_helpers.h" 6 #include "base/bind_helpers.h"
7 #include "base/command_line.h" 7 #include "base/command_line.h"
8 #include "base/hash.h" 8 #include "base/hash.h"
9 #include "base/json/json_writer.h" 9 #include "base/json/json_writer.h"
10 #include "base/memory/shared_memory.h" 10 #include "base/memory/shared_memory.h"
(...skipping 182 matching lines...) Expand 10 before | Expand all | Expand 10 after
193 use_virtualized_gl_context_(use_virtualized_gl_context), 193 use_virtualized_gl_context_(use_virtualized_gl_context),
194 stream_id_(stream_id), 194 stream_id_(stream_id),
195 route_id_(route_id), 195 route_id_(route_id),
196 surface_id_(surface_id), 196 surface_id_(surface_id),
197 software_(software), 197 software_(software),
198 last_flush_count_(0), 198 last_flush_count_(0),
199 last_memory_allocation_valid_(false), 199 last_memory_allocation_valid_(false),
200 watchdog_(watchdog), 200 watchdog_(watchdog),
201 sync_point_wait_count_(0), 201 sync_point_wait_count_(0),
202 delayed_work_scheduled_(false), 202 delayed_work_scheduled_(false),
203 previous_messages_processed_(0), 203 previous_processed_num_(0),
204 active_url_(active_url), 204 active_url_(active_url),
205 total_gpu_memory_(0) { 205 total_gpu_memory_(0) {
206 active_url_hash_ = base::Hash(active_url.possibly_invalid_spec()); 206 active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
207 FastSetActiveURL(active_url_, active_url_hash_); 207 FastSetActiveURL(active_url_, active_url_hash_);
208 208
209 gpu::gles2::ContextCreationAttribHelper attrib_parser; 209 gpu::gles2::ContextCreationAttribHelper attrib_parser;
210 attrib_parser.Parse(requested_attribs_); 210 attrib_parser.Parse(requested_attribs_);
211 211
212 if (share_group) { 212 if (share_group) {
213 context_group_ = share_group->context_group_; 213 context_group_ = share_group->context_group_;
(...skipping 121 matching lines...) Expand 10 before | Expand all | Expand 10 after
335 } 335 }
336 336
337 void GpuCommandBufferStub::PollWork() { 337 void GpuCommandBufferStub::PollWork() {
338 TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork"); 338 TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
339 delayed_work_scheduled_ = false; 339 delayed_work_scheduled_ = false;
340 FastSetActiveURL(active_url_, active_url_hash_); 340 FastSetActiveURL(active_url_, active_url_hash_);
341 if (decoder_.get() && !MakeCurrent()) 341 if (decoder_.get() && !MakeCurrent())
342 return; 342 return;
343 343
344 if (scheduler_) { 344 if (scheduler_) {
345 uint64 current_messages_processed = 345 const uint32_t current_processed_num =
346 channel()->gpu_channel_manager()->MessagesProcessed(); 346 channel()->gpu_channel_manager()->ProcessedOrderNumber();
347 // We're idle when no messages were processed or scheduled. 347 // We're idle when no messages were processed or scheduled.
348 bool is_idle = 348 bool is_idle =
349 (previous_messages_processed_ == current_messages_processed) && 349 (previous_processed_num_ == current_processed_num) &&
350 !channel()->gpu_channel_manager()->HandleMessagesScheduled(); 350 !channel()->gpu_channel_manager()->HandleMessagesScheduled();
351 if (!is_idle && !last_idle_time_.is_null()) { 351 if (!is_idle && !last_idle_time_.is_null()) {
352 base::TimeDelta time_since_idle = 352 base::TimeDelta time_since_idle =
353 base::TimeTicks::Now() - last_idle_time_; 353 base::TimeTicks::Now() - last_idle_time_;
354 base::TimeDelta max_time_since_idle = 354 base::TimeDelta max_time_since_idle =
355 base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs); 355 base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);
356 356
357 // Force idle when it's been too long since last time we were idle. 357 // Force idle when it's been too long since last time we were idle.
358 if (time_since_idle > max_time_since_idle) 358 if (time_since_idle > max_time_since_idle)
359 is_idle = true; 359 is_idle = true;
(...skipping 21 matching lines...) Expand all
381 last_idle_time_ = base::TimeTicks(); 381 last_idle_time_ = base::TimeTicks();
382 return; 382 return;
383 } 383 }
384 384
385 if (delayed_work_scheduled_) 385 if (delayed_work_scheduled_)
386 return; 386 return;
387 delayed_work_scheduled_ = true; 387 delayed_work_scheduled_ = true;
388 388
389 // Idle when no messages are processed between now and when 389 // Idle when no messages are processed between now and when
390 // PollWork is called. 390 // PollWork is called.
391 previous_messages_processed_ = 391
392 channel()->gpu_channel_manager()->MessagesProcessed(); 392 previous_processed_num_ =
393 channel()->gpu_channel_manager()->ProcessedOrderNumber();
393 if (last_idle_time_.is_null()) 394 if (last_idle_time_.is_null())
394 last_idle_time_ = base::TimeTicks::Now(); 395 last_idle_time_ = base::TimeTicks::Now();
395 396
396 // IsScheduled() returns true after passing all unschedule fences 397 // IsScheduled() returns true after passing all unschedule fences
397 // and this is when we can start performing idle work. Idle work 398 // and this is when we can start performing idle work. Idle work
398 // is done synchronously so we can set delay to 0 and instead poll 399 // is done synchronously so we can set delay to 0 and instead poll
399 // for more work at the rate idle work is performed. This also ensures 400 // for more work at the rate idle work is performed. This also ensures
400 // that idle work is done as efficiently as possible without any 401 // that idle work is done as efficiently as possible without any
401 // unnecessary delays. 402 // unnecessary delays.
402 if (scheduler_.get() && 403 if (scheduler_.get() &&
(...skipping 777 matching lines...) Expand 10 before | Expand all | Expand 10 after
1180 result)); 1181 result));
1181 } 1182 }
1182 1183
1183 void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase, 1184 void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase,
1184 base::TimeDelta interval) { 1185 base::TimeDelta interval) {
1185 Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase, 1186 Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase,
1186 interval)); 1187 interval));
1187 } 1188 }
1188 1189
1189 } // namespace content 1190 } // namespace content
OLDNEW
« content/common/gpu/gpu_channel.cc ('K') | « content/common/gpu/gpu_command_buffer_stub.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698