Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(589)

Side by Side Diff: content/common/gpu/gpu_command_buffer_stub.cc

Issue 1308913004: GPU Channels now maintain a global order number for each processed IPC. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Updated command buffer stub to use 32 bit order numbers Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/bind.h" 5 #include "base/bind.h"
6 #include "base/bind_helpers.h" 6 #include "base/bind_helpers.h"
7 #include "base/command_line.h" 7 #include "base/command_line.h"
8 #include "base/hash.h" 8 #include "base/hash.h"
9 #include "base/json/json_writer.h" 9 #include "base/json/json_writer.h"
10 #include "base/memory/shared_memory.h" 10 #include "base/memory/shared_memory.h"
(...skipping 181 matching lines...) Expand 10 before | Expand all | Expand 10 after
192 use_virtualized_gl_context_(use_virtualized_gl_context), 192 use_virtualized_gl_context_(use_virtualized_gl_context),
193 stream_id_(stream_id), 193 stream_id_(stream_id),
194 route_id_(route_id), 194 route_id_(route_id),
195 surface_id_(surface_id), 195 surface_id_(surface_id),
196 software_(software), 196 software_(software),
197 last_flush_count_(0), 197 last_flush_count_(0),
198 last_memory_allocation_valid_(false), 198 last_memory_allocation_valid_(false),
199 watchdog_(watchdog), 199 watchdog_(watchdog),
200 sync_point_wait_count_(0), 200 sync_point_wait_count_(0),
201 delayed_work_scheduled_(false), 201 delayed_work_scheduled_(false),
202 previous_messages_processed_(0), 202 previous_processed_num_(0),
203 active_url_(active_url), 203 active_url_(active_url),
204 total_gpu_memory_(0) { 204 total_gpu_memory_(0) {
205 active_url_hash_ = base::Hash(active_url.possibly_invalid_spec()); 205 active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
206 FastSetActiveURL(active_url_, active_url_hash_); 206 FastSetActiveURL(active_url_, active_url_hash_);
207 207
208 gpu::gles2::ContextCreationAttribHelper attrib_parser; 208 gpu::gles2::ContextCreationAttribHelper attrib_parser;
209 attrib_parser.Parse(requested_attribs_); 209 attrib_parser.Parse(requested_attribs_);
210 210
211 if (share_group) { 211 if (share_group) {
212 context_group_ = share_group->context_group_; 212 context_group_ = share_group->context_group_;
(...skipping 121 matching lines...) Expand 10 before | Expand all | Expand 10 after
334 } 334 }
335 335
336 void GpuCommandBufferStub::PollWork() { 336 void GpuCommandBufferStub::PollWork() {
337 TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork"); 337 TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
338 delayed_work_scheduled_ = false; 338 delayed_work_scheduled_ = false;
339 FastSetActiveURL(active_url_, active_url_hash_); 339 FastSetActiveURL(active_url_, active_url_hash_);
340 if (decoder_.get() && !MakeCurrent()) 340 if (decoder_.get() && !MakeCurrent())
341 return; 341 return;
342 342
343 if (scheduler_) { 343 if (scheduler_) {
344 uint64 current_messages_processed = 344 const uint32_t current_processed_num =
345 channel()->gpu_channel_manager()->MessagesProcessed(); 345 channel()->gpu_channel_manager()->ProcessedOrderNumber();
346 // We're idle when no messages were processed or scheduled. 346 // We're idle when no messages were processed or scheduled.
347 bool is_idle = 347 bool is_idle =
348 (previous_messages_processed_ == current_messages_processed) && 348 (previous_processed_num_ == current_processed_num) &&
349 !channel()->gpu_channel_manager()->HandleMessagesScheduled(); 349 !channel()->gpu_channel_manager()->HandleMessagesScheduled();
350 if (!is_idle && !last_idle_time_.is_null()) { 350 if (!is_idle && !last_idle_time_.is_null()) {
351 base::TimeDelta time_since_idle = 351 base::TimeDelta time_since_idle =
352 base::TimeTicks::Now() - last_idle_time_; 352 base::TimeTicks::Now() - last_idle_time_;
353 base::TimeDelta max_time_since_idle = 353 base::TimeDelta max_time_since_idle =
354 base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs); 354 base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);
355 355
356 // Force idle when it's been too long since last time we were idle. 356 // Force idle when it's been too long since last time we were idle.
357 if (time_since_idle > max_time_since_idle) 357 if (time_since_idle > max_time_since_idle)
358 is_idle = true; 358 is_idle = true;
(...skipping 21 matching lines...) Expand all
380 last_idle_time_ = base::TimeTicks(); 380 last_idle_time_ = base::TimeTicks();
381 return; 381 return;
382 } 382 }
383 383
384 if (delayed_work_scheduled_) 384 if (delayed_work_scheduled_)
385 return; 385 return;
386 delayed_work_scheduled_ = true; 386 delayed_work_scheduled_ = true;
387 387
388 // Idle when no messages are processed between now and when 388 // Idle when no messages are processed between now and when
389 // PollWork is called. 389 // PollWork is called.
390 previous_messages_processed_ = 390
391 channel()->gpu_channel_manager()->MessagesProcessed(); 391 previous_processed_num_ =
392 channel()->gpu_channel_manager()->ProcessedOrderNumber();
392 if (last_idle_time_.is_null()) 393 if (last_idle_time_.is_null())
393 last_idle_time_ = base::TimeTicks::Now(); 394 last_idle_time_ = base::TimeTicks::Now();
394 395
395 // IsScheduled() returns true after passing all unschedule fences 396 // IsScheduled() returns true after passing all unschedule fences
396 // and this is when we can start performing idle work. Idle work 397 // and this is when we can start performing idle work. Idle work
397 // is done synchronously so we can set delay to 0 and instead poll 398 // is done synchronously so we can set delay to 0 and instead poll
398 // for more work at the rate idle work is performed. This also ensures 399 // for more work at the rate idle work is performed. This also ensures
399 // that idle work is done as efficiently as possible without any 400 // that idle work is done as efficiently as possible without any
400 // unnecessary delays. 401 // unnecessary delays.
401 if (scheduler_.get() && 402 if (scheduler_.get() &&
(...skipping 777 matching lines...) Expand 10 before | Expand all | Expand 10 after
1179 result)); 1180 result));
1180 } 1181 }
1181 1182
1182 void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase, 1183 void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase,
1183 base::TimeDelta interval) { 1184 base::TimeDelta interval) {
1184 Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase, 1185 Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase,
1185 interval)); 1186 interval));
1186 } 1187 }
1187 1188
1188 } // namespace content 1189 } // namespace content
OLDNEW
« content/common/gpu/gpu_channel_manager.cc ('K') | « content/common/gpu/gpu_command_buffer_stub.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698