Chromium Code Reviews

Diff: content/common/gpu/gpu_command_buffer_stub.cc

Issue 1308913004: GPU Channels now maintain a global order number for each processed IPC. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Fix merge error (created 5 years, 3 months ago)
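The core of this change, per the issue title, is that the GPU channels share a single global order number: each arriving IPC is stamped with the next number, and that number is recorded again once the message has been processed. Only the accessors UnprocessedOrderNumber() and ProcessedOrderNumber() are visible in the diff below; the class name and the other members in the sketch that follows are hypothetical, a minimal illustration of how such counters could be kept rather than the actual GpuChannelManager implementation.

#include <stdint.h>

#include <atomic>

// Hypothetical sketch of global order-number bookkeeping. Every IPC received
// on any GPU channel is stamped with the next global order number, and the
// number is recorded again once the message has been handled.
class GlobalOrderNumbers {
 public:
  // Called when an IPC is received: returns the order number assigned to it.
  uint32_t GenerateOrderNumber() {
    return unprocessed_order_num_.fetch_add(1, std::memory_order_relaxed) + 1;
  }

  // Called after the IPC stamped with |order_num| has been processed.
  void OnMessageProcessed(uint32_t order_num) {
    processed_order_num_.store(order_num, std::memory_order_release);
  }

  // Highest order number handed out so far (newest received IPC).
  uint32_t UnprocessedOrderNumber() const {
    return unprocessed_order_num_.load(std::memory_order_relaxed);
  }

  // Order number of the most recently processed IPC.
  uint32_t ProcessedOrderNumber() const {
    return processed_order_num_.load(std::memory_order_acquire);
  }

 private:
  std::atomic<uint32_t> unprocessed_order_num_{0};
  std::atomic<uint32_t> processed_order_num_{0};
};

With counters like these, the stub's idle test in PollWork() reduces to asking whether any new IPC has been stamped since the stub last recorded the processed number.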
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/bind.h"
 #include "base/bind_helpers.h"
 #include "base/command_line.h"
 #include "base/hash.h"
 #include "base/json/json_writer.h"
 #include "base/memory/shared_memory.h"
(...skipping 182 matching lines...)
       use_virtualized_gl_context_(use_virtualized_gl_context),
       stream_id_(stream_id),
       route_id_(route_id),
       surface_id_(surface_id),
       software_(software),
       last_flush_count_(0),
       last_memory_allocation_valid_(false),
       watchdog_(watchdog),
       sync_point_wait_count_(0),
       delayed_work_scheduled_(false),
-      previous_messages_processed_(0),
+      previous_processed_num_(0),
       active_url_(active_url),
       total_gpu_memory_(0) {
   active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
   FastSetActiveURL(active_url_, active_url_hash_);

   gpu::gles2::ContextCreationAttribHelper attrib_parser;
   attrib_parser.Parse(requested_attribs_);

   if (share_group) {
     context_group_ = share_group->context_group_;
(...skipping 121 matching lines...)
 }

 void GpuCommandBufferStub::PollWork() {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
   delayed_work_scheduled_ = false;
   FastSetActiveURL(active_url_, active_url_hash_);
   if (decoder_.get() && !MakeCurrent())
     return;

   if (scheduler_) {
-    uint64 current_messages_processed =
-        channel()->gpu_channel_manager()->MessagesProcessed();
+    const uint32_t current_unprocessed_num =
+        channel()->gpu_channel_manager()->UnprocessedOrderNumber();
     // We're idle when no messages were processed or scheduled.
-    bool is_idle =
-        (previous_messages_processed_ == current_messages_processed) &&
-        !channel()->gpu_channel_manager()->HandleMessagesScheduled();
+    bool is_idle = (previous_processed_num_ == current_unprocessed_num);
     if (!is_idle && !last_idle_time_.is_null()) {
       base::TimeDelta time_since_idle =
           base::TimeTicks::Now() - last_idle_time_;
       base::TimeDelta max_time_since_idle =
           base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

       // Force idle when it's been too long since last time we were idle.
       if (time_since_idle > max_time_since_idle)
         is_idle = true;
     }
(...skipping 20 matching lines...)
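For readability, the new idle test, together with the existing force-idle timeout kept just below it, can be restated as a standalone predicate. This is only a restatement of the hunk above with the inputs passed in explicitly; no such helper exists in GpuCommandBufferStub.

#include <stdint.h>

#include "base/time/time.h"

// Sketch of the idle decision in PollWork(): the stub is idle when no IPC has
// been assigned a new order number since the last poll, and it is forced idle
// when too much time has passed since it was last idle.
bool ComputeIsIdle(uint32_t previous_processed_num,
                   uint32_t current_unprocessed_num,
                   base::TimeTicks last_idle_time,
                   base::TimeDelta max_time_since_idle) {
  bool is_idle = (previous_processed_num == current_unprocessed_num);
  if (!is_idle && !last_idle_time.is_null()) {
    base::TimeDelta time_since_idle = base::TimeTicks::Now() - last_idle_time;
    if (time_since_idle > max_time_since_idle)
      is_idle = true;  // Force idle when it has been too long since last idle.
  }
  return is_idle;
}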
     last_idle_time_ = base::TimeTicks();
     return;
   }

   if (delayed_work_scheduled_)
     return;
   delayed_work_scheduled_ = true;

   // Idle when no messages are processed between now and when
   // PollWork is called.
-  previous_messages_processed_ =
-      channel()->gpu_channel_manager()->MessagesProcessed();
+  previous_processed_num_ =
+      channel()->gpu_channel_manager()->ProcessedOrderNumber();
   if (last_idle_time_.is_null())
     last_idle_time_ = base::TimeTicks::Now();

   // IsScheduled() returns true after passing all unschedule fences
   // and this is when we can start performing idle work. Idle work
   // is done synchronously so we can set delay to 0 and instead poll
   // for more work at the rate idle work is performed. This also ensures
   // that idle work is done as efficiently as possible without any
   // unnecessary delays.
   if (scheduler_.get() &&
(...skipping 777 matching lines...)
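The comment in this hunk describes the polling policy: once the scheduler reports it is schedulable (all unschedule fences passed), idle work runs synchronously and PollWork is re-posted with zero delay, so polling proceeds at the rate idle work completes; otherwise the poll waits a fixed period. The code that implements this is elided above; the snippet below is only a hypothetical illustration of that policy, with an assumed period constant and a hypothetical function name, not the stub's real scheduling code.

#include "base/callback.h"
#include "base/location.h"
#include "base/thread_task_runner_handle.h"
#include "base/time/time.h"

// Hypothetical illustration of the delayed-work polling policy described in
// the comment above: zero delay while the scheduler is runnable, a fixed
// period otherwise.
void PostPollWorkSketch(bool scheduler_is_scheduled,
                        const base::Closure& poll_work) {
  const int kHandleMoreWorkPeriodMs = 2;  // assumed value for illustration
  base::TimeDelta delay =
      scheduler_is_scheduled
          ? base::TimeDelta()  // idle work runs synchronously; poll right away
          : base::TimeDelta::FromMilliseconds(kHandleMoreWorkPeriodMs);
  base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, poll_work,
                                                       delay);
}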
                  result));
 }

 void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase,
                                                      base::TimeDelta interval) {
   Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase,
                                                      interval));
 }

 }  // namespace content