Chromium Code Reviews

Unified Diff: content/common/gpu/gpu_command_buffer_stub.cc

Issue 12040049: gpu: Implement idle async pixel transfers. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Add DCHECKs to ensure idle async uploads are only used with GL_TEXTURE_2D target Created 7 years, 9 months ago
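Note on the patch set description above: the DCHECKs it mentions live elsewhere in this CL (in the async pixel transfer code), not in this file. Purely as a hedged illustration, with a hypothetical function name rather than the CL's actual code, that kind of guard would look roughly like:

#include <GLES2/gl2.h>
#include "base/logging.h"

// Hypothetical sketch: reject idle async uploads for anything other than
// GL_TEXTURE_2D, matching the intent stated in the patch set title.
void ScheduleIdleAsyncUpload(GLenum target /*, upload parameters... */) {
  DCHECK_EQ(static_cast<GLenum>(GL_TEXTURE_2D), target);
  // ... queue the upload so it runs from the scheduler's idle work ...
}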
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/bind.h"
 #include "base/bind_helpers.h"
 #include "base/command_line.h"
 #include "base/debug/trace_event.h"
 #include "base/hash.h"
 #include "base/shared_memory.h"
(...skipping 73 matching lines...)
     GetContentClient()->SetActiveURL(url);
   }
 }

 // The first time polling a fence, delay some extra time to allow other
 // stubs to process some work, or else the timing of the fences could
 // allow a pattern of alternating fast and slow frames to occur.
 const int64 kHandleMoreWorkPeriodMs = 2;
 const int64 kHandleMoreWorkPeriodBusyMs = 1;

+// Prevents idle work from being starved.
+const int64 kMaxTimeSinceIdleMs = 10;
+
 }  // namespace
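The three constants in the hunk above set the polling cadence for PollWork and a cap on how long idle work may be deferred. A minimal standalone sketch of how they are meant to be read (the helper functions are hypothetical, added only for illustration):

#include <cstdint>

namespace {

const int64_t kHandleMoreWorkPeriodMs = 2;      // delay before the first poll
const int64_t kHandleMoreWorkPeriodBusyMs = 1;  // delay between busy polls
const int64_t kMaxTimeSinceIdleMs = 10;         // cap so idle work is not starved

// Hypothetical helper: pick the delay for the next PollWork task.
int64_t NextPollDelayMs(bool first_poll_after_new_work) {
  return first_poll_after_new_work ? kHandleMoreWorkPeriodMs
                                   : kHandleMoreWorkPeriodBusyMs;
}

// Hypothetical helper: force an idle pass once the cap is exceeded.
bool ShouldForceIdle(int64_t ms_since_last_idle) {
  return ms_since_last_idle > kMaxTimeSinceIdleMs;
}

}  // namespace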

 GpuCommandBufferStub::GpuCommandBufferStub(
     GpuChannel* channel,
     GpuCommandBufferStub* share_group,
     const gfx::GLSurfaceHandle& handle,
     gpu::gles2::MailboxManager* mailbox_manager,
     gpu::gles2::ImageManager* image_manager,
     const gfx::Size& size,
     const gpu::gles2::DisallowedFeatures& disallowed_features,
(...skipping 15 matching lines...)
       route_id_(route_id),
       surface_id_(surface_id),
       software_(software),
       last_flush_count_(0),
       last_memory_allocation_valid_(false),
       parent_stub_for_initialization_(),
       parent_texture_for_initialization_(0),
       watchdog_(watchdog),
       sync_point_wait_count_(0),
       delayed_work_scheduled_(false),
+      previous_messages_processed_(0),
       active_url_(active_url),
       total_gpu_memory_(0) {
   active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
   FastSetActiveURL(active_url_, active_url_hash_);
   if (share_group) {
     context_group_ = share_group->context_group_;
   } else {
     context_group_ = new gpu::gles2::ContextGroup(
         mailbox_manager,
         image_manager,
(...skipping 89 matching lines...)
 bool GpuCommandBufferStub::HasMoreWork() {
   return scheduler_.get() && scheduler_->HasMoreWork();
 }

 void GpuCommandBufferStub::PollWork() {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
   delayed_work_scheduled_ = false;
   FastSetActiveURL(active_url_, active_url_hash_);
   if (decoder_.get() && !MakeCurrent())
     return;
-  if (scheduler_.get())
-    scheduler_->PollUnscheduleFences();
+
+  if (scheduler_.get()) {
+    bool fences_complete = scheduler_->PollUnscheduleFences();
+    // Perform idle work if all fences are complete.
+    if (fences_complete) {
+      uint64 current_messages_processed =
+          channel()->gpu_channel_manager()->MessagesProcessed();
+      // We're idle when no messages were processed or scheduled.
+      bool is_idle =
+          (previous_messages_processed_ == current_messages_processed) &&
+          !channel()->gpu_channel_manager()->HandleMessagesScheduled();
+      if (!is_idle && !last_idle_time_.is_null()) {
+        base::TimeDelta time_since_idle = base::TimeTicks::Now() -
+            last_idle_time_;
+        base::TimeDelta max_time_since_idle =
+            base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);
+
+        // Force idle when it's been too long since last time we were idle.
+        if (time_since_idle > max_time_since_idle)
+          is_idle = true;
+      }
+
+      if (is_idle) {
+        last_idle_time_ = base::TimeTicks::Now();
+        scheduler_->PerformIdleWork();
+      }
+    }
+  }
   ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
 }
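Read in isolation, the idle heuristic added to PollWork above is: the stub is idle when no channel messages were processed since the poll was scheduled and none are currently queued, and idleness is forced once kMaxTimeSinceIdleMs has elapsed without an idle pass, so idle work cannot be starved by a steady stream of messages. A simplified sketch of that decision, using plain parameters instead of the GpuChannelManager accessors used above:

#include <cstdint>

// Simplified model of the is_idle computation in PollWork above. This is a
// paraphrase for illustration, not the real Chromium interfaces.
bool ShouldPerformIdleWork(uint64_t previous_messages_processed,
                           uint64_t current_messages_processed,
                           bool messages_scheduled,
                           bool has_been_idle_before,
                           int64_t ms_since_last_idle,
                           int64_t max_ms_since_idle) {
  // Idle when nothing was processed since the poll was scheduled and
  // nothing is waiting to run.
  bool is_idle = previous_messages_processed == current_messages_processed &&
                 !messages_scheduled;
  // Force idle when it has been too long since the last idle pass.
  if (!is_idle && has_been_idle_before &&
      ms_since_last_idle > max_ms_since_idle) {
    is_idle = true;
  }
  return is_idle;
}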

 bool GpuCommandBufferStub::HasUnprocessedCommands() {
   if (command_buffer_.get()) {
     gpu::CommandBuffer::State state = command_buffer_->GetLastState();
     return state.put_offset != state.get_offset &&
         !gpu::error::IsError(state.error);
   }
   return false;
 }

 void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
-  if (HasMoreWork() && !delayed_work_scheduled_) {
-    delayed_work_scheduled_ = true;
-    MessageLoop::current()->PostDelayedTask(
-        FROM_HERE,
-        base::Bind(&GpuCommandBufferStub::PollWork,
-                   AsWeakPtr()),
-        base::TimeDelta::FromMilliseconds(delay));
+  if (!HasMoreWork()) {
+    last_idle_time_ = base::TimeTicks();
+    return;
   }
+
+  if (delayed_work_scheduled_)
+    return;
+  delayed_work_scheduled_ = true;
+
+  // Idle when no messages are processed between now and when
+  // PollWork is called.
+  previous_messages_processed_ =
+      channel()->gpu_channel_manager()->MessagesProcessed();
+  if (last_idle_time_.is_null())
+    last_idle_time_ = base::TimeTicks::Now();
+
+  // IsScheduled() returns true after passing all unschedule fences
+  // and this is when we can start performing idle work. Idle work
+  // is done synchronously so we can set delay to 0 and instead poll
+  // for more work at the rate idle work is performed. This also ensures
+  // that idle work is done as efficiently as possible without any
+  // unnecessary delays.
+  if (scheduler_.get() &&
+      scheduler_->IsScheduled() &&
+      scheduler_->HasMoreIdleWork()) {
+    delay = 0;
+  }
+
+  MessageLoop::current()->PostDelayedTask(
+      FROM_HERE,
+      base::Bind(&GpuCommandBufferStub::PollWork,
+                 AsWeakPtr()),
+      base::TimeDelta::FromMilliseconds(delay));
 }
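The rewritten ScheduleDelayedWork above can be summarized as: if there is no more work, reset the idle clock and do not post a poll task; otherwise record the message counter and, when the scheduler is runnable and has pending idle work, post PollWork with a zero delay so polling is paced by the idle work itself. A hedged sketch of just the delay selection, with a free function standing in for the member state (not the actual code):

#include <cstdint>

// Assumed simplification of the delay choice in ScheduleDelayedWork above.
// Returns a negative value when no poll task should be posted at all.
int64_t ChoosePollDelayMs(bool has_more_work,
                          bool delayed_work_already_scheduled,
                          bool scheduler_is_scheduled,
                          bool has_more_idle_work,
                          int64_t requested_delay_ms) {
  if (!has_more_work || delayed_work_already_scheduled)
    return -1;  // nothing to poll for, or a poll is already pending
  // Idle work runs synchronously from PollWork, so a zero delay lets the
  // rate of idle work itself pace the polling without extra latency.
  if (scheduler_is_scheduled && has_more_idle_work)
    return 0;
  return requested_delay_ms;
}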

 void GpuCommandBufferStub::OnEcho(const IPC::Message& message) {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEcho");
   Send(new IPC::Message(message));
 }

 bool GpuCommandBufferStub::MakeCurrent() {
   if (decoder_->MakeCurrent())
     return true;
(...skipping 610 matching lines...)
     if (surface_ && MakeCurrent())
       surface_->SetFrontbufferAllocation(
           allocation.browser_allocation.suggest_have_frontbuffer);
   }

   last_memory_allocation_valid_ = true;
   last_memory_allocation_ = allocation;
 }

 }  // namespace content