Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/bind.h" | 5 #include "base/bind.h" |
| 6 #include "base/bind_helpers.h" | 6 #include "base/bind_helpers.h" |
| 7 #include "base/command_line.h" | 7 #include "base/command_line.h" |
| 8 #include "base/debug/trace_event.h" | 8 #include "base/debug/trace_event.h" |
| 9 #include "base/hash.h" | 9 #include "base/hash.h" |
| 10 #include "base/shared_memory.h" | 10 #include "base/shared_memory.h" |
| (...skipping 71 matching lines...) | |
| 82 if (url_hash != g_last_url_hash) { | 82 if (url_hash != g_last_url_hash) { |
| 83 g_last_url_hash = url_hash; | 83 g_last_url_hash = url_hash; |
| 84 GetContentClient()->SetActiveURL(url); | 84 GetContentClient()->SetActiveURL(url); |
| 85 } | 85 } |
| 86 } | 86 } |
| 87 | 87 |
| 88 // The first time polling a fence, delay some extra time to allow other | 88 // The first time polling a fence, delay some extra time to allow other |
| 89 // stubs to process some work, or else the timing of the fences could | 89 // stubs to process some work, or else the timing of the fences could |
| 90 // allow a pattern of alternating fast and slow frames to occur. | 90 // allow a pattern of alternating fast and slow frames to occur. |
| 91 const int64 kHandleMoreWorkPeriodMs = 2; | 91 const int64 kHandleMoreWorkPeriodMs = 2; |
| 92 const int64 kHandleMoreWorkPeriodBusyMs = 1; | 92 const int64 kHandleMoreWorkPeriodBusyMs = 1; |
| Sami (2013/03/14 15:18:07): This seems unused now? | |
| reveman (2013/03/14 19:21:47): Oh, thanks for noticing this. Latest patch makes s | |
| 93 | 93 |
| 94 // Prevents idle work from being starved. | |
| 95 const int64 kMaxTimeSinceIdleMs = 10; | |
| 96 | |
| 94 } // namespace | 97 } // namespace |
| 95 | 98 |
| 96 GpuCommandBufferStub::GpuCommandBufferStub( | 99 GpuCommandBufferStub::GpuCommandBufferStub( |
| 97 GpuChannel* channel, | 100 GpuChannel* channel, |
| 98 GpuCommandBufferStub* share_group, | 101 GpuCommandBufferStub* share_group, |
| 99 const gfx::GLSurfaceHandle& handle, | 102 const gfx::GLSurfaceHandle& handle, |
| 100 gpu::gles2::MailboxManager* mailbox_manager, | 103 gpu::gles2::MailboxManager* mailbox_manager, |
| 101 gpu::gles2::ImageManager* image_manager, | 104 gpu::gles2::ImageManager* image_manager, |
| 102 const gfx::Size& size, | 105 const gfx::Size& size, |
| 103 const gpu::gles2::DisallowedFeatures& disallowed_features, | 106 const gpu::gles2::DisallowedFeatures& disallowed_features, |
| (...skipping 15 matching lines...) | |
| 119 route_id_(route_id), | 122 route_id_(route_id), |
| 120 surface_id_(surface_id), | 123 surface_id_(surface_id), |
| 121 software_(software), | 124 software_(software), |
| 122 last_flush_count_(0), | 125 last_flush_count_(0), |
| 123 last_memory_allocation_valid_(false), | 126 last_memory_allocation_valid_(false), |
| 124 parent_stub_for_initialization_(), | 127 parent_stub_for_initialization_(), |
| 125 parent_texture_for_initialization_(0), | 128 parent_texture_for_initialization_(0), |
| 126 watchdog_(watchdog), | 129 watchdog_(watchdog), |
| 127 sync_point_wait_count_(0), | 130 sync_point_wait_count_(0), |
| 128 delayed_work_scheduled_(false), | 131 delayed_work_scheduled_(false), |
| 132 previous_messages_processed_(0), | |
| 129 active_url_(active_url), | 133 active_url_(active_url), |
| 130 total_gpu_memory_(0) { | 134 total_gpu_memory_(0) { |
| 131 active_url_hash_ = base::Hash(active_url.possibly_invalid_spec()); | 135 active_url_hash_ = base::Hash(active_url.possibly_invalid_spec()); |
| 132 FastSetActiveURL(active_url_, active_url_hash_); | 136 FastSetActiveURL(active_url_, active_url_hash_); |
| 133 if (share_group) { | 137 if (share_group) { |
| 134 context_group_ = share_group->context_group_; | 138 context_group_ = share_group->context_group_; |
| 135 } else { | 139 } else { |
| 136 context_group_ = new gpu::gles2::ContextGroup( | 140 context_group_ = new gpu::gles2::ContextGroup( |
| 137 mailbox_manager, | 141 mailbox_manager, |
| 138 image_manager, | 142 image_manager, |
| (...skipping 65 matching lines...) | |
| 204 OnSignalSyncPoint) | 208 OnSignalSyncPoint) |
| 205 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SendClientManagedMemoryStats, | 209 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SendClientManagedMemoryStats, |
| 206 OnReceivedClientManagedMemoryStats) | 210 OnReceivedClientManagedMemoryStats) |
| 207 IPC_MESSAGE_HANDLER( | 211 IPC_MESSAGE_HANDLER( |
| 208 GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback, | 212 GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback, |
| 209 OnSetClientHasMemoryAllocationChangedCallback) | 213 OnSetClientHasMemoryAllocationChangedCallback) |
| 210 IPC_MESSAGE_UNHANDLED(handled = false) | 214 IPC_MESSAGE_UNHANDLED(handled = false) |
| 211 IPC_END_MESSAGE_MAP() | 215 IPC_END_MESSAGE_MAP() |
| 212 | 216 |
| 213 // Ensure that any delayed work that was created will be handled. | 217 // Ensure that any delayed work that was created will be handled. |
| 214 ScheduleDelayedWork(kHandleMoreWorkPeriodMs); | 218 ScheduleDelayedWork(); |
| 215 | 219 |
| 216 DCHECK(handled); | 220 DCHECK(handled); |
| 217 return handled; | 221 return handled; |
| 218 } | 222 } |
| 219 | 223 |
| 220 bool GpuCommandBufferStub::Send(IPC::Message* message) { | 224 bool GpuCommandBufferStub::Send(IPC::Message* message) { |
| 221 return channel_->Send(message); | 225 return channel_->Send(message); |
| 222 } | 226 } |
| 223 | 227 |
| 224 bool GpuCommandBufferStub::IsScheduled() { | 228 bool GpuCommandBufferStub::IsScheduled() { |
| 225 return (!scheduler_.get() || scheduler_->IsScheduled()); | 229 return (!scheduler_.get() || scheduler_->IsScheduled()); |
| 226 } | 230 } |
| 227 | 231 |
| 228 bool GpuCommandBufferStub::HasMoreWork() { | 232 bool GpuCommandBufferStub::HasMoreWork() { |
| 229 return scheduler_.get() && scheduler_->HasMoreWork(); | 233 return scheduler_.get() && scheduler_->HasMoreWork(); |
| 230 } | 234 } |
| 231 | 235 |
| 232 void GpuCommandBufferStub::PollWork() { | 236 void GpuCommandBufferStub::PollWork() { |
| 233 TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork"); | 237 TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork"); |
| 234 delayed_work_scheduled_ = false; | 238 delayed_work_scheduled_ = false; |
| 235 FastSetActiveURL(active_url_, active_url_hash_); | 239 FastSetActiveURL(active_url_, active_url_hash_); |
| 236 if (decoder_.get() && !MakeCurrent()) | 240 if (decoder_.get() && !MakeCurrent()) |
| 237 return; | 241 return; |
| 238 if (scheduler_.get()) | 242 |
| 239 scheduler_->PollUnscheduleFences(); | 243 if (scheduler_.get()) { |
| 240 ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs); | 244 bool fences_complete = scheduler_->PollUnscheduleFences(); |
| 245 // Perform idle work if all fences are complete. | |
| 246 if (fences_complete) { | |
| 247 uint64 current_messages_processed = | |
| 248 channel()->gpu_channel_manager()->MessagesProcessed(); | |
| 249 // We're idle when no messages were processed or scheduled. | |
| 250 bool is_idle = | |
| 251 (previous_messages_processed_ == current_messages_processed) && | |
| 252 !channel()->gpu_channel_manager()->HandleMessagesScheduled(); | |
| 253 if (!is_idle && !last_idle_time_.is_null()) { | |
| 254 base::TimeDelta time_since_idle = base::TimeTicks::Now() - | |
| 255 last_idle_time_; | |
| 256 base::TimeDelta max_time_since_idle = | |
| 257 base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs); | |
| 258 | |
| 259 // Force idle when it's been too long since last time we were idle. | |
| 260 if (time_since_idle > max_time_since_idle) | |
| 261 is_idle = true; | |
| 262 } | |
| 263 | |
| 264 if (is_idle) { | |
| 265 last_idle_time_ = base::TimeTicks::Now(); | |
| 266 scheduler_->PerformIdleWork(); | |
| 267 } | |
| 268 } | |
| 269 } | |
| 270 ScheduleDelayedWork(); | |
| 241 } | 271 } |
| 242 | 272 |
| 243 bool GpuCommandBufferStub::HasUnprocessedCommands() { | 273 bool GpuCommandBufferStub::HasUnprocessedCommands() { |
| 244 if (command_buffer_.get()) { | 274 if (command_buffer_.get()) { |
| 245 gpu::CommandBuffer::State state = command_buffer_->GetLastState(); | 275 gpu::CommandBuffer::State state = command_buffer_->GetLastState(); |
| 246 return state.put_offset != state.get_offset && | 276 return state.put_offset != state.get_offset && |
| 247 !gpu::error::IsError(state.error); | 277 !gpu::error::IsError(state.error); |
| 248 } | 278 } |
| 249 return false; | 279 return false; |
| 250 } | 280 } |
| 251 | 281 |
| 252 void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) { | 282 void GpuCommandBufferStub::ScheduleDelayedWork() { |
| 253 if (HasMoreWork() && !delayed_work_scheduled_) { | 283 if (!HasMoreWork()) { |
| 254 delayed_work_scheduled_ = true; | 284 last_idle_time_ = base::TimeTicks(); |
| 255 MessageLoop::current()->PostDelayedTask( | 285 return; |
| 256 FROM_HERE, | |
| 257 base::Bind(&GpuCommandBufferStub::PollWork, | |
| 258 AsWeakPtr()), | |
| 259 base::TimeDelta::FromMilliseconds(delay)); | |
| 260 } | 286 } |
| 287 | |
| 288 if (delayed_work_scheduled_) | |
| 289 return; | |
| 290 delayed_work_scheduled_ = true; | |
| 291 | |
| 292 // Idle when no messages are processed between now and when | |
| 293 // PollWork is called. | |
| 294 previous_messages_processed_ = | |
| 295 channel()->gpu_channel_manager()->MessagesProcessed(); | |
| 296 if (last_idle_time_.is_null()) | |
| 297 last_idle_time_ = base::TimeTicks::Now(); | |
| 298 | |
| 299 int64 delay = kHandleMoreWorkPeriodMs; | |
| 300 // No need for a delay when there's idle work to be done and we're | |
| epenner (2013/03/14 18:23:24): I think I get this, but would help to have "becaus | |
| reveman (2013/03/14 19:21:47): I've updated this comment to make it more clear wh | |
| 301 // not waiting on unschedule fences. | |
| Sami (2013/03/14 15:18:07): Typo: unschedule | |
| reveman (2013/03/14 19:21:47): It sounds awkward but this is what we currently ca | |
| 302 if (scheduler_.get() && | |
| 303 scheduler_->IsScheduled() && | |
| 304 scheduler_->HasMoreIdleWork()) { | |
| 305 delay = 0; | |
| 306 } | |
| 307 | |
| 308 MessageLoop::current()->PostDelayedTask( | |
| 309 FROM_HERE, | |
| 310 base::Bind(&GpuCommandBufferStub::PollWork, | |
| 311 AsWeakPtr()), | |
| 312 base::TimeDelta::FromMilliseconds(delay)); | |
| 261 } | 313 } |
| 262 | 314 |
| 263 void GpuCommandBufferStub::OnEcho(const IPC::Message& message) { | 315 void GpuCommandBufferStub::OnEcho(const IPC::Message& message) { |
| 264 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEcho"); | 316 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEcho"); |
| 265 Send(new IPC::Message(message)); | 317 Send(new IPC::Message(message)); |
| 266 } | 318 } |
| 267 | 319 |
| 268 bool GpuCommandBufferStub::MakeCurrent() { | 320 bool GpuCommandBufferStub::MakeCurrent() { |
| 269 if (decoder_->MakeCurrent()) | 321 if (decoder_->MakeCurrent()) |
| 270 return true; | 322 return true; |
| (...skipping 610 matching lines...) | |
| 881 if (surface_ && MakeCurrent()) | 933 if (surface_ && MakeCurrent()) |
| 882 surface_->SetFrontbufferAllocation( | 934 surface_->SetFrontbufferAllocation( |
| 883 allocation.browser_allocation.suggest_have_frontbuffer); | 935 allocation.browser_allocation.suggest_have_frontbuffer); |
| 884 } | 936 } |
| 885 | 937 |
| 886 last_memory_allocation_valid_ = true; | 938 last_memory_allocation_valid_ = true; |
| 887 last_memory_allocation_ = allocation; | 939 last_memory_allocation_ = allocation; |
| 888 } | 940 } |
| 889 | 941 |
| 890 } // namespace content | 942 } // namespace content |
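To summarize the change under review: `PollWork` now performs idle work only when the stub has actually been idle (no channel messages processed or scheduled since the poll was posted), and forces idle work after `kMaxTimeSinceIdleMs` so it cannot be starved. The sketch below restates that policy in isolation. The constant and the notions behind `MessagesProcessed()`, `HandleMessagesScheduled()`, and `PerformIdleWork()` come from the diff above; the `IdleWorkPolicy` class, the `ChannelStats` struct, and the `std::chrono` scaffolding are hypothetical stand-ins, not Chromium code.

```cpp
#include <chrono>
#include <cstdint>
#include <iostream>

// Matches kMaxTimeSinceIdleMs in the patch: idle work is forced if we have
// not been idle for this long, so idle work cannot be starved.
constexpr std::chrono::milliseconds kMaxTimeSinceIdle{10};

// Hypothetical stand-in for the channel state the patch consults.
struct ChannelStats {
  uint64_t messages_processed = 0;  // mirrors MessagesProcessed()
  bool messages_scheduled = false;  // mirrors HandleMessagesScheduled()
};

class IdleWorkPolicy {
 public:
  using Clock = std::chrono::steady_clock;

  // Called when delayed work is scheduled: remember how many messages had
  // been processed so the next poll can tell whether we went idle.
  void OnScheduleDelayedWork(const ChannelStats& stats) {
    previous_messages_processed_ = stats.messages_processed;
    if (last_idle_time_ == Clock::time_point())
      last_idle_time_ = Clock::now();
  }

  // Called from the poll once all unschedule fences are complete.
  // Returns true if idle work should be performed now.
  bool ShouldPerformIdleWork(const ChannelStats& stats) {
    // Idle when no messages were processed or scheduled since the poll
    // was posted.
    bool is_idle =
        stats.messages_processed == previous_messages_processed_ &&
        !stats.messages_scheduled;

    // Force idle when it's been too long since we were last idle.
    if (!is_idle && last_idle_time_ != Clock::time_point() &&
        Clock::now() - last_idle_time_ > kMaxTimeSinceIdle) {
      is_idle = true;
    }

    if (is_idle)
      last_idle_time_ = Clock::now();
    return is_idle;
  }

 private:
  uint64_t previous_messages_processed_ = 0;
  Clock::time_point last_idle_time_{};
};

int main() {
  IdleWorkPolicy policy;
  ChannelStats stats;

  // Nothing was processed between scheduling and polling: counts as idle.
  policy.OnScheduleDelayedWork(stats);
  std::cout << std::boolalpha << policy.ShouldPerformIdleWork(stats) << "\n";

  // A busy channel: new messages arrived before the poll ran, so idle work
  // is deferred (until the kMaxTimeSinceIdle cap kicks in).
  policy.OnScheduleDelayedWork(stats);
  stats.messages_processed += 5;
  std::cout << policy.ShouldPerformIdleWork(stats) << "\n";
  return 0;
}
```

The real patch additionally resets `last_idle_time_` in `ScheduleDelayedWork` when there is no more work, and drops the poll delay to 0 when the scheduler is scheduled and reports `HasMoreIdleWork()`, so pending idle work is picked up promptly instead of waiting the full `kHandleMoreWorkPeriodMs`.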