Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/bind.h" | 5 #include "base/bind.h" |
| 6 #include "base/bind_helpers.h" | 6 #include "base/bind_helpers.h" |
| 7 #include "base/command_line.h" | 7 #include "base/command_line.h" |
| 8 #include "base/debug/trace_event.h" | 8 #include "base/debug/trace_event.h" |
| 9 #include "base/hash.h" | 9 #include "base/hash.h" |
| 10 #include "base/shared_memory.h" | 10 #include "base/shared_memory.h" |
| (...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 81 static size_t g_last_url_hash = 0; | 81 static size_t g_last_url_hash = 0; |
| 82 if (url_hash != g_last_url_hash) { | 82 if (url_hash != g_last_url_hash) { |
| 83 g_last_url_hash = url_hash; | 83 g_last_url_hash = url_hash; |
| 84 GetContentClient()->SetActiveURL(url); | 84 GetContentClient()->SetActiveURL(url); |
| 85 } | 85 } |
| 86 } | 86 } |
| 87 | 87 |
| 88 // The first time polling a fence, delay some extra time to allow other | 88 // The first time polling a fence, delay some extra time to allow other |
| 89 // stubs to process some work, or else the timing of the fences could | 89 // stubs to process some work, or else the timing of the fences could |
| 90 // allow a pattern of alternating fast and slow frames to occur. | 90 // allow a pattern of alternating fast and slow frames to occur. |
| 91 const int64 kHandleMoreWorkPeriodMs = 2; | 91 const int64 kHandleMoreWorkPeriodMicroseconds = 2000; |
| 92 const int64 kHandleMoreWorkPeriodBusyMs = 1; | 92 const int64 kHandleMoreWorkPeriodBusyMicroseconds = 1000; |
| 93 | |
| 94 // Use a shorter delay when there's idle work to be done. | |
| 95 const int64 kHandleMoreIdleWorkPeriodBusyMicroseconds = 100; | |
| 96 // Prevents idle work from being starved. | |
| 97 const int64 kMaxTimeSinceIdleMicroseconds = 10000; | |
| 93 | 98 |
| 94 } // namespace | 99 } // namespace |
| 95 | 100 |
| 96 GpuCommandBufferStub::GpuCommandBufferStub( | 101 GpuCommandBufferStub::GpuCommandBufferStub( |
| 97 GpuChannel* channel, | 102 GpuChannel* channel, |
| 98 GpuCommandBufferStub* share_group, | 103 GpuCommandBufferStub* share_group, |
| 99 const gfx::GLSurfaceHandle& handle, | 104 const gfx::GLSurfaceHandle& handle, |
| 100 gpu::gles2::MailboxManager* mailbox_manager, | 105 gpu::gles2::MailboxManager* mailbox_manager, |
| 101 gpu::gles2::ImageManager* image_manager, | 106 gpu::gles2::ImageManager* image_manager, |
| 102 const gfx::Size& size, | 107 const gfx::Size& size, |
| (...skipping 16 matching lines...) Expand all Loading... | |
| 119 route_id_(route_id), | 124 route_id_(route_id), |
| 120 surface_id_(surface_id), | 125 surface_id_(surface_id), |
| 121 software_(software), | 126 software_(software), |
| 122 last_flush_count_(0), | 127 last_flush_count_(0), |
| 123 last_memory_allocation_valid_(false), | 128 last_memory_allocation_valid_(false), |
| 124 parent_stub_for_initialization_(), | 129 parent_stub_for_initialization_(), |
| 125 parent_texture_for_initialization_(0), | 130 parent_texture_for_initialization_(0), |
| 126 watchdog_(watchdog), | 131 watchdog_(watchdog), |
| 127 sync_point_wait_count_(0), | 132 sync_point_wait_count_(0), |
| 128 delayed_work_scheduled_(false), | 133 delayed_work_scheduled_(false), |
| 134 is_idle_(false), | |
| 129 active_url_(active_url), | 135 active_url_(active_url), |
| 130 total_gpu_memory_(0) { | 136 total_gpu_memory_(0) { |
| 131 active_url_hash_ = base::Hash(active_url.possibly_invalid_spec()); | 137 active_url_hash_ = base::Hash(active_url.possibly_invalid_spec()); |
| 132 FastSetActiveURL(active_url_, active_url_hash_); | 138 FastSetActiveURL(active_url_, active_url_hash_); |
| 133 if (share_group) { | 139 if (share_group) { |
| 134 context_group_ = share_group->context_group_; | 140 context_group_ = share_group->context_group_; |
| 135 } else { | 141 } else { |
| 136 context_group_ = new gpu::gles2::ContextGroup( | 142 context_group_ = new gpu::gles2::ContextGroup( |
| 137 mailbox_manager, | 143 mailbox_manager, |
| 138 image_manager, | 144 image_manager, |
| (...skipping 20 matching lines...) Expand all Loading... | |
| 159 // messages directed at the command buffer. This ensures that the message | 165 // messages directed at the command buffer. This ensures that the message |
| 160 // handler can assume that the context is current (not necessary for | 166 // handler can assume that the context is current (not necessary for |
| 161 // Echo, RetireSyncPoint, or WaitSyncPoint). | 167 // Echo, RetireSyncPoint, or WaitSyncPoint). |
| 162 if (decoder_.get() && | 168 if (decoder_.get() && |
| 163 message.type() != GpuCommandBufferMsg_Echo::ID && | 169 message.type() != GpuCommandBufferMsg_Echo::ID && |
| 164 message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID) { | 170 message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID) { |
| 165 if (!MakeCurrent()) | 171 if (!MakeCurrent()) |
| 166 return false; | 172 return false; |
| 167 } | 173 } |
| 168 | 174 |
| 175 is_idle_ = false; | |
| 176 | |
| 169 // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers | 177 // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers |
| 170 // here. This is so the reply can be delayed if the scheduler is unscheduled. | 178 // here. This is so the reply can be delayed if the scheduler is unscheduled. |
| 171 bool handled = true; | 179 bool handled = true; |
| 172 IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message) | 180 IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message) |
| 173 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize, | 181 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize, |
| 174 OnInitialize); | 182 OnInitialize); |
| 175 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer, | 183 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer, |
| 176 OnSetGetBuffer); | 184 OnSetGetBuffer); |
| 177 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetParent, | 185 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetParent, |
| 178 OnSetParent); | 186 OnSetParent); |
| (...skipping 25 matching lines...) Expand all Loading... | |
| 204 OnSignalSyncPoint) | 212 OnSignalSyncPoint) |
| 205 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SendClientManagedMemoryStats, | 213 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SendClientManagedMemoryStats, |
| 206 OnReceivedClientManagedMemoryStats) | 214 OnReceivedClientManagedMemoryStats) |
| 207 IPC_MESSAGE_HANDLER( | 215 IPC_MESSAGE_HANDLER( |
| 208 GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback, | 216 GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback, |
| 209 OnSetClientHasMemoryAllocationChangedCallback) | 217 OnSetClientHasMemoryAllocationChangedCallback) |
| 210 IPC_MESSAGE_UNHANDLED(handled = false) | 218 IPC_MESSAGE_UNHANDLED(handled = false) |
| 211 IPC_END_MESSAGE_MAP() | 219 IPC_END_MESSAGE_MAP() |
| 212 | 220 |
| 213 // Ensure that any delayed work that was created will be handled. | 221 // Ensure that any delayed work that was created will be handled. |
| 214 ScheduleDelayedWork(kHandleMoreWorkPeriodMs); | 222 ScheduleDelayedWork(kHandleMoreWorkPeriodMicroseconds); |
| 215 | 223 |
| 216 DCHECK(handled); | 224 DCHECK(handled); |
| 217 return handled; | 225 return handled; |
| 218 } | 226 } |
| 219 | 227 |
| 220 bool GpuCommandBufferStub::Send(IPC::Message* message) { | 228 bool GpuCommandBufferStub::Send(IPC::Message* message) { |
| 221 return channel_->Send(message); | 229 return channel_->Send(message); |
| 222 } | 230 } |
| 223 | 231 |
| 224 bool GpuCommandBufferStub::IsScheduled() { | 232 bool GpuCommandBufferStub::IsScheduled() { |
| 225 return (!scheduler_.get() || scheduler_->IsScheduled()); | 233 return (!scheduler_.get() || scheduler_->IsScheduled()); |
| 226 } | 234 } |
| 227 | 235 |
| 228 bool GpuCommandBufferStub::HasMoreWork() { | 236 bool GpuCommandBufferStub::HasMoreWork() { |
| 229 return scheduler_.get() && scheduler_->HasMoreWork(); | 237 return scheduler_.get() && scheduler_->HasMoreWork(); |
| 230 } | 238 } |
| 231 | 239 |
| 232 void GpuCommandBufferStub::PollWork() { | 240 void GpuCommandBufferStub::PollWork() { |
| 233 TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork"); | 241 TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork"); |
| 234 delayed_work_scheduled_ = false; | 242 delayed_work_scheduled_ = false; |
| 235 FastSetActiveURL(active_url_, active_url_hash_); | 243 FastSetActiveURL(active_url_, active_url_hash_); |
| 236 if (decoder_.get() && !MakeCurrent()) | 244 if (decoder_.get() && !MakeCurrent()) |
| 237 return; | 245 return; |
| 238 if (scheduler_.get()) | 246 int64 delay = kHandleMoreWorkPeriodBusyMicroseconds; |
| 239 scheduler_->PollUnscheduleFences(); | 247 if (scheduler_.get()) { |
| 240 ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs); | 248 bool fences_complete = scheduler_->PollUnscheduleFences(); |
| 249 // Perform idle work if all fences are complete. | |
| 250 if (fences_complete) { | |
| 251 DCHECK(!last_idle_time_.is_null()); | |
| 252 base::TimeDelta time_since_idle = base::TimeTicks::Now() - | |
| 253 last_idle_time_; | |
| 254 base::TimeDelta max_time_since_idle = | |
| 255 base::TimeDelta::FromMicroseconds(kMaxTimeSinceIdleMicroseconds); | |
| 256 | |
| 257 // Perform some idle work if idle or it's been too long since | |
| 258 // last time we were idle. | |
| 259 if (is_idle_ || time_since_idle > max_time_since_idle) { | |
| 260 last_idle_time_ = base::TimeTicks::Now(); | |
| 261 // Use idle work period delay when there's more idle work pending. | |
| 262 if (scheduler_->PerformIdleWork()) | |
| 263 delay = kHandleMoreIdleWorkPeriodBusyMicroseconds; | |
| 264 } | |
| 265 } | |
| 266 } | |
| 267 ScheduleDelayedWork(delay); | |
| 241 } | 268 } |
| 242 | 269 |
| 243 bool GpuCommandBufferStub::HasUnprocessedCommands() { | 270 bool GpuCommandBufferStub::HasUnprocessedCommands() { |
| 244 if (command_buffer_.get()) { | 271 if (command_buffer_.get()) { |
| 245 gpu::CommandBuffer::State state = command_buffer_->GetLastState(); | 272 gpu::CommandBuffer::State state = command_buffer_->GetLastState(); |
| 246 return state.put_offset != state.get_offset && | 273 return state.put_offset != state.get_offset && |
| 247 !gpu::error::IsError(state.error); | 274 !gpu::error::IsError(state.error); |
| 248 } | 275 } |
| 249 return false; | 276 return false; |
| 250 } | 277 } |
| 251 | 278 |
| 252 void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) { | 279 void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) { |
| 253 if (HasMoreWork() && !delayed_work_scheduled_) { | 280 if (!HasMoreWork()) { |
| 254 delayed_work_scheduled_ = true; | 281 last_idle_time_ = base::TimeTicks(); |
| 255 MessageLoop::current()->PostDelayedTask( | 282 return; |
| 256 FROM_HERE, | |
| 257 base::Bind(&GpuCommandBufferStub::PollWork, | |
| 258 AsWeakPtr()), | |
| 259 base::TimeDelta::FromMilliseconds(delay)); | |
| 260 } | 283 } |
| 284 | |
| 285 if (delayed_work_scheduled_) | |
| 286 return; | |
| 287 delayed_work_scheduled_ = true; | |
| 288 | |
| 289 // Consider the command buffer idle if no messages are received between | |
|
epenner
2013/03/08 21:44:57
It seems like there might be a way this could be d… [comment truncated in export]
reveman
2013/03/12 00:31:42
Latest patch has a much more efficient way of dete… [comment truncated in export]
| |
| 290 // now and when PollWork is called. | |
| 291 is_idle_ = true; | |
| 292 if (last_idle_time_.is_null()) | |
| 293 last_idle_time_ = base::TimeTicks::Now(); | |
| 294 | |
| 295 MessageLoop::current()->PostDelayedTask( | |
| 296 FROM_HERE, | |
| 297 base::Bind(&GpuCommandBufferStub::PollWork, | |
| 298 AsWeakPtr()), | |
| 299 base::TimeDelta::FromMicroseconds(delay)); | |
| 261 } | 300 } |
| 262 | 301 |
| 263 void GpuCommandBufferStub::OnEcho(const IPC::Message& message) { | 302 void GpuCommandBufferStub::OnEcho(const IPC::Message& message) { |
| 264 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEcho"); | 303 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnEcho"); |
| 265 Send(new IPC::Message(message)); | 304 Send(new IPC::Message(message)); |
| 266 } | 305 } |
| 267 | 306 |
| 268 bool GpuCommandBufferStub::MakeCurrent() { | 307 bool GpuCommandBufferStub::MakeCurrent() { |
| 269 if (decoder_->MakeCurrent()) | 308 if (decoder_->MakeCurrent()) |
| 270 return true; | 309 return true; |
| (...skipping 602 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 873 if (surface_ && MakeCurrent()) | 912 if (surface_ && MakeCurrent()) |
| 874 surface_->SetFrontbufferAllocation( | 913 surface_->SetFrontbufferAllocation( |
| 875 allocation.browser_allocation.suggest_have_frontbuffer); | 914 allocation.browser_allocation.suggest_have_frontbuffer); |
| 876 } | 915 } |
| 877 | 916 |
| 878 last_memory_allocation_valid_ = true; | 917 last_memory_allocation_valid_ = true; |
| 879 last_memory_allocation_ = allocation; | 918 last_memory_allocation_ = allocation; |
| 880 } | 919 } |
| 881 | 920 |
| 882 } // namespace content | 921 } // namespace content |
| OLD | NEW |