| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/common/gpu/gpu_memory_manager.h" | 5 #include "content/common/gpu/gpu_memory_manager.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 | 8 |
| 9 #include "base/bind.h" | 9 #include "base/bind.h" |
| 10 #include "base/command_line.h" | 10 #include "base/command_line.h" |
| 11 #include "base/message_loop/message_loop.h" | 11 #include "base/message_loop/message_loop.h" |
| 12 #include "base/process/process_handle.h" | 12 #include "base/process/process_handle.h" |
| 13 #include "base/strings/string_number_conversions.h" | 13 #include "base/strings/string_number_conversions.h" |
| 14 #include "base/trace_event/trace_event.h" | 14 #include "base/trace_event/trace_event.h" |
| 15 #include "content/common/gpu/gpu_channel_manager.h" | 15 #include "content/common/gpu/gpu_channel_manager.h" |
| 16 #include "content/common/gpu/gpu_memory_manager_client.h" | |
| 17 #include "content/common/gpu/gpu_memory_tracking.h" | 16 #include "content/common/gpu/gpu_memory_tracking.h" |
| 18 #include "content/common/gpu/gpu_memory_uma_stats.h" | 17 #include "content/common/gpu/gpu_memory_uma_stats.h" |
| 19 #include "content/common/gpu/gpu_messages.h" | 18 #include "content/common/gpu/gpu_messages.h" |
| 20 #include "gpu/command_buffer/common/gpu_memory_allocation.h" | 19 #include "gpu/command_buffer/common/gpu_memory_allocation.h" |
| 21 #include "gpu/command_buffer/service/gpu_switches.h" | 20 #include "gpu/command_buffer/service/gpu_switches.h" |
| 22 | 21 |
| 23 using gpu::MemoryAllocation; | 22 using gpu::MemoryAllocation; |
| 24 | 23 |
| 25 namespace content { | 24 namespace content { |
| 26 namespace { | 25 namespace { |
| 27 | 26 |
| 28 const int kDelayedScheduleManageTimeoutMs = 67; | |
| 29 | |
| 30 const uint64 kBytesAllocatedStep = 16 * 1024 * 1024; | 27 const uint64 kBytesAllocatedStep = 16 * 1024 * 1024; |
| 31 | 28 |
| 32 void TrackValueChanged(uint64 old_size, uint64 new_size, uint64* total_size) { | 29 void TrackValueChanged(uint64 old_size, uint64 new_size, uint64* total_size) { |
| 33 DCHECK(new_size > old_size || *total_size >= (old_size - new_size)); | 30 DCHECK(new_size > old_size || *total_size >= (old_size - new_size)); |
| 34 *total_size += (new_size - old_size); | 31 *total_size += (new_size - old_size); |
| 35 } | 32 } |
| 36 | 33 |
| 37 } | 34 } |
| 38 | 35 |
| 39 GpuMemoryManager::GpuMemoryManager( | 36 GpuMemoryManager::GpuMemoryManager(GpuChannelManager* channel_manager) |
| 40 GpuChannelManager* channel_manager, | |
| 41 uint64 max_surfaces_with_frontbuffer_soft_limit) | |
| 42 : channel_manager_(channel_manager), | 37 : channel_manager_(channel_manager), |
| 43 manage_immediate_scheduled_(false), | |
| 44 disable_schedule_manage_(false), | |
| 45 max_surfaces_with_frontbuffer_soft_limit_( | |
| 46 max_surfaces_with_frontbuffer_soft_limit), | |
| 47 client_hard_limit_bytes_(0), | |
| 48 bytes_allocated_current_(0), | 38 bytes_allocated_current_(0), |
| 49 bytes_allocated_historical_max_(0) | 39 bytes_allocated_historical_max_(0) {} |
| 50 { } | |
| 51 | 40 |
| 52 GpuMemoryManager::~GpuMemoryManager() { | 41 GpuMemoryManager::~GpuMemoryManager() { |
| 53 DCHECK(tracking_groups_.empty()); | 42 DCHECK(tracking_groups_.empty()); |
| 54 DCHECK(clients_visible_mru_.empty()); | |
| 55 DCHECK(clients_nonvisible_mru_.empty()); | |
| 56 DCHECK(clients_nonsurface_.empty()); | |
| 57 DCHECK(!bytes_allocated_current_); | 43 DCHECK(!bytes_allocated_current_); |
| 58 } | 44 } |
| 59 | 45 |
| 60 void GpuMemoryManager::ScheduleManage( | |
| 61 ScheduleManageTime schedule_manage_time) { | |
| 62 if (disable_schedule_manage_) | |
| 63 return; | |
| 64 if (manage_immediate_scheduled_) | |
| 65 return; | |
| 66 if (schedule_manage_time == kScheduleManageNow) { | |
| 67 base::ThreadTaskRunnerHandle::Get()->PostTask( | |
| 68 FROM_HERE, base::Bind(&GpuMemoryManager::Manage, AsWeakPtr())); | |
| 69 manage_immediate_scheduled_ = true; | |
| 70 if (!delayed_manage_callback_.IsCancelled()) | |
| 71 delayed_manage_callback_.Cancel(); | |
| 72 } else { | |
| 73 if (!delayed_manage_callback_.IsCancelled()) | |
| 74 return; | |
| 75 delayed_manage_callback_.Reset(base::Bind(&GpuMemoryManager::Manage, | |
| 76 AsWeakPtr())); | |
| 77 base::ThreadTaskRunnerHandle::Get()->PostDelayedTask( | |
| 78 FROM_HERE, delayed_manage_callback_.callback(), | |
| 79 base::TimeDelta::FromMilliseconds(kDelayedScheduleManageTimeoutMs)); | |
| 80 } | |
| 81 } | |
| 82 | |
| 83 void GpuMemoryManager::TrackMemoryAllocatedChange( | 46 void GpuMemoryManager::TrackMemoryAllocatedChange( |
| 84 GpuMemoryTrackingGroup* tracking_group, | 47 GpuMemoryTrackingGroup* tracking_group, |
| 85 uint64 old_size, | 48 uint64 old_size, |
| 86 uint64 new_size) { | 49 uint64 new_size) { |
| 87 TrackValueChanged(old_size, new_size, &tracking_group->size_); | 50 TrackValueChanged(old_size, new_size, &tracking_group->size_); |
| 88 TrackValueChanged(old_size, new_size, &bytes_allocated_current_); | 51 TrackValueChanged(old_size, new_size, &bytes_allocated_current_); |
| 89 | 52 |
| 90 if (GetCurrentUsage() > bytes_allocated_historical_max_ + | 53 if (GetCurrentUsage() > bytes_allocated_historical_max_ + |
| 91 kBytesAllocatedStep) { | 54 kBytesAllocatedStep) { |
| 92 bytes_allocated_historical_max_ = GetCurrentUsage(); | 55 bytes_allocated_historical_max_ = GetCurrentUsage(); |
| 93 // If we're blowing into new memory usage territory, spam the browser | 56 // If we're blowing into new memory usage territory, spam the browser |
| 94 // process with the most up-to-date information about our memory usage. | 57 // process with the most up-to-date information about our memory usage. |
| 95 SendUmaStatsToBrowser(); | 58 SendUmaStatsToBrowser(); |
| 96 } | 59 } |
| 97 } | 60 } |
| 98 | 61 |
| 99 bool GpuMemoryManager::EnsureGPUMemoryAvailable(uint64 /* size_needed */) { | 62 bool GpuMemoryManager::EnsureGPUMemoryAvailable(uint64 /* size_needed */) { |
| 100 // TODO: Check if there is enough space. Lose contexts until there is. | 63 // TODO: Check if there is enough space. Lose contexts until there is. |
| 101 return true; | 64 return true; |
| 102 } | 65 } |
| 103 | 66 |
| 104 GpuMemoryManagerClientState* GpuMemoryManager::CreateClientState( | |
| 105 GpuMemoryManagerClient* client, | |
| 106 bool has_surface, | |
| 107 bool visible) { | |
| 108 TrackingGroupMap::iterator tracking_group_it = | |
| 109 tracking_groups_.find(client->GetMemoryTracker()); | |
| 110 DCHECK(tracking_group_it != tracking_groups_.end()); | |
| 111 GpuMemoryTrackingGroup* tracking_group = tracking_group_it->second; | |
| 112 | |
| 113 GpuMemoryManagerClientState* client_state = new GpuMemoryManagerClientState( | |
| 114 this, client, tracking_group, has_surface, visible); | |
| 115 AddClientToList(client_state); | |
| 116 ScheduleManage(kScheduleManageNow); | |
| 117 return client_state; | |
| 118 } | |
| 119 | |
| 120 void GpuMemoryManager::OnDestroyClientState( | |
| 121 GpuMemoryManagerClientState* client_state) { | |
| 122 RemoveClientFromList(client_state); | |
| 123 ScheduleManage(kScheduleManageLater); | |
| 124 } | |
| 125 | |
| 126 void GpuMemoryManager::SetClientStateVisible( | |
| 127 GpuMemoryManagerClientState* client_state, bool visible) { | |
| 128 DCHECK(client_state->has_surface_); | |
| 129 if (client_state->visible_ == visible) | |
| 130 return; | |
| 131 | |
| 132 RemoveClientFromList(client_state); | |
| 133 client_state->visible_ = visible; | |
| 134 AddClientToList(client_state); | |
| 135 ScheduleManage(visible ? kScheduleManageNow : kScheduleManageLater); | |
| 136 } | |
| 137 | |
| 138 uint64 GpuMemoryManager::GetTrackerMemoryUsage( | 67 uint64 GpuMemoryManager::GetTrackerMemoryUsage( |
| 139 gpu::gles2::MemoryTracker* tracker) const { | 68 gpu::gles2::MemoryTracker* tracker) const { |
| 140 TrackingGroupMap::const_iterator tracking_group_it = | 69 TrackingGroupMap::const_iterator tracking_group_it = |
| 141 tracking_groups_.find(tracker); | 70 tracking_groups_.find(tracker); |
| 142 DCHECK(tracking_group_it != tracking_groups_.end()); | 71 DCHECK(tracking_group_it != tracking_groups_.end()); |
| 143 return tracking_group_it->second->GetSize(); | 72 return tracking_group_it->second->GetSize(); |
| 144 } | 73 } |
| 145 | 74 |
| 146 GpuMemoryTrackingGroup* GpuMemoryManager::CreateTrackingGroup( | 75 GpuMemoryTrackingGroup* GpuMemoryManager::CreateTrackingGroup( |
| 147 base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker) { | 76 base::ProcessId pid, gpu::gles2::MemoryTracker* memory_tracker) { |
| (...skipping 26 matching lines...) |
| 174 video_memory_usage_stats->process_map[ | 103 video_memory_usage_stats->process_map[ |
| 175 base::GetCurrentProcId()].video_memory = GetCurrentUsage(); | 104 base::GetCurrentProcId()].video_memory = GetCurrentUsage(); |
| 176 video_memory_usage_stats->process_map[ | 105 video_memory_usage_stats->process_map[ |
| 177 base::GetCurrentProcId()].has_duplicates = true; | 106 base::GetCurrentProcId()].has_duplicates = true; |
| 178 | 107 |
| 179 video_memory_usage_stats->bytes_allocated = GetCurrentUsage(); | 108 video_memory_usage_stats->bytes_allocated = GetCurrentUsage(); |
| 180 video_memory_usage_stats->bytes_allocated_historical_max = | 109 video_memory_usage_stats->bytes_allocated_historical_max = |
| 181 bytes_allocated_historical_max_; | 110 bytes_allocated_historical_max_; |
| 182 } | 111 } |
| 183 | 112 |
| 184 void GpuMemoryManager::Manage() { | |
| 185 manage_immediate_scheduled_ = false; | |
| 186 delayed_manage_callback_.Cancel(); | |
| 187 | |
| 188 // Determine which clients are "hibernated" (which determines the | |
| 189 // distribution of frontbuffers and memory among clients that don't have | |
| 190 // surfaces). | |
| 191 SetClientsHibernatedState(); | |
| 192 | |
| 193 SendUmaStatsToBrowser(); | |
| 194 } | |
| 195 | |
| 196 void GpuMemoryManager::SetClientsHibernatedState() const { | |
| 197 // Re-set all tracking groups as being hibernated. | |
| 198 for (TrackingGroupMap::const_iterator it = tracking_groups_.begin(); | |
| 199 it != tracking_groups_.end(); | |
| 200 ++it) { | |
| 201 GpuMemoryTrackingGroup* tracking_group = it->second; | |
| 202 tracking_group->hibernated_ = true; | |
| 203 } | |
| 204 // All clients with surfaces that are visible are non-hibernated. | |
| 205 uint64 non_hibernated_clients = 0; | |
| 206 for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); | |
| 207 it != clients_visible_mru_.end(); | |
| 208 ++it) { | |
| 209 GpuMemoryManagerClientState* client_state = *it; | |
| 210 client_state->hibernated_ = false; | |
| 211 client_state->tracking_group_->hibernated_ = false; | |
| 212 non_hibernated_clients++; | |
| 213 } | |
| 214 // Then an additional few clients with surfaces are non-hibernated too, up to | |
| 215 // a fixed limit. | |
| 216 for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin(); | |
| 217 it != clients_nonvisible_mru_.end(); | |
| 218 ++it) { | |
| 219 GpuMemoryManagerClientState* client_state = *it; | |
| 220 if (non_hibernated_clients < max_surfaces_with_frontbuffer_soft_limit_) { | |
| 221 client_state->hibernated_ = false; | |
| 222 client_state->tracking_group_->hibernated_ = false; | |
| 223 non_hibernated_clients++; | |
| 224 } else { | |
| 225 client_state->hibernated_ = true; | |
| 226 } | |
| 227 } | |
| 228 // Clients that don't have surfaces are non-hibernated if they are | |
| 229 // in a GL share group with a non-hibernated surface. | |
| 230 for (ClientStateList::const_iterator it = clients_nonsurface_.begin(); | |
| 231 it != clients_nonsurface_.end(); | |
| 232 ++it) { | |
| 233 GpuMemoryManagerClientState* client_state = *it; | |
| 234 client_state->hibernated_ = client_state->tracking_group_->hibernated_; | |
| 235 } | |
| 236 } | |
| 237 | |
| 238 void GpuMemoryManager::SendUmaStatsToBrowser() { | 113 void GpuMemoryManager::SendUmaStatsToBrowser() { |
| 239 if (!channel_manager_) | 114 if (!channel_manager_) |
| 240 return; | 115 return; |
| 241 GPUMemoryUmaStats params; | 116 GPUMemoryUmaStats params; |
| 242 params.bytes_allocated_current = GetCurrentUsage(); | 117 params.bytes_allocated_current = GetCurrentUsage(); |
| 243 params.bytes_allocated_max = bytes_allocated_historical_max_; | 118 params.bytes_allocated_max = bytes_allocated_historical_max_; |
| 244 params.bytes_limit = client_hard_limit_bytes_; | |
| 245 params.client_count = clients_visible_mru_.size() + | |
| 246 clients_nonvisible_mru_.size() + | |
| 247 clients_nonsurface_.size(); | |
| 248 params.context_group_count = tracking_groups_.size(); | 119 params.context_group_count = tracking_groups_.size(); |
| 249 channel_manager_->Send(new GpuHostMsg_GpuMemoryUmaStats(params)); | 120 channel_manager_->Send(new GpuHostMsg_GpuMemoryUmaStats(params)); |
| 250 } | 121 } |
| 251 | |
| 252 GpuMemoryManager::ClientStateList* GpuMemoryManager::GetClientList( | |
| 253 GpuMemoryManagerClientState* client_state) { | |
| 254 if (client_state->has_surface_) { | |
| 255 if (client_state->visible_) | |
| 256 return &clients_visible_mru_; | |
| 257 else | |
| 258 return &clients_nonvisible_mru_; | |
| 259 } | |
| 260 return &clients_nonsurface_; | |
| 261 } | |
| 262 | |
| 263 void GpuMemoryManager::AddClientToList( | |
| 264 GpuMemoryManagerClientState* client_state) { | |
| 265 DCHECK(!client_state->list_iterator_valid_); | |
| 266 ClientStateList* client_list = GetClientList(client_state); | |
| 267 client_state->list_iterator_ = client_list->insert( | |
| 268 client_list->begin(), client_state); | |
| 269 client_state->list_iterator_valid_ = true; | |
| 270 } | |
| 271 | |
| 272 void GpuMemoryManager::RemoveClientFromList( | |
| 273 GpuMemoryManagerClientState* client_state) { | |
| 274 DCHECK(client_state->list_iterator_valid_); | |
| 275 ClientStateList* client_list = GetClientList(client_state); | |
| 276 client_list->erase(client_state->list_iterator_); | |
| 277 client_state->list_iterator_valid_ = false; | |
| 278 } | |
| 279 | |
| 280 } // namespace content | 122 } // namespace content |
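Note for reviewers: after this change the only accounting logic left in the file is the delta bookkeeping in TrackValueChanged plus the kBytesAllocatedStep throttle in TrackMemoryAllocatedChange. Below is a minimal standalone sketch of that pattern; the names Accounting and OnAllocationChanged are hypothetical stand-ins, not the Chromium classes, and the sketch only illustrates the intent of the kept code.

```cpp
// Standalone sketch of the delta-accounting pattern kept by this change.
// Accounting/OnAllocationChanged are illustrative names only; the real logic
// lives in GpuMemoryManager::TrackMemoryAllocatedChange.
#include <cassert>
#include <cstdint>
#include <cstdio>

namespace {

const uint64_t kBytesAllocatedStep = 16 * 1024 * 1024;

// Apply a size change as a delta so per-group and global totals stay in sync.
void TrackValueChanged(uint64_t old_size, uint64_t new_size,
                       uint64_t* total_size) {
  // Shrinking by more than the running total would underflow the counter.
  assert(new_size > old_size || *total_size >= (old_size - new_size));
  *total_size += (new_size - old_size);  // Unsigned wraparound encodes the
                                         // signed delta; well-defined in C++.
}

struct Accounting {
  uint64_t current = 0;
  uint64_t historical_max = 0;
};

// Only report when usage grows at least one step past the previous maximum,
// so reporting traffic stays bounded under rapidly changing allocations.
void OnAllocationChanged(Accounting* acct, uint64_t old_size,
                         uint64_t new_size) {
  TrackValueChanged(old_size, new_size, &acct->current);
  if (acct->current > acct->historical_max + kBytesAllocatedStep) {
    acct->historical_max = acct->current;
    std::printf("report: current=%llu max=%llu\n",
                static_cast<unsigned long long>(acct->current),
                static_cast<unsigned long long>(acct->historical_max));
  }
}

}  // namespace

int main() {
  Accounting acct;
  OnAllocationChanged(&acct, 0, 8 * 1024 * 1024);   // below step: no report
  OnAllocationChanged(&acct, 0, 32 * 1024 * 1024);  // crosses step: reports
  OnAllocationChanged(&acct, 32 * 1024 * 1024, 0);  // free: totals shrink
  return 0;
}
```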
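For reference, the hibernation pass deleted here marked every tracking group hibernated, kept all visible clients live, admitted nonvisible clients in MRU order up to the soft frontbuffer limit, and let surfaceless clients inherit their share group's state. The sketch below restates that policy using simplified stand-in types (ShareGroup, ClientState); it is not the removed GpuMemoryManagerClientState/GpuMemoryTrackingGroup code, just a compact illustration of what the deleted SetClientsHibernatedState did.

```cpp
// Simplified stand-in for the deleted hibernation pass.
#include <cstddef>
#include <list>

struct ShareGroup {
  bool hibernated = true;
};

struct ClientState {
  ShareGroup* group = nullptr;
  bool hibernated = true;
};

// Visible clients are always kept live; nonvisible ones (most recently used
// first) are kept only up to the soft frontbuffer limit; clients without a
// surface inherit their share group's state.
void SetHibernatedState(std::list<ClientState*>& visible_mru,
                        std::list<ClientState*>& nonvisible_mru,
                        std::list<ClientState*>& nonsurface,
                        std::list<ShareGroup*>& groups,
                        size_t max_surfaces_with_frontbuffer) {
  for (ShareGroup* group : groups)
    group->hibernated = true;

  size_t non_hibernated = 0;
  for (ClientState* client : visible_mru) {
    client->hibernated = false;
    client->group->hibernated = false;
    ++non_hibernated;
  }
  for (ClientState* client : nonvisible_mru) {
    if (non_hibernated < max_surfaces_with_frontbuffer) {
      client->hibernated = false;
      client->group->hibernated = false;
      ++non_hibernated;
    } else {
      client->hibernated = true;
    }
  }
  for (ClientState* client : nonsurface)
    client->hibernated = client->group->hibernated;
}

int main() {
  ShareGroup g1, g2;
  ClientState visible{&g1}, background{&g2}, worker{&g2};
  std::list<ClientState*> visible_mru{&visible};
  std::list<ClientState*> nonvisible_mru{&background};
  std::list<ClientState*> nonsurface{&worker};
  std::list<ShareGroup*> groups{&g1, &g2};
  SetHibernatedState(visible_mru, nonvisible_mru, nonsurface, groups,
                     /*max_surfaces_with_frontbuffer=*/1);
  // With a soft limit of 1, the background client stays hibernated and the
  // surfaceless worker follows its (still hibernated) share group.
  return 0;
}
```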