| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/common/gpu/gpu_memory_manager.h" | 5 #include "content/common/gpu/gpu_memory_manager.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 | 8 |
| 9 #include "base/bind.h" | 9 #include "base/bind.h" |
| 10 #include "base/command_line.h" | 10 #include "base/command_line.h" |
| (...skipping 41 matching lines...) |
| 52 | 52 |
| 53 GpuMemoryManager::~GpuMemoryManager() { | 53 GpuMemoryManager::~GpuMemoryManager() { |
| 54 DCHECK(tracking_groups_.empty()); | 54 DCHECK(tracking_groups_.empty()); |
| 55 DCHECK(clients_visible_mru_.empty()); | 55 DCHECK(clients_visible_mru_.empty()); |
| 56 DCHECK(clients_nonvisible_mru_.empty()); | 56 DCHECK(clients_nonvisible_mru_.empty()); |
| 57 DCHECK(clients_nonsurface_.empty()); | 57 DCHECK(clients_nonsurface_.empty()); |
| 58 DCHECK(!bytes_allocated_managed_current_); | 58 DCHECK(!bytes_allocated_managed_current_); |
| 59 DCHECK(!bytes_allocated_unmanaged_current_); | 59 DCHECK(!bytes_allocated_unmanaged_current_); |
| 60 } | 60 } |
| 61 | 61 |
| 62 void GpuMemoryManager::UpdateAvailableGpuMemory() { | |
| 63 // If the value was overridden on the command line, use the specified value. | |
| 64 static bool client_hard_limit_bytes_overridden = | |
| 65 base::CommandLine::ForCurrentProcess()->HasSwitch( | |
| 66 switches::kForceGpuMemAvailableMb); | |
| 67 if (client_hard_limit_bytes_overridden) { | |
| 68 base::StringToUint64( | |
| 69 base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII( | |
| 70 switches::kForceGpuMemAvailableMb), | |
| 71 &client_hard_limit_bytes_); | |
| 72 client_hard_limit_bytes_ *= 1024 * 1024; | |
| 73 return; | |
| 74 } | |
| 75 | |
| 76 #if defined(OS_ANDROID) | |
| 77 // On Android, query each visible client for its total GPU memory. | |
| 78 // We do not have a reliable concept of multiple GPUs existing in | |
| 79 // a system, so just be safe and go with the minimum encountered. | |
| 80 uint64 bytes_min = 0; | |
| 81 | |
| 82 // Only use the clients that are visible, because otherwise the set of clients | |
| 83 // we are querying could become extremely large. | |
| 84 for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); | |
| 85 it != clients_visible_mru_.end(); | |
| 86 ++it) { | |
| 87 const GpuMemoryManagerClientState* client_state = *it; | |
| 88 if (!client_state->has_surface_) | |
| 89 continue; | |
| 90 if (!client_state->visible_) | |
| 91 continue; | |
| 92 | |
| 93 uint64 bytes = 0; | |
| 94 if (client_state->client_->GetTotalGpuMemory(&bytes)) { | |
| 95 if (!bytes_min || bytes < bytes_min) | |
| 96 bytes_min = bytes; | |
| 97 } | |
| 98 } | |
| 99 | |
| 100 client_hard_limit_bytes_ = bytes_min; | |
| 101 // Clamp the observed value to a specific range on Android. | |
| 102 client_hard_limit_bytes_ = std::max(client_hard_limit_bytes_, | |
| 103 static_cast<uint64>(8 * 1024 * 1024)); | |
| 104 client_hard_limit_bytes_ = std::min(client_hard_limit_bytes_, | |
| 105 static_cast<uint64>(256 * 1024 * 1024)); | |
| 106 #else | |
| 107 // Ignore what the system said and give all clients the same maximum | |
| 108 // allocation on desktop platforms. | |
| 109 client_hard_limit_bytes_ = 512 * 1024 * 1024; | |
| 110 #endif | |
| 111 } | |
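For reference, the removed UpdateAvailableGpuMemory() boils down to: honor the --force-gpu-mem-available-mb override if present, otherwise take the minimum total GPU memory reported by the visible clients and clamp it to [8 MB, 256 MB] on Android, or use a flat 512 MB on desktop. A minimal standalone sketch of that computation follows; names are illustrative, not Chromium API, and a negative override_mb stands in for "no switch given":

    #include <algorithm>
    #include <cstdint>

    uint64_t ComputeClientHardLimitBytes(int64_t override_mb,
                                         uint64_t min_reported_bytes,
                                         bool is_android) {
      // A command-line override wins outright; it is specified in MB.
      if (override_mb >= 0)
        return static_cast<uint64_t>(override_mb) * 1024 * 1024;
      if (!is_android)
        return 512ull * 1024 * 1024;  // Flat desktop maximum; the query is ignored.
      // Android: clamp the smallest per-client total to [8 MB, 256 MB].
      const uint64_t kMinBytes = 8ull * 1024 * 1024;
      const uint64_t kMaxBytes = 256ull * 1024 * 1024;
      return std::min(std::max(min_reported_bytes, kMinBytes), kMaxBytes);
    }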
| 112 | |
| 113 void GpuMemoryManager::ScheduleManage( | 62 void GpuMemoryManager::ScheduleManage( |
| 114 ScheduleManageTime schedule_manage_time) { | 63 ScheduleManageTime schedule_manage_time) { |
| 115 if (disable_schedule_manage_) | 64 if (disable_schedule_manage_) |
| 116 return; | 65 return; |
| 117 if (manage_immediate_scheduled_) | 66 if (manage_immediate_scheduled_) |
| 118 return; | 67 return; |
| 119 if (schedule_manage_time == kScheduleManageNow) { | 68 if (schedule_manage_time == kScheduleManageNow) { |
| 120 base::ThreadTaskRunnerHandle::Get()->PostTask( | 69 base::ThreadTaskRunnerHandle::Get()->PostTask( |
| 121 FROM_HERE, base::Bind(&GpuMemoryManager::Manage, AsWeakPtr())); | 70 FROM_HERE, base::Bind(&GpuMemoryManager::Manage, AsWeakPtr())); |
| 122 manage_immediate_scheduled_ = true; | 71 manage_immediate_scheduled_ = true; |
| (...skipping 121 matching lines...) |
| 244 | 193 |
| 245 video_memory_usage_stats->bytes_allocated = GetCurrentUsage(); | 194 video_memory_usage_stats->bytes_allocated = GetCurrentUsage(); |
| 246 video_memory_usage_stats->bytes_allocated_historical_max = | 195 video_memory_usage_stats->bytes_allocated_historical_max = |
| 247 bytes_allocated_historical_max_; | 196 bytes_allocated_historical_max_; |
| 248 } | 197 } |
| 249 | 198 |
| 250 void GpuMemoryManager::Manage() { | 199 void GpuMemoryManager::Manage() { |
| 251 manage_immediate_scheduled_ = false; | 200 manage_immediate_scheduled_ = false; |
| 252 delayed_manage_callback_.Cancel(); | 201 delayed_manage_callback_.Cancel(); |
| 253 | 202 |
| 254 // Update the amount of GPU memory available on the system. | |
| 255 UpdateAvailableGpuMemory(); | |
| 256 | |
| 257 // Determine which clients are "hibernated" (which determines the | 203 // Determine which clients are "hibernated" (which determines the |
| 258 // distribution of frontbuffers and memory among clients that don't have | 204 // distribution of frontbuffers and memory among clients that don't have |
| 259 // surfaces). | 205 // surfaces). |
| 260 SetClientsHibernatedState(); | 206 SetClientsHibernatedState(); |
| 261 | 207 |
| 262 // Assign memory allocations to clients that have surfaces. | |
| 263 AssignSurfacesAllocations(); | |
| 264 | |
| 265 // Assign memory allocations to clients that don't have surfaces. | |
| 266 AssignNonSurfacesAllocations(); | |
| 267 | |
| 268 SendUmaStatsToBrowser(); | 208 SendUmaStatsToBrowser(); |
| 269 } | 209 } |
| 270 | 210 |
| 271 void GpuMemoryManager::AssignSurfacesAllocations() { | |
| 272 // Compute each client's allocation and send it to that client. | |
| 273 ClientStateList clients = clients_visible_mru_; | |
| 274 clients.insert(clients.end(), | |
| 275 clients_nonvisible_mru_.begin(), | |
| 276 clients_nonvisible_mru_.end()); | |
| 277 for (ClientStateList::const_iterator it = clients.begin(); | |
| 278 it != clients.end(); | |
| 279 ++it) { | |
| 280 GpuMemoryManagerClientState* client_state = *it; | |
| 281 | |
| 282 // Populate and send the allocation to the client | |
| 283 MemoryAllocation allocation; | |
| 284 allocation.bytes_limit_when_visible = client_hard_limit_bytes_; | |
| 285 #if defined(OS_ANDROID) | |
| 286 // On Android, because there is only one visible tab at any time, allow | |
| 287 // that renderer to cache as much as it can. | |
| 288 allocation.priority_cutoff_when_visible = | |
| 289 MemoryAllocation::CUTOFF_ALLOW_EVERYTHING; | |
| 290 #else | |
| 291 // On desktop platforms, instruct the renderers to cache only a smaller | |
| 292 // set, to play nice with other renderers and other applications. If this | |
| 293 // is not done, then the system can become unstable. | |
| 294 // http://crbug.com/145600 (Linux) | |
| 295 // http://crbug.com/141377 (Mac) | |
| 296 allocation.priority_cutoff_when_visible = | |
| 297 MemoryAllocation::CUTOFF_ALLOW_NICE_TO_HAVE; | |
| 298 #endif | |
| 299 | |
| 300 client_state->client_->SetMemoryAllocation(allocation); | |
| 301 client_state->client_->SuggestHaveFrontBuffer(!client_state->hibernated_); | |
| 302 } | |
| 303 } | |
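The platform split in the removed AssignSurfacesAllocations() amounts to choosing a priority cutoff per platform: Android's single visible tab may cache everything, while desktop renderers are held to nice-to-have resources so they coexist with other applications (crbug.com/145600, crbug.com/141377). A hedged sketch, with an illustrative enum standing in for the real MemoryAllocation cutoff values:

    enum class PriorityCutoffSketch {
      kAllowNothing,
      kAllowRequiredOnly,
      kAllowNiceToHave,
      kAllowEverything,
    };

    PriorityCutoffSketch CutoffWhenVisible(bool is_android) {
      // One visible tab on Android: let that renderer cache as much as it can.
      // Desktop: cap renderers at nice-to-have to keep the system stable.
      return is_android ? PriorityCutoffSketch::kAllowEverything
                        : PriorityCutoffSketch::kAllowNiceToHave;
    }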
| 304 | |
| 305 void GpuMemoryManager::AssignNonSurfacesAllocations() { | |
| 306 for (ClientStateList::const_iterator it = clients_nonsurface_.begin(); | |
| 307 it != clients_nonsurface_.end(); | |
| 308 ++it) { | |
| 309 GpuMemoryManagerClientState* client_state = *it; | |
| 310 MemoryAllocation allocation; | |
| 311 | |
| 312 if (!client_state->hibernated_) { | |
| 313 allocation.bytes_limit_when_visible = client_hard_limit_bytes_; | |
| 314 allocation.priority_cutoff_when_visible = | |
| 315 MemoryAllocation::CUTOFF_ALLOW_EVERYTHING; | |
| 316 } | |
| 317 | |
| 318 client_state->client_->SetMemoryAllocation(allocation); | |
| 319 } | |
| 320 } | |
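Worth noting about the removed AssignNonSurfacesAllocations(): hibernated clients simply received a default-constructed MemoryAllocation, i.e. a zero byte limit, and only non-hibernated non-surface clients got client_hard_limit_bytes_. A tiny sketch of that default-vs-populated pattern; the struct is illustrative, not the real MemoryAllocation type:

    #include <cstdint>

    struct AllocationSketch {
      uint64_t bytes_limit_when_visible = 0;  // Default: nothing to spend.
    };

    AllocationSketch AllocationForNonSurfaceClient(bool hibernated,
                                                   uint64_t hard_limit_bytes) {
      AllocationSketch allocation;
      if (!hibernated)
        allocation.bytes_limit_when_visible = hard_limit_bytes;
      return allocation;
    }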
| 321 | |
| 322 void GpuMemoryManager::SetClientsHibernatedState() const { | 211 void GpuMemoryManager::SetClientsHibernatedState() const { |
| 323 // Re-set all tracking groups as being hibernated. | 212 // Re-set all tracking groups as being hibernated. |
| 324 for (TrackingGroupMap::const_iterator it = tracking_groups_.begin(); | 213 for (TrackingGroupMap::const_iterator it = tracking_groups_.begin(); |
| 325 it != tracking_groups_.end(); | 214 it != tracking_groups_.end(); |
| 326 ++it) { | 215 ++it) { |
| 327 GpuMemoryTrackingGroup* tracking_group = it->second; | 216 GpuMemoryTrackingGroup* tracking_group = it->second; |
| 328 tracking_group->hibernated_ = true; | 217 tracking_group->hibernated_ = true; |
| 329 } | 218 } |
| 330 // All clients with surfaces that are visible are non-hibernated. | 219 // All clients with surfaces that are visible are non-hibernated. |
| 331 uint64 non_hibernated_clients = 0; | 220 uint64 non_hibernated_clients = 0; |
| (...skipping 65 matching lines...) |
| 397 | 286 |
| 398 void GpuMemoryManager::RemoveClientFromList( | 287 void GpuMemoryManager::RemoveClientFromList( |
| 399 GpuMemoryManagerClientState* client_state) { | 288 GpuMemoryManagerClientState* client_state) { |
| 400 DCHECK(client_state->list_iterator_valid_); | 289 DCHECK(client_state->list_iterator_valid_); |
| 401 ClientStateList* client_list = GetClientList(client_state); | 290 ClientStateList* client_list = GetClientList(client_state); |
| 402 client_list->erase(client_state->list_iterator_); | 291 client_list->erase(client_state->list_iterator_); |
| 403 client_state->list_iterator_valid_ = false; | 292 client_state->list_iterator_valid_ = false; |
| 404 } | 293 } |
| 405 | 294 |
| 406 } // namespace content | 295 } // namespace content |