| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/common/gpu/gpu_memory_manager.h" | 5 #include "content/common/gpu/gpu_memory_manager.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 | 8 |
| 9 #include "base/bind.h" | 9 #include "base/bind.h" |
| 10 #include "base/command_line.h" | 10 #include "base/command_line.h" |
| (...skipping 17 matching lines...) |
| 28 | 28 |
| 29 const int kDelayedScheduleManageTimeoutMs = 67; | 29 const int kDelayedScheduleManageTimeoutMs = 67; |
| 30 | 30 |
| 31 const uint64 kBytesAllocatedUnmanagedStep = 16 * 1024 * 1024; | 31 const uint64 kBytesAllocatedUnmanagedStep = 16 * 1024 * 1024; |
| 32 | 32 |
| 33 void TrackValueChanged(uint64 old_size, uint64 new_size, uint64* total_size) { | 33 void TrackValueChanged(uint64 old_size, uint64 new_size, uint64* total_size) { |
| 34 DCHECK(new_size > old_size || *total_size >= (old_size - new_size)); | 34 DCHECK(new_size > old_size || *total_size >= (old_size - new_size)); |
| 35 *total_size += (new_size - old_size); | 35 *total_size += (new_size - old_size); |
| 36 } | 36 } |
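A brief illustration of why the single unsigned addition in TrackValueChanged handles both growth and shrinkage (the values here are arbitrary): when new_size < old_size, the difference wraps modulo 2^64, and adding the wrapped value to the total wraps back, leaving a net subtraction. The DCHECK rules out the one case that would corrupt the total: shrinking by more than *total_size.

  uint64 total = 100;
  TrackValueChanged(30, 10, &total);
  // new_size - old_size underflows to 2^64 - 20; the subsequent unsigned
  // addition wraps again, so the net effect is total -= 20 (total == 80).
  TrackValueChanged(10, 50, &total);
  // Growth is the plain case: total += 40 (total == 120).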
| 37 | 37 |
| 38 template<typename T> | |
| 39 T RoundUp(T n, T mul) { | |
| 40 return ((n + mul - 1) / mul) * mul; | |
| 41 } | |
| 42 | |
| 43 template<typename T> | |
| 44 T RoundDown(T n, T mul) { | |
| 45 return (n / mul) * mul; | |
| 46 } | |
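These helpers do ordinary integer rounding to a multiple: for instance, RoundUp(24, 16) evaluates to ((24 + 15) / 16) * 16 = 32, and RoundDown(20, 16) evaluates to (20 / 16) * 16 = 16. They are used below to give the unmanaged-memory limits hysteresis.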
| 47 | |
| 48 } | 38 } |
| 49 | 39 |
| 50 GpuMemoryManager::GpuMemoryManager( | 40 GpuMemoryManager::GpuMemoryManager( |
| 51 GpuChannelManager* channel_manager, | 41 GpuChannelManager* channel_manager, |
| 52 uint64 max_surfaces_with_frontbuffer_soft_limit) | 42 uint64 max_surfaces_with_frontbuffer_soft_limit) |
| 53 : channel_manager_(channel_manager), | 43 : channel_manager_(channel_manager), |
| 54 manage_immediate_scheduled_(false), | 44 manage_immediate_scheduled_(false), |
| 45 disable_schedule_manage_(false), |
| 55 max_surfaces_with_frontbuffer_soft_limit_( | 46 max_surfaces_with_frontbuffer_soft_limit_( |
| 56 max_surfaces_with_frontbuffer_soft_limit), | 47 max_surfaces_with_frontbuffer_soft_limit), |
| 57 priority_cutoff_(MemoryAllocation::CUTOFF_ALLOW_EVERYTHING), | 48 client_hard_limit_bytes_(0), |
| 58 bytes_available_gpu_memory_(0), | |
| 59 bytes_available_gpu_memory_overridden_(false), | |
| 60 bytes_minimum_per_client_(0), | |
| 61 bytes_default_per_client_(0), | |
| 62 bytes_allocated_managed_current_(0), | 49 bytes_allocated_managed_current_(0), |
| 63 bytes_allocated_unmanaged_current_(0), | 50 bytes_allocated_unmanaged_current_(0), |
| 64 bytes_allocated_historical_max_(0), | 51 bytes_allocated_historical_max_(0) |
| 65 bytes_allocated_unmanaged_high_(0), | 52 { } |
| 66 bytes_allocated_unmanaged_low_(0), | |
| 67 bytes_unmanaged_limit_step_(kBytesAllocatedUnmanagedStep), | |
| 68 disable_schedule_manage_(false) | |
| 69 { | |
| 70 CommandLine* command_line = CommandLine::ForCurrentProcess(); | |
| 71 | |
| 72 // Use a more conservative memory allocation policy on Linux and Mac because | |
| 73 // the platform is unstable when under memory pressure. | |
| 74 // http://crbug.com/145600 (Linux) | |
| 75 // http://crbug.com/141377 (Mac) | |
| 76 #if defined(OS_MACOSX) || (defined(OS_LINUX) && !defined(OS_CHROMEOS)) | |
| 77 priority_cutoff_ = MemoryAllocation::CUTOFF_ALLOW_NICE_TO_HAVE; | |
| 78 #endif | |
| 79 | |
| 80 #if defined(OS_ANDROID) | |
| 81 bytes_default_per_client_ = 8 * 1024 * 1024; | |
| 82 bytes_minimum_per_client_ = 8 * 1024 * 1024; | |
| 83 #elif defined(OS_CHROMEOS) | |
| 84 bytes_default_per_client_ = 64 * 1024 * 1024; | |
| 85 bytes_minimum_per_client_ = 4 * 1024 * 1024; | |
| 86 #elif defined(OS_MACOSX) | |
| 87 bytes_default_per_client_ = 128 * 1024 * 1024; | |
| 88 bytes_minimum_per_client_ = 128 * 1024 * 1024; | |
| 89 #else | |
| 90 bytes_default_per_client_ = 64 * 1024 * 1024; | |
| 91 bytes_minimum_per_client_ = 64 * 1024 * 1024; | |
| 92 #endif | |
| 93 | |
| 94 if (command_line->HasSwitch(switches::kForceGpuMemAvailableMb)) { | |
| 95 base::StringToUint64( | |
| 96 command_line->GetSwitchValueASCII(switches::kForceGpuMemAvailableMb), | |
| 97 &bytes_available_gpu_memory_); | |
| 98 bytes_available_gpu_memory_ *= 1024 * 1024; | |
| 99 bytes_available_gpu_memory_overridden_ = true; | |
| 100 } else | |
| 101 bytes_available_gpu_memory_ = GetDefaultAvailableGpuMemory(); | |
| 102 } | |
| 103 | 53 |
| 104 GpuMemoryManager::~GpuMemoryManager() { | 54 GpuMemoryManager::~GpuMemoryManager() { |
| 105 DCHECK(tracking_groups_.empty()); | 55 DCHECK(tracking_groups_.empty()); |
| 106 DCHECK(clients_visible_mru_.empty()); | 56 DCHECK(clients_visible_mru_.empty()); |
| 107 DCHECK(clients_nonvisible_mru_.empty()); | 57 DCHECK(clients_nonvisible_mru_.empty()); |
| 108 DCHECK(clients_nonsurface_.empty()); | 58 DCHECK(clients_nonsurface_.empty()); |
| 109 DCHECK(!bytes_allocated_managed_current_); | 59 DCHECK(!bytes_allocated_managed_current_); |
| 110 DCHECK(!bytes_allocated_unmanaged_current_); | 60 DCHECK(!bytes_allocated_unmanaged_current_); |
| 111 } | 61 } |
| 112 | 62 |
| 113 uint64 GpuMemoryManager::GetAvailableGpuMemory() const { | |
| 114 // Allow unmanaged allocations to over-subscribe by at most (high_ - low_) | |
| 115 // before restricting managed (compositor) memory based on unmanaged usage. | |
| 116 if (bytes_allocated_unmanaged_low_ > bytes_available_gpu_memory_) | |
| 117 return 0; | |
| 118 return bytes_available_gpu_memory_ - bytes_allocated_unmanaged_low_; | |
| 119 } | |
| 120 | |
| 121 uint64 GpuMemoryManager::GetDefaultAvailableGpuMemory() const { | |
| 122 #if defined(OS_ANDROID) | |
| 123 return 16 * 1024 * 1024; | |
| 124 #elif defined(OS_CHROMEOS) | |
| 125 return 1024 * 1024 * 1024; | |
| 126 #else | |
| 127 return 256 * 1024 * 1024; | |
| 128 #endif | |
| 129 } | |
| 130 | |
| 131 uint64 GpuMemoryManager::GetMaximumTotalGpuMemory() const { | |
| 132 #if defined(OS_ANDROID) | |
| 133 return 256 * 1024 * 1024; | |
| 134 #else | |
| 135 return 1024 * 1024 * 1024; | |
| 136 #endif | |
| 137 } | |
| 138 | |
| 139 uint64 GpuMemoryManager::GetMaximumClientAllocation() const { | |
| 140 #if defined(OS_ANDROID) || defined(OS_CHROMEOS) | |
| 141 return bytes_available_gpu_memory_; | |
| 142 #else | |
| 143 // This is to avoid allowing a single page to use a full 256MB of memory | |
| 144 // (the current total limit). Long-scrolling pages will hit this limit, | |
| 145 // resulting in instability on some platforms (e.g., issue 141377). | |
| 146 return bytes_available_gpu_memory_ / 2; | |
| 147 #endif | |
| 148 } | |
| 149 | |
| 150 uint64 GpuMemoryManager::CalcAvailableFromGpuTotal(uint64 total_gpu_memory) { | |
| 151 #if defined(OS_ANDROID) | |
| 152 // We don't need to reduce the total on Android, since | |
| 153 // the total is an estimate to begin with. | |
| 154 return total_gpu_memory; | |
| 155 #else | |
| 156 // Allow Chrome to use 75% of total GPU memory, or all-but-64MB of GPU | |
| 157 // memory, whichever is less. | |
| 158 return std::min(3 * total_gpu_memory / 4, total_gpu_memory - 64*1024*1024); | |
| 159 #endif | |
| 160 } | |
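As a worked check of the old formula, the two bounds cross at 256 MB of total GPU memory: for a 512 MB GPU the result is min(384 MB, 448 MB) = 384 MB, so the 75% rule governs, while for a 128 MB GPU it is min(96 MB, 64 MB) = 64 MB, so the all-but-64MB rule governs.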
| 161 | |
| 162 void GpuMemoryManager::UpdateAvailableGpuMemory() { | 63 void GpuMemoryManager::UpdateAvailableGpuMemory() { |
| 163 // If the amount of video memory to use was specified at the command | 64 // If the value was overridden on the command line, use the specified value. |
| 164 // line, never change it. | 65 static bool client_hard_limit_bytes_overridden = |
| 165 if (bytes_available_gpu_memory_overridden_) | 66 CommandLine::ForCurrentProcess()->HasSwitch( |
| 67 switches::kForceGpuMemAvailableMb); |
| 68 if (client_hard_limit_bytes_overridden) { |
| 69 base::StringToUint64( |
| 70 CommandLine::ForCurrentProcess()->GetSwitchValueASCII( |
| 71 switches::kForceGpuMemAvailableMb), |
| 72 &client_hard_limit_bytes_); |
| 73 client_hard_limit_bytes_ *= 1024 * 1024; |
| 166 return; | 74 return; |
| 75 } |
| 167 | 76 |
| 168 // On non-Android, we use an operating system query when possible. | 77 // On non-Android, we use an operating system query when possible. |
| 169 // We do not have a reliable concept of multiple GPUs existing in | 78 // We do not have a reliable concept of multiple GPUs existing in |
| 170 // a system, so just be safe and go with the minimum encountered. | 79 // a system, so just be safe and go with the minimum encountered. |
| 171 uint64 bytes_min = 0; | 80 uint64 bytes_min = 0; |
| 172 | 81 |
| 173 // Only use the clients that are visible, because otherwise the set of clients | 82 // Only use the clients that are visible, because otherwise the set of clients |
| 174 // we are querying could become extremely large. | 83 // we are querying could become extremely large. |
| 175 for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); | 84 for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); |
| 176 it != clients_visible_mru_.end(); | 85 it != clients_visible_mru_.end(); |
| 177 ++it) { | 86 ++it) { |
| 178 const GpuMemoryManagerClientState* client_state = *it; | 87 const GpuMemoryManagerClientState* client_state = *it; |
| 179 if (!client_state->has_surface_) | 88 if (!client_state->has_surface_) |
| 180 continue; | 89 continue; |
| 181 if (!client_state->visible_) | 90 if (!client_state->visible_) |
| 182 continue; | 91 continue; |
| 183 | 92 |
| 184 uint64 bytes = 0; | 93 uint64 bytes = 0; |
| 185 if (client_state->client_->GetTotalGpuMemory(&bytes)) { | 94 if (client_state->client_->GetTotalGpuMemory(&bytes)) { |
| 186 if (!bytes_min || bytes < bytes_min) | 95 if (!bytes_min || bytes < bytes_min) |
| 187 bytes_min = bytes; | 96 bytes_min = bytes; |
| 188 } | 97 } |
| 189 } | 98 } |
| 190 | 99 |
| 191 if (!bytes_min) | 100 if (!bytes_min) |
| 192 return; | 101 return; |
| 193 | 102 |
| 194 bytes_available_gpu_memory_ = CalcAvailableFromGpuTotal(bytes_min); | 103 client_hard_limit_bytes_ = bytes_min; |
| 195 | 104 |
| 196 // Never go below the default allocation | 105 #if defined(OS_ANDROID) |
| 197 bytes_available_gpu_memory_ = std::max(bytes_available_gpu_memory_, | 106 // Clamp the observed value to a specific range on Android. |
| 198 GetDefaultAvailableGpuMemory()); | 107 client_hard_limit_bytes_ = std::max(client_hard_limit_bytes_, |
| 199 | 108 static_cast<uint64>(16 * 1024 * 1024)); |
| 200 // Never go above the maximum. | 109 client_hard_limit_bytes_ = std::min(client_hard_limit_bytes_, |
| 201 bytes_available_gpu_memory_ = std::min(bytes_available_gpu_memory_, | 110 static_cast<uint64>(256 * 1024 * 1024)); |
| 202 GetMaximumTotalGpuMemory()); | 111 #else |
| 203 } | 112 // Ignore what the system said and give all clients the same maximum |
| 204 | 113 // allocation on desktop platforms. |
| 205 void GpuMemoryManager::UpdateUnmanagedMemoryLimits() { | 114 client_hard_limit_bytes_ = 256 * 1024 * 1024; |
| 206 // Set the limit to be [current_, current_ + step_ / 4), with the endpoints | 115 #endif |
| 207 // of the intervals rounded down and up to the nearest step_, to avoid | |
| 208 // thrashing the interval. | |
| 209 bytes_allocated_unmanaged_high_ = RoundUp( | |
| 210 bytes_allocated_unmanaged_current_ + bytes_unmanaged_limit_step_ / 4, | |
| 211 bytes_unmanaged_limit_step_); | |
| 212 bytes_allocated_unmanaged_low_ = RoundDown( | |
| 213 bytes_allocated_unmanaged_current_, | |
| 214 bytes_unmanaged_limit_step_); | |
| 215 } | 116 } |
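A worked example of the old hysteresis interval, using the default 16 MB step: if unmanaged usage is currently 20 MB, the high limit is RoundUp(20 MB + 4 MB, 16 MB) = 32 MB and the low limit is RoundDown(20 MB, 16 MB) = 16 MB, so a re-manage is triggered only once usage climbs to 32 MB or falls below 16 MB, rather than on every small fluctuation.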
| 216 | 117 |
| 217 void GpuMemoryManager::ScheduleManage( | 118 void GpuMemoryManager::ScheduleManage( |
| 218 ScheduleManageTime schedule_manage_time) { | 119 ScheduleManageTime schedule_manage_time) { |
| 219 if (disable_schedule_manage_) | 120 if (disable_schedule_manage_) |
| 220 return; | 121 return; |
| 221 if (manage_immediate_scheduled_) | 122 if (manage_immediate_scheduled_) |
| 222 return; | 123 return; |
| 223 if (schedule_manage_time == kScheduleManageNow) { | 124 if (schedule_manage_time == kScheduleManageNow) { |
| 224 base::MessageLoop::current()->PostTask( | 125 base::MessageLoop::current()->PostTask( |
| (...skipping 31 matching lines...) |
| 256 default: | 157 default: |
| 257 NOTREACHED(); | 158 NOTREACHED(); |
| 258 break; | 159 break; |
| 259 } | 160 } |
| 260 if (new_size != old_size) { | 161 if (new_size != old_size) { |
| 261 TRACE_COUNTER1("gpu", | 162 TRACE_COUNTER1("gpu", |
| 262 "GpuMemoryUsage", | 163 "GpuMemoryUsage", |
| 263 GetCurrentUsage()); | 164 GetCurrentUsage()); |
| 264 } | 165 } |
| 265 | 166 |
| 266 // If we've gone past our current limit on unmanaged memory, schedule a | 167 if (GetCurrentUsage() > bytes_allocated_historical_max_ + |
| 267 // re-manage to take into account the unmanaged memory. | 168 kBytesAllocatedUnmanagedStep) { |
| 268 if (bytes_allocated_unmanaged_current_ >= bytes_allocated_unmanaged_high_) | |
| 269 ScheduleManage(kScheduleManageNow); | |
| 270 if (bytes_allocated_unmanaged_current_ < bytes_allocated_unmanaged_low_) | |
| 271 ScheduleManage(kScheduleManageLater); | |
| 272 | |
| 273 if (GetCurrentUsage() > bytes_allocated_historical_max_) { | |
| 274 bytes_allocated_historical_max_ = GetCurrentUsage(); | 169 bytes_allocated_historical_max_ = GetCurrentUsage(); |
| 275 // If we're blowing into new memory usage territory, spam the browser | 170 // If we're blowing into new memory usage territory, spam the browser |
| 276 // process with the most up-to-date information about our memory usage. | 171 // process with the most up-to-date information about our memory usage. |
| 277 SendUmaStatsToBrowser(); | 172 SendUmaStatsToBrowser(); |
| 278 } | 173 } |
| 279 } | 174 } |
| 280 | 175 |
| 281 bool GpuMemoryManager::EnsureGPUMemoryAvailable(uint64 /* size_needed */) { | 176 bool GpuMemoryManager::EnsureGPUMemoryAvailable(uint64 /* size_needed */) { |
| 282 // TODO: Check if there is enough space. Lose contexts until there is. | 177 // TODO: Check if there is enough space. Lose contexts until there is. |
| 283 return true; | 178 return true; |
| (...skipping 28 matching lines...) |
| 312 return; | 207 return; |
| 313 | 208 |
| 314 RemoveClientFromList(client_state); | 209 RemoveClientFromList(client_state); |
| 315 client_state->visible_ = visible; | 210 client_state->visible_ = visible; |
| 316 AddClientToList(client_state); | 211 AddClientToList(client_state); |
| 317 ScheduleManage(visible ? kScheduleManageNow : kScheduleManageLater); | 212 ScheduleManage(visible ? kScheduleManageNow : kScheduleManageLater); |
| 318 } | 213 } |
| 319 | 214 |
| 320 void GpuMemoryManager::SetClientStateManagedMemoryStats( | 215 void GpuMemoryManager::SetClientStateManagedMemoryStats( |
| 321 GpuMemoryManagerClientState* client_state, | 216 GpuMemoryManagerClientState* client_state, |
| 322 const ManagedMemoryStats& stats) | 217 const ManagedMemoryStats& stats) { |
| 323 { | 218 // TODO(ccameron): delete this from the full stack. |
| 324 client_state->managed_memory_stats_ = stats; | |
| 325 | |
| 326 // If this is the first time that stats have been received for this | |
| 327 // client, use them immediately. | |
| 328 if (!client_state->managed_memory_stats_received_) { | |
| 329 client_state->managed_memory_stats_received_ = true; | |
| 330 ScheduleManage(kScheduleManageNow); | |
| 331 return; | |
| 332 } | |
| 333 | |
| 334 // If these statistics sit outside of the range that we used in our | |
| 335 // computation of memory allocations then recompute the allocations. | |
| 336 if (client_state->managed_memory_stats_.bytes_nice_to_have > | |
| 337 client_state->bytes_nicetohave_limit_high_) { | |
| 338 ScheduleManage(kScheduleManageNow); | |
| 339 } else if (client_state->managed_memory_stats_.bytes_nice_to_have < | |
| 340 client_state->bytes_nicetohave_limit_low_) { | |
| 341 ScheduleManage(kScheduleManageLater); | |
| 342 } | |
| 343 } | 219 } |
| 344 | 220 |
| 345 uint64 GpuMemoryManager::GetClientMemoryUsage( | 221 uint64 GpuMemoryManager::GetClientMemoryUsage( |
| 346 const GpuMemoryManagerClient* client) const { | 222 const GpuMemoryManagerClient* client) const { |
| 347 TrackingGroupMap::const_iterator tracking_group_it = | 223 TrackingGroupMap::const_iterator tracking_group_it = |
| 348 tracking_groups_.find(client->GetMemoryTracker()); | 224 tracking_groups_.find(client->GetMemoryTracker()); |
| 349 DCHECK(tracking_group_it != tracking_groups_.end()); | 225 DCHECK(tracking_group_it != tracking_groups_.end()); |
| 350 return tracking_group_it->second->GetSize(); | 226 return tracking_group_it->second->GetSize(); |
| 351 } | 227 } |
| 352 | 228 |
| (...skipping 35 matching lines...) |
| 388 bytes_allocated_historical_max_; | 264 bytes_allocated_historical_max_; |
| 389 } | 265 } |
| 390 | 266 |
| 391 void GpuMemoryManager::Manage() { | 267 void GpuMemoryManager::Manage() { |
| 392 manage_immediate_scheduled_ = false; | 268 manage_immediate_scheduled_ = false; |
| 393 delayed_manage_callback_.Cancel(); | 269 delayed_manage_callback_.Cancel(); |
| 394 | 270 |
| 395 // Update the amount of GPU memory available on the system. | 271 // Update the amount of GPU memory available on the system. |
| 396 UpdateAvailableGpuMemory(); | 272 UpdateAvailableGpuMemory(); |
| 397 | 273 |
| 398 // Update the limit on unmanaged memory. | |
| 399 UpdateUnmanagedMemoryLimits(); | |
| 400 | |
| 401 // Determine which clients are "hibernated" (which determines the | 274 // Determine which clients are "hibernated" (which determines the |
| 402 // distribution of frontbuffers and memory among clients that don't have | 275 // distribution of frontbuffers and memory among clients that don't have |
| 403 // surfaces). | 276 // surfaces). |
| 404 SetClientsHibernatedState(); | 277 SetClientsHibernatedState(); |
| 405 | 278 |
| 406 // Assign memory allocations to clients that have surfaces. | 279 // Assign memory allocations to clients that have surfaces. |
| 407 AssignSurfacesAllocations(); | 280 AssignSurfacesAllocations(); |
| 408 | 281 |
| 409 // Assign memory allocations to clients that don't have surfaces. | 282 // Assign memory allocations to clients that don't have surfaces. |
| 410 AssignNonSurfacesAllocations(); | 283 AssignNonSurfacesAllocations(); |
| 411 | 284 |
| 412 SendUmaStatsToBrowser(); | 285 SendUmaStatsToBrowser(); |
| 413 } | 286 } |
| 414 | 287 |
| 415 // static | |
| 416 uint64 GpuMemoryManager::ComputeCap( | |
| 417 std::vector<uint64> bytes, uint64 bytes_sum_limit) | |
| 418 { | |
| 419 size_t bytes_size = bytes.size(); | |
| 420 uint64 bytes_sum = 0; | |
| 421 | |
| 422 if (bytes_size == 0) | |
| 423 return std::numeric_limits<uint64>::max(); | |
| 424 | |
| 425 // Sort and add up all entries | |
| 426 std::sort(bytes.begin(), bytes.end()); | |
| 427 for (size_t i = 0; i < bytes_size; ++i) | |
| 428 bytes_sum += bytes[i]; | |
| 429 | |
| 430 // As we go through the below loop, let bytes_partial_sum be the | |
| 431 // sum of bytes[0] + ... + bytes[bytes_size - i - 1] | |
| 432 uint64 bytes_partial_sum = bytes_sum; | |
| 433 | |
| 434 // Try using each entry as a cap, and see where we get cut off. | |
| 435 for (size_t i = 0; i < bytes_size; ++i) { | |
| 436 // Try limiting cap to bytes[bytes_size - i - 1] | |
| 437 uint64 test_cap = bytes[bytes_size - i - 1]; | |
| 438 uint64 bytes_sum_with_test_cap = i * test_cap + bytes_partial_sum; | |
| 439 | |
| 440 // If that fits, raise test_cap to give an even distribution to the | |
| 441 // last i entries. | |
| 442 if (bytes_sum_with_test_cap <= bytes_sum_limit) { | |
| 443 if (i == 0) | |
| 444 return std::numeric_limits<uint64>::max(); | |
| 445 else | |
| 446 return test_cap + (bytes_sum_limit - bytes_sum_with_test_cap) / i; | |
| 447 } else { | |
| 448 bytes_partial_sum -= test_cap; | |
| 449 } | |
| 450 } | |
| 451 | |
| 452 // If we got here, then we can't fully accommodate any of the clients, | |
| 453 // so distribute bytes_sum_limit evenly. | |
| 454 return bytes_sum_limit / bytes_size; | |
| 455 } | |
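ComputeCap is a water-filling computation: it finds the largest per-client cap such that capping every request keeps the total within bytes_sum_limit. A worked example with hypothetical requests {10, 20, 40} and a limit of 50: a cap of 40 would need 0*40 + 70 = 70 bytes, which does not fit; a cap of 20 needs 1*20 + (10 + 20) = 50, which fits exactly, so the function returns 20 + (50 - 50)/1 = 20, and the capped requests 10 + 20 + 20 consume precisely the limit. With a limit of 65 the same step instead returns 20 + (65 - 50)/1 = 35, spending the slack on the largest request.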
| 456 | |
| 457 uint64 GpuMemoryManager::ComputeClientAllocationWhenVisible( | |
| 458 GpuMemoryManagerClientState* client_state, | |
| 459 uint64 bytes_above_required_cap, | |
| 460 uint64 bytes_above_minimum_cap, | |
| 461 uint64 bytes_overall_cap) { | |
| 462 ManagedMemoryStats* stats = &client_state->managed_memory_stats_; | |
| 463 | |
| 464 if (!client_state->managed_memory_stats_received_) | |
| 465 return GetDefaultClientAllocation(); | |
| 466 | |
| 467 uint64 bytes_required = 9 * stats->bytes_required / 8; | |
| 468 bytes_required = std::min(bytes_required, GetMaximumClientAllocation()); | |
| 469 bytes_required = std::max(bytes_required, GetMinimumClientAllocation()); | |
| 470 | |
| 471 uint64 bytes_nicetohave = 4 * stats->bytes_nice_to_have / 3; | |
| 472 bytes_nicetohave = std::min(bytes_nicetohave, GetMaximumClientAllocation()); | |
| 473 bytes_nicetohave = std::max(bytes_nicetohave, GetMinimumClientAllocation()); | |
| 474 bytes_nicetohave = std::max(bytes_nicetohave, bytes_required); | |
| 475 | |
| 476 uint64 allocation = GetMinimumClientAllocation(); | |
| 477 allocation += std::min(bytes_required - GetMinimumClientAllocation(), | |
| 478 bytes_above_minimum_cap); | |
| 479 allocation += std::min(bytes_nicetohave - bytes_required, | |
| 480 bytes_above_required_cap); | |
| 481 allocation = std::min(allocation, | |
| 482 bytes_overall_cap); | |
| 483 return allocation; | |
| 484 } | |
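The old allocation is built up in three layers, each separately capped: the guaranteed minimum, then the slice up to the padded required amount, then the slice up to the padded nice-to-have amount. As a worked example with hypothetical values (minimum = 64 MB, padded required = 96 MB, padded nice-to-have = 128 MB, bytes_above_minimum_cap unbounded, bytes_above_required_cap = 16 MB, overall cap = 128 MB): allocation = 64 + min(32, unbounded) + min(32, 16) = 112 MB, which is under the overall cap, so 112 MB is returned.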
| 485 | |
| 486 void GpuMemoryManager::ComputeVisibleSurfacesAllocations() { | |
| 487 uint64 bytes_available_total = GetAvailableGpuMemory(); | |
| 488 uint64 bytes_above_required_cap = std::numeric_limits<uint64>::max(); | |
| 489 uint64 bytes_above_minimum_cap = std::numeric_limits<uint64>::max(); | |
| 490 uint64 bytes_overall_cap_visible = GetMaximumClientAllocation(); | |
| 491 | |
| 492 // Compute memory usage at three levels | |
| 493 // - painting everything that is nicetohave for visible clients | |
| 494 // - painting only what is visible | |
| 495 // - giving every client the minimum allocation | |
| 496 uint64 bytes_nicetohave_visible = 0; | |
| 497 uint64 bytes_required_visible = 0; | |
| 498 uint64 bytes_minimum_visible = 0; | |
| 499 for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); | |
| 500 it != clients_visible_mru_.end(); | |
| 501 ++it) { | |
| 502 GpuMemoryManagerClientState* client_state = *it; | |
| 503 client_state->bytes_allocation_ideal_nicetohave_ = | |
| 504 ComputeClientAllocationWhenVisible( | |
| 505 client_state, | |
| 506 bytes_above_required_cap, | |
| 507 bytes_above_minimum_cap, | |
| 508 bytes_overall_cap_visible); | |
| 509 client_state->bytes_allocation_ideal_required_ = | |
| 510 ComputeClientAllocationWhenVisible( | |
| 511 client_state, | |
| 512 0, | |
| 513 bytes_above_minimum_cap, | |
| 514 bytes_overall_cap_visible); | |
| 515 client_state->bytes_allocation_ideal_minimum_ = | |
| 516 ComputeClientAllocationWhenVisible( | |
| 517 client_state, | |
| 518 0, | |
| 519 0, | |
| 520 bytes_overall_cap_visible); | |
| 521 | |
| 522 bytes_nicetohave_visible += | |
| 523 client_state->bytes_allocation_ideal_nicetohave_; | |
| 524 bytes_required_visible += | |
| 525 client_state->bytes_allocation_ideal_required_; | |
| 526 bytes_minimum_visible += | |
| 527 client_state->bytes_allocation_ideal_minimum_; | |
| 528 } | |
| 529 | |
| 530 // Determine which of those three points we can satisfy, and limit | |
| 531 // bytes_above_required_cap and bytes_above_minimum_cap to not go | |
| 532 // over the limit. | |
| 533 if (bytes_minimum_visible > bytes_available_total) { | |
| 534 bytes_above_required_cap = 0; | |
| 535 bytes_above_minimum_cap = 0; | |
| 536 } else if (bytes_required_visible > bytes_available_total) { | |
| 537 std::vector<uint64> bytes_to_fit; | |
| 538 for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); | |
| 539 it != clients_visible_mru_.end(); | |
| 540 ++it) { | |
| 541 GpuMemoryManagerClientState* client_state = *it; | |
| 542 bytes_to_fit.push_back(client_state->bytes_allocation_ideal_required_ - | |
| 543 client_state->bytes_allocation_ideal_minimum_); | |
| 544 } | |
| 545 bytes_above_required_cap = 0; | |
| 546 bytes_above_minimum_cap = ComputeCap( | |
| 547 bytes_to_fit, bytes_available_total - bytes_minimum_visible); | |
| 548 } else if (bytes_nicetohave_visible > bytes_available_total) { | |
| 549 std::vector<uint64> bytes_to_fit; | |
| 550 for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); | |
| 551 it != clients_visible_mru_.end(); | |
| 552 ++it) { | |
| 553 GpuMemoryManagerClientState* client_state = *it; | |
| 554 bytes_to_fit.push_back(client_state->bytes_allocation_ideal_nicetohave_ - | |
| 555 client_state->bytes_allocation_ideal_required_); | |
| 556 } | |
| 557 bytes_above_required_cap = ComputeCap( | |
| 558 bytes_to_fit, bytes_available_total - bytes_required_visible); | |
| 559 bytes_above_minimum_cap = std::numeric_limits<uint64>::max(); | |
| 560 } | |
| 561 | |
| 562 // Given those computed limits, set the actual memory allocations for the | |
| 563 // visible clients, tracking the largest allocation and the total allocation | |
| 564 // for future use. | |
| 565 uint64 bytes_allocated_visible = 0; | |
| 566 uint64 bytes_allocated_max_client_allocation = 0; | |
| 567 for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); | |
| 568 it != clients_visible_mru_.end(); | |
| 569 ++it) { | |
| 570 GpuMemoryManagerClientState* client_state = *it; | |
| 571 client_state->bytes_allocation_when_visible_ = | |
| 572 ComputeClientAllocationWhenVisible( | |
| 573 client_state, | |
| 574 bytes_above_required_cap, | |
| 575 bytes_above_minimum_cap, | |
| 576 bytes_overall_cap_visible); | |
| 577 bytes_allocated_visible += client_state->bytes_allocation_when_visible_; | |
| 578 bytes_allocated_max_client_allocation = std::max( | |
| 579 bytes_allocated_max_client_allocation, | |
| 580 client_state->bytes_allocation_when_visible_); | |
| 581 } | |
| 582 | |
| 583 // Set the limit for nonvisible clients for when they become visible. | |
| 584 // Use the same formula, with a lowered overall cap in case any of the | |
| 585 // currently-nonvisible clients are much more resource-intensive than any | |
| 586 // of the existing clients. | |
| 587 uint64 bytes_overall_cap_nonvisible = bytes_allocated_max_client_allocation; | |
| 588 if (bytes_available_total > bytes_allocated_visible) { | |
| 589 bytes_overall_cap_nonvisible += | |
| 590 bytes_available_total - bytes_allocated_visible; | |
| 591 } | |
| 592 bytes_overall_cap_nonvisible = std::min(bytes_overall_cap_nonvisible, | |
| 593 GetMaximumClientAllocation()); | |
| 594 for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin(); | |
| 595 it != clients_nonvisible_mru_.end(); | |
| 596 ++it) { | |
| 597 GpuMemoryManagerClientState* client_state = *it; | |
| 598 client_state->bytes_allocation_when_visible_ = | |
| 599 ComputeClientAllocationWhenVisible( | |
| 600 client_state, | |
| 601 bytes_above_required_cap, | |
| 602 bytes_above_minimum_cap, | |
| 603 bytes_overall_cap_nonvisible); | |
| 604 } | |
| 605 } | |
| 606 | |
| 607 void GpuMemoryManager::DistributeRemainingMemoryToVisibleSurfaces() { | |
| 608 uint64 bytes_available_total = GetAvailableGpuMemory(); | |
| 609 uint64 bytes_allocated_total = 0; | |
| 610 | |
| 611 for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); | |
| 612 it != clients_visible_mru_.end(); | |
| 613 ++it) { | |
| 614 GpuMemoryManagerClientState* client_state = *it; | |
| 615 bytes_allocated_total += client_state->bytes_allocation_when_visible_; | |
| 616 } | |
| 617 | |
| 618 if (bytes_allocated_total >= bytes_available_total) | |
| 619 return; | |
| 620 | |
| 621 std::vector<uint64> bytes_extra_requests; | |
| 622 for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); | |
| 623 it != clients_visible_mru_.end(); | |
| 624 ++it) { | |
| 625 GpuMemoryManagerClientState* client_state = *it; | |
| 626 CHECK(GetMaximumClientAllocation() >= | |
| 627 client_state->bytes_allocation_when_visible_); | |
| 628 uint64 bytes_extra = GetMaximumClientAllocation() - | |
| 629 client_state->bytes_allocation_when_visible_; | |
| 630 bytes_extra_requests.push_back(bytes_extra); | |
| 631 } | |
| 632 uint64 bytes_extra_cap = ComputeCap( | |
| 633 bytes_extra_requests, bytes_available_total - bytes_allocated_total); | |
| 634 for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); | |
| 635 it != clients_visible_mru_.end(); | |
| 636 ++it) { | |
| 637 GpuMemoryManagerClientState* client_state = *it; | |
| 638 uint64 bytes_extra = GetMaximumClientAllocation() - | |
| 639 client_state->bytes_allocation_when_visible_; | |
| 640 client_state->bytes_allocation_when_visible_ += std::min( | |
| 641 bytes_extra, bytes_extra_cap); | |
| 642 } | |
| 643 } | |
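A worked example of the old leftover distribution, with hypothetical numbers: two visible clients hold when-visible allocations of 60 and 100 against a per-client maximum of 128, with 200 available in total. The leftover is 200 - 160 = 40, the extra headrooms are {68, 28}, and ComputeCap({68, 28}, 40) returns 40 / 2 = 20, so the clients grow to 80 and 120 and the full 200 is used.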
| 644 | |
| 645 void GpuMemoryManager::AssignSurfacesAllocations() { | 288 void GpuMemoryManager::AssignSurfacesAllocations() { |
| 646 // Compute the allocation when visible for all clients. | |
| 647 ComputeVisibleSurfacesAllocations(); | |
| 648 | |
| 649 // Distribute the remaining memory to visible clients. | |
| 650 DistributeRemainingMemoryToVisibleSurfaces(); | |
| 651 | |
| 652 // Send that allocation to the clients. | 289 // Send that allocation to the clients. |
| 653 ClientStateList clients = clients_visible_mru_; | 290 ClientStateList clients = clients_visible_mru_; |
| 654 clients.insert(clients.end(), | 291 clients.insert(clients.end(), |
| 655 clients_nonvisible_mru_.begin(), | 292 clients_nonvisible_mru_.begin(), |
| 656 clients_nonvisible_mru_.end()); | 293 clients_nonvisible_mru_.end()); |
| 657 for (ClientStateList::const_iterator it = clients.begin(); | 294 for (ClientStateList::const_iterator it = clients.begin(); |
| 658 it != clients.end(); | 295 it != clients.end(); |
| 659 ++it) { | 296 ++it) { |
| 660 GpuMemoryManagerClientState* client_state = *it; | 297 GpuMemoryManagerClientState* client_state = *it; |
| 661 | 298 |
| 662 // Re-assign memory limits to this client when its "nice to have" bucket | |
| 663 // grows or shrinks by 1/4. | |
| 664 client_state->bytes_nicetohave_limit_high_ = | |
| 665 5 * client_state->managed_memory_stats_.bytes_nice_to_have / 4; | |
| 666 client_state->bytes_nicetohave_limit_low_ = | |
| 667 3 * client_state->managed_memory_stats_.bytes_nice_to_have / 4; | |
| 668 | |
| 669 // Populate and send the allocation to the client | 299 // Populate and send the allocation to the client |
| 670 MemoryAllocation allocation; | 300 MemoryAllocation allocation; |
| 671 | 301 allocation.bytes_limit_when_visible = client_hard_limit_bytes_; |
| 672 allocation.bytes_limit_when_visible = | 302 #if defined(OS_ANDROID) |
| 673 client_state->bytes_allocation_when_visible_; | 303 // On Android, because there is only one visible tab at any time, allow |
| 674 allocation.priority_cutoff_when_visible = priority_cutoff_; | 304 // that renderer to cache as much as it can. |
| 305 allocation.priority_cutoff_when_visible = |
| 306 MemoryAllocation::CUTOFF_ALLOW_EVERYTHING; |
| 307 #else |
| 308 // On desktop platforms, instruct the renderers to cache only a smaller |
| 309 // set, to play nice with other renderers and other applications. If this |
| 310 // if not done, then the system can become unstable. |
| 311 // http://crbug.com/145600 (Linux) |
| 312 // http://crbug.com/141377 (Mac) |
| 313 allocation.priority_cutoff_when_visible = |
| 314 MemoryAllocation::CUTOFF_ALLOW_NICE_TO_HAVE; |
| 315 #endif |
| 675 | 316 |
| 676 client_state->client_->SetMemoryAllocation(allocation); | 317 client_state->client_->SetMemoryAllocation(allocation); |
| 677 client_state->client_->SuggestHaveFrontBuffer(!client_state->hibernated_); | 318 client_state->client_->SuggestHaveFrontBuffer(!client_state->hibernated_); |
| 678 } | 319 } |
| 679 } | 320 } |
| 680 | 321 |
| 681 void GpuMemoryManager::AssignNonSurfacesAllocations() { | 322 void GpuMemoryManager::AssignNonSurfacesAllocations() { |
| 682 for (ClientStateList::const_iterator it = clients_nonsurface_.begin(); | 323 for (ClientStateList::const_iterator it = clients_nonsurface_.begin(); |
| 683 it != clients_nonsurface_.end(); | 324 it != clients_nonsurface_.end(); |
| 684 ++it) { | 325 ++it) { |
| 685 GpuMemoryManagerClientState* client_state = *it; | 326 GpuMemoryManagerClientState* client_state = *it; |
| 686 MemoryAllocation allocation; | 327 MemoryAllocation allocation; |
| 687 | 328 |
| 688 if (!client_state->hibernated_) { | 329 if (!client_state->hibernated_) { |
| 689 allocation.bytes_limit_when_visible = | 330 allocation.bytes_limit_when_visible = 1; |
| 690 GetMinimumClientAllocation(); | |
| 691 allocation.priority_cutoff_when_visible = | 331 allocation.priority_cutoff_when_visible = |
| 692 MemoryAllocation::CUTOFF_ALLOW_EVERYTHING; | 332 MemoryAllocation::CUTOFF_ALLOW_EVERYTHING; |
| 693 } | 333 } |
| 694 | 334 |
| 695 client_state->client_->SetMemoryAllocation(allocation); | 335 client_state->client_->SetMemoryAllocation(allocation); |
| 696 } | 336 } |
| 697 } | 337 } |
| 698 | 338 |
| 699 void GpuMemoryManager::SetClientsHibernatedState() const { | 339 void GpuMemoryManager::SetClientsHibernatedState() const { |
| 700 // Re-set all tracking groups as being hibernated. | 340 // Re-set all tracking groups as being hibernated. |
| (...skipping 36 matching lines...) |
| 737 client_state->hibernated_ = client_state->tracking_group_->hibernated_; | 377 client_state->hibernated_ = client_state->tracking_group_->hibernated_; |
| 738 } | 378 } |
| 739 } | 379 } |
| 740 | 380 |
| 741 void GpuMemoryManager::SendUmaStatsToBrowser() { | 381 void GpuMemoryManager::SendUmaStatsToBrowser() { |
| 742 if (!channel_manager_) | 382 if (!channel_manager_) |
| 743 return; | 383 return; |
| 744 GPUMemoryUmaStats params; | 384 GPUMemoryUmaStats params; |
| 745 params.bytes_allocated_current = GetCurrentUsage(); | 385 params.bytes_allocated_current = GetCurrentUsage(); |
| 746 params.bytes_allocated_max = bytes_allocated_historical_max_; | 386 params.bytes_allocated_max = bytes_allocated_historical_max_; |
| 747 params.bytes_limit = bytes_available_gpu_memory_; | 387 params.bytes_limit = client_hard_limit_bytes_; |
| 748 params.client_count = clients_visible_mru_.size() + | 388 params.client_count = clients_visible_mru_.size() + |
| 749 clients_nonvisible_mru_.size() + | 389 clients_nonvisible_mru_.size() + |
| 750 clients_nonsurface_.size(); | 390 clients_nonsurface_.size(); |
| 751 params.context_group_count = tracking_groups_.size(); | 391 params.context_group_count = tracking_groups_.size(); |
| 752 channel_manager_->Send(new GpuHostMsg_GpuMemoryUmaStats(params)); | 392 channel_manager_->Send(new GpuHostMsg_GpuMemoryUmaStats(params)); |
| 753 } | 393 } |
| 754 | 394 |
| 755 GpuMemoryManager::ClientStateList* GpuMemoryManager::GetClientList( | 395 GpuMemoryManager::ClientStateList* GpuMemoryManager::GetClientList( |
| 756 GpuMemoryManagerClientState* client_state) { | 396 GpuMemoryManagerClientState* client_state) { |
| 757 if (client_state->has_surface_) { | 397 if (client_state->has_surface_) { |
| (...skipping 16 matching lines...) |
| 774 | 414 |
| 775 void GpuMemoryManager::RemoveClientFromList( | 415 void GpuMemoryManager::RemoveClientFromList( |
| 776 GpuMemoryManagerClientState* client_state) { | 416 GpuMemoryManagerClientState* client_state) { |
| 777 DCHECK(client_state->list_iterator_valid_); | 417 DCHECK(client_state->list_iterator_valid_); |
| 778 ClientStateList* client_list = GetClientList(client_state); | 418 ClientStateList* client_list = GetClientList(client_state); |
| 779 client_list->erase(client_state->list_iterator_); | 419 client_list->erase(client_state->list_iterator_); |
| 780 client_state->list_iterator_valid_ = false; | 420 client_state->list_iterator_valid_ = false; |
| 781 } | 421 } |
| 782 | 422 |
| 783 } // namespace content | 423 } // namespace content |