OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/common/gpu/gpu_memory_manager.h" | 5 #include "content/common/gpu/gpu_memory_manager.h" |
6 | 6 |
7 #if defined(ENABLE_GPU) | 7 #if defined(ENABLE_GPU) |
8 | 8 |
9 #include <algorithm> | 9 #include <algorithm> |
10 | 10 |
(...skipping 10 matching lines...) |
21 #include "content/common/gpu/gpu_memory_tracking.h" | 21 #include "content/common/gpu/gpu_memory_tracking.h" |
22 #include "content/common/gpu/gpu_memory_uma_stats.h" | 22 #include "content/common/gpu/gpu_memory_uma_stats.h" |
23 #include "content/common/gpu/gpu_messages.h" | 23 #include "content/common/gpu/gpu_messages.h" |
24 #include "gpu/command_buffer/service/gpu_switches.h" | 24 #include "gpu/command_buffer/service/gpu_switches.h" |
25 | 25 |
26 namespace content { | 26 namespace content { |
27 namespace { | 27 namespace { |
28 | 28 |
29 const int kDelayedScheduleManageTimeoutMs = 67; | 29 const int kDelayedScheduleManageTimeoutMs = 67; |
30 | 30 |
| 31 const size_t kBytesAllocatedUnmanagedStep = 16 * 1024 * 1024; |
| 32 |
31 void TrackValueChanged(size_t old_size, size_t new_size, size_t* total_size) { | 33 void TrackValueChanged(size_t old_size, size_t new_size, size_t* total_size) { |
32 DCHECK(new_size > old_size || *total_size >= (old_size - new_size)); | 34 DCHECK(new_size > old_size || *total_size >= (old_size - new_size)); |
33 *total_size += (new_size - old_size); | 35 *total_size += (new_size - old_size); |
34 } | 36 } |
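The DCHECK above only guards the shrinking case: when new_size < old_size, the unsigned expression (new_size - old_size) wraps around, and adding the wrapped value is equivalent to subtracting the delta, which only yields a sensible total if *total_size is at least (old_size - new_size). A minimal sketch of the helper in use, with made-up sizes that are not taken from this file:

  size_t total = 10 * 1024 * 1024;                 // running total, includes a 2 MB allocation
  TrackValueChanged(2 * 1024 * 1024,
                    5 * 1024 * 1024, &total);      // allocation grows; total is now 13 MB
  TrackValueChanged(5 * 1024 * 1024, 0, &total);   // allocation is freed; total drops to 8 MB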
35 | 37 |
| 38 template<typename T> |
| 39 T RoundUp(T n, T mul) { |
| 40 return ((n + mul - 1) / mul) * mul; |
| 41 } |
| 42 |
| 43 template<typename T> |
| 44 T RoundDown(T n, T mul) { |
| 45 return (n / mul) * mul; |
| 46 } |
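Both helpers rely on truncating integer division, so they are intended for unsigned byte counts and a non-zero multiple. A quick sanity check against the 16 MB step defined above (the DCHECK_EQ calls are illustrative, not part of this file):

  const size_t step = kBytesAllocatedUnmanagedStep;               // 16 MB
  DCHECK_EQ(RoundDown<size_t>(20 * 1024 * 1024, step), 16 * 1024 * 1024u);
  DCHECK_EQ(RoundUp<size_t>(20 * 1024 * 1024, step), 32 * 1024 * 1024u);
  DCHECK_EQ(RoundUp<size_t>(step, step), step);                   // exact multiples are unchanged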
| 47 |
36 } | 48 } |
37 | 49 |
38 GpuMemoryManager::GpuMemoryManager( | 50 GpuMemoryManager::GpuMemoryManager( |
39 GpuChannelManager* channel_manager, | 51 GpuChannelManager* channel_manager, |
40 size_t max_surfaces_with_frontbuffer_soft_limit) | 52 size_t max_surfaces_with_frontbuffer_soft_limit) |
41 : channel_manager_(channel_manager), | 53 : channel_manager_(channel_manager), |
42 manage_immediate_scheduled_(false), | 54 manage_immediate_scheduled_(false), |
43 max_surfaces_with_frontbuffer_soft_limit_( | 55 max_surfaces_with_frontbuffer_soft_limit_( |
44 max_surfaces_with_frontbuffer_soft_limit), | 56 max_surfaces_with_frontbuffer_soft_limit), |
45 bytes_available_gpu_memory_(0), | 57 bytes_available_gpu_memory_(0), |
46 bytes_available_gpu_memory_overridden_(false), | 58 bytes_available_gpu_memory_overridden_(false), |
| 59 bytes_minimum_per_client_(0), |
| 60 bytes_minimum_per_client_overridden_(false), |
47 bytes_backgrounded_available_gpu_memory_(0), | 61 bytes_backgrounded_available_gpu_memory_(0), |
48 bytes_allocated_managed_current_(0), | 62 bytes_allocated_managed_current_(0), |
49 bytes_allocated_managed_visible_(0), | 63 bytes_allocated_managed_visible_(0), |
50 bytes_allocated_managed_backgrounded_(0), | 64 bytes_allocated_managed_backgrounded_(0), |
51 bytes_allocated_unmanaged_current_(0), | 65 bytes_allocated_unmanaged_current_(0), |
52 bytes_allocated_historical_max_(0), | 66 bytes_allocated_historical_max_(0), |
| 67 bytes_allocated_unmanaged_high_(0), |
| 68 bytes_allocated_unmanaged_low_(0), |
| 69 bytes_unmanaged_limit_step_(kBytesAllocatedUnmanagedStep), |
53 window_count_has_been_received_(false), | 70 window_count_has_been_received_(false), |
54 window_count_(0), | 71 window_count_(0), |
55 disable_schedule_manage_(false) | 72 disable_schedule_manage_(false) |
56 { | 73 { |
57 CommandLine* command_line = CommandLine::ForCurrentProcess(); | 74 CommandLine* command_line = CommandLine::ForCurrentProcess(); |
58 if (command_line->HasSwitch(switches::kForceGpuMemAvailableMb)) { | 75 if (command_line->HasSwitch(switches::kForceGpuMemAvailableMb)) { |
59 base::StringToSizeT( | 76 base::StringToSizeT( |
60 command_line->GetSwitchValueASCII(switches::kForceGpuMemAvailableMb), | 77 command_line->GetSwitchValueASCII(switches::kForceGpuMemAvailableMb), |
61 &bytes_available_gpu_memory_); | 78 &bytes_available_gpu_memory_); |
62 bytes_available_gpu_memory_ *= 1024 * 1024; | 79 bytes_available_gpu_memory_ *= 1024 * 1024; |
63 bytes_available_gpu_memory_overridden_ = true; | 80 bytes_available_gpu_memory_overridden_ = true; |
64 } else | 81 } else |
65 bytes_available_gpu_memory_ = GetDefaultAvailableGpuMemory(); | 82 bytes_available_gpu_memory_ = GetDefaultAvailableGpuMemory(); |
66 UpdateBackgroundedAvailableGpuMemory(); | 83 UpdateBackgroundedAvailableGpuMemory(); |
67 } | 84 } |
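The switch value is read in megabytes and scaled to bytes here. Assuming the switch keeps the command-line spelling of its constant in gpu_switches.cc (force-gpu-mem-available-mb), an override would look like:

  chrome --force-gpu-mem-available-mb=64

which sets bytes_available_gpu_memory_ to 64 * 1024 * 1024 and marks it as overridden, presumably so that later recomputation in UpdateAvailableGpuMemory() leaves the forced value alone.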
68 | 85 |
69 GpuMemoryManager::~GpuMemoryManager() { | 86 GpuMemoryManager::~GpuMemoryManager() { |
70 DCHECK(tracking_groups_.empty()); | 87 DCHECK(tracking_groups_.empty()); |
71 DCHECK(clients_visible_mru_.empty()); | 88 DCHECK(clients_visible_mru_.empty()); |
72 DCHECK(clients_nonvisible_mru_.empty()); | 89 DCHECK(clients_nonvisible_mru_.empty()); |
73 DCHECK(clients_nonsurface_.empty()); | 90 DCHECK(clients_nonsurface_.empty()); |
74 DCHECK(!bytes_allocated_managed_current_); | 91 DCHECK(!bytes_allocated_managed_current_); |
75 DCHECK(!bytes_allocated_unmanaged_current_); | 92 DCHECK(!bytes_allocated_unmanaged_current_); |
76 DCHECK(!bytes_allocated_managed_visible_); | 93 DCHECK(!bytes_allocated_managed_visible_); |
77 DCHECK(!bytes_allocated_managed_backgrounded_); | 94 DCHECK(!bytes_allocated_managed_backgrounded_); |
78 } | 95 } |
79 | 96 |
80 size_t GpuMemoryManager::GetAvailableGpuMemory() const { | 97 size_t GpuMemoryManager::GetAvailableGpuMemory() const { |
81 return bytes_available_gpu_memory_; | 98 // Allow unmanaged allocations to over-subscribe by at most (high_ - low_) |
| 99 // before restricting managed (compositor) memory based on unmanaged usage. |
| 100 if (bytes_allocated_unmanaged_low_ > bytes_available_gpu_memory_) |
| 101 return 0; |
| 102 return bytes_available_gpu_memory_ - bytes_allocated_unmanaged_low_; |
82 } | 103 } |
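A worked example of the new budget, with assumed figures (a 256 MB total and a 48 MB low watermark, neither taken from this change):

  // bytes_available_gpu_memory_    = 256 MB  (total budget for the GPU process)
  // bytes_allocated_unmanaged_low_ =  48 MB  (unmanaged usage, rounded down to the step)
  // GetAvailableGpuMemory()        = 256 MB - 48 MB = 208 MB left for managed allocations
  // If the low watermark ever exceeded the total, the early return clamps the result to 0
  // instead of letting the unsigned subtraction wrap.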
83 | 104 |
84 size_t GpuMemoryManager::GetCurrentBackgroundedAvailableGpuMemory() const { | 105 size_t GpuMemoryManager::GetCurrentBackgroundedAvailableGpuMemory() const { |
85 if (bytes_allocated_managed_visible_ < GetAvailableGpuMemory()) { | 106 if (bytes_allocated_managed_visible_ < GetAvailableGpuMemory()) { |
86 return std::min(bytes_backgrounded_available_gpu_memory_, | 107 return std::min(bytes_backgrounded_available_gpu_memory_, |
87 GetAvailableGpuMemory() - bytes_allocated_managed_visible_); | 108 GetAvailableGpuMemory() - bytes_allocated_managed_visible_); |
88 } | 109 } |
89 return 0; | 110 return 0; |
90 } | 111 } |
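In other words, backgrounded clients get the smaller of the dedicated backgrounded pool and whatever the visible clients have left unused; once visible allocations consume the whole budget, backgrounded clients get nothing. Continuing the assumed figures from above:

  // GetAvailableGpuMemory()                  = 208 MB
  // bytes_backgrounded_available_gpu_memory_ =  52 MB  (a quarter of the available budget)
  // bytes_allocated_managed_visible_         = 180 MB
  // result = min(52 MB, 208 MB - 180 MB)     =  28 MB for backgrounded clients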
91 | 112 |
(...skipping 20 matching lines...) |
112 return bytes_available_gpu_memory_; | 133 return bytes_available_gpu_memory_; |
113 #else | 134 #else |
114 // This is to avoid allowing a single page to use a full 256MB of memory | 135 // This is to avoid allowing a single page to use a full 256MB of memory |
115 // (the current total limit). Long-scroll pages will hit this limit, | 136 // (the current total limit). Long-scroll pages will hit this limit, |
116 // resulting in instability on some platforms (e.g., issue 141377). | 137 // resulting in instability on some platforms (e.g., issue 141377). |
117 return bytes_available_gpu_memory_ / 2; | 138 return bytes_available_gpu_memory_ / 2; |
118 #endif | 139 #endif |
119 } | 140 } |
120 | 141 |
121 size_t GpuMemoryManager::GetMinimumTabAllocation() const { | 142 size_t GpuMemoryManager::GetMinimumTabAllocation() const { |
| 143 if (bytes_minimum_per_client_overridden_) |
| 144 return bytes_minimum_per_client_; |
122 #if defined(OS_ANDROID) | 145 #if defined(OS_ANDROID) |
123 return 32 * 1024 * 1024; | 146 return 32 * 1024 * 1024; |
124 #elif defined(OS_CHROMEOS) | 147 #elif defined(OS_CHROMEOS) |
125 return 64 * 1024 * 1024; | 148 return 64 * 1024 * 1024; |
126 #else | 149 #else |
127 return 64 * 1024 * 1024; | 150 return 64 * 1024 * 1024; |
128 #endif | 151 #endif |
129 } | 152 } |
130 | 153 |
131 size_t GpuMemoryManager::CalcAvailableFromViewportArea(int viewport_area) { | 154 size_t GpuMemoryManager::CalcAvailableFromViewportArea(int viewport_area) { |
(...skipping 72 matching lines...) |
204 bytes_available_gpu_memory_ = CalcAvailableFromGpuTotal(bytes_min); | 227 bytes_available_gpu_memory_ = CalcAvailableFromGpuTotal(bytes_min); |
205 #endif | 228 #endif |
206 | 229 |
207 // Never go below the default allocation | 230 // Never go below the default allocation |
208 bytes_available_gpu_memory_ = std::max(bytes_available_gpu_memory_, | 231 bytes_available_gpu_memory_ = std::max(bytes_available_gpu_memory_, |
209 GetDefaultAvailableGpuMemory()); | 232 GetDefaultAvailableGpuMemory()); |
210 | 233 |
211 // Never go above the maximum. | 234 // Never go above the maximum. |
212 bytes_available_gpu_memory_ = std::min(bytes_available_gpu_memory_, | 235 bytes_available_gpu_memory_ = std::min(bytes_available_gpu_memory_, |
213 GetMaximumTotalGpuMemory()); | 236 GetMaximumTotalGpuMemory()); |
| 237 } |
214 | 238 |
215 // Update the backgrounded available gpu memory because it depends on | 239 void GpuMemoryManager::UpdateUnmanagedMemoryLimits() { |
216 // the available GPU memory. | 240 // Set the limit to be [current_, current_ + step_ / 4), with the endpoints |
217 UpdateBackgroundedAvailableGpuMemory(); | 241 // of the intervals rounded down and up to the nearest step_, to avoid |
| 242 // thrashing the interval. |
| 243 bytes_allocated_unmanaged_high_ = RoundUp( |
| 244 bytes_allocated_unmanaged_current_ + bytes_unmanaged_limit_step_ / 4, |
| 245 bytes_unmanaged_limit_step_); |
| 246 bytes_allocated_unmanaged_low_ = RoundDown( |
| 247 bytes_allocated_unmanaged_current_, |
| 248 bytes_unmanaged_limit_step_); |
218 } | 249 } |
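With the default 16 MB step the watermarks move in coarse increments, so small fluctuations in unmanaged usage do not keep re-triggering Manage(). A worked example with an assumed 20 MB of current unmanaged usage:

  // bytes_allocated_unmanaged_current_ = 20 MB, bytes_unmanaged_limit_step_ = 16 MB
  //   low_  = RoundDown(20 MB, 16 MB)       = 16 MB
  //   high_ = RoundUp(20 MB + 4 MB, 16 MB)  = 32 MB
  // Unmanaged usage can now drift anywhere in [16 MB, 32 MB) without
  // TrackMemoryAllocatedChange() scheduling another manage.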
219 | 250 |
220 void GpuMemoryManager::UpdateBackgroundedAvailableGpuMemory() { | 251 void GpuMemoryManager::UpdateBackgroundedAvailableGpuMemory() { |
221 // Be conservative and disable saving backgrounded tabs' textures on Android | 252 // Be conservative and disable saving backgrounded tabs' textures on Android |
222 // for the moment. | 253 // for the moment. |
223 #if defined(OS_ANDROID) | 254 #if defined(OS_ANDROID) |
224 bytes_backgrounded_available_gpu_memory_ = 0; | 255 bytes_backgrounded_available_gpu_memory_ = 0; |
225 #else | 256 #else |
226 bytes_backgrounded_available_gpu_memory_ = bytes_available_gpu_memory_ / 4; | 257 bytes_backgrounded_available_gpu_memory_ = GetAvailableGpuMemory() / 4; |
227 #endif | 258 #endif |
228 } | 259 } |
229 | 260 |
230 void GpuMemoryManager::ScheduleManage(bool immediate) { | 261 void GpuMemoryManager::ScheduleManage(bool immediate) { |
231 if (disable_schedule_manage_) | 262 if (disable_schedule_manage_) |
232 return; | 263 return; |
233 if (manage_immediate_scheduled_) | 264 if (manage_immediate_scheduled_) |
234 return; | 265 return; |
235 if (immediate) { | 266 if (immediate) { |
236 MessageLoop::current()->PostTask( | 267 MessageLoop::current()->PostTask( |
(...skipping 31 matching lines...) |
268 break; | 299 break; |
269 default: | 300 default: |
270 NOTREACHED(); | 301 NOTREACHED(); |
271 break; | 302 break; |
272 } | 303 } |
273 if (new_size != old_size) { | 304 if (new_size != old_size) { |
274 TRACE_COUNTER1("gpu", | 305 TRACE_COUNTER1("gpu", |
275 "GpuMemoryUsage", | 306 "GpuMemoryUsage", |
276 GetCurrentUsage()); | 307 GetCurrentUsage()); |
277 } | 308 } |
| 309 |
| 310 // If we've gone past our current limit on unmanaged memory, schedule a |
| 311 // re-manage to take into account the unmanaged memory. |
| 312 if (bytes_allocated_unmanaged_current_ >= bytes_allocated_unmanaged_high_) |
| 313 ScheduleManage(true); |
| 314 if (bytes_allocated_unmanaged_current_ < bytes_allocated_unmanaged_low_) |
| 315 ScheduleManage(false); |
| 316 |
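Crossing the high watermark forces an immediate manage because unmanaged usage has outgrown what the current limits account for; dropping below the low watermark only asks for a delayed one, since freeing memory adds budget rather than removing it. The resulting hysteresis, using the assumed 16 MB / 32 MB watermarks from above:

  // unmanaged usage stays within [16 MB, 32 MB) : no call to ScheduleManage()
  // unmanaged usage reaches 32 MB               : ScheduleManage(true), manage posted immediately
  // unmanaged usage falls below 16 MB           : ScheduleManage(false), manage presumably deferred
  //                                               by kDelayedScheduleManageTimeoutMs (67 ms)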
278 if (GetCurrentUsage() > bytes_allocated_historical_max_) { | 317 if (GetCurrentUsage() > bytes_allocated_historical_max_) { |
279 bytes_allocated_historical_max_ = GetCurrentUsage(); | 318 bytes_allocated_historical_max_ = GetCurrentUsage(); |
280 // If we're blowing into new memory usage territory, spam the browser | 319 // If we're blowing into new memory usage territory, spam the browser |
281 // process with the most up-to-date information about our memory usage. | 320 // process with the most up-to-date information about our memory usage. |
282 SendUmaStatsToBrowser(); | 321 SendUmaStatsToBrowser(); |
283 } | 322 } |
284 } | 323 } |
285 | 324 |
286 GpuMemoryManagerClientState* GpuMemoryManager::CreateClientState( | 325 GpuMemoryManagerClientState* GpuMemoryManager::CreateClientState( |
287 GpuMemoryManagerClient* client, | 326 GpuMemoryManagerClient* client, |
(...skipping 136 matching lines...) |
424 // As such, the rule for categorizing contexts without a surface is: | 463 // As such, the rule for categorizing contexts without a surface is: |
425 // 1. Find the most visible context-with-a-surface within each | 464 // 1. Find the most visible context-with-a-surface within each |
426 // context-without-a-surface's share group, and inherit its visibility. | 465 // context-without-a-surface's share group, and inherit its visibility. |
427 void GpuMemoryManager::Manage() { | 466 void GpuMemoryManager::Manage() { |
428 manage_immediate_scheduled_ = false; | 467 manage_immediate_scheduled_ = false; |
429 delayed_manage_callback_.Cancel(); | 468 delayed_manage_callback_.Cancel(); |
430 | 469 |
431 // Update the amount of GPU memory available on the system. | 470 // Update the amount of GPU memory available on the system. |
432 UpdateAvailableGpuMemory(); | 471 UpdateAvailableGpuMemory(); |
433 | 472 |
| 473 // Update the limit on unmanaged memory. |
| 474 UpdateUnmanagedMemoryLimits(); |
| 475 |
| 476 // Update the backgrounded available gpu memory because it depends on |
| 477 // the available GPU memory. |
| 478 UpdateBackgroundedAvailableGpuMemory(); |
| 479 |
434 // Determine which clients are "hibernated" (which determines the | 480 // Determine which clients are "hibernated" (which determines the |
435 // distribution of frontbuffers and memory among clients that don't have | 481 // distribution of frontbuffers and memory among clients that don't have |
436 // surfaces). | 482 // surfaces). |
437 SetClientsHibernatedState(); | 483 SetClientsHibernatedState(); |
438 | 484 |
439 // Determine how much memory to give to visible and backgrounded | 485 // Determine how much memory to give to visible and backgrounded |
440 // clients. | 486 // clients. |
441 size_t bytes_limit_when_visible = GetVisibleClientAllocation(); | 487 size_t bytes_limit_when_visible = GetVisibleClientAllocation(); |
442 | 488 |
443 // Experiment to determine if aggressively discarding tiles on OS X | 489 // Experiment to determine if aggressively discarding tiles on OS X |
(...skipping 207 matching lines...) |
651 GpuMemoryManagerClientState* client_state) { | 697 GpuMemoryManagerClientState* client_state) { |
652 DCHECK(client_state->list_iterator_valid_); | 698 DCHECK(client_state->list_iterator_valid_); |
653 ClientStateList* client_list = GetClientList(client_state); | 699 ClientStateList* client_list = GetClientList(client_state); |
654 client_list->erase(client_state->list_iterator_); | 700 client_list->erase(client_state->list_iterator_); |
655 client_state->list_iterator_valid_ = false; | 701 client_state->list_iterator_valid_ = false; |
656 } | 702 } |
657 | 703 |
658 } // namespace content | 704 } // namespace content |
659 | 705 |
660 #endif | 706 #endif |