Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(266)

Side by Side Diff: trunk/src/content/common/gpu/gpu_memory_manager.cc

Issue 313163002: Revert 274326 "Lobotomize the GPU memory manager" (Closed) Base URL: svn://svn.chromium.org/chrome/
Patch Set: Created 6 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/common/gpu/gpu_memory_manager.h" 5 #include "content/common/gpu/gpu_memory_manager.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 8
9 #include "base/bind.h" 9 #include "base/bind.h"
10 #include "base/command_line.h" 10 #include "base/command_line.h"
(...skipping 17 matching lines...) Expand all
28 28
29 const int kDelayedScheduleManageTimeoutMs = 67; 29 const int kDelayedScheduleManageTimeoutMs = 67;
30 30
31 const uint64 kBytesAllocatedUnmanagedStep = 16 * 1024 * 1024; 31 const uint64 kBytesAllocatedUnmanagedStep = 16 * 1024 * 1024;
32 32
33 void TrackValueChanged(uint64 old_size, uint64 new_size, uint64* total_size) { 33 void TrackValueChanged(uint64 old_size, uint64 new_size, uint64* total_size) {
34 DCHECK(new_size > old_size || *total_size >= (old_size - new_size)); 34 DCHECK(new_size > old_size || *total_size >= (old_size - new_size));
35 *total_size += (new_size - old_size); 35 *total_size += (new_size - old_size);
36 } 36 }
37 37
38 template<typename T>
39 T RoundUp(T n, T mul) {
40 return ((n + mul - 1) / mul) * mul;
41 }
42
43 template<typename T>
44 T RoundDown(T n, T mul) {
45 return (n / mul) * mul;
46 }
47
38 } 48 }
39 49
40 GpuMemoryManager::GpuMemoryManager( 50 GpuMemoryManager::GpuMemoryManager(
41 GpuChannelManager* channel_manager, 51 GpuChannelManager* channel_manager,
42 uint64 max_surfaces_with_frontbuffer_soft_limit) 52 uint64 max_surfaces_with_frontbuffer_soft_limit)
43 : channel_manager_(channel_manager), 53 : channel_manager_(channel_manager),
44 manage_immediate_scheduled_(false), 54 manage_immediate_scheduled_(false),
45 disable_schedule_manage_(false),
46 max_surfaces_with_frontbuffer_soft_limit_( 55 max_surfaces_with_frontbuffer_soft_limit_(
47 max_surfaces_with_frontbuffer_soft_limit), 56 max_surfaces_with_frontbuffer_soft_limit),
48 client_hard_limit_bytes_(0), 57 priority_cutoff_(MemoryAllocation::CUTOFF_ALLOW_EVERYTHING),
58 bytes_available_gpu_memory_(0),
59 bytes_available_gpu_memory_overridden_(false),
60 bytes_minimum_per_client_(0),
61 bytes_default_per_client_(0),
49 bytes_allocated_managed_current_(0), 62 bytes_allocated_managed_current_(0),
50 bytes_allocated_unmanaged_current_(0), 63 bytes_allocated_unmanaged_current_(0),
51 bytes_allocated_historical_max_(0) 64 bytes_allocated_historical_max_(0),
52 { } 65 bytes_allocated_unmanaged_high_(0),
66 bytes_allocated_unmanaged_low_(0),
67 bytes_unmanaged_limit_step_(kBytesAllocatedUnmanagedStep),
68 disable_schedule_manage_(false)
69 {
70 CommandLine* command_line = CommandLine::ForCurrentProcess();
71
72 // Use a more conservative memory allocation policy on Linux and Mac because
73 // the platform is unstable when under memory pressure.
74 // http://crbug.com/145600 (Linux)
75 // http://crbug.com/141377 (Mac)
76 #if defined(OS_MACOSX) || (defined(OS_LINUX) && !defined(OS_CHROMEOS))
77 priority_cutoff_ = MemoryAllocation::CUTOFF_ALLOW_NICE_TO_HAVE;
78 #endif
79
80 #if defined(OS_ANDROID)
81 bytes_default_per_client_ = 8 * 1024 * 1024;
82 bytes_minimum_per_client_ = 8 * 1024 * 1024;
83 #elif defined(OS_CHROMEOS)
84 bytes_default_per_client_ = 64 * 1024 * 1024;
85 bytes_minimum_per_client_ = 4 * 1024 * 1024;
86 #elif defined(OS_MACOSX)
87 bytes_default_per_client_ = 128 * 1024 * 1024;
88 bytes_minimum_per_client_ = 128 * 1024 * 1024;
89 #else
90 bytes_default_per_client_ = 64 * 1024 * 1024;
91 bytes_minimum_per_client_ = 64 * 1024 * 1024;
92 #endif
93
94 if (command_line->HasSwitch(switches::kForceGpuMemAvailableMb)) {
95 base::StringToUint64(
96 command_line->GetSwitchValueASCII(switches::kForceGpuMemAvailableMb),
97 &bytes_available_gpu_memory_);
98 bytes_available_gpu_memory_ *= 1024 * 1024;
99 bytes_available_gpu_memory_overridden_ = true;
100 } else
101 bytes_available_gpu_memory_ = GetDefaultAvailableGpuMemory();
102 }
53 103
54 GpuMemoryManager::~GpuMemoryManager() { 104 GpuMemoryManager::~GpuMemoryManager() {
55 DCHECK(tracking_groups_.empty()); 105 DCHECK(tracking_groups_.empty());
56 DCHECK(clients_visible_mru_.empty()); 106 DCHECK(clients_visible_mru_.empty());
57 DCHECK(clients_nonvisible_mru_.empty()); 107 DCHECK(clients_nonvisible_mru_.empty());
58 DCHECK(clients_nonsurface_.empty()); 108 DCHECK(clients_nonsurface_.empty());
59 DCHECK(!bytes_allocated_managed_current_); 109 DCHECK(!bytes_allocated_managed_current_);
60 DCHECK(!bytes_allocated_unmanaged_current_); 110 DCHECK(!bytes_allocated_unmanaged_current_);
61 } 111 }
62 112
113 uint64 GpuMemoryManager::GetAvailableGpuMemory() const {
114 // Allow unmanaged allocations to over-subscribe by at most (high_ - low_)
115 // before restricting managed (compositor) memory based on unmanaged usage.
116 if (bytes_allocated_unmanaged_low_ > bytes_available_gpu_memory_)
117 return 0;
118 return bytes_available_gpu_memory_ - bytes_allocated_unmanaged_low_;
119 }
120
121 uint64 GpuMemoryManager::GetDefaultAvailableGpuMemory() const {
122 #if defined(OS_ANDROID)
123 return 16 * 1024 * 1024;
124 #elif defined(OS_CHROMEOS)
125 return 1024 * 1024 * 1024;
126 #else
127 return 256 * 1024 * 1024;
128 #endif
129 }
130
131 uint64 GpuMemoryManager::GetMaximumTotalGpuMemory() const {
132 #if defined(OS_ANDROID)
133 return 256 * 1024 * 1024;
134 #else
135 return 1024 * 1024 * 1024;
136 #endif
137 }
138
139 uint64 GpuMemoryManager::GetMaximumClientAllocation() const {
140 #if defined(OS_ANDROID) || defined(OS_CHROMEOS)
141 return bytes_available_gpu_memory_;
142 #else
143 // This is to avoid allowing a single page to use a full 256MB of memory
144 // (the current total limit). Long-scroll pages will hit this limit,
145 // resulting in instability on some platforms (e.g, issue 141377).
146 return bytes_available_gpu_memory_ / 2;
147 #endif
148 }
149
150 uint64 GpuMemoryManager::CalcAvailableFromGpuTotal(uint64 total_gpu_memory) {
151 #if defined(OS_ANDROID)
152 // We don't need to reduce the total on Android, since
153 // the total is an estimate to begin with.
154 return total_gpu_memory;
155 #else
156 // Allow Chrome to use 75% of total GPU memory, or all-but-64MB of GPU
157 // memory, whichever is less.
158 return std::min(3 * total_gpu_memory / 4, total_gpu_memory - 64*1024*1024);
159 #endif
160 }
161
63 void GpuMemoryManager::UpdateAvailableGpuMemory() { 162 void GpuMemoryManager::UpdateAvailableGpuMemory() {
64 // If the value was overridden on the command line, use the specified value. 163 // If the amount of video memory to use was specified at the command
65 static bool client_hard_limit_bytes_overridden = 164 // line, never change it.
66 CommandLine::ForCurrentProcess()->HasSwitch( 165 if (bytes_available_gpu_memory_overridden_)
67 switches::kForceGpuMemAvailableMb);
68 if (client_hard_limit_bytes_overridden) {
69 base::StringToUint64(
70 CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
71 switches::kForceGpuMemAvailableMb),
72 &client_hard_limit_bytes_);
73 client_hard_limit_bytes_ *= 1024 * 1024;
74 return; 166 return;
75 }
76 167
77 // On non-Android, we use an operating system query when possible. 168 // On non-Android, we use an operating system query when possible.
78 // We do not have a reliable concept of multiple GPUs existing in 169 // We do not have a reliable concept of multiple GPUs existing in
79 // a system, so just be safe and go with the minimum encountered. 170 // a system, so just be safe and go with the minimum encountered.
80 uint64 bytes_min = 0; 171 uint64 bytes_min = 0;
81 172
82 // Only use the clients that are visible, because otherwise the set of clients 173 // Only use the clients that are visible, because otherwise the set of clients
83 // we are querying could become extremely large. 174 // we are querying could become extremely large.
84 for (ClientStateList::const_iterator it = clients_visible_mru_.begin(); 175 for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
85 it != clients_visible_mru_.end(); 176 it != clients_visible_mru_.end();
86 ++it) { 177 ++it) {
87 const GpuMemoryManagerClientState* client_state = *it; 178 const GpuMemoryManagerClientState* client_state = *it;
88 if (!client_state->has_surface_) 179 if (!client_state->has_surface_)
89 continue; 180 continue;
90 if (!client_state->visible_) 181 if (!client_state->visible_)
91 continue; 182 continue;
92 183
93 uint64 bytes = 0; 184 uint64 bytes = 0;
94 if (client_state->client_->GetTotalGpuMemory(&bytes)) { 185 if (client_state->client_->GetTotalGpuMemory(&bytes)) {
95 if (!bytes_min || bytes < bytes_min) 186 if (!bytes_min || bytes < bytes_min)
96 bytes_min = bytes; 187 bytes_min = bytes;
97 } 188 }
98 } 189 }
99 190
100 if (!bytes_min) 191 if (!bytes_min)
101 return; 192 return;
102 193
103 client_hard_limit_bytes_ = bytes_min; 194 bytes_available_gpu_memory_ = CalcAvailableFromGpuTotal(bytes_min);
104 195
105 #if defined(OS_ANDROID) 196 // Never go below the default allocation
106 // Clamp the observed value to a specific range on Android. 197 bytes_available_gpu_memory_ = std::max(bytes_available_gpu_memory_,
107 client_hard_limit_bytes_ = std::max(client_hard_limit_bytes_, 198 GetDefaultAvailableGpuMemory());
108 static_cast<uint64>(16 * 1024 * 1024)); 199
109 client_hard_limit_bytes_ = std::min(client_hard_limit_bytes_, 200 // Never go above the maximum.
110 static_cast<uint64>(256 * 1024 * 1024)); 201 bytes_available_gpu_memory_ = std::min(bytes_available_gpu_memory_,
111 #else 202 GetMaximumTotalGpuMemory());
112 // Ignore what the system said and give all clients the same maximum 203 }
113 // allocation on desktop platforms. 204
114 client_hard_limit_bytes_ = 256 * 1024 * 1024; 205 void GpuMemoryManager::UpdateUnmanagedMemoryLimits() {
115 #endif 206 // Set the limit to be [current_, current_ + step_ / 4), with the endpoints
207 // of the intervals rounded down and up to the nearest step_, to avoid
208 // thrashing the interval.
209 bytes_allocated_unmanaged_high_ = RoundUp(
210 bytes_allocated_unmanaged_current_ + bytes_unmanaged_limit_step_ / 4,
211 bytes_unmanaged_limit_step_);
212 bytes_allocated_unmanaged_low_ = RoundDown(
213 bytes_allocated_unmanaged_current_,
214 bytes_unmanaged_limit_step_);
116 } 215 }
117 216
118 void GpuMemoryManager::ScheduleManage( 217 void GpuMemoryManager::ScheduleManage(
119 ScheduleManageTime schedule_manage_time) { 218 ScheduleManageTime schedule_manage_time) {
120 if (disable_schedule_manage_) 219 if (disable_schedule_manage_)
121 return; 220 return;
122 if (manage_immediate_scheduled_) 221 if (manage_immediate_scheduled_)
123 return; 222 return;
124 if (schedule_manage_time == kScheduleManageNow) { 223 if (schedule_manage_time == kScheduleManageNow) {
125 base::MessageLoop::current()->PostTask( 224 base::MessageLoop::current()->PostTask(
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
157 default: 256 default:
158 NOTREACHED(); 257 NOTREACHED();
159 break; 258 break;
160 } 259 }
161 if (new_size != old_size) { 260 if (new_size != old_size) {
162 TRACE_COUNTER1("gpu", 261 TRACE_COUNTER1("gpu",
163 "GpuMemoryUsage", 262 "GpuMemoryUsage",
164 GetCurrentUsage()); 263 GetCurrentUsage());
165 } 264 }
166 265
167 if (GetCurrentUsage() > bytes_allocated_historical_max_ + 266 // If we've gone past our current limit on unmanaged memory, schedule a
168 kBytesAllocatedUnmanagedStep) { 267 // re-manage to take into account the unmanaged memory.
268 if (bytes_allocated_unmanaged_current_ >= bytes_allocated_unmanaged_high_)
269 ScheduleManage(kScheduleManageNow);
270 if (bytes_allocated_unmanaged_current_ < bytes_allocated_unmanaged_low_)
271 ScheduleManage(kScheduleManageLater);
272
273 if (GetCurrentUsage() > bytes_allocated_historical_max_) {
169 bytes_allocated_historical_max_ = GetCurrentUsage(); 274 bytes_allocated_historical_max_ = GetCurrentUsage();
170 // If we're blowing into new memory usage territory, spam the browser 275 // If we're blowing into new memory usage territory, spam the browser
171 // process with the most up-to-date information about our memory usage. 276 // process with the most up-to-date information about our memory usage.
172 SendUmaStatsToBrowser(); 277 SendUmaStatsToBrowser();
173 } 278 }
174 } 279 }
175 280
176 bool GpuMemoryManager::EnsureGPUMemoryAvailable(uint64 /* size_needed */) { 281 bool GpuMemoryManager::EnsureGPUMemoryAvailable(uint64 /* size_needed */) {
177 // TODO: Check if there is enough space. Lose contexts until there is. 282 // TODO: Check if there is enough space. Lose contexts until there is.
178 return true; 283 return true;
(...skipping 28 matching lines...) Expand all
207 return; 312 return;
208 313
209 RemoveClientFromList(client_state); 314 RemoveClientFromList(client_state);
210 client_state->visible_ = visible; 315 client_state->visible_ = visible;
211 AddClientToList(client_state); 316 AddClientToList(client_state);
212 ScheduleManage(visible ? kScheduleManageNow : kScheduleManageLater); 317 ScheduleManage(visible ? kScheduleManageNow : kScheduleManageLater);
213 } 318 }
214 319
215 void GpuMemoryManager::SetClientStateManagedMemoryStats( 320 void GpuMemoryManager::SetClientStateManagedMemoryStats(
216 GpuMemoryManagerClientState* client_state, 321 GpuMemoryManagerClientState* client_state,
217 const ManagedMemoryStats& stats) { 322 const ManagedMemoryStats& stats)
218 // TODO(ccameron): delete this from the full stack. 323 {
324 client_state->managed_memory_stats_ = stats;
325
326 // If this is the first time that stats have been received for this
327 // client, use them immediately.
328 if (!client_state->managed_memory_stats_received_) {
329 client_state->managed_memory_stats_received_ = true;
330 ScheduleManage(kScheduleManageNow);
331 return;
332 }
333
334 // If these statistics sit outside of the range that we used in our
335 // computation of memory allocations then recompute the allocations.
336 if (client_state->managed_memory_stats_.bytes_nice_to_have >
337 client_state->bytes_nicetohave_limit_high_) {
338 ScheduleManage(kScheduleManageNow);
339 } else if (client_state->managed_memory_stats_.bytes_nice_to_have <
340 client_state->bytes_nicetohave_limit_low_) {
341 ScheduleManage(kScheduleManageLater);
342 }
219 } 343 }
220 344
221 uint64 GpuMemoryManager::GetClientMemoryUsage( 345 uint64 GpuMemoryManager::GetClientMemoryUsage(
222 const GpuMemoryManagerClient* client) const { 346 const GpuMemoryManagerClient* client) const {
223 TrackingGroupMap::const_iterator tracking_group_it = 347 TrackingGroupMap::const_iterator tracking_group_it =
224 tracking_groups_.find(client->GetMemoryTracker()); 348 tracking_groups_.find(client->GetMemoryTracker());
225 DCHECK(tracking_group_it != tracking_groups_.end()); 349 DCHECK(tracking_group_it != tracking_groups_.end());
226 return tracking_group_it->second->GetSize(); 350 return tracking_group_it->second->GetSize();
227 } 351 }
228 352
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
264 bytes_allocated_historical_max_; 388 bytes_allocated_historical_max_;
265 } 389 }
266 390
267 void GpuMemoryManager::Manage() { 391 void GpuMemoryManager::Manage() {
268 manage_immediate_scheduled_ = false; 392 manage_immediate_scheduled_ = false;
269 delayed_manage_callback_.Cancel(); 393 delayed_manage_callback_.Cancel();
270 394
271 // Update the amount of GPU memory available on the system. 395 // Update the amount of GPU memory available on the system.
272 UpdateAvailableGpuMemory(); 396 UpdateAvailableGpuMemory();
273 397
398 // Update the limit on unmanaged memory.
399 UpdateUnmanagedMemoryLimits();
400
274 // Determine which clients are "hibernated" (which determines the 401 // Determine which clients are "hibernated" (which determines the
275 // distribution of frontbuffers and memory among clients that don't have 402 // distribution of frontbuffers and memory among clients that don't have
276 // surfaces). 403 // surfaces).
277 SetClientsHibernatedState(); 404 SetClientsHibernatedState();
278 405
279 // Assign memory allocations to clients that have surfaces. 406 // Assign memory allocations to clients that have surfaces.
280 AssignSurfacesAllocations(); 407 AssignSurfacesAllocations();
281 408
282 // Assign memory allocations to clients that don't have surfaces. 409 // Assign memory allocations to clients that don't have surfaces.
283 AssignNonSurfacesAllocations(); 410 AssignNonSurfacesAllocations();
284 411
285 SendUmaStatsToBrowser(); 412 SendUmaStatsToBrowser();
286 } 413 }
287 414
415 // static
416 uint64 GpuMemoryManager::ComputeCap(
417 std::vector<uint64> bytes, uint64 bytes_sum_limit)
418 {
419 size_t bytes_size = bytes.size();
420 uint64 bytes_sum = 0;
421
422 if (bytes_size == 0)
423 return std::numeric_limits<uint64>::max();
424
425 // Sort and add up all entries
426 std::sort(bytes.begin(), bytes.end());
427 for (size_t i = 0; i < bytes_size; ++i)
428 bytes_sum += bytes[i];
429
430 // As we go through the below loop, let bytes_partial_sum be the
431 // sum of bytes[0] + ... + bytes[bytes_size - i - 1]
432 uint64 bytes_partial_sum = bytes_sum;
433
434 // Try using each entry as a cap, and see where we get cut off.
435 for (size_t i = 0; i < bytes_size; ++i) {
436 // Try limiting cap to bytes[bytes_size - i - 1]
437 uint64 test_cap = bytes[bytes_size - i - 1];
438 uint64 bytes_sum_with_test_cap = i * test_cap + bytes_partial_sum;
439
440 // If that fits, raise test_cap to give an even distribution to the
441 // last i entries.
442 if (bytes_sum_with_test_cap <= bytes_sum_limit) {
443 if (i == 0)
444 return std::numeric_limits<uint64>::max();
445 else
446 return test_cap + (bytes_sum_limit - bytes_sum_with_test_cap) / i;
447 } else {
448 bytes_partial_sum -= test_cap;
449 }
450 }
451
452 // If we got here, then we can't fully accommodate any of the clients,
453 // so distribute bytes_sum_limit evenly.
454 return bytes_sum_limit / bytes_size;
455 }
456
457 uint64 GpuMemoryManager::ComputeClientAllocationWhenVisible(
458 GpuMemoryManagerClientState* client_state,
459 uint64 bytes_above_required_cap,
460 uint64 bytes_above_minimum_cap,
461 uint64 bytes_overall_cap) {
462 ManagedMemoryStats* stats = &client_state->managed_memory_stats_;
463
464 if (!client_state->managed_memory_stats_received_)
465 return GetDefaultClientAllocation();
466
467 uint64 bytes_required = 9 * stats->bytes_required / 8;
468 bytes_required = std::min(bytes_required, GetMaximumClientAllocation());
469 bytes_required = std::max(bytes_required, GetMinimumClientAllocation());
470
471 uint64 bytes_nicetohave = 4 * stats->bytes_nice_to_have / 3;
472 bytes_nicetohave = std::min(bytes_nicetohave, GetMaximumClientAllocation());
473 bytes_nicetohave = std::max(bytes_nicetohave, GetMinimumClientAllocation());
474 bytes_nicetohave = std::max(bytes_nicetohave, bytes_required);
475
476 uint64 allocation = GetMinimumClientAllocation();
477 allocation += std::min(bytes_required - GetMinimumClientAllocation(),
478 bytes_above_minimum_cap);
479 allocation += std::min(bytes_nicetohave - bytes_required,
480 bytes_above_required_cap);
481 allocation = std::min(allocation,
482 bytes_overall_cap);
483 return allocation;
484 }
485
486 void GpuMemoryManager::ComputeVisibleSurfacesAllocations() {
487 uint64 bytes_available_total = GetAvailableGpuMemory();
488 uint64 bytes_above_required_cap = std::numeric_limits<uint64>::max();
489 uint64 bytes_above_minimum_cap = std::numeric_limits<uint64>::max();
490 uint64 bytes_overall_cap_visible = GetMaximumClientAllocation();
491
492 // Compute memory usage at three levels
493 // - painting everything that is nicetohave for visible clients
494 // - painting only what is visible
495 // - giving every client the minimum allocation
496 uint64 bytes_nicetohave_visible = 0;
497 uint64 bytes_required_visible = 0;
498 uint64 bytes_minimum_visible = 0;
499 for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
500 it != clients_visible_mru_.end();
501 ++it) {
502 GpuMemoryManagerClientState* client_state = *it;
503 client_state->bytes_allocation_ideal_nicetohave_ =
504 ComputeClientAllocationWhenVisible(
505 client_state,
506 bytes_above_required_cap,
507 bytes_above_minimum_cap,
508 bytes_overall_cap_visible);
509 client_state->bytes_allocation_ideal_required_ =
510 ComputeClientAllocationWhenVisible(
511 client_state,
512 0,
513 bytes_above_minimum_cap,
514 bytes_overall_cap_visible);
515 client_state->bytes_allocation_ideal_minimum_ =
516 ComputeClientAllocationWhenVisible(
517 client_state,
518 0,
519 0,
520 bytes_overall_cap_visible);
521
522 bytes_nicetohave_visible +=
523 client_state->bytes_allocation_ideal_nicetohave_;
524 bytes_required_visible +=
525 client_state->bytes_allocation_ideal_required_;
526 bytes_minimum_visible +=
527 client_state->bytes_allocation_ideal_minimum_;
528 }
529
530 // Determine which of those three points we can satisfy, and limit
531 // bytes_above_required_cap and bytes_above_minimum_cap to not go
532 // over the limit.
533 if (bytes_minimum_visible > bytes_available_total) {
534 bytes_above_required_cap = 0;
535 bytes_above_minimum_cap = 0;
536 } else if (bytes_required_visible > bytes_available_total) {
537 std::vector<uint64> bytes_to_fit;
538 for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
539 it != clients_visible_mru_.end();
540 ++it) {
541 GpuMemoryManagerClientState* client_state = *it;
542 bytes_to_fit.push_back(client_state->bytes_allocation_ideal_required_ -
543 client_state->bytes_allocation_ideal_minimum_);
544 }
545 bytes_above_required_cap = 0;
546 bytes_above_minimum_cap = ComputeCap(
547 bytes_to_fit, bytes_available_total - bytes_minimum_visible);
548 } else if (bytes_nicetohave_visible > bytes_available_total) {
549 std::vector<uint64> bytes_to_fit;
550 for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
551 it != clients_visible_mru_.end();
552 ++it) {
553 GpuMemoryManagerClientState* client_state = *it;
554 bytes_to_fit.push_back(client_state->bytes_allocation_ideal_nicetohave_ -
555 client_state->bytes_allocation_ideal_required_);
556 }
557 bytes_above_required_cap = ComputeCap(
558 bytes_to_fit, bytes_available_total - bytes_required_visible);
559 bytes_above_minimum_cap = std::numeric_limits<uint64>::max();
560 }
561
562 // Given those computed limits, set the actual memory allocations for the
563 // visible clients, tracking the largest allocation and the total allocation
564 // for future use.
565 uint64 bytes_allocated_visible = 0;
566 uint64 bytes_allocated_max_client_allocation = 0;
567 for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
568 it != clients_visible_mru_.end();
569 ++it) {
570 GpuMemoryManagerClientState* client_state = *it;
571 client_state->bytes_allocation_when_visible_ =
572 ComputeClientAllocationWhenVisible(
573 client_state,
574 bytes_above_required_cap,
575 bytes_above_minimum_cap,
576 bytes_overall_cap_visible);
577 bytes_allocated_visible += client_state->bytes_allocation_when_visible_;
578 bytes_allocated_max_client_allocation = std::max(
579 bytes_allocated_max_client_allocation,
580 client_state->bytes_allocation_when_visible_);
581 }
582
583 // Set the limit for nonvisible clients for when they become visible.
584 // Use the same formula, with a lowered overall cap in case any of the
585 // currently-nonvisible clients are much more resource-intensive than any
586 // of the existing clients.
587 uint64 bytes_overall_cap_nonvisible = bytes_allocated_max_client_allocation;
588 if (bytes_available_total > bytes_allocated_visible) {
589 bytes_overall_cap_nonvisible +=
590 bytes_available_total - bytes_allocated_visible;
591 }
592 bytes_overall_cap_nonvisible = std::min(bytes_overall_cap_nonvisible,
593 GetMaximumClientAllocation());
594 for (ClientStateList::const_iterator it = clients_nonvisible_mru_.begin();
595 it != clients_nonvisible_mru_.end();
596 ++it) {
597 GpuMemoryManagerClientState* client_state = *it;
598 client_state->bytes_allocation_when_visible_ =
599 ComputeClientAllocationWhenVisible(
600 client_state,
601 bytes_above_required_cap,
602 bytes_above_minimum_cap,
603 bytes_overall_cap_nonvisible);
604 }
605 }
606
607 void GpuMemoryManager::DistributeRemainingMemoryToVisibleSurfaces() {
608 uint64 bytes_available_total = GetAvailableGpuMemory();
609 uint64 bytes_allocated_total = 0;
610
611 for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
612 it != clients_visible_mru_.end();
613 ++it) {
614 GpuMemoryManagerClientState* client_state = *it;
615 bytes_allocated_total += client_state->bytes_allocation_when_visible_;
616 }
617
618 if (bytes_allocated_total >= bytes_available_total)
619 return;
620
621 std::vector<uint64> bytes_extra_requests;
622 for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
623 it != clients_visible_mru_.end();
624 ++it) {
625 GpuMemoryManagerClientState* client_state = *it;
626 CHECK(GetMaximumClientAllocation() >=
627 client_state->bytes_allocation_when_visible_);
628 uint64 bytes_extra = GetMaximumClientAllocation() -
629 client_state->bytes_allocation_when_visible_;
630 bytes_extra_requests.push_back(bytes_extra);
631 }
632 uint64 bytes_extra_cap = ComputeCap(
633 bytes_extra_requests, bytes_available_total - bytes_allocated_total);
634 for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
635 it != clients_visible_mru_.end();
636 ++it) {
637 GpuMemoryManagerClientState* client_state = *it;
638 uint64 bytes_extra = GetMaximumClientAllocation() -
639 client_state->bytes_allocation_when_visible_;
640 client_state->bytes_allocation_when_visible_ += std::min(
641 bytes_extra, bytes_extra_cap);
642 }
643 }
644
288 void GpuMemoryManager::AssignSurfacesAllocations() { 645 void GpuMemoryManager::AssignSurfacesAllocations() {
646 // Compute allocations for all clients.
647 ComputeVisibleSurfacesAllocations();
648
649 // Distribute the remaining memory to visible clients.
650 DistributeRemainingMemoryToVisibleSurfaces();
651
289 // Send that allocation to the clients. 652 // Send that allocation to the clients.
290 ClientStateList clients = clients_visible_mru_; 653 ClientStateList clients = clients_visible_mru_;
291 clients.insert(clients.end(), 654 clients.insert(clients.end(),
292 clients_nonvisible_mru_.begin(), 655 clients_nonvisible_mru_.begin(),
293 clients_nonvisible_mru_.end()); 656 clients_nonvisible_mru_.end());
294 for (ClientStateList::const_iterator it = clients.begin(); 657 for (ClientStateList::const_iterator it = clients.begin();
295 it != clients.end(); 658 it != clients.end();
296 ++it) { 659 ++it) {
297 GpuMemoryManagerClientState* client_state = *it; 660 GpuMemoryManagerClientState* client_state = *it;
298 661
662 // Re-assign memory limits to this client when its "nice to have" bucket
663 // grows or shrinks by 1/4.
664 client_state->bytes_nicetohave_limit_high_ =
665 5 * client_state->managed_memory_stats_.bytes_nice_to_have / 4;
666 client_state->bytes_nicetohave_limit_low_ =
667 3 * client_state->managed_memory_stats_.bytes_nice_to_have / 4;
668
299 // Populate and send the allocation to the client 669 // Populate and send the allocation to the client
300 MemoryAllocation allocation; 670 MemoryAllocation allocation;
301 allocation.bytes_limit_when_visible = client_hard_limit_bytes_; 671
302 #if defined(OS_ANDROID) 672 allocation.bytes_limit_when_visible =
303 // On Android, because there is only one visible tab at any time, allow 673 client_state->bytes_allocation_when_visible_;
304 // that renderer to cache as much as it can. 674 allocation.priority_cutoff_when_visible = priority_cutoff_;
305 allocation.priority_cutoff_when_visible =
306 MemoryAllocation::CUTOFF_ALLOW_EVERYTHING;
307 #else
308 // On desktop platforms, instruct the renderers to cache only a smaller
309 // set, to play nice with other renderers and other applications. If this
310 // is not done, then the system can become unstable.
311 // http://crbug.com/145600 (Linux)
312 // http://crbug.com/141377 (Mac)
313 allocation.priority_cutoff_when_visible =
314 MemoryAllocation::CUTOFF_ALLOW_NICE_TO_HAVE;
315 #endif
316 675
317 client_state->client_->SetMemoryAllocation(allocation); 676 client_state->client_->SetMemoryAllocation(allocation);
318 client_state->client_->SuggestHaveFrontBuffer(!client_state->hibernated_); 677 client_state->client_->SuggestHaveFrontBuffer(!client_state->hibernated_);
319 } 678 }
320 } 679 }
321 680
322 void GpuMemoryManager::AssignNonSurfacesAllocations() { 681 void GpuMemoryManager::AssignNonSurfacesAllocations() {
323 for (ClientStateList::const_iterator it = clients_nonsurface_.begin(); 682 for (ClientStateList::const_iterator it = clients_nonsurface_.begin();
324 it != clients_nonsurface_.end(); 683 it != clients_nonsurface_.end();
325 ++it) { 684 ++it) {
326 GpuMemoryManagerClientState* client_state = *it; 685 GpuMemoryManagerClientState* client_state = *it;
327 MemoryAllocation allocation; 686 MemoryAllocation allocation;
328 687
329 if (!client_state->hibernated_) { 688 if (!client_state->hibernated_) {
330 allocation.bytes_limit_when_visible = 1; 689 allocation.bytes_limit_when_visible =
690 GetMinimumClientAllocation();
331 allocation.priority_cutoff_when_visible = 691 allocation.priority_cutoff_when_visible =
332 MemoryAllocation::CUTOFF_ALLOW_EVERYTHING; 692 MemoryAllocation::CUTOFF_ALLOW_EVERYTHING;
333 } 693 }
334 694
335 client_state->client_->SetMemoryAllocation(allocation); 695 client_state->client_->SetMemoryAllocation(allocation);
336 } 696 }
337 } 697 }
338 698
339 void GpuMemoryManager::SetClientsHibernatedState() const { 699 void GpuMemoryManager::SetClientsHibernatedState() const {
340 // Re-set all tracking groups as being hibernated. 700 // Re-set all tracking groups as being hibernated.
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
377 client_state->hibernated_ = client_state->tracking_group_->hibernated_; 737 client_state->hibernated_ = client_state->tracking_group_->hibernated_;
378 } 738 }
379 } 739 }
380 740
381 void GpuMemoryManager::SendUmaStatsToBrowser() { 741 void GpuMemoryManager::SendUmaStatsToBrowser() {
382 if (!channel_manager_) 742 if (!channel_manager_)
383 return; 743 return;
384 GPUMemoryUmaStats params; 744 GPUMemoryUmaStats params;
385 params.bytes_allocated_current = GetCurrentUsage(); 745 params.bytes_allocated_current = GetCurrentUsage();
386 params.bytes_allocated_max = bytes_allocated_historical_max_; 746 params.bytes_allocated_max = bytes_allocated_historical_max_;
387 params.bytes_limit = client_hard_limit_bytes_; 747 params.bytes_limit = bytes_available_gpu_memory_;
388 params.client_count = clients_visible_mru_.size() + 748 params.client_count = clients_visible_mru_.size() +
389 clients_nonvisible_mru_.size() + 749 clients_nonvisible_mru_.size() +
390 clients_nonsurface_.size(); 750 clients_nonsurface_.size();
391 params.context_group_count = tracking_groups_.size(); 751 params.context_group_count = tracking_groups_.size();
392 channel_manager_->Send(new GpuHostMsg_GpuMemoryUmaStats(params)); 752 channel_manager_->Send(new GpuHostMsg_GpuMemoryUmaStats(params));
393 } 753 }
394 754
395 GpuMemoryManager::ClientStateList* GpuMemoryManager::GetClientList( 755 GpuMemoryManager::ClientStateList* GpuMemoryManager::GetClientList(
396 GpuMemoryManagerClientState* client_state) { 756 GpuMemoryManagerClientState* client_state) {
397 if (client_state->has_surface_) { 757 if (client_state->has_surface_) {
(...skipping 16 matching lines...) Expand all
414 774
415 void GpuMemoryManager::RemoveClientFromList( 775 void GpuMemoryManager::RemoveClientFromList(
416 GpuMemoryManagerClientState* client_state) { 776 GpuMemoryManagerClientState* client_state) {
417 DCHECK(client_state->list_iterator_valid_); 777 DCHECK(client_state->list_iterator_valid_);
418 ClientStateList* client_list = GetClientList(client_state); 778 ClientStateList* client_list = GetClientList(client_state);
419 client_list->erase(client_state->list_iterator_); 779 client_list->erase(client_state->list_iterator_);
420 client_state->list_iterator_valid_ = false; 780 client_state->list_iterator_valid_ = false;
421 } 781 }
422 782
423 } // namespace content 783 } // namespace content
OLDNEW
« no previous file with comments | « trunk/src/content/common/gpu/gpu_memory_manager.h ('k') | trunk/src/content/common/gpu/gpu_memory_manager_client.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698