Chromium Code Reviews

Diff: content/common/gpu/gpu_memory_manager.cc

Issue 1412923004: Revert of Move gpu memory calculations to Compositor. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: rebase Created 5 years, 1 month ago
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/gpu_memory_manager.h"

#include <algorithm>

#include "base/bind.h"
#include "base/command_line.h"
(...skipping 39 matching lines...)
{ }

GpuMemoryManager::~GpuMemoryManager() {
  DCHECK(tracking_groups_.empty());
  DCHECK(clients_visible_mru_.empty());
  DCHECK(clients_nonvisible_mru_.empty());
  DCHECK(clients_nonsurface_.empty());
  DCHECK(!bytes_allocated_current_);
}

void GpuMemoryManager::UpdateAvailableGpuMemory() {
  // If the value was overridden on the command line, use the specified value.
  static bool client_hard_limit_bytes_overridden =
      base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kForceGpuMemAvailableMb);
  if (client_hard_limit_bytes_overridden) {
    base::StringToUint64(
        base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
            switches::kForceGpuMemAvailableMb),
        &client_hard_limit_bytes_);
    client_hard_limit_bytes_ *= 1024 * 1024;
    return;
  }

#if defined(OS_ANDROID)
  // On Android, derive the limit from the amounts the visible clients report.
  // We do not have a reliable concept of multiple GPUs existing in a system,
  // so just be safe and go with the minimum encountered.
  uint64 bytes_min = 0;

  // Only use the clients that are visible, because otherwise the set of
  // clients we are querying could become extremely large.
  for (ClientStateList::const_iterator it = clients_visible_mru_.begin();
       it != clients_visible_mru_.end();
       ++it) {
    const GpuMemoryManagerClientState* client_state = *it;
    if (!client_state->has_surface_)
      continue;
    if (!client_state->visible_)
      continue;

    uint64 bytes = 0;
    if (client_state->client_->GetTotalGpuMemory(&bytes)) {
      if (!bytes_min || bytes < bytes_min)
        bytes_min = bytes;
    }
  }

  client_hard_limit_bytes_ = bytes_min;
  // Clamp the observed value to a specific range on Android.
  client_hard_limit_bytes_ = std::max(client_hard_limit_bytes_,
                                      static_cast<uint64>(8 * 1024 * 1024));
  client_hard_limit_bytes_ = std::min(client_hard_limit_bytes_,
                                      static_cast<uint64>(256 * 1024 * 1024));
#else
  // Ignore what the system said and give all clients the same maximum
  // allocation on desktop platforms.
  client_hard_limit_bytes_ = 512 * 1024 * 1024;
#endif
}

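The limit computation above reduces to a megabyte-to-byte conversion plus a clamp. As a rough standalone sketch of that arithmetic (hypothetical helper name and parameters, not code from this patch):

#include <algorithm>
#include <cstdint>

// Hypothetical illustration of the limit arithmetic above: an explicit
// --force-gpu-mem-available-mb value wins outright; otherwise the minimum
// reported by the visible clients is clamped to the Android range
// [8 MB, 256 MB].
uint64_t ComputeClientHardLimitBytes(bool has_forced_mb,
                                     uint64_t forced_mb,
                                     uint64_t min_reported_bytes) {
  if (has_forced_mb)
    return forced_mb * 1024 * 1024;  // The switch value is given in megabytes.
  uint64_t bytes = min_reported_bytes;
  bytes = std::max(bytes, static_cast<uint64_t>(8 * 1024 * 1024));
  bytes = std::min(bytes, static_cast<uint64_t>(256 * 1024 * 1024));
  return bytes;
}

Under that reading, passing --force-gpu-mem-available-mb=64 always yields a 67108864-byte limit regardless of what clients report, while an unforced Android value is pinned between 8 MB and 256 MB.
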
void GpuMemoryManager::ScheduleManage(
    ScheduleManageTime schedule_manage_time) {
  if (disable_schedule_manage_)
    return;
  if (manage_immediate_scheduled_)
    return;
  if (schedule_manage_time == kScheduleManageNow) {
    base::ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE, base::Bind(&GpuMemoryManager::Manage, AsWeakPtr()));
    manage_immediate_scheduled_ = true;
(...skipping 108 matching lines...)

  video_memory_usage_stats->bytes_allocated = GetCurrentUsage();
  video_memory_usage_stats->bytes_allocated_historical_max =
      bytes_allocated_historical_max_;
}

void GpuMemoryManager::Manage() {
  manage_immediate_scheduled_ = false;
  delayed_manage_callback_.Cancel();

  // Update the amount of GPU memory available on the system.
  UpdateAvailableGpuMemory();

  // Determine which clients are "hibernated" (which determines the
  // distribution of frontbuffers and memory among clients that don't have
  // surfaces).
  SetClientsHibernatedState();

  // Assign memory allocations to clients that have surfaces.
  AssignSurfacesAllocations();

  // Assign memory allocations to clients that don't have surfaces.
  AssignNonSurfacesAllocations();

  SendUmaStatsToBrowser();
}

void GpuMemoryManager::AssignSurfacesAllocations() {
  // Send an allocation to each client that has a surface.
  ClientStateList clients = clients_visible_mru_;
  clients.insert(clients.end(),
                 clients_nonvisible_mru_.begin(),
                 clients_nonvisible_mru_.end());
  for (ClientStateList::const_iterator it = clients.begin();
       it != clients.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;

    // Populate and send the allocation to the client.
    MemoryAllocation allocation;
    allocation.bytes_limit_when_visible = client_hard_limit_bytes_;
#if defined(OS_ANDROID)
    // On Android, because there is only one visible tab at any time, allow
    // that renderer to cache as much as it can.
    allocation.priority_cutoff_when_visible =
        MemoryAllocation::CUTOFF_ALLOW_EVERYTHING;
#else
    // On desktop platforms, instruct the renderers to cache only a smaller
    // set, to play nice with other renderers and other applications. If this
    // is not done, then the system can become unstable.
    // http://crbug.com/145600 (Linux)
    // http://crbug.com/141377 (Mac)
    allocation.priority_cutoff_when_visible =
        MemoryAllocation::CUTOFF_ALLOW_NICE_TO_HAVE;
#endif

    client_state->client_->SetMemoryAllocation(allocation);
    client_state->client_->SuggestHaveFrontBuffer(!client_state->hibernated_);
  }
}

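The platform split above is a policy choice: the single visible Android renderer may keep everything it can under its byte limit, while desktop renderers should only keep resources that are at least "nice to have". A minimal sketch of how a client could apply such a cutoff, using hypothetical priority and cutoff types rather than the real MemoryAllocation API:

#include <cstdint>

// Hypothetical resource priorities, ordered from most to least important.
enum class ResourcePriority { kRequired, kNiceToHave, kEverythingElse };

// Hypothetical cutoffs mirroring the two values used in this file.
enum class Cutoff { kAllowNiceToHave, kAllowEverything };

// Illustrative only: keep a resource if its priority clears the cutoff sent
// by the memory manager and keeping it still fits under the byte limit.
bool ShouldKeepResource(ResourcePriority priority,
                        Cutoff cutoff,
                        uint64_t bytes_if_kept,
                        uint64_t bytes_limit_when_visible) {
  bool priority_ok = cutoff == Cutoff::kAllowEverything ||
                     priority != ResourcePriority::kEverythingElse;
  return priority_ok && bytes_if_kept <= bytes_limit_when_visible;
}
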
void GpuMemoryManager::AssignNonSurfacesAllocations() {
  for (ClientStateList::const_iterator it = clients_nonsurface_.begin();
       it != clients_nonsurface_.end();
       ++it) {
    GpuMemoryManagerClientState* client_state = *it;
    MemoryAllocation allocation;

    if (!client_state->hibernated_) {
      allocation.bytes_limit_when_visible = client_hard_limit_bytes_;
      allocation.priority_cutoff_when_visible =
          MemoryAllocation::CUTOFF_ALLOW_EVERYTHING;
    }

    client_state->client_->SetMemoryAllocation(allocation);
  }
}

void GpuMemoryManager::SetClientsHibernatedState() const {
  // Re-set all tracking groups as being hibernated.
  for (TrackingGroupMap::const_iterator it = tracking_groups_.begin();
       it != tracking_groups_.end();
       ++it) {
    GpuMemoryTrackingGroup* tracking_group = it->second;
    tracking_group->hibernated_ = true;
  }
  // All clients with surfaces that are visible are non-hibernated.
  uint64 non_hibernated_clients = 0;
(...skipping 65 matching lines...)

void GpuMemoryManager::RemoveClientFromList(
    GpuMemoryManagerClientState* client_state) {
  DCHECK(client_state->list_iterator_valid_);
  ClientStateList* client_list = GetClientList(client_state);
  client_list->erase(client_state->list_iterator_);
  client_state->list_iterator_valid_ = false;
}

}  // namespace content
