Chromium Code Reviews

Unified Diff: content/common/gpu/gpu_memory_manager.cc

Issue 10083056: GpuMemoryManager suggests values for renderer Contents Texture Managers' preferred memory limit. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Fixing issue with every proxy registering a callback, even when it is null. Created 8 years, 8 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/common/gpu/gpu_memory_manager.h"

 #if defined(ENABLE_GPU)

 #include <algorithm>

 #include "base/bind.h"
 #include "base/message_loop.h"
 #include "content/common/gpu/gpu_command_buffer_stub.h"
 #include "content/common/gpu/gpu_memory_allocation.h"

 namespace {

 // These are predefined values (in bytes) for
-// GpuMemoryAllocation::gpuResourceSizeInBytes. Currently, the value is only
-// used to check if it is 0 or non-0. In the future, these values will not
-// come from constants, but rather will be distributed dynamically.
+// GpuMemoryAllocation::gpuResourceSizeInBytes.
+// Maximum Allocation for all tabs is a soft limit that can be exceeded
+// during the time it takes for renderers to respect new allocations, including
+// when switching tabs or opening a new window.
+// To alleviate some pressure, we decrease our desired limit by "one tab's
+// worth" of memory.
 enum {
-  kResourceSizeNonHibernatedTab = 1,
-  kResourceSizeHibernatedTab = 0
+  kMinimumAllocationForTab = 64 * 1024 * 1024,
+  kMaximumAllocationForTabs = 512 * 1024 * 1024 - kMinimumAllocationForTab,
 };

 bool IsInSameContextShareGroupAsAnyOf(
     const GpuCommandBufferStubBase* stub,
     const std::vector<GpuCommandBufferStubBase*>& stubs) {
   for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
       stubs.begin(); it != stubs.end(); ++it) {
     if (stub->IsInSameContextShareGroup(**it))
       return true;
   }
   return false;
 }

+void AssignMemoryAllocations(std::vector<GpuCommandBufferStubBase*>& stubs,
+                             GpuMemoryAllocation allocation) {
+  for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
+       it != stubs.end(); ++it) {
+    (*it)->SetMemoryAllocation(allocation);
+  }
+}
+
 }

 GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client,
     size_t max_surfaces_with_frontbuffer_soft_limit)
     : client_(client),
       manage_scheduled_(false),
       max_surfaces_with_frontbuffer_soft_limit_(
           max_surfaces_with_frontbuffer_soft_limit),
       weak_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
 }
(...skipping 45 matching lines...)
 // 1. These contexts do not track {visibility,last_used_time}, so cannot
 //    sort them directly.
 // 2. These contexts may be used by, and thus affect, other contexts, and so
 //    cannot be less visible than any affected context.
 // 3. Contexts belong to share groups within which resources can be shared.
 //
 // As such, the rule for categorizing contexts without a surface is:
 // 1. Find the most visible context-with-a-surface within each
 //    context-without-a-surface's share group, and inherit its visibility.
 void GpuMemoryManager::Manage() {
-  // Set up three allocation values for the three possible stub states
-  const GpuMemoryAllocation all_buffers_allocation(
-      kResourceSizeNonHibernatedTab, true, true);
-  const GpuMemoryAllocation front_buffers_allocation(
-      kResourceSizeNonHibernatedTab, false, true);
-  const GpuMemoryAllocation no_buffers_allocation(
-      kResourceSizeHibernatedTab, false, false);
-
   manage_scheduled_ = false;

   // Create stub lists by separating out the two types received from client
   std::vector<GpuCommandBufferStubBase*> stubs_with_surface;
   std::vector<GpuCommandBufferStubBase*> stubs_without_surface;
   {
     std::vector<GpuCommandBufferStubBase*> stubs;
     client_->AppendAllCommandBufferStubs(stubs);

     for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
         it != stubs.end(); ++it) {
       GpuCommandBufferStubBase* stub = *it;
+      if (!stub->handles_memory_allocations())
+        continue;
       if (stub->has_surface_state())
         stubs_with_surface.push_back(stub);
       else
         stubs_without_surface.push_back(stub);
     }
   }

   // Sort stubs with surface into {visibility,last_used_time} order using
   // custom comparator
   std::sort(stubs_with_surface.begin(),
             stubs_with_surface.end(),
             StubWithSurfaceComparator());
   DCHECK(std::unique(stubs_with_surface.begin(), stubs_with_surface.end()) ==
          stubs_with_surface.end());

   // Separate stubs into memory allocation sets.
-  std::vector<GpuCommandBufferStubBase*> all_buffers, front_buffers, no_buffers;
+  std::vector<GpuCommandBufferStubBase*> stubs_with_surface_foreground,
+      stubs_with_surface_background,
+      stubs_with_surface_hibernated,
+      stubs_without_surface_foreground,
+      stubs_without_surface_background,
+      stubs_without_surface_hibernated;

   for (size_t i = 0; i < stubs_with_surface.size(); ++i) {
     GpuCommandBufferStubBase* stub = stubs_with_surface[i];
     DCHECK(stub->has_surface_state());
-    if (stub->surface_state().visible) {
-      all_buffers.push_back(stub);
-      stub->SetMemoryAllocation(all_buffers_allocation);
-    } else if (i < max_surfaces_with_frontbuffer_soft_limit_) {
-      front_buffers.push_back(stub);
-      stub->SetMemoryAllocation(front_buffers_allocation);
-    } else {
-      no_buffers.push_back(stub);
-      stub->SetMemoryAllocation(no_buffers_allocation);
-    }
+    if (stub->surface_state().visible)
+      stubs_with_surface_foreground.push_back(stub);
+    else if (i < max_surfaces_with_frontbuffer_soft_limit_)
+      stubs_with_surface_background.push_back(stub);
+    else
+      stubs_with_surface_hibernated.push_back(stub);
   }
-
-  // Now, go through the stubs without surfaces and deduce visibility using the
-  // visibility of stubs which are in the same context share group.
   for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
       stubs_without_surface.begin(); it != stubs_without_surface.end(); ++it) {
     GpuCommandBufferStubBase* stub = *it;
     DCHECK(!stub->has_surface_state());
-    if (IsInSameContextShareGroupAsAnyOf(stub, all_buffers)) {
-      stub->SetMemoryAllocation(all_buffers_allocation);
-    } else if (IsInSameContextShareGroupAsAnyOf(stub, front_buffers)) {
-      stub->SetMemoryAllocation(front_buffers_allocation);
-    } else {
-      stub->SetMemoryAllocation(no_buffers_allocation);
-    }
+
+    // Stubs without surfaces have deduced allocation state using the state
+    // of surface stubs which are in the same context share group.
+    if (IsInSameContextShareGroupAsAnyOf(stub, stubs_with_surface_foreground))
+      stubs_without_surface_foreground.push_back(stub);
+    else if (IsInSameContextShareGroupAsAnyOf(
+        stub, stubs_with_surface_background))
+      stubs_without_surface_background.push_back(stub);
+    else
+      stubs_without_surface_hibernated.push_back(stub);
   }
+
+  // Calculate the memory allocation size in bytes given to each stub, by
+  // sharing the global limit equally among those that need it.
+  size_t num_stubs_need_mem = stubs_with_surface_foreground.size() +
+                              stubs_without_surface_foreground.size() +
+                              stubs_without_surface_background.size();
+  size_t base_allocation_size = kMinimumAllocationForTab * num_stubs_need_mem;
+  size_t bonus_allocation = 0;
+  if (base_allocation_size < kMaximumAllocationForTabs &&
+      !stubs_with_surface_foreground.empty())
+    bonus_allocation = (kMaximumAllocationForTabs - base_allocation_size) /
+        stubs_with_surface_foreground.size();
+
+  // Now give out allocations to everyone.
+  AssignMemoryAllocations(stubs_with_surface_foreground,
+      GpuMemoryAllocation(kMinimumAllocationForTab + bonus_allocation,
+                          GpuMemoryAllocation::kHasFrontbuffer |
+                          GpuMemoryAllocation::kHasBackbuffer));
+
+  AssignMemoryAllocations(stubs_with_surface_background,
+      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasFrontbuffer));
+
+  AssignMemoryAllocations(stubs_with_surface_hibernated,
+      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers));
+
+  AssignMemoryAllocations(stubs_without_surface_foreground,
+      GpuMemoryAllocation(kMinimumAllocationForTab,
+                          GpuMemoryAllocation::kHasNoBuffers));
+
+  AssignMemoryAllocations(stubs_without_surface_background,
+      GpuMemoryAllocation(kMinimumAllocationForTab,
+                          GpuMemoryAllocation::kHasNoBuffers));
+
+  AssignMemoryAllocations(stubs_without_surface_hibernated,
+      GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers));
 }

 #endif
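
Reviewer note: to make the new arithmetic in Manage() concrete, here is a minimal standalone sketch of the allocation math. The two constants are copied from this patch; the stub counts and the main() harness are hypothetical and only illustrate one possible scene, not part of the change itself.

    #include <cstddef>
    #include <iostream>

    // Constants as defined in this patch.
    const size_t kMinimumAllocationForTab = 64 * 1024 * 1024;
    const size_t kMaximumAllocationForTabs =
        512 * 1024 * 1024 - kMinimumAllocationForTab;  // 448 MB soft limit.

    int main() {
      // Hypothetical scene: 2 visible tabs with surfaces, 1 surfaceless stub
      // in a foreground share group, 1 surfaceless stub in a background group.
      size_t surface_foreground = 2;
      size_t surfaceless_foreground = 1;
      size_t surfaceless_background = 1;

      // Every stub that needs memory reserves the per-tab minimum.
      size_t num_stubs_need_mem =
          surface_foreground + surfaceless_foreground + surfaceless_background;
      size_t base_allocation_size =
          kMinimumAllocationForTab * num_stubs_need_mem;  // 4 * 64 MB = 256 MB

      // Headroom under the soft limit is split evenly among foreground stubs
      // that own a surface.
      size_t bonus_allocation = 0;
      if (base_allocation_size < kMaximumAllocationForTabs &&
          surface_foreground > 0) {
        bonus_allocation = (kMaximumAllocationForTabs - base_allocation_size) /
            surface_foreground;  // (448 MB - 256 MB) / 2 = 96 MB
      }

      // Each visible tab is offered 64 MB + 96 MB = 160 MB.
      std::cout << (kMinimumAllocationForTab + bonus_allocation) / (1024 * 1024)
                << " MB per foreground surface stub\n";
      return 0;
    }

Note that the 512 MB figure is reduced by one tab's minimum precisely because the limit is soft: while renderers are still adjusting to new allocations (for example during a tab switch or when a new window opens), the totals handed out can briefly overshoot the desired limit.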