OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/common/gpu/gpu_memory_manager.h" | 5 #include "content/common/gpu/gpu_memory_manager.h" |
6 | 6 |
7 #if defined(ENABLE_GPU) | 7 #if defined(ENABLE_GPU) |
8 | 8 |
9 #include <algorithm> | 9 #include <algorithm> |
10 | 10 |
11 #include "base/bind.h" | 11 #include "base/bind.h" |
12 #include "base/command_line.h" | |
12 #include "base/debug/trace_event.h" | 13 #include "base/debug/trace_event.h" |
13 #include "base/message_loop.h" | 14 #include "base/message_loop.h" |
15 #include "base/string_number_conversions.h" | |
14 #include "content/common/gpu/gpu_command_buffer_stub.h" | 16 #include "content/common/gpu/gpu_command_buffer_stub.h" |
15 #include "content/common/gpu/gpu_memory_allocation.h" | 17 #include "content/common/gpu/gpu_memory_allocation.h" |
18 #include "gpu/command_buffer/service/gpu_switches.h" | |
16 | 19 |
17 namespace { | 20 namespace { |
18 | 21 |
19 const int kDelayedScheduleManageTimeoutMs = 67; | 22 const int kDelayedScheduleManageTimeoutMs = 67; |
20 | 23 |
21 bool IsInSameContextShareGroupAsAnyOf( | 24 bool IsInSameContextShareGroupAsAnyOf( |
22 const GpuCommandBufferStubBase* stub, | 25 const GpuCommandBufferStubBase* stub, |
23 const std::vector<GpuCommandBufferStubBase*>& stubs) { | 26 const std::vector<GpuCommandBufferStubBase*>& stubs) { |
24 for (std::vector<GpuCommandBufferStubBase*>::const_iterator it = | 27 for (std::vector<GpuCommandBufferStubBase*>::const_iterator it = |
25 stubs.begin(); it != stubs.end(); ++it) { | 28 stubs.begin(); it != stubs.end(); ++it) { |
26 if (stub->IsInSameContextShareGroup(**it)) | 29 if (stub->IsInSameContextShareGroup(**it)) |
27 return true; | 30 return true; |
28 } | 31 } |
29 return false; | 32 return false; |
30 } | 33 } |
31 | 34 |
32 #if defined(OS_ANDROID) | |
33 size_t CalculateBonusMemoryAllocationBasedOnSize(gfx::Size size) { | |
34 const int viewportMultiplier = 16; | |
35 const unsigned int componentsPerPixel = 4; // GraphicsContext3D::RGBA | |
36 const unsigned int bytesPerComponent = 1; // sizeof(GC3Dubyte) | |
37 | |
38 if (size.IsEmpty()) | |
39 return 0; | |
40 | |
41 size_t limit = viewportMultiplier * size.width() * size.height() * | |
42 componentsPerPixel * bytesPerComponent; | |
43 if (limit < GpuMemoryManager::kMinimumAllocationForTab) | |
44 limit = GpuMemoryManager::kMinimumAllocationForTab; | |
45 else if (limit > GpuMemoryManager::kMaximumAllocationForTabs) | |
46 limit = GpuMemoryManager::kMaximumAllocationForTabs; | |
47 return limit - GpuMemoryManager::kMinimumAllocationForTab; | |
48 } | |
49 #endif | |
50 | |
51 void AssignMemoryAllocations( | 35 void AssignMemoryAllocations( |
52 GpuMemoryManager::StubMemoryStatMap* stub_memory_stats, | 36 GpuMemoryManager::StubMemoryStatMap* stub_memory_stats, |
53 const std::vector<GpuCommandBufferStubBase*>& stubs, | 37 const std::vector<GpuCommandBufferStubBase*>& stubs, |
54 GpuMemoryAllocation allocation, | 38 GpuMemoryAllocation allocation, |
55 bool visible) { | 39 bool visible) { |
56 for (std::vector<GpuCommandBufferStubBase*>::const_iterator it = | 40 for (std::vector<GpuCommandBufferStubBase*>::const_iterator it = |
57 stubs.begin(); | 41 stubs.begin(); |
58 it != stubs.end(); | 42 it != stubs.end(); |
59 ++it) { | 43 ++it) { |
60 (*it)->SetMemoryAllocation(allocation); | 44 (*it)->SetMemoryAllocation(allocation); |
61 (*stub_memory_stats)[*it].allocation = allocation; | 45 (*stub_memory_stats)[*it].allocation = allocation; |
62 (*stub_memory_stats)[*it].visible = visible; | 46 (*stub_memory_stats)[*it].visible = visible; |
63 } | 47 } |
64 } | 48 } |
65 | 49 |
66 } | 50 } |
67 | 51 |
52 #if defined(OS_ANDROID) | |
53 size_t GpuMemoryManager::CalculateBonusMemoryAllocationBasedOnSize( | |
54 gfx::Size size) const { | |
55 const int viewportMultiplier = 16; | |
greggman
2012/07/31 00:30:41
style: constants are kCamelCase
ccameron
2012/07/31 00:52:49
Done. This was a copy-paste of the function (movi
| |
56 const unsigned int componentsPerPixel = 4; // GraphicsContext3D::RGBA | |
57 const unsigned int bytesPerComponent = 1; // sizeof(GC3Dubyte) | |
58 | |
59 if (size.IsEmpty()) | |
60 return 0; | |
61 | |
62 size_t limit = viewportMultiplier * size.width() * size.height() * | |
63 componentsPerPixel * bytesPerComponent; | |
64 if (limit < GetMinimumTabAllocation()) | |
65 limit = GetMinimumTabAllocation(); | |
66 else if (limit > GetAvailableGpuMemory()) | |
67 limit = GetAvailableGpuMemory(); | |
68 return limit - GetMinimumTabAllocation(); | |
69 } | |
70 #endif | |
71 | |
68 GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client, | 72 GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client, |
69 size_t max_surfaces_with_frontbuffer_soft_limit) | 73 size_t max_surfaces_with_frontbuffer_soft_limit) |
70 : client_(client), | 74 : client_(client), |
71 manage_immediate_scheduled_(false), | 75 manage_immediate_scheduled_(false), |
72 max_surfaces_with_frontbuffer_soft_limit_( | 76 max_surfaces_with_frontbuffer_soft_limit_( |
73 max_surfaces_with_frontbuffer_soft_limit), | 77 max_surfaces_with_frontbuffer_soft_limit), |
78 bytes_available_gpu_memory_(0), | |
74 bytes_allocated_current_(0), | 79 bytes_allocated_current_(0), |
75 bytes_allocated_historical_max_(0) { | 80 bytes_allocated_historical_max_(0) { |
81 CommandLine* command_line = CommandLine::ForCurrentProcess(); | |
82 if (command_line->HasSwitch(switches::kForceGpuMemAvailableMb)) { | |
83 base::StringToSizeT( | |
84 command_line->GetSwitchValueASCII(switches::kForceGpuMemAvailableMb), | |
85 &bytes_available_gpu_memory_); | |
86 bytes_available_gpu_memory_ *= 1024 * 1024; | |
87 } | |
88 else { | |
greggman
2012/07/31 00:30:41
style: open and close braces go on same line for 'else'
ccameron
2012/07/31 00:52:49
Done. There was another instance of this later in
| |
89 #if defined(OS_ANDROID) | |
90 bytes_available_gpu_memory_ = 64 * 1024 * 1024; | |
91 #else | |
92 bytes_available_gpu_memory_ = 448 * 1024 * 1024; | |
93 #endif | |
94 } | |
76 } | 95 } |
77 | 96 |
78 GpuMemoryManager::~GpuMemoryManager() { | 97 GpuMemoryManager::~GpuMemoryManager() { |
79 } | 98 } |
80 | 99 |
81 bool GpuMemoryManager::StubWithSurfaceComparator::operator()( | 100 bool GpuMemoryManager::StubWithSurfaceComparator::operator()( |
82 GpuCommandBufferStubBase* lhs, | 101 GpuCommandBufferStubBase* lhs, |
83 GpuCommandBufferStubBase* rhs) { | 102 GpuCommandBufferStubBase* rhs) { |
84 DCHECK(lhs->has_surface_state() && rhs->has_surface_state()); | 103 DCHECK(lhs->has_surface_state() && rhs->has_surface_state()); |
85 const GpuCommandBufferStubBase::SurfaceState& lhs_ss = lhs->surface_state(); | 104 const GpuCommandBufferStubBase::SurfaceState& lhs_ss = lhs->surface_state(); |
(...skipping 19 matching lines...) Expand all Loading... | |
105 return; | 124 return; |
106 delayed_manage_callback_.Reset(base::Bind(&GpuMemoryManager::Manage, | 125 delayed_manage_callback_.Reset(base::Bind(&GpuMemoryManager::Manage, |
107 AsWeakPtr())); | 126 AsWeakPtr())); |
108 MessageLoop::current()->PostDelayedTask( | 127 MessageLoop::current()->PostDelayedTask( |
109 FROM_HERE, | 128 FROM_HERE, |
110 delayed_manage_callback_.callback(), | 129 delayed_manage_callback_.callback(), |
111 base::TimeDelta::FromMilliseconds(kDelayedScheduleManageTimeoutMs)); | 130 base::TimeDelta::FromMilliseconds(kDelayedScheduleManageTimeoutMs)); |
112 } | 131 } |
113 } | 132 } |
114 | 133 |
115 size_t GpuMemoryManager::GetAvailableGpuMemory() const { | |
116 // TODO(mmocny): Implement this with real system figures. | |
117 return kMaximumAllocationForTabs; | |
118 } | |
119 | |
120 void GpuMemoryManager::TrackMemoryAllocatedChange(size_t old_size, | 134 void GpuMemoryManager::TrackMemoryAllocatedChange(size_t old_size, |
121 size_t new_size) | 135 size_t new_size) |
122 { | 136 { |
123 if (new_size < old_size) { | 137 if (new_size < old_size) { |
124 size_t delta = old_size - new_size; | 138 size_t delta = old_size - new_size; |
125 DCHECK(bytes_allocated_current_ >= delta); | 139 DCHECK(bytes_allocated_current_ >= delta); |
126 bytes_allocated_current_ -= delta; | 140 bytes_allocated_current_ -= delta; |
127 } | 141 } |
128 else { | 142 else { |
129 size_t delta = new_size - old_size; | 143 size_t delta = new_size - old_size; |
(...skipping 104 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
234 stubs_without_surface_hibernated.push_back(stub); | 248 stubs_without_surface_hibernated.push_back(stub); |
235 } | 249 } |
236 | 250 |
237 size_t bonus_allocation = 0; | 251 size_t bonus_allocation = 0; |
238 #if !defined(OS_ANDROID) | 252 #if !defined(OS_ANDROID) |
239 // Calculate bonus allocation by splitting remainder of global limit equally | 253 // Calculate bonus allocation by splitting remainder of global limit equally |
240 // after giving out the minimum to those that need it. | 254 // after giving out the minimum to those that need it. |
241 size_t num_stubs_need_mem = stubs_with_surface_foreground.size() + | 255 size_t num_stubs_need_mem = stubs_with_surface_foreground.size() + |
242 stubs_without_surface_foreground.size() + | 256 stubs_without_surface_foreground.size() + |
243 stubs_without_surface_background.size(); | 257 stubs_without_surface_background.size(); |
244 size_t base_allocation_size = kMinimumAllocationForTab * num_stubs_need_mem; | 258 size_t base_allocation_size = GetMinimumTabAllocation() * num_stubs_need_mem; |
245 if (base_allocation_size < kMaximumAllocationForTabs && | 259 if (base_allocation_size < GetAvailableGpuMemory() && |
246 !stubs_with_surface_foreground.empty()) | 260 !stubs_with_surface_foreground.empty()) |
247 bonus_allocation = (kMaximumAllocationForTabs - base_allocation_size) / | 261 bonus_allocation = (GetAvailableGpuMemory() - base_allocation_size) / |
248 stubs_with_surface_foreground.size(); | 262 stubs_with_surface_foreground.size(); |
greggman
2012/07/31 00:30:41
style: indent? i'm not 100% sure this is wrong. It
ccameron
2012/07/31 00:52:49
Done. Lined up with the =, since that seems most
| |
249 #else | 263 #else |
250 // On android, calculate bonus allocation based on surface size. | 264 // On android, calculate bonus allocation based on surface size. |
251 if (!stubs_with_surface_foreground.empty()) | 265 if (!stubs_with_surface_foreground.empty()) |
252 bonus_allocation = CalculateBonusMemoryAllocationBasedOnSize( | 266 bonus_allocation = CalculateBonusMemoryAllocationBasedOnSize( |
253 stubs_with_surface_foreground[0]->GetSurfaceSize()); | 267 stubs_with_surface_foreground[0]->GetSurfaceSize()); |
254 #endif | 268 #endif |
255 | 269 |
256 stub_memory_stats_for_last_manage_.clear(); | 270 stub_memory_stats_for_last_manage_.clear(); |
257 | 271 |
258 // Now give out allocations to everyone. | 272 // Now give out allocations to everyone. |
259 AssignMemoryAllocations( | 273 AssignMemoryAllocations( |
260 &stub_memory_stats_for_last_manage_, | 274 &stub_memory_stats_for_last_manage_, |
261 stubs_with_surface_foreground, | 275 stubs_with_surface_foreground, |
262 GpuMemoryAllocation(kMinimumAllocationForTab + bonus_allocation, | 276 GpuMemoryAllocation(GetMinimumTabAllocation() + bonus_allocation, |
263 GpuMemoryAllocation::kHasFrontbuffer | | 277 GpuMemoryAllocation::kHasFrontbuffer | |
264 GpuMemoryAllocation::kHasBackbuffer), | 278 GpuMemoryAllocation::kHasBackbuffer), |
265 true); | 279 true); |
266 | 280 |
267 AssignMemoryAllocations( | 281 AssignMemoryAllocations( |
268 &stub_memory_stats_for_last_manage_, | 282 &stub_memory_stats_for_last_manage_, |
269 stubs_with_surface_background, | 283 stubs_with_surface_background, |
270 GpuMemoryAllocation(0, GpuMemoryAllocation::kHasFrontbuffer), | 284 GpuMemoryAllocation(0, GpuMemoryAllocation::kHasFrontbuffer), |
271 false); | 285 false); |
272 | 286 |
273 AssignMemoryAllocations( | 287 AssignMemoryAllocations( |
274 &stub_memory_stats_for_last_manage_, | 288 &stub_memory_stats_for_last_manage_, |
275 stubs_with_surface_hibernated, | 289 stubs_with_surface_hibernated, |
276 GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers), | 290 GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers), |
277 false); | 291 false); |
278 | 292 |
279 AssignMemoryAllocations( | 293 AssignMemoryAllocations( |
280 &stub_memory_stats_for_last_manage_, | 294 &stub_memory_stats_for_last_manage_, |
281 stubs_without_surface_foreground, | 295 stubs_without_surface_foreground, |
282 GpuMemoryAllocation(kMinimumAllocationForTab, | 296 GpuMemoryAllocation(GetMinimumTabAllocation(), |
283 GpuMemoryAllocation::kHasNoBuffers), | 297 GpuMemoryAllocation::kHasNoBuffers), |
284 true); | 298 true); |
285 | 299 |
286 AssignMemoryAllocations( | 300 AssignMemoryAllocations( |
287 &stub_memory_stats_for_last_manage_, | 301 &stub_memory_stats_for_last_manage_, |
288 stubs_without_surface_background, | 302 stubs_without_surface_background, |
289 GpuMemoryAllocation(kMinimumAllocationForTab, | 303 GpuMemoryAllocation(GetMinimumTabAllocation(), |
290 GpuMemoryAllocation::kHasNoBuffers), | 304 GpuMemoryAllocation::kHasNoBuffers), |
291 false); | 305 false); |
292 | 306 |
293 AssignMemoryAllocations( | 307 AssignMemoryAllocations( |
294 &stub_memory_stats_for_last_manage_, | 308 &stub_memory_stats_for_last_manage_, |
295 stubs_without_surface_hibernated, | 309 stubs_without_surface_hibernated, |
296 GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers), | 310 GpuMemoryAllocation(0, GpuMemoryAllocation::kHasNoBuffers), |
297 false); | 311 false); |
298 | |
299 size_t assigned_allocation_sum = 0; | |
300 for (StubMemoryStatMap::iterator it = | |
301 stub_memory_stats_for_last_manage_.begin(); | |
302 it != stub_memory_stats_for_last_manage_.end(); | |
303 ++it) { | |
304 assigned_allocation_sum += it->second.allocation.gpu_resource_size_in_bytes; | |
305 } | |
306 } | 312 } |
307 | 313 |
308 #endif | 314 #endif |
OLD | NEW |