Chromium Code Reviews
Index: content/common/gpu/gpu_memory_manager.cc
diff --git a/content/common/gpu/gpu_memory_manager.cc b/content/common/gpu/gpu_memory_manager.cc
new file mode 100644
index 0000000000000000000000000000000000000000..411b84a6c30faa2730c7b228acfb40490dee75e3
--- /dev/null
+++ b/content/common/gpu/gpu_memory_manager.cc
@@ -0,0 +1,172 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/gpu_memory_manager.h"
+
+#if defined(ENABLE_GPU)
+
+#include <algorithm>
+#include <set>
+
+#include "base/bind.h"
+#include "base/message_loop.h"
+#include "content/common/gpu/gpu_command_buffer_stub.h"
+#include "content/common/gpu/gpu_memory_allocation.h"
+
+namespace {
+
+// These are predefined values (in bytes) for
+// GpuMemoryAllocation::gpuResourceSizeInBytes. Currently, the value is only
+// used to check whether it is zero or non-zero. In the future, these values
+// will not come from constants, but rather will be distributed dynamically.
+const uint32 kResourceSizeNonHibernatedTab = 1;
+const uint32 kResourceSizeHibernatedTab = 0;
+
+// Set up three allocation values for the three possible stub states.
+const GpuMemoryAllocation all_buffers_allocation(
+    kResourceSizeNonHibernatedTab, true, true);
+const GpuMemoryAllocation front_buffers_allocation(
+    kResourceSizeNonHibernatedTab, true, false);
+const GpuMemoryAllocation no_buffers_allocation(
+    kResourceSizeHibernatedTab, false, false);
+
+}  // namespace
+
+GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client,
+    size_t max_surfaces_with_frontbuffer_soft_limit)
jonathan.backer 2012/02/01 19:43:17
I'm not sure that I understand the advantage of th
mmocny 2012/02/01 20:44:19
Specifically: for testing, so that you can control
+    : client_(client),
+      manage_scheduled_(false),
+      max_surfaces_with_frontbuffer_soft_limit_(
+          max_surfaces_with_frontbuffer_soft_limit),
+      weak_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
+}
+
+GpuMemoryManager::~GpuMemoryManager() {
+}
+
+bool GpuMemoryManager::StubWithSurfaceComparator::operator()(
+    GpuCommandBufferStubBase* lhs,
+    GpuCommandBufferStubBase* rhs) {
+  GpuCommandBufferStubBase::SurfaceState* lhs_ss = lhs->surface_state();
+  GpuCommandBufferStubBase::SurfaceState* rhs_ss = rhs->surface_state();
+  DCHECK(lhs_ss && rhs_ss);
+  if (lhs_ss->visible)
+    return !rhs_ss->visible ||
+           (lhs_ss->last_used_time > rhs_ss->last_used_time);
+  else
+    return !rhs_ss->visible &&
+           (lhs_ss->last_used_time > rhs_ss->last_used_time);
+}
+
+void GpuMemoryManager::ScheduleManage() {
+  if (manage_scheduled_)
+    return;
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(&GpuMemoryManager::Manage, weak_factory_.GetWeakPtr()));
+  manage_scheduled_ = true;
+}
+
+// The current Manage algorithm simply classifies contexts (stubs) into
+// "foreground", "background", or "hibernated" categories.
+// For each of these three categories, there are predefined memory allocation
+// limits and front/backbuffer states.
+//
+// Stubs may or may not have a surface, and the rules are different for each.
+//
+// The rules for categorizing contexts with a surface are:
+// 1. Foreground: All visible surfaces.
+//    * Must have both front and back buffer.
+//
+// 2. Background: Non-visible surfaces that have not surpassed the
jonathan.backer 2012/02/01 19:43:17
s/Non/No?
mmocny 2012/02/01 20:44:19
I don't think so..
s/Non/Not or s/Non /In if anyth
jonathan.backer 2012/02/01 21:39:14
I think I see. You mean non-visible.
On 2012/02/
+//    max_surfaces_with_frontbuffer_soft_limit_ limit.
+//    * Will have only a frontbuffer.
+//
+// 3. Hibernated: Non-visible surfaces that have surpassed the
+//    max_surfaces_with_frontbuffer_soft_limit_ limit.
+//    * Will have neither buffer.
+//
+// The rule for categorizing contexts without a surface is:
+//    * Stubs without a surface instead have an affected_surface_ids list.
+//      The stub's state must match that of the most visible surface it
+//      affects.
+void GpuMemoryManager::Manage() {
+  manage_scheduled_ = false;
+
+  // Create stub lists by separating out the two types received from the
+  // client.
+  std::vector<GpuCommandBufferStubBase*> stubs_with_surface;
+  std::vector<GpuCommandBufferStubBase*> stubs_without_surface;
+  {
+    std::vector<GpuCommandBufferStubBase*> stubs;
+    client_->AppendAllCommandBufferStubs(stubs);
+
+    for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
+         it != stubs.end(); ++it) {
+      GpuCommandBufferStubBase* stub = *it;
+      if (stub->surface_state())
+        stubs_with_surface.push_back(stub);
+      else
+        stubs_without_surface.push_back(stub);
+    }
+  }
+
+  // Sort stubs with a surface into {visibility, last_used_time} order using
+  // the custom comparator.
+  std::sort(stubs_with_surface.begin(), stubs_with_surface.end(),
+            StubWithSurfaceComparator());
+
+  // Separate stubs with surfaces into three sets and send memory allocations.
+  std::set<int32> all_buffers, front_buffers, no_buffers;
+
+  for (size_t i = 0; i < stubs_with_surface.size(); ++i) {
+    GpuCommandBufferStubBase* stub = stubs_with_surface[i];
+    if (stub->surface_state()->visible) {
+      all_buffers.insert(stub->surface_state()->surface_id);
+      stub->SendMemoryAllocationToProxy(all_buffers_allocation);
+    } else if (i < max_surfaces_with_frontbuffer_soft_limit_) {
+      front_buffers.insert(stub->surface_state()->surface_id);
+      stub->SendMemoryAllocationToProxy(front_buffers_allocation);
+    } else {
+      no_buffers.insert(stub->surface_state()->surface_id);
+      stub->SendMemoryAllocationToProxy(no_buffers_allocation);
+    }
+  }
+
+  // Now, go through the stubs without surfaces and send memory allocations
+  // based on the buckets we just built. Because there may be multiple
+  // affected surfaces, use the state of the most "important" affected surface.
+  for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
+           stubs_without_surface.begin();
+       it != stubs_without_surface.end(); ++it) {
+    GpuCommandBufferStubBase* stub = *it;
+    if (stub->affected_surface_ids().empty())
+      continue;
+    if (std::find_first_of(all_buffers.begin(),
+                           all_buffers.end(),
+                           stub->affected_surface_ids().begin(),
+                           stub->affected_surface_ids().end()) !=
+        all_buffers.end()) {
+      stub->SendMemoryAllocationToProxy(all_buffers_allocation);
+    } else if (std::find_first_of(front_buffers.begin(),
+                                  front_buffers.end(),
+                                  stub->affected_surface_ids().begin(),
+                                  stub->affected_surface_ids().end()) !=
+               front_buffers.end()) {
+      stub->SendMemoryAllocationToProxy(front_buffers_allocation);
+    } else if (std::find_first_of(no_buffers.begin(),
+                                  no_buffers.end(),
+                                  stub->affected_surface_ids().begin(),
+                                  stub->affected_surface_ids().end()) !=
+               no_buffers.end()) {
+      stub->SendMemoryAllocationToProxy(no_buffers_allocation);
+    } else {
+      DLOG(ERROR) << "GpuCommandBufferStub::affected_surface_ids are not "
+                     "valid.";
+    }
+  }
+}
+
+void GpuMemoryManager::SetMaxSurfacesWithFrontbufferSoftLimit(size_t limit) {
+  max_surfaces_with_frontbuffer_soft_limit_ = limit;
+}
+
+#endif  // defined(ENABLE_GPU)
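
A note on ScheduleManage(): it coalesces requests by posting a single deferred Manage() task and ignoring further calls until that task runs (manage_scheduled_ is cleared at the top of Manage()). The sketch below is not Chromium code; TaskQueue and Manager are hypothetical stand-ins for MessageLoop::PostTask, base::Bind, and GpuMemoryManager, shown only to illustrate the coalescing pattern.

// Minimal sketch of the ScheduleManage() coalescing pattern; TaskQueue is a
// hypothetical stand-in for the message loop, not the real Chromium API.
#include <functional>
#include <iostream>
#include <queue>
#include <utility>

class TaskQueue {
 public:
  void Post(std::function<void()> task) { tasks_.push(std::move(task)); }
  void RunAll() {
    while (!tasks_.empty()) {
      tasks_.front()();
      tasks_.pop();
    }
  }
 private:
  std::queue<std::function<void()>> tasks_;
};

class Manager {
 public:
  explicit Manager(TaskQueue* queue) : queue_(queue) {}

  // Mirrors GpuMemoryManager::ScheduleManage(): if a Manage() is already
  // pending, do nothing; otherwise post one deferred call.
  void ScheduleManage() {
    if (manage_scheduled_)
      return;
    queue_->Post([this] { Manage(); });
    manage_scheduled_ = true;
  }

 private:
  void Manage() {
    manage_scheduled_ = false;  // Allow the next request to schedule again.
    std::cout << "Manage() ran once\n";
  }

  TaskQueue* queue_;
  bool manage_scheduled_ = false;
};

int main() {
  TaskQueue queue;
  Manager manager(&queue);
  // Three visibility changes in the same loop iteration...
  manager.ScheduleManage();
  manager.ScheduleManage();
  manager.ScheduleManage();
  // ...result in exactly one Manage() call when the loop spins.
  queue.RunAll();
  return 0;
}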
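
The core of Manage() is the sort-then-bucket pass over stubs that own a surface. The following self-contained sketch is not Chromium code: SurfaceState, Stub, and MoreImportant are simplified stand-ins for GpuCommandBufferStubBase::SurfaceState, the stub class, and StubWithSurfaceComparator, but the ordering (visible first, then most recently used) and the index test against the frontbuffer soft limit mirror the patch.

// Minimal sketch of the sort-then-bucket classification; the types here are
// simplified stand-ins, not the real GpuCommandBufferStubBase.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct SurfaceState {
  int surface_id;
  bool visible;
  double last_used_time;  // Larger means used more recently.
};

struct Stub {
  std::string name;
  SurfaceState state;
};

// Mirrors StubWithSurfaceComparator: visible stubs order before non-visible
// ones, and within each group the most recently used stub comes first.
bool MoreImportant(const Stub& lhs, const Stub& rhs) {
  if (lhs.state.visible != rhs.state.visible)
    return lhs.state.visible;
  return lhs.state.last_used_time > rhs.state.last_used_time;
}

int main() {
  const size_t max_surfaces_with_frontbuffer_soft_limit = 2;
  std::vector<Stub> stubs = {
      {"old_background_tab", {1, false, 10.0}},
      {"foreground_tab",     {2, true,  30.0}},
      {"new_background_tab", {3, false, 20.0}},
  };

  std::sort(stubs.begin(), stubs.end(), MoreImportant);

  // Same bucketing as Manage(): visible stubs are "foreground"; the remaining
  // stubs whose overall index is still under the soft limit are "background";
  // everything past the limit is "hibernated".
  for (size_t i = 0; i < stubs.size(); ++i) {
    const char* category;
    if (stubs[i].state.visible)
      category = "foreground: front and back buffer";
    else if (i < max_surfaces_with_frontbuffer_soft_limit)
      category = "background: frontbuffer only";
    else
      category = "hibernated: no buffers";
    std::cout << stubs[i].name << " -> " << category << "\n";
  }
  return 0;
}

Note that, as in the patch, the index test counts visible stubs too, so each visible surface consumes one slot of the soft limit before any non-visible surface can keep its frontbuffer.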
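
Finally, surfaceless stubs inherit the allocation of the most important surface they affect, found by probing the three buckets in order with std::find_first_of. Below is a small stand-alone sketch of that lookup, using plain int surface ids rather than the real stub types.

// Minimal sketch of the bucket lookup for stubs without a surface; ids and
// bucket contents are made-up example values.
#include <algorithm>
#include <iostream>
#include <set>
#include <vector>

// Returns true if any id in |ids| is present in |bucket|, the same test
// Manage() performs with std::find_first_of against each bucket in turn.
bool AffectsAny(const std::set<int>& bucket, const std::vector<int>& ids) {
  return std::find_first_of(bucket.begin(), bucket.end(),
                            ids.begin(), ids.end()) != bucket.end();
}

int main() {
  // Buckets as they might look after the surfaced stubs were classified.
  std::set<int> all_buffers = {2};
  std::set<int> front_buffers = {3};
  std::set<int> no_buffers = {1};

  // A surfaceless stub affecting one hibernated and one visible surface:
  // because the buckets are checked from most to least important, it ends up
  // with the all-buffers allocation of the visible surface.
  std::vector<int> affected_surface_ids = {1, 2};

  if (AffectsAny(all_buffers, affected_surface_ids))
    std::cout << "send all_buffers_allocation\n";
  else if (AffectsAny(front_buffers, affected_surface_ids))
    std::cout << "send front_buffers_allocation\n";
  else if (AffectsAny(no_buffers, affected_surface_ids))
    std::cout << "send no_buffers_allocation\n";
  else
    std::cout << "affected_surface_ids not found in any bucket\n";
  return 0;
}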