Index: content/common/gpu/gpu_memory_manager.cc
diff --git a/content/common/gpu/gpu_memory_manager.cc b/content/common/gpu/gpu_memory_manager.cc
new file mode 100644
index 0000000000000000000000000000000000000000..57d6f82d45124485a8e770f0a6f6aed771e4bc3e
--- /dev/null
+++ b/content/common/gpu/gpu_memory_manager.cc
@@ -0,0 +1,124 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/common/gpu/gpu_memory_manager.h"
+
+#if defined(ENABLE_GPU)
+
+#include <set>
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/message_loop.h"
+
+////////////////////////////////////////////////////////////////////////////////
+// Constructors/Destructors
+
+GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client)
+    : client_(client)
+    , manage_scheduled_(false) {
jonathan.backer
2012/01/31 18:13:58
nit: sorry, but I think this is the WK way, not the Chromium way.
mmocny
2012/01/31 18:54:57
Done.
+}
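For reference, the Chromium-style version of this initializer list (trailing commas rather than the WebKit-style leading commas flagged above) would presumably look like the sketch below; this is a formatting illustration only, not necessarily the exact text of the later patch set:

    GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client)
        : client_(client),
          manage_scheduled_(false) {
    }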
+
+GpuMemoryManager::~GpuMemoryManager() {
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GpuMemoryManager::ScheduleManage() {
+  if (manage_scheduled_)
+    return;
+  manage_scheduled_ = true;
+  MessageLoop::current()->PostTask(
+      FROM_HERE, base::Bind(&GpuMemoryManager::Manage, base::Unretained(this)));
nduca
2012/01/31 06:53:47
Hmmm isn't this going to crash if the task is posted and the GpuMemoryManager is destroyed before it runs?
+}
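The crash nduca is worried about comes from the base::Unretained(this) above: if the posted task outlives the GpuMemoryManager, Manage() runs on a deleted object. A minimal sketch of one way to guard against that with a weak pointer; the weak_factory_ member and the extra include are assumptions, not part of this patch:

    // The header would gain, declared after the other members:
    //   #include "base/memory/weak_ptr.h"
    //   base::WeakPtrFactory<GpuMemoryManager> weak_factory_;  // assumed member
    // and the constructor would initialize weak_factory_(this).

    void GpuMemoryManager::ScheduleManage() {
      if (manage_scheduled_)
        return;
      manage_scheduled_ = true;
      MessageLoop::current()->PostTask(
          FROM_HERE,
          base::Bind(&GpuMemoryManager::Manage, weak_factory_.GetWeakPtr()));
    }

If the manager is destroyed first, the WeakPtr is invalidated and the bound callback is simply dropped instead of dereferencing freed memory.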
+
+void GpuMemoryManager::Manage() {
nduca
2012/01/31 06:53:47
I think you should take some of the comments from
+  manage_scheduled_ = false;
+
+  // Ask client for command buffer stubs
+  std::vector<GpuMemoryManageableCommandBufferStub*> stubs_with_surface;
nduca
2012/01/31 06:53:47
this is where I was saying you should just get all
mmocny
2012/01/31 18:54:57
Done.
+  std::vector<GpuMemoryManageableCommandBufferStub*> stubs_without_surface;
+  client_->AppendAllCommandBufferStubs(stubs_with_surface,
+                                       stubs_without_surface);
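For readers without the header handy, this is roughly the client/stub interface the code below leans on, reconstructed from usage in this file alone; the actual declarations live in gpu_memory_manager.h and the stub headers, and the exact types (for example of last_used_time) are assumptions:

    class GpuMemoryManagerClient {
     public:
      virtual void AppendAllCommandBufferStubs(
          std::vector<GpuMemoryManageableCommandBufferStub*>& stubs_with_surface,
          std::vector<GpuMemoryManageableCommandBufferStub*>& stubs_without_surface) = 0;
    };

    class GpuMemoryManageableCommandBufferStub {
     public:
      struct SurfaceState {  // field names inferred from this file
        int32 surface_id;
        bool visible;
        base::TimeTicks last_used_time;
      };
      const SurfaceState& surface_state() const;
      const std::vector<int32>& affected_surface_ids() const;
      void SendMemoryAllocation(const GpuMemoryAllocation& allocation);
    };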
+
+  // Sort stubs with surface into {visibility,last_used_time} order using
+  // custom comparator
+  std::sort(stubs_with_surface.begin(), stubs_with_surface.end(),
+            StubComparator());
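StubComparator itself is declared elsewhere, presumably in the header. Based on the comment above, it presumably sorts visible stubs ahead of hidden ones and, within each group, the most recently used first; a sketch under that assumption, using the surface_state() fields sketched earlier:

    struct StubComparator {
      // Orders by visibility first, then by recency of use (most recent first).
      bool operator()(GpuMemoryManageableCommandBufferStub* lhs,
                      GpuMemoryManageableCommandBufferStub* rhs) {
        if (lhs->surface_state().visible != rhs->surface_state().visible)
          return lhs->surface_state().visible;
        return lhs->surface_state().last_used_time >
               rhs->surface_state().last_used_time;
      }
    };

With that ordering, the index check against kMaxSurfacesWithFrontBufferSoftLimit below keeps frontbuffers only for the most recently used surfaces.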
+
+  // Create allocations
nduca
2012/01/31 06:53:47
This comment isn't adding a lot of value. You're b
mmocny
2012/01/31 18:54:57
Done.
+  GpuMemoryAllocation all_buffers_allocation;
+  all_buffers_allocation.gpuResourceSizeInBytes =
+      GpuMemoryAllocation::kResourceSizeForegroundTab;
+  all_buffers_allocation.hasFrontbuffer = true;
+  all_buffers_allocation.hasBackbuffer = true;
+
+  GpuMemoryAllocation front_buffers_allocation;
+  front_buffers_allocation.gpuResourceSizeInBytes =
+      GpuMemoryAllocation::kResourceSizeBackgroundTab;
+  front_buffers_allocation.hasFrontbuffer = true;
+  front_buffers_allocation.hasBackbuffer = false;
+
+  GpuMemoryAllocation no_buffers_allocation;
+  no_buffers_allocation.gpuResourceSizeInBytes =
+      GpuMemoryAllocation::kResourceSizeHibernatedTab;
+  no_buffers_allocation.hasFrontbuffer = false;
+  no_buffers_allocation.hasBackbuffer = false;
+
jonathan.backer
2012/01/31 18:13:58
AFAICT, these never change. Define them as constants.
mmocny
2012/01/31 18:54:57
Done.
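One way to act on the suggestion above, since the three allocation levels never vary: build them once through a small file-scope helper. This is a sketch only; the parameter types are assumptions based on the assignments above, and the later patch set may have hoisted them differently:

    namespace {

    // Builds one of the three fixed allocation levels used by Manage().
    GpuMemoryAllocation MakeAllocation(size_t gpu_resource_size_in_bytes,
                                       bool has_frontbuffer,
                                       bool has_backbuffer) {
      GpuMemoryAllocation allocation;
      allocation.gpuResourceSizeInBytes = gpu_resource_size_in_bytes;
      allocation.hasFrontbuffer = has_frontbuffer;
      allocation.hasBackbuffer = has_backbuffer;
      return allocation;
    }

    }  // namespace

    // Usage in Manage():
    const GpuMemoryAllocation all_buffers_allocation = MakeAllocation(
        GpuMemoryAllocation::kResourceSizeForegroundTab, true, true);
    const GpuMemoryAllocation front_buffers_allocation = MakeAllocation(
        GpuMemoryAllocation::kResourceSizeBackgroundTab, true, false);
    const GpuMemoryAllocation no_buffers_allocation = MakeAllocation(
        GpuMemoryAllocation::kResourceSizeHibernatedTab, false, false);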
+  // Separate stubs with surfaces into three sets and send memory allocation
nduca
2012/01/31 06:53:47
This should really be put up in the overall comment.
+  // 1. all_buffers: Front, Back, and RootLayerTiles [all visible surfaces]
+  // 2. front_buffers: Front only [based on #tab limit]
+  // 3. no_buffers: None [the rest]
+  static const size_t kMaxSurfacesWithFrontBufferSoftLimit = 8;
+  std::set<int32> all_buffers, front_buffers, no_buffers;
+
+  for (size_t i = 0; i < stubs_with_surface.size(); ++i) {
+    GpuMemoryManageableCommandBufferStub* stub = stubs_with_surface[i];
+    if (stub->surface_state().visible) {
+      all_buffers.insert(stub->surface_state().surface_id);
+      stub->SendMemoryAllocation(all_buffers_allocation);
jonathan.backer
2012/01/31 18:13:58
Is this necessary given the code below? Aren't we going to send allocations to these stubs again in the second loop?
mmocny
2012/01/31 18:54:57
The first loop is over stubs_with_surface, the second is over stubs_without_surface, so there is no overlap.
+    } else if (i < kMaxSurfacesWithFrontBufferSoftLimit) {
+      front_buffers.insert(stub->surface_state().surface_id);
+      stub->SendMemoryAllocation(front_buffers_allocation);
+    } else {
+      no_buffers.insert(stub->surface_state().surface_id);
+      stub->SendMemoryAllocation(no_buffers_allocation);
+    }
+  }
+
+  // Now, go through the stubs without surfaces and send memory allocations
+  // based on buckets we just divided. Because there may be multiple affected
+  // surfaces, use the state of the most "important" affected surface.
+  for (std::vector<GpuMemoryManageableCommandBufferStub*>::const_iterator it =
+      stubs_without_surface.begin(); it != stubs_without_surface.end(); ++it) {
+    GpuMemoryManageableCommandBufferStub* stub = *it;
+    if (std::find_first_of(all_buffers.begin(),
+                           all_buffers.end(),
+                           stub->affected_surface_ids().begin(),
+                           stub->affected_surface_ids().end()) !=
+        all_buffers.end()) {
+      stub->SendMemoryAllocation(all_buffers_allocation);
+    } else if (std::find_first_of(front_buffers.begin(),
+                                  front_buffers.end(),
+                                  stub->affected_surface_ids().begin(),
+                                  stub->affected_surface_ids().end()) !=
+               front_buffers.end()) {
+      stub->SendMemoryAllocation(front_buffers_allocation);
+    } else if (std::find_first_of(no_buffers.begin(),
+                                  no_buffers.end(),
+                                  stub->affected_surface_ids().begin(),
+                                  stub->affected_surface_ids().end()) !=
+               no_buffers.end()) {
+      stub->SendMemoryAllocation(no_buffers_allocation);
+    } else {
+      // TODO(mmocny): Either (a) no affected surfaces, or
nduca
2012/01/31 06:53:47
Fix this so you put an if (affected_surface_ids().size() == 0) check here.
mmocny
2012/01/31 18:54:57
Done.
+      // (b) your affected surfaces are incorrect
+      // (a) is fine, (b) is not
+    }
+  }
+}
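On nduca's point about the final else branch: case (a), a stub with no affected surfaces, is expected and can be ignored, while case (b), affected surfaces that landed in none of the three buckets, means the ids are out of sync with the bucketing above. A sketch of the shape that check presumably takes; whether the real fix uses a DCHECK is an assumption, and it would replace the TODO else branch and need "base/logging.h":

    } else {
      // (a) No affected surfaces: fine, nothing to send.
      // (b) Affected surfaces that missed every bucket: a bug upstream.
      DCHECK_EQ(0u, stub->affected_surface_ids().size())
          << "affected_surface_ids() is out of sync with the surface buckets";
    }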
+
+////////////////////////////////////////////////////////////////////////////////
+
+#endif