Chromium Code Reviews

```cpp
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/gpu_memory_manager.h"

#if defined(ENABLE_GPU)

#include <set>
#include <algorithm>

#include "base/bind.h"
#include "base/message_loop.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_allocation.h"

namespace {

// These are predefined values (in bytes) for
// GpuMemoryAllocation::gpuResourceSizeInBytes. Currently, the value is only
// used to check if it is 0 or non-0. In the future, these values will not
// come from constants, but rather will be distributed dynamically.
const uint32 kResourceSizeNonHibernatedTab = 1;
const uint32 kResourceSizeHibernatedTab = 0;

// Set up three allocation values for the three possible stub states
const GpuMemoryAllocation all_buffers_allocation(
    kResourceSizeNonHibernatedTab, true, true);
const GpuMemoryAllocation front_buffers_allocation(
    kResourceSizeNonHibernatedTab, true, false);
const GpuMemoryAllocation no_buffers_allocation(
    kResourceSizeHibernatedTab, false, false);

}

GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client,
    size_t max_surfaces_with_frontbuffer_soft_limit)
```
jonathan.backer, 2012/02/01 19:43:17:
I'm not sure that I understand the advantage of th…

mmocny, 2012/02/01 20:44:19:
Specifically: for testing, so that you can control…
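mmocny's reply above points at testability: with the soft limit passed in through the constructor rather than hard-coded, a test can presumably pin it to a small value and exercise the hibernation cutoff deterministically. A minimal standalone sketch of that pattern (not Chromium code; the class and names below are made up for illustration):

```cpp
#include <cassert>
#include <cstddef>

// Simplified stand-in for the real manager. The only point illustrated is
// that the soft limit is injected, so a test can choose it freely instead of
// depending on a production constant.
class FakeMemoryManager {
 public:
  explicit FakeMemoryManager(std::size_t max_surfaces_with_frontbuffer_soft_limit)
      : limit_(max_surfaces_with_frontbuffer_soft_limit) {}

  // A hidden surface keeps its frontbuffer only while its index (in
  // most-recently-used order) is below the injected limit.
  bool HiddenSurfaceKeepsFrontbuffer(std::size_t mru_index) const {
    return mru_index < limit_;
  }

 private:
  std::size_t limit_;
};

int main() {
  // With the limit forced to 1, only the most recently used hidden surface
  // keeps a frontbuffer; the next one is "hibernated".
  FakeMemoryManager manager(1u);
  assert(manager.HiddenSurfaceKeepsFrontbuffer(0));
  assert(!manager.HiddenSurfaceKeepsFrontbuffer(1));
  return 0;
}
```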
```cpp
    : client_(client),
      manage_scheduled_(false),
      max_surfaces_with_frontbuffer_soft_limit_(
          max_surfaces_with_frontbuffer_soft_limit),
      weak_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
}

GpuMemoryManager::~GpuMemoryManager() {
}

bool GpuMemoryManager::StubWithSurfaceComparator::operator()(
    GpuCommandBufferStubBase* lhs,
    GpuCommandBufferStubBase* rhs) {
  GpuCommandBufferStubBase::SurfaceState* lhs_ss = lhs->surface_state();
  GpuCommandBufferStubBase::SurfaceState* rhs_ss = rhs->surface_state();
  DCHECK(lhs_ss && rhs_ss);
  if (lhs_ss->visible)
    return !rhs_ss->visible ||
        (lhs_ss->last_used_time > rhs_ss->last_used_time);
  else
    return !rhs_ss->visible &&
        (lhs_ss->last_used_time > rhs_ss->last_used_time);
};
```
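The comparator above sorts visible stubs ahead of non-visible ones and, within each group, most recently used first. A small self-contained illustration of that ordering rule, using plain structs in place of the stub types (the names here are made up for the sketch):

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

struct SurfaceState {
  bool visible;
  int last_used_time;  // Larger means used more recently.
};

// Same ordering rule as StubWithSurfaceComparator, over plain values.
static bool MoreImportant(const SurfaceState& lhs, const SurfaceState& rhs) {
  if (lhs.visible)
    return !rhs.visible || lhs.last_used_time > rhs.last_used_time;
  return !rhs.visible && lhs.last_used_time > rhs.last_used_time;
}

int main() {
  std::vector<SurfaceState> stubs = {
      {false, 30},  // hidden, recently used
      {true, 10},   // visible, older
      {false, 20},  // hidden, older
      {true, 40},   // visible, most recently used
  };
  std::sort(stubs.begin(), stubs.end(), MoreImportant);
  // Visible stubs first (newest first), then hidden stubs (newest first).
  assert(stubs[0].visible && stubs[0].last_used_time == 40);
  assert(stubs[1].visible && stubs[1].last_used_time == 10);
  assert(!stubs[2].visible && stubs[2].last_used_time == 30);
  assert(!stubs[3].visible && stubs[3].last_used_time == 20);
  return 0;
}
```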
```cpp
void GpuMemoryManager::ScheduleManage() {
  if (manage_scheduled_)
    return;
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuMemoryManager::Manage, weak_factory_.GetWeakPtr()));
  manage_scheduled_ = true;
}

// The current Manage algorithm simply classifies contexts (stubs) into
// "foreground", "background", or "hibernated" categories.
// For each of these three categories, there are predefined memory allocation
// limits and front/backbuffer states.
//
// Stubs may or may not have a surface, and the rules are different for each.
//
// The rules for categorizing contexts with a surface are:
//  1. Foreground: All visible surfaces.
//      * Must have both front and back buffer.
//
//  2. Background: Non visible surfaces, which have not surpassed the
```
jonathan.backer, 2012/02/01 19:43:17:
s/Non/No?

mmocny, 2012/02/01 20:44:19:
I don't think so..
s/Non/Not or s/Non /In if anyth…

jonathan.backer, 2012/02/01 21:39:14:
I think I see. You mean non-visible.
On 2012/02/…
```cpp
//     max_surfaces_with_frontbuffer_soft_limit_ limit.
//      * Will have only a frontbuffer.
//
//  3. Hibernated: Non visible surfaces, which have surpassed the
//     max_surfaces_with_frontbuffer_soft_limit_ limit.
//      * Will not have either buffer.
//
// The rule for categorizing contexts without a surface is:
//  * Stubs without a surface instead have an affected_surface_ids list.
//    Its state must be the same as the most visible surface it affects.
void GpuMemoryManager::Manage() {
  manage_scheduled_ = false;

  // Create stub lists by separating out the two types received from client
  std::vector<GpuCommandBufferStubBase*> stubs_with_surface;
  std::vector<GpuCommandBufferStubBase*> stubs_without_surface;
  {
    std::vector<GpuCommandBufferStubBase*> stubs;
    client_->AppendAllCommandBufferStubs(stubs);

    for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin();
        it != stubs.end(); ++it) {
      GpuCommandBufferStubBase* stub = *it;
      if (stub->surface_state())
        stubs_with_surface.push_back(stub);
      else
        stubs_without_surface.push_back(stub);
    }
  }

  // Sort stubs with surface into {visibility,last_used_time} order using
  // custom comparator
  std::sort(stubs_with_surface.begin(), stubs_with_surface.end(),
            StubWithSurfaceComparator());

  // Separate stubs with surfaces into three sets and send memory allocation
  std::set<int32> all_buffers, front_buffers, no_buffers;

  for (size_t i = 0; i < stubs_with_surface.size(); ++i) {
    GpuCommandBufferStubBase* stub = stubs_with_surface[i];
    if (stub->surface_state()->visible) {
      all_buffers.insert(stub->surface_state()->surface_id);
      stub->SendMemoryAllocationToProxy(all_buffers_allocation);
    } else if (i < max_surfaces_with_frontbuffer_soft_limit_) {
      front_buffers.insert(stub->surface_state()->surface_id);
      stub->SendMemoryAllocationToProxy(front_buffers_allocation);
    } else {
      no_buffers.insert(stub->surface_state()->surface_id);
      stub->SendMemoryAllocationToProxy(no_buffers_allocation);
    }
  }

  // Now, go through the stubs without surfaces and send memory allocations
  // based on buckets we just divided. Because there may be multiple affected
  // surfaces, use the state of the most "important" affected surface.
  for (std::vector<GpuCommandBufferStubBase*>::const_iterator it =
      stubs_without_surface.begin(); it != stubs_without_surface.end(); ++it) {
    GpuCommandBufferStubBase* stub = *it;
    if (stub->affected_surface_ids().empty())
      continue;
    if (std::find_first_of(all_buffers.begin(),
                           all_buffers.end(),
                           stub->affected_surface_ids().begin(),
                           stub->affected_surface_ids().end()) !=
        all_buffers.end()) {
      stub->SendMemoryAllocationToProxy(all_buffers_allocation);
    } else if (std::find_first_of(front_buffers.begin(),
                                  front_buffers.end(),
                                  stub->affected_surface_ids().begin(),
                                  stub->affected_surface_ids().end()) !=
        front_buffers.end()) {
      stub->SendMemoryAllocationToProxy(front_buffers_allocation);
    } else if (std::find_first_of(no_buffers.begin(),
                                  no_buffers.end(),
                                  stub->affected_surface_ids().begin(),
                                  stub->affected_surface_ids().end()) !=
        no_buffers.end()) {
      stub->SendMemoryAllocationToProxy(no_buffers_allocation);
    } else {
      DLOG(ERROR) << "GpuCommandBufferStub::affected_surface_ids are not "
                     "valid.";
    }
  }
}
```
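To make the three-way split concrete, here is a standalone sketch of the same bucketing decision applied to a pre-sorted surface list (simplified types; AssignBuckets is a made-up helper, not part of this CL). Note that, as the loop over stubs_with_surface is written, the index compared against the soft limit is the position in the whole sorted list, so visible surfaces also count toward it:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

enum Bucket { ALL_BUFFERS, FRONT_BUFFER_ONLY, NO_BUFFERS };

struct Surface {
  bool visible;
  int last_used_time;  // Larger means used more recently.
};

// Mirrors the bucketing loop above. Assumes |surfaces| is already sorted
// visible-first, most recently used first, as the comparator produces.
std::vector<Bucket> AssignBuckets(const std::vector<Surface>& surfaces,
                                  std::size_t frontbuffer_soft_limit) {
  std::vector<Bucket> buckets;
  for (std::size_t i = 0; i < surfaces.size(); ++i) {
    if (surfaces[i].visible)
      buckets.push_back(ALL_BUFFERS);        // foreground
    else if (i < frontbuffer_soft_limit)
      buckets.push_back(FRONT_BUFFER_ONLY);  // background
    else
      buckets.push_back(NO_BUFFERS);         // hibernated
  }
  return buckets;
}

int main() {
  // One visible tab and two hidden tabs with a soft limit of 2: the visible
  // tab keeps both buffers, the most recently used hidden tab keeps only its
  // frontbuffer, and the oldest hidden tab loses both buffers.
  std::vector<Surface> surfaces = {{true, 40}, {false, 30}, {false, 10}};
  std::vector<Bucket> buckets = AssignBuckets(surfaces, 2);
  assert(buckets[0] == ALL_BUFFERS);
  assert(buckets[1] == FRONT_BUFFER_ONLY);
  assert(buckets[2] == NO_BUFFERS);
  return 0;
}
```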
```cpp
void GpuMemoryManager::SetMaxSurfacesWithFrontbufferSoftLimit(size_t limit) {
  max_surfaces_with_frontbuffer_soft_limit_ = limit;
}

#endif
```