OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "content/common/gpu/gpu_memory_manager.h" |
| 6 |
| 7 #if defined(ENABLE_GPU) |
| 8 |
| 9 #include <set> |
| 10 #include <algorithm> |
| 11 |
| 12 #include "base/bind.h" |
| 13 #include "base/message_loop.h" |
| 14 #include "content/common/gpu/gpu_command_buffer_stub.h" |
| 15 #include "content/common/gpu/gpu_memory_allocation.h" |
| 16 |
// Default soft cap on how many surfaces may keep a frontbuffer allocated;
// Manage() demotes surfaces past this limit to the "hibernated" state.
const size_t GpuMemoryManager::kDefaultMaxSurfacesWithFrontbufferSoftLimit = 8;
| 18 |
namespace {

// These are predefined values (in bytes) for
// GpuMemoryAllocation::gpuResourceSizeInBytes. Currently, the value is only
// used to check if it is 0 or non-0. In the future, these values will not
// come from constants, but rather will be distributed dynamically.
const uint32 kResourceSizeNonHibernatedTab = 1;
const uint32 kResourceSizeHibernatedTab = 0;

// Set up three allocation values for the three possible stub states.
// NOTE(review): trailing bool arguments appear to be (has_frontbuffer,
// has_backbuffer) based on usage below — confirm against the
// GpuMemoryAllocation constructor declaration.
const GpuMemoryAllocation all_buffers_allocation(
    kResourceSizeNonHibernatedTab, true, true);
const GpuMemoryAllocation front_buffers_allocation(
    kResourceSizeNonHibernatedTab, true, false);
const GpuMemoryAllocation no_buffers_allocation(
    kResourceSizeHibernatedTab, false, false);

}  // namespace
| 37 |
// Constructs a manager bound to |client|, which supplies the set of command
// buffer stubs to manage. No Manage() pass runs until ScheduleManage() is
// called; |weak_factory_| lets pending Manage() tasks be dropped safely if
// this object is destroyed first.
GpuMemoryManager::GpuMemoryManager(GpuMemoryManagerClient* client,
        size_t max_surfaces_with_frontbuffer_soft_limit)
    : client_(client),
      manage_scheduled_(false),
      max_surfaces_with_frontbuffer_soft_limit_(
          max_surfaces_with_frontbuffer_soft_limit),
      weak_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
}
| 46 |
// Destroying |weak_factory_| invalidates any weak pointers handed to posted
// Manage() tasks, so pending tasks become no-ops rather than use-after-free.
GpuMemoryManager::~GpuMemoryManager() {
}
| 49 |
| 50 bool GpuMemoryManager::StubWithSurfaceComparator::operator()( |
| 51 GpuCommandBufferStubBase* lhs, |
| 52 GpuCommandBufferStubBase* rhs) { |
| 53 DCHECK(lhs->has_surface_state() && rhs->has_surface_state()); |
| 54 const GpuCommandBufferStubBase::SurfaceState& lhs_ss = lhs->surface_state(); |
| 55 const GpuCommandBufferStubBase::SurfaceState& rhs_ss = rhs->surface_state(); |
| 56 if (lhs_ss.visible) |
| 57 return !rhs_ss.visible || (lhs_ss.last_used_time > rhs_ss.last_used_time); |
| 58 else |
| 59 return !rhs_ss.visible && (lhs_ss.last_used_time > rhs_ss.last_used_time); |
| 60 }; |
| 61 |
| 62 void GpuMemoryManager::ScheduleManage() { |
| 63 if (manage_scheduled_) |
| 64 return; |
| 65 MessageLoop::current()->PostTask( |
| 66 FROM_HERE, |
| 67 base::Bind(&GpuMemoryManager::Manage, weak_factory_.GetWeakPtr())); |
| 68 manage_scheduled_ = true; |
| 69 } |
| 70 |
| 71 // The current Manage algorithm simply classifies contexts (stubs) into |
| 72 // "foreground", "background", or "hibernated" categories. |
| 73 // For each of these three categories, there are predefined memory allocation |
| 74 // limits and front/backbuffer states. |
| 75 // |
| 76 // Stubs may or may not have a surfaces, and the rules are different for each. |
| 77 // |
| 78 // The rules for categorizing contexts with a surface are: |
| 79 // 1. Foreground: All visible surfaces. |
| 80 // * Must have both front and back buffer. |
| 81 // |
| 82 // 2. Background: Non visible surfaces, which have not surpassed the |
| 83 // max_surfaces_with_frontbuffer_soft_limit_ limit. |
| 84 // * Will have only a frontbuffer. |
| 85 // |
| 86 // 3. Hibernated: Non visible surfaces, which have surpassed the |
| 87 // max_surfaces_with_frontbuffer_soft_limit_ limit. |
| 88 // * Will not have either buffer. |
| 89 void GpuMemoryManager::Manage() { |
| 90 manage_scheduled_ = false; |
| 91 |
| 92 // Create stub lists by separating out the two types received from client |
| 93 std::vector<GpuCommandBufferStubBase*> stubs_with_surface; |
| 94 { |
| 95 std::vector<GpuCommandBufferStubBase*> stubs; |
| 96 client_->AppendAllCommandBufferStubs(stubs); |
| 97 |
| 98 for (std::vector<GpuCommandBufferStubBase*>::iterator it = stubs.begin(); |
| 99 it != stubs.end(); ++it) { |
| 100 GpuCommandBufferStubBase* stub = *it; |
| 101 if (stub->has_surface_state()) |
| 102 stubs_with_surface.push_back(stub); |
| 103 } |
| 104 } |
| 105 |
| 106 // Sort stubs with surface into {visibility,last_used_time} order using |
| 107 // custom comparator |
| 108 std::sort(stubs_with_surface.begin(), stubs_with_surface.end(), |
| 109 StubWithSurfaceComparator()); |
| 110 |
| 111 // Separate stubs with surfaces into three sets and send memory allocation |
| 112 std::set<int32> all_buffers, front_buffers, no_buffers; |
| 113 |
| 114 for (size_t i = 0; i < stubs_with_surface.size(); ++i) { |
| 115 GpuCommandBufferStubBase* stub = stubs_with_surface[i]; |
| 116 DCHECK(stub->has_surface_state()); |
| 117 if (stub->surface_state().visible) { |
| 118 all_buffers.insert(stub->surface_state().surface_id); |
| 119 stub->SendMemoryAllocationToProxy(all_buffers_allocation); |
| 120 } else if (i < max_surfaces_with_frontbuffer_soft_limit_) { |
| 121 front_buffers.insert(stub->surface_state().surface_id); |
| 122 stub->SendMemoryAllocationToProxy(front_buffers_allocation); |
| 123 } else { |
| 124 no_buffers.insert(stub->surface_state().surface_id); |
| 125 stub->SendMemoryAllocationToProxy(no_buffers_allocation); |
| 126 } |
| 127 } |
| 128 } |
| 129 |
| 130 #endif |
OLD | NEW |