Chromium Code Reviews

Unified Diff: gpu/command_buffer/client/fenced_allocator.cc

Issue 23130004: Enforce a memory limit on MappedMemoryManager (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Set chunk size (created 7 years, 4 months ago)
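For context, this patch makes FencedAllocator track the number of bytes held by IN_USE blocks (bytes_in_use_) so that the owning MappedMemoryManager can enforce a cap on mapped memory. The sketch below is illustrative only: kMemoryLimit, allocators_, request_size, and FreeUnusedChunks() are hypothetical names, and it assumes the companion header change exposes the counter through an accessor such as bytes_in_use().

    // Hypothetical sketch of manager-level enforcement built on the new counter.
    const size_t kMemoryLimit = 16 * 1024 * 1024;  // example cap, not from this CL

    size_t bytes_in_use = 0;
    for (size_t i = 0; i < allocators_.size(); ++i)
      bytes_in_use += allocators_[i]->bytes_in_use();  // counter added by this patch

    if (bytes_in_use + request_size > kMemoryLimit) {
      // Over budget: reclaim chunks whose blocks are FREE or whose pending
      // tokens have already passed before creating a new chunk.
      FreeUnusedChunks();
    }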
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 // This file contains the implementation of the FencedAllocator class.

 #include "gpu/command_buffer/client/fenced_allocator.h"
+
no sievers 2013/08/20 02:46:54 nit: whitespace change
kaanb 2013/08/20 22:15:46 Whitespace is advised per the style guide: http://
 #include <algorithm>
+
 #include "gpu/command_buffer/client/cmd_buffer_helper.h"

 namespace gpu {

 namespace {

 // Allocation alignment, must be a power of two.
 const unsigned int kAllocAlignment = 16;

 // Round down to the largest multiple of kAllocAlignment no greater than |size|.
 unsigned int RoundDown(unsigned int size) {
   return size & ~(kAllocAlignment - 1);
 }

 // Round up to the smallest multiple of kAllocAlignment no smaller than |size|.
 unsigned int RoundUp(unsigned int size) {
   return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
 }

 }  // namespace

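As a quick check of the two helpers above: with kAllocAlignment = 16, the mask ~(kAllocAlignment - 1) clears the low four bits, so (worked values, not part of the patch):

    // RoundDown(30) == 30 & ~0xF        == 16  (largest multiple of 16 <= 30)
    // RoundUp(30)   == (30 + 15) & ~0xF == 32  (smallest multiple of 16 >= 30)
    // RoundUp(32)   == (32 + 15) & ~0xF == 32  (already-aligned sizes are unchanged)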
 #ifndef _MSC_VER
 const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
 #endif

 FencedAllocator::FencedAllocator(unsigned int size,
                                  CommandBufferHelper *helper)
-    : helper_(helper) {
+    : helper_(helper),
+      bytes_in_use_(0) {
   Block block = { FREE, 0, RoundDown(size), kUnusedToken };
   blocks_.push_back(block);
 }

 FencedAllocator::~FencedAllocator() {
   // Free blocks pending tokens.
   for (unsigned int i = 0; i < blocks_.size(); ++i) {
     if (blocks_[i].state == FREE_PENDING_TOKEN) {
       i = WaitForTokenAndFreeBlock(i);
     }
(...skipping 36 matching lines...)
       return AllocInBlock(i, size);
   }
   return kInvalidOffset;
 }

 // Looks for the corresponding block, mark it FREE, and collapse it if
 // necessary.
 void FencedAllocator::Free(FencedAllocator::Offset offset) {
   BlockIndex index = GetBlockByOffset(offset);
   GPU_DCHECK_NE(blocks_[index].state, FREE);
+
+  if (blocks_[index].state == IN_USE)
piman 2013/08/20 02:56:16 nit: only do map lookup once (like in FreePendingT
kaanb 2013/08/20 22:15:46 Done.
+    bytes_in_use_ -= blocks_[index].size;
+
   blocks_[index].state = FREE;
   CollapseFreeBlock(index);
 }
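Per piman's nit above ("only do map lookup once"), the repeated indexing of blocks_[index] can be hoisted into a reference, as FreePendingToken() does below. A sketch of Free() along those lines (not necessarily the exact committed code):

    void FencedAllocator::Free(FencedAllocator::Offset offset) {
      BlockIndex index = GetBlockByOffset(offset);
      Block &block = blocks_[index];   // index into blocks_ only once
      GPU_DCHECK_NE(block.state, FREE);
      if (block.state == IN_USE)
        bytes_in_use_ -= block.size;
      block.state = FREE;
      CollapseFreeBlock(index);
    }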

 // Looks for the corresponding block, mark it FREE_PENDING_TOKEN.
 void FencedAllocator::FreePendingToken(
     FencedAllocator::Offset offset, int32 token) {
   BlockIndex index = GetBlockByOffset(offset);
   Block &block = blocks_[index];
+  if (block.state == IN_USE)
+    bytes_in_use_ -= block.size;
   block.state = FREE_PENDING_TOKEN;
   block.token = token;
 }

 // Gets the max of the size of the blocks marked as free.
 unsigned int FencedAllocator::GetLargestFreeSize() {
   FreeUnused();
   unsigned int max_size = 0;
   for (unsigned int i = 0; i < blocks_.size(); ++i) {
     Block &block = blocks_[i];
(...skipping 35 matching lines...)
       return false;
     if (next.offset != current.offset + current.size)
       return false;
     if (current.state == FREE && next.state == FREE)
       return false;
   }
   return true;
 }

 bool FencedAllocator::InUse() {
   return blocks_.size() != 1 || blocks_[0].state != FREE;
no sievers 2013/08/20 02:46:54 Hmm, this function is misleading, right? If you ha
kaanb 2013/08/20 22:15:46 Done.
piman 2013/08/20 22:26:13 Not sure we should change this - I actually don't
no sievers 2013/08/20 22:39:37 My bad. Shouldn't have commented on this at all. I
kaanb 2013/08/20 22:40:53 Done.
 }
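Regarding the thread above: because neighbouring FREE blocks are collapsed eagerly, the allocator shrinks back to a single FREE block only when nothing is allocated and nothing is pending a token, so InUse() also reports true while blocks sit in FREE_PENDING_TOKEN. An illustrative snippet (assumes a valid CommandBufferHelper* helper; not part of the CL):

    FencedAllocator allocator(1 << 20, helper);
    bool in_use = allocator.InUse();             // false: one collapsed FREE block

    FencedAllocator::Offset offset = allocator.Alloc(256);
    in_use = allocator.InUse();                  // true: an IN_USE block exists

    allocator.FreePendingToken(offset, helper->InsertToken());
    in_use = allocator.InUse();                  // still true: FREE_PENDING_TOKEN counts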

 // Collapse the block to the next one, then to the previous one. Provided the
 // structure is consistent, those are the only blocks eligible for collapse.
 FencedAllocator::BlockIndex FencedAllocator::CollapseFreeBlock(
     BlockIndex index) {
   if (index + 1 < blocks_.size()) {
     Block &next = blocks_[index + 1];
     if (next.state == FREE) {
       blocks_[index].size += next.size;
(...skipping 37 matching lines...)

 // If the block is exactly the requested size, simply mark it IN_USE, otherwise
 // split it and mark the first one (of the requested size) IN_USE.
 FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
                                                        unsigned int size) {
   Block &block = blocks_[index];
   GPU_DCHECK_GE(block.size, size);
   GPU_DCHECK_EQ(block.state, FREE);
   Offset offset = block.offset;
   if (block.size == size) {
+    bytes_in_use_ += size;
     block.state = IN_USE;
     return offset;
   }
   Block newblock = { FREE, offset + size, block.size - size, kUnusedToken};
+  bytes_in_use_ += size;
piman 2013/08/20 02:56:16 nit: factor this to before the if?
kaanb 2013/08/20 22:15:46 Done.
   block.state = IN_USE;
   block.size = size;
   // this is the last thing being done because it may invalidate block;
   blocks_.insert(blocks_.begin() + index + 1, newblock);
   return offset;
 }
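Applying piman's nit above ("factor this to before the if"), the counter update can appear once before the branch; AllocInBlock() would then presumably read as follows (a sketch, not the exact committed code):

    FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
                                                          unsigned int size) {
      Block &block = blocks_[index];
      GPU_DCHECK_GE(block.size, size);
      GPU_DCHECK_EQ(block.state, FREE);
      Offset offset = block.offset;
      bytes_in_use_ += size;          // counted once, before the branch
      if (block.size == size) {
        block.state = IN_USE;
        return offset;
      }
      Block newblock = { FREE, offset + size, block.size - size, kUnusedToken };
      block.state = IN_USE;
      block.size = size;
      // this is the last thing being done because it may invalidate block;
      blocks_.insert(blocks_.begin() + index + 1, newblock);
      return offset;
    }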

 // The blocks are in offset order, so we can do a binary search.
 FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) {
   Block templ = { IN_USE, offset, 0, kUnusedToken };
   Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(),
                                             templ, OffsetCmp());
   GPU_DCHECK(it != blocks_.end() && it->offset == offset);
   return it-blocks_.begin();
 }

 }  // namespace gpu
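The matching change to gpu/command_buffer/client/fenced_allocator.h is not part of this file's diff; presumably it declares the counter and an accessor roughly as follows (the exact name and type are assumptions):

    // Sketch of the header-side addition (illustrative, not shown in this diff).
    class FencedAllocator {
     public:
      // Total size of blocks currently marked IN_USE.
      unsigned int bytes_in_use() const { return bytes_in_use_; }

     private:
      unsigned int bytes_in_use_;  // initialized to 0 in the constructor
    };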
