Chromium Code Reviews

Unified Diff: gpu/command_buffer/client/fenced_allocator.cc

Issue 23130004: Enforce a memory limit on MappedMemoryManager (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: fix another namespace error (Created 7 years, 4 months ago)
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the implementation of the FencedAllocator class.

#include "gpu/command_buffer/client/fenced_allocator.h"
+
#include <algorithm>
+
#include "gpu/command_buffer/client/cmd_buffer_helper.h"

namespace gpu {

namespace {

// Allocation alignment, must be a power of two.
const unsigned int kAllocAlignment = 16;

// Round down to the largest multiple of kAllocAlignment no greater than |size|.
unsigned int RoundDown(unsigned int size) {
  return size & ~(kAllocAlignment - 1);
}

// Round up to the smallest multiple of kAllocAlignment no smaller than |size|.
unsigned int RoundUp(unsigned int size) {
  return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
}

}  // namespace

#ifndef _MSC_VER
const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
#endif

FencedAllocator::FencedAllocator(unsigned int size,
                                 CommandBufferHelper *helper)
-    : helper_(helper) {
+    : helper_(helper),
+      bytes_in_use_(0) {
  Block block = { FREE, 0, RoundDown(size), kUnusedToken };
  blocks_.push_back(block);
}

FencedAllocator::~FencedAllocator() {
  // Free blocks pending tokens.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    if (blocks_[i].state == FREE_PENDING_TOKEN) {
      i = WaitForTokenAndFreeBlock(i);
    }
(...skipping 36 matching lines...)
      return AllocInBlock(i, size);
  }
  return kInvalidOffset;
}

// Looks for the corresponding block, mark it FREE, and collapse it if
// necessary.
void FencedAllocator::Free(FencedAllocator::Offset offset) {
  BlockIndex index = GetBlockByOffset(offset);
  GPU_DCHECK_NE(blocks_[index].state, FREE);
-  blocks_[index].state = FREE;
+  Block &block = blocks_[index];
+
+  if (block.state == IN_USE)
+    bytes_in_use_ -= block.size;
+
+  block.state = FREE;
  CollapseFreeBlock(index);
}

// Looks for the corresponding block, mark it FREE_PENDING_TOKEN.
void FencedAllocator::FreePendingToken(
    FencedAllocator::Offset offset, int32 token) {
  BlockIndex index = GetBlockByOffset(offset);
  Block &block = blocks_[index];
+  if (block.state == IN_USE)
+    bytes_in_use_ -= block.size;
  block.state = FREE_PENDING_TOKEN;
  block.token = token;
}

// Gets the max of the size of the blocks marked as free.
unsigned int FencedAllocator::GetLargestFreeSize() {
  FreeUnused();
  unsigned int max_size = 0;
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    Block &block = blocks_[i];
(...skipping 34 matching lines...)
    if (next.offset <= current.offset)
      return false;
    if (next.offset != current.offset + current.size)
      return false;
    if (current.state == FREE && next.state == FREE)
      return false;
  }
  return true;
}

+// Returns false if all blocks are actually FREE, in which
+// case they would be coalesced into one block, true otherwise.
bool FencedAllocator::InUse() {
  return blocks_.size() != 1 || blocks_[0].state != FREE;
}

// Collapse the block to the next one, then to the previous one. Provided the
// structure is consistent, those are the only blocks eligible for collapse.
FencedAllocator::BlockIndex FencedAllocator::CollapseFreeBlock(
    BlockIndex index) {
  if (index + 1 < blocks_.size()) {
    Block &next = blocks_[index + 1];
(...skipping 38 matching lines...)
}

// If the block is exactly the requested size, simply mark it IN_USE, otherwise
// split it and mark the first one (of the requested size) IN_USE.
FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
                                                       unsigned int size) {
  Block &block = blocks_[index];
  GPU_DCHECK_GE(block.size, size);
  GPU_DCHECK_EQ(block.state, FREE);
  Offset offset = block.offset;
+  bytes_in_use_ += size;
  if (block.size == size) {
    block.state = IN_USE;
    return offset;
  }
  Block newblock = { FREE, offset + size, block.size - size, kUnusedToken};
  block.state = IN_USE;
  block.size = size;
  // this is the last thing being done because it may invalidate block;
  blocks_.insert(blocks_.begin() + index + 1, newblock);
  return offset;
}

// The blocks are in offset order, so we can do a binary search.
FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) {
  Block templ = { IN_USE, offset, 0, kUnusedToken };
  Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(),
                                            templ, OffsetCmp());
  GPU_DCHECK(it != blocks_.end() && it->offset == offset);
  return it-blocks_.begin();
}

}  // namespace gpu
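
Note (reviewer aside, not part of the patch): the change above keeps a running bytes_in_use_ total so that a caller such as MappedMemoryManager can enforce a memory limit without walking the block list. Below is a minimal, self-contained C++ sketch of that accounting pattern only; the TrackingAllocator class, its WouldFit() helper, and the 4096-byte limit are hypothetical illustrations for this review, not Chromium APIs.

#include <cassert>
#include <map>

// Hypothetical stand-in for the accounting this patch adds: every successful
// allocation adds its size to bytes_in_use_, and freeing an in-use block
// subtracts it, so checking a limit is a single comparison.
class TrackingAllocator {
 public:
  explicit TrackingAllocator(unsigned int limit)
      : limit_(limit), bytes_in_use_(0), next_id_(0) {}

  // True if |size| more bytes would keep the total within |limit_|.
  bool WouldFit(unsigned int size) const {
    return bytes_in_use_ + size <= limit_;
  }

  // Mirrors bytes_in_use_ += size in AllocInBlock(); returns -1 on failure.
  int Alloc(unsigned int size) {
    if (!WouldFit(size))
      return -1;
    int id = next_id_++;
    sizes_[id] = size;
    bytes_in_use_ += size;
    return id;
  }

  // Mirrors bytes_in_use_ -= block.size in Free() / FreePendingToken().
  void Free(int id) {
    std::map<int, unsigned int>::iterator it = sizes_.find(id);
    assert(it != sizes_.end());
    bytes_in_use_ -= it->second;
    sizes_.erase(it);
  }

  unsigned int bytes_in_use() const { return bytes_in_use_; }

 private:
  unsigned int limit_;
  unsigned int bytes_in_use_;
  int next_id_;
  std::map<int, unsigned int> sizes_;
};

int main() {
  TrackingAllocator allocator(4096);  // hypothetical 4 KB limit
  int a = allocator.Alloc(1024);
  int b = allocator.Alloc(2048);
  assert(a >= 0 && b >= 0);
  assert(allocator.bytes_in_use() == 3072);
  assert(!allocator.WouldFit(2048));  // 3072 + 2048 would exceed 4096
  allocator.Free(a);
  assert(allocator.bytes_in_use() == 2048);
  allocator.Free(b);
  return 0;
}

In the actual patch the bookkeeping lives inside FencedAllocator itself (AllocInBlock, Free, FreePendingToken); the sketch only isolates it so the constant-time limit check is easy to see.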
