Chromium Code Reviews

Unified Diff: gpu/command_buffer/client/fenced_allocator.cc

Issue 116863003: gpu: Reuse transfer buffers more aggressively (Closed)
Base URL: http://git.chromium.org/chromium/src.git@master
Patch Set: Created 7 years ago
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 // This file contains the implementation of the FencedAllocator class.

 #include "gpu/command_buffer/client/fenced_allocator.h"

 #include <algorithm>

(...skipping 16 matching lines...)
   return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
 }

 }  // namespace
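The RoundUp helper above uses the standard power-of-two trick: adding kAllocAlignment - 1 pushes any non-multiple past the next alignment boundary, and the mask then clears the low bits, snapping the value down onto that boundary. A minimal standalone sketch, assuming an illustrative alignment of 16 (the real kAllocAlignment is defined in the lines skipped above):

    #include <cassert>

    // Illustrative stand-in for the real constant, which is defined in the
    // skipped lines above; it must be a power of two for the mask to work.
    const unsigned int kAllocAlignment = 16;

    unsigned int RoundUp(unsigned int size) {
      return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
    }

    int main() {
      assert(RoundUp(1) == 16);   // bumped up to the first boundary
      assert(RoundUp(16) == 16);  // exact multiples are unchanged
      assert(RoundUp(17) == 32);  // past a boundary rounds to the next one
      return 0;
    }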

 #ifndef _MSC_VER
 const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
 #endif

 FencedAllocator::FencedAllocator(unsigned int size,
+                                 bool aggressive_reuse,
                                  CommandBufferHelper *helper)
     : helper_(helper),
+      aggressive_reuse_(aggressive_reuse),
       bytes_in_use_(0) {
   Block block = { FREE, 0, RoundDown(size), kUnusedToken };
   blocks_.push_back(block);
 }

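The constructor seeds the allocator with a single FREE block spanning the whole rounded-down buffer; every later allocation carves pieces out of that block and its descendants. The Block record itself is declared in fenced_allocator.h, outside this diff; a sketch of its assumed shape, for readers following along:

    // Assumed shape of Block from fenced_allocator.h (not shown in this diff).
    struct Block {
      State state;        // FREE, FREE_PENDING_TOKEN, or IN_USE.
      Offset offset;      // Start of the block within the buffer.
      unsigned int size;  // Length of the block in bytes.
      int32 token;        // Fence token to wait on before reuse; kUnusedToken
                          // unless the state is FREE_PENDING_TOKEN.
    };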
 FencedAllocator::~FencedAllocator() {
   // Free blocks pending tokens.
   for (unsigned int i = 0; i < blocks_.size(); ++i) {
     if (blocks_[i].state == FREE_PENDING_TOKEN) {
       i = WaitForTokenAndFreeBlock(i);
(...skipping 12 matching lines...)
 FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
   // A size of 0 is not allowed because it would be inconsistent to only
   // sometimes have it succeed. Example: Alloc(SizeOfBuffer), Alloc(0).
   if (size == 0) {
     return kInvalidOffset;
   }

   // Round up the allocation size to ensure alignment.
   size = RoundUp(size);

+  if (aggressive_reuse_) {
+    return AggressiveAlloc(size);
+  }
+
   // Try first to allocate in a free block.
   for (unsigned int i = 0; i < blocks_.size(); ++i) {
     Block &block = blocks_[i];
     if (block.state == FREE && block.size >= size) {
       return AllocInBlock(i, size);
     }
   }

   // No free block is available. Look for blocks pending tokens, and wait for
   // them to be re-usable.
   for (unsigned int i = 0; i < blocks_.size(); ++i) {
     if (blocks_[i].state != FREE_PENDING_TOKEN)
       continue;
     i = WaitForTokenAndFreeBlock(i);
     if (blocks_[i].size >= size)
       return AllocInBlock(i, size);
   }
   return kInvalidOffset;
 }

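The default path is two passes: a first-fit scan over fully FREE blocks, then a blocking scan that waits on each FREE_PENDING_TOKEN block's fence before retrying it. A hedged caller-side sketch of the resulting contract; the allocator instance and sizes here are illustrative, not taken from this CL:

    // Illustrative usage only.
    FencedAllocator::Offset offset = allocator.Alloc(bytes);
    if (offset == FencedAllocator::kInvalidOffset) {
      // Alloc(0) always fails, as does any request too large for every
      // block even after waiting on pending tokens; callers must fall
      // back, e.g. to a larger transfer buffer.
    }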
+FencedAllocator::Offset FencedAllocator::AggressiveAlloc(unsigned int size) {
+  // Find the first series of blocks that together make a large enough area.
+  unsigned int range_start = 0, range_stop = 0;
+  unsigned int current_size = 0;
+  for (unsigned int i = 0; i < blocks_.size(); ++i) {
+    Block &block = blocks_[i];
+    if (block.state == IN_USE) {
+      current_size = 0;
+    } else {
+      DCHECK(block.state == FREE || block.state == FREE_PENDING_TOKEN);
+
+      if (current_size == 0)
+        range_start = i;
+      range_stop = i;
+
+      current_size += block.size;
+      if (current_size >= size)
+        break;
+    }
+  }
+
+  if (current_size < size)
+    return kInvalidOffset;
+
+  // Collapse the blocks in the range |range_start| -> |range_stop| into
+  // one block. Override the previous state, as we don't care whether
+  // the block was FREE or FREE_PENDING_TOKEN here.
+  unsigned int new_size = blocks_[range_start].size;
+  const unsigned int second_in_range = range_start + 1;
+  for (unsigned int i = range_start; i < range_stop; ++i) {
+    new_size += blocks_[second_in_range].size;
+    blocks_.erase(blocks_.begin() + second_in_range);
+  }
+  blocks_[range_start].size = new_size;
+  blocks_[range_start].state = FREE;
+
+  return AllocInBlock(range_start, size);
+}
+
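One detail worth calling out in the collapse loop above: blocks_.erase() is always invoked at the fixed index range_start + 1 (second_in_range), because each erase shifts the remaining elements left, so the same index keeps naming the next block to absorb. A self-contained sketch of that idiom on plain integers, with illustrative values:

    #include <cassert>
    #include <vector>

    int main() {
      // Sizes of four adjacent blocks; collapse indices 1..3 into index 1.
      std::vector<unsigned int> sizes;
      sizes.push_back(8);
      sizes.push_back(16);
      sizes.push_back(32);
      sizes.push_back(64);
      const unsigned int range_start = 1, range_stop = 3;

      unsigned int new_size = sizes[range_start];
      const unsigned int second_in_range = range_start + 1;
      for (unsigned int i = range_start; i < range_stop; ++i) {
        // erase() shifts later elements down, so second_in_range always
        // points at the next not-yet-absorbed block.
        new_size += sizes[second_in_range];
        sizes.erase(sizes.begin() + second_in_range);
      }
      sizes[range_start] = new_size;

      assert(sizes.size() == 2);
      assert(sizes[1] == 16 + 32 + 64);  // 112
      return 0;
    }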
 // Looks for the corresponding block, marks it FREE, and collapses it if
 // necessary.
 void FencedAllocator::Free(FencedAllocator::Offset offset) {
   BlockIndex index = GetBlockByOffset(offset);
   DCHECK_NE(blocks_[index].state, FREE);
   Block &block = blocks_[index];

   if (block.state == IN_USE)
     bytes_in_use_ -= block.size;

(...skipping 120 matching lines...)
                                                       unsigned int size) {
   Block &block = blocks_[index];
   DCHECK_GE(block.size, size);
   DCHECK_EQ(block.state, FREE);
   Offset offset = block.offset;
   bytes_in_use_ += size;
   if (block.size == size) {
     block.state = IN_USE;
     return offset;
   }
-  Block newblock = { FREE, offset + size, block.size - size, kUnusedToken};
+  Block newblock = {
+    block.state,
+    offset + size,
+    block.size - size,
+    kUnusedToken
+  };
   block.state = IN_USE;
   block.size = size;
   // This is done last because it may invalidate |block|.
   blocks_.insert(blocks_.begin() + index + 1, newblock);
   return offset;
 }

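AllocInBlock either consumes an exactly-sized block or splits it: the front of the block becomes the IN_USE allocation and the remainder is inserted immediately after it, with the insert done last because it may reallocate blocks_ and invalidate the block reference. A worked example with illustrative numbers:

    // Splitting a 64-byte FREE block at offset 128 for a 16-byte request:
    //
    //   before:  { FREE,   offset = 128, size = 64, kUnusedToken }
    //   after:   { IN_USE, offset = 128, size = 16, kUnusedToken }  <- returns 128
    //            { FREE,   offset = 144, size = 48, kUnusedToken }  <- inserted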
 // The blocks are in offset order, so we can do a binary search.
 FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) {
   Block templ = { IN_USE, offset, 0, kUnusedToken };
   Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(),
                                             templ, OffsetCmp());
   DCHECK(it != blocks_.end() && it->offset == offset);
   return it - blocks_.begin();
 }

 }  // namespace gpu