Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(111)

Side by Side Diff: gpu/command_buffer/client/fenced_allocator.cc

Issue 116863003: gpu: Reuse transfer buffers more aggressively (Closed) Base URL: http://git.chromium.org/chromium/src.git@master
Patch Set: Async upload token part of existing Async command; use separate shared memory to sync async upload … Created 6 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 // This file contains the implementation of the FencedAllocator class. 5 // This file contains the implementation of the FencedAllocator class.
6 6
7 #include "gpu/command_buffer/client/fenced_allocator.h" 7 #include "gpu/command_buffer/client/fenced_allocator.h"
8 8
9 #include <algorithm> 9 #include <algorithm>
10 10
(...skipping 16 matching lines...) Expand all
27 return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1); 27 return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
28 } 28 }
29 29
30 } // namespace 30 } // namespace
31 31
32 #ifndef _MSC_VER 32 #ifndef _MSC_VER
33 const FencedAllocator::Offset FencedAllocator::kInvalidOffset; 33 const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
34 #endif 34 #endif
35 35
// Constructs an allocator managing [0, RoundDown(size)) of a buffer as a
// single FREE block.
//
// |helper| is used to wait on / query command-buffer tokens when retiring
// FREE_PENDING_TOKEN blocks.  |poll_callback| is run from FreeUnused() so
// that blocks whose lifetime is handled outside this allocator can be
// released before the free scan.
FencedAllocator::FencedAllocator(unsigned int size,
                                 CommandBufferHelper *helper,
                                 const base::Closure& poll_callback)
    : helper_(helper),
      poll_callback_(poll_callback),
      bytes_in_use_(0) {
  // The whole buffer (rounded down to the allocation alignment) starts out
  // as one free block with no token attached.
  Block block = { FREE, 0, RoundDown(size), kUnusedToken };
  blocks_.push_back(block);
}
43 45
FencedAllocator::~FencedAllocator() {
  // Wait for and free every block still pending a token.
  // WaitForTokenAndFreeBlock() collapses the freed block with its free
  // neighbors and returns the resulting index, so the loop resumes from
  // that position rather than re-scanning.
  for (unsigned int i = 0; i < blocks_.size(); ++i) {
    if (blocks_[i].state == FREE_PENDING_TOKEN) {
      i = WaitForTokenAndFreeBlock(i);
    }
  }
  // These checks are not valid if the service has crashed or lost the context.
  // DCHECK_EQ(blocks_.size(), 1u);
  // DCHECK_EQ(blocks_[0].state, FREE);
  // NOTE(review): reviewers asked for these DCHECKs to be reinstated once
  // lost-context handling frees all non-freed blocks — confirm whether that
  // handling landed before re-enabling them.
}
55 57
56 // Looks for a non-allocated block that is big enough. Search in the FREE 58 // Looks for a non-allocated block that is big enough. Search in the FREE
57 // blocks first (for direct usage), first-fit, then in the FREE_PENDING_TOKEN 59 // blocks first (for direct usage), first-fit, then in the FREE_PENDING_TOKEN
58 // blocks, waiting for them. The current implementation isn't smart about 60 // blocks, waiting for them. The current implementation isn't smart about
59 // optimizing what to wait for, just looks inside the block in order (first-fit 61 // optimizing what to wait for, just looks inside the block in order (first-fit
60 // as well). 62 // as well).
61 FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) { 63 FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
62 // size of 0 is not allowed because it would be inconsistent to only sometimes 64 // size of 0 is not allowed because it would be inconsistent to only sometimes
63 // have it succeed. Example: Alloc(SizeOfBuffer), Alloc(0). 65 // have it succeed. Example: Alloc(SizeOfBuffer), Alloc(0).
(...skipping 132 matching lines...) Expand 10 before | Expand all | Expand 10 after
196 BlockIndex index) { 198 BlockIndex index) {
197 Block &block = blocks_[index]; 199 Block &block = blocks_[index];
198 DCHECK_EQ(block.state, FREE_PENDING_TOKEN); 200 DCHECK_EQ(block.state, FREE_PENDING_TOKEN);
199 helper_->WaitForToken(block.token); 201 helper_->WaitForToken(block.token);
200 block.state = FREE; 202 block.state = FREE;
201 return CollapseFreeBlock(index); 203 return CollapseFreeBlock(index);
202 } 204 }
203 205
// Frees any blocks pending a token for which the token has been read.
void FencedAllocator::FreeUnused() {
  // First run the poll callback so that blocks whose lifetime is handled
  // outside this allocator get freed before the scan below.
  poll_callback_.Run();

  int32 last_token_read = helper_->last_token_read();
  for (unsigned int i = 0; i < blocks_.size();) {
    Block& block = blocks_[i];
    // NOTE(review): the 'block.token <= last_token_read' comparison assumes
    // tokens never wrap around; reviewers flagged this as potentially
    // incorrect — confirm token wraparound handling.
    if (block.state == FREE_PENDING_TOKEN && block.token <= last_token_read) {
      block.state = FREE;
      // CollapseFreeBlock() merges the block with adjacent free blocks and
      // returns the merged block's index; continue scanning from there.
      i = CollapseFreeBlock(i);
    } else {
      ++i;
    }
  }
}
217 222
218 // If the block is exactly the requested size, simply mark it IN_USE, otherwise 223 // If the block is exactly the requested size, simply mark it IN_USE, otherwise
219 // split it and mark the first one (of the requested size) IN_USE. 224 // split it and mark the first one (of the requested size) IN_USE.
(...skipping 19 matching lines...) Expand all
// The blocks are in offset order, so we can do a binary search.
FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) {
  // Template block carrying only the offset key; the other fields are
  // ignored by OffsetCmp.
  Block templ = { IN_USE, offset, 0, kUnusedToken };
  Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(),
                                            templ, OffsetCmp());
  // The offset must match an existing block boundary exactly.
  DCHECK(it != blocks_.end() && it->offset == offset);
  return it-blocks_.begin();
}
247 252
248 } // namespace gpu 253 } // namespace gpu
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698