Index: gpu/command_buffer/client/fenced_allocator.cc
diff --git a/gpu/command_buffer/client/fenced_allocator.cc b/gpu/command_buffer/client/fenced_allocator.cc
index 0e90bf385b4d0f8a9dded299cfb5a04b1aef2137..39720c4a199786eb5a0974058dbf25a9272849e7 100644
--- a/gpu/command_buffer/client/fenced_allocator.cc
+++ b/gpu/command_buffer/client/fenced_allocator.cc
@@ -34,8 +34,10 @@ const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
 #endif
 
 FencedAllocator::FencedAllocator(unsigned int size,
+                                 bool aggressive_reuse,
                                  CommandBufferHelper *helper)
     : helper_(helper),
+      aggressive_reuse_(aggressive_reuse),
       bytes_in_use_(0) {
   Block block = { FREE, 0, RoundDown(size), kUnusedToken };
   blocks_.push_back(block);
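
Note: the new parameter sits between the two existing ones, so the declaration in
fenced_allocator.h (not part of this patch) and every caller need the matching
update. A hypothetical call site, assuming the header gains the same parameter:

  // Hypothetical usage; `helper` and the 1 MB size are made-up values.
  FencedAllocator allocator(1024 * 1024,  // managed region size in bytes
                            false,        // aggressive_reuse: keep legacy behavior
                            helper);      // CommandBufferHelper*
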
|
@@ -68,6 +70,10 @@ FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
   // Round up the allocation size to ensure alignment.
   size = RoundUp(size);
 
+  if (aggressive_reuse_) {
+    return AggressiveAlloc(size);
+  }
+
   // Try first to allocate in a free block.
   for (unsigned int i = 0; i < blocks_.size(); ++i) {
     Block &block = blocks_[i];
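
With the flag set, Alloc() returns out of AggressiveAlloc() unconditionally and
never reaches the first-fit scan below it, so an allocator instance uses exactly
one strategy for its whole lifetime. The observable difference, per the DCHECK
in the next hunk, is which block states count as reusable. A hypothetical helper
(not part of the patch) summarizing it:

  // Hypothetical summary of the eligibility difference; State mirrors the
  // allocator's block states.
  enum State { FREE, FREE_PENDING_TOKEN, IN_USE };

  bool Reusable(State state, bool aggressive_reuse) {
    if (aggressive_reuse)
      return state != IN_USE;  // pending-token blocks are taken without waiting
    return state == FREE;      // the regular scan only takes fully-free blocks
  }
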
|
@@ -88,6 +94,45 @@ FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
   return kInvalidOffset;
 }
 
+FencedAllocator::Offset FencedAllocator::AggressiveAlloc(unsigned int size) {
+  // Find first series of blocks that together make a large enough area.
+  unsigned int range_start = 0, range_stop = 0;
+  unsigned int current_size = 0;
+  for (unsigned int i = 0; i < blocks_.size(); ++i) {
+    Block &block = blocks_[i];
+    if (block.state == IN_USE) {
+      current_size = 0;
+    } else {
+      DCHECK(block.state == FREE || block.state == FREE_PENDING_TOKEN);
+
+      if (current_size == 0)
+        range_start = i;
+      range_stop = i;
+
+      current_size += block.size;
+      if (current_size >= size)
+        break;
+    }
+  }
+
+  if (current_size < size)
+    return kInvalidOffset;
+
+  // Collapse the blocks in the range |range_start| -> |range_stop| into
+  // one block. Override the previous state, as we don't care whether
+  // the block was FREE or FREE_PENDING_TOKEN here.
+  unsigned int new_size = blocks_[range_start].size;
+  const unsigned int second_in_range = range_start + 1;
+  for (unsigned int i = range_start; i < range_stop; ++i) {
+    new_size += blocks_[second_in_range].size;
+    blocks_.erase(blocks_.begin() + second_in_range);
+  }
+  blocks_[range_start].size = new_size;
+  blocks_[range_start].state = FREE;
+
+  return AllocInBlock(range_start, size);
+}
+
 // Looks for the corresponding block, mark it FREE, and collapse it if
 // necessary.
 void FencedAllocator::Free(FencedAllocator::Offset offset) {
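
The collapse loop is the subtle part: it erases range_stop - range_start blocks,
always at the fixed index second_in_range, because each erase() shifts the rest
of the run down by one. A standalone sketch (plain std::vector, made-up sizes)
exercising the same pattern:

  // Not Chromium code: demonstrates why erasing at a fixed index walks a run.
  #include <cassert>
  #include <vector>

  int main() {
    std::vector<unsigned int> sizes = {16, 32, 8, 64};  // a run of free blocks
    unsigned int range_start = 0, range_stop = 3;       // whole run, inclusive
    unsigned int new_size = sizes[range_start];
    const unsigned int second_in_range = range_start + 1;
    for (unsigned int i = range_start; i < range_stop; ++i) {
      // sizes[second_in_range] names a different element on every iteration,
      // because the previous erase() shifted the vector down by one.
      new_size += sizes[second_in_range];
      sizes.erase(sizes.begin() + second_in_range);
    }
    sizes[range_start] = new_size;
    assert(sizes.size() == 1 && sizes[0] == 16 + 32 + 8 + 64);
    return 0;
  }
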
|
@@ -228,7 +273,12 @@ FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
     block.state = IN_USE;
     return offset;
   }
-  Block newblock = { FREE, offset + size, block.size - size, kUnusedToken};
+  Block newblock = {
+    block.state,
+    offset + size,
+    block.size - size,
+    kUnusedToken
+  };
   block.state = IN_USE;
   block.size = size;
   // this is the last thing being done because it may invalidate block;
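This last hunk makes the remainder block produced by a split inherit the state
of the block being split instead of hard-coding FREE. As far as these hunks
show, that is behavior-preserving: AggressiveAlloc explicitly resets the
collapsed block to FREE before calling AllocInBlock. The inheritance only
matters if AllocInBlock is ever handed a FREE_PENDING_TOKEN block, in which
case the remainder stays pending rather than becoming allocatable early.
Tracing the split with hypothetical numbers:

  // Hypothetical numbers: splitting a 256-byte block at offset 512 for a
  // 64-byte allocation, where S is the state of the block being split.
  //
  //   before: { S,      offset 512, size 256, token T }
  //   after:  { IN_USE, offset 512, size  64, token T }      <- returned block
  //           { S,      offset 576, size 192, kUnusedToken } <- newblock
  //
  // The remainder inherits S (previously forced to FREE), while the original
  // token value T stays on the front block; newblock starts with kUnusedToken.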