| Index: gpu/command_buffer/client/fenced_allocator.cc
| diff --git a/gpu/command_buffer/client/fenced_allocator.cc b/gpu/command_buffer/client/fenced_allocator.cc
| index 0e90bf385b4d0f8a9dded299cfb5a04b1aef2137..7b277c632b3a6290406e0f12d3c0da0fe8f66c91 100644
| --- a/gpu/command_buffer/client/fenced_allocator.cc
| +++ b/gpu/command_buffer/client/fenced_allocator.cc
| @@ -34,8 +34,10 @@ const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
| #endif
|
| FencedAllocator::FencedAllocator(unsigned int size,
| -                                 CommandBufferHelper *helper)
| +                                 CommandBufferHelper *helper,
| +                                 const base::Closure& poll_callback)
|     : helper_(helper),
| +      poll_callback_(poll_callback),
|       bytes_in_use_(0) {
|   Block block = { FREE, 0, RoundDown(size), kUnusedToken };
|   blocks_.push_back(block);
| @@ -203,6 +205,9 @@ FencedAllocator::BlockIndex FencedAllocator::WaitForTokenAndFreeBlock(
|
| // Frees any blocks pending a token for which the token has been read.
| void FencedAllocator::FreeUnused() {
| +  // Free any blocks whose lifetime is handled externally.
| +  poll_callback_.Run();
| +
|   int32 last_token_read = helper_->last_token_read();
|   for (unsigned int i = 0; i < blocks_.size();) {
|     Block& block = blocks_[i];
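
For context, here is a minimal caller-side sketch of how the new poll_callback parameter might be wired up. It is an illustration under stated assumptions, not code from this change: ExampleClient and PollPendingFrees are hypothetical names, and the only interfaces taken as given are the FencedAllocator constructor signature shown in the first hunk plus base::Bind/base::Closure from Chromium's base library.

  // Hypothetical caller-side sketch; not part of this patch.
  #include "base/bind.h"
  #include "base/bind_helpers.h"
  #include "gpu/command_buffer/client/fenced_allocator.h"

  namespace gpu {

  class ExampleClient {
   public:
    ExampleClient(unsigned int size, CommandBufferHelper* helper)
        : allocator_(size,
                     helper,
                     base::Bind(&ExampleClient::PollPendingFrees,
                                base::Unretained(this))) {}

   private:
    // Invoked from FencedAllocator::FreeUnused() before the allocator scans
    // for reusable blocks, so blocks whose lifetime is managed outside the
    // allocator can be returned first.
    void PollPendingFrees() {
      // Client-specific: release any externally managed blocks whose fences
      // have already passed.
    }

    FencedAllocator allocator_;
  };

  }  // namespace gpu

base::Unretained is safe in this sketch only because the allocator is a member of the client and cannot outlive it; FreeUnused() runs the callback synchronously, so no task posting or reference counting is involved.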