| OLD | NEW |
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // This file contains the implementation of the FencedAllocator class. | 5 // This file contains the implementation of the FencedAllocator class. |
| 6 | 6 |
| 7 #include "gpu/command_buffer/client/fenced_allocator.h" | 7 #include "gpu/command_buffer/client/fenced_allocator.h" |
| 8 | 8 |
| 9 #include <algorithm> | 9 #include <algorithm> |
| 10 | 10 |
| 11 #include "gpu/command_buffer/client/cmd_buffer_helper.h" | 11 #include "gpu/command_buffer/client/cmd_buffer_helper.h" |
| 12 | 12 |
| 13 namespace gpu { | 13 namespace gpu { |
| 14 | 14 |
| 15 namespace { | 15 namespace { |
| 16 | 16 |
| 17 // Round down to the largest multiple of kAllocAlignment no greater than |size|. | 17 // Round down to the largest multiple of kAllocAlignment no greater than |size|. |
| 18 unsigned int RoundDown(unsigned int size) { | 18 unsigned int RoundDown(unsigned int size) { |
| 19 return size & ~(FencedAllocator::kAllocAlignment - 1); | 19 return size & ~(FencedAllocator::kAllocAlignment - 1); |
| 20 } | 20 } |
| 21 | 21 |
| 22 // Round up to the smallest multiple of kAllocAlignment no smaller than |size|. | 22 // Round up to the smallest multiple of kAllocAlignment no smaller than |size|. |
| 23 unsigned int RoundUp(unsigned int size) { | 23 unsigned int RoundUp(unsigned int size) { |
| 24 return (size + (FencedAllocator::kAllocAlignment - 1)) & | 24 return (size + (FencedAllocator::kAllocAlignment - 1)) & |
| 25 ~(FencedAllocator::kAllocAlignment - 1); | 25 ~(FencedAllocator::kAllocAlignment - 1); |
| 26 } | 26 } |
| 27 | 27 |
| 28 } // namespace | 28 } // namespace |
| 29 | 29 |
| 30 #ifndef _MSC_VER | |
| 31 const FencedAllocator::Offset FencedAllocator::kInvalidOffset; | |
| 32 const unsigned int FencedAllocator::kAllocAlignment; | |
| 33 #endif | |
| 34 | |
| 35 FencedAllocator::FencedAllocator(unsigned int size, CommandBufferHelper* helper) | 30 FencedAllocator::FencedAllocator(unsigned int size, CommandBufferHelper* helper) |
| 36 : helper_(helper), bytes_in_use_(0) { | 31 : helper_(helper), bytes_in_use_(0) { |
| 37 Block block = { FREE, 0, RoundDown(size), kUnusedToken }; | 32 Block block = { FREE, 0, RoundDown(size), kUnusedToken }; |
| 38 blocks_.push_back(block); | 33 blocks_.push_back(block); |
| 39 } | 34 } |
| 40 | 35 |
| 41 FencedAllocator::~FencedAllocator() { | 36 FencedAllocator::~FencedAllocator() { |
| 42 // Free blocks pending tokens. | 37 // Free blocks pending tokens. |
| 43 for (unsigned int i = 0; i < blocks_.size(); ++i) { | 38 for (unsigned int i = 0; i < blocks_.size(); ++i) { |
| 44 if (blocks_[i].state == FREE_PENDING_TOKEN) { | 39 if (blocks_[i].state == FREE_PENDING_TOKEN) { |
| (...skipping 203 matching lines...) |
| 248 // The blocks are in offset order, so we can do a binary search. | 243 // The blocks are in offset order, so we can do a binary search. |
| 249 FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) { | 244 FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) { |
| 250 Block templ = { IN_USE, offset, 0, kUnusedToken }; | 245 Block templ = { IN_USE, offset, 0, kUnusedToken }; |
| 251 Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(), | 246 Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(), |
| 252 templ, OffsetCmp()); | 247 templ, OffsetCmp()); |
| 253 DCHECK(it != blocks_.end() && it->offset == offset); | 248 DCHECK(it != blocks_.end() && it->offset == offset); |
| 254 return it-blocks_.begin(); | 249 return it-blocks_.begin(); |
| 255 } | 250 } |
| 256 | 251 |
| 257 } // namespace gpu | 252 } // namespace gpu |
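For context on the bit-masking in RoundDown/RoundUp and the offset-ordered lookup in GetBlockByOffset above, here is a minimal standalone sketch of the same two techniques. The alignment value of 16 and the simplified Block/OffsetCmp types are assumptions for illustration only, not the exact definitions from fenced_allocator.h.

```cpp
// Sketch of power-of-two alignment rounding and the sorted-by-offset
// lower_bound lookup used in the diff. Types and constants are simplified.
#include <algorithm>
#include <cassert>
#include <vector>

namespace {

const unsigned int kAlign = 16;  // assumed power-of-two alignment

// Clear the low bits: largest multiple of kAlign that is <= size.
unsigned int RoundDown(unsigned int size) {
  return size & ~(kAlign - 1);
}

// Add kAlign - 1, then clear the low bits: smallest multiple of kAlign
// that is >= size.
unsigned int RoundUp(unsigned int size) {
  return (size + (kAlign - 1)) & ~(kAlign - 1);
}

// Simplified stand-in for the allocator's block record.
struct Block {
  unsigned int offset;
  unsigned int size;
};

// Blocks stay sorted by offset, so std::lower_bound finds a block's index
// in O(log n), mirroring GetBlockByOffset in the diff.
struct OffsetCmp {
  bool operator()(const Block& a, const Block& b) const {
    return a.offset < b.offset;
  }
};

}  // namespace

int main() {
  assert(RoundDown(37) == 32);  // 37 rounded down to a multiple of 16
  assert(RoundUp(37) == 48);    // next multiple of 16 at or above 37
  assert(RoundUp(32) == 32);    // already aligned values are unchanged

  std::vector<Block> blocks = {{0, 32}, {32, 16}, {48, 64}};
  Block key = {32, 0};
  std::vector<Block>::iterator it =
      std::lower_bound(blocks.begin(), blocks.end(), key, OffsetCmp());
  assert(it != blocks.end() && it->offset == 32);
  assert(it - blocks.begin() == 1);  // block index, as the diff returns it
  return 0;
}
```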