| OLD | NEW |
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // This file contains the implementation of the FencedAllocator class. | 5 // This file contains the implementation of the FencedAllocator class. |
| 6 | 6 |
| 7 #include "gpu/command_buffer/client/fenced_allocator.h" | 7 #include "gpu/command_buffer/client/fenced_allocator.h" |
| 8 | 8 |
| 9 #include <algorithm> | 9 #include <algorithm> |
| 10 | 10 |
| 11 #include "gpu/command_buffer/client/cmd_buffer_helper.h" | 11 #include "gpu/command_buffer/client/cmd_buffer_helper.h" |
| 12 | 12 |
| 13 namespace gpu { | 13 namespace gpu { |
| 14 | 14 |
| 15 namespace { | 15 namespace { |
| 16 | 16 |
| 17 // Allocation alignment, must be a power of two. | |
| 18 const unsigned int kAllocAlignment = 16; | |
| 19 | |
| 20 // Round down to the largest multiple of kAllocAlignment no greater than |size|. | 17 // Round down to the largest multiple of kAllocAlignment no greater than |size|. |
| 21 unsigned int RoundDown(unsigned int size) { | 18 unsigned int RoundDown(unsigned int size) { |
| 22 return size & ~(kAllocAlignment - 1); | 19 return size & ~(FencedAllocator::kAllocAlignment - 1); |
| 23 } | 20 } |
| 24 | 21 |
| 25 // Round up to the smallest multiple of kAllocAlignment no smaller than |size|. | 22 // Round up to the smallest multiple of kAllocAlignment no smaller than |size|. |
| 26 unsigned int RoundUp(unsigned int size) { | 23 unsigned int RoundUp(unsigned int size) { |
| 27 return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1); | 24 return (size + (FencedAllocator::kAllocAlignment - 1)) & |
| 25 ~(FencedAllocator::kAllocAlignment - 1); |
| 28 } | 26 } |
| 29 | 27 |
| 30 } // namespace | 28 } // namespace |
| 31 | 29 |
| 32 #ifndef _MSC_VER | 30 #ifndef _MSC_VER |
| 33 const FencedAllocator::Offset FencedAllocator::kInvalidOffset; | 31 const FencedAllocator::Offset FencedAllocator::kInvalidOffset; |
| 34 #endif | 32 #endif |
| 35 | 33 |
| 36 FencedAllocator::FencedAllocator(unsigned int size, | 34 FencedAllocator::FencedAllocator(unsigned int size, |
| 37 CommandBufferHelper* helper, | 35 CommandBufferHelper* helper, |
| (...skipping 206 matching lines...) |
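
Reviewer note (not part of this CL): the mask trick in RoundDown/RoundUp above only works because kAllocAlignment is a power of two, so the alignment minus one is a contiguous low-bit mask. A minimal standalone sketch, assuming the allocator's value of 16; the helper names here are illustrative only:

    #include <cassert>

    // Clears the low four bits, i.e. rounds down to a multiple of 16.
    unsigned int RoundDown16(unsigned int size) {
      return size & ~15u;
    }

    // Bumps |size| just past the next boundary, then clears the low bits.
    unsigned int RoundUp16(unsigned int size) {
      return (size + 15u) & ~15u;
    }

    int main() {
      assert(RoundDown16(37) == 32);
      assert(RoundUp16(37) == 48);
      assert(RoundUp16(48) == 48);  // Already aligned: unchanged.
      return 0;
    }

A non-power-of-two alignment would need integer division/modulo instead of masking, which is why the header documents kAllocAlignment as "must be a power of two."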
| 244 // The blocks are in offset order, so we can do a binary search. | 242 // The blocks are in offset order, so we can do a binary search. |
| 245 FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) { | 243 FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) { |
| 246 Block templ = { IN_USE, offset, 0, kUnusedToken }; | 244 Block templ = { IN_USE, offset, 0, kUnusedToken }; |
| 247 Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(), | 245 Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(), |
| 248 templ, OffsetCmp()); | 246 templ, OffsetCmp()); |
| 249 DCHECK(it != blocks_.end() && it->offset == offset); | 247 DCHECK(it != blocks_.end() && it->offset == offset); |
| 250 return it-blocks_.begin(); | 248 return it-blocks_.begin(); |
| 251 } | 249 } |
| 252 | 250 |
| 253 } // namespace gpu | 251 } // namespace gpu |
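
Reviewer note (not part of this CL): GetBlockByOffset relies on blocks_ staying sorted by offset so std::lower_bound can find the exact block in O(log n) and the iterator distance doubles as the block index. A small sketch of that lookup under simplified stand-in types; the real Block and OffsetCmp live in fenced_allocator.h:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Block { unsigned int offset; };

    // Orders blocks by their starting offset, mirroring the comparator used above.
    struct OffsetCmp {
      bool operator()(const Block& a, const Block& b) const {
        return a.offset < b.offset;
      }
    };

    int main() {
      std::vector<Block> blocks;
      const unsigned int offsets[] = { 0, 16, 48, 128 };
      for (size_t i = 0; i < 4; ++i) {
        Block b = { offsets[i] };
        blocks.push_back(b);
      }
      Block templ = { 48 };
      std::vector<Block>::iterator it =
          std::lower_bound(blocks.begin(), blocks.end(), templ, OffsetCmp());
      assert(it != blocks.end() && it->offset == 48);
      assert(it - blocks.begin() == 2);  // The block index returned by the method.
      return 0;
    }

Because the DCHECK only fires in debug builds, callers must pass an offset that was actually returned by an allocation; an arbitrary offset would silently map to the wrong block in release builds.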