| OLD | NEW |
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // This file contains the implementation of the FencedAllocator class. | 5 // This file contains the implementation of the FencedAllocator class. |
| 6 | 6 |
| 7 #include "gpu/command_buffer/client/fenced_allocator.h" | 7 #include "gpu/command_buffer/client/fenced_allocator.h" |
| 8 | 8 |
| 9 #include <algorithm> | 9 #include <algorithm> |
| 10 | 10 |
| 11 #include "gpu/command_buffer/client/cmd_buffer_helper.h" | 11 #include "gpu/command_buffer/client/cmd_buffer_helper.h" |
| 12 | 12 |
| 13 namespace gpu { | 13 namespace gpu { |
| 14 | 14 |
| 15 namespace { | 15 namespace { |
| 16 | 16 |
| 17 // Round down to the largest multiple of kAllocAlignment no greater than |size|. | 17 // Round down to the largest multiple of kAllocAlignment no greater than |size|. |
| 18 unsigned int RoundDown(unsigned int size) { | 18 unsigned int RoundDown(unsigned int size) { |
| 19 return size & ~(FencedAllocator::kAllocAlignment - 1); | 19 return size & ~(FencedAllocator::kAllocAlignment - 1); |
| 20 } | 20 } |
| 21 | 21 |
| 22 // Round up to the smallest multiple of kAllocAlignment no smaller than |size|. | 22 // Round up to the smallest multiple of kAllocAlignment no smaller than |size|. |
| 23 unsigned int RoundUp(unsigned int size) { | 23 unsigned int RoundUp(unsigned int size) { |
| 24 return (size + (FencedAllocator::kAllocAlignment - 1)) & | 24 return (size + (FencedAllocator::kAllocAlignment - 1)) & |
| 25 ~(FencedAllocator::kAllocAlignment - 1); | 25 ~(FencedAllocator::kAllocAlignment - 1); |
| 26 } | 26 } |
| 27 | 27 |
| 28 } // namespace | 28 } // namespace |
| 29 | 29 |
// Out-of-line definitions for the static class constants declared in
// fenced_allocator.h. VS 2015 (_MSC_VER 1900) and above allow these
// definitions and in this case require them; older MSVC versions reject
// them, hence the guard.
#if !defined(_MSC_VER) || _MSC_VER >= 1900
const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
const unsigned int FencedAllocator::kAllocAlignment;
#endif
| 34 | 35 |
| 35 FencedAllocator::FencedAllocator(unsigned int size, CommandBufferHelper* helper) | 36 FencedAllocator::FencedAllocator(unsigned int size, CommandBufferHelper* helper) |
| 36 : helper_(helper), bytes_in_use_(0) { | 37 : helper_(helper), bytes_in_use_(0) { |
| 37 Block block = { FREE, 0, RoundDown(size), kUnusedToken }; | 38 Block block = { FREE, 0, RoundDown(size), kUnusedToken }; |
| 38 blocks_.push_back(block); | 39 blocks_.push_back(block); |
| 39 } | 40 } |
| 40 | 41 |
| (...skipping 207 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 248 // The blocks are in offset order, so we can do a binary search. | 249 // The blocks are in offset order, so we can do a binary search. |
| 249 FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) { | 250 FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) { |
| 250 Block templ = { IN_USE, offset, 0, kUnusedToken }; | 251 Block templ = { IN_USE, offset, 0, kUnusedToken }; |
| 251 Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(), | 252 Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(), |
| 252 templ, OffsetCmp()); | 253 templ, OffsetCmp()); |
| 253 DCHECK(it != blocks_.end() && it->offset == offset); | 254 DCHECK(it != blocks_.end() && it->offset == offset); |
| 254 return it-blocks_.begin(); | 255 return it-blocks_.begin(); |
| 255 } | 256 } |
| 256 | 257 |
| 257 } // namespace gpu | 258 } // namespace gpu |
| OLD | NEW |