Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "platform/graphics/ContiguousContainer.h" | 5 #include "platform/graphics/ContiguousContainer.h" |
| 6 | 6 |
| 7 #include "wtf/Allocator.h" | 7 #include "wtf/Allocator.h" |
| 8 #include "wtf/ContainerAnnotations.h" | 8 #include "wtf/ContainerAnnotations.h" |
| 9 #include "wtf/PtrUtil.h" | 9 #include "wtf/PtrUtil.h" |
| 10 #include "wtf/allocator/PartitionAlloc.h" | 10 #include "wtf/allocator/PartitionAlloc.h" |
| 11 #include "wtf/allocator/Partitions.h" | 11 #include "wtf/allocator/Partitions.h" |
| 12 #include <algorithm> | 12 #include <algorithm> |
| 13 #include <memory> | 13 #include <memory> |
| 14 | 14 |
| 15 namespace blink { | 15 namespace blink { |
| 16 | 16 |
| 17 // Default number of max-sized elements to allocate space for, if there is no | 17 // Default number of max-sized elements to allocate space for, if there is no |
| 18 // initial buffer. | 18 // initial buffer. |
| 19 static const unsigned kDefaultInitialBufferSize = 32; | 19 static const unsigned kDefaultInitialBufferSize = 32; |
| 20 | 20 |
| 21 static inline char* alignAddress(char* address, unsigned log2Alignment) | |
| 22 { | |
| 23 char* aligned = reinterpret_cast<char*>( | |
| 24 (((reinterpret_cast<size_t>(address) - 1) >> log2Alignment) + 1) << log2Alignment); | |
|
jbroman
2016/07/04 19:21:35
why size_t and not uintptr_t?
| |
| 25 DCHECK_EQ(0u, reinterpret_cast<size_t>(aligned) % (1 << log2Alignment)); | |
| 26 DCHECK(aligned >= address); | |
| 27 DCHECK(aligned < address + (1 << log2Alignment)); | |
| 28 return aligned; | |
| 29 } | |
| 30 | |
| 21 class ContiguousContainerBase::Buffer { | 31 class ContiguousContainerBase::Buffer { |
| 22 WTF_MAKE_NONCOPYABLE(Buffer); | 32 WTF_MAKE_NONCOPYABLE(Buffer); |
| 23 USING_FAST_MALLOC(Buffer); | 33 USING_FAST_MALLOC(Buffer); |
| 24 public: | 34 public: |
| 25 Buffer(size_t bufferSize, const char* typeName) | 35 Buffer(size_t bufferSize, const char* typeName) |
| 26 { | 36 { |
| 27 m_capacity = WTF::Partitions::bufferActualSize(bufferSize); | 37 m_capacity = WTF::Partitions::bufferActualSize(bufferSize); |
| 28 m_begin = m_end = static_cast<char*>( | 38 m_begin = m_end = static_cast<char*>( |
| 29 WTF::Partitions::bufferMalloc(m_capacity, typeName)); | 39 WTF::Partitions::bufferMalloc(m_capacity, typeName)); |
| 30 ANNOTATE_NEW_BUFFER(m_begin, m_capacity, 0); | 40 ANNOTATE_NEW_BUFFER(m_begin, m_capacity, 0); |
| 31 } | 41 } |
| 32 | 42 |
| 33 ~Buffer() | 43 ~Buffer() |
| 34 { | 44 { |
| 35 ANNOTATE_DELETE_BUFFER(m_begin, m_capacity, usedCapacity()); | 45 ANNOTATE_DELETE_BUFFER(m_begin, m_capacity, usedCapacity()); |
| 36 WTF::Partitions::bufferFree(m_begin); | 46 WTF::Partitions::bufferFree(m_begin); |
| 37 } | 47 } |
| 38 | 48 |
| 39 size_t capacity() const { return m_capacity; } | 49 size_t capacity() const { return m_capacity; } |
| 40 size_t usedCapacity() const { return m_end - m_begin; } | 50 size_t usedCapacity() const { return m_end - m_begin; } |
| 41 size_t unusedCapacity() const { return capacity() - usedCapacity(); } | |
| 42 bool isEmpty() const { return usedCapacity() == 0; } | 51 bool isEmpty() const { return usedCapacity() == 0; } |
| 43 | 52 |
| 44 void* allocate(size_t objectSize) | 53 void* allocate(size_t objectSize, unsigned log2Alignment) |
| 45 { | 54 { |
| 46 ASSERT(unusedCapacity() >= objectSize); | 55 char* alignedAddress; |
| 56 if (isEmpty()) { | |
| 57 // m_begin's alignment must be suitable for all possible types. | |
| 58 DCHECK(m_begin == alignAddress(m_begin, log2Alignment)); | |
| 59 DCHECK(objectSize <= m_capacity); | |
| 60 alignedAddress = m_begin; | |
| 61 } else { | |
| 62 alignedAddress = alignAddress(m_end, log2Alignment); | |
| 63 if (alignedAddress - m_begin + objectSize > m_capacity) | |
| 64 return nullptr; | |
| 65 } | |
| 47 ANNOTATE_CHANGE_SIZE( | 66 ANNOTATE_CHANGE_SIZE( |
| 48 m_begin, m_capacity, usedCapacity(), usedCapacity() + objectSize); | 67 m_begin, m_capacity, usedCapacity(), usedCapacity() + (alignedAddress - m_end) + objectSize); |
| 49 void* result = m_end; | 68 m_end = alignedAddress + objectSize; |
| 50 m_end += objectSize; | 69 return alignedAddress; |
| 51 return result; | |
| 52 } | 70 } |
| 53 | 71 |
| 54 void deallocateLastObject(void* object) | 72 void deallocateLastObject(void* object) |
| 55 { | 73 { |
| 56 RELEASE_ASSERT(m_begin <= object && object < m_end); | 74 RELEASE_ASSERT(m_begin <= object && object < m_end); |
| 57 ANNOTATE_CHANGE_SIZE( | 75 ANNOTATE_CHANGE_SIZE( |
| 58 m_begin, m_capacity, usedCapacity(), static_cast<char*>(object) - m_begin); | 76 m_begin, m_capacity, usedCapacity(), static_cast<char*>(object) - m_begin); |
| 59 m_end = static_cast<char*>(object); | 77 m_end = static_cast<char*>(object); |
| 78 // We may leave a gap between the end of the previous object (which we don't know) | |
| 79 // and m_end because of alignment of the deallocated object. | |
|
jbroman
2016/07/04 19:21:35
...which seems weird to me.
| |
| 60 } | 80 } |
| 61 | 81 |
| 62 private: | 82 private: |
| 63 // m_begin <= m_end <= m_begin + m_capacity | 83 // m_begin <= m_end <= m_begin + m_capacity |
| 64 char* m_begin; | 84 char* m_begin; |
| 65 char* m_end; | 85 char* m_end; |
| 66 size_t m_capacity; | 86 size_t m_capacity; |
| 67 }; | 87 }; |
| 68 | 88 |
| 69 ContiguousContainerBase::ContiguousContainerBase(size_t maxObjectSize) | 89 ContiguousContainerBase::ContiguousContainerBase(size_t maxObjectSize) |
| (...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 108 { | 128 { |
| 109 return sizeof(*this) + capacityInBytes() | 129 return sizeof(*this) + capacityInBytes() |
| 110 + m_elements.capacity() * sizeof(m_elements[0]); | 130 + m_elements.capacity() * sizeof(m_elements[0]); |
| 111 } | 131 } |
| 112 | 132 |
| 113 void ContiguousContainerBase::reserveInitialCapacity(size_t bufferSize, const char* typeName) | 133 void ContiguousContainerBase::reserveInitialCapacity(size_t bufferSize, const char* typeName) |
| 114 { | 134 { |
| 115 allocateNewBufferForNextAllocation(bufferSize, typeName); | 135 allocateNewBufferForNextAllocation(bufferSize, typeName); |
| 116 } | 136 } |
| 117 | 137 |
| 118 void* ContiguousContainerBase::allocate(size_t objectSize, const char* typeName) | 138 void* ContiguousContainerBase::allocate(size_t objectSize, unsigned log2Alignment, const char* typeName) |
| 119 { | 139 { |
| 120 ASSERT(objectSize <= m_maxObjectSize); | 140 DCHECK(objectSize <= m_maxObjectSize); |
| 121 | 141 |
| 122 Buffer* bufferForAlloc = nullptr; | 142 void* element = m_buffers.isEmpty() ? nullptr : m_buffers[m_endIndex]->allocate(objectSize, log2Alignment); |
| 123 if (!m_buffers.isEmpty()) { | 143 if (!element) { |
| 124 Buffer* endBuffer = m_buffers[m_endIndex].get(); | 144 Buffer* bufferForAlloc; |
| 125 if (endBuffer->unusedCapacity() >= objectSize) | 145 if (m_endIndex + 1 < m_buffers.size()) { |
| 126 bufferForAlloc = endBuffer; | |
| 127 else if (m_endIndex + 1 < m_buffers.size()) | |
| 128 bufferForAlloc = m_buffers[++m_endIndex].get(); | 146 bufferForAlloc = m_buffers[++m_endIndex].get(); |
| 147 } else { | |
| 148 size_t newBufferSize = m_buffers.isEmpty() | |
| 149 ? kDefaultInitialBufferSize * m_maxObjectSize | |
| 150 : 2 * m_buffers.last()->capacity(); | |
| 151 bufferForAlloc = allocateNewBufferForNextAllocation(newBufferSize, typeName); | |
| 152 } | |
| 153 element = bufferForAlloc->allocate(objectSize, log2Alignment); | |
| 154 DCHECK(element); | |
| 129 } | 155 } |
| 130 | |
| 131 if (!bufferForAlloc) { | |
| 132 size_t newBufferSize = m_buffers.isEmpty() | |
| 133 ? kDefaultInitialBufferSize * m_maxObjectSize | |
| 134 : 2 * m_buffers.last()->capacity(); | |
| 135 bufferForAlloc = allocateNewBufferForNextAllocation(newBufferSize, typeName); | |
| 136 } | |
| 137 | |
| 138 void* element = bufferForAlloc->allocate(objectSize); | |
| 139 m_elements.append(element); | 156 m_elements.append(element); |
| 140 return element; | 157 return element; |
| 141 } | 158 } |
| 142 | 159 |
| 143 void ContiguousContainerBase::removeLast() | 160 void ContiguousContainerBase::removeLast() |
| 144 { | 161 { |
| 145 void* object = m_elements.last(); | 162 void* object = m_elements.last(); |
| 146 m_elements.removeLast(); | 163 m_elements.removeLast(); |
| 147 | 164 |
| 148 Buffer* endBuffer = m_buffers[m_endIndex].get(); | 165 Buffer* endBuffer = m_buffers[m_endIndex].get(); |
| (...skipping 27 matching lines...) Expand all Loading... | |
| 176 { | 193 { |
| 177 ASSERT(m_buffers.isEmpty() || m_endIndex == m_buffers.size() - 1); | 194 ASSERT(m_buffers.isEmpty() || m_endIndex == m_buffers.size() - 1); |
| 178 std::unique_ptr<Buffer> newBuffer = wrapUnique(new Buffer(bufferSize, typeName)); | 195 std::unique_ptr<Buffer> newBuffer = wrapUnique(new Buffer(bufferSize, typeName)); |
| 179 Buffer* bufferToReturn = newBuffer.get(); | 196 Buffer* bufferToReturn = newBuffer.get(); |
| 180 m_buffers.append(std::move(newBuffer)); | 197 m_buffers.append(std::move(newBuffer)); |
| 181 m_endIndex = m_buffers.size() - 1; | 198 m_endIndex = m_buffers.size() - 1; |
| 182 return bufferToReturn; | 199 return bufferToReturn; |
| 183 } | 200 } |
| 184 | 201 |
| 185 } // namespace blink | 202 } // namespace blink |
| OLD | NEW |