| OLD | NEW |
| 1 // Protocol Buffers - Google's data interchange format | 1 // Protocol Buffers - Google's data interchange format |
| 2 // Copyright 2008 Google Inc. All rights reserved. | 2 // Copyright 2008 Google Inc. All rights reserved. |
| 3 // https://developers.google.com/protocol-buffers/ | 3 // https://developers.google.com/protocol-buffers/ |
| 4 // | 4 // |
| 5 // Redistribution and use in source and binary forms, with or without | 5 // Redistribution and use in source and binary forms, with or without |
| 6 // modification, are permitted provided that the following conditions are | 6 // modification, are permitted provided that the following conditions are |
| 7 // met: | 7 // met: |
| 8 // | 8 // |
| 9 // * Redistributions of source code must retain the above copyright | 9 // * Redistributions of source code must retain the above copyright |
| 10 // notice, this list of conditions and the following disclaimer. | 10 // notice, this list of conditions and the following disclaimer. |
| (...skipping 12 matching lines...) |
| 23 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 23 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 24 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 24 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 25 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 25 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 26 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 26 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 27 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 27 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 28 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 28 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 29 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 29 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 30 | 30 |
| 31 #include <google/protobuf/arena.h> | 31 #include <google/protobuf/arena.h> |
| 32 | 32 |
| 33 #include <algorithm> | |
| 34 #include <limits> | |
| 35 | |
| 36 | 33 |
| 37 #ifdef ADDRESS_SANITIZER | 34 #ifdef ADDRESS_SANITIZER |
| 38 #include <sanitizer/asan_interface.h> | 35 #include <sanitizer/asan_interface.h> |
| 39 #endif // ADDRESS_SANITIZER | 36 #endif |
| 40 | 37 |
| 41 namespace google { | 38 namespace google { |
| 42 namespace protobuf { | 39 namespace protobuf { |
| 43 | 40 |
| 44 | 41 |
| 45 google::protobuf::internal::SequenceNumber Arena::lifecycle_id_generator_; | 42 google::protobuf::internal::SequenceNumber Arena::lifecycle_id_generator_; |
| 46 #if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL) | 43 #if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL) |
| 47 Arena::ThreadCache& Arena::thread_cache() { | 44 Arena::ThreadCache& Arena::thread_cache() { |
| 48 static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ = | 45 static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ = |
| 49 new internal::ThreadLocalStorage<ThreadCache>(); | 46 new internal::ThreadLocalStorage<ThreadCache>(); |
| (...skipping 71 matching lines...) |
| 121 Arena::Block* Arena::NewBlock(void* me, Block* my_last_block, size_t n, | 118 Arena::Block* Arena::NewBlock(void* me, Block* my_last_block, size_t n, |
| 122 size_t start_block_size, size_t max_block_size) { | 119 size_t start_block_size, size_t max_block_size) { |
| 123 size_t size; | 120 size_t size; |
| 124 if (my_last_block != NULL) { | 121 if (my_last_block != NULL) { |
| 125 // Double the current block size, up to a limit. | 122 // Double the current block size, up to a limit. |
| 126 size = 2 * (my_last_block->size); | 123 size = 2 * (my_last_block->size); |
| 127 if (size > max_block_size) size = max_block_size; | 124 if (size > max_block_size) size = max_block_size; |
| 128 } else { | 125 } else { |
| 129 size = start_block_size; | 126 size = start_block_size; |
| 130 } | 127 } |
| 131 // Verify that n + kHeaderSize won't overflow. | 128 if (n > size - kHeaderSize) { |
| 132 GOOGLE_CHECK_LE(n, std::numeric_limits<size_t>::max() - kHeaderSize); | 129 // TODO(sanjay): Check if n + kHeaderSize would overflow |
| 133 size = std::max(size, kHeaderSize + n); | 130 size = kHeaderSize + n; |
| 131 } |
| 134 | 132 |
| 135 Block* b = reinterpret_cast<Block*>(options_.block_alloc(size)); | 133 Block* b = reinterpret_cast<Block*>(options_.block_alloc(size)); |
| 136 b->pos = kHeaderSize + n; | 134 b->pos = kHeaderSize + n; |
| 137 b->size = size; | 135 b->size = size; |
| 138 b->owner = me; | 136 if (b->avail() == 0) { |
| 137 // Do not attempt to reuse this block. |
| 138 b->owner = NULL; |
| 139 } else { |
| 140 b->owner = me; |
| 141 } |
| 139 #ifdef ADDRESS_SANITIZER | 142 #ifdef ADDRESS_SANITIZER |
| 140 // Poison the rest of the block for ASAN. It was unpoisoned by the underlying | 143 // Poison the rest of the block for ASAN. It was unpoisoned by the underlying |
| 141 // malloc but it is not usable until we return it as part of an allocation. | 144 // malloc but it is not usable until we return it as part of an allocation. |
| 142 ASAN_POISON_MEMORY_REGION( | 145 ASAN_POISON_MEMORY_REGION( |
| 143 reinterpret_cast<char*>(b) + b->pos, b->size - b->pos); | 146 reinterpret_cast<char*>(b) + b->pos, b->size - b->pos); |
| 144 #endif // ADDRESS_SANITIZER | 147 #endif |
| 145 return b; | 148 return b; |
| 146 } | 149 } |
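
A note on the sizing policy above: each fresh block doubles the previous one up to max_block_size, and a request larger than the chosen size gets an exact-fit block of kHeaderSize + n. A minimal sketch of that schedule, under assumed constants (kStartSize, kMaxSize, and the kHeaderSize value here are illustrative, not taken from this patch):

    #include <algorithm>
    #include <cstddef>

    constexpr size_t kHeaderSize = 40;    // assumed; the real value derives from sizeof(Block)
    constexpr size_t kStartSize  = 256;   // stands in for options_.start_block_size
    constexpr size_t kMaxSize    = 8192;  // stands in for options_.max_block_size

    // Size of the next block, given the last block's size (0 if none) and the
    // pending request n. Mirrors the doubling-with-cap logic in NewBlock().
    size_t NextBlockSize(size_t last_size, size_t n) {
      size_t size = (last_size != 0) ? std::min(2 * last_size, kMaxSize) : kStartSize;
      if (n > size - kHeaderSize) size = kHeaderSize + n;  // oversized request: exact fit
      return size;
    }

Doubling keeps the number of underlying malloc calls logarithmic in total bytes allocated, while the cap bounds the waste in any one block's unused tail.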
| 147 | 150 |
| 148 void Arena::AddBlock(Block* b) { | 151 void Arena::AddBlock(Block* b) { |
| 149 MutexLock l(&blocks_lock_); | 152 MutexLock l(&blocks_lock_); |
| 150 AddBlockInternal(b); | 153 AddBlockInternal(b); |
| 151 } | 154 } |
| 152 | 155 |
| 153 void Arena::AddBlockInternal(Block* b) { | 156 void Arena::AddBlockInternal(Block* b) { |
| 154 b->next = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_)); | 157 b->next = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_)); |
| (...skipping 43 matching lines...) |
| 198 return SlowAlloc(n); | 201 return SlowAlloc(n); |
| 199 } | 202 } |
| 200 return AllocFromBlock(b, n); | 203 return AllocFromBlock(b, n); |
| 201 } | 204 } |
| 202 | 205 |
| 203 void* Arena::AllocFromBlock(Block* b, size_t n) { | 206 void* Arena::AllocFromBlock(Block* b, size_t n) { |
| 204 size_t p = b->pos; | 207 size_t p = b->pos; |
| 205 b->pos = p + n; | 208 b->pos = p + n; |
| 206 #ifdef ADDRESS_SANITIZER | 209 #ifdef ADDRESS_SANITIZER |
| 207 ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<char*>(b) + p, n); | 210 ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<char*>(b) + p, n); |
| 208 #endif // ADDRESS_SANITIZER | 211 #endif |
| 209 return reinterpret_cast<char*>(b) + p; | 212 return reinterpret_cast<char*>(b) + p; |
| 210 } | 213 } |
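
AllocFromBlock is a plain pointer bump: pos is a byte offset into the block, and an allocation just advances it, unpoisoning the handed-out range when ASAN is enabled. A self-contained sketch of the same idea (this simplified Block struct is illustrative; the real header also carries next and owner):

    #include <cstddef>

    struct Block {
      size_t pos;   // offset of the first free byte, header included
      size_t size;  // total bytes in this block
      size_t avail() const { return size - pos; }
    };

    // O(1) bump allocation: no per-object bookkeeping; memory is reclaimed
    // only when the whole block is freed by the arena.
    void* AllocFromBlock(Block* b, size_t n) {
      size_t p = b->pos;
      b->pos = p + n;                         // caller has already checked avail() >= n
      return reinterpret_cast<char*>(b) + p;  // p >= header size, so this is past the header
    }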
| 211 | 214 |
| 212 void* Arena::SlowAlloc(size_t n) { | 215 void* Arena::SlowAlloc(size_t n) { |
| 213 void* me = &thread_cache(); | 216 void* me = &thread_cache(); |
| 214 Block* b = FindBlock(me); // Find block owned by me. | 217 Block* b = FindBlock(me); // Find block owned by me. |
| 215 // See if allocation fits in my latest block. | 218 // See if allocation fits in my latest block. |
| 216 if (b != NULL && b->avail() >= n) { | 219 if (b != NULL && b->avail() >= n) { |
| 217 SetThreadCacheBlock(b); | 220 SetThreadCacheBlock(b); |
| 218 google::protobuf::internal::NoBarrier_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b)); | 221 google::protobuf::internal::NoBarrier_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b)); |
| 219 return AllocFromBlock(b, n); | 222 return AllocFromBlock(b, n); |
| 220 } | 223 } |
| 221 b = NewBlock(me, b, n, options_.start_block_size, options_.max_block_size); | 224 b = NewBlock(me, b, n, options_.start_block_size, options_.max_block_size); |
| 222 AddBlock(b); | 225 AddBlock(b); |
| 223 SetThreadCacheBlock(b); | 226 if (b->owner == me) { // If this block can be reused (see NewBlock()). |
| 227 SetThreadCacheBlock(b); |
| 228 } |
| 224 return reinterpret_cast<char*>(b) + kHeaderSize; | 229 return reinterpret_cast<char*>(b) + kHeaderSize; |
| 225 } | 230 } |
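
The slow path makes the concurrency scheme visible: a thread may bump-allocate only from a block whose owner equals its own thread-cache address, so the hot path needs no lock and blocks_lock_ is taken only when a new block is linked in. One cheap way to get such a per-thread tag, sketched here with C++11 thread_local rather than the ThreadCache machinery used in the diff:

    // The address of a thread_local object is unique per live thread, so it
    // can serve as the `me` token compared against Block::owner.
    thread_local char tls_cookie;

    inline const void* CurrentThreadTag() {
      return &tls_cookie;  // stable for the thread's lifetime
    }

Comparing a single pointer is cheaper than querying a thread id on every allocation, which matters on a path this hot.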
| 226 | 231 |
| 227 uint64 Arena::SpaceAllocated() const { | 232 uint64 Arena::SpaceAllocated() const { |
| 228 uint64 space_allocated = 0; | 233 uint64 space_allocated = 0; |
| 229 Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_)); | 234 Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_)); |
| 230 while (b != NULL) { | 235 while (b != NULL) { |
| 231 space_allocated += (b->size); | 236 space_allocated += (b->size); |
| 232 b = b->next; | 237 b = b->next; |
| 233 } | 238 } |
| 234 return space_allocated; | 239 return space_allocated; |
| 235 } | 240 } |
| 236 | 241 |
| 237 uint64 Arena::SpaceUsed() const { | 242 uint64 Arena::SpaceUsed() const { |
| 238 uint64 space_used = 0; | 243 uint64 space_used = 0; |
| 239 Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_)); | 244 Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_)); |
| 240 while (b != NULL) { | 245 while (b != NULL) { |
| 241 space_used += (b->pos - kHeaderSize); | 246 space_used += (b->pos - kHeaderSize); |
| 242 b = b->next; | 247 b = b->next; |
| 243 } | 248 } |
| 244 return space_used; | 249 return space_used; |
| 245 } | 250 } |
| 246 | 251 |
| 247 std::pair<uint64, uint64> Arena::SpaceAllocatedAndUsed() const { | 252 pair<uint64, uint64> Arena::SpaceAllocatedAndUsed() const { |
| 248 uint64 allocated = 0; | 253 uint64 allocated = 0; |
| 249 uint64 used = 0; | 254 uint64 used = 0; |
| 250 | 255 |
| 251 Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_)); | 256 Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_)); |
| 252 while (b != NULL) { | 257 while (b != NULL) { |
| 253 allocated += b->size; | 258 allocated += b->size; |
| 254 used += (b->pos - kHeaderSize); | 259 used += (b->pos - kHeaderSize); |
| 255 b = b->next; | 260 b = b->next; |
| 256 } | 261 } |
| 257 return std::make_pair(allocated, used); | 262 return std::make_pair(allocated, used); |
| 258 } | 263 } |
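
All three accounting methods above walk the same singly linked block list, so each call is O(number of blocks); SpaceAllocated() sums whole block sizes while SpaceUsed() subtracts the header and counts only bytes actually handed out. SpaceAllocatedAndUsed() exists so callers wanting both totals pay for one traversal instead of two. A reduced sketch of that single pass (the standalone Block and kHeaderSize are illustrative, as before):

    #include <cstddef>
    #include <cstdint>
    #include <utility>

    struct Block { Block* next; size_t pos, size; };
    constexpr size_t kHeaderSize = 40;  // assumed, for illustration

    // One walk of the list yields both totals; allocated >= used always holds,
    // the gap being header overhead plus each block's unallocated tail.
    std::pair<uint64_t, uint64_t> SpaceAllocatedAndUsed(const Block* b) {
      uint64_t allocated = 0, used = 0;
      for (; b != nullptr; b = b->next) {
        allocated += b->size;
        used += b->pos - kHeaderSize;
      }
      return {allocated, used};
    }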
| 259 | 264 |
| 260 uint64 Arena::FreeBlocks() { | 265 uint64 Arena::FreeBlocks() { |
| 261 uint64 space_allocated = 0; | 266 uint64 space_allocated = 0; |
| 262 Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_)); | 267 Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_)); |
| 263 Block* first_block = NULL; | 268 Block* first_block = NULL; |
| 264 while (b != NULL) { | 269 while (b != NULL) { |
| 265 space_allocated += (b->size); | 270 space_allocated += (b->size); |
| 266 Block* next = b->next; | 271 Block* next = b->next; |
| 267 if (next != NULL) { | 272 if (next != NULL) { |
| 268 #ifdef ADDRESS_SANITIZER | |
| 269 // This memory was provided by the underlying allocator as unpoisoned, so | |
| 270 // return it in an unpoisoned state. | |
| 271 ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<char*>(b), b->size); | |
| 272 #endif // ADDRESS_SANITIZER | |
| 273 options_.block_dealloc(b, b->size); | 273 options_.block_dealloc(b, b->size); |
| 274 } else { | 274 } else { |
| 275 if (owns_first_block_) { | 275 if (owns_first_block_) { |
| 276 #ifdef ADDRESS_SANITIZER | |
| 277 // This memory was provided by the underlying allocator as unpoisoned, | |
| 278 // so return it in an unpoisoned state. | |
| 279 ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<char*>(b), b->size); | |
| 280 #endif // ADDRESS_SANITIZER | |
| 281 options_.block_dealloc(b, b->size); | 276 options_.block_dealloc(b, b->size); |
| 282 } else { | 277 } else { |
| 283 // User passed in the first block, skip freeing the memory. | 278 // User passed in the first block, skip freeing the memory. |
| 284 first_block = b; | 279 first_block = b; |
| 285 } | 280 } |
| 286 } | 281 } |
| 287 b = next; | 282 b = next; |
| 288 } | 283 } |
| 289 blocks_ = 0; | 284 blocks_ = 0; |
| 290 hint_ = 0; | 285 hint_ = 0; |
| (...skipping 26 matching lines...) |
| 317 // entry per thread. | 312 // entry per thread. |
| 318 Block* b = reinterpret_cast<Block*>(google::protobuf::internal::Acquire_Load(&blocks_)); | 313 Block* b = reinterpret_cast<Block*>(google::protobuf::internal::Acquire_Load(&blocks_)); |
| 319 while (b != NULL && b->owner != me) { | 314 while (b != NULL && b->owner != me) { |
| 320 b = b->next; | 315 b = b->next; |
| 321 } | 316 } |
| 322 return b; | 317 return b; |
| 323 } | 318 } |
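
FindBlock reads the list without taking blocks_lock_: writers publish a new head only after fully initializing it, and readers use an acquire load, so they always observe a consistent next chain even while another thread is prepending. A reduced sketch of that publication pattern using std::atomic in place of the atomicops primitives in the diff (this code predates std::atomic):

    #include <atomic>

    struct Block {
      Block* next;
      const void* owner;
    };

    std::atomic<Block*> head{nullptr};

    // Writers run under the arena's mutex in the real code; the release store
    // is what makes the fully built node visible to lock-free readers.
    void Prepend(Block* b) {
      b->next = head.load(std::memory_order_relaxed);
      head.store(b, std::memory_order_release);
    }

    // Lock-free scan; the acquire here pairs with the release store above.
    Block* Find(const void* me) {
      Block* b = head.load(std::memory_order_acquire);
      while (b != nullptr && b->owner != me) b = b->next;
      return b;
    }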
| 324 | 319 |
| 325 } // namespace protobuf | 320 } // namespace protobuf |
| 326 } // namespace google | 321 } // namespace google |