Index: third_party/protobuf/src/google/protobuf/arena.cc
diff --git a/third_party/protobuf/src/google/protobuf/arena.cc b/third_party/protobuf/src/google/protobuf/arena.cc
index 21fd1be61ca2d6e99cd9d70859263a38903d42ce..613e5897144151967594abc84da94bc610cb0ce2 100755
--- a/third_party/protobuf/src/google/protobuf/arena.cc
+++ b/third_party/protobuf/src/google/protobuf/arena.cc
@@ -30,13 +30,10 @@
 
 #include <google/protobuf/arena.h>
 
-#include <algorithm>
-#include <limits>
-
 
 #ifdef ADDRESS_SANITIZER
 #include <sanitizer/asan_interface.h>
-#endif  // ADDRESS_SANITIZER
+#endif
 
 namespace google {
 namespace protobuf {
@@ -128,20 +125,26 @@ Arena::Block* Arena::NewBlock(void* me, Block* my_last_block, size_t n,
   } else {
     size = start_block_size;
   }
-  // Verify that n + kHeaderSize won't overflow.
-  GOOGLE_CHECK_LE(n, std::numeric_limits<size_t>::max() - kHeaderSize);
-  size = std::max(size, kHeaderSize + n);
+  if (n > size - kHeaderSize) {
+    // TODO(sanjay): Check if n + kHeaderSize would overflow
+    size = kHeaderSize + n;
+  }
 
   Block* b = reinterpret_cast<Block*>(options_.block_alloc(size));
   b->pos = kHeaderSize + n;
   b->size = size;
-  b->owner = me;
+  if (b->avail() == 0) {
+    // Do not attempt to reuse this block.
+    b->owner = NULL;
+  } else {
+    b->owner = me;
+  }
 #ifdef ADDRESS_SANITIZER
   // Poison the rest of the block for ASAN. It was unpoisoned by the underlying
   // malloc but it's not yet usable until we return it as part of an allocation.
   ASAN_POISON_MEMORY_REGION(
       reinterpret_cast<char*>(b) + b->pos, b->size - b->pos);
-#endif  // ADDRESS_SANITIZER
+#endif
   return b;
 }
 
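
For readers skimming the hunk above: the new sizing and ownership rules in
NewBlock() boil down to the standalone sketch below. This is illustrative
only; the Block layout, the kHeaderSize value, and the use of plain malloc()
in place of options_.block_alloc() are assumptions for the sketch, and the
doubling of the previous block size (capped by max_block_size) is omitted.

// Illustrative sketch of the patched NewBlock() logic; not the real arena.h types.
#include <stddef.h>
#include <stdlib.h>

namespace arena_sketch {

struct Block {
  void* owner;   // Thread that may keep allocating here; NULL means "do not reuse".
  size_t pos;    // Offset of the first free byte within the block.
  size_t size;   // Total bytes in the block, header included.
  size_t avail() const { return size - pos; }
};

const size_t kHeaderSize = sizeof(Block);  // The real value is computed in arena.h.

Block* NewBlock(void* me, size_t n, size_t start_block_size) {
  // Grow the block only when the request (plus header) does not fit. As the
  // TODO in the patch notes, kHeaderSize + n can still overflow; the sketch
  // does not address that either.
  size_t size = start_block_size;
  if (n > size - kHeaderSize) {
    size = kHeaderSize + n;
  }
  Block* b = static_cast<Block*>(malloc(size));  // stands in for block_alloc()
  b->pos = kHeaderSize + n;
  b->size = size;
  if (b->avail() == 0) {
    // The first allocation consumed the whole block (size == kHeaderSize + n),
    // so there is nothing left for the thread cache to hand out later.
    b->owner = NULL;
  } else {
    b->owner = me;
  }
  return b;
}

}  // namespace arena_sketch
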
@@ -205,7 +208,7 @@ void* Arena::AllocFromBlock(Block* b, size_t n) {
   b->pos = p + n;
 #ifdef ADDRESS_SANITIZER
   ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<char*>(b) + p, n);
-#endif  // ADDRESS_SANITIZER
+#endif
   return reinterpret_cast<char*>(b) + p;
 }
 
@@ -220,7 +223,9 @@ void* Arena::SlowAlloc(size_t n) {
   }
   b = NewBlock(me, b, n, options_.start_block_size, options_.max_block_size);
   AddBlock(b);
-  SetThreadCacheBlock(b);
+  if (b->owner == me) {  // If this block can be reused (see NewBlock()).
+    SetThreadCacheBlock(b);
+  }
   return reinterpret_cast<char*>(b) + kHeaderSize;
 }
 
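
The SlowAlloc() hunk above is the consumer of that owner flag. A sketch of the
interaction, reusing the hypothetical types from the previous sketch and
reducing the thread-cache plumbing to a single out-parameter:

// Illustrative only: the fresh block becomes the thread's cached block only
// when NewBlock() left it reusable (owner == me).
void* SlowAllocSketch(void* me, size_t n, size_t start_block_size,
                      arena_sketch::Block** thread_cache_block) {
  arena_sketch::Block* b = arena_sketch::NewBlock(me, n, start_block_size);
  if (b->owner == me) {
    // A full block carries owner == NULL, so it is never cached.
    *thread_cache_block = b;
  }
  // The caller's memory starts right after the header, as in the real code.
  return reinterpret_cast<char*>(b) + arena_sketch::kHeaderSize;
}
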
@@ -244,7 +249,7 @@ uint64 Arena::SpaceUsed() const {
   return space_used;
 }
 
-std::pair<uint64, uint64> Arena::SpaceAllocatedAndUsed() const {
+pair<uint64, uint64> Arena::SpaceAllocatedAndUsed() const {
   uint64 allocated = 0;
   uint64 used = 0;
 
@@ -265,19 +270,9 @@ uint64 Arena::FreeBlocks() {
     space_allocated += (b->size);
     Block* next = b->next;
     if (next != NULL) {
-#ifdef ADDRESS_SANITIZER
-      // This memory was provided by the underlying allocator as unpoisoned, so
-      // return it in an unpoisoned state.
-      ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<char*>(b), b->size);
-#endif  // ADDRESS_SANITIZER
       options_.block_dealloc(b, b->size);
     } else {
       if (owns_first_block_) {
-#ifdef ADDRESS_SANITIZER
-        // This memory was provided by the underlying allocator as unpoisoned,
-        // so return it in an unpoisoned state.
-        ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<char*>(b), b->size);
-#endif  // ADDRESS_SANITIZER
         options_.block_dealloc(b, b->size);
       } else {
         // User passed in the first block, skip free'ing the memory.
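
Across the hunks, the ASAN annotations settle into a simple pattern: NewBlock()
poisons the unused tail of a fresh block, AllocFromBlock() unpoisons exactly
the bytes it hands out, and the unpoison-before-dealloc calls in FreeBlocks()
are dropped. A compressed sketch of the two calls that remain, guarded the same
way as in the patch; the helper names here are made up for illustration:

#include <stddef.h>

#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif

// 'block' points at the start of an arena block, 'pos' is the first free
// byte, 'size' is the total block size.
inline void PoisonFreeTail(char* block, size_t pos, size_t size) {
#ifdef ADDRESS_SANITIZER
  // As in NewBlock(): everything past the current position is off limits
  // until it is returned from an allocation.
  ASAN_POISON_MEMORY_REGION(block + pos, size - pos);
#endif
}

inline void* UnpoisonAllocation(char* block, size_t offset, size_t n) {
#ifdef ADDRESS_SANITIZER
  // As in AllocFromBlock(): only the n bytes being handed out become
  // addressable again.
  ASAN_UNPOISON_MEMORY_REGION(block + offset, n);
#endif
  return block + offset;
}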