Index: third_party/tcmalloc/chromium/src/base/low_level_alloc.cc
===================================================================
--- third_party/tcmalloc/chromium/src/base/low_level_alloc.cc (revision 41940)
+++ third_party/tcmalloc/chromium/src/base/low_level_alloc.cc (working copy)
@@ -67,7 +67,7 @@
                     // first. Valid in both allocated and unallocated blocks
     intptr_t magic; // kMagicAllocated or kMagicUnallocated xor this
     LowLevelAlloc::Arena *arena; // pointer to parent arena
-    void *dummy_for_alignment; // aligns regions to 0 mod 2*sizeof(void*)
+    void *dummy_for_alignment;  // aligns regions to 0 mod 2*sizeof(void*)
   } header;
 
   // Next two fields: in unallocated blocks: freelist skiplist data
@@ -197,57 +197,15 @@
 // pointer.
 static struct LowLevelAlloc::Arena default_arena;
 
-// Non-malloc-hooked arenas: used only to allocate metadata for arenas that
+// A non-malloc-hooked arena: used only to allocate metadata for arenas that
 // do not want malloc hook reporting, so that for them there's no malloc hook
 // reporting even during arena creation.
 static struct LowLevelAlloc::Arena unhooked_arena;
-static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;
 
 // magic numbers to identify allocated and unallocated blocks
 static const intptr_t kMagicAllocated = 0x4c833e95;
 static const intptr_t kMagicUnallocated = ~kMagicAllocated;
 
-namespace {
-  class ArenaLock {
-   public:
-    explicit ArenaLock(LowLevelAlloc::Arena *arena) :
-      left_(false), mask_valid_(false), arena_(arena) {
-      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
-        // We've decided not to support async-signal-safe arena use until
-        // there a demonstrated need. Here's how one could do it though
-        // (would need to be made more portable).
-#if 0
-        sigset_t all;
-        sigfillset(&all);
-        this->mask_valid_ =
-          (pthread_sigmask(SIG_BLOCK, &all, &this->mask_) == 0);
-#else
-        RAW_CHECK(false, "We do not yet support async-signal-safe arena.");
-#endif
-      }
-      this->arena_->mu.Lock();
-    }
-    ~ArenaLock() { RAW_CHECK(this->left_, "haven't left Arena region"); }
-    void Leave() {
-      this->arena_->mu.Unlock();
-#if 0
-      if (this->mask_valid_) {
-        pthread_sigmask(SIG_SETMASK, &this->mask_, 0);
-      }
-#endif
-      this->left_ = true;
-    }
-   private:
-    bool left_; // whether left region
-    bool mask_valid_;
-#if 0
-    sigset_t mask_; // old mask of blocked signals
-#endif
-    LowLevelAlloc::Arena *arena_;
-    DISALLOW_COPY_AND_ASSIGN(ArenaLock);
-  };
-} // anonymous namespace
-
 // create an appropriate magic number for an object at "ptr"
 // "magic" should be kMagicAllocated or kMagicUnallocated
 inline static intptr_t Magic(intptr_t magic, AllocList::Header *ptr) {
@@ -277,8 +235,6 @@
       // Default arena should be hooked, e.g. for heap-checker to trace
       // pointer chains through objects in the default arena.
       arena->flags = LowLevelAlloc::kCallMallocHook;
-    } else if (arena == &unhooked_async_sig_safe_arena) {
-      arena->flags = LowLevelAlloc::kAsyncSignalSafe;
     } else {
       arena->flags = 0; // other arenas' flags may be overridden by client,
                         // but unhooked_arena will have 0 in 'flags'.
@@ -290,12 +246,9 @@
 LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32 flags,
                                               Arena *meta_data_arena) {
   RAW_CHECK(meta_data_arena != 0, "must pass a valid arena");
-  if (meta_data_arena == &default_arena) {
-    if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
-      meta_data_arena = &unhooked_async_sig_safe_arena;
-    } else if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
-      meta_data_arena = &unhooked_arena;
-    }
+  if (meta_data_arena == &default_arena &&
+      (flags & LowLevelAlloc::kCallMallocHook) == 0) {
+    meta_data_arena = &unhooked_arena;
   }
   // Arena(0) uses the constructor for non-static contexts
   Arena *result =
@@ -309,9 +262,9 @@
 bool LowLevelAlloc::DeleteArena(Arena *arena) {
   RAW_CHECK(arena != 0 && arena != &default_arena && arena != &unhooked_arena,
             "may not delete default arena");
-  ArenaLock section(arena);
+  arena->mu.Lock();
   bool empty = (arena->allocation_count == 0);
-  section.Leave();
+  arena->mu.Unlock();
   if (empty) {
     while (arena->freelist.next[0] != 0) {
       AllocList *region = arena->freelist.next[0];
@@ -326,13 +279,7 @@
                 "empty arena has non-page-aligned block size");
       RAW_CHECK(reinterpret_cast<intptr_t>(region) % arena->pagesize == 0,
                 "empty arena has non-page-aligned block");
-      int munmap_result;
-      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
-        munmap_result = munmap(region, size);
-      } else {
-        munmap_result = MallocHook::UnhookedMUnmap(region, size);
-      }
-      RAW_CHECK(munmap_result == 0,
+      RAW_CHECK(munmap(region, size) == 0,
                 "LowLevelAlloc::DeleteArena: munmap failed address");
     }
     Free(arena);
@@ -416,21 +363,21 @@
     if ((arena->flags & kCallMallocHook) != 0) {
       MallocHook::InvokeDeleteHook(v);
     }
-    ArenaLock section(arena);
+    arena->mu.Lock();
     AddToFreelist(v, arena);
     RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
     arena->allocation_count--;
-    section.Leave();
+    arena->mu.Unlock();
   }
 }
 
 // allocates and returns a block of size bytes, to be freed with Free()
 // L < arena->mu
-static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
+void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
   void *result = 0;
   if (request != 0) {
     AllocList *s; // will point to region that satisfies request
-    ArenaLock section(arena);
+    arena->mu.Lock();
     ArenaInit(arena);
     // round up with header
     size_t req_rnd = RoundUp(request + sizeof (s->header), arena->roundup);
@@ -452,14 +399,8 @@
       // mmap generous 64K chunks to decrease
       // the chances/impact of fragmentation:
       size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
-      void *new_pages;
-      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
-        new_pages = MallocHook::UnhookedMMap(0, new_pages_size,
-            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
-      } else {
-        new_pages = mmap(0, new_pages_size,
-            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
-      }
+      void *new_pages = mmap(0, new_pages_size,
+          PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
       RAW_CHECK(new_pages != MAP_FAILED, "mmap error");
       arena->mu.Lock();
       s = reinterpret_cast<AllocList *>(new_pages);
@@ -484,7 +425,7 @@
     s->header.magic = Magic(kMagicAllocated, &s->header);
     RAW_CHECK(s->header.arena == arena, "");
     arena->allocation_count++;
-    section.Leave();
+    arena->mu.Unlock();
     result = &s->levels;
   }
   ANNOTATE_NEW_MEMORY(result, request);
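Note on the technique dropped by this patch: the deleted ArenaLock guard only sketched (inside "#if 0") how an async-signal-safe arena would have worked: block every signal for the calling thread with sigfillset()/pthread_sigmask() before taking the arena lock, so no signal handler can run while the lock is held, then restore the previous mask in Leave(). The standalone sketch below illustrates that pattern with the same POSIX calls; the ScopedSignalBlock class, the main() demo, and the build line are illustrative for this note only and are not part of tcmalloc or of this patch.

// Illustrative build line (not from the patch): g++ -std=c++11 -pthread sigblock.cc
#include <pthread.h>
#include <signal.h>

// RAII guard mirroring the pthread_sigmask()/sigfillset() approach outlined
// in the "#if 0" block of the removed ArenaLock class.
class ScopedSignalBlock {
 public:
  ScopedSignalBlock() {
    sigset_t all;
    sigfillset(&all);                 // every signal
    // Block them all, remembering the previous mask so it can be restored.
    mask_valid_ = (pthread_sigmask(SIG_BLOCK, &all, &old_mask_) == 0);
  }
  ~ScopedSignalBlock() {
    if (mask_valid_) {
      pthread_sigmask(SIG_SETMASK, &old_mask_, 0);  // restore previous mask
    }
  }
 private:
  bool mask_valid_;
  sigset_t old_mask_;   // mask of signals blocked before construction
  // Non-copyable (roughly what DISALLOW_COPY_AND_ASSIGN expands to).
  ScopedSignalBlock(const ScopedSignalBlock&);
  void operator=(const ScopedSignalBlock&);
};

int main() {
  pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
  {
    ScopedSignalBlock block;     // no signal handler can run on this thread,
    pthread_mutex_lock(&mu);     // so a handler cannot deadlock against mu
    // ... critical section ...
    pthread_mutex_unlock(&mu);
  }                              // previous signal mask restored here
  pthread_mutex_destroy(&mu);
  return 0;
}

With kAsyncSignalSafe gone, the patch takes the arena's lock with plain arena->mu.Lock()/Unlock() calls and uses ordinary mmap()/munmap() in place of the MallocHook::UnhookedMMap/UnhookedMUnmap variants.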