Chromium Code Reviews

Unified Diff: third_party/tcmalloc/chromium/src/base/low_level_alloc.cc

Issue 576001: Merged third_party/tcmalloc/vendor/src(google-perftools r87) into... (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Removed the unnecessary printf and ASSERT(0). Created 10 years, 9 months ago
Index: third_party/tcmalloc/chromium/src/base/low_level_alloc.cc
===================================================================
--- third_party/tcmalloc/chromium/src/base/low_level_alloc.cc (revision 41942)
+++ third_party/tcmalloc/chromium/src/base/low_level_alloc.cc (working copy)
@@ -67,7 +67,7 @@
// first. Valid in both allocated and unallocated blocks
intptr_t magic; // kMagicAllocated or kMagicUnallocated xor this
LowLevelAlloc::Arena *arena; // pointer to parent arena
- void *dummy_for_alignment; // aligns regions to 0 mod 2*sizeof(void*)
+ void *dummy_for_alignment; // aligns regions to 0 mod 2*sizeof(void*)
} header;
// Next two fields: in unallocated blocks: freelist skiplist data
@@ -197,15 +197,57 @@
// pointer.
static struct LowLevelAlloc::Arena default_arena;
-// A non-malloc-hooked arena: used only to allocate metadata for arenas that
+// Non-malloc-hooked arenas: used only to allocate metadata for arenas that
// do not want malloc hook reporting, so that for them there's no malloc hook
// reporting even during arena creation.
static struct LowLevelAlloc::Arena unhooked_arena;
+static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;
// magic numbers to identify allocated and unallocated blocks
static const intptr_t kMagicAllocated = 0x4c833e95;
static const intptr_t kMagicUnallocated = ~kMagicAllocated;
+namespace {
+ class ArenaLock {
+ public:
+ explicit ArenaLock(LowLevelAlloc::Arena *arena) :
+ left_(false), mask_valid_(false), arena_(arena) {
+ if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+ // We've decided not to support async-signal-safe arena use until
+ // there is a demonstrated need. Here's how one could do it, though
+ // (would need to be made more portable).
+#if 0
+ sigset_t all;
+ sigfillset(&all);
+ this->mask_valid_ =
+ (pthread_sigmask(SIG_BLOCK, &all, &this->mask_) == 0);
+#else
+ RAW_CHECK(false, "We do not yet support async-signal-safe arena.");
+#endif
+ }
+ this->arena_->mu.Lock();
+ }
+ ~ArenaLock() { RAW_CHECK(this->left_, "haven't left Arena region"); }
+ void Leave() {
+ this->arena_->mu.Unlock();
+#if 0
+ if (this->mask_valid_) {
+ pthread_sigmask(SIG_SETMASK, &this->mask_, 0);
+ }
+#endif
+ this->left_ = true;
+ }
+ private:
+ bool left_; // whether left region
+ bool mask_valid_;
+#if 0
+ sigset_t mask_; // old mask of blocked signals
+#endif
+ LowLevelAlloc::Arena *arena_;
+ DISALLOW_COPY_AND_ASSIGN(ArenaLock);
+ };
+} // anonymous namespace
+
// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated
inline static intptr_t Magic(intptr_t magic, AllocList::Header *ptr) {
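For reference, the ArenaLock class added in the hunk above is a scoped-lock helper: the constructor optionally blocks all signals (the disabled #if 0 path) and then takes the arena's mutex, Leave() releases the mutex and would restore the saved signal mask, and the destructor only asserts that Leave() was actually called. Below is a minimal standalone sketch of that same pattern; the ScopedArenaSection name and the plain pthread mutex are illustrative stand-ins, not part of this patch or of SpinLock.

#include <assert.h>
#include <pthread.h>
#include <signal.h>

// Stand-in for the arena's SpinLock, for illustration only.
struct Mutex {
  Mutex()       { pthread_mutex_init(&m, NULL); }
  void Lock()   { pthread_mutex_lock(&m); }
  void Unlock() { pthread_mutex_unlock(&m); }
  pthread_mutex_t m;
};

class ScopedArenaSection {   // hypothetical analogue of ArenaLock
 public:
  ScopedArenaSection(Mutex *mu, bool async_signal_safe)
      : left_(false), mask_valid_(false), mu_(mu) {
    if (async_signal_safe) {
      // Block every signal so a handler cannot re-enter the allocator
      // while this thread holds the arena lock.
      sigset_t all;
      sigfillset(&all);
      mask_valid_ = (pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0);
    }
    mu_->Lock();
  }
  void Leave() {
    mu_->Unlock();
    if (mask_valid_) {
      pthread_sigmask(SIG_SETMASK, &mask_, 0);  // restore the old mask
    }
    left_ = true;
  }
  ~ScopedArenaSection() {
    assert(left_ && "must call Leave() before scope exit");
  }
 private:
  bool left_;
  bool mask_valid_;
  sigset_t mask_;
  Mutex *mu_;
};

Making Leave() explicit, rather than unlocking in the destructor, forces callers to release the lock (and restore the signal mask) at a deliberate point; the destructor only checks that this happened.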
@@ -235,6 +277,8 @@
// Default arena should be hooked, e.g. for heap-checker to trace
// pointer chains through objects in the default arena.
arena->flags = LowLevelAlloc::kCallMallocHook;
+ } else if (arena == &unhooked_async_sig_safe_arena) {
+ arena->flags = LowLevelAlloc::kAsyncSignalSafe;
} else {
arena->flags = 0; // other arenas' flags may be overridden by client,
// but unhooked_arena will have 0 in 'flags'.
@@ -246,9 +290,12 @@
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32 flags,
Arena *meta_data_arena) {
RAW_CHECK(meta_data_arena != 0, "must pass a valid arena");
- if (meta_data_arena == &default_arena &&
- (flags & LowLevelAlloc::kCallMallocHook) == 0) {
- meta_data_arena = &unhooked_arena;
+ if (meta_data_arena == &default_arena) {
+ if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+ meta_data_arena = &unhooked_async_sig_safe_arena;
+ } else if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
+ meta_data_arena = &unhooked_arena;
+ }
}
// Arena(0) uses the constructor for non-static contexts
Arena *result =
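For context, here is how a client would be expected to reach the new branch above: passing kAsyncSignalSafe with the default arena as meta_data_arena routes the new arena's own metadata through unhooked_async_sig_safe_arena, so no malloc hook fires even while the arena object itself is created. This is only an illustrative sketch against the LowLevelAlloc interface declared in low_level_alloc.h; note that in this Chromium copy the RAW_CHECK in ArenaLock rejects async-signal-safe arenas, so the path is effectively disabled here.

#include "base/low_level_alloc.h"

void ExampleAsyncSignalSafeArena() {
  // Metadata for this arena is carved out of unhooked_async_sig_safe_arena.
  LowLevelAlloc::Arena *arena =
      LowLevelAlloc::NewArena(LowLevelAlloc::kAsyncSignalSafe,
                              LowLevelAlloc::DefaultArena());

  // In upstream perftools this allocation would bypass malloc hooks and block
  // signals while the arena lock is held; in this copy the RAW_CHECK(false)
  // in ArenaLock refuses async-signal-safe arena use instead.
  void *p = LowLevelAlloc::AllocWithArena(128, arena);
  LowLevelAlloc::Free(p);

  LowLevelAlloc::DeleteArena(arena);
}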
@@ -262,9 +309,9 @@
bool LowLevelAlloc::DeleteArena(Arena *arena) {
RAW_CHECK(arena != 0 && arena != &default_arena && arena != &unhooked_arena,
"may not delete default arena");
- arena->mu.Lock();
+ ArenaLock section(arena);
bool empty = (arena->allocation_count == 0);
- arena->mu.Unlock();
+ section.Leave();
if (empty) {
while (arena->freelist.next[0] != 0) {
AllocList *region = arena->freelist.next[0];
@@ -279,7 +326,13 @@
"empty arena has non-page-aligned block size");
RAW_CHECK(reinterpret_cast<intptr_t>(region) % arena->pagesize == 0,
"empty arena has non-page-aligned block");
- RAW_CHECK(munmap(region, size) == 0,
+ int munmap_result;
+ if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
+ munmap_result = munmap(region, size);
+ } else {
+ munmap_result = MallocHook::UnhookedMUnmap(region, size);
+ }
+ RAW_CHECK(munmap_result == 0,
"LowLevelAlloc::DeleteArena: munmap failed address");
}
Free(arena);
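The munmap dispatch above exists because a plain munmap in tcmalloc runs the installed mmap/munmap hooks, and hooks (which may lock or allocate) are exactly what an async-signal-safe arena must avoid while it could be running inside a signal handler. Conceptually, the unhooked variant goes straight to the kernel; a rough sketch of the idea follows, using a hypothetical helper rather than the actual MallocHook::UnhookedMUnmap implementation.

#include <cstddef>
#include <sys/syscall.h>
#include <unistd.h>

// Hypothetical illustration: release a mapping with a direct system call so
// that no user-level mmap/munmap hook runs.
static int RawMUnmapForIllustration(void *start, size_t length) {
  return static_cast<int>(syscall(SYS_munmap, start, length));
}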
@@ -363,21 +416,21 @@
if ((arena->flags & kCallMallocHook) != 0) {
MallocHook::InvokeDeleteHook(v);
}
- arena->mu.Lock();
+ ArenaLock section(arena);
AddToFreelist(v, arena);
RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
arena->allocation_count--;
- arena->mu.Unlock();
+ section.Leave();
}
}
// allocates and returns a block of size bytes, to be freed with Free()
// L < arena->mu
-void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
+static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
void *result = 0;
if (request != 0) {
AllocList *s; // will point to region that satisfies request
- arena->mu.Lock();
+ ArenaLock section(arena);
ArenaInit(arena);
// round up with header
size_t req_rnd = RoundUp(request + sizeof (s->header), arena->roundup);
@@ -399,8 +452,14 @@
// mmap generous 64K chunks to decrease
// the chances/impact of fragmentation:
size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
- void *new_pages = mmap(0, new_pages_size,
- PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ void *new_pages;
+ if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+ new_pages = MallocHook::UnhookedMMap(0, new_pages_size,
+ PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ } else {
+ new_pages = mmap(0, new_pages_size,
+ PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ }
RAW_CHECK(new_pages != MAP_FAILED, "mmap error");
arena->mu.Lock();
s = reinterpret_cast<AllocList *>(new_pages);
@@ -425,7 +484,7 @@
s->header.magic = Magic(kMagicAllocated, &s->header);
RAW_CHECK(s->header.arena == arena, "");
arena->allocation_count++;
- arena->mu.Unlock();
+ section.Leave();
result = &s->levels;
}
ANNOTATE_NEW_MEMORY(result, request);
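As the comment in the hunk above notes, new regions are mapped in generous multiples of arena->pagesize * 16 (64 KiB with 4 KiB pages) to reduce the chances and impact of fragmentation. A small worked example of that arithmetic, using a power-of-two RoundUp assumed to mirror the helper in this file and an assumed 32-byte block header:

#include <cstddef>
#include <cstdio>

// Power-of-two round-up, assumed to match the RoundUp helper in this file.
static size_t RoundUp(size_t addr, size_t align) {
  return (addr + align - 1) & ~(align - 1);
}

int main() {
  const size_t pagesize = 4096;   // typical page size (assumption)
  const size_t header   = 32;     // illustrative sizeof(s->header)
  const size_t roundup  = 16;     // 2 * sizeof(void*) on a 64-bit build

  size_t req_rnd        = RoundUp(100 + header, roundup);    // 144
  size_t new_pages_size = RoundUp(req_rnd, pagesize * 16);   // 65536

  // A 100-byte request that misses the freelist maps a full 64 KiB chunk;
  // the unused remainder is returned to the freelist for later allocations.
  std::printf("req_rnd=%zu new_pages_size=%zu\n", req_rnd, new_pages_size);
  return 0;
}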