Chromium Code Reviews

Side by Side Diff: third_party/tcmalloc/chromium/src/base/low_level_alloc.cc

Issue 7050034: Merge google-perftools r109 (the current contents of third_party/tcmalloc/vendor) (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: '' Created 9 years, 6 months ago
/* Copyright (c) 2006, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
(...skipping 41 matching lines...)
// form of the name instead.
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
#endif

// A first-fit allocator with amortized logarithmic free() time.

// ---------------------------------------------------------------------------
static const int kMaxLevel = 30;

-namespace {
+// We put this class-only struct in a namespace to avoid polluting the
+// global namespace with this struct name (thus risking an ODR violation).
+namespace low_level_alloc_internal {
// This struct describes one allocated block, or one free block.
struct AllocList {
  struct Header {
    intptr_t size;  // size of entire region, including this field. Must be
                    // first.  Valid in both allocated and unallocated blocks
    intptr_t magic; // kMagicAllocated or kMagicUnallocated xor this
    LowLevelAlloc::Arena *arena;  // pointer to parent arena
    void *dummy_for_alignment;    // aligns regions to 0 mod 2*sizeof(void*)
  } header;

  // Next two fields: in unallocated blocks: freelist skiplist data
  //                  in allocated blocks: overlaps with client data
  int levels;                  // levels in skiplist used
  AllocList *next[kMaxLevel];  // actually has levels elements.
                               // The AllocList node may not have room for
                               // all kMaxLevel entries.  See max_fit in
                               // LLA_SkiplistLevels()
};
}
+using low_level_alloc_internal::AllocList;
+

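The comment introduced above names the hazard being avoided: a struct defined at global scope in one .cc file can silently collide with an unrelated definition of the same name elsewhere in the program. A minimal illustration of that One Definition Rule violation (hypothetical file names, not part of this change):

  // low_level_alloc.cc -- AllocList defined at global scope (the pre-fix layout)
  struct AllocList { int levels; /* ... */ };

  // some_other_file.cc -- an unrelated type that happens to reuse the name
  struct AllocList { double weight; /* ... */ };

  // Linking both objects into one binary gives the program two conflicting
  // definitions of ::AllocList, which is undefined behavior.  Placing the
  // struct in low_level_alloc_internal (or an anonymous namespace) makes the
  // name distinct and removes the collision.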
// ---------------------------------------------------------------------------
// A trivial skiplist implementation.  This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.

// An integer approximation of log2(size/base)
// Requires size >= base.
static int IntLog2(size_t size, size_t base) {
  int result = 0;
  for (size_t i = size; i > base; i >>= 1) {  // i == floor(size/2**result)
(...skipping 109 matching lines...)
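The rest of IntLog2 and the skiplist helpers are folded out above. As a minimal sketch of a function consistent with the visible signature and the loop invariant (the elided upstream body may differ in details such as rounding):

  // Sketch only: counts how many times size can be halved before it no longer
  // exceeds base, i.e. an integer approximation of log2(size/base).
  static int IntLog2Sketch(size_t size, size_t base) {
    int result = 0;
    for (size_t i = size; i > base; i >>= 1) {  // i == floor(size/2**result)
      result++;
    }
    return result;
  }

For example, IntLog2Sketch(4096, 64) returns 6, matching log2(4096/64). The skiplist code presumably uses a quantity like this when choosing levels for a free block (see the max_fit note on AllocList::next above).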
// do not want malloc hook reporting, so that for them there's no malloc hook
// reporting even during arena creation.
static struct LowLevelAlloc::Arena unhooked_arena;
static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;

// magic numbers to identify allocated and unallocated blocks
static const intptr_t kMagicAllocated = 0x4c833e95;
static const intptr_t kMagicUnallocated = ~kMagicAllocated;

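The "xor this" note on AllocList::Header::magic above means the stored value mixes one of these constants with the header's own address, so a pointer that never came from this allocator (or a double free) is very unlikely to validate. A minimal sketch of that check, with an illustrative helper name (the elided portion of this file defines its own equivalent):

  // Illustrative only: combine a magic constant with the header's address.
  static intptr_t MagicFor(intptr_t magic, const AllocList::Header *h) {
    return magic ^ reinterpret_cast<intptr_t>(h);
  }

  // A free path can then verify it was handed a live allocated block,
  //   RAW_CHECK(h->magic == MagicFor(kMagicAllocated, h), "bad magic in Free");
  // and re-mark the header before returning the block to the freelist:
  //   h->magic = MagicFor(kMagicUnallocated, h);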
namespace {
-class ArenaLock {
+class SCOPED_LOCKABLE ArenaLock {
 public:
  explicit ArenaLock(LowLevelAlloc::Arena *arena)
      EXCLUSIVE_LOCK_FUNCTION(arena->mu)
      : left_(false), mask_valid_(false), arena_(arena) {
    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
      // We've decided not to support async-signal-safe arena use until
      // there's a demonstrated need.  Here's how one could do it though
      // (would need to be made more portable).
#if 0
      sigset_t all;
      sigfillset(&all);
      this->mask_valid_ =
          (pthread_sigmask(SIG_BLOCK, &all, &this->mask_) == 0);
#else
      RAW_CHECK(false, "We do not yet support async-signal-safe arena.");
#endif
    }
    this->arena_->mu.Lock();
  }
  ~ArenaLock() { RAW_CHECK(this->left_, "haven't left Arena region"); }
-  void Leave() UNLOCK_FUNCTION(arena_->mu) {
+  void Leave() UNLOCK_FUNCTION() {
    this->arena_->mu.Unlock();
#if 0
    if (this->mask_valid_) {
      pthread_sigmask(SIG_SETMASK, &this->mask_, 0);
    }
#endif
    this->left_ = true;
  }
 private:
  bool left_;     // whether left region
(...skipping 266 matching lines...)
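Two of the changes above adjust this class's thread-safety annotations: SCOPED_LOCKABLE marks ArenaLock as a scoped guard type, EXCLUSIVE_LOCK_FUNCTION on the constructor records that it acquires arena->mu, and Leave() drops the lock-expression argument from UNLOCK_FUNCTION. A stripped-down sketch of the annotated-guard pattern these macros support (the Mutex type and ScopedGuard name are stand-ins, not part of this change):

  // Sketch only: the usual RAII form, where the destructor itself unlocks.
  class SCOPED_LOCKABLE ScopedGuard {
   public:
    explicit ScopedGuard(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
      mu_->Lock();
    }
    ~ScopedGuard() UNLOCK_FUNCTION() { mu_->Unlock(); }
   private:
    Mutex *mu_;
  };

ArenaLock deliberately departs from that form: its destructor only verifies that Leave() was called, making the unlock point explicit in the caller (e.g. so work like the MallocHook call further down need not run under the arena lock).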
    // this call must be directly in the user-called allocator function
    // for MallocHook::GetCallerStackTrace to work properly
    MallocHook::InvokeNewHook(result, request);
  }
  return result;
}

LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
  return &default_arena;
}
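For context on how callers reach DefaultArena(), a minimal usage sketch, assuming the interface declared in low_level_alloc.h (Alloc, AllocWithArena, Free, NewArena, DeleteArena, DefaultArena; check that header for the exact signatures and flag values):

  #include "base/low_level_alloc.h"

  void Example() {
    // Allocate and free from the default arena.
    void *p = LowLevelAlloc::Alloc(128);
    LowLevelAlloc::Free(p);

    // Or carve allocations out of a private arena whose own metadata comes
    // from the default arena; flags of 0 would presumably skip malloc hook
    // reporting, since kCallMallocHook is not set.
    LowLevelAlloc::Arena *arena =
        LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
    void *q = LowLevelAlloc::AllocWithArena(64, arena);
    LowLevelAlloc::Free(q);
    LowLevelAlloc::DeleteArena(arena);
  }

The unhooked arenas defined earlier in this file exist for exactly this kind of no-hook use, so that nothing is reported to malloc hooks even during arena creation.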
