Chromium Code Reviews

Unified diff: third_party/tcmalloc/chromium/src/base/low_level_alloc.cc

Issue 576001: Merged third_party/tcmalloc/vendor/src(google-perftools r87) into... (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Removed the unnecessary printf and ASSERT(0). Created 10 years, 9 months ago.
 /* Copyright (c) 2006, Google Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met:
  *
  *     * Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above
(...skipping 49 matching lines...)
 static const int kMaxLevel = 30;
 
 namespace {
 // This struct describes one allocated block, or one free block.
 struct AllocList {
   struct Header {
     intptr_t size;      // size of entire region, including this field.  Must be
                         // first.  Valid in both allocated and unallocated blocks
     intptr_t magic;     // kMagicAllocated or kMagicUnallocated xor this
     LowLevelAlloc::Arena *arena;  // pointer to parent arena
     void *dummy_for_alignment;    // aligns regions to 0 mod 2*sizeof(void*)
   } header;
 
   // Next two fields: in unallocated blocks: freelist skiplist data
   //                  in allocated blocks: overlaps with client data
   int levels;                   // levels in skiplist used
   AllocList *next[kMaxLevel];   // actually has levels elements.
                                 // The AllocList node may not have room for
                                 // all kMaxLevel entries.  See max_fit in
                                 // LLA_SkiplistLevels()
 };
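Note on the layout above: the pointer handed back to callers is the address of the levels field, which sits immediately after header, so Free() further down recovers the header by stepping back sizeof(header) bytes. A self-contained sketch of that relationship, using a simplified stand-in struct (hypothetical, not part of the patch):

#include <assert.h>
#include <stdint.h>

// Simplified stand-in for AllocList, only to show that the header sits
// immediately before the client-visible data.
struct Block {
  struct Header {
    intptr_t size;
    intptr_t magic;
    void *arena;
    void *dummy_for_alignment;
  } header;
  int levels;   // first field the caller's pointer refers to
};

int main() {
  Block b;
  void *client = &b.levels;    // what the allocator hands out
  Block *recovered = reinterpret_cast<Block *>(
      reinterpret_cast<char *>(client) - sizeof (b.header));
  assert(recovered == &b);     // Free() performs exactly this step
  return 0;
}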
(...skipping 109 matching lines...)
   size_t roundup;       // lowest power of 2 >= max(16,sizeof (AllocList))
                         // (init under mu, then ro)
   size_t min_size;      // smallest allocation block size
                         // (init under mu, then ro)
 };
 
 // The default arena, which is used when 0 is passed instead of an Arena
 // pointer.
 static struct LowLevelAlloc::Arena default_arena;
 
-// A non-malloc-hooked arena: used only to allocate metadata for arenas that
+// Non-malloc-hooked arenas: used only to allocate metadata for arenas that
 // do not want malloc hook reporting, so that for them there's no malloc hook
 // reporting even during arena creation.
 static struct LowLevelAlloc::Arena unhooked_arena;
+static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;
 
 // magic numbers to identify allocated and unallocated blocks
 static const intptr_t kMagicAllocated = 0x4c833e95;
 static const intptr_t kMagicUnallocated = ~kMagicAllocated;
 
+namespace {
+class ArenaLock {
+ public:
+  explicit ArenaLock(LowLevelAlloc::Arena *arena) :
+      left_(false), mask_valid_(false), arena_(arena) {
+    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+      // We've decided not to support async-signal-safe arena use until
+      // there's a demonstrated need.  Here's how one could do it though
+      // (would need to be made more portable).
+#if 0
+      sigset_t all;
+      sigfillset(&all);
+      this->mask_valid_ =
+          (pthread_sigmask(SIG_BLOCK, &all, &this->mask_) == 0);
+#else
+      RAW_CHECK(false, "We do not yet support async-signal-safe arena.");
+#endif
+    }
+    this->arena_->mu.Lock();
+  }
+  ~ArenaLock() { RAW_CHECK(this->left_, "haven't left Arena region"); }
+  void Leave() {
+    this->arena_->mu.Unlock();
+#if 0
+    if (this->mask_valid_) {
+      pthread_sigmask(SIG_SETMASK, &this->mask_, 0);
+    }
+#endif
+    this->left_ = true;
+  }
+ private:
+  bool left_;        // whether left region
+  bool mask_valid_;
+#if 0
+  sigset_t mask_;    // old mask of blocked signals
+#endif
+  LowLevelAlloc::Arena *arena_;
+  DISALLOW_COPY_AND_ASSIGN(ArenaLock);
+};
+}  // anonymous namespace
+
 // create an appropriate magic number for an object at "ptr"
 // "magic" should be kMagicAllocated or kMagicUnallocated
 inline static intptr_t Magic(intptr_t magic, AllocList::Header *ptr) {
   return magic ^ reinterpret_cast<intptr_t>(ptr);
 }
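Because the stored magic value is XORed with the header's own address, the RAW_CHECKs in Free() and DeleteArena() catch not only corrupted headers but also headers that were copied to a different address. A standalone illustration of the same scheme (MagicSketch and main are hypothetical; the constants are copied from above):

#include <assert.h>
#include <stdint.h>

static const intptr_t kMagicAllocated = 0x4c833e95;
static const intptr_t kMagicUnallocated = ~kMagicAllocated;

// Mirrors Magic() above, shown standalone for illustration only.
static intptr_t MagicSketch(intptr_t magic, const void *ptr) {
  return magic ^ reinterpret_cast<intptr_t>(ptr);
}

int main() {
  intptr_t header_slot = 0;   // stands in for an AllocList::Header
  intptr_t stored = MagicSketch(kMagicAllocated, &header_slot);
  // The check recomputes the value from the header's address, so a header
  // copied elsewhere (or overwritten) no longer matches.
  assert(stored == MagicSketch(kMagicAllocated, &header_slot));
  assert(stored != MagicSketch(kMagicUnallocated, &header_slot));
  return 0;
}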
 
 // Initialize the fields of an Arena
 static void ArenaInit(LowLevelAlloc::Arena *arena) {
   if (arena->pagesize == 0) {
     arena->pagesize = getpagesize();
     // Round up block sizes to a power of two close to the header size.
     arena->roundup = 16;
     while (arena->roundup < sizeof (arena->freelist.header)) {
       arena->roundup += arena->roundup;
     }
     // Don't allocate blocks less than twice the roundup size to avoid tiny
     // free blocks.
     arena->min_size = 2 * arena->roundup;
     arena->freelist.header.size = 0;
     arena->freelist.header.magic =
         Magic(kMagicUnallocated, &arena->freelist.header);
     arena->freelist.header.arena = arena;
     arena->freelist.levels = 0;
     memset(arena->freelist.next, 0, sizeof (arena->freelist.next));
     arena->allocation_count = 0;
     if (arena == &default_arena) {
       // Default arena should be hooked, e.g. for heap-checker to trace
       // pointer chains through objects in the default arena.
       arena->flags = LowLevelAlloc::kCallMallocHook;
+    } else if (arena == &unhooked_async_sig_safe_arena) {
+      arena->flags = LowLevelAlloc::kAsyncSignalSafe;
     } else {
       arena->flags = 0;   // other arenas' flags may be overridden by client,
                           // but unhooked_arena will have 0 in 'flags'.
     }
   }
 }
 
 // L < meta_data_arena->mu
 LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32 flags,
                                               Arena *meta_data_arena) {
   RAW_CHECK(meta_data_arena != 0, "must pass a valid arena");
-  if (meta_data_arena == &default_arena &&
-      (flags & LowLevelAlloc::kCallMallocHook) == 0) {
-    meta_data_arena = &unhooked_arena;
+  if (meta_data_arena == &default_arena) {
+    if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+      meta_data_arena = &unhooked_async_sig_safe_arena;
+    } else if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
+      meta_data_arena = &unhooked_arena;
+    }
   }
   // Arena(0) uses the constructor for non-static contexts
   Arena *result =
       new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0);
   ArenaInit(result);
   result->flags = flags;
   return result;
 }
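The effect of the new branch above is to allocate an arena's metadata from a metadata arena whose hooking and signal-safety behavior matches the requested flags, so even the metadata allocation of an unhooked arena produces no malloc-hook reports. A minimal usage sketch of the public interface, assuming the declarations in low_level_alloc.h alongside this file (the function and include path are taken from this file; the wrapper function is hypothetical and not part of the patch):

#include "base/low_level_alloc.h"

// Sketch only: create a private arena whose metadata comes from
// unhooked_arena (flags = 0, so kCallMallocHook is not set), allocate from
// it, and tear it down.  Error handling omitted.
void ArenaUsageSketch() {
  LowLevelAlloc::Arena *arena =
      LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
  void *p = LowLevelAlloc::AllocWithArena(128, arena);
  LowLevelAlloc::Free(p);               // the block's header records its arena
  LowLevelAlloc::DeleteArena(arena);    // returns true because the arena is now empty
}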
 
 // L < arena->mu, L < arena->arena->mu
 bool LowLevelAlloc::DeleteArena(Arena *arena) {
   RAW_CHECK(arena != 0 && arena != &default_arena && arena != &unhooked_arena,
             "may not delete default arena");
-  arena->mu.Lock();
+  ArenaLock section(arena);
   bool empty = (arena->allocation_count == 0);
-  arena->mu.Unlock();
+  section.Leave();
   if (empty) {
     while (arena->freelist.next[0] != 0) {
       AllocList *region = arena->freelist.next[0];
       size_t size = region->header.size;
       arena->freelist.next[0] = region->next[0];
       RAW_CHECK(region->header.magic ==
                 Magic(kMagicUnallocated, &region->header),
                 "bad magic number in DeleteArena()");
       RAW_CHECK(region->header.arena == arena,
                 "bad arena pointer in DeleteArena()");
       RAW_CHECK(size % arena->pagesize == 0,
                 "empty arena has non-page-aligned block size");
       RAW_CHECK(reinterpret_cast<intptr_t>(region) % arena->pagesize == 0,
                 "empty arena has non-page-aligned block");
-      RAW_CHECK(munmap(region, size) == 0,
+      int munmap_result;
+      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
+        munmap_result = munmap(region, size);
+      } else {
+        munmap_result = MallocHook::UnhookedMUnmap(region, size);
+      }
+      RAW_CHECK(munmap_result == 0,
                 "LowLevelAlloc::DeleteArena: munmap failed address");
     }
     Free(arena);
   }
   return empty;
 }
 
 // ---------------------------------------------------------------------------
 
 // Return value rounded up to next multiple of align.
(...skipping 63 matching lines...)
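RoundUp(), whose body falls in the lines skipped here, is used below both to pad requests up to the arena's roundup granularity and to size fresh mmap'd chunks at pagesize * 16 (64 KiB with 4 KiB pages). The patch does not change it; as a reminder of the arithmetic only, a conventional power-of-two rounding helper looks like the following (RoundUpSketch is a hypothetical stand-in, not the elided code):

#include <assert.h>
#include <stdint.h>

// Hypothetical stand-in for the elided RoundUp(): round addr up to the next
// multiple of align, where align is a power of two.
static intptr_t RoundUpSketch(intptr_t addr, intptr_t align) {
  return (addr + align - 1) & ~(align - 1);
}

int main() {
  assert(RoundUpSketch(1000, 16) == 1008);          // request padded to the roundup
  assert(RoundUpSketch(1008, 4096 * 16) == 65536);  // one 64 KiB mmap chunk
  return 0;
}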
 void LowLevelAlloc::Free(void *v) {
   if (v != 0) {
     AllocList *f = reinterpret_cast<AllocList *>(
         reinterpret_cast<char *>(v) - sizeof (f->header));
     RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
               "bad magic number in Free()");
     LowLevelAlloc::Arena *arena = f->header.arena;
     if ((arena->flags & kCallMallocHook) != 0) {
       MallocHook::InvokeDeleteHook(v);
     }
-    arena->mu.Lock();
+    ArenaLock section(arena);
     AddToFreelist(v, arena);
     RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
     arena->allocation_count--;
-    arena->mu.Unlock();
+    section.Leave();
   }
 }
 
 // allocates and returns a block of size bytes, to be freed with Free()
 // L < arena->mu
-void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
+static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
   void *result = 0;
   if (request != 0) {
     AllocList *s;       // will point to region that satisfies request
-    arena->mu.Lock();
+    ArenaLock section(arena);
     ArenaInit(arena);
     // round up with header
     size_t req_rnd = RoundUp(request + sizeof (s->header), arena->roundup);
     for (;;) {          // loop until we find a suitable region
       // find the minimum levels that a block of this size must have
       int i = LLA_SkiplistLevels(req_rnd, arena->min_size, false) - 1;
       if (i < arena->freelist.levels) {        // potential blocks exist
         AllocList *before = &arena->freelist;  // predecessor of s
         while ((s = Next(i, before, arena)) != 0 && s->header.size < req_rnd) {
           before = s;
         }
         if (s != 0) {   // we found a region
           break;
         }
       }
       // we unlock before mmap() both because mmap() may call a callback hook,
       // and because it may be slow.
       arena->mu.Unlock();
       // mmap generous 64K chunks to decrease
       // the chances/impact of fragmentation:
       size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
-      void *new_pages = mmap(0, new_pages_size,
-                   PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+      void *new_pages;
+      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+        new_pages = MallocHook::UnhookedMMap(0, new_pages_size,
+            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+      } else {
+        new_pages = mmap(0, new_pages_size,
+            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+      }
       RAW_CHECK(new_pages != MAP_FAILED, "mmap error");
       arena->mu.Lock();
       s = reinterpret_cast<AllocList *>(new_pages);
       s->header.size = new_pages_size;
       // Pretend the block is allocated; call AddToFreelist() to free it.
       s->header.magic = Magic(kMagicAllocated, &s->header);
       s->header.arena = arena;
       AddToFreelist(&s->levels, arena);   // insert new region into free list
     }
     AllocList *prev[kMaxLevel];
     LLA_SkiplistDelete(&arena->freelist, s, prev);    // remove from free list
     // s points to the first free region that's big enough
     if (req_rnd + arena->min_size <= s->header.size) {  // big enough to split
       AllocList *n = reinterpret_cast<AllocList *>
           (req_rnd + reinterpret_cast<char *>(s));
       n->header.size = s->header.size - req_rnd;
       n->header.magic = Magic(kMagicAllocated, &n->header);
       n->header.arena = arena;
       s->header.size = req_rnd;
       AddToFreelist(&n->levels, arena);
     }
     s->header.magic = Magic(kMagicAllocated, &s->header);
     RAW_CHECK(s->header.arena == arena, "");
     arena->allocation_count++;
-    arena->mu.Unlock();
+    section.Leave();
     result = &s->levels;
   }
   ANNOTATE_NEW_MEMORY(result, request);
   return result;
 }
 
 void *LowLevelAlloc::Alloc(size_t request) {
   void *result = DoAllocWithArena(request, &default_arena);
   if ((default_arena.flags & kCallMallocHook) != 0) {
     // this call must be directly in the user-called allocator function
(...skipping 10 matching lines...)
     // this call must be directly in the user-called allocator function
     // for MallocHook::GetCallerStackTrace to work properly
     MallocHook::InvokeNewHook(result, request);
   }
   return result;
 }
 
 LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
   return &default_arena;
 }
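Taken together, the default-arena entry points at the end of the file are the simplest way to use this allocator. A minimal sketch, assuming base/low_level_alloc.h is on the include path (the wrapper function below is hypothetical and not part of the patch):

#include "base/low_level_alloc.h"

void DefaultArenaSketch() {
  void *p = LowLevelAlloc::Alloc(64);   // served by DoAllocWithArena(&default_arena)
  // ... use the 64 bytes ...
  LowLevelAlloc::Free(p);               // hook reporting follows default_arena.flags
}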
