Chromium Code Reviews

Side by Side Diff: third_party/tcmalloc/chromium/src/base/low_level_alloc.cc

Issue 1076002: Revert 41938 - Merged third_party/tcmalloc/vendor/src(googleperftools r87) in... (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Created 10 years, 9 months ago
1 /* Copyright (c) 2006, Google Inc. 1 /* Copyright (c) 2006, Google Inc.
2 * All rights reserved. 2 * All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 49 matching lines...)
60 static const int kMaxLevel = 30; 60 static const int kMaxLevel = 30;
61 61
62 namespace { 62 namespace {
63 // This struct describes one allocated block, or one free block. 63 // This struct describes one allocated block, or one free block.
64 struct AllocList { 64 struct AllocList {
65 struct Header { 65 struct Header {
66 intptr_t size; // size of entire region, including this field. Must be 66 intptr_t size; // size of entire region, including this field. Must be
67 // first. Valid in both allocated and unallocated blocks 67 // first. Valid in both allocated and unallocated blocks
68 intptr_t magic; // kMagicAllocated or kMagicUnallocated xor this 68 intptr_t magic; // kMagicAllocated or kMagicUnallocated xor this
69 LowLevelAlloc::Arena *arena; // pointer to parent arena 69 LowLevelAlloc::Arena *arena; // pointer to parent arena
70 void *dummy_for_alignment; // aligns regions to 0 mod 2*sizeof(void*) 70 void *dummy_for_alignment; // aligns regions to 0 mod 2*sizeof(void*)
71 } header; 71 } header;
72 72
73 // Next two fields: in unallocated blocks: freelist skiplist data 73 // Next two fields: in unallocated blocks: freelist skiplist data
74 // in allocated blocks: overlaps with client data 74 // in allocated blocks: overlaps with client data
75 int levels; // levels in skiplist used 75 int levels; // levels in skiplist used
76 AllocList *next[kMaxLevel]; // actually has levels elements. 76 AllocList *next[kMaxLevel]; // actually has levels elements.
77 // The AllocList node may not have room for 77 // The AllocList node may not have room for
78 // all kMaxLevel entries. See max_fit in 78 // all kMaxLevel entries. See max_fit in
79 // LLA_SkiplistLevels() 79 // LLA_SkiplistLevels()
80 }; 80 };
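Reviewer note (not part of the diff): next[] declares kMaxLevel slots, but a small free block cannot actually hold that many, because the skiplist pointers occupy what is client data in an allocated block. The real cap is computed in LLA_SkiplistLevels(), which is outside this hunk; the sketch below only illustrates the size-based bound, and MaxLevelsThatFit is an assumed name, not code from this CL.

```cpp
// Standalone sketch: how many next[] pointers can fit in a block of a
// given size.  The header layout mirrors the struct above, with the arena
// pointer reduced to void* so the snippet compiles on its own.
#include <cstddef>
#include <cstdint>

static const int kMaxLevel = 30;

struct AllocList {
  struct Header {
    intptr_t size;
    intptr_t magic;
    void *arena;
    void *dummy_for_alignment;
  } header;
  int levels;
  AllocList *next[kMaxLevel];  // only the first `levels` entries are used
};

// A block of `size` bytes has room for next[] pointers only after the
// header and the levels field; kMaxLevel is the absolute ceiling.
static int MaxLevelsThatFit(size_t size) {
  const size_t fixed = offsetof(AllocList, next);
  if (size <= fixed) return 0;
  const int max_fit = static_cast<int>((size - fixed) / sizeof(AllocList *));
  return max_fit < kMaxLevel ? max_fit : kMaxLevel;
}
```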
(...skipping 109 matching lines...)
190 size_t roundup; // lowest power of 2 >= max(16,sizeof (AllocList)) 190 size_t roundup; // lowest power of 2 >= max(16,sizeof (AllocList))
191 // (init under mu, then ro) 191 // (init under mu, then ro)
192 size_t min_size; // smallest allocation block size 192 size_t min_size; // smallest allocation block size
193 // (init under mu, then ro) 193 // (init under mu, then ro)
194 }; 194 };
195 195
196 // The default arena, which is used when 0 is passed instead of an Arena 196 // The default arena, which is used when 0 is passed instead of an Arena
197 // pointer. 197 // pointer.
198 static struct LowLevelAlloc::Arena default_arena; 198 static struct LowLevelAlloc::Arena default_arena;
199 199
200 // Non-malloc-hooked arenas: used only to allocate metadata for arenas that 200 // A non-malloc-hooked arena: used only to allocate metadata for arenas that
201 // do not want malloc hook reporting, so that for them there's no malloc hook 201 // do not want malloc hook reporting, so that for them there's no malloc hook
202 // reporting even during arena creation. 202 // reporting even during arena creation.
203 static struct LowLevelAlloc::Arena unhooked_arena; 203 static struct LowLevelAlloc::Arena unhooked_arena;
204 static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;
205 204
206 // magic numbers to identify allocated and unallocated blocks 205 // magic numbers to identify allocated and unallocated blocks
207 static const intptr_t kMagicAllocated = 0x4c833e95; 206 static const intptr_t kMagicAllocated = 0x4c833e95;
208 static const intptr_t kMagicUnallocated = ~kMagicAllocated; 207 static const intptr_t kMagicUnallocated = ~kMagicAllocated;
209 208
210 namespace {
211 class ArenaLock {
212 public:
213 explicit ArenaLock(LowLevelAlloc::Arena *arena) :
214 left_(false), mask_valid_(false), arena_(arena) {
215 if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
216 // We've decided not to support async-signal-safe arena use until
217 // there is a demonstrated need. Here's how one could do it though
218 // (would need to be made more portable).
219 #if 0
220 sigset_t all;
221 sigfillset(&all);
222 this->mask_valid_ =
223 (pthread_sigmask(SIG_BLOCK, &all, &this->mask_) == 0);
224 #else
225 RAW_CHECK(false, "We do not yet support async-signal-safe arena.");
226 #endif
227 }
228 this->arena_->mu.Lock();
229 }
230 ~ArenaLock() { RAW_CHECK(this->left_, "haven't left Arena region"); }
231 void Leave() {
232 this->arena_->mu.Unlock();
233 #if 0
234 if (this->mask_valid_) {
235 pthread_sigmask(SIG_SETMASK, &this->mask_, 0);
236 }
237 #endif
238 this->left_ = true;
239 }
240 private:
241 bool left_; // whether left region
242 bool mask_valid_;
243 #if 0
244 sigset_t mask_; // old mask of blocked signals
245 #endif
246 LowLevelAlloc::Arena *arena_;
247 DISALLOW_COPY_AND_ASSIGN(ArenaLock);
248 };
249 } // anonymous namespace
250
251 // create an appropriate magic number for an object at "ptr" 209 // create an appropriate magic number for an object at "ptr"
252 // "magic" should be kMagicAllocated or kMagicUnallocated 210 // "magic" should be kMagicAllocated or kMagicUnallocated
253 inline static intptr_t Magic(intptr_t magic, AllocList::Header *ptr) { 211 inline static intptr_t Magic(intptr_t magic, AllocList::Header *ptr) {
254 return magic ^ reinterpret_cast<intptr_t>(ptr); 212 return magic ^ reinterpret_cast<intptr_t>(ptr);
255 } 213 }
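Reviewer note: because the sentinel is XOR'd with the header's own address, the stored magic value is position-dependent; a pointer that never came from this allocator, or a header that has been copied to a different address, fails the check performed in Free(). A small self-contained illustration (the helper is re-declared against void* so it compiles alone):

```cpp
#include <cassert>
#include <cstdint>

static const intptr_t kMagicAllocated = 0x4c833e95;

// Same idea as Magic() above: mix the sentinel with the object's address.
inline static intptr_t Magic(intptr_t magic, void *ptr) {
  return magic ^ reinterpret_cast<intptr_t>(ptr);
}

int main() {
  long a = 0, b = 0;
  intptr_t m = Magic(kMagicAllocated, &a);   // value stamped for &a
  assert(m == Magic(kMagicAllocated, &a));   // checked at the same address: OK
  assert(m != Magic(kMagicAllocated, &b));   // any other address: mismatch
  return 0;
}
```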
256 214
257 // Initialize the fields of an Arena 215 // Initialize the fields of an Arena
258 static void ArenaInit(LowLevelAlloc::Arena *arena) { 216 static void ArenaInit(LowLevelAlloc::Arena *arena) {
259 if (arena->pagesize == 0) { 217 if (arena->pagesize == 0) {
260 arena->pagesize = getpagesize(); 218 arena->pagesize = getpagesize();
261 // Round up block sizes to a power of two close to the header size. 219 // Round up block sizes to a power of two close to the header size.
262 arena->roundup = 16; 220 arena->roundup = 16;
263 while (arena->roundup < sizeof (arena->freelist.header)) { 221 while (arena->roundup < sizeof (arena->freelist.header)) {
264 arena->roundup += arena->roundup; 222 arena->roundup += arena->roundup;
265 } 223 }
266 // Don't allocate blocks less than twice the roundup size to avoid tiny 224 // Don't allocate blocks less than twice the roundup size to avoid tiny
267 // free blocks. 225 // free blocks.
268 arena->min_size = 2 * arena->roundup; 226 arena->min_size = 2 * arena->roundup;
269 arena->freelist.header.size = 0; 227 arena->freelist.header.size = 0;
270 arena->freelist.header.magic = 228 arena->freelist.header.magic =
271 Magic(kMagicUnallocated, &arena->freelist.header); 229 Magic(kMagicUnallocated, &arena->freelist.header);
272 arena->freelist.header.arena = arena; 230 arena->freelist.header.arena = arena;
273 arena->freelist.levels = 0; 231 arena->freelist.levels = 0;
274 memset(arena->freelist.next, 0, sizeof (arena->freelist.next)); 232 memset(arena->freelist.next, 0, sizeof (arena->freelist.next));
275 arena->allocation_count = 0; 233 arena->allocation_count = 0;
276 if (arena == &default_arena) { 234 if (arena == &default_arena) {
277 // Default arena should be hooked, e.g. for heap-checker to trace 235 // Default arena should be hooked, e.g. for heap-checker to trace
278 // pointer chains through objects in the default arena. 236 // pointer chains through objects in the default arena.
279 arena->flags = LowLevelAlloc::kCallMallocHook; 237 arena->flags = LowLevelAlloc::kCallMallocHook;
280 } else if (arena == &unhooked_async_sig_safe_arena) {
281 arena->flags = LowLevelAlloc::kAsyncSignalSafe;
282 } else { 238 } else {
283 arena->flags = 0; // other arenas' flags may be overridden by client, 239 arena->flags = 0; // other arenas' flags may be overridden by client,
284 // but unhooked_arena will have 0 in 'flags'. 240 // but unhooked_arena will have 0 in 'flags'.
285 } 241 }
286 } 242 }
287 } 243 }
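Reviewer note: the doubling loop in ArenaInit() produces the lowest power of two that is at least max(16, sizeof(header)), and min_size is set to twice that so no block smaller than two roundup units is ever carved out, avoiding tiny free blocks. A tiny standalone sketch of the same arithmetic (RoundupFor is an illustrative name, not part of this CL):

```cpp
#include <cstddef>
#include <cstdio>

// Mirror of the loop in ArenaInit(): start at 16 and double until the
// header fits.
static size_t RoundupFor(size_t header_size) {
  size_t roundup = 16;
  while (roundup < header_size) {
    roundup += roundup;  // same as roundup *= 2
  }
  return roundup;
}

int main() {
  // e.g. a 32-byte header (four pointer-sized fields on a 64-bit build)
  const size_t r = RoundupFor(32);
  printf("roundup=%zu min_size=%zu\n", r, 2 * r);  // roundup=32 min_size=64
  return 0;
}
```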
288 244
289 // L < meta_data_arena->mu 245 // L < meta_data_arena->mu
290 LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32 flags, 246 LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32 flags,
291 Arena *meta_data_arena) { 247 Arena *meta_data_arena) {
292 RAW_CHECK(meta_data_arena != 0, "must pass a valid arena"); 248 RAW_CHECK(meta_data_arena != 0, "must pass a valid arena");
293 if (meta_data_arena == &default_arena) { 249 if (meta_data_arena == &default_arena &&
294 if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { 250 (flags & LowLevelAlloc::kCallMallocHook) == 0) {
295 meta_data_arena = &unhooked_async_sig_safe_arena; 251 meta_data_arena = &unhooked_arena;
296 } else if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
297 meta_data_arena = &unhooked_arena;
298 }
299 } 252 }
300 // Arena(0) uses the constructor for non-static contexts 253 // Arena(0) uses the constructor for non-static contexts
301 Arena *result = 254 Arena *result =
302 new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0); 255 new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0);
303 ArenaInit(result); 256 ArenaInit(result);
304 result->flags = flags; 257 result->flags = flags;
305 return result; 258 return result;
306 } 259 }
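Reviewer note: a hedged usage sketch of the arena API this file implements, using only the entry points declared in low_level_alloc.h and visible in this diff. With flags of 0 and the default arena passed as meta_data_arena, the post-revert NewArena() redirects the arena's own bookkeeping to unhooked_arena, so no malloc hooks fire during creation.

```cpp
#include "base/low_level_alloc.h"  // include path as used inside tcmalloc

void ExampleArenaUse() {
  // Private arena; its metadata comes from the (unhooked) default path.
  LowLevelAlloc::Arena *arena =
      LowLevelAlloc::NewArena(0 /* flags */, LowLevelAlloc::DefaultArena());

  void *p = LowLevelAlloc::AllocWithArena(128, arena);
  // ... use p ...
  LowLevelAlloc::Free(p);  // the owning arena is recovered from the header

  // DeleteArena() succeeds (returns true) only once every allocation made
  // from the arena has been freed.
  bool deleted = LowLevelAlloc::DeleteArena(arena);
  (void)deleted;
}
```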
307 260
308 // L < arena->mu, L < arena->arena->mu 261 // L < arena->mu, L < arena->arena->mu
309 bool LowLevelAlloc::DeleteArena(Arena *arena) { 262 bool LowLevelAlloc::DeleteArena(Arena *arena) {
310 RAW_CHECK(arena != 0 && arena != &default_arena && arena != &unhooked_arena, 263 RAW_CHECK(arena != 0 && arena != &default_arena && arena != &unhooked_arena,
311 "may not delete default arena"); 264 "may not delete default arena");
312 ArenaLock section(arena); 265 arena->mu.Lock();
313 bool empty = (arena->allocation_count == 0); 266 bool empty = (arena->allocation_count == 0);
314 section.Leave(); 267 arena->mu.Unlock();
315 if (empty) { 268 if (empty) {
316 while (arena->freelist.next[0] != 0) { 269 while (arena->freelist.next[0] != 0) {
317 AllocList *region = arena->freelist.next[0]; 270 AllocList *region = arena->freelist.next[0];
318 size_t size = region->header.size; 271 size_t size = region->header.size;
319 arena->freelist.next[0] = region->next[0]; 272 arena->freelist.next[0] = region->next[0];
320 RAW_CHECK(region->header.magic == 273 RAW_CHECK(region->header.magic ==
321 Magic(kMagicUnallocated, &region->header), 274 Magic(kMagicUnallocated, &region->header),
322 "bad magic number in DeleteArena()"); 275 "bad magic number in DeleteArena()");
323 RAW_CHECK(region->header.arena == arena, 276 RAW_CHECK(region->header.arena == arena,
324 "bad arena pointer in DeleteArena()"); 277 "bad arena pointer in DeleteArena()");
325 RAW_CHECK(size % arena->pagesize == 0, 278 RAW_CHECK(size % arena->pagesize == 0,
326 "empty arena has non-page-aligned block size"); 279 "empty arena has non-page-aligned block size");
327 RAW_CHECK(reinterpret_cast<intptr_t>(region) % arena->pagesize == 0, 280 RAW_CHECK(reinterpret_cast<intptr_t>(region) % arena->pagesize == 0,
328 "empty arena has non-page-aligned block"); 281 "empty arena has non-page-aligned block");
329 int munmap_result; 282 RAW_CHECK(munmap(region, size) == 0,
330 if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
331 munmap_result = munmap(region, size);
332 } else {
333 munmap_result = MallocHook::UnhookedMUnmap(region, size);
334 }
335 RAW_CHECK(munmap_result == 0,
336 "LowLevelAlloc::DeleteArena: munmap failed address"); 283 "LowLevelAlloc::DeleteArena: munmap failed address");
337 } 284 }
338 Free(arena); 285 Free(arena);
339 } 286 }
340 return empty; 287 return empty;
341 } 288 }
342 289
343 // --------------------------------------------------------------------------- 290 // ---------------------------------------------------------------------------
344 291
345 // Return value rounded up to next multiple of align. 292 // Return value rounded up to next multiple of align.
(...skipping 63 matching lines...)
409 void LowLevelAlloc::Free(void *v) { 356 void LowLevelAlloc::Free(void *v) {
410 if (v != 0) { 357 if (v != 0) {
411 AllocList *f = reinterpret_cast<AllocList *>( 358 AllocList *f = reinterpret_cast<AllocList *>(
412 reinterpret_cast<char *>(v) - sizeof (f->header)); 359 reinterpret_cast<char *>(v) - sizeof (f->header));
413 RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header), 360 RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
414 "bad magic number in Free()"); 361 "bad magic number in Free()");
415 LowLevelAlloc::Arena *arena = f->header.arena; 362 LowLevelAlloc::Arena *arena = f->header.arena;
416 if ((arena->flags & kCallMallocHook) != 0) { 363 if ((arena->flags & kCallMallocHook) != 0) {
417 MallocHook::InvokeDeleteHook(v); 364 MallocHook::InvokeDeleteHook(v);
418 } 365 }
419 ArenaLock section(arena); 366 arena->mu.Lock();
420 AddToFreelist(v, arena); 367 AddToFreelist(v, arena);
421 RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free"); 368 RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
422 arena->allocation_count--; 369 arena->allocation_count--;
423 section.Leave(); 370 arena->mu.Unlock();
424 } 371 }
425 } 372 }
426 373
427 // allocates and returns a block of size bytes, to be freed with Free() 374 // allocates and returns a block of size bytes, to be freed with Free()
428 // L < arena->mu 375 // L < arena->mu
429 static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) { 376 void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
430 void *result = 0; 377 void *result = 0;
431 if (request != 0) { 378 if (request != 0) {
432 AllocList *s; // will point to region that satisfies request 379 AllocList *s; // will point to region that satisfies request
433 ArenaLock section(arena); 380 arena->mu.Lock();
434 ArenaInit(arena); 381 ArenaInit(arena);
435 // round up with header 382 // round up with header
436 size_t req_rnd = RoundUp(request + sizeof (s->header), arena->roundup); 383 size_t req_rnd = RoundUp(request + sizeof (s->header), arena->roundup);
437 for (;;) { // loop until we find a suitable region 384 for (;;) { // loop until we find a suitable region
438 // find the minimum levels that a block of this size must have 385 // find the minimum levels that a block of this size must have
439 int i = LLA_SkiplistLevels(req_rnd, arena->min_size, false) - 1; 386 int i = LLA_SkiplistLevels(req_rnd, arena->min_size, false) - 1;
440 if (i < arena->freelist.levels) { // potential blocks exist 387 if (i < arena->freelist.levels) { // potential blocks exist
441 AllocList *before = &arena->freelist; // predecessor of s 388 AllocList *before = &arena->freelist; // predecessor of s
442 while ((s = Next(i, before, arena)) != 0 && s->header.size < req_rnd) { 389 while ((s = Next(i, before, arena)) != 0 && s->header.size < req_rnd) {
443 before = s; 390 before = s;
444 } 391 }
445 if (s != 0) { // we found a region 392 if (s != 0) { // we found a region
446 break; 393 break;
447 } 394 }
448 } 395 }
449 // we unlock before mmap() both because mmap() may call a callback hook, 396 // we unlock before mmap() both because mmap() may call a callback hook,
450 // and because it may be slow. 397 // and because it may be slow.
451 arena->mu.Unlock(); 398 arena->mu.Unlock();
452 // mmap generous 64K chunks to decrease 399 // mmap generous 64K chunks to decrease
453 // the chances/impact of fragmentation: 400 // the chances/impact of fragmentation:
454 size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16); 401 size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
455 void *new_pages; 402 void *new_pages = mmap(0, new_pages_size,
456 if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { 403 PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
457 new_pages = MallocHook::UnhookedMMap(0, new_pages_size,
458 PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
459 } else {
460 new_pages = mmap(0, new_pages_size,
461 PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
462 }
463 RAW_CHECK(new_pages != MAP_FAILED, "mmap error"); 404 RAW_CHECK(new_pages != MAP_FAILED, "mmap error");
464 arena->mu.Lock(); 405 arena->mu.Lock();
465 s = reinterpret_cast<AllocList *>(new_pages); 406 s = reinterpret_cast<AllocList *>(new_pages);
466 s->header.size = new_pages_size; 407 s->header.size = new_pages_size;
467 // Pretend the block is allocated; call AddToFreelist() to free it. 408 // Pretend the block is allocated; call AddToFreelist() to free it.
468 s->header.magic = Magic(kMagicAllocated, &s->header); 409 s->header.magic = Magic(kMagicAllocated, &s->header);
469 s->header.arena = arena; 410 s->header.arena = arena;
470 AddToFreelist(&s->levels, arena); // insert new region into free list 411 AddToFreelist(&s->levels, arena); // insert new region into free list
471 } 412 }
472 AllocList *prev[kMaxLevel]; 413 AllocList *prev[kMaxLevel];
473 LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list 414 LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list
474 // s points to the first free region that's big enough 415 // s points to the first free region that's big enough
475 if (req_rnd + arena->min_size <= s->header.size) { // big enough to split 416 if (req_rnd + arena->min_size <= s->header.size) { // big enough to split
476 AllocList *n = reinterpret_cast<AllocList *> 417 AllocList *n = reinterpret_cast<AllocList *>
477 (req_rnd + reinterpret_cast<char *>(s)); 418 (req_rnd + reinterpret_cast<char *>(s));
478 n->header.size = s->header.size - req_rnd; 419 n->header.size = s->header.size - req_rnd;
479 n->header.magic = Magic(kMagicAllocated, &n->header); 420 n->header.magic = Magic(kMagicAllocated, &n->header);
480 n->header.arena = arena; 421 n->header.arena = arena;
481 s->header.size = req_rnd; 422 s->header.size = req_rnd;
482 AddToFreelist(&n->levels, arena); 423 AddToFreelist(&n->levels, arena);
483 } 424 }
484 s->header.magic = Magic(kMagicAllocated, &s->header); 425 s->header.magic = Magic(kMagicAllocated, &s->header);
485 RAW_CHECK(s->header.arena == arena, ""); 426 RAW_CHECK(s->header.arena == arena, "");
486 arena->allocation_count++; 427 arena->allocation_count++;
487 section.Leave(); 428 arena->mu.Unlock();
488 result = &s->levels; 429 result = &s->levels;
489 } 430 }
490 ANNOTATE_NEW_MEMORY(result, request); 431 ANNOTATE_NEW_MEMORY(result, request);
491 return result; 432 return result;
492 } 433 }
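Reviewer note: a worked example of the rounding and splitting arithmetic above, under assumed 64-bit sizes (32-byte header, hence roundup = 32 and min_size = 64). The RoundUp() helper below is a plain next-multiple round-up standing in for the one elided from this hunk, so treat the exact figures as illustrative.

```cpp
#include <cstddef>
#include <cstdio>

// Assumed round-up to the next multiple of a power-of-two alignment.
static size_t RoundUp(size_t addr, size_t align) {
  return ((addr - 1) | (align - 1)) + 1;
}

int main() {
  const size_t header = 32, roundup = 32, min_size = 64;

  const size_t request = 100;
  const size_t req_rnd = RoundUp(request + header, roundup);  // 160
  const size_t block = 64 * 1024;  // a fresh 16-page (64K) mmap'd region

  if (req_rnd + min_size <= block) {  // big enough to split
    printf("allocate %zu bytes, return %zu bytes to the free list\n",
           req_rnd, block - req_rnd);  // 160 and 65376
  }
  return 0;
}
```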
493 434
494 void *LowLevelAlloc::Alloc(size_t request) { 435 void *LowLevelAlloc::Alloc(size_t request) {
495 void *result = DoAllocWithArena(request, &default_arena); 436 void *result = DoAllocWithArena(request, &default_arena);
496 if ((default_arena.flags & kCallMallocHook) != 0) { 437 if ((default_arena.flags & kCallMallocHook) != 0) {
497 // this call must be directly in the user-called allocator function 438 // this call must be directly in the user-called allocator function
(...skipping 10 matching lines...)
508 // this call must be directly in the user-called allocator function 449 // this call must be directly in the user-called allocator function
509 // for MallocHook::GetCallerStackTrace to work properly 450 // for MallocHook::GetCallerStackTrace to work properly
510 MallocHook::InvokeNewHook(result, request); 451 MallocHook::InvokeNewHook(result, request);
511 } 452 }
512 return result; 453 return result;
513 } 454 }
514 455
515 LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() { 456 LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
516 return &default_arena; 457 return &default_arena;
517 } 458 }
