Chromium Code Reviews

Unified Diff: Source/wtf/PartitionAlloc.cpp

Issue 136333002: PartitionAlloc: simplify PartitionPage structure some more. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 11 months ago
/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
(...skipping 81 matching lines...)
        double wasteRatio = (double) waste / (double) pageSize;
        if (wasteRatio < bestWasteRatio) {
            bestWasteRatio = wasteRatio;
            bestPages = i;
        }
    }
    ASSERT(bestPages > 0);
    return bestPages;
}

-static ALWAYS_INLINE void partitionPageMarkFree(PartitionPage* page)
-{
-    ASSERT(!partitionPageIsFree(page));
-    page->flags |= kPartitionPageFlagFree;
-}
-
static void parititonAllocBaseInit(PartitionRootBase* root)
{
    ASSERT(!root->initialized);

    spinLockLock(&PartitionRootBase::gInitializedLock);
    if (!PartitionRootBase::gInitialized) {
        PartitionRootBase::gInitialized = true;
        // We mark the seed page as free to make sure it is skipped by our
        // logic to find a new active page.
-        partitionPageMarkFree(&PartitionRootBase::gSeedPage);
        PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric::gSeedPage;
    }
    spinLockUnlock(&PartitionRootBase::gInitializedLock);

    root->initialized = true;
    root->totalSizeOfSuperPages = 0;
    root->nextSuperPage = 0;
    root->nextPartitionPage = 0;
    root->nextPartitionPageEnd = 0;
    root->firstExtent = 0;
(...skipping 108 matching lines...)
    // which tries to overflow to a non-existant order.
    *bucketPtr = &PartitionRootGeneric::gPagedBucket;
}

static bool partitionAllocShutdownBucket(PartitionBucket* bucket)
{
    // Failure here indicates a memory leak.
    bool noLeaks = !bucket->numFullPages;
    PartitionPage* page = bucket->activePagesHead;
    while (page) {
-        if (!partitionPageIsFree(page) && page->numAllocatedSlots)
+        if (page->numAllocatedSlots)
            noLeaks = false;
-        page = page->activePageNext;
+        page = page->next;
    }

    return noLeaks;
}

static void partitionAllocBaseShutdown(PartitionRootBase* root)
{
    ASSERT(root->initialized);
    root->initialized = false;

(...skipping 127 matching lines...)
}

static ALWAYS_INLINE size_t partitionBucketPartitionPages(const PartitionBucket* bucket)
{
    return (bucket->numSystemPagesPerSlotSpan + (kNumSystemPagesPerPartitionPage - 1)) / kNumSystemPagesPerPartitionPage;
}

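partitionBucketPartitionPages above is a plain ceiling division: a slot span occupying some number of system pages is rounded up to whole partition pages. A standalone sketch of the same arithmetic, with kNumSystemPagesPerPartitionPage assumed to be 4 purely for illustration (the real constant and types live in PartitionAlloc.h) and partitionPagesForSlotSpan as a hypothetical stand-in name:

#include <cstddef>
#include <cstdio>

// Assumed value for illustration only; the real constant is defined in PartitionAlloc.h.
static const size_t kNumSystemPagesPerPartitionPage = 4;

// Ceiling division: the number of whole partition pages needed to hold a slot
// span of numSystemPagesPerSlotSpan system pages.
static size_t partitionPagesForSlotSpan(size_t numSystemPagesPerSlotSpan)
{
    return (numSystemPagesPerSlotSpan + (kNumSystemPagesPerPartitionPage - 1)) / kNumSystemPagesPerPartitionPage;
}

int main()
{
    // A 6-system-page slot span needs 2 partition pages; a 4-page span needs exactly 1.
    printf("%zu %zu\n", partitionPagesForSlotSpan(6), partitionPagesForSlotSpan(4));
    return 0;
}
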
static ALWAYS_INLINE void partitionPageReset(PartitionPage* page, PartitionBucket* bucket)
{
    ASSERT(page != &PartitionRootGeneric::gSeedPage);
-    page->flags = 0;
    page->numAllocatedSlots = 0;
    page->numUnprovisionedSlots = partitionBucketSlots(bucket);
    ASSERT(page->numUnprovisionedSlots);
    page->bucket = bucket;
+    page->next = 0;
    // NULLing the freelist is not strictly necessary but it makes an ASSERT in partitionPageFillFreelist simpler.
-    partitionPageSetFreelistHead(page, 0);
+    page->freelistHead = 0;
    page->pageOffset = 0;
    size_t numPartitionPages = partitionBucketPartitionPages(bucket);
    size_t i;
    char* pageCharPtr = reinterpret_cast<char*>(page);
    for (i = 1; i < numPartitionPages; ++i) {
        pageCharPtr += kPageMetadataSize;
        PartitionPage* secondaryPage = reinterpret_cast<PartitionPage*>(pageCharPtr);
        secondaryPage->pageOffset = i;
    }
}

static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page)
{
    ASSERT(page != &PartitionRootGeneric::gSeedPage);
    size_t numSlots = page->numUnprovisionedSlots;
    ASSERT(numSlots);
    PartitionBucket* bucket = page->bucket;
    // We should only get here when _every_ slot is either used or unprovisioned.
    // (The third state is "on the freelist". If we have a non-empty freelist, we should not get here.)
    ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket));
    // Similarly, make explicitly sure that the freelist is empty.
-    ASSERT(!partitionPageFreelistHead(page));
+    ASSERT(!page->freelistHead);
+    ASSERT(page->numAllocatedSlots >= 0);

    size_t size = bucket->slotSize;
    char* base = reinterpret_cast<char*>(partitionPageToPointer(page));
    char* returnObject = base + (size * page->numAllocatedSlots);
    char* firstFreelistPointer = returnObject + size;
    char* firstFreelistPointerExtent = firstFreelistPointer + sizeof(PartitionFreelistEntry*);
    // Our goal is to fault as few system pages as possible. We calculate the
    // page containing the "end" of the returned slot, and then allow freelist
    // pointers to be written up to the end of that page.
    char* subPageLimit = reinterpret_cast<char*>((reinterpret_cast<uintptr_t>(firstFreelistPointer) + kSystemPageOffsetMask) & kSystemPageBaseMask);
(...skipping 16 matching lines...)
    // We always return an object slot -- that's the +1 below.
    // We do not neccessarily create any new freelist entries, because we cross sub page boundaries frequently for large bucket sizes.
    ASSERT(numNewFreelistEntries + 1 <= numSlots);
    numSlots -= (numNewFreelistEntries + 1);
    page->numUnprovisionedSlots = numSlots;
    page->numAllocatedSlots++;

    if (LIKELY(numNewFreelistEntries)) {
        char* freelistPointer = firstFreelistPointer;
        PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>(freelistPointer);
-        partitionPageSetFreelistHead(page, entry);
+        page->freelistHead = entry;
        while (--numNewFreelistEntries) {
            freelistPointer += size;
            PartitionFreelistEntry* nextEntry = reinterpret_cast<PartitionFreelistEntry*>(freelistPointer);
            entry->next = partitionFreelistMask(nextEntry);
            entry = nextEntry;
        }
        entry->next = partitionFreelistMask(0);
    } else {
-        partitionPageSetFreelistHead(page, 0);
+        page->freelistHead = 0;
    }
    return returnObject;
}

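The "fault as few system pages as possible" comment above comes down to rounding the first freelist pointer up to the next system page boundary and only threading freelist entries below that limit; the remaining slots stay counted in numUnprovisionedSlots for a later call. A minimal sketch of that rounding, assuming a 4 KB system page for illustration (the real kSystemPageSize, kSystemPageOffsetMask and kSystemPageBaseMask come from PartitionAlloc.h):

#include <cstdint>
#include <cstdio>

// Assumed 4 KB system page for illustration; the real constants are in PartitionAlloc.h.
static const uintptr_t kSystemPageSize = 4096;
static const uintptr_t kSystemPageOffsetMask = kSystemPageSize - 1;
static const uintptr_t kSystemPageBaseMask = ~kSystemPageOffsetMask;

// Round an address up to the next system page boundary (a no-op if it is
// already page-aligned). Freelist entries are only written below this limit,
// so provisioning one slot never touches -- and never faults in -- extra pages.
static uintptr_t subPageLimit(uintptr_t firstFreelistPointer)
{
    return (firstFreelistPointer + kSystemPageOffsetMask) & kSystemPageBaseMask;
}

int main()
{
    printf("%#llx\n", static_cast<unsigned long long>(subPageLimit(0x10010))); // prints 0x11000
    printf("%#llx\n", static_cast<unsigned long long>(subPageLimit(0x11000))); // prints 0x11000
    return 0;
}
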
// This helper function scans the active page list for a suitable new active
// page, starting at the current active page, and optionally skipping the
// current active page as a candidate.
// When it finds a suitable new active page (one that has free slots), it is
// set as the new active page and true is returned. If there is no suitable new
// active page, false is returned.
static ALWAYS_INLINE bool partitionSetNewActivePage(PartitionBucket* bucket, bool currentPageOk)
{
    PartitionPage* page = bucket->activePagesHead;
+    if (page == &PartitionRootBase::gSeedPage) {
+        ASSERT(!page->next);
+        return false;
+    }
+
    if (!currentPageOk)
-        page = page->activePageNext;
+        page = page->next;

-    for (; page; page = page->activePageNext) {
+    for (; page; page = page->next) {
        ASSERT(page->bucket == bucket->activePagesHead->bucket);

-        // Skip over free pages. We need this check first so that we can trust
-        // that the page union field really containts a freelist pointer.
-        if (UNLIKELY(partitionPageIsFree(page)))
-            continue;
-
        // Page is usable if it has something on the freelist, or unprovisioned
        // slots that can be turned into a freelist.
-        if (LIKELY(partitionPageFreelistHead(page) != 0) || LIKELY(page->numUnprovisionedSlots)) {
+        if (LIKELY(page->freelistHead != 0) || LIKELY(page->numUnprovisionedSlots)) {
            bucket->activePagesHead = page;
            return true;
        }
-        // If we get here, we found a full page. Skip over it too, and also tag
-        // it as full (via a negative value). We need it tagged so that free'ing
-        // can tell, and move it back into the active page list.
-        ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSlots(bucket)));
-        page->numAllocatedSlots = -page->numAllocatedSlots;
-        ++bucket->numFullPages;
+
+        ASSERT(page->numAllocatedSlots >= 0);
+        if (LIKELY(page->numAllocatedSlots == 0)) {
+            // We hit a free page, so shepherd it on to the free page list.
+            page->next = bucket->freePagesHead;
+            bucket->freePagesHead = page;
+        } else {
+            // If we get here, we found a full page. Skip over it too, and also
+            // tag it as full (via a negative value). We need it tagged so that
+            // free'ing can tell, and move it back into the active page list.
+            ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSlots(bucket)));
+            page->numAllocatedSlots = -page->numAllocatedSlots;
+            ++bucket->numFullPages;
+        }
    }

-    bucket->activePagesHead->activePageNext = 0;
+    bucket->activePagesHead->next = 0;
    return false;
}

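With the page flags and the freePageNext union gone, the scan above classifies a page purely from its counters: a page with a freelist or unprovisioned slots becomes the new active head, an empty page (numAllocatedSlots == 0) is shepherded onto the bucket's free page list, and a full page gets its slot count negated and bumps numFullPages. A simplified, self-contained sketch of that classification over a singly-linked list (the structs are hypothetical stand-ins for the real PartitionPage and PartitionBucket metadata, and the sketch saves the successor before splicing a page onto the free list, since splicing rewrites its next pointer):

#include <cstdio>

// Hypothetical, pared-down stand-ins for the real PartitionPage and
// PartitionBucket metadata in PartitionAlloc.h.
struct Page {
    Page* next;
    void* freelistHead;
    int numAllocatedSlots;      // negated once a page has been tagged as full
    int numUnprovisionedSlots;
};

struct Bucket {
    Page* activePagesHead;
    Page* freePagesHead;
    int numFullPages;
};

// Same shape as the new partitionSetNewActivePage: walk the singly-linked
// active list, stop at the first usable page, shepherd empty pages onto the
// bucket's free page list and tag full pages along the way.
static bool setNewActivePage(Bucket* bucket)
{
    Page* page = bucket->activePagesHead;
    while (page) {
        Page* nextPage = page->next;
        if (page->freelistHead || page->numUnprovisionedSlots) {
            bucket->activePagesHead = page;                     // usable: new active head
            return true;
        }
        if (!page->numAllocatedSlots) {
            page->next = bucket->freePagesHead;                 // empty: push onto free list
            bucket->freePagesHead = page;
        } else {
            page->numAllocatedSlots = -page->numAllocatedSlots; // full: tag with negative count
            ++bucket->numFullPages;
        }
        page = nextPage;
    }
    return false;
}

int main()
{
    int dummySlot = 0;
    Page usable = { 0, &dummySlot, 3, 0 }; // has a freelist: becomes the active head
    Page empty = { &usable, 0, 0, 0 };     // fully unused: moves to the free list
    Page full = { &empty, 0, 4, 0 };       // every slot allocated: gets tagged
    Bucket bucket = { &full, 0, 0 };

    bool found = setNewActivePage(&bucket);
    printf("found=%d fullPages=%d activeIsUsable=%d freeIsEmpty=%d\n",
        found, bucket.numFullPages,
        bucket.activePagesHead == &usable, bucket.freePagesHead == &empty);
    return 0;
}

Because free and full pages are filtered out as the list is walked, a single next pointer can serve both the active list and the free page list without any extra per-page state.
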
-static ALWAYS_INLINE void partitionPageSetFreePageNext(PartitionPage* page, PartitionPage* nextFree)
-{
-    ASSERT(partitionPageIsFree(page));
-    page->u.freePageNext = nextFree;
-}
-
-static ALWAYS_INLINE PartitionPage* partitionPageFreePageNext(PartitionPage* page)
-{
-    ASSERT(partitionPageIsFree(page));
-    return page->u.freePageNext;
-}
-
static ALWAYS_INLINE bool partitionBucketIsPaged(PartitionBucket* bucket)
{
    return !bucket->slotSize;
}

void* partitionAllocSlowPath(PartitionRootBase* root, size_t size, PartitionBucket* bucket)
{
    // The slow path is called when the freelist is empty.
-    ASSERT(!partitionPageFreelistHead(bucket->activePagesHead));
+    ASSERT(!bucket->activePagesHead->freelistHead);

    // For the partitionAllocGeneric API, we have a bunch of buckets marked
    // as special cases. We bounce them through to the slow path so that we
    // can still have a blazing fast hot path due to lack of corner-case
    // branches.
    if (UNLIKELY(partitionBucketIsPaged(bucket))) {
        RELEASE_ASSERT(size <= INT_MAX);
        ASSERT(size > kGenericMaxBucketed);
        // This will be going away real soon now.
        return fastMalloc(size);
    }

    // First, look for a usable page in the existing active pages list.
    // Change active page, accepting the current page as a candidate.
    if (LIKELY(partitionSetNewActivePage(bucket, true))) {
        PartitionPage* newPage = bucket->activePagesHead;
-        if (LIKELY(partitionPageFreelistHead(newPage) != 0)) {
-            PartitionFreelistEntry* ret = partitionPageFreelistHead(newPage);
-            partitionPageSetFreelistHead(newPage, partitionFreelistMask(ret->next));
+        if (LIKELY(newPage->freelistHead != 0)) {
+            PartitionFreelistEntry* ret = newPage->freelistHead;
+            newPage->freelistHead = partitionFreelistMask(ret->next);
            newPage->numAllocatedSlots++;
            return ret;
        }
        ASSERT(newPage->numUnprovisionedSlots);
        return partitionPageAllocAndFillFreelist(newPage);
    }

    // Second, look in our list of freed but reserved pages.
    PartitionPage* newPage = bucket->freePagesHead;
    if (LIKELY(newPage != 0)) {
        ASSERT(newPage != &PartitionRootGeneric::gSeedPage);
-        bucket->freePagesHead = partitionPageFreePageNext(newPage);
+        ASSERT(!newPage->freelistHead);
+        ASSERT(!newPage->numAllocatedSlots);
+        ASSERT(!newPage->numUnprovisionedSlots);
+        bucket->freePagesHead = newPage->next;
    } else {
        // Third. If we get here, we need a brand new page.
        size_t numPartitionPages = partitionBucketPartitionPages(bucket);
        void* rawNewPage = partitionAllocPartitionPages(root, numPartitionPages);
        // Skip the alignment check because it depends on page->bucket, which is not yet set.
        newPage = partitionPointerToPageNoAlignmentCheck(rawNewPage);
    }

-    newPage->activePageNext = 0;
    partitionPageReset(newPage, bucket);
    bucket->activePagesHead = newPage;
    return partitionPageAllocAndFillFreelist(newPage);
}

-static ALWAYS_INLINE void partitionPutPageOnFreelist(PartitionPage* page, PartitionBucket* bucket)
+static ALWAYS_INLINE void partitionFreePage(PartitionPage* page)
{
+    ASSERT(page->freelistHead);
+    ASSERT(!page->numAllocatedSlots);
    partitionUnusePage(page);
-    // We actually leave the freed page in the active list as well as
-    // putting it on the free list. We'll evict it from the active list soon
-    // enough in partitionAllocSlowPath. Pulling this trick enables us to
-    // use a singly-linked page list for all cases, which is critical in
-    // keeping the page metadata structure down to 32-bytes in size.
-    partitionPageMarkFree(page);
-    partitionPageSetFreePageNext(page, bucket->freePagesHead);
-    bucket->freePagesHead = page;
+    // We actually leave the freed page in the active list. We'll sweep it on
+    // to the free page list when we next walk the active page list. Pulling
+    // this trick enables us to use a singly-linked page list for all cases,
+    // which is critical in keeping the page metadata structure down to 32
+    // bytes in size.
+    page->freelistHead = 0;
+    page->numUnprovisionedSlots = 0;
}

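The counterpart to the sweep shown earlier is the free path: when the last allocation on a page is freed, the page is wiped but deliberately left linked into the active page list, and partitionFreeSlowPath splices it back in behind the new active head if the head had to change. A hedged sketch of those two steps, again using hypothetical stand-in structs rather than the real metadata:

#include <cassert>

// Hypothetical stand-ins for the real metadata, as in the earlier sketch.
struct Page {
    Page* next;
    void* freelistHead;
    int numAllocatedSlots;
    int numUnprovisionedSlots;
};

struct Bucket {
    Page* activePagesHead;
};

// Shape of the new partitionFreePage: wipe the page, but deliberately do NOT
// unlink it from the singly-linked active list. The next walk of that list
// sees numAllocatedSlots == 0 with no freelist and moves it to the free list.
static void freePage(Page* page)
{
    assert(page->freelistHead);
    assert(!page->numAllocatedSlots);
    // The real code calls partitionUnusePage(page) here to decommit the payload.
    page->freelistHead = 0;
    page->numUnprovisionedSlots = 0;
}

// Shape of the relink step in partitionFreeSlowPath: if the freed page was the
// active head and a new head was found, splice the freed page back in right
// after the new head so the single next pointer never loses track of it.
static void relinkAfterNewHead(Bucket* bucket, Page* page)
{
    Page* currentPage = bucket->activePagesHead;
    page->next = currentPage->next;
    currentPage->next = page;
}

int main()
{
    int dummySlot = 0;
    Page newHead = { 0, &dummySlot, 2, 0 };          // the replacement active head
    Page justFreed = { &newHead, &dummySlot, 0, 0 }; // its last slot was just freed
    Bucket bucket = { &newHead };

    freePage(&justFreed);
    relinkAfterNewHead(&bucket, &justFreed);
    // Active list is now: newHead -> justFreed -> null; nothing was lost.
    assert(bucket.activePagesHead->next == &justFreed && !justFreed.next);
    return 0;
}

The next walk of the active list then finds the wiped page, with numAllocatedSlots == 0 and no freelist, and shepherds it onto the bucket's free page list.
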
void partitionFreeSlowPath(PartitionPage* page)
{
    PartitionBucket* bucket = page->bucket;
    ASSERT(page != &PartitionRootGeneric::gSeedPage);
    ASSERT(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage);
    if (LIKELY(page->numAllocatedSlots == 0)) {
        // Page became fully unused.
        // If it's the current page, change it!
        if (LIKELY(page == bucket->activePagesHead)) {
            // Change active page, rejecting the current page as a candidate.
            if (!partitionSetNewActivePage(bucket, false)) {
                // We do not free the last active page in a bucket.
                // To do so is a serious performance issue as we can get into
                // a repeating state change between 0 and 1 objects of a given
                // size allocated; and each state change incurs either a system
                // call or a page fault, or more.
-                ASSERT(!page->activePageNext);
+                ASSERT(!page->next);
                return;
            }
+            // Link the to-be-freed page back in after the new current page, to
+            // avoid losing a reference to it.
+            PartitionPage* currentPage = bucket->activePagesHead;
+            page->next = currentPage->next;
+            currentPage->next = page;
        }
-        partitionPutPageOnFreelist(page, bucket);
+        partitionFreePage(page);
    } else {
        // Ensure that the page is full. That's the only valid case if we
        // arrive here.
        ASSERT(page->numAllocatedSlots < 0);
        page->numAllocatedSlots = -page->numAllocatedSlots - 2;
        ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSlots(bucket) - 1));
        // Fully used page became partially used. It must be put back on the
        // non-full page list. Also make it the current page to increase the
        // chances of it being filled up again. The old current page will be
        // the next page.
-        page->activePageNext = bucket->activePagesHead;
+        page->next = bucket->activePagesHead;
        bucket->activePagesHead = page;
        --bucket->numFullPages;
        // Special case: for a partition page with just a single slot, it may
        // now be empty and we want to run it through the empty logic.
        if (UNLIKELY(page->numAllocatedSlots == 0))
            partitionFreeSlowPath(page);
        // Note: there's an opportunity here to look to see if the old active
        // page is now freeable.
    }
}
(...skipping 90 matching lines...)
    for (i = 0; i < root.numBuckets; ++i) {
        const PartitionBucket& bucket = root.buckets()[i];
        if (bucket.activePagesHead == &PartitionRootGeneric::gSeedPage && !bucket.freePagesHead && !bucket.numFullPages) {
            // Empty bucket with no freelist or full pages. Skip reporting it.
            continue;
        }
        size_t numFreePages = 0;
        PartitionPage* freePages = bucket.freePagesHead;
        while (freePages) {
            ++numFreePages;
-            freePages = partitionPageFreePageNext(freePages);
+            freePages = freePages->next;
        }
        size_t bucketSlotSize = bucket.slotSize;
        size_t bucketNumSlots = partitionBucketSlots(&bucket);
        size_t bucketUsefulStorage = bucketSlotSize * bucketNumSlots;
        size_t bucketPageSize = bucket.numSystemPagesPerSlotSpan * kSystemPageSize;
        size_t bucketWaste = bucketPageSize - bucketUsefulStorage;
        size_t numActiveBytes = bucket.numFullPages * bucketUsefulStorage;
        size_t numResidentBytes = bucket.numFullPages * bucketPageSize;
        size_t numFreeableBytes = 0;
        size_t numActivePages = 0;
        const PartitionPage* page = bucket.activePagesHead;
        do {
            if (page != &PartitionRootGeneric::gSeedPage) {
                ++numActivePages;
                numActiveBytes += (page->numAllocatedSlots * bucketSlotSize);
                size_t pageBytesResident = (bucketNumSlots - page->numUnprovisionedSlots) * bucketSlotSize;
                // Round up to system page size.
                pageBytesResident = (pageBytesResident + kSystemPageOffsetMask) & kSystemPageBaseMask;
                numResidentBytes += pageBytesResident;
                if (!page->numAllocatedSlots)
                    numFreeableBytes += pageBytesResident;
            }
-            page = page->activePageNext;
+            page = page->next;
        } while (page != bucket.activePagesHead);
        totalLive += numActiveBytes;
        totalResident += numResidentBytes;
        totalFreeable += numFreeableBytes;
        printf("bucket size %zu (pageSize %zu waste %zu): %zu alloc/%zu commit/%zu freeable bytes, %zu/%zu/%zu full/active/free pages\n", bucketSlotSize, bucketPageSize, bucketWaste, numActiveBytes, numResidentBytes, numFreeableBytes, static_cast<size_t>(bucket.numFullPages), numActivePages, numFreePages);
    }
    printf("total live: %zu bytes\n", totalLive);
    printf("total resident: %zu bytes\n", totalResident);
    printf("total freeable: %zu bytes\n", totalFreeable);
    fflush(stdout);
}

#endif // !NDEBUG

} // namespace WTF