Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 451 matching lines...) | |
| 462 visitStack(visitor); | 462 visitStack(visitor); |
| 463 visitPersistents(visitor); | 463 visitPersistents(visitor); |
| 464 } | 464 } |
| 465 | 465 |
| 466 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address) | 466 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address) |
| 467 { | 467 { |
| 468 // If thread is cleaning up ignore conservative pointers. | 468 // If thread is cleaning up ignore conservative pointers. |
| 469 if (m_isCleaningUp) | 469 if (m_isCleaningUp) |
| 470 return false; | 470 return false; |
| 471 | 471 |
| | 472 if (Heap::notInHeap(address)) |
| | 473 return false; |
| | 474 |
| 472 BaseHeapPage* page = heapPageFromAddress(address); | 475 BaseHeapPage* page = heapPageFromAddress(address); |
wibling-chromium 2014/05/08 07:43:41: NIT: Perhaps add a comment that this will check both normal heap pages and large objects.
Erik Corry 2014/05/08 09:26:08: Done.
| 473 if (page) | 476 if (page) { |
| 474 return page->checkAndMarkPointer(visitor, address); | 477 page->checkAndMarkPointer(visitor, address); |
| 475 // Not in heap pages, check large objects | 478 return true; |
| 476 for (int i = 0; i < NumberOfHeaps; i++) { | |
| 477 if (m_heaps[i]->checkAndMarkLargeHeapObject(visitor, address)) | |
| 478 return true; | |
| 479 } | 479 } |
| | 480 |
| | 481 Heap::addressIsNotInHeap(address); |
| 480 return false; | 482 return false; |
| 481 } | 483 } |
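Taken together, the new code replaces the per-heap large-object scan with one unified page lookup plus a negative cache in front. Below is a minimal sketch of the resulting control flow with explanatory comments; the caching behavior of Heap::notInHeap / Heap::addressIsNotInHeap is inferred from this review, not checked against the tree.

```cpp
bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address)
{
    // If the thread is cleaning up, ignore conservative pointers.
    if (m_isCleaningUp)
        return false;

    // Fast path: this address was previously recorded as not belonging
    // to any heap page, so skip the per-heap lookup entirely.
    if (Heap::notInHeap(address))
        return false;

    // heapPageFromAddress now finds both normal heap pages and large
    // object pages, so the old per-heap large-object scan is gone.
    if (BaseHeapPage* page = heapPageFromAddress(address)) {
        page->checkAndMarkPointer(visitor, address);
        return true;
    }

    // Record the miss so future lookups of this address take the
    // fast path above.
    Heap::addressIsNotInHeap(address);
    return false;
}
```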
| 482 | 484 |
| 483 #if ENABLE(GC_TRACING) | 485 #if ENABLE(GC_TRACING) |
| 484 const GCInfo* ThreadState::findGCInfo(Address address) | 486 const GCInfo* ThreadState::findGCInfo(Address address) |
| 485 { | 487 { |
| 486 BaseHeapPage* page = heapPageFromAddress(address); | 488 BaseHeapPage* page = heapPageFromAddress(address); |
| 487 if (page) { | 489 if (page) { |
| 488 return page->findGCInfo(address); | 490 return page->findGCInfo(address); |
| 489 } | 491 } |
| 490 | |
| 491 // Not in heap pages, check large objects | |
| 492 for (int i = 0; i < NumberOfHeaps; i++) { | |
| 493 if (const GCInfo* info = m_heaps[i]->findGCInfoOfLargeHeapObject(address)) | |
| 494 return info; | |
| 495 } | |
| 496 return 0; | 492 return 0; |
| 497 } | 493 } |
| 498 #endif | 494 #endif |
| 499 | 495 |
| 500 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback) | 496 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback) |
| 501 { | 497 { |
| 502 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack); | 498 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack); |
| 503 *slot = CallbackStack::Item(object, callback); | 499 *slot = CallbackStack::Item(object, callback); |
| 504 } | 500 } |
| 505 | 501 |
| (...skipping 13 matching lines...) | |
| 519 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | 515 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); |
| 520 return mutex; | 516 return mutex; |
| 521 } | 517 } |
| 522 | 518 |
| 523 // Trigger garbage collection on a 50% increase in size, but not for | 519 // Trigger garbage collection on a 50% increase in size, but not for |
| 524 // less than 2 pages. | 520 // less than 2 pages. |
| 525 static bool increasedEnoughToGC(size_t newSize, size_t oldSize) | 521 static bool increasedEnoughToGC(size_t newSize, size_t oldSize) |
| 526 { | 522 { |
| 527 if (newSize < 2 * blinkPagePayloadSize()) | 523 if (newSize < 2 * blinkPagePayloadSize()) |
| 528 return false; | 524 return false; |
| | 525 if (newSize < (1 << 20)) |
haraken 2014/05/08 05:44:58: What is this condition for?
Mads Ager (chromium) 2014/05/08 06:52:37: Could we do tuning of the GC heuristics separately?
Erik Corry 2014/05/08 09:26:08: Done.
Erik Corry 2014/05/08 09:26:08: Heuristic. Removed for now.
| | 526 return false; |
| 529 return newSize > oldSize + (oldSize >> 1); | 527 return newSize > oldSize + (oldSize >> 1); |
| 530 } | 528 } |
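Concretely, the surviving heuristic triggers a GC once the heap has grown by more than half since the last collection, but never while the heap is smaller than two Blink page payloads. A self-contained sketch with illustrative numbers; the payload constant below is an assumption, not Blink's actual blinkPagePayloadSize().

```cpp
#include <cstddef>
#include <cstdio>

// Same shape as increasedEnoughToGC above; the payload size is passed in
// because the value used here is illustrative.
static bool increasedEnoughToGC(size_t newSize, size_t oldSize, size_t pagePayload)
{
    if (newSize < 2 * pagePayload)
        return false;                          // Heap too small to bother.
    return newSize > oldSize + (oldSize >> 1); // More than 50% growth.
}

int main()
{
    const size_t kPayload = 128 * 1024; // assumption, not Blink's real value
    const size_t oldSize = 4u << 20;    // 4 MB at the last GC
    // Exactly +50% (6 MB) does not trigger; one byte more does.
    std::printf("%d\n", increasedEnoughToGC(6u << 20, oldSize, kPayload));       // 0
    std::printf("%d\n", increasedEnoughToGC((6u << 20) + 1, oldSize, kPayload)); // 1
}
```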
| 531 | 529 |
| 532 // FIXME: The heuristics are local for a thread at this | 530 // FIXME: The heuristics are local for a thread at this |
| 533 // point. Consider using heuristics that take memory for all threads | 531 // point. Consider using heuristics that take memory for all threads |
| 534 // into account. | 532 // into account. |
| 535 bool ThreadState::shouldGC() | 533 bool ThreadState::shouldGC() |
| 536 { | 534 { |
| 537 // Do not GC during sweeping. We allow allocation during | 535 // Do not GC during sweeping. We allow allocation during |
| 538 // finalization, but those allocations are not allowed | 536 // finalization, but those allocations are not allowed |
| (...skipping 108 matching lines...) | |
| 647 // This happens if a thread did not have time to wake up and sweep, | 645 // This happens if a thread did not have time to wake up and sweep, |
| 648 // before the next GC arrived. | 646 // before the next GC arrived. |
| 649 if (sweepRequested()) | 647 if (sweepRequested()) |
| 650 heap->clearMarks(); | 648 heap->clearMarks(); |
| 651 } | 649 } |
| 652 setSweepRequested(); | 650 setSweepRequested(); |
| 653 } | 651 } |
| 654 | 652 |
| 655 BaseHeapPage* ThreadState::heapPageFromAddress(Address address) | 653 BaseHeapPage* ThreadState::heapPageFromAddress(Address address) |
| 656 { | 654 { |
| 657 BaseHeapPage* page; | 655 BaseHeapPage* page = heapContainsCache()->lookup(address); |
| 658 bool found = heapContainsCache()->lookup(address, &page); | 656 if (page) |
| 659 if (found) | |
| 660 return page; | 657 return page; |
| 661 | 658 |
| 662 for (int i = 0; i < NumberOfHeaps; i++) { | 659 for (int i = 0; i < NumberOfHeaps; i++) { |
| 663 page = m_heaps[i]->heapPageFromAddress(address); | 660 page = m_heaps[i]->heapPageFromAddress(address); |
| 664 #ifndef NDEBUG | 661 #ifndef NDEBUG |
| 665 Address blinkPageAddr = roundToBlinkPageStart(address); | 662 Address blinkPageAddr = roundToBlinkPageStart(address); |
| 666 #endif | 663 #endif |
| 667 ASSERT(page == m_heaps[i]->heapPageFromAddress(blinkPageAddr)); | 664 ASSERT(!page || page->isLargeObject() || page == m_heaps[i]->heapPageFromAddress(blinkPageAddr)); |
haraken 2014/05/08 05:44:58: heapPageFromAddress(roundToBlinkPageStart(address))?
Erik Corry 2014/05/08 09:26:08: Done.
| 668 ASSERT(page == m_heaps[i]->heapPageFromAddress(blinkPageAddr + blinkPageSize - 1)); | 665 ASSERT(!page || page->isLargeObject() || page == m_heaps[i]->heapPageFromAddress(blinkPageAddr + blinkPageSize - 1)); |
haraken 2014/05/08 05:44:58: heapPageFromAddress(roundToBlinkPageEnd(address))?
Erik Corry 2014/05/08 09:26:08: Done.
| 669 if (page) | 666 if (page) { |
haraken 2014/05/08 05:44:58: You can add the two ASSERTs into this if branch.
Erik Corry 2014/05/08 09:26:08: Done.
| 670 break; | 667 heapContainsCache()->addEntry(address, page); |
| 671 } | |
| 672 heapContainsCache()->addEntry(address, page); | |
| 673 return page; // 0 if not found. | |
| 674 } | |
| 675 | |
| 676 BaseHeapPage* ThreadState::contains(Address address) | |
| 677 { | |
| 678 // Check heap contains cache first. | |
| 679 BaseHeapPage* page = heapPageFromAddress(address); | |
| 680 if (page) | |
| 681 return page; | |
| 682 // If no heap page was found check large objects. | |
| 683 for (int i = 0; i < NumberOfHeaps; i++) { | |
| 684 page = m_heaps[i]->largeHeapObjectFromAddress(address); | |
| 685 if (page) | |
| 686 return page; | 668 return page; |
| | 669 } |
| 687 } | 670 } |
| 688 return 0; | 671 return 0; |
| 689 } | 672 } |
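With ThreadState::contains() deleted, heapPageFromAddress() becomes the single lookup path, memoizing hits in the heap-contains cache. A sketch of the resulting memoization pattern, with the debug ASSERTs omitted for brevity; the cache semantics are as used in the patch, assumed rather than verified.

```cpp
BaseHeapPage* ThreadState::heapPageFromAddress(Address address)
{
    // The cache maps an address to the page containing it; a hit
    // avoids walking every heap.
    if (BaseHeapPage* page = heapContainsCache()->lookup(address))
        return page;

    for (int i = 0; i < NumberOfHeaps; i++) {
        if (BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address)) {
            // Cache positive results only; callers such as
            // checkAndMarkPointer handle the negative case themselves.
            heapContainsCache()->addEntry(address, page);
            return page;
        }
    }
    return 0; // Not a heap address.
}
```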
| 690 | 673 |
| 691 void ThreadState::getStats(HeapStats& stats) | 674 void ThreadState::getStats(HeapStats& stats) |
| 692 { | 675 { |
| 693 stats = m_stats; | 676 stats = m_stats; |
| 694 #ifndef NDEBUG | 677 #ifndef NDEBUG |
| 695 if (isConsistentForGC()) { | 678 if (isConsistentForGC()) { |
| 696 HeapStats scannedStats; | 679 HeapStats scannedStats; |
| (...skipping 153 matching lines...) | |
| 850 state->safePoint(HeapPointersOnStack); | 833 state->safePoint(HeapPointersOnStack); |
| 851 } | 834 } |
| 852 | 835 |
| 853 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads() | 836 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads() |
| 854 { | 837 { |
| 855 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ()); | 838 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ()); |
| 856 return threads; | 839 return threads; |
| 857 } | 840 } |
| 858 | 841 |
| 859 } | 842 } |