OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 472 matching lines...)
483 visitStack(visitor); | 483 visitStack(visitor); |
484 visitPersistents(visitor); | 484 visitPersistents(visitor); |
485 } | 485 } |
486 | 486 |
487 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address) | 487 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address) |
488 { | 488 { |
489 // If the thread is cleaning up, ignore conservative pointers. | 489 // If the thread is cleaning up, ignore conservative pointers. |
490 if (m_isCleaningUp) | 490 if (m_isCleaningUp) |
491 return false; | 491 return false; |
492 | 492 |
| 493 // This checks for normal pages and for large objects which span the extent |
| 494 // of several normal pages. |
493 BaseHeapPage* page = heapPageFromAddress(address); | 495 BaseHeapPage* page = heapPageFromAddress(address); |
494 if (page) | 496 if (page) { |
495 return page->checkAndMarkPointer(visitor, address); | 497 page->checkAndMarkPointer(visitor, address); |
496 // Not in heap pages, check large objects | 498 // Whether or not the pointer was within an object, it was certainly |
497 for (int i = 0; i < NumberOfHeaps; i++) { | 499 // within a page that is part of the heap, so we don't want to ask the |
498 if (m_heaps[i]->checkAndMarkLargeHeapObject(visitor, address)) | 500 // other heaps or put this address in the |
499 return true; | 501 // HeapDoesNotContainCache. |
| 502 return true; |
500 } | 503 } |
| 504 |
501 return false; | 505 return false; |
502 } | 506 } |
503 | 507 |
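The new code above folds the large-object case into the single page lookup: if heapPageFromAddress returns a page of any kind, the page itself decides whether the address points into a live object. A minimal standalone sketch of that page-level test, using hypothetical names (ObjectHeader, payloadBegin) rather than the actual Blink types:

    // Hypothetical sketch of a page-level conservative pointer check;
    // none of these names come from the patch itself. Assumes objects
    // are laid out back to back with well-formed, nonzero sizes.
    #include <cstddef>
    #include <cstdint>

    using Address = uint8_t*;

    struct ObjectHeader {
        size_t size;   // total size of the object, header included
        bool marked;
    };

    struct Page {
        Address payloadBegin;
        Address payloadEnd;

        // Returns true if address falls inside this page's payload,
        // marking the containing object as a side effect.
        bool checkAndMarkPointer(Address address)
        {
            if (address < payloadBegin || address >= payloadEnd)
                return false;
            // Walk the objects on the page to find the one containing
            // the address.
            for (Address current = payloadBegin; current < payloadEnd; ) {
                ObjectHeader* header = reinterpret_cast<ObjectHeader*>(current);
                if (address < current + header->size) {
                    header->marked = true; // conservative mark
                    return true;
                }
                current += header->size;
            }
            return false;
        }
    };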
504 #if ENABLE(GC_TRACING) | 508 #if ENABLE(GC_TRACING) |
505 const GCInfo* ThreadState::findGCInfo(Address address) | 509 const GCInfo* ThreadState::findGCInfo(Address address) |
506 { | 510 { |
507 BaseHeapPage* page = heapPageFromAddress(address); | 511 BaseHeapPage* page = heapPageFromAddress(address); |
508 if (page) { | 512 if (page) { |
509 return page->findGCInfo(address); | 513 return page->findGCInfo(address); |
510 } | 514 } |
511 | |
512 // Not in heap pages, check large objects | |
513 for (int i = 0; i < NumberOfHeaps; i++) { | |
514 if (const GCInfo* info = m_heaps[i]->findGCInfoOfLargeHeapObject(address)) | |
515 return info; | |
516 } | |
517 return 0; | 515 return 0; |
518 } | 516 } |
519 #endif | 517 #endif |
520 | 518 |
521 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback) | 519 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback) |
522 { | 520 { |
523 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack); | 521 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack); |
524 *slot = CallbackStack::Item(object, callback); | 522 *slot = CallbackStack::Item(object, callback); |
525 } | 523 } |
526 | 524 |
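pushWeakObjectPointerCallback records an (object, callback) pair to be run during the weak-processing phase. A rough, self-contained approximation of the CallbackStack pattern it relies on; the real allocateEntry takes a CallbackStack** so it can grow into a new block, whereas this sketch just appends:

    // Simplified stand-in for the CallbackStack used above. The shape
    // of Item matches its usage in pushWeakObjectPointerCallback; the
    // rest is assumption.
    #include <vector>

    using WeakPointerCallback = void (*)(void* visitor, void* object);

    class CallbackStack {
    public:
        struct Item {
            Item() : m_object(nullptr), m_callback(nullptr) { }
            Item(void* object, WeakPointerCallback callback)
                : m_object(object), m_callback(callback) { }
            void call(void* visitor) { m_callback(visitor, m_object); }
            void* m_object;
            WeakPointerCallback m_callback;
        };

        // Reserve a slot for the caller to fill in.
        Item* allocateEntry()
        {
            m_items.push_back(Item());
            return &m_items.back();
        }

        // Pop and invoke entries until the stack is drained.
        void invokeAll(void* visitor)
        {
            while (!m_items.empty()) {
                Item item = m_items.back();
                m_items.pop_back();
                item.call(visitor);
            }
        }

    private:
        std::vector<Item> m_items;
    };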
(...skipping 141 matching lines...)
668 // This happens if a thread did not have time to wake up and sweep | 666 // This happens if a thread did not have time to wake up and sweep |
669 // before the next GC arrived. | 667 // before the next GC arrived. |
670 if (sweepRequested()) | 668 if (sweepRequested()) |
671 heap->clearMarks(); | 669 heap->clearMarks(); |
672 } | 670 } |
673 setSweepRequested(); | 671 setSweepRequested(); |
674 } | 672 } |
675 | 673 |
676 BaseHeapPage* ThreadState::heapPageFromAddress(Address address) | 674 BaseHeapPage* ThreadState::heapPageFromAddress(Address address) |
677 { | 675 { |
678 BaseHeapPage* page; | 676 BaseHeapPage* cachedPage = heapContainsCache()->lookup(address); |
679 bool found = heapContainsCache()->lookup(address, &page); | 677 #ifdef NDEBUG |
680 if (found) | 678 if (cachedPage) |
681 return page; | 679 return cachedPage; |
| 680 #endif |
682 | 681 |
683 for (int i = 0; i < NumberOfHeaps; i++) { | 682 for (int i = 0; i < NumberOfHeaps; i++) { |
684 page = m_heaps[i]->heapPageFromAddress(address); | 683 BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address); |
685 #ifndef NDEBUG | 684 if (page) { |
686 Address blinkPageAddr = roundToBlinkPageStart(address); | 685 // Asserts that make sure heapPageFromAddress takes addresses from |
687 #endif | 686 // the whole aligned blinkPageSize memory area. This is necessary |
688 ASSERT(page == m_heaps[i]->heapPageFromAddress(blinkPageAddr)); | 687 // for the negative cache to work. |
689 ASSERT(page == m_heaps[i]->heapPageFromAddress(blinkPageAddr + blinkPageSize - 1)); | 688 ASSERT(page->isLargeObject() || page == m_heaps[i]->heapPageFromAddress(roundToBlinkPageStart(address))); |
690 if (page) | 689 if (roundToBlinkPageStart(address) != roundToBlinkPageEnd(address)) |
691 break; | 690 ASSERT(page->isLargeObject() || page == m_heaps[i]->heapPageFromAddress(roundToBlinkPageEnd(address) - 1)); |
| 691 ASSERT(!cachedPage || page == cachedPage); |
| 692 if (!cachedPage) |
| 693 heapContainsCache()->addEntry(address, page); |
| 694 return page; |
| 695 } |
692 } | 696 } |
693 heapContainsCache()->addEntry(address, page); | 697 ASSERT(!cachedPage); |
694 return page; // 0 if not found. | |
695 } | |
696 | |
697 BaseHeapPage* ThreadState::contains(Address address) | |
698 { | |
699 // Check heap contains cache first. | |
700 BaseHeapPage* page = heapPageFromAddress(address); | |
701 if (page) | |
702 return page; | |
703 // If no heap page was found check large objects. | |
704 for (int i = 0; i < NumberOfHeaps; i++) { | |
705 page = m_heaps[i]->largeHeapObjectFromAddress(address); | |
706 if (page) | |
707 return page; | |
708 } | |
709 return 0; | 698 return 0; |
710 } | 699 } |
711 | 700 |
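The rewritten heapPageFromAddress is a classic lookup-then-populate cache: release builds trust a cache hit immediately, while debug builds fall through to the slow scan and assert that the cached answer agrees with it. The same pattern in isolation, with generic containers standing in for HeapContainsCache and the per-thread heap list:

    // Sketch of the caching pattern above; cachedFind and its container
    // types are hypothetical, only the release/debug split mirrors the
    // patch.
    #include <cassert>
    #include <map>
    #include <utility>
    #include <vector>

    template <typename Key, typename Value>
    Value* cachedFind(std::map<Key, Value*>& cache,
                      const std::vector<std::pair<Key, Value*>>& slowTable,
                      const Key& key)
    {
        auto cached = cache.find(key);
    #ifdef NDEBUG
        if (cached != cache.end())
            return cached->second; // release build: trust the cache
    #endif
        for (const auto& entry : slowTable) {
            if (entry.first == key) {
                // Debug build: a populated cache entry must agree with
                // the slow scan.
                assert(cached == cache.end() || cached->second == entry.second);
                if (cached == cache.end())
                    cache[key] = entry.second;
                return entry.second;
            }
        }
        // A cache hit with no backing entry would be a bug.
        assert(cached == cache.end());
        return nullptr;
    }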
712 void ThreadState::getStats(HeapStats& stats) | 701 void ThreadState::getStats(HeapStats& stats) |
713 { | 702 { |
714 stats = m_stats; | 703 stats = m_stats; |
715 #ifndef NDEBUG | 704 #ifndef NDEBUG |
716 if (isConsistentForGC()) { | 705 if (isConsistentForGC()) { |
717 HeapStats scannedStats; | 706 HeapStats scannedStats; |
718 scannedStats.clear(); | 707 scannedStats.clear(); |
(...skipping 152 matching lines...)
871 state->safePoint(HeapPointersOnStack); | 860 state->safePoint(HeapPointersOnStack); |
872 } | 861 } |
873 | 862 |
874 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads() | 863 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads() |
875 { | 864 { |
876 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ()); | 865 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ()); |
877 return threads; | 866 return threads; |
878 } | 867 } |
879 | 868 |
880 } | 869 } |
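attachedThreads uses WTF's DEFINE_STATIC_LOCAL, which builds a function-local static that is constructed on first use and intentionally never destroyed, so the registry cannot be torn down while detaching threads still need it. An approximate plain-C++ equivalent; the expansion shown is an assumption about the macro, not copied from WTF:

    #include <set>

    class ThreadState;
    using AttachedThreadStateSet = std::set<ThreadState*>;

    AttachedThreadStateSet& attachedThreads()
    {
        // Constructed on first call and deliberately leaked, mirroring
        // what DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ())
        // is believed to expand to.
        static AttachedThreadStateSet& threads = *new AttachedThreadStateSet();
        return threads;
    }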