OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 451 matching lines...) | |
462 visitStack(visitor); | 462 visitStack(visitor); |
463 visitPersistents(visitor); | 463 visitPersistents(visitor); |
464 } | 464 } |
465 | 465 |
466 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address) | 466 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address) |
467 { | 467 { |
468 // If thread is cleaning up ignore conservative pointers. | 468 // If thread is cleaning up ignore conservative pointers. |
469 if (m_isCleaningUp) | 469 if (m_isCleaningUp) |
470 return false; | 470 return false; |
471 | 471 |
472 // This checks for normal pages and for large objects which span the extent | |
473 // of several normal pages. | |
472 BaseHeapPage* page = heapPageFromAddress(address); | 474 BaseHeapPage* page = heapPageFromAddress(address); |
473 if (page) | 475 if (page) { |
474 return page->checkAndMarkPointer(visitor, address); | 476 page->checkAndMarkPointer(visitor, address); |
475 // Not in heap pages, check large objects | 477 // Whether or not the pointer was within an object it was certainly |
476 for (int i = 0; i < NumberOfHeaps; i++) { | 478 // within a page that is part of the heap, so we don't want to ask the |
477 if (m_heaps[i]->checkAndMarkLargeHeapObject(visitor, address)) | 479 // other other heaps or put this address in the |
wibling-chromium 2014/05/09 10:25:20: other other -> other | |
478 return true; | 480 // HeapDoesNotContainCache. |
481 return true; | |
479 } | 482 } |
483 | |
480 return false; | 484 return false; |
481 } | 485 } |
482 | 486 |
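For readers following the change to checkAndMarkPointer: a minimal standalone sketch of the pattern the new code adopts, assuming simplified stand-in types (Page, PageTable, Visitor and checkAndMarkConservatively below are illustrative, not Blink's real classes, and the 128 KiB page size is an assumption). The point is that once an address resolves to any heap page, normal or large-object, the answer is final: no other heap is consulted and nothing is added to a negative cache.

    #include <cstdint>
    #include <unordered_map>

    using Address = unsigned char*;

    struct Visitor { };  // stand-in for the marking visitor

    struct Page {
        // Marks the object containing |address|, if any; the caller in the new
        // code no longer cares whether an object was actually hit.
        bool checkAndMarkPointer(Visitor*, Address) { return true; }
    };

    // Stand-in for the per-thread lookup that covers normal pages and large
    // objects alike (the two cases the old code handled separately).
    struct PageTable {
        std::unordered_map<std::uintptr_t, Page*> pages;

        Page* lookup(Address address) const
        {
            const std::uintptr_t pageSize = std::uintptr_t(1) << 17;  // assumed 128 KiB pages
            auto it = pages.find(reinterpret_cast<std::uintptr_t>(address) & ~(pageSize - 1));
            return it == pages.end() ? nullptr : it->second;
        }
    };

    bool checkAndMarkConservatively(const PageTable& table, Visitor* visitor, Address address)
    {
        if (Page* page = table.lookup(address)) {
            page->checkAndMarkPointer(visitor, address);
            // The address lies inside some heap page, so no other heap needs to
            // be asked and the address must not go into a negative ("heap does
            // not contain") cache, even when it did not hit a live object.
            return true;
        }
        return false;  // only a complete miss may be treated as "not in the heap"
    }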
483 #if ENABLE(GC_TRACING) | 487 #if ENABLE(GC_TRACING) |
484 const GCInfo* ThreadState::findGCInfo(Address address) | 488 const GCInfo* ThreadState::findGCInfo(Address address) |
485 { | 489 { |
486 BaseHeapPage* page = heapPageFromAddress(address); | 490 BaseHeapPage* page = heapPageFromAddress(address); |
487 if (page) { | 491 if (page) { |
488 return page->findGCInfo(address); | 492 return page->findGCInfo(address); |
489 } | 493 } |
490 | |
491 // Not in heap pages, check large objects | |
492 for (int i = 0; i < NumberOfHeaps; i++) { | |
493 if (const GCInfo* info = m_heaps[i]->findGCInfoOfLargeHeapObject(address)) | |
494 return info; | |
495 } | |
496 return 0; | 494 return 0; |
497 } | 495 } |
498 #endif | 496 #endif |
499 | 497 |
500 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback) | 498 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback) |
501 { | 499 { |
502 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack); | 500 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack); |
503 *slot = CallbackStack::Item(object, callback); | 501 *slot = CallbackStack::Item(object, callback); |
504 } | 502 } |
505 | 503 |
(...skipping 141 matching lines...) | |
647 // This happens if a thread did not have time to wake up and sweep, | 645 // This happens if a thread did not have time to wake up and sweep, |
648 // before the next GC arrived. | 646 // before the next GC arrived. |
649 if (sweepRequested()) | 647 if (sweepRequested()) |
650 heap->clearMarks(); | 648 heap->clearMarks(); |
651 } | 649 } |
652 setSweepRequested(); | 650 setSweepRequested(); |
653 } | 651 } |
654 | 652 |
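The comment about a thread that "did not have time to wake up and sweep" captures an ordering subtlety: a sweep request still pending from the previous GC means its mark bits were never consumed, so they must be cleared before the next marking phase starts. A rough sketch of that flag handling, with hypothetical simplified names (Heap, GCState and prepareForGC here are stand-ins, not the actual Blink types):

    #include <vector>

    struct Heap {
        std::vector<bool> markBits;
        void clearMarks() { markBits.assign(markBits.size(), false); }
    };

    struct GCState {
        std::vector<Heap*> heaps;
        bool sweepRequestedFlag = false;

        bool sweepRequested() const { return sweepRequestedFlag; }
        void setSweepRequested() { sweepRequestedFlag = true; }

        // Called at the start of a collection cycle.
        void prepareForGC()
        {
            for (Heap* heap : heaps) {
                // A still-pending sweep request means the previous GC's mark
                // bits were never consumed; clear them so stale marks cannot
                // leak into the marking phase that is about to start.
                if (sweepRequested())
                    heap->clearMarks();
            }
            setSweepRequested();
        }
    };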
655 BaseHeapPage* ThreadState::heapPageFromAddress(Address address) | 653 BaseHeapPage* ThreadState::heapPageFromAddress(Address address) |
656 { | 654 { |
657 BaseHeapPage* page; | 655 BaseHeapPage* cachedPage = heapContainsCache()->lookup(address); |
658 bool found = heapContainsCache()->lookup(address, &page); | 656 #ifdef NDEBUG |
wibling-chromium 2014/05/09 10:25:20: Should this be removed as well? | |
659 if (found) | 657 if (cachedPage) |
660 return page; | 658 return cachedPage; |
659 #endif | |
661 | 660 |
662 for (int i = 0; i < NumberOfHeaps; i++) { | 661 for (int i = 0; i < NumberOfHeaps; i++) { |
663 page = m_heaps[i]->heapPageFromAddress(address); | 662 BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address); |
664 #ifndef NDEBUG | 663 if (page) { |
665 Address blinkPageAddr = roundToBlinkPageStart(address); | 664 // Asserts that make sure heapPageFromAddress takes addresses from |
666 #endif | 665 // the whole aligned blinkPageSize memory area. This is necessary |
667 ASSERT(page == m_heaps[i]->heapPageFromAddress(blinkPageAddr)); | 666 // for the negative cache to work. |
668 ASSERT(page == m_heaps[i]->heapPageFromAddress(blinkPageAddr + blinkPageSize - 1)); | 667 ASSERT(page->isLargeObject() || page == m_heaps[i]->heapPageFromAddress(roundToBlinkPageStart(address))); |
669 if (page) | 668 if (roundToBlinkPageStart(address) != roundToBlinkPageEnd(address)) |
670 break; | 669 ASSERT(page->isLargeObject() || page == m_heaps[i]->heapPageFromAddress(roundToBlinkPageEnd(address) - 1)); |
670 ASSERT(!cachedPage || page == cachedPage); | |
671 if (!cachedPage) | |
672 heapContainsCache()->addEntry(address, page); | |
673 return page; | |
674 } | |
671 } | 675 } |
672 heapContainsCache()->addEntry(address, page); | 676 ASSERT(!cachedPage); |
673 return page; // 0 if not found. | |
674 } | |
675 | |
676 BaseHeapPage* ThreadState::contains(Address address) | |
677 { | |
678 // Check heap contains cache first. | |
679 BaseHeapPage* page = heapPageFromAddress(address); | |
680 if (page) | |
681 return page; | |
682 // If no heap page was found check large objects. | |
683 for (int i = 0; i < NumberOfHeaps; i++) { | |
684 page = m_heaps[i]->largeHeapObjectFromAddress(address); | |
685 if (page) | |
686 return page; | |
687 } | |
688 return 0; | 677 return 0; |
689 } | 678 } |
690 | 679 |
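The new ASSERTs lean on page-boundary rounding: a lookup result has to hold for every address in the aligned blinkPageSize region, otherwise a negative-cache entry keyed on that region could be wrong. A small sketch of that rounding arithmetic under assumed semantics for roundToBlinkPageStart/End and an assumed 128 KiB page size (names and the constant are illustrative, not Blink's actual definitions):

    #include <cassert>
    #include <cstdint>

    using Address = unsigned char*;

    const std::uintptr_t kBlinkPageSize = std::uintptr_t(1) << 17;  // assumed 128 KiB, power of two

    Address roundToPageStart(Address address)
    {
        return reinterpret_cast<Address>(
            reinterpret_cast<std::uintptr_t>(address) & ~(kBlinkPageSize - 1));
    }

    Address roundToPageEnd(Address address)
    {
        // One past the last byte of the page containing |address - 1|.
        return reinterpret_cast<Address>(
            (reinterpret_cast<std::uintptr_t>(address - 1) & ~(kBlinkPageSize - 1)) + kBlinkPageSize);
    }

    int main()
    {
        Address a = reinterpret_cast<Address>(5 * kBlinkPageSize + 100);
        assert(roundToPageStart(a) == reinterpret_cast<Address>(5 * kBlinkPageSize));
        assert(roundToPageEnd(a) == reinterpret_cast<Address>(6 * kBlinkPageSize));
        // Start and end coincide only when the address sits exactly on a page
        // boundary, which is why the second ASSERT in the patch is guarded by
        // that comparison.
        return 0;
    }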
691 void ThreadState::getStats(HeapStats& stats) | 680 void ThreadState::getStats(HeapStats& stats) |
692 { | 681 { |
693 stats = m_stats; | 682 stats = m_stats; |
694 #ifndef NDEBUG | 683 #ifndef NDEBUG |
695 if (isConsistentForGC()) { | 684 if (isConsistentForGC()) { |
696 HeapStats scannedStats; | 685 HeapStats scannedStats; |
697 scannedStats.clear(); | 686 scannedStats.clear(); |
(...skipping 152 matching lines...) | |
850 state->safePoint(HeapPointersOnStack); | 839 state->safePoint(HeapPointersOnStack); |
851 } | 840 } |
852 | 841 |
853 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads() | 842 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads() |
854 { | 843 { |
855 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ()); | 844 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ()); |
856 return threads; | 845 return threads; |
857 } | 846 } |
858 | 847 |
859 } | 848 } |