Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
| 10 #include "src/compilation-cache.h" | 10 #include "src/compilation-cache.h" |
| (...skipping 577 matching lines...) | |
| 588 return NULL; | 588 return NULL; |
| 589 } | 589 } |
| 590 | 590 |
| 591 | 591 |
| 592 // Returns zero for pages that have so little fragmentation that it is not | 592 // Returns zero for pages that have so little fragmentation that it is not |
| 593 // worth defragmenting them. Otherwise a positive integer that gives an | 593 // worth defragmenting them. Otherwise a positive integer that gives an |
| 594 // estimate of fragmentation on an arbitrary scale. | 594 // estimate of fragmentation on an arbitrary scale. |
| 595 static int FreeListFragmentation(PagedSpace* space, Page* p) { | 595 static int FreeListFragmentation(PagedSpace* space, Page* p) { |
| 596 // If page was not swept then there are no free list items on it. | 596 // If page was not swept then there are no free list items on it. |
| 597 if (!p->WasSwept()) { | 597 if (!p->WasSwept()) { |
| 598 if (FLAG_trace_fragmentation) { | 598 if (FLAG_trace_fragmentation_verbose) { |
| 599 PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p), | 599 PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p), |
| 600 AllocationSpaceName(space->identity()), p->LiveBytes()); | 600 AllocationSpaceName(space->identity()), p->LiveBytes()); |
| 601 } | 601 } |
| 602 return 0; | 602 return FLAG_always_compact ? 1 : 0; |
| 603 } | 603 } |
| 604 | 604 |
| 605 PagedSpace::SizeStats sizes; | 605 PagedSpace::SizeStats sizes; |
| 606 space->ObtainFreeListStatistics(p, &sizes); | 606 space->ObtainFreeListStatistics(p, &sizes); |
| 607 | 607 |
| 608 intptr_t ratio; | 608 intptr_t ratio; |
| 609 intptr_t ratio_threshold; | 609 intptr_t ratio_threshold; |
| 610 intptr_t area_size = space->AreaSize(); | 610 intptr_t area_size = space->AreaSize(); |
| 611 if (space->identity() == CODE_SPACE) { | 611 if (space->identity() == CODE_SPACE) { |
| 612 ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size; | 612 ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size; |
| 613 ratio_threshold = 10; | 613 ratio_threshold = 10; |
| 614 } else { | 614 } else { |
| 615 ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size; | 615 ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size; |
| 616 ratio_threshold = 15; | 616 ratio_threshold = 15; |
| 617 } | 617 } |
| 618 | 618 |
| 619 if (FLAG_trace_fragmentation) { | 619 if (FLAG_trace_fragmentation_verbose) { |
| 620 PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n", | 620 PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n", |
| 621 reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()), | 621 reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()), |
| 622 static_cast<int>(sizes.small_size_), | 622 static_cast<int>(sizes.small_size_), |
| 623 static_cast<double>(sizes.small_size_ * 100) / area_size, | 623 static_cast<double>(sizes.small_size_ * 100) / area_size, |
| 624 static_cast<int>(sizes.medium_size_), | 624 static_cast<int>(sizes.medium_size_), |
| 625 static_cast<double>(sizes.medium_size_ * 100) / area_size, | 625 static_cast<double>(sizes.medium_size_ * 100) / area_size, |
| 626 static_cast<int>(sizes.large_size_), | 626 static_cast<int>(sizes.large_size_), |
| 627 static_cast<double>(sizes.large_size_ * 100) / area_size, | 627 static_cast<double>(sizes.large_size_ * 100) / area_size, |
| 628 static_cast<int>(sizes.huge_size_), | 628 static_cast<int>(sizes.huge_size_), |
| 629 static_cast<double>(sizes.huge_size_ * 100) / area_size, | 629 static_cast<double>(sizes.huge_size_ * 100) / area_size, |
| 630 (ratio > ratio_threshold) ? "[fragmented]" : ""); | 630 (ratio > ratio_threshold) ? "[fragmented]" : ""); |
| 631 } | 631 } |
| 632 | 632 |
| 633 if (FLAG_always_compact && sizes.Total() != area_size) { | 633 if (FLAG_always_compact && sizes.Total() != area_size) { |
| 634 return 1; | 634 return 1; |
| 635 } | 635 } |
| 636 | 636 |
| 637 if (ratio <= ratio_threshold) return 0; // Not fragmented. | 637 if (ratio <= ratio_threshold) return 0; // Not fragmented. |
| 638 | 638 |
| 639 return static_cast<int>(ratio - ratio_threshold); | 639 return static_cast<int>(ratio - ratio_threshold); |
| 640 } | 640 } |
| 641 | 641 |
| 642 | 642 |
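As an aside for reviewers, the weighted scoring above is compact enough to check by hand. Below is a minimal standalone sketch: the bucket weights and thresholds are copied from the function, while `SizeStats` here is a simplified stand-in for `PagedSpace::SizeStats` and the free-list mix in `main` is invented for illustration.

```cpp
#include <cstdint>
#include <cstdio>

// Simplified stand-in for PagedSpace::SizeStats: free-list bytes per bucket.
struct SizeStats {
  int64_t small_size_, medium_size_, large_size_, huge_size_;
};

// Mirrors FreeListFragmentation's scoring. Small chunks are weighted most
// heavily because they are the least reusable for future allocations.
static int64_t FragmentationScore(const SizeStats& s, int64_t area_size,
                                  bool is_code_space) {
  int64_t ratio, threshold;
  if (is_code_space) {
    ratio = (s.medium_size_ * 10 + s.large_size_ * 2) * 100 / area_size;
    threshold = 10;
  } else {
    ratio = (s.small_size_ * 5 + s.medium_size_) * 100 / area_size;
    threshold = 15;
  }
  return ratio <= threshold ? 0 : ratio - threshold;  // 0 == not worth it
}

int main() {
  const int64_t kAreaSize = 1000 * 1024;       // ~1MB of usable page area
  SizeStats s = {40 * 1024, 30 * 1024, 0, 0};  // invented free-list mix
  // (40K * 5 + 30K) * 100 / 1000K = 23 > 15, so the page scores 23 - 15 = 8.
  std::printf("score = %lld\n",
              static_cast<long long>(FragmentationScore(s, kAreaSize, false)));
  return 0;
}
```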
| 643 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) { | 643 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) { |
| 644 DCHECK(space->identity() == OLD_POINTER_SPACE || | 644 DCHECK(space->identity() == OLD_POINTER_SPACE || |
| 645 space->identity() == OLD_DATA_SPACE || | 645 space->identity() == OLD_DATA_SPACE || |
| 646 space->identity() == CODE_SPACE); | 646 space->identity() == CODE_SPACE); |
| 647 | 647 |
| 648 static const int kMaxMaxEvacuationCandidates = 1000; | 648 static const int kMaxMaxEvacuationCandidates = 10000; |
> ulan, 2015/03/20 10:56:53:
> Does this help? I think 1000 pages (1GB) should be …
>
> Erik Corry, 2015/03/20 11:39:23:
> That rather depends on the total heap size. Not e…
| 649 int number_of_pages = space->CountTotalPages(); | 649 int number_of_pages = space->CountTotalPages(); |
| 650 int max_evacuation_candidates = | 650 int max_evacuation_candidates = |
| 651 static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1); | 651 static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1); |
| 652 | 652 |
| 653 if (FLAG_stress_compaction || FLAG_always_compact) { | 653 if (FLAG_stress_compaction || FLAG_always_compact) { |
| 654 max_evacuation_candidates = kMaxMaxEvacuationCandidates; | 654 max_evacuation_candidates = kMaxMaxEvacuationCandidates; |
| 655 } | 655 } |
| 656 | 656 |
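The square-root cap grows slowly with heap size, which is why raising `kMaxMaxEvacuationCandidates` matters mainly for the stress/always-compact paths. A quick sketch of the limit at a few invented page counts:

```cpp
#include <cmath>
#include <cstdio>

int main() {
  // max_evacuation_candidates = sqrt(number_of_pages / 2) + 1, as in the CL.
  const int page_counts[] = {64, 512, 4096, 20000};
  for (int pages : page_counts) {
    int limit = static_cast<int>(std::sqrt(pages / 2.0) + 1);
    std::printf("%6d pages -> %3d candidates\n", pages, limit);
  }
  // Prints 6, 17, 46, and 101 candidates respectively: even a ~20GB heap
  // (assuming 1MB pages, as the reviewers' "1000 pages (1GB)" remark
  // implies) stays far below the raised cap of 10000.
  return 0;
}
```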
| 657 class Candidate { | 657 class Candidate { |
| 658 public: | 658 public: |
| (...skipping 26 matching lines...) | |
| 685 | 685 |
| 686 | 686 |
| 687 if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) { | 687 if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) { |
| 688 // If over-usage is very high (more than a third of the space), we | 688 // If over-usage is very high (more than a third of the space), we |
| 689 // try to free all mostly empty pages. We expect that almost empty | 689 // try to free all mostly empty pages. We expect that almost empty |
| 690 // pages are even easier to compact so bump the limit even more. | 690 // pages are even easier to compact so bump the limit even more. |
| 691 mode = REDUCE_MEMORY_FOOTPRINT; | 691 mode = REDUCE_MEMORY_FOOTPRINT; |
| 692 max_evacuation_candidates *= 2; | 692 max_evacuation_candidates *= 2; |
| 693 } | 693 } |
| 694 | 694 |
| 695 if (FLAG_always_compact) { | |
| 696 max_evacuation_candidates = kMaxMaxEvacuationCandidates; | |
| 697 } | |
| 698 | |
| 695 if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) { | 699 if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) { |
| 696 PrintF( | 700 PrintF( |
| 697 "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), " | 701 "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), " |
| 698 "evacuation candidate limit: %d\n", | 702 "evacuation candidate limit: %d\n", |
| 699 static_cast<double>(over_reserved) / MB, | 703 static_cast<double>(over_reserved) / MB, |
| 700 static_cast<double>(reserved) / MB, | 704 static_cast<double>(reserved) / MB, |
| 701 static_cast<int>(kFreenessThreshold), max_evacuation_candidates); | 705 static_cast<int>(kFreenessThreshold), max_evacuation_candidates); |
| 702 } | 706 } |
| 703 | 707 |
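To make the mode switch concrete, here is a minimal sketch of the decision with invented sizes; the one-third rule and the two-page floor are taken from the condition in the diff, everything else is hypothetical.

```cpp
#include <cstdint>
#include <cstdio>

enum CompactionMode { COMPACT_ON_FRAGMENTATION, REDUCE_MEMORY_FOOTPRINT };

int main() {
  const int64_t kMB = 1024 * 1024;
  int64_t reserved = 300 * kMB;       // committed space (hypothetical)
  int64_t over_reserved = 120 * kMB;  // committed minus used (hypothetical)
  int64_t area_size = 1 * kMB;        // usable area of one page

  CompactionMode mode = COMPACT_ON_FRAGMENTATION;
  int max_candidates = 18;            // say, from the sqrt formula
  // Switch to footprint reduction when more than a third of the reserved
  // space (and at least two pages' worth) is not backed by live objects.
  if (over_reserved > reserved / 3 && over_reserved >= 2 * area_size) {
    mode = REDUCE_MEMORY_FOOTPRINT;   // 120MB > 100MB here, so we switch
    max_candidates *= 2;              // mostly-empty pages compact cheaply
  }
  std::printf("mode=%d, budget=%d\n", static_cast<int>(mode), max_candidates);
  return 0;
}
```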
| 704 intptr_t estimated_release = 0; | 708 intptr_t estimated_release = 0; |
| (...skipping 11 matching lines...) | |
| 716 if (it.has_next()) it.next(); // Never compact the first page. | 720 if (it.has_next()) it.next(); // Never compact the first page. |
| 717 | 721 |
| 718 while (it.has_next()) { | 722 while (it.has_next()) { |
| 719 Page* p = it.next(); | 723 Page* p = it.next(); |
| 720 p->ClearEvacuationCandidate(); | 724 p->ClearEvacuationCandidate(); |
| 721 | 725 |
| 722 if (FLAG_stress_compaction) { | 726 if (FLAG_stress_compaction) { |
| 723 unsigned int counter = space->heap()->ms_count(); | 727 unsigned int counter = space->heap()->ms_count(); |
| 724 uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits; | 728 uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits; |
| 725 if ((counter & 1) == (page_number & 1)) fragmentation = 1; | 729 if ((counter & 1) == (page_number & 1)) fragmentation = 1; |
| 726 } else if (mode == REDUCE_MEMORY_FOOTPRINT) { | 730 } else if (mode == REDUCE_MEMORY_FOOTPRINT && !FLAG_always_compact) { |
| 727 // Don't try to release too many pages. | 731 // Don't try to release too many pages. |
| 728 if (estimated_release >= over_reserved) { | 732 if (estimated_release >= over_reserved) { |
| 729 continue; | 733 continue; |
| 730 } | 734 } |
| 731 | 735 |
| 732 intptr_t free_bytes = 0; | 736 intptr_t free_bytes = 0; |
| 733 | 737 |
| 734 if (!p->WasSwept()) { | 738 if (!p->WasSwept()) { |
| 735 free_bytes = (p->area_size() - p->LiveBytes()); | 739 free_bytes = (p->area_size() - p->LiveBytes()); |
| 736 } else { | 740 } else { |
| 737 PagedSpace::SizeStats sizes; | 741 PagedSpace::SizeStats sizes; |
| 738 space->ObtainFreeListStatistics(p, &sizes); | 742 space->ObtainFreeListStatistics(p, &sizes); |
| 739 free_bytes = sizes.Total(); | 743 free_bytes = sizes.Total(); |
| 740 } | 744 } |
| 741 | 745 |
| 742 int free_pct = static_cast<int>(free_bytes * 100) / p->area_size(); | 746 int free_pct = static_cast<int>(free_bytes * 100) / p->area_size(); |
| 743 | 747 |
| 744 if (free_pct >= kFreenessThreshold) { | 748 if (free_pct >= kFreenessThreshold) { |
| 745 estimated_release += free_bytes; | 749 estimated_release += free_bytes; |
| 746 fragmentation = free_pct; | 750 fragmentation = free_pct; |
| 747 } else { | 751 } else { |
| 748 fragmentation = 0; | 752 fragmentation = 0; |
| 749 } | 753 } |
| 750 | 754 |
| 751 if (FLAG_trace_fragmentation) { | 755 if (FLAG_trace_fragmentation_verbose) { |
| 752 PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p), | 756 PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p), |
| 753 AllocationSpaceName(space->identity()), | 757 AllocationSpaceName(space->identity()), |
| 754 static_cast<int>(free_bytes), | 758 static_cast<int>(free_bytes), |
| 755 static_cast<double>(free_bytes * 100) / p->area_size(), | 759 static_cast<double>(free_bytes * 100) / p->area_size(), |
| 756 (fragmentation > 0) ? "[fragmented]" : ""); | 760 (fragmentation > 0) ? "[fragmented]" : ""); |
| 757 } | 761 } |
| 758 } else { | 762 } else { |
| 759 fragmentation = FreeListFragmentation(space, p); | 763 fragmentation = FreeListFragmentation(space, p); |
| 760 } | 764 } |
| 761 | 765 |
| (...skipping 2270 matching lines...) | |
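The REDUCE_MEMORY_FOOTPRINT branch of the candidate loop above reduces to a small pure function. A sketch under the assumption that `kFreenessThreshold` is 50 percent (the constant's value is not visible in this hunk); the page type is a simplified stand-in:

```cpp
#include <cstdint>
#include <cstdio>

// Simplified stand-in for a page: the loop reads free-list stats for swept
// pages and falls back to area minus live bytes for unswept ones.
struct PageInfo {
  bool was_swept;
  int64_t area_size;
  int64_t live_bytes;       // meaningful when !was_swept
  int64_t free_list_total;  // meaningful when was_swept
};

// Assumed value; only the comparison structure is taken from the diff.
static const int kFreenessThreshold = 50;

// Returns the free percentage when the page qualifies for footprint
// reduction, or 0 when it should be left alone.
static int FreenessScore(const PageInfo& p) {
  int64_t free_bytes =
      p.was_swept ? p.free_list_total : p.area_size - p.live_bytes;
  int free_pct = static_cast<int>(free_bytes * 100 / p.area_size);
  return free_pct >= kFreenessThreshold ? free_pct : 0;
}

int main() {
  PageInfo unswept = {false, 1024 * 1024, 200 * 1024, 0};  // ~80% free
  std::printf("score = %d\n", FreenessScore(unswept));     // prints 80
  return 0;
}
```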
| 3032 | 3036 |
| 3033 // Clear marking bits for current cell. | 3037 // Clear marking bits for current cell. |
| 3034 *cell = 0; | 3038 *cell = 0; |
| 3035 } | 3039 } |
| 3036 p->ResetLiveBytes(); | 3040 p->ResetLiveBytes(); |
| 3037 } | 3041 } |
| 3038 | 3042 |
| 3039 | 3043 |
| 3040 void MarkCompactCollector::EvacuatePages() { | 3044 void MarkCompactCollector::EvacuatePages() { |
| 3041 int npages = evacuation_candidates_.length(); | 3045 int npages = evacuation_candidates_.length(); |
| 3046 int abandoned_pages = 0; | |
| 3042 for (int i = 0; i < npages; i++) { | 3047 for (int i = 0; i < npages; i++) { |
| 3043 Page* p = evacuation_candidates_[i]; | 3048 Page* p = evacuation_candidates_[i]; |
| 3044 DCHECK(p->IsEvacuationCandidate() || | 3049 DCHECK(p->IsEvacuationCandidate() || |
| 3045 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3050 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
| 3046 DCHECK(static_cast<int>(p->parallel_sweeping()) == | 3051 DCHECK(static_cast<int>(p->parallel_sweeping()) == |
| 3047 MemoryChunk::SWEEPING_DONE); | 3052 MemoryChunk::SWEEPING_DONE); |
| 3048 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3053 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3049 // Allocate emergency memory for the case when compaction fails due to out | 3054 // Allocate emergency memory for the case when compaction fails due to out |
| 3050 // of memory. | 3055 // of memory. |
| 3051 if (!space->HasEmergencyMemory()) { | 3056 if (!space->HasEmergencyMemory()) { |
| 3052 space->CreateEmergencyMemory(); | 3057 space->CreateEmergencyMemory(); |
| 3053 } | 3058 } |
| 3054 if (p->IsEvacuationCandidate()) { | 3059 if (p->IsEvacuationCandidate()) { |
| 3055 // During compaction we might have to request a new page. Check that we | 3060 // During compaction we might have to request a new page. Check that we |
| 3056 // have an emergency page and the space still has room for that. | 3061 // have an emergency page and the space still has room for that. |
| 3057 if (space->HasEmergencyMemory() && space->CanExpand()) { | 3062 if (space->HasEmergencyMemory() || space->CanExpand()) { |
> ulan, 2015/03/20 10:56:54:
> Good catch!
| 3058 EvacuateLiveObjectsFromPage(p); | 3063 EvacuateLiveObjectsFromPage(p); |
| 3059 } else { | 3064 } else { |
| 3060 // Without room for expansion evacuation is not guaranteed to succeed. | 3065 // Without room for expansion evacuation is not guaranteed to succeed. |
| 3061 // Pessimistically abandon unevacuated pages. | 3066 // Pessimistically abandon unevacuated pages. |
| 3062 for (int j = i; j < npages; j++) { | 3067 for (int j = i; j < npages; j++) { |
| 3063 Page* page = evacuation_candidates_[j]; | 3068 Page* page = evacuation_candidates_[j]; |
| 3064 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address()); | 3069 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address()); |
| 3065 page->ClearEvacuationCandidate(); | 3070 page->ClearEvacuationCandidate(); |
| 3066 page->SetFlag(Page::RESCAN_ON_EVACUATION); | 3071 page->SetFlag(Page::RESCAN_ON_EVACUATION); |
| 3067 } | 3072 } |
| 3073 abandoned_pages = npages - i; | |
| 3068 break; | 3074 break; |
| 3069 } | 3075 } |
| 3070 } | 3076 } |
| 3071 } | 3077 } |
| 3072 if (npages > 0) { | 3078 if (npages > 0) { |
| 3073 // Release emergency memory. | 3079 // Release emergency memory. |
| 3074 PagedSpaces spaces(heap()); | 3080 PagedSpaces spaces(heap()); |
| 3075 for (PagedSpace* space = spaces.next(); space != NULL; | 3081 for (PagedSpace* space = spaces.next(); space != NULL; |
| 3076 space = spaces.next()) { | 3082 space = spaces.next()) { |
| 3077 if (space->HasEmergencyMemory()) { | 3083 if (space->HasEmergencyMemory()) { |
| 3078 space->FreeEmergencyMemory(); | 3084 space->FreeEmergencyMemory(); |
| 3079 } | 3085 } |
| 3080 } | 3086 } |
| 3087 if (FLAG_trace_fragmentation) { | |
| 3088 if (abandoned_pages != 0) { | |
| 3089 PrintF( | |
| 3090 " Abandon %d out of %d page defragmentations due to lack of " | |
| 3091 "memory\n", | |
| 3092 abandoned_pages, npages); | |
| 3093 } else { | |
| 3094 PrintF(" Defragmented %d pages\n", npages); | |
| 3095 } | |
| 3096 } | |
| 3081 } | 3097 } |
| 3082 } | 3098 } |
| 3083 | 3099 |
| 3084 | 3100 |
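Since the change from `&&` to `||` is the subtle part of this hunk, a compressed control-flow sketch may help. All types are hypothetical stand-ins and only the branching is modelled: evacuation can proceed if we hold emergency memory *or* the space can still grow, where the old conjunction abandoned pages even when one fallback was available.

```cpp
#include <cstdio>
#include <vector>

// Hypothetical stand-ins for the V8 types involved in EvacuatePages.
struct Page {
  bool rescan_on_evacuation = false;
};

struct Space {
  bool can_expand;
  bool emergency_alloc_ok;  // whether CreateEmergencyMemory would succeed
  bool has_emergency;
  void CreateEmergencyMemory() { has_emergency = emergency_alloc_ok; }
};

int EvacuatePages(Space* space, std::vector<Page*>& candidates) {
  int abandoned = 0;
  const int npages = static_cast<int>(candidates.size());
  for (int i = 0; i < npages; i++) {
    if (!space->has_emergency) space->CreateEmergencyMemory();
    if (space->has_emergency || space->can_expand) {  // the corrected OR
      // EvacuateLiveObjectsFromPage(candidates[i]) would run here.
    } else {
      // Without room to expand, evacuation is not guaranteed to succeed:
      // pessimistically abandon this and all remaining candidates.
      for (int j = i; j < npages; j++)
        candidates[j]->rescan_on_evacuation = true;
      abandoned = npages - i;
      break;
    }
  }
  if (abandoned != 0) {
    std::printf(" Abandon %d out of %d page defragmentations\n", abandoned,
                npages);
  }
  return abandoned;
}

int main() {
  Space space = {false, false, false};  // no expansion, no emergency memory
  Page a, b, c;
  std::vector<Page*> pages = {&a, &b, &c};
  EvacuatePages(&space, pages);  // prints: Abandon 3 out of 3 ...
  return 0;
}
```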
| 3085 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { | 3101 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { |
| 3086 public: | 3102 public: |
| 3087 virtual Object* RetainAs(Object* object) { | 3103 virtual Object* RetainAs(Object* object) { |
| 3088 if (object->IsHeapObject()) { | 3104 if (object->IsHeapObject()) { |
| 3089 HeapObject* heap_object = HeapObject::cast(object); | 3105 HeapObject* heap_object = HeapObject::cast(object); |
| 3090 MapWord map_word = heap_object->map_word(); | 3106 MapWord map_word = heap_object->map_word(); |
| (...skipping 333 matching lines...) | |
| 3424 &Heap::ScavengeStoreBufferCallback); | 3440 &Heap::ScavengeStoreBufferCallback); |
| 3425 heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps( | 3441 heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps( |
| 3426 &UpdatePointer); | 3442 &UpdatePointer); |
| 3427 } | 3443 } |
| 3428 | 3444 |
| 3429 { | 3445 { |
| 3430 GCTracer::Scope gc_scope(heap()->tracer(), | 3446 GCTracer::Scope gc_scope(heap()->tracer(), |
| 3431 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED); | 3447 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED); |
| 3432 SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_, | 3448 SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_, |
| 3433 code_slots_filtering_required); | 3449 code_slots_filtering_required); |
| 3434 if (FLAG_trace_fragmentation) { | 3450 if (FLAG_trace_fragmentation_verbose) { |
| 3435 PrintF(" migration slots buffer: %d\n", | 3451 PrintF(" migration slots buffer: %d\n", |
| 3436 SlotsBuffer::SizeOfChain(migration_slots_buffer_)); | 3452 SlotsBuffer::SizeOfChain(migration_slots_buffer_)); |
| 3437 } | 3453 } |
| 3438 | 3454 |
| 3439 if (compacting_ && was_marked_incrementally_) { | 3455 if (compacting_ && was_marked_incrementally_) { |
| 3440 // It's difficult to filter out slots recorded for large objects. | 3456 // It's difficult to filter out slots recorded for large objects. |
| 3441 LargeObjectIterator it(heap_->lo_space()); | 3457 LargeObjectIterator it(heap_->lo_space()); |
| 3442 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 3458 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 3443 // LargeObjectSpace is not swept yet thus we have to skip | 3459 // LargeObjectSpace is not swept yet thus we have to skip |
| 3444 // dead objects explicitly. | 3460 // dead objects explicitly. |
| (...skipping 14 matching lines...) | |
| 3459 heap()->tracer(), | 3475 heap()->tracer(), |
| 3460 GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED); | 3476 GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED); |
| 3461 for (int i = 0; i < npages; i++) { | 3477 for (int i = 0; i < npages; i++) { |
| 3462 Page* p = evacuation_candidates_[i]; | 3478 Page* p = evacuation_candidates_[i]; |
| 3463 DCHECK(p->IsEvacuationCandidate() || | 3479 DCHECK(p->IsEvacuationCandidate() || |
| 3464 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3480 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
| 3465 | 3481 |
| 3466 if (p->IsEvacuationCandidate()) { | 3482 if (p->IsEvacuationCandidate()) { |
| 3467 SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(), | 3483 SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(), |
| 3468 code_slots_filtering_required); | 3484 code_slots_filtering_required); |
| 3469 if (FLAG_trace_fragmentation) { | 3485 if (FLAG_trace_fragmentation_verbose) { |
| 3470 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p), | 3486 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p), |
| 3471 SlotsBuffer::SizeOfChain(p->slots_buffer())); | 3487 SlotsBuffer::SizeOfChain(p->slots_buffer())); |
| 3472 } | 3488 } |
| 3473 | 3489 |
| 3474 // Important: skip list should be cleared only after roots were updated | 3490 // Important: skip list should be cleared only after roots were updated |
| 3475 // because root iteration traverses the stack and might have to find | 3491 // because root iteration traverses the stack and might have to find |
| 3476 // code objects from non-updated pc pointing into evacuation candidate. | 3492 // code objects from non-updated pc pointing into evacuation candidate. |
| 3477 SkipList* list = p->skip_list(); | 3493 SkipList* list = p->skip_list(); |
| 3478 if (list != NULL) list->Clear(); | 3494 if (list != NULL) list->Clear(); |
| 3479 } else { | 3495 } else { |
| (...skipping 923 matching lines...) | |
| 4403 SlotsBuffer* buffer = *buffer_address; | 4419 SlotsBuffer* buffer = *buffer_address; |
| 4404 while (buffer != NULL) { | 4420 while (buffer != NULL) { |
| 4405 SlotsBuffer* next_buffer = buffer->next(); | 4421 SlotsBuffer* next_buffer = buffer->next(); |
| 4406 DeallocateBuffer(buffer); | 4422 DeallocateBuffer(buffer); |
| 4407 buffer = next_buffer; | 4423 buffer = next_buffer; |
| 4408 } | 4424 } |
| 4409 *buffer_address = NULL; | 4425 *buffer_address = NULL; |
| 4410 } | 4426 } |
| 4411 } | 4427 } |
| 4412 } // namespace v8::internal | 4428 } // namespace v8::internal |
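For completeness, the `DeallocateChain` at the end of the hunk is the standard destroy-a-singly-linked-list walk. A generic sketch with a hypothetical node type; the essential points are saving the link before each delete and nulling the caller's head pointer so a stale chain can never be walked again:

```cpp
// Minimal node standing in for SlotsBuffer; only the intrusive next
// pointer matters for the pattern.
struct Buffer {
  Buffer* next = nullptr;
};

void DeallocateChain(Buffer** buffer_address) {
  Buffer* buffer = *buffer_address;
  while (buffer != nullptr) {
    Buffer* next_buffer = buffer->next;  // grab the link before freeing
    delete buffer;
    buffer = next_buffer;
  }
  *buffer_address = nullptr;  // leave no dangling head behind
}

int main() {
  Buffer* head = new Buffer{new Buffer{new Buffer{}}};
  DeallocateChain(&head);  // head is nullptr afterwards
  return 0;
}
```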