OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 10 matching lines...) | |
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | 27 |
28 #include "v8.h" | 28 #include "v8.h" |
29 | 29 |
30 #include "compilation-cache.h" | 30 #include "compilation-cache.h" |
31 #include "deoptimizer.h" | |
31 #include "execution.h" | 32 #include "execution.h" |
32 #include "gdb-jit.h" | 33 #include "gdb-jit.h" |
33 #include "global-handles.h" | 34 #include "global-handles.h" |
34 #include "heap-profiler.h" | 35 #include "heap-profiler.h" |
35 #include "ic-inl.h" | 36 #include "ic-inl.h" |
36 #include "incremental-marking.h" | 37 #include "incremental-marking.h" |
37 #include "liveobjectlist-inl.h" | 38 #include "liveobjectlist-inl.h" |
38 #include "mark-compact.h" | 39 #include "mark-compact.h" |
39 #include "objects-visiting.h" | 40 #include "objects-visiting.h" |
40 #include "objects-visiting-inl.h" | 41 #include "objects-visiting-inl.h" |
(...skipping 210 matching lines...) | |
251 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists(); | 252 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists(); |
252 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists(); | 253 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists(); |
253 | 254 |
254 compacting_ = evacuation_candidates_.length() > 0; | 255 compacting_ = evacuation_candidates_.length() > 0; |
255 } | 256 } |
256 | 257 |
257 return compacting_; | 258 return compacting_; |
258 } | 259 } |
259 | 260 |
260 | 261 |
261 void MarkCompactCollector::AbortCompaction() { | |
262 if (compacting_) { | |
263 int npages = evacuation_candidates_.length(); | |
264 for (int i = 0; i < npages; i++) { | |
265 Page* p = evacuation_candidates_[i]; | |
266 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); | |
267 p->ClearEvacuationCandidate(); | |
268 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | |
269 } | |
270 compacting_ = false; | |
271 evacuation_candidates_.Rewind(0); | |
272 } | |
273 ASSERT_EQ(0, evacuation_candidates_.length()); | |
274 } | |
275 | |
276 | |
277 void MarkCompactCollector::CollectGarbage() { | 262 void MarkCompactCollector::CollectGarbage() { |
278 // Make sure that Prepare() has been called. The individual steps below will | 263 // Make sure that Prepare() has been called. The individual steps below will |
279 // update the state as they proceed. | 264 // update the state as they proceed. |
280 ASSERT(state_ == PREPARE_GC); | 265 ASSERT(state_ == PREPARE_GC); |
281 ASSERT(encountered_weak_maps_ == Smi::FromInt(0)); | 266 ASSERT(encountered_weak_maps_ == Smi::FromInt(0)); |
282 | 267 |
283 MarkLiveObjects(); | 268 MarkLiveObjects(); |
284 ASSERT(heap_->incremental_marking()->IsStopped()); | 269 ASSERT(heap_->incremental_marking()->IsStopped()); |
285 | 270 |
286 if (collect_maps_) ClearNonLiveTransitions(); | 271 if (collect_maps_) ClearNonLiveTransitions(); |
(...skipping 169 matching lines...) | |
456 } | 441 } |
457 | 442 |
458 if (count > 0 && FLAG_trace_fragmentation) { | 443 if (count > 0 && FLAG_trace_fragmentation) { |
459 PrintF("Collected %d evacuation candidates for space %s\n", | 444 PrintF("Collected %d evacuation candidates for space %s\n", |
460 count, | 445 count, |
461 AllocationSpaceName(space->identity())); | 446 AllocationSpaceName(space->identity())); |
462 } | 447 } |
463 } | 448 } |
464 | 449 |
465 | 450 |
451 void MarkCompactCollector::AbortCompaction() { | |
452 if (compacting_) { | |
453 int npages = evacuation_candidates_.length(); | |
454 for (int i = 0; i < npages; i++) { | |
455 Page* p = evacuation_candidates_[i]; | |
456 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); | |
457 p->ClearEvacuationCandidate(); | |
458 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | |
459 } | |
460 compacting_ = false; | |
461 evacuation_candidates_.Rewind(0); | |
462 invalidated_code_.Rewind(0); | |
463 } | |
464 ASSERT_EQ(0, evacuation_candidates_.length()); | |
465 } | |
466 | |
467 | |
466 void MarkCompactCollector::Prepare(GCTracer* tracer) { | 468 void MarkCompactCollector::Prepare(GCTracer* tracer) { |
467 FLAG_flush_code = false; | 469 FLAG_flush_code = false; |
468 | 470 |
469 // Disable collection of maps if incremental marking is enabled. | 471 // Disable collection of maps if incremental marking is enabled. |
470 // Map collection algorithm relies on a special map transition tree traversal | 472 // Map collection algorithm relies on a special map transition tree traversal |
471 // order which is not implemented for incremental marking. | 473 // order which is not implemented for incremental marking. |
472 collect_maps_ = FLAG_collect_maps && | 474 collect_maps_ = FLAG_collect_maps && |
473 !heap()->incremental_marking()->IsMarking(); | 475 !heap()->incremental_marking()->IsMarking(); |
474 | 476 |
475 // Rather than passing the tracer around we stash it in a static member | 477 // Rather than passing the tracer around we stash it in a static member |
(...skipping 2361 matching lines...) | |
2837 // Clear marking bits for current cell. | 2839 // Clear marking bits for current cell. |
2838 cells[cell_index] = 0; | 2840 cells[cell_index] = 0; |
2839 } | 2841 } |
2840 if (free_start != p->ObjectAreaEnd()) { | 2842 if (free_start != p->ObjectAreaEnd()) { |
2841 space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start)); | 2843 space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start)); |
2842 } | 2844 } |
2843 p->ResetLiveBytes(); | 2845 p->ResetLiveBytes(); |
2844 } | 2846 } |
2845 | 2847 |
2846 | 2848 |
2849 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) { | |
2850 Page* p = Page::FromAddress(code->address()); | |
2851 | |
2852 if (p->IsEvacuationCandidate() || | |
2853 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { | |
2854 return false; | |
2855 } | |
2856 | |
2857 Address code_start = code->address(); | |
2858 Address code_end = code_start + code->Size(); | |
2859 | |
2860 uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start); | |
2861 uint32_t end_index = | |
2862 MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize); | |
2863 | |
2864 Bitmap* b = p->markbits(); | |
2865 | |
2866 MarkBit start_mark_bit = b->MarkBitFromIndex(start_index); | |
2867 MarkBit end_mark_bit = b->MarkBitFromIndex(end_index); | |
2868 | |
2869 MarkBit::CellType* start_cell = start_mark_bit.cell(); | |
2870 MarkBit::CellType* end_cell = end_mark_bit.cell(); | |
2871 | |
2872 if (value) { | |
2873 MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1); | |
2874 MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1; | |
2875 | |
2876 if (start_cell == end_cell) { | |
2877 *start_cell |= start_mask & end_mask; | |
2878 } else { | |
2879 *start_cell |= start_mask; | |
2880 for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) { | |
2881 *cell = ~0; | |
2882 } | |
2883 *end_cell |= end_mask; | |
2884 } | |
2885 } else { | |
2886 for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) { | |
2887 *cell = 0; | |
2888 } | |
2889 } | |
2890 | |
2891 return true; | |
2892 } | |
2893 | |
2894 | |
2895 static bool IsOnInvalidatedCodeObject(Address addr) { | |
2896 Page* p = Page::FromAddress(addr); | |
Erik Corry 2011/09/22 09:10:47: This doesn't work for large objects. FromAnyPointe | |
2897 | |
2898 // First check owners identity because old pointer and old data spaces | |
Erik Corry 2011/09/22 09:10:47: owners -> owner's | |
2899 // are swept lazily and might still have non-zero mark-bits on some | |
2900 // pages. | |
2901 if (p->owner()->identity() != CODE_SPACE) return false; | |
2902 | |
2903 // In code space only bits on evacuation candidates (but we don't record | |
2904 // any slots on them) and under invalidated code objects are non-zero. | |
2905 MarkBit mark_bit = | |
2906 p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr)); | |
2907 | |
2908 return mark_bit.Get(); | |
2909 } | |
2910 | |
2911 | |
2912 void MarkCompactCollector::InvalidateCode(Code* code) { | |
2913 if (heap_->incremental_marking()->IsCompacting() && | |
2914 !ShouldSkipEvacuationSlotRecording(code)) { | |
2915 ASSERT(compacting_); | |
2916 | |
2917 // If the object is white then no slots were recorded on it yet. | |
2918 MarkBit mark_bit = Marking::MarkBitFrom(code); | |
2919 if (Marking::IsWhite(mark_bit)) return; | |
2920 | |
2921 invalidated_code_.Add(code); | |
2922 } | |
2923 } | |
2924 | |
2925 | |
2926 bool MarkCompactCollector::MarkInvalidatedCode() { | |
2927 bool code_marked = false; | |
2928 | |
2929 int length = invalidated_code_.length(); | |
2930 for (int i = 0; i < length; i++) { | |
2931 Code* code = invalidated_code_[i]; | |
2932 | |
2933 if (SetMarkBitsUnderInvalidatedCode(code, true)) { | |
2934 code_marked = true; | |
2935 } | |
2936 } | |
2937 | |
2938 return code_marked; | |
2939 } | |
2940 | |
2941 | |
2942 void MarkCompactCollector::RemoveDeadInvalidatedCode() { | |
2943 int length = invalidated_code_.length(); | |
2944 for (int i = 0; i < length; i++) { | |
2945 if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL; | |
2946 } | |
2947 } | |
2948 | |
2949 | |
2950 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) { | |
2951 int length = invalidated_code_.length(); | |
2952 for (int i = 0; i < length; i++) { | |
2953 Code* code = invalidated_code_[i]; | |
2954 if (code != NULL) { | |
2955 code->Iterate(visitor); | |
2956 SetMarkBitsUnderInvalidatedCode(code, false); | |
2957 } | |
2958 } | |
2959 invalidated_code_.Rewind(0); | |
2960 } | |
2961 | |
2962 | |
2847 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | 2963 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { |
2964 bool code_slots_filtering_required = MarkInvalidatedCode(); | |
2965 | |
2848 EvacuateNewSpace(); | 2966 EvacuateNewSpace(); |
2849 EvacuatePages(); | 2967 EvacuatePages(); |
2850 | 2968 |
2851 // Second pass: find pointers to new space and update them. | 2969 // Second pass: find pointers to new space and update them. |
2852 PointersUpdatingVisitor updating_visitor(heap()); | 2970 PointersUpdatingVisitor updating_visitor(heap()); |
2853 | 2971 |
2854 // Update pointers in to space. | 2972 // Update pointers in to space. |
2855 SemiSpaceIterator to_it(heap()->new_space()->bottom(), | 2973 SemiSpaceIterator to_it(heap()->new_space()->bottom(), |
2856 heap()->new_space()->top()); | 2974 heap()->new_space()->top()); |
2857 for (HeapObject* object = to_it.Next(); | 2975 for (HeapObject* object = to_it.Next(); |
2858 object != NULL; | 2976 object != NULL; |
2859 object = to_it.Next()) { | 2977 object = to_it.Next()) { |
2860 Map* map = object->map(); | 2978 Map* map = object->map(); |
2861 object->IterateBody(map->instance_type(), | 2979 object->IterateBody(map->instance_type(), |
2862 object->SizeFromMap(map), | 2980 object->SizeFromMap(map), |
2863 &updating_visitor); | 2981 &updating_visitor); |
2864 } | 2982 } |
2865 | 2983 |
2866 // Update roots. | 2984 // Update roots. |
2867 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | 2985 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
2868 LiveObjectList::IterateElements(&updating_visitor); | 2986 LiveObjectList::IterateElements(&updating_visitor); |
2869 | 2987 |
2870 { | 2988 { |
2871 StoreBufferRebuildScope scope(heap_, | 2989 StoreBufferRebuildScope scope(heap_, |
2872 heap_->store_buffer(), | 2990 heap_->store_buffer(), |
2873 &Heap::ScavengeStoreBufferCallback); | 2991 &Heap::ScavengeStoreBufferCallback); |
2874 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); | 2992 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); |
2875 } | 2993 } |
2876 | 2994 |
2877 SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_); | 2995 SlotsBuffer::UpdateSlotsRecordedIn(heap_, |
2996 migration_slots_buffer_, | |
2997 code_slots_filtering_required); | |
2878 if (FLAG_trace_fragmentation) { | 2998 if (FLAG_trace_fragmentation) { |
2879 PrintF(" migration slots buffer: %d\n", | 2999 PrintF(" migration slots buffer: %d\n", |
2880 SlotsBuffer::SizeOfChain(migration_slots_buffer_)); | 3000 SlotsBuffer::SizeOfChain(migration_slots_buffer_)); |
2881 } | 3001 } |
2882 | 3002 |
2883 int npages = evacuation_candidates_.length(); | 3003 int npages = evacuation_candidates_.length(); |
2884 for (int i = 0; i < npages; i++) { | 3004 for (int i = 0; i < npages; i++) { |
2885 Page* p = evacuation_candidates_[i]; | 3005 Page* p = evacuation_candidates_[i]; |
2886 ASSERT(p->IsEvacuationCandidate() || | 3006 ASSERT(p->IsEvacuationCandidate() || |
2887 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3007 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
2888 | 3008 |
2889 if (p->IsEvacuationCandidate()) { | 3009 if (p->IsEvacuationCandidate()) { |
2890 SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer()); | 3010 SlotsBuffer::UpdateSlotsRecordedIn(heap_, |
3011 p->slots_buffer(), | |
3012 code_slots_filtering_required); | |
2891 if (FLAG_trace_fragmentation) { | 3013 if (FLAG_trace_fragmentation) { |
2892 PrintF(" page %p slots buffer: %d\n", | 3014 PrintF(" page %p slots buffer: %d\n", |
2893 reinterpret_cast<void*>(p), | 3015 reinterpret_cast<void*>(p), |
2894 SlotsBuffer::SizeOfChain(p->slots_buffer())); | 3016 SlotsBuffer::SizeOfChain(p->slots_buffer())); |
2895 } | 3017 } |
2896 | 3018 |
2897 // Important: skip list should be cleared only after roots were updated | 3019 // Important: skip list should be cleared only after roots were updated |
2898 // because root iteration traverses the stack and might have to find code | 3020 // because root iteration traverses the stack and might have to find code |
2899 // objects from non-updated pc pointing into evacuation candidate. | 3021 // objects from non-updated pc pointing into evacuation candidate. |
2900 SkipList* list = p->skip_list(); | 3022 SkipList* list = p->skip_list(); |
(...skipping 51 matching lines...) | |
2952 heap_->UpdateReferencesInExternalStringTable( | 3074 heap_->UpdateReferencesInExternalStringTable( |
2953 &UpdateReferenceInExternalStringTableEntry); | 3075 &UpdateReferenceInExternalStringTableEntry); |
2954 | 3076 |
2955 // Update JSFunction pointers from the runtime profiler. | 3077 // Update JSFunction pointers from the runtime profiler. |
2956 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact( | 3078 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact( |
2957 &updating_visitor); | 3079 &updating_visitor); |
2958 | 3080 |
2959 EvacuationWeakObjectRetainer evacuation_object_retainer; | 3081 EvacuationWeakObjectRetainer evacuation_object_retainer; |
2960 heap()->ProcessWeakReferences(&evacuation_object_retainer); | 3082 heap()->ProcessWeakReferences(&evacuation_object_retainer); |
2961 | 3083 |
3084 // Visit invalidated code (we ignored all slots on it) and clear mark-bits | |
3085 // under it. | |
3086 ProcessInvalidatedCode(&updating_visitor); | |
3087 | |
2962 #ifdef DEBUG | 3088 #ifdef DEBUG |
2963 if (FLAG_verify_heap) { | 3089 if (FLAG_verify_heap) { |
2964 VerifyEvacuation(heap_); | 3090 VerifyEvacuation(heap_); |
2965 } | 3091 } |
2966 #endif | 3092 #endif |
2967 | 3093 |
2968 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); | 3094 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); |
2969 ASSERT(migration_slots_buffer_ == NULL); | 3095 ASSERT(migration_slots_buffer_ == NULL); |
2970 for (int i = 0; i < npages; i++) { | 3096 for (int i = 0; i < npages; i++) { |
2971 Page* p = evacuation_candidates_[i]; | 3097 Page* p = evacuation_candidates_[i]; |
(...skipping 501 matching lines...) | |
3473 SweeperType how_to_sweep = | 3599 SweeperType how_to_sweep = |
3474 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; | 3600 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; |
3475 if (sweep_precisely_) how_to_sweep = PRECISE; | 3601 if (sweep_precisely_) how_to_sweep = PRECISE; |
3476 // Noncompacting collections simply sweep the spaces to clear the mark | 3602 // Noncompacting collections simply sweep the spaces to clear the mark |
3477 // bits and free the nonlive blocks (for old and map spaces). We sweep | 3603 // bits and free the nonlive blocks (for old and map spaces). We sweep |
3478 // the map space last because freeing non-live maps overwrites them and | 3604 // the map space last because freeing non-live maps overwrites them and |
3479 // the other spaces rely on possibly non-live maps to get the sizes for | 3605 // the other spaces rely on possibly non-live maps to get the sizes for |
3480 // non-live objects. | 3606 // non-live objects. |
3481 SweepSpace(heap()->old_pointer_space(), how_to_sweep); | 3607 SweepSpace(heap()->old_pointer_space(), how_to_sweep); |
3482 SweepSpace(heap()->old_data_space(), how_to_sweep); | 3608 SweepSpace(heap()->old_data_space(), how_to_sweep); |
3609 | |
3610 RemoveDeadInvalidatedCode(); | |
3483 SweepSpace(heap()->code_space(), PRECISE); | 3611 SweepSpace(heap()->code_space(), PRECISE); |
3612 | |
3484 SweepSpace(heap()->cell_space(), PRECISE); | 3613 SweepSpace(heap()->cell_space(), PRECISE); |
3614 | |
3485 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); | 3615 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); |
3486 EvacuateNewSpaceAndCandidates(); | 3616 EvacuateNewSpaceAndCandidates(); |
3487 } | 3617 } |
3618 | |
3488 // ClearNonLiveTransitions depends on precise sweeping of map space to | 3619 // ClearNonLiveTransitions depends on precise sweeping of map space to |
3489 // detect whether unmarked map became dead in this collection or in one | 3620 // detect whether unmarked map became dead in this collection or in one |
3490 // of the previous ones. | 3621 // of the previous ones. |
3491 SweepSpace(heap()->map_space(), PRECISE); | 3622 SweepSpace(heap()->map_space(), PRECISE); |
3492 | 3623 |
3493 ASSERT(live_map_objects_size_ <= heap()->map_space()->Size()); | 3624 ASSERT(live_map_objects_size_ <= heap()->map_space()->Size()); |
3494 | 3625 |
3495 // Deallocate unmarked objects and clear marked bits for marked objects. | 3626 // Deallocate unmarked objects and clear marked bits for marked objects. |
3496 heap_->lo_space()->FreeUnmarkedObjects(); | 3627 heap_->lo_space()->FreeUnmarkedObjects(); |
3497 } | 3628 } |
(...skipping 121 matching lines...) | |
3619 ++slot_idx; | 3750 ++slot_idx; |
3620 ASSERT(slot_idx < idx_); | 3751 ASSERT(slot_idx < idx_); |
3621 UpdateSlot(&v, | 3752 UpdateSlot(&v, |
3622 DecodeSlotType(slot), | 3753 DecodeSlotType(slot), |
3623 reinterpret_cast<Address>(slots_[slot_idx])); | 3754 reinterpret_cast<Address>(slots_[slot_idx])); |
3624 } | 3755 } |
3625 } | 3756 } |
3626 } | 3757 } |
3627 | 3758 |
3628 | 3759 |
3760 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) { | |
3761 PointersUpdatingVisitor v(heap); | |
3762 | |
3763 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) { | |
3764 ObjectSlot slot = slots_[slot_idx]; | |
3765 if (!IsTypedSlot(slot)) { | |
3766 if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) { | |
3767 UpdateSlot(slot); | |
3768 } | |
3769 } else { | |
3770 ++slot_idx; | |
3771 ASSERT(slot_idx < idx_); | |
3772 Address pc = reinterpret_cast<Address>(slots_[slot_idx]); | |
3773 if (!IsOnInvalidatedCodeObject(pc)) { | |
3774 UpdateSlot(&v, | |
3775 DecodeSlotType(slot), | |
3776 reinterpret_cast<Address>(slots_[slot_idx])); | |
3777 } | |
3778 } | |
3779 } | |
3780 } | |
3781 | |
3782 | |
3629 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) { | 3783 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) { |
3630 return new SlotsBuffer(next_buffer); | 3784 return new SlotsBuffer(next_buffer); |
3631 } | 3785 } |
3632 | 3786 |
3633 | 3787 |
3634 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) { | 3788 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) { |
3635 delete buffer; | 3789 delete buffer; |
3636 } | 3790 } |
3637 | 3791 |
3638 | 3792 |
3639 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) { | 3793 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) { |
3640 SlotsBuffer* buffer = *buffer_address; | 3794 SlotsBuffer* buffer = *buffer_address; |
3641 while (buffer != NULL) { | 3795 while (buffer != NULL) { |
3642 SlotsBuffer* next_buffer = buffer->next(); | 3796 SlotsBuffer* next_buffer = buffer->next(); |
3643 DeallocateBuffer(buffer); | 3797 DeallocateBuffer(buffer); |
3644 buffer = next_buffer; | 3798 buffer = next_buffer; |
3645 } | 3799 } |
3646 *buffer_address = NULL; | 3800 *buffer_address = NULL; |
3647 } | 3801 } |
3648 | 3802 |
3649 | 3803 |
3650 } } // namespace v8::internal | 3804 } } // namespace v8::internal |
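
For readers skimming the new SetMarkBitsUnderInvalidatedCode above: the snippet below is a minimal standalone sketch, not part of this patch and not V8 API, of the same inclusive bit-range masking idea. It uses a plain vector of 32-bit cells in place of the page mark bitmap, and the names SetBitRange and kBitsPerCell are invented for illustration. Clearing wipes whole cells rather than just the range, mirroring the patch's behavior, which is safe there because the only non-zero mark bits in code space at that point are the ones placed under invalidated code.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative sketch: set or clear an inclusive range of bits
// [start_index, end_index] in a bitmap stored as 32-bit cells, using the
// same start/end mask arithmetic as SetMarkBitsUnderInvalidatedCode.
static void SetBitRange(std::vector<uint32_t>& cells,
                        uint32_t start_index,
                        uint32_t end_index,
                        bool value) {
  const uint32_t kBitsPerCell = 32;
  uint32_t start_cell = start_index / kBitsPerCell;
  uint32_t end_cell = end_index / kBitsPerCell;
  // Bits at or above start_index within the first cell.
  uint32_t start_mask = ~((1u << (start_index % kBitsPerCell)) - 1);
  // Bits at or below end_index within the last cell.
  uint32_t end_mask = ((1u << (end_index % kBitsPerCell)) << 1) - 1;

  if (value) {
    if (start_cell == end_cell) {
      cells[start_cell] |= start_mask & end_mask;
    } else {
      cells[start_cell] |= start_mask;
      for (uint32_t c = start_cell + 1; c < end_cell; c++) cells[c] = ~0u;
      cells[end_cell] |= end_mask;
    }
  } else {
    // Clearing zeroes whole cells, like the patch does for invalidated code.
    for (uint32_t c = start_cell; c <= end_cell; c++) cells[c] = 0;
  }
}

int main() {
  std::vector<uint32_t> bitmap(4, 0);
  SetBitRange(bitmap, 5, 70, true);   // mark bits 5..70
  for (uint32_t cell : bitmap) std::printf("%08x\n", cell);
  SetBitRange(bitmap, 5, 70, false);  // clear the same range (whole cells)
  return 0;
}
```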