OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 527 matching lines...)
538 mark_bit.Next().Clear(); | 538 mark_bit.Next().Clear(); |
539 Page::FromAddress(obj->address())->ResetProgressBar(); | 539 Page::FromAddress(obj->address())->ResetProgressBar(); |
540 Page::FromAddress(obj->address())->ResetLiveBytes(); | 540 Page::FromAddress(obj->address())->ResetLiveBytes(); |
541 } | 541 } |
542 } | 542 } |
543 | 543 |
544 | 544 |
545 void MarkCompactCollector::StartSweeperThreads() { | 545 void MarkCompactCollector::StartSweeperThreads() { |
546 sweeping_pending_ = true; | 546 sweeping_pending_ = true; |
547 for (int i = 0; i < FLAG_sweeper_threads; i++) { | 547 for (int i = 0; i < FLAG_sweeper_threads; i++) { |
548 heap()->isolate()->sweeper_threads()[i]->StartSweeping(); | 548 isolate()->sweeper_threads()[i]->StartSweeping(); |
549 } | 549 } |
550 } | 550 } |
551 | 551 |
552 | 552 |
553 void MarkCompactCollector::WaitUntilSweepingCompleted() { | 553 void MarkCompactCollector::WaitUntilSweepingCompleted() { |
554 ASSERT(sweeping_pending_ == true); | 554 ASSERT(sweeping_pending_ == true); |
555 for (int i = 0; i < FLAG_sweeper_threads; i++) { | 555 for (int i = 0; i < FLAG_sweeper_threads; i++) { |
556 heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread(); | 556 isolate()->sweeper_threads()[i]->WaitForSweeperThread(); |
557 } | 557 } |
558 sweeping_pending_ = false; | 558 sweeping_pending_ = false; |
559 StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE)); | 559 StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE)); |
560 StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE)); | 560 StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE)); |
561 heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes(); | 561 heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes(); |
562 heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes(); | 562 heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes(); |
563 } | 563 } |
564 | 564 |
565 | 565 |
566 intptr_t MarkCompactCollector:: | 566 intptr_t MarkCompactCollector:: |
567 StealMemoryFromSweeperThreads(PagedSpace* space) { | 567 StealMemoryFromSweeperThreads(PagedSpace* space) { |
568 intptr_t freed_bytes = 0; | 568 intptr_t freed_bytes = 0; |
569 for (int i = 0; i < FLAG_sweeper_threads; i++) { | 569 for (int i = 0; i < FLAG_sweeper_threads; i++) { |
570 freed_bytes += heap()->isolate()->sweeper_threads()[i]->StealMemory(space); | 570 freed_bytes += isolate()->sweeper_threads()[i]->StealMemory(space); |
571 } | 571 } |
572 space->AddToAccountingStats(freed_bytes); | 572 space->AddToAccountingStats(freed_bytes); |
573 space->DecrementUnsweptFreeBytes(freed_bytes); | 573 space->DecrementUnsweptFreeBytes(freed_bytes); |
574 return freed_bytes; | 574 return freed_bytes; |
575 } | 575 } |
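
Editor's note: the three functions above form a start/join/steal protocol. StartSweeperThreads() launches the workers, WaitUntilSweepingCompleted() joins them, and StealMemoryFromSweeperThreads() folds the bytes each worker freed back into the space's accounting. Below is a minimal standalone model of that protocol; SweeperThread here is a simplified stand-in (the real sweepers hand back per-space free-list memory, not a bare byte counter).

#include <atomic>
#include <cstdint>
#include <memory>
#include <thread>
#include <vector>

// Hypothetical stand-in for v8::internal::SweeperThread.
class SweeperThread {
 public:
  void StartSweeping() {
    worker_ = std::thread([this] {
      // The real thread sweeps unswept pages and rebuilds free lists;
      // this stand-in just banks a pretend reclaim.
      freed_bytes_ += 4096;
    });
  }
  void WaitForSweeperThread() { worker_.join(); }
  // Hand the banked bytes to the main thread exactly once.
  int64_t StealMemory() { return freed_bytes_.exchange(0); }

 private:
  std::atomic<int64_t> freed_bytes_{0};
  std::thread worker_;
};

// Mirrors WaitUntilSweepingCompleted(): join every sweeper, then return
// the total so the caller can update accounting, as the real code does
// with AddToAccountingStats()/DecrementUnsweptFreeBytes().
int64_t WaitAndStealAll(std::vector<std::unique_ptr<SweeperThread>>* sweepers) {
  for (auto& s : *sweepers) s->WaitForSweeperThread();
  int64_t total = 0;
  for (auto& s : *sweepers) total += s->StealMemory();
  return total;
}
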
576 | 576 |
577 | 577 |
578 bool MarkCompactCollector::AreSweeperThreadsActivated() { | 578 bool MarkCompactCollector::AreSweeperThreadsActivated() { |
579 return heap()->isolate()->sweeper_threads() != NULL; | 579 return isolate()->sweeper_threads() != NULL; |
580 } | 580 } |
581 | 581 |
582 | 582 |
583 bool MarkCompactCollector::IsConcurrentSweepingInProgress() { | 583 bool MarkCompactCollector::IsConcurrentSweepingInProgress() { |
584 return sweeping_pending_; | 584 return sweeping_pending_; |
585 } | 585 } |
586 | 586 |
587 | 587 |
588 void MarkCompactCollector::MarkInParallel() { | 588 void MarkCompactCollector::MarkInParallel() { |
589 for (int i = 0; i < FLAG_marking_threads; i++) { | 589 for (int i = 0; i < FLAG_marking_threads; i++) { |
590 heap()->isolate()->marking_threads()[i]->StartMarking(); | 590 isolate()->marking_threads()[i]->StartMarking(); |
591 } | 591 } |
592 } | 592 } |
593 | 593 |
594 | 594 |
595 void MarkCompactCollector::WaitUntilMarkingCompleted() { | 595 void MarkCompactCollector::WaitUntilMarkingCompleted() { |
596 for (int i = 0; i < FLAG_marking_threads; i++) { | 596 for (int i = 0; i < FLAG_marking_threads; i++) { |
597 heap()->isolate()->marking_threads()[i]->WaitForMarkingThread(); | 597 isolate()->marking_threads()[i]->WaitForMarkingThread(); |
598 } | 598 } |
599 } | 599 } |
600 | 600 |
601 | 601 |
602 bool Marking::TransferMark(Address old_start, Address new_start) { | 602 bool Marking::TransferMark(Address old_start, Address new_start) { |
603 // This is only used when resizing an object. | 603 // This is only used when resizing an object. |
604 ASSERT(MemoryChunk::FromAddress(old_start) == | 604 ASSERT(MemoryChunk::FromAddress(old_start) == |
605 MemoryChunk::FromAddress(new_start)); | 605 MemoryChunk::FromAddress(new_start)); |
606 | 606 |
607 // If the mark doesn't move, we don't check the color of the object. | 607 // If the mark doesn't move, we don't check the color of the object. |
(...skipping 337 matching lines...)
945 | 945 |
946 void MarkCompactCollector::Finish() { | 946 void MarkCompactCollector::Finish() { |
947 #ifdef DEBUG | 947 #ifdef DEBUG |
948 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); | 948 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); |
949 state_ = IDLE; | 949 state_ = IDLE; |
950 #endif | 950 #endif |
951 // The stub cache is not traversed during GC; clear the cache to | 951 // The stub cache is not traversed during GC; clear the cache to |
952 // force lazy re-initialization of it. This must be done after the | 952 // force lazy re-initialization of it. This must be done after the |
953 // GC, because it relies on the new address of certain old space | 953 // GC, because it relies on the new address of certain old space |
954 // objects (empty string, illegal builtin). | 954 // objects (empty string, illegal builtin). |
955 heap()->isolate()->stub_cache()->Clear(); | 955 isolate()->stub_cache()->Clear(); |
956 | 956 |
957 DeoptimizeMarkedCodeFilter filter; | 957 DeoptimizeMarkedCodeFilter filter; |
958 Deoptimizer::DeoptimizeAllFunctionsWith(&filter); | 958 Deoptimizer::DeoptimizeAllFunctionsWith(isolate(), &filter); |
959 } | 959 } |
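
Editor's note: the NEW column now threads isolate() into DeoptimizeAllFunctionsWith() explicitly instead of having the deoptimizer look it up. The filter argument is a small strategy object; a hedged sketch of that shape follows, with illustrative stand-in types rather than the real V8 declarations.

#include <vector>

// Illustrative stand-in; the real code deoptimizes JSFunction objects.
struct Function { bool marked_for_deoptimization; };

// Strategy interface: decide per function whether to deoptimize it.
class OptimizedFunctionFilter {
 public:
  virtual ~OptimizedFunctionFilter() {}
  virtual bool TakeFunction(Function* function) = 0;
};

class DeoptimizeMarkedCodeFilter : public OptimizedFunctionFilter {
 public:
  bool TakeFunction(Function* function) override {
    return function->marked_for_deoptimization;
  }
};

void DeoptimizeAllFunctionsWith(std::vector<Function>* functions,
                                OptimizedFunctionFilter* filter) {
  for (Function& f : *functions) {
    if (filter->TakeFunction(&f)) f.marked_for_deoptimization = false;
  }
}
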
960 | 960 |
961 | 961 |
962 // ------------------------------------------------------------------------- | 962 // ------------------------------------------------------------------------- |
963 // Phase 1: tracing and marking live objects. | 963 // Phase 1: tracing and marking live objects. |
964 // before: all objects are in normal state. | 964 // before: all objects are in normal state. |
965 // after: a live object's map pointer is marked as '00'. | 965 // after: a live object's map pointer is marked as '00'. |
966 | 966 |
967 // Marking all live objects in the heap as part of mark-sweep or mark-compact | 967 // Marking all live objects in the heap as part of mark-sweep or mark-compact |
968 // collection. Before marking, all objects are in their normal state. After | 968 // collection. Before marking, all objects are in their normal state. After |
(...skipping 956 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1925 // There may be overflowed objects in the heap. Visit them now. | 1925 // There may be overflowed objects in the heap. Visit them now. |
1926 while (marking_deque_.overflowed()) { | 1926 while (marking_deque_.overflowed()) { |
1927 RefillMarkingDeque(); | 1927 RefillMarkingDeque(); |
1928 EmptyMarkingDeque(); | 1928 EmptyMarkingDeque(); |
1929 } | 1929 } |
1930 } | 1930 } |
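
Editor's note: the while (overflowed()) loop above is the standard recovery path for a fixed-capacity marking worklist. A push that would overflow leaves the object grey and sets a flag; the collector then rescans the heap (RefillMarkingDeque) and drains again (EmptyMarkingDeque) until the flag stays clear. A self-contained sketch of such a worklist, simplified to a template over arbitrary items (the real deque stores tagged heap pointers):

#include <cstddef>
#include <vector>

template <typename T>
class BoundedDeque {
 public:
  explicit BoundedDeque(std::size_t capacity) : capacity_(capacity) {}
  void Push(const T& value) {
    if (items_.size() == capacity_) {
      overflowed_ = true;  // remember that work was dropped
      return;
    }
    items_.push_back(value);
  }
  bool Pop(T* out) {
    if (items_.empty()) return false;
    *out = items_.back();
    items_.pop_back();
    return true;
  }
  bool overflowed() const { return overflowed_; }
  void clear_overflowed() { overflowed_ = false; }

 private:
  std::size_t capacity_;
  bool overflowed_ = false;
  std::vector<T> items_;
};
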
1931 | 1931 |
1932 | 1932 |
1933 void MarkCompactCollector::MarkImplicitRefGroups() { | 1933 void MarkCompactCollector::MarkImplicitRefGroups() { |
1934 List<ImplicitRefGroup*>* ref_groups = | 1934 List<ImplicitRefGroup*>* ref_groups = |
1935 heap()->isolate()->global_handles()->implicit_ref_groups(); | 1935 isolate()->global_handles()->implicit_ref_groups(); |
1936 | 1936 |
1937 int last = 0; | 1937 int last = 0; |
1938 for (int i = 0; i < ref_groups->length(); i++) { | 1938 for (int i = 0; i < ref_groups->length(); i++) { |
1939 ImplicitRefGroup* entry = ref_groups->at(i); | 1939 ImplicitRefGroup* entry = ref_groups->at(i); |
1940 ASSERT(entry != NULL); | 1940 ASSERT(entry != NULL); |
1941 | 1941 |
1942 if (!IsMarked(*entry->parent_)) { | 1942 if (!IsMarked(*entry->parent_)) { |
1943 (*ref_groups)[last++] = entry; | 1943 (*ref_groups)[last++] = entry; |
1944 continue; | 1944 continue; |
1945 } | 1945 } |
(...skipping 99 matching lines...)
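
Editor's note: the (*ref_groups)[last++] = entry pattern in MarkImplicitRefGroups() above is an in-place stable filter: groups whose parent is not yet marked are compacted toward the front for a later retry, and the list is truncated afterwards. A generic version of the idiom:

#include <cstddef>
#include <vector>

// Keep only the elements satisfying `keep`, preserving order, without
// allocating a second container; the V8 code similarly rewinds the
// list length after the loop.
template <typename T, typename Pred>
void FilterInPlace(std::vector<T>* items, Pred keep) {
  std::size_t last = 0;
  for (std::size_t i = 0; i < items->size(); i++) {
    if (keep((*items)[i])) (*items)[last++] = (*items)[i];
  }
  items->resize(last);
}
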
2045 RefillMarkingDeque(); | 2045 RefillMarkingDeque(); |
2046 EmptyMarkingDeque(); | 2046 EmptyMarkingDeque(); |
2047 } | 2047 } |
2048 } | 2048 } |
2049 | 2049 |
2050 | 2050 |
2051 void MarkCompactCollector::ProcessExternalMarking(RootMarkingVisitor* visitor) { | 2051 void MarkCompactCollector::ProcessExternalMarking(RootMarkingVisitor* visitor) { |
2052 bool work_to_do = true; | 2052 bool work_to_do = true; |
2053 ASSERT(marking_deque_.IsEmpty()); | 2053 ASSERT(marking_deque_.IsEmpty()); |
2054 while (work_to_do) { | 2054 while (work_to_do) { |
2055 heap()->isolate()->global_handles()->IterateObjectGroups( | 2055 isolate()->global_handles()->IterateObjectGroups( |
2056 visitor, &IsUnmarkedHeapObjectWithHeap); | 2056 visitor, &IsUnmarkedHeapObjectWithHeap); |
2057 MarkImplicitRefGroups(); | 2057 MarkImplicitRefGroups(); |
2058 work_to_do = !marking_deque_.IsEmpty(); | 2058 work_to_do = !marking_deque_.IsEmpty(); |
2059 ProcessMarkingDeque(); | 2059 ProcessMarkingDeque(); |
2060 } | 2060 } |
2061 } | 2061 } |
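
Editor's note: ProcessExternalMarking() iterates to a fixed point because handling one object group can mark parents that activate further groups. A self-contained model of that loop, with integers standing in for heap objects and RefGroup as an illustrative stand-in for V8's implicit ref groups:

#include <set>
#include <vector>

// "If the parent is marked, its children are reachable too."
struct RefGroup {
  int parent;
  std::vector<int> children;
};

void ProcessExternal(const std::vector<RefGroup>& groups,
                     std::set<int>* marked) {
  bool work_to_do = true;
  while (work_to_do) {
    work_to_do = false;
    for (const RefGroup& g : groups) {
      if (marked->count(g.parent) == 0) continue;  // parent dead: skip
      for (int child : g.children) {
        // Newly marked children mean another pass may find more work.
        if (marked->insert(child).second) work_to_do = true;
      }
    }
  }
}

The loop terminates because the marked set only grows and is bounded by the objects present, which is the same argument that bounds the real collector's iteration.
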
2062 | 2062 |
2063 | 2063 |
2064 void MarkCompactCollector::MarkLiveObjects() { | 2064 void MarkCompactCollector::MarkLiveObjects() { |
2065 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK); | 2065 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK); |
2066 // The recursive GC marker detects when it is nearing stack overflow, | 2066 // The recursive GC marker detects when it is nearing stack overflow, |
2067 // and switches to a different marking system. JS interrupts interfere | 2067 // and switches to a different marking system. JS interrupts interfere |
2068 // with the C stack limit check. | 2068 // with the C stack limit check. |
2069 PostponeInterruptsScope postpone(heap()->isolate()); | 2069 PostponeInterruptsScope postpone(isolate()); |
2070 | 2070 |
2071 bool incremental_marking_overflowed = false; | 2071 bool incremental_marking_overflowed = false; |
2072 IncrementalMarking* incremental_marking = heap_->incremental_marking(); | 2072 IncrementalMarking* incremental_marking = heap_->incremental_marking(); |
2073 if (was_marked_incrementally_) { | 2073 if (was_marked_incrementally_) { |
2074 // Finalize the incremental marking and check whether we had an overflow. | 2074 // Finalize the incremental marking and check whether we had an overflow. |
2075 // Both markers use grey color to mark overflowed objects so | 2075 // Both markers use grey color to mark overflowed objects so |
2076 // non-incremental marker can deal with them as if overflow | 2076 // non-incremental marker can deal with them as if overflow |
2077 // occurred during normal marking. | 2077 // occurred during normal marking. |
2078 // But incremental marker uses a separate marking deque | 2078 // But incremental marker uses a separate marking deque |
2079 // so we have to explicitly copy its overflow state. | 2079 // so we have to explicitly copy its overflow state. |
(...skipping 433 matching lines...)
2513 | 2513 |
2514 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) { | 2514 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) { |
2515 SlotsBuffer::AddTo(&slots_buffer_allocator_, | 2515 SlotsBuffer::AddTo(&slots_buffer_allocator_, |
2516 &migration_slots_buffer_, | 2516 &migration_slots_buffer_, |
2517 SlotsBuffer::CODE_ENTRY_SLOT, | 2517 SlotsBuffer::CODE_ENTRY_SLOT, |
2518 code_entry_slot, | 2518 code_entry_slot, |
2519 SlotsBuffer::IGNORE_OVERFLOW); | 2519 SlotsBuffer::IGNORE_OVERFLOW); |
2520 } | 2520 } |
2521 } | 2521 } |
2522 } else if (dest == CODE_SPACE) { | 2522 } else if (dest == CODE_SPACE) { |
2523 PROFILE(heap()->isolate(), CodeMoveEvent(src, dst)); | 2523 PROFILE(isolate(), CodeMoveEvent(src, dst)); |
2524 heap()->MoveBlock(dst, src, size); | 2524 heap()->MoveBlock(dst, src, size); |
2525 SlotsBuffer::AddTo(&slots_buffer_allocator_, | 2525 SlotsBuffer::AddTo(&slots_buffer_allocator_, |
2526 &migration_slots_buffer_, | 2526 &migration_slots_buffer_, |
2527 SlotsBuffer::RELOCATED_CODE_OBJECT, | 2527 SlotsBuffer::RELOCATED_CODE_OBJECT, |
2528 dst, | 2528 dst, |
2529 SlotsBuffer::IGNORE_OVERFLOW); | 2529 SlotsBuffer::IGNORE_OVERFLOW); |
2530 Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src); | 2530 Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src); |
2531 } else { | 2531 } else { |
2532 ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE); | 2532 ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE); |
2533 heap()->MoveBlock(dst, src, size); | 2533 heap()->MoveBlock(dst, src, size); |
(...skipping 1399 matching lines...)
3933 // Deallocate unmarked objects and clear marked bits for marked objects. | 3933 // Deallocate unmarked objects and clear marked bits for marked objects. |
3934 heap_->lo_space()->FreeUnmarkedObjects(); | 3934 heap_->lo_space()->FreeUnmarkedObjects(); |
3935 | 3935 |
3936 // Deallocate evacuated candidate pages. | 3936 // Deallocate evacuated candidate pages. |
3937 ReleaseEvacuationCandidates(); | 3937 ReleaseEvacuationCandidates(); |
3938 } | 3938 } |
3939 | 3939 |
3940 | 3940 |
3941 void MarkCompactCollector::EnableCodeFlushing(bool enable) { | 3941 void MarkCompactCollector::EnableCodeFlushing(bool enable) { |
3942 #ifdef ENABLE_DEBUGGER_SUPPORT | 3942 #ifdef ENABLE_DEBUGGER_SUPPORT |
3943 if (heap()->isolate()->debug()->IsLoaded() || | 3943 if (isolate()->debug()->IsLoaded() || |
3944 heap()->isolate()->debug()->has_break_points()) { | 3944 isolate()->debug()->has_break_points()) { |
3945 enable = false; | 3945 enable = false; |
3946 } | 3946 } |
3947 #endif | 3947 #endif |
3948 | 3948 |
3949 if (enable) { | 3949 if (enable) { |
3950 if (code_flusher_ != NULL) return; | 3950 if (code_flusher_ != NULL) return; |
3951 code_flusher_ = new CodeFlusher(heap()->isolate()); | 3951 code_flusher_ = new CodeFlusher(isolate()); |
3952 } else { | 3952 } else { |
3953 if (code_flusher_ == NULL) return; | 3953 if (code_flusher_ == NULL) return; |
3954 code_flusher_->EvictAllCandidates(); | 3954 code_flusher_->EvictAllCandidates(); |
3955 delete code_flusher_; | 3955 delete code_flusher_; |
3956 code_flusher_ = NULL; | 3956 code_flusher_ = NULL; |
3957 } | 3957 } |
3958 } | 3958 } |
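
Editor's note: EnableCodeFlushing() above follows a lazy create/tear-down pattern: enabling allocates the flusher only if absent, and disabling evicts pending candidates before deleting it so no stale state survives. A simplified stand-in (CodeFlusher here is a dummy, not the real class, and the sketch uses std::unique_ptr where the 2012 code used raw new/delete):

#include <memory>

class CodeFlusher {
 public:
  void EvictAllCandidates() { /* drop any queued flush candidates */ }
};

class Collector {
 public:
  void EnableCodeFlushing(bool enable) {
    if (enable) {
      if (code_flusher_) return;            // already on: nothing to do
      code_flusher_ = std::make_unique<CodeFlusher>();
    } else {
      if (!code_flusher_) return;           // already off: nothing to do
      code_flusher_->EvictAllCandidates();  // don't leave stale state behind
      code_flusher_.reset();
    }
  }

 private:
  std::unique_ptr<CodeFlusher> code_flusher_;
};
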
3959 | 3959 |
3960 | 3960 |
3961 // TODO(1466) ReportDeleteIfNeeded is not called currently. | 3961 // TODO(1466) ReportDeleteIfNeeded is not called currently. |
3962 // Our profiling tools do not expect intersections between | 3962 // Our profiling tools do not expect intersections between |
3963 // code objects. We should either reenable it or change our tools. | 3963 // code objects. We should either reenable it or change our tools. |
3964 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj, | 3964 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj, |
3965 Isolate* isolate) { | 3965 Isolate* isolate) { |
3966 #ifdef ENABLE_GDB_JIT_INTERFACE | 3966 #ifdef ENABLE_GDB_JIT_INTERFACE |
3967 if (obj->IsCode()) { | 3967 if (obj->IsCode()) { |
3968 GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj)); | 3968 GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj)); |
3969 } | 3969 } |
3970 #endif | 3970 #endif |
3971 if (obj->IsCode()) { | 3971 if (obj->IsCode()) { |
3972 PROFILE(isolate, CodeDeleteEvent(obj->address())); | 3972 PROFILE(isolate, CodeDeleteEvent(obj->address())); |
3973 } | 3973 } |
3974 } | 3974 } |
3975 | 3975 |
3976 | 3976 |
| 3977 Isolate* MarkCompactCollector::isolate() const { |
| 3978 return heap_->isolate(); |
| 3979 } |
| 3980 |
| 3981 |
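
Editor's note: this new accessor is the heart of the change; every heap()->isolate() chain in the OLD column collapses to isolate() in the NEW one. A minimal sketch of the delegation pattern, with simplified stand-ins for the real classes:

class Isolate;  // opaque here; the real class lives in isolate.h

class Heap {
 public:
  explicit Heap(Isolate* isolate) : isolate_(isolate) {}
  Isolate* isolate() const { return isolate_; }

 private:
  Isolate* isolate_;
};

class MarkCompactCollector {
 public:
  explicit MarkCompactCollector(Heap* heap) : heap_(heap) {}
  Heap* heap() const { return heap_; }
  // The accessor added by this CL: call sites write isolate()
  // instead of repeating heap()->isolate().
  Isolate* isolate() const { return heap_->isolate(); }

 private:
  Heap* heap_;
};

Defining the accessor out-of-line in the .cc file, as the CL does, plausibly avoids pulling isolate.h into the collector's header, though that motivation is an assumption on the editor's part.
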
3977 void MarkCompactCollector::Initialize() { | 3982 void MarkCompactCollector::Initialize() { |
3978 MarkCompactMarkingVisitor::Initialize(); | 3983 MarkCompactMarkingVisitor::Initialize(); |
3979 IncrementalMarking::Initialize(); | 3984 IncrementalMarking::Initialize(); |
3980 } | 3985 } |
3981 | 3986 |
3982 | 3987 |
3983 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) { | 3988 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) { |
3984 return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES; | 3989 return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES; |
3985 } | 3990 } |
3986 | 3991 |
(...skipping 61 matching lines...)
4048 SlotsBuffer::FAIL_ON_OVERFLOW)) { | 4053 SlotsBuffer::FAIL_ON_OVERFLOW)) { |
4049 EvictEvacuationCandidate(target_page); | 4054 EvictEvacuationCandidate(target_page); |
4050 } | 4055 } |
4051 } | 4056 } |
4052 } | 4057 } |
4053 | 4058 |
4054 | 4059 |
4055 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) { | 4060 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) { |
4056 ASSERT(heap()->gc_state() == Heap::MARK_COMPACT); | 4061 ASSERT(heap()->gc_state() == Heap::MARK_COMPACT); |
4057 if (is_compacting()) { | 4062 if (is_compacting()) { |
4058 Code* host = heap()->isolate()->inner_pointer_to_code_cache()-> | 4063 Code* host = isolate()->inner_pointer_to_code_cache()-> |
4059 GcSafeFindCodeForInnerPointer(pc); | 4064 GcSafeFindCodeForInnerPointer(pc); |
4060 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4065 MarkBit mark_bit = Marking::MarkBitFrom(host); |
4061 if (Marking::IsBlack(mark_bit)) { | 4066 if (Marking::IsBlack(mark_bit)) { |
4062 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); | 4067 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); |
4063 RecordRelocSlot(&rinfo, target); | 4068 RecordRelocSlot(&rinfo, target); |
4064 } | 4069 } |
4065 } | 4070 } |
4066 } | 4071 } |
4067 | 4072 |
4068 | 4073 |
(...skipping 59 matching lines...)
4128 while (buffer != NULL) { | 4133 while (buffer != NULL) { |
4129 SlotsBuffer* next_buffer = buffer->next(); | 4134 SlotsBuffer* next_buffer = buffer->next(); |
4130 DeallocateBuffer(buffer); | 4135 DeallocateBuffer(buffer); |
4131 buffer = next_buffer; | 4136 buffer = next_buffer; |
4132 } | 4137 } |
4133 *buffer_address = NULL; | 4138 *buffer_address = NULL; |
4134 } | 4139 } |
4135 | 4140 |
4136 | 4141 |
4137 } } // namespace v8::internal | 4142 } } // namespace v8::internal |