OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/heap.h" | 5 #include "src/heap/heap.h" |
6 | 6 |
7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
8 #include "src/api.h" | 8 #include "src/api.h" |
9 #include "src/ast/scopeinfo.h" | 9 #include "src/ast/scopeinfo.h" |
10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
(...skipping 1688 matching lines...)
1699 if (collector->is_code_flushing_enabled()) { | 1699 if (collector->is_code_flushing_enabled()) { |
1700 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor); | 1700 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor); |
1701 } | 1701 } |
1702 } | 1702 } |
1703 | 1703 |
1704 { | 1704 { |
1705 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE); | 1705 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE); |
1706 new_space_front = DoScavenge(&scavenge_visitor, new_space_front); | 1706 new_space_front = DoScavenge(&scavenge_visitor, new_space_front); |
1707 } | 1707 } |
1708 | 1708 |
| 1709 { |
| 1710 // Promote objects marked black or grey by the MarkCompact collector. |
| 1711 TRACE_GC(tracer(), |
| 1712 GCTracer::Scope::SCAVENGER_OBJECTS_MARKED_BY_MARK_COMPACT); |
| 1713 if (incremental_marking()->IsMarking()) { |
| 1714 PromoteMarkedObjects(); |
| 1715 new_space_front = DoScavenge(&scavenge_visitor, new_space_front); |
| 1716 } |
| 1717 } |
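The block just added runs only while incremental marking is active: from-space objects that the marker has already recorded as black or grey must not be copied within new space (their mark bits and marking-deque entries would go stale), so they are promoted to old space, and DoScavenge is re-run to evacuate whatever they still reference in new space. As a rough illustration of that re-scavenge fixed point, here is a minimal sketch under a simplified interface; DoScavengeSketch and its shape are illustrative, not the trunk DoScavenge:

    // Illustrative sketch only. Each root-visiting phase may copy more
    // objects into to-space, so the scavenger keeps walking everything
    // between the front pointer and the allocation top before returning.
    Address DoScavengeSketch(ObjectVisitor* v, Address front,
                             NewSpace* to_space) {
      while (front < to_space->top()) {
        HeapObject* object = HeapObject::FromAddress(front);
        object->Iterate(v);  // may evacuate the objects it references
        front += object->Size();
      }
      return front;  // the caller re-invokes after finding further roots
    }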
| 1718 |
1709 if (FLAG_scavenge_reclaim_unmodified_objects) { | 1719 if (FLAG_scavenge_reclaim_unmodified_objects) { |
1710 isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending( | 1720 isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending( |
1711 &IsUnscavengedHeapObject); | 1721 &IsUnscavengedHeapObject); |
1712 | 1722 |
1713 isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots( | 1723 isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots( |
1714 &scavenge_visitor); | 1724 &scavenge_visitor); |
1715 new_space_front = DoScavenge(&scavenge_visitor, new_space_front); | 1725 new_space_front = DoScavenge(&scavenge_visitor, new_space_front); |
1716 } else { | 1726 } else { |
1717 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OBJECT_GROUPS); | 1727 TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OBJECT_GROUPS); |
1718 while (isolate()->global_handles()->IterateObjectGroups( | 1728 while (isolate()->global_handles()->IterateObjectGroups( |
(...skipping 1343 matching lines...)
3062 reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex))); | 3072 reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex))); |
3063 } else { | 3073 } else { |
3064 DCHECK_GT(size, 2 * kPointerSize); | 3074 DCHECK_GT(size, 2 * kPointerSize); |
3065 filler->set_map_no_write_barrier( | 3075 filler->set_map_no_write_barrier( |
3066 reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex))); | 3076 reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex))); |
3067 FreeSpace::cast(filler)->nobarrier_set_size(size); | 3077 FreeSpace::cast(filler)->nobarrier_set_size(size); |
3068 } | 3078 } |
3069 if (mode == ClearRecordedSlots::kYes) { | 3079 if (mode == ClearRecordedSlots::kYes) { |
3070 ClearRecordedSlotRange(addr, addr + size); | 3080 ClearRecordedSlotRange(addr, addr + size); |
3071 } | 3081 } |
| 3082 |
3072 // At this point, we may be deserializing the heap from a snapshot, and | 3083 // At this point, we may be deserializing the heap from a snapshot, and |
3073 // the maps have not been created yet, so filler->map() is NULL. | 3084 // the maps have not been created yet, so filler->map() is NULL. |
3074 DCHECK((filler->map() == NULL && !deserialization_complete_) || | 3085 DCHECK((filler->map() == NULL && !deserialization_complete_) || |
3075 filler->map()->IsMap()); | 3086 filler->map()->IsMap()); |
3076 } | 3087 } |
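CreateFillerObjectAt selects the filler map by size: one-word and two-word fillers for the two degenerate cases, and a FreeSpace object, which records its own length, for anything larger. Right-trimming an array is a typical caller; the sketch below is a hedged illustration under that assumption (RightTrimSketch is a hypothetical name, not the trunk right-trimming code):

    // Hypothetical sketch: shrinking a FixedArray by `delta` elements
    // leaves a filler over the freed tail so heap iteration still sees
    // a contiguous run of valid objects.
    void RightTrimSketch(Heap* heap, FixedArray* array, int delta) {
      const int len = array->length();
      const int old_size = FixedArray::SizeFor(len);
      const int new_size = FixedArray::SizeFor(len - delta);
      // Clear recorded slots so the remembered set keeps no stale
      // entries pointing into the trimmed region.
      heap->CreateFillerObjectAt(array->address() + new_size,
                                 old_size - new_size,
                                 ClearRecordedSlots::kYes);
      array->set_length(len - delta);
    }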
3077 | 3088 |
3078 | 3089 |
3079 bool Heap::CanMoveObjectStart(HeapObject* object) { | 3090 bool Heap::CanMoveObjectStart(HeapObject* object) { |
3080 if (!FLAG_move_object_start) return false; | 3091 if (!FLAG_move_object_start) return false; |
3081 | 3092 |
(...skipping 1765 matching lines...)
4847 // Iterate over the partial snapshot cache unless serializing. | 4858 // Iterate over the partial snapshot cache unless serializing. |
4848 if (mode != VISIT_ONLY_STRONG_FOR_SERIALIZATION) { | 4859 if (mode != VISIT_ONLY_STRONG_FOR_SERIALIZATION) { |
4849 SerializerDeserializer::Iterate(isolate_, v); | 4860 SerializerDeserializer::Iterate(isolate_, v); |
4850 } | 4861 } |
4851 // We don't do a v->Synchronize call here, because in debug mode that will | 4862 // We don't do a v->Synchronize call here, because in debug mode that will |
4852 // output a flag to the snapshot. However, at this point the serializer and | 4863 // output a flag to the snapshot. However, at this point the serializer and |
4853 // deserializer are deliberately a little unsynchronized (see above), so | 4864 // deserializer are deliberately a little unsynchronized (see above), so |
4854 // checking the sync flag in the snapshot would fail. | 4865 // checking the sync flag in the snapshot would fail. |
4855 } | 4866 } |
4856 | 4867 |
| 4868 void Heap::PromoteMarkedObjects() { |
| 4869 NewSpacePageIterator it(new_space_.from_space()); |
| 4870 while (it.has_next()) { |
| 4871 Page* page = it.next(); |
| 4872 LiveObjectIterator<kAllLiveObjects> object_it(page); |
| 4873 Object* object = nullptr; |
| 4874 while ((object = object_it.Next()) != nullptr) { |
| 4875 if (object->IsHeapObject()) { |
| 4876 HeapObject* heap_object = HeapObject::cast(object); |
| 4877 DCHECK(IsUnscavengedHeapObject(this, &object)); |
| 4878 Scavenger::ScavengeObject(&heap_object, heap_object, FORCE_PROMOTION); |
| 4879 } |
| 4880 } |
| 4881 } |
| 4882 } |
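The three-argument ScavengeObject call above implies this patch also threads a promotion hint through the scavenger's entry point; that change is outside this section, so the sketch below only guesses at its shape (PromotionMode, PromoteToOldSpace, and SemiSpaceCopy are hypothetical names):

    // Hypothetical sketch of a promotion hint. Already-forwarded objects
    // are resolved as usual; FORCE_PROMOTION skips the semispace copy so
    // a marked object's mark bits stay meaningful after it moves.
    enum PromotionMode { DEFAULT_PROMOTION, FORCE_PROMOTION };

    void ScavengeObjectSketch(HeapObject** p, HeapObject* object,
                              PromotionMode mode) {
      MapWord first_word = object->map_word();
      if (first_word.IsForwardingAddress()) {
        *p = first_word.ToForwardingAddress();  // already evacuated
        return;
      }
      if (mode == FORCE_PROMOTION) {
        PromoteToOldSpace(p, object);  // hypothetical helper
      } else {
        SemiSpaceCopy(p, object);      // hypothetical helper
      }
    }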
4857 | 4883 |
4858 // TODO(1236194): Since the heap size is configurable on the command line | 4884 // TODO(1236194): Since the heap size is configurable on the command line |
4859 // and through the API, we should gracefully handle the case that the heap | 4885 // and through the API, we should gracefully handle the case that the heap |
4860 // size is not big enough to fit all the initial objects. | 4886 // size is not big enough to fit all the initial objects. |
4861 bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size, | 4887 bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size, |
4862 int max_executable_size, size_t code_range_size) { | 4888 int max_executable_size, size_t code_range_size) { |
4863 if (HasBeenSetUp()) return false; | 4889 if (HasBeenSetUp()) return false; |
4864 | 4890 |
4865 // Overwrite default configuration. | 4891 // Overwrite default configuration. |
4866 if (max_semi_space_size > 0) { | 4892 if (max_semi_space_size > 0) { |
(...skipping 1503 matching lines...)
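ConfigureHeap is normally reached from the public API: an embedder sets v8::ResourceConstraints on Isolate::CreateParams, and those values (megabytes, in this era of the API) arrive here before the heap is set up. A hedged embedder-side example, assuming the platform and snapshot are initialized elsewhere:

    // Embedder-side sketch: constrain heap sizes before the isolate
    // exists; ConfigureHeap rejects changes once setup has run.
    v8::Isolate* NewIsolateWithSmallHeap(
        v8::Isolate::CreateParams create_params) {
      create_params.constraints.set_max_semi_space_size(2);    // 2 MB
      create_params.constraints.set_max_old_space_size(128);   // 128 MB
      return v8::Isolate::New(create_params);
    }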
6370 } | 6396 } |
6371 | 6397 |
6372 | 6398 |
6373 // static | 6399 // static |
6374 int Heap::GetStaticVisitorIdForMap(Map* map) { | 6400 int Heap::GetStaticVisitorIdForMap(Map* map) { |
6375 return StaticVisitorBase::GetVisitorId(map); | 6401 return StaticVisitorBase::GetVisitorId(map); |
6376 } | 6402 } |
6377 | 6403 |
6378 } // namespace internal | 6404 } // namespace internal |
6379 } // namespace v8 | 6405 } // namespace v8 |