Chromium Code Reviews

Unified Diff: src/heap/heap.cc

Issue 1420423009: [heap] Black allocation. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 10 months ago
Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index b46d0ec318cee88fa820fa3be4b440470fbecd41..8d16522db15c7988381aedaef6946455512b5545 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -1943,7 +1943,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// to new space.
DCHECK(!target->IsMap());
- IteratePointersToFromSpace(target, size, &Scavenger::ScavengeObject);
+ IteratePromotedObject(target, size, &Scavenger::ScavengeObject);
}
}
@@ -2526,6 +2526,15 @@ AllocationResult Heap::AllocateTransitionArray(int capacity) {
TransitionArray* array = TransitionArray::cast(raw_array);
array->set_length(capacity);
MemsetPointer(array->data_start(), undefined_value(), capacity);
+ // Transition arrays are tenured. When black allocation is on we have to
+ // add the transition array to the list of encountered_transition_arrays.
+ if (incremental_marking()->black_allocation()) {
+ array->set_next_link(encountered_transition_arrays(),
+ UPDATE_WEAK_WRITE_BARRIER);
+ set_encountered_transition_arrays(array);
+ } else {
+ array->set_next_link(undefined_value(), SKIP_WRITE_BARRIER);
+ }
return array;
}
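
The new hunk captures the core invariant of black allocation: an object that is born black is never re-visited by the marker, so any side effect that normally happens while the marker visits the object (here, linking the transition array into the weak encountered list) has to be performed eagerly, at allocation time. A minimal sketch of that pattern, using simplified stand-in types rather than the real V8 classes:

    struct WeakListNode {
      WeakListNode* next_link = nullptr;
    };

    struct HeapSketch {
      bool black_allocation = false;  // true while old-space objects are born black
      WeakListNode* encountered_transition_arrays = nullptr;

      void RegisterNewTransitionArray(WeakListNode* array) {
        if (black_allocation) {
          // A black object is not re-visited by the marker, so the weak-list
          // registration must happen here, at allocation time.
          array->next_link = encountered_transition_arrays;
          encountered_transition_arrays = array;
        } else {
          // A white object will be visited during marking, and the marker
          // performs the registration as a visiting side effect.
          array->next_link = nullptr;
        }
      }
    };
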
@@ -3289,7 +3298,6 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
HeapObject* result = nullptr;
if (!allocation.To(&result)) return allocation;
-
if (immovable) {
Address address = result->address();
// Code objects which should stay at a fixed address are allocated either
@@ -3339,6 +3347,9 @@ AllocationResult Heap::CopyCode(Code* code) {
isolate_->code_range()->contains(code->address()) ||
obj_size <= code_space()->AreaSize());
new_code->Relocate(new_addr - old_addr);
+ // We have to iterate over the object and process its pointers when black
+ // allocation is on.
+ incremental_marking()->IterateBlackObject(new_code);
return new_code;
}
@@ -3386,7 +3397,9 @@ AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
new_obj_size <= code_space()->AreaSize());
new_code->Relocate(new_addr - old_addr);
-
+ // We have to iterate over the object and process its pointers when
+ // black allocation is on.
+ incremental_marking()->IterateBlackObject(new_code);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) code->ObjectVerify();
#endif
@@ -4154,6 +4167,24 @@ bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
return false;
}
+void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
+ // TODO(hpayer): We do not have to iterate reservations on black objects
+ // for marking. We just have to execute the special visiting side effect
+ // code that adds objects to global data structures, e.g. for array buffers.
+ if (incremental_marking()->black_allocation()) {
+ for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+ const Heap::Reservation& res = reservations[i];
+ for (auto& chunk : res) {
+ Address addr = chunk.start;
+ while (addr < chunk.end) {
+ HeapObject* obj = HeapObject::FromAddress(addr);
+ incremental_marking()->IterateBlackObject(obj);
+ addr += obj->Size();
+ }
+ }
+ }
+ }
+}
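
The inner loop relies on bump-pointer allocation: objects inside a reservation chunk are contiguous, so decoding one object's size yields the address of the next. A self-contained sketch of that walk, again with stand-in types:

    #include <cstdint>

    struct ChunkObject {
      int size_in_bytes;  // total allocated size, including the header
    };

    struct Chunk {
      uint8_t* start;
      uint8_t* end;
    };

    template <typename Visitor>
    void WalkChunk(const Chunk& chunk, Visitor visit) {
      uint8_t* addr = chunk.start;
      while (addr < chunk.end) {
        auto* obj = reinterpret_cast<ChunkObject*>(addr);
        visit(obj);
        addr += obj->size_in_bytes;  // contiguous: the next object starts here
      }
    }
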
GCIdleTimeHeapState Heap::ComputeHeapState() {
GCIdleTimeHeapState heap_state;
@@ -4500,10 +4531,9 @@ void Heap::ZapFromSpace() {
}
}
-
-void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
- Address end, bool record_slots,
- ObjectSlotCallback callback) {
+void Heap::IteratePromotedObjectPointers(HeapObject* object, Address start,
+ Address end, bool record_slots,
+ ObjectSlotCallback callback) {
Address slot_address = start;
while (slot_address < end) {
@@ -4533,25 +4563,38 @@ void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
}
}
-
-class IteratePointersToFromSpaceVisitor final : public ObjectVisitor {
+class IteratePromotedObjectsVisitor final : public ObjectVisitor {
public:
- IteratePointersToFromSpaceVisitor(Heap* heap, HeapObject* target,
- bool record_slots,
- ObjectSlotCallback callback)
+ IteratePromotedObjectsVisitor(Heap* heap, HeapObject* target,
+ bool record_slots, ObjectSlotCallback callback)
: heap_(heap),
target_(target),
record_slots_(record_slots),
callback_(callback) {}
V8_INLINE void VisitPointers(Object** start, Object** end) override {
- heap_->IterateAndMarkPointersToFromSpace(
+ heap_->IteratePromotedObjectPointers(
target_, reinterpret_cast<Address>(start),
reinterpret_cast<Address>(end), record_slots_, callback_);
+ // When black allocation is on, we have to visit objects promoted to
+ // black pages to keep their references alive.
+ // TODO(hpayer): Implement a special promotion visitor that incorporates
+ // regular visiting and IteratePromotedObjectPointers.
+ // TODO(hpayer): Right now we are also going to process black objects
ulan 2016/02/16 10:38:09 This could be a problem if the non-pure marking vi
Hannes Payer (out of office) 2016/03/11 14:20:48 As discussed offline, we are not going to process
+ // that got promoted to black pages. Filter them out to avoid unnecessary
+ // work.
+ heap_->incremental_marking()->IterateBlackObject(target_);
ulan 2016/02/16 10:38:09 I think this should be called in line 4546, not he
Hannes Payer (out of office) 2016/03/11 14:20:48 Done.
+ }
+
+ V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
+ // Black allocation requires us to process objects referenced by
+ // promoted objects.
+ if (heap_->incremental_marking()->black_allocation()) {
+ Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
+ IncrementalMarking::MarkObject(heap_, code);
+ }
}
- V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}
-
private:
Heap* heap_;
HeapObject* target_;
@@ -4559,9 +4602,8 @@ class IteratePointersToFromSpaceVisitor final : public ObjectVisitor {
ObjectSlotCallback callback_;
};
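
The VisitCodeEntry override replaces what used to be a deliberate no-op: a promoted object that lands on a black page is never re-scanned, so the Code object behind its code entry has to be marked here. A code entry slot stores the address of the first instruction rather than the address of the Code object itself, which is why the slot is decoded through GetObjectFromEntryAddress first. Roughly, with a placeholder constant instead of the real layout value:

    #include <cstddef>
    #include <cstdint>

    struct CodeObjectSketch { /* header, followed by the instruction stream */ };

    // Placeholder; in V8 this is a layout constant of the Code class.
    constexpr std::ptrdiff_t kCodeHeaderSizeSketch = 64;

    CodeObjectSketch* CodeFromEntrySlot(uint8_t** code_entry_slot) {
      uint8_t* first_instruction = *code_entry_slot;
      // The instructions begin a fixed header size past the object's start,
      // so subtracting that size recovers the enclosing Code object.
      return reinterpret_cast<CodeObjectSketch*>(first_instruction -
                                                 kCodeHeaderSizeSketch);
    }
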
-
-void Heap::IteratePointersToFromSpace(HeapObject* target, int size,
- ObjectSlotCallback callback) {
+void Heap::IteratePromotedObject(HeapObject* target, int size,
+ ObjectSlotCallback callback) {
// We are not collecting slots on new space objects during mutation
// thus we have to scan for pointers to evacuation candidates when we
// promote objects. But we should not record any slots in non-black
@@ -4574,9 +4616,15 @@ void Heap::IteratePointersToFromSpace(HeapObject* target, int size,
record_slots = Marking::IsBlack(mark_bit);
}
- IteratePointersToFromSpaceVisitor visitor(this, target, record_slots,
- callback);
+ IteratePromotedObjectsVisitor visitor(this, target, record_slots, callback);
target->IterateBody(target->map()->instance_type(), size, &visitor);
+
+ // Black allocation requires us to process objects referenced by
+ // promoted objects.
+ if (incremental_marking()->black_allocation()) {
+ Map* map = target->map();
+ IncrementalMarking::MarkObject(this, map);
+ }
}
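
IterateBody walks the fields described by an object's map but deliberately skips the map word itself, so a black promoted object needs this separate MarkObject call to keep its map alive. A compressed sketch of the full promotion hook under black allocation, with hypothetical types:

    struct MapSketch {
      bool marked = false;
    };

    struct PromotedObjectSketch {
      MapSketch* map = nullptr;
      // ... fields described by the map ...
    };

    void ProcessPromotedObject(PromotedObjectSketch* obj, bool black_allocation) {
      // 1. Scan the fields: update to-space pointers, record evacuation slots.
      // 2. Re-trace the object if it was promoted onto a black page.
      if (black_allocation) {
        obj->map->marked = true;  // 3. The field scan never sees the map word.
      }
    }
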