Chromium Code Reviews

Unified Diff: src/heap/heap.cc

Issue 1406133003: [heap] fix crash during the scavenge of ArrayBuffer (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: store-buffer: move IteratePointersToFromSpace (created 5 years, 2 months ago)
Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 75b7ec1fa698f824e2e82c57da0cb9947132b08f..f35092f03532a7df6e911a3bf9a0657888a08eb8 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -1889,42 +1889,9 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// for pointers to from semispace instead of looking for pointers
// to new space.
DCHECK(!target->IsMap());
- Address obj_address = target->address();
-
- // We are not collecting slots on new space objects during mutation,
- // thus we have to scan for pointers to evacuation candidates when we
- // promote objects. But we should not record any slots in non-black
- // objects. A grey object's slots would be rescanned anyway, and a
- // white object might not survive until the end of the collection;
- // recording its slots would violate the invariant.
- bool record_slots = false;
- if (incremental_marking()->IsCompacting()) {
- MarkBit mark_bit = Marking::MarkBitFrom(target);
- record_slots = Marking::IsBlack(mark_bit);
- }
-#if V8_DOUBLE_FIELDS_UNBOXING
- LayoutDescriptorHelper helper(target->map());
- bool has_only_tagged_fields = helper.all_fields_tagged();
-
- if (!has_only_tagged_fields) {
- for (int offset = 0; offset < size;) {
- int end_of_region_offset;
- if (helper.IsTagged(offset, size, &end_of_region_offset)) {
- IterateAndMarkPointersToFromSpace(
- target, obj_address + offset,
- obj_address + end_of_region_offset, record_slots,
- &Scavenger::ScavengeObject);
- }
- offset = end_of_region_offset;
- }
- } else {
-#endif
- IterateAndMarkPointersToFromSpace(target, obj_address,
- obj_address + size, record_slots,
- &Scavenger::ScavengeObject);
-#if V8_DOUBLE_FIELDS_UNBOXING
- }
-#endif
+
+ store_buffer()->IteratePointersToFromSpace(target, size,
+ &Scavenger::ScavengeObject);
}
}
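The hunk above collapses the open-coded scan into a single call on the store buffer, matching this patch set's title ("store-buffer: move IteratePointersToFromSpace"). As a rough sketch of where the deleted logic presumably lands, assuming the relocated method keeps the region splitting for unboxed double fields and wraps the per-slot walk removed from Heap::IterateAndMarkPointersToFromSpace in the second hunk below (heap_ and IteratePointersInRegion are illustrative names, not taken from this CL):

void StoreBuffer::IteratePointersToFromSpace(HeapObject* target, int size,
                                             ObjectSlotCallback callback) {
  Address obj_address = target->address();

  // Only record slots for black objects: grey objects will be rescanned
  // anyway, and white objects may not survive the collection.
  bool record_slots = false;
  if (heap_->incremental_marking()->IsCompacting()) {  // heap_: assumed member
    MarkBit mark_bit = Marking::MarkBitFrom(target);
    record_slots = Marking::IsBlack(mark_bit);
  }

#if V8_DOUBLE_FIELDS_UNBOXING
  // With unboxed double fields, visit only the tagged regions of the object.
  LayoutDescriptorHelper helper(target->map());
  if (!helper.all_fields_tagged()) {
    for (int offset = 0; offset < size;) {
      int end_of_region_offset;
      if (helper.IsTagged(offset, size, &end_of_region_offset)) {
        // IteratePointersInRegion: hypothetical helper wrapping the per-slot
        // loop deleted from Heap::IterateAndMarkPointersToFromSpace below.
        IteratePointersInRegion(target, obj_address + offset,
                                obj_address + end_of_region_offset,
                                record_slots, callback);
      }
      offset = end_of_region_offset;
    }
    return;
  }
#endif
  IteratePointersInRegion(target, obj_address, obj_address + size,
                          record_slots, callback);
}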
@@ -4440,40 +4407,6 @@ void Heap::ZapFromSpace() {
}
-void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
- Address end, bool record_slots,
- ObjectSlotCallback callback) {
- Address slot_address = start;
-
- while (slot_address < end) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- Object* target = *slot;
- // If the store buffer becomes overfull, we mark pages as being exempt from
- // the store buffer. These pages are scanned to find pointers that point
- // into new space. In that case we may hit newly promoted objects and fix
- // the pointers before the promotion queue gets to them; hence the 'if'.
- if (target->IsHeapObject()) {
- if (Heap::InFromSpace(target)) {
- callback(reinterpret_cast<HeapObject**>(slot),
- HeapObject::cast(target));
- Object* new_target = *slot;
- if (InNewSpace(new_target)) {
- SLOW_DCHECK(Heap::InToSpace(new_target));
- SLOW_DCHECK(new_target->IsHeapObject());
- store_buffer_.EnterDirectlyIntoStoreBuffer(
- reinterpret_cast<Address>(slot));
- }
- SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
- } else if (record_slots &&
- MarkCompactCollector::IsOnEvacuationCandidate(target)) {
- mark_compact_collector()->RecordSlot(object, slot, target);
- }
- }
- slot_address += kPointerSize;
- }
-}
-
-
void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
IterateWeakRoots(v, mode);
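For context, the companion change to src/heap/store-buffer.h in this CL (a separate file diff, not shown here) presumably declares the new entry point. A minimal sketch of that declaration, inferred from the call site in the first hunk; the doc comment is an assumption based on the behavior of the removed Heap method:

class StoreBuffer {
 public:
  // Scans the body of a freshly promoted |target| of |size| bytes, invoking
  // |slot_callback| on every slot that still points into from-space and
  // re-entering surviving new-space pointers into the store buffer.
  void IteratePointersToFromSpace(HeapObject* target, int size,
                                  ObjectSlotCallback slot_callback);
  // ... (remaining members unchanged)
};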