Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 6255aca0254188df981c1606eabf87466db56145..2385339be09601a699df4de208cdcc484562e063 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -3183,7 +3183,7 @@
     lo_space()->AdjustLiveBytes(by);
   } else if (!in_heap_iterator() &&
              !mark_compact_collector()->sweeping_in_progress() &&
-             ObjectMarking::IsBlack(object)) {
+             Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
     DCHECK(MemoryChunk::FromAddress(object->address())->SweepingDone());
     MemoryChunk::IncrementLiveBytes(object, by);
   }
@@ -3193,7 +3193,6 @@
 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
                                          int elements_to_trim) {
   CHECK_NOT_NULL(object);
-  DCHECK(CanMoveObjectStart(object));
   DCHECK(!object->IsFixedTypedArrayBase());
   DCHECK(!object->IsByteArray());
   const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
@@ -3244,6 +3243,7 @@
   // Initialize header of the trimmed array. Since left trimming is only
   // performed on pages which are not concurrently swept creating a filler
   // object does not require synchronization.
+  DCHECK(CanMoveObjectStart(object));
   Object** former_start = HeapObject::RawField(object, 0);
   int new_start_index = elements_to_trim * (element_size / kPointerSize);
   former_start[new_start_index] = map;
@@ -3314,7 +3314,7 @@
   // Clear the mark bits of the black area that belongs now to the filler.
   // This is an optimization. The sweeper will release black fillers anyway.
   if (incremental_marking()->black_allocation() &&
-      ObjectMarking::IsBlackOrGrey(filler)) {
+      Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(filler))) {
     Page* page = Page::FromAddress(new_end);
     page->markbits()->ClearRange(
         page->AddressToMarkbitIndex(new_end),
@@ -4309,19 +4309,17 @@
   // Hence we have to color all objects of the reservation first black to avoid
   // unnecessary marking deque load.
   if (incremental_marking()->black_allocation()) {
-    for (int i = CODE_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+    for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
       const Heap::Reservation& res = reservations[i];
       for (auto& chunk : res) {
         Address addr = chunk.start;
         while (addr < chunk.end) {
           HeapObject* obj = HeapObject::FromAddress(addr);
-          ObjectMarking::WhiteToBlack(obj);
+          Marking::MarkBlack(ObjectMarking::MarkBitFrom(obj));
           addr += obj->Size();
         }
       }
     }
-    // Iterate black objects in old space, code space, map space, and large
-    // object space for side effects.
     for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
       const Heap::Reservation& res = reservations[i];
       for (auto& chunk : res) {
@@ -4900,7 +4898,8 @@
   // it would be a violation of the invariant to record it's slots.
   bool record_slots = false;
   if (incremental_marking()->IsCompacting()) {
-    record_slots = ObjectMarking::IsBlack(target);
+    MarkBit mark_bit = ObjectMarking::MarkBitFrom(target);
+    record_slots = Marking::IsBlack(mark_bit);
   }

   IterateAndScavengePromotedObjectsVisitor visitor(this, target, record_slots);
@@ -6138,7 +6137,8 @@

   bool SkipObject(HeapObject* object) {
     if (object->IsFiller()) return true;
-    return ObjectMarking::IsWhite(object);
+    MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
+    return Marking::IsWhite(mark_bit);
   }

  private:
@@ -6150,8 +6150,6 @@
       for (Object** p = start; p < end; p++) {
         if (!(*p)->IsHeapObject()) continue;
         HeapObject* obj = HeapObject::cast(*p);
-        // Use Marking instead of ObjectMarking to avoid adjusting live bytes
-        // counter.
         MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
         if (Marking::IsWhite(mark_bit)) {
           Marking::WhiteToBlack(mark_bit);
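
The hunks above all move to the same pattern: fetch the object's MarkBit via ObjectMarking::MarkBitFrom and then query or transition its color through the static Marking helpers (IsWhite, IsBlack, IsBlackOrGrey, WhiteToBlack, MarkBlack). As a point of reference only, the standalone sketch below models a two-bit-per-object mark bitmap with those operations. It is a simplified illustration under assumed names and layout; the MarkBitmap, MarkBit, and Marking types here are stand-ins and do not reproduce V8's actual bitmap encoding, live-byte accounting, or synchronization.

// Illustrative sketch only: a two-bit-per-object mark bitmap where the bit
// pair (bit1, bit0) encodes 00 = white, 10 = grey, 11 = black. Names and
// layout are assumptions for this example, not V8 internals.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// A MarkBit addresses the two color bits of one object inside the bitmap.
struct MarkBit {
  uint8_t* cell;  // byte that holds the bit pair
  uint8_t shift;  // position of bit0 within the byte

  bool Get(int bit) const { return (*cell >> (shift + bit)) & 1; }
  void Set(int bit) { *cell |= static_cast<uint8_t>(1u << (shift + bit)); }
};

// Four objects per byte: two bits for each object index.
class MarkBitmap {
 public:
  explicit MarkBitmap(size_t object_count)
      : cells_((object_count + 3) / 4, 0) {}

  MarkBit MarkBitFromIndex(size_t index) {
    return MarkBit{&cells_[index / 4], static_cast<uint8_t>((index % 4) * 2)};
  }

 private:
  std::vector<uint8_t> cells_;
};

// Static color predicates and transitions, named after the helpers used in
// the patch but operating on the toy MarkBit above.
struct Marking {
  static bool IsWhite(MarkBit m) { return !m.Get(1); }
  static bool IsGrey(MarkBit m) { return m.Get(1) && !m.Get(0); }
  static bool IsBlack(MarkBit m) { return m.Get(1) && m.Get(0); }
  static bool IsBlackOrGrey(MarkBit m) { return m.Get(1); }
  static void WhiteToBlack(MarkBit m) { m.Set(1); m.Set(0); }
  static void MarkBlack(MarkBit m) { m.Set(1); m.Set(0); }
};

int main() {
  MarkBitmap bitmap(8);  // eight hypothetical objects, all initially white

  // Blacken object 3, roughly as black allocation would for a freshly
  // deserialized object, and check that its neighbors stay white.
  MarkBit bit = bitmap.MarkBitFromIndex(3);
  assert(Marking::IsWhite(bit));
  Marking::WhiteToBlack(bit);
  assert(Marking::IsBlack(bitmap.MarkBitFromIndex(3)));
  assert(Marking::IsWhite(bitmap.MarkBitFromIndex(2)));
  assert(Marking::IsWhite(bitmap.MarkBitFromIndex(4)));

  std::printf("object 3 black: %d, object 4 white: %d\n",
              Marking::IsBlack(bitmap.MarkBitFromIndex(3)) ? 1 : 0,
              Marking::IsWhite(bitmap.MarkBitFromIndex(4)) ? 1 : 0);
  return 0;
}

Under this toy encoding, clearing a whole range of mark bits, as the filler hunk does through page->markbits()->ClearRange, would simply amount to zeroing the corresponding bit pairs.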