| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 34 matching lines...) |
| 45 | 45 |
| 46 | 46 |
| 47 void MarkCompactCollector::SetFlags(int flags) { | 47 void MarkCompactCollector::SetFlags(int flags) { |
| 48 sweep_precisely_ = ((flags & Heap::kSweepPreciselyMask) != 0); | 48 sweep_precisely_ = ((flags & Heap::kSweepPreciselyMask) != 0); |
| 49 reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0); | 49 reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0); |
| 50 abort_incremental_marking_ = | 50 abort_incremental_marking_ = |
| 51 ((flags & Heap::kAbortIncrementalMarkingMask) != 0); | 51 ((flags & Heap::kAbortIncrementalMarkingMask) != 0); |
| 52 } | 52 } |
| 53 | 53 |
| 54 | 54 |
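Note: SetFlags decodes a bitmask, so callers can combine several requests with bitwise OR in a single call. A minimal caller sketch (the mask names come from the code above; the `collector` variable is an assumption for illustration):

    // Request precise sweeping plus a reduced memory footprint at once;
    // each predicate in SetFlags tests its own bit independently.
    collector->SetFlags(Heap::kSweepPreciselyMask |
                        Heap::kReduceMemoryFootprintMask);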
| 55 bool MarkCompactCollector::MarkObjectAndPush(HeapObject* obj) { |
| 56 if (MarkObjectWithoutPush(obj)) { |
| 57 marking_deque_.PushBlack(obj); |
| 58 return true; |
| 59 } |
| 60 return false; |
| 61 } |
| 62 |
| 63 |
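Note: the new MarkObjectAndPush returns true only when the object was not already marked, which lets callers gate one-time work on the first marking. A hypothetical caller sketch (the `collector`, `target`, and slot variables are assumptions, not part of this CL):

    // Mark the target; only on the first marking is it pushed onto the
    // marking deque (inside MarkObjectAndPush), so per-object follow-up
    // work such as slot recording runs at most once.
    if (collector->MarkObjectAndPush(target)) {
      collector->RecordSlot(anchor_slot, slot, target);
    }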
| 55 void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) { | 64 void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) { |
| 56 ASSERT(Marking::MarkBitFrom(obj) == mark_bit); | 65 ASSERT(Marking::MarkBitFrom(obj) == mark_bit); |
| 57 if (!mark_bit.Get()) { | 66 if (!mark_bit.Get()) { |
| 58 mark_bit.Set(); | 67 mark_bit.Set(); |
| 59 MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size()); | 68 MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size()); |
| 60 ASSERT(IsMarked(obj)); | 69 ProcessNewlyMarkedObject(obj); |
| 61 ASSERT(HEAP->Contains(obj)); | |
| 62 marking_deque_.PushBlack(obj); | |
| 63 } | 70 } |
| 64 } | 71 } |
| 65 | 72 |
| 66 | 73 |
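Note: MarkObject now funnels freshly marked objects through ProcessNewlyMarkedObject instead of asserting and pushing onto the deque inline. The caller contract is unchanged, as the remaining ASSERT shows: the mark bit passed in must be the one belonging to the object. A minimal caller sketch (the `collector` variable is an assumption):

    // The mark bit must come from the object itself, per the ASSERT.
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    collector->MarkObject(obj, mark_bit);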
| 74 bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* obj) { |
| 75 MarkBit mark_bit = Marking::MarkBitFrom(obj); |
| 76 if (!mark_bit.Get()) { |
| 77 SetMark(obj, mark_bit); |
| 78 return true; |
| 79 } |
| 80 return false; |
| 81 } |
| 82 |
| 83 |
| 67 void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) { | 84 void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) { |
| 68 ASSERT(!mark_bit.Get()); | 85 ASSERT(!mark_bit.Get()); |
| 69 ASSERT(Marking::MarkBitFrom(obj) == mark_bit); | 86 ASSERT(Marking::MarkBitFrom(obj) == mark_bit); |
| 70 mark_bit.Set(); | 87 mark_bit.Set(); |
| 71 MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size()); | 88 MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size()); |
| 89 if (obj->IsMap()) { |
| 90 heap_->ClearCacheOnMap(Map::cast(obj)); |
| 91 } |
| 72 } | 92 } |
| 73 | 93 |
| 74 | 94 |
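Note: the NEW side adds a map-specific step to SetMark: marking a Map also clears its cache via heap_->ClearCacheOnMap. The helper itself is not shown in this diff; a sketch of the shape it is assumed to have in this era of V8 (an assumption, not taken from this CL):

    // Assumed shape of Heap::ClearCacheOnMap (not in this diff): drop the
    // map's code cache during GC when the cleanup flag is enabled.
    void Heap::ClearCacheOnMap(Map* map) {
      if (FLAG_cleanup_code_caches_at_gc) {
        map->ClearCodeCache(this);
      }
    }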
| 75 bool MarkCompactCollector::IsMarked(Object* obj) { | 95 bool MarkCompactCollector::IsMarked(Object* obj) { |
| 76 ASSERT(obj->IsHeapObject()); | 96 ASSERT(obj->IsHeapObject()); |
| 77 HeapObject* heap_object = HeapObject::cast(obj); | 97 HeapObject* heap_object = HeapObject::cast(obj); |
| 78 return Marking::MarkBitFrom(heap_object).Get(); | 98 return Marking::MarkBitFrom(heap_object).Get(); |
| 79 } | 99 } |
| 80 | 100 |
| 81 | 101 |
| 82 void MarkCompactCollector::RecordSlot(Object** anchor_slot, | 102 void MarkCompactCollector::RecordSlot(Object** anchor_slot, |
| 83 Object** slot, | 103 Object** slot, |
| 84 Object* object) { | 104 Object* object) { |
| 85 Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object)); | 105 Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object)); |
| 86 // Ensure the anchor slot is on the first 'page' of a large object. | |
| 87 ASSERT(Page::FromAddress(reinterpret_cast<Address>(anchor_slot))->owner() != | |
| 88 NULL); | |
| 89 if (object_page->IsEvacuationCandidate() && | 106 if (object_page->IsEvacuationCandidate() && |
| 90 !ShouldSkipEvacuationSlotRecording(anchor_slot)) { | 107 !ShouldSkipEvacuationSlotRecording(anchor_slot)) { |
| 91 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, | 108 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, |
| 92 object_page->slots_buffer_address(), | 109 object_page->slots_buffer_address(), |
| 93 slot, | 110 slot, |
| 94 SlotsBuffer::FAIL_ON_OVERFLOW)) { | 111 SlotsBuffer::FAIL_ON_OVERFLOW)) { |
| 95 EvictEvacuationCandidate(object_page); | 112 EvictEvacuationCandidate(object_page); |
| 96 } | 113 } |
| 97 } | 114 } |
| 98 } | 115 } |
| 99 | 116 |
| 100 | 117 |
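Note: RecordSlot only records slots whose target lives on an evacuation candidate page, so those slots can be updated after the page is compacted; if the page's slots buffer cannot grow (FAIL_ON_OVERFLOW), the candidate is evicted and will not be compacted this cycle. A hypothetical visitor sketch (the `host`, `offset`, and `collector` names are assumptions; passing the slot as its own anchor is the simple case with no separate anchor slot):

    // Record a field of 'host' that may point into an evacuation candidate.
    Object** slot = HeapObject::RawField(host, offset);
    collector->RecordSlot(slot, slot, *slot);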
| 101 } } // namespace v8::internal | 118 } } // namespace v8::internal |
| 102 | 119 |
| 103 #endif // V8_MARK_COMPACT_INL_H_ | 120 #endif // V8_MARK_COMPACT_INL_H_ |