| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
| 10 #include "src/compilation-cache.h" | 10 #include "src/compilation-cache.h" |
| (...skipping 269 matching lines...) | |
| 280 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists(); | 280 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists(); |
| 281 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists(); | 281 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists(); |
| 282 | 282 |
| 283 compacting_ = evacuation_candidates_.length() > 0; | 283 compacting_ = evacuation_candidates_.length() > 0; |
| 284 } | 284 } |
| 285 | 285 |
| 286 return compacting_; | 286 return compacting_; |
| 287 } | 287 } |
| 288 | 288 |
| 289 | 289 |
| 290 void MarkCompactCollector::ClearInvalidSlotsBufferEntries(PagedSpace* space) { | |
| 291 PageIterator it(space); | |
| 292 while (it.has_next()) { | |
| 293 Page* p = it.next(); | |
| 294 SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer()); | |
| 295 } | |
| 296 } | |
| 297 | |
| 298 | |
| 299 void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() { | |
| 300 heap_->store_buffer()->ClearInvalidStoreBufferEntries(); | |
| 301 | |
| 302 ClearInvalidSlotsBufferEntries(heap_->old_pointer_space()); | |
| 303 ClearInvalidSlotsBufferEntries(heap_->old_data_space()); | |
| 304 ClearInvalidSlotsBufferEntries(heap_->code_space()); | |
| 305 ClearInvalidSlotsBufferEntries(heap_->cell_space()); | |
| 306 ClearInvalidSlotsBufferEntries(heap_->map_space()); | |
| 307 | |
| 308 LargeObjectIterator it(heap_->lo_space()); | |
| 309 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { | |
| 310 MemoryChunk* chunk = MemoryChunk::FromAddress(object->address()); | |
| 311 SlotsBuffer::RemoveInvalidSlots(heap_, chunk->slots_buffer()); | |
| 312 } | |
| 313 } | |
| 314 | |
| 315 | |
| 316 #ifdef VERIFY_HEAP | |
| 317 static void VerifyValidSlotsBufferEntries(Heap* heap, PagedSpace* space) { | |
| 318 PageIterator it(space); | |
| 319 while (it.has_next()) { | |
| 320 Page* p = it.next(); | |
| 321 SlotsBuffer::VerifySlots(heap, p->slots_buffer()); | |
| 322 } | |
| 323 } | |
| 324 | |
| 325 | |
| 326 static void VerifyValidStoreAndSlotsBufferEntries(Heap* heap) { | |
| 327 heap->store_buffer()->VerifyValidStoreBufferEntries(); | |
| 328 | |
| 329 VerifyValidSlotsBufferEntries(heap, heap->old_pointer_space()); | |
| 330 VerifyValidSlotsBufferEntries(heap, heap->old_data_space()); | |
| 331 VerifyValidSlotsBufferEntries(heap, heap->code_space()); | |
| 332 VerifyValidSlotsBufferEntries(heap, heap->cell_space()); | |
| 333 VerifyValidSlotsBufferEntries(heap, heap->map_space()); | |
| 334 | |
| 335 LargeObjectIterator it(heap->lo_space()); | |
| 336 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { | |
| 337 MemoryChunk* chunk = MemoryChunk::FromAddress(object->address()); | |
| 338 SlotsBuffer::VerifySlots(heap, chunk->slots_buffer()); | |
| 339 } | |
| 340 } | |
| 341 #endif | |
| 342 | |
| 343 | |
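
The two helper pairs removed above (ClearInvalidSlotsBufferEntries / ClearInvalidStoreAndSlotsBufferEntries and their VERIFY_HEAP counterparts) implement a clear-then-verify pattern: after marking, recorded slots that no longer lie inside live objects are dropped, and under --verify-heap the surviving entries are re-checked. The standalone sketch below models that pattern with a plain vector and a fake liveness set; none of the names or types are the V8 ones.

```cpp
// Conceptual sketch only: liveness is faked with a set of addresses and the
// "slots buffer" is a plain vector. The real helpers above walk every paged
// space plus the large-object space instead.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <set>
#include <vector>

using Address = uintptr_t;

bool IsSlotInLiveObject(const std::set<Address>& live, Address slot) {
  return live.count(slot) != 0;
}

void ClearInvalidEntries(const std::set<Address>& live, std::vector<Address>* slots) {
  // Drop every recorded slot that no longer lies inside a live object.
  slots->erase(std::remove_if(slots->begin(), slots->end(),
                              [&](Address a) { return !IsSlotInLiveObject(live, a); }),
               slots->end());
}

void VerifyEntries(const std::set<Address>& live, const std::vector<Address>& slots) {
  // Mirrors the VERIFY_HEAP pass: everything still recorded must be valid.
  for (Address a : slots) assert(IsSlotInLiveObject(live, a));
}

int main() {
  std::set<Address> live = {0x1000, 0x1010};
  std::vector<Address> recorded = {0x1000, 0x2000, 0x1010};
  ClearInvalidEntries(live, &recorded);  // drops the stale 0x2000 entry
  VerifyEntries(live, recorded);         // passes once clearing has run
  return 0;
}
```
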
| 344 void MarkCompactCollector::CollectGarbage() { | 290 void MarkCompactCollector::CollectGarbage() { |
| 345 // Make sure that Prepare() has been called. The individual steps below will | 291 // Make sure that Prepare() has been called. The individual steps below will |
| 346 // update the state as they proceed. | 292 // update the state as they proceed. |
| 347 DCHECK(state_ == PREPARE_GC); | 293 DCHECK(state_ == PREPARE_GC); |
| 348 | 294 |
| 349 MarkLiveObjects(); | 295 MarkLiveObjects(); |
| 350 DCHECK(heap_->incremental_marking()->IsStopped()); | 296 DCHECK(heap_->incremental_marking()->IsStopped()); |
| 351 | 297 |
| 352 // ClearNonLiveReferences can deoptimize code in dependent code arrays. | 298 // ClearNonLiveReferences can deoptimize code in dependent code arrays. |
| 353 // Process weak cells before so that weak cells in dependent code | 299 // Process weak cells before so that weak cells in dependent code |
| 354 // arrays are cleared or contain only live code objects. | 300 // arrays are cleared or contain only live code objects. |
| 355 ProcessAndClearWeakCells(); | 301 ProcessAndClearWeakCells(); |
| 356 | 302 |
| 357 if (FLAG_collect_maps) ClearNonLiveReferences(); | 303 if (FLAG_collect_maps) ClearNonLiveReferences(); |
| 358 | 304 |
| 359 ClearWeakCollections(); | 305 ClearWeakCollections(); |
| 360 | 306 |
| 361 heap_->set_encountered_weak_cells(Smi::FromInt(0)); | 307 heap_->set_encountered_weak_cells(Smi::FromInt(0)); |
| 362 | 308 |
| 363 #ifdef VERIFY_HEAP | 309 #ifdef VERIFY_HEAP |
| 364 if (FLAG_verify_heap) { | 310 if (FLAG_verify_heap) { |
| 365 VerifyMarking(heap_); | 311 VerifyMarking(heap_); |
| 366 } | 312 } |
| 367 #endif | 313 #endif |
| 368 | 314 |
| 369 ClearInvalidStoreAndSlotsBufferEntries(); | 315 heap_->store_buffer()->ClearInvalidStoreBufferEntries(); |
| 370 | 316 |
| 371 #ifdef VERIFY_HEAP | 317 #ifdef VERIFY_HEAP |
| 372 if (FLAG_verify_heap) { | 318 if (FLAG_verify_heap) { |
| 373 VerifyValidStoreAndSlotsBufferEntries(heap_); | 319 heap_->store_buffer()->VerifyValidStoreBufferEntries(); |
| 374 } | 320 } |
| 375 #endif | 321 #endif |
| 376 | 322 |
| 377 SweepSpaces(); | 323 SweepSpaces(); |
| 378 | 324 |
| 379 #ifdef VERIFY_HEAP | 325 #ifdef VERIFY_HEAP |
| 380 VerifyWeakEmbeddedObjectsInCode(); | 326 VerifyWeakEmbeddedObjectsInCode(); |
| 381 if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) { | 327 if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) { |
| 382 VerifyOmittedMapChecks(); | 328 VerifyOmittedMapChecks(); |
| 383 } | 329 } |
| (...skipping 386 matching lines...) | |
| 770 if (FLAG_trace_fragmentation && | 716 if (FLAG_trace_fragmentation && |
| 771 max_evacuation_candidates >= kMaxMaxEvacuationCandidates) { | 717 max_evacuation_candidates >= kMaxMaxEvacuationCandidates) { |
| 772 PrintF("Hit max page compaction limit of %d pages\n", | 718 PrintF("Hit max page compaction limit of %d pages\n", |
| 773 kMaxMaxEvacuationCandidates); | 719 kMaxMaxEvacuationCandidates); |
| 774 } | 720 } |
| 775 max_evacuation_candidates = | 721 max_evacuation_candidates = |
| 776 Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates); | 722 Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates); |
| 777 | 723 |
| 778 int count = 0; | 724 int count = 0; |
| 779 int fragmentation = 0; | 725 int fragmentation = 0; |
| 780 int page_number = 0; | |
| 781 Candidate* least = NULL; | 726 Candidate* least = NULL; |
| 782 | 727 |
| 783 PageIterator it(space); | 728 PageIterator it(space); |
| 784 while (it.has_next()) { | 729 while (it.has_next()) { |
| 785 Page* p = it.next(); | 730 Page* p = it.next(); |
| 786 if (p->NeverEvacuate()) continue; | 731 if (p->NeverEvacuate()) continue; |
| 787 | 732 |
| 788 // Invariant: Evacuation candidates are just created when marking is | 733 // Invariant: Evacuation candidates are just created when marking is |
| 789 // started. At the end of a GC all evacuation candidates are cleared and | 734 // started. At the end of a GC all evacuation candidates are cleared and |
| 790 // their slot buffers are released. | 735 // their slot buffers are released. |
| 791 CHECK(!p->IsEvacuationCandidate()); | 736 CHECK(!p->IsEvacuationCandidate()); |
| 792 CHECK(p->slots_buffer() == NULL); | 737 CHECK(p->slots_buffer() == NULL); |
| 793 | 738 |
| 794 if (FLAG_stress_compaction) { | 739 if (FLAG_stress_compaction) { |
| 795 if (FLAG_manual_evacuation_candidates_selection) { | 740 unsigned int counter = space->heap()->ms_count(); |
| 796 if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) { | 741 uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits; |
| 797 p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); | 742 if ((counter & 1) == (page_number & 1)) fragmentation = 1; |
| 798 fragmentation = 1; | |
| 799 } | |
| 800 } else { | |
| 801 unsigned int counter = space->heap()->ms_count(); | |
| 802 if ((counter & 1) == (page_number & 1)) fragmentation = 1; | |
| 803 page_number++; | |
| 804 } | |
| 805 } else if (mode == REDUCE_MEMORY_FOOTPRINT && !FLAG_always_compact) { | 743 } else if (mode == REDUCE_MEMORY_FOOTPRINT && !FLAG_always_compact) { |
| 806 // Don't try to release too many pages. | 744 // Don't try to release too many pages. |
| 807 if (estimated_release >= over_reserved) { | 745 if (estimated_release >= over_reserved) { |
| 808 continue; | 746 continue; |
| 809 } | 747 } |
| 810 | 748 |
| 811 intptr_t free_bytes = 0; | 749 intptr_t free_bytes = 0; |
| 812 | 750 |
| 813 if (!p->WasSwept()) { | 751 if (!p->WasSwept()) { |
| 814 free_bytes = (p->area_size() - p->LiveBytes()); | 752 free_bytes = (p->area_size() - p->LiveBytes()); |
| (...skipping 2275 matching lines...) | |
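
In the --stress-compaction branch shown above, the new code drops the running page counter (and the FORCE_EVACUATION_CANDIDATE_FOR_TESTING path); it derives a page number from the page's address and treats the page as fragmented whenever that number's parity matches the parity of the mark-sweep counter, so alternating GCs stress alternating pages. A minimal standalone sketch of that predicate follows; kPageSizeBits is assumed to be 20 (1 MB pages) here, and the function name is invented for illustration.

```cpp
// Hedged sketch of the parity test, not the V8 code itself.
#include <cstdint>
#include <cstdio>

constexpr int kPageSizeBits = 20;  // assumption: 1 MB pages

bool StressSelectsPage(unsigned int ms_count, uintptr_t page_address) {
  uintptr_t page_number = page_address >> kPageSizeBits;  // page index from its address
  return (ms_count & 1) == (page_number & 1);             // GC parity vs. page parity
}

int main() {
  uintptr_t page = 0x100000;                         // page number 1 (odd)
  std::printf("%d\n", StressSelectsPage(4, page));   // even GC, odd page -> 0
  std::printf("%d\n", StressSelectsPage(5, page));   // odd GC,  odd page -> 1
  return 0;
}
```
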
| 3090 if (allocation.To(&target)) { | 3028 if (allocation.To(&target)) { |
| 3091 MigrateObject(target, object, object_size, target_space->identity()); | 3029 MigrateObject(target, object, object_size, target_space->identity()); |
| 3092 heap()->IncrementPromotedObjectsSize(object_size); | 3030 heap()->IncrementPromotedObjectsSize(object_size); |
| 3093 return true; | 3031 return true; |
| 3094 } | 3032 } |
| 3095 | 3033 |
| 3096 return false; | 3034 return false; |
| 3097 } | 3035 } |
| 3098 | 3036 |
| 3099 | 3037 |
| 3100 bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot, | 3038 bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot) { |
| 3101 HeapObject** out_object) { | |
| 3102 // This function does not support large objects right now. | 3039 // This function does not support large objects right now. |
| 3103 Space* owner = p->owner(); | 3040 Space* owner = p->owner(); |
| 3104 if (owner == heap_->lo_space() || owner == NULL) { | 3041 if (owner == heap_->lo_space() || owner == NULL) return true; |
| 3105 *out_object = NULL; | |
| 3106 return true; | |
| 3107 } | |
| 3108 | 3042 |
| 3109 uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot); | 3043 uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot); |
| 3110 unsigned int start_index = mark_bit_index >> Bitmap::kBitsPerCellLog2; | 3044 unsigned int start_index = mark_bit_index >> Bitmap::kBitsPerCellLog2; |
| 3111 MarkBit::CellType index_in_cell = 1U | 3045 MarkBit::CellType index_in_cell = 1U |
| 3112 << (mark_bit_index & Bitmap::kBitIndexMask); | 3046 << (mark_bit_index & Bitmap::kBitIndexMask); |
| 3113 MarkBit::CellType* cells = p->markbits()->cells(); | 3047 MarkBit::CellType* cells = p->markbits()->cells(); |
| 3114 Address cell_base = p->area_start(); | 3048 Address cell_base = p->area_start(); |
| 3115 unsigned int cell_base_start_index = Bitmap::IndexToCell( | 3049 unsigned int cell_base_start_index = Bitmap::IndexToCell( |
| 3116 Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(cell_base))); | 3050 Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(cell_base))); |
| 3117 | 3051 |
| (...skipping 33 matching lines...) | |
| 3151 unsigned int offset = Bitmap::kBitIndexMask - leading_zeros; | 3085 unsigned int offset = Bitmap::kBitIndexMask - leading_zeros; |
| 3152 | 3086 |
| 3153 cell_base += (start_index - cell_base_start_index) * 32 * kPointerSize; | 3087 cell_base += (start_index - cell_base_start_index) * 32 * kPointerSize; |
| 3154 Address address = cell_base + offset * kPointerSize; | 3088 Address address = cell_base + offset * kPointerSize; |
| 3155 HeapObject* object = HeapObject::FromAddress(address); | 3089 HeapObject* object = HeapObject::FromAddress(address); |
| 3156 DCHECK(object->address() < reinterpret_cast<Address>(slot)); | 3090 DCHECK(object->address() < reinterpret_cast<Address>(slot)); |
| 3157 if (object->address() <= slot && | 3091 if (object->address() <= slot && |
| 3158 (object->address() + object->Size()) > slot) { | 3092 (object->address() + object->Size()) > slot) { |
| 3159 // If the slot is within the last found object in the cell, the slot is | 3093 // If the slot is within the last found object in the cell, the slot is |
| 3160 // in a live object. | 3094 // in a live object. |
| 3161 *out_object = object; | |
| 3162 return true; | 3095 return true; |
| 3163 } | 3096 } |
| 3164 return false; | 3097 return false; |
| 3165 } | 3098 } |
| 3166 | 3099 |
| 3167 | 3100 |
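
The index arithmetic in IsSlotInBlackObject above turns a slot address into a mark-bit index (one bit per pointer-sized word), then splits it into a 32-bit cell index and a bit position inside that cell. The sketch below redoes the arithmetic with local constants so the mapping is easy to follow; it assumes a 64-bit build (8-byte pointers) and indexes relative to an arbitrary base rather than through the real AddressToMarkbitIndex.

```cpp
// Worked example of the bitmap index math; constants mirror the assumptions
// stated above (32 mark bits per cell, one bit per word), not V8 headers.
#include <cstdint>
#include <cstdio>

constexpr uintptr_t kPointerSize = 8;     // assumption: 64-bit build
constexpr uint32_t kBitsPerCellLog2 = 5;  // 32 mark bits per cell
constexpr uint32_t kBitIndexMask = (1u << kBitsPerCellLog2) - 1;

int main() {
  uintptr_t base = 0x100000;      // stand-in for the page's indexing base
  uintptr_t slot = base + 0x230;  // a slot 0x230 bytes into the page

  uint32_t mark_bit_index = static_cast<uint32_t>((slot - base) / kPointerSize);
  uint32_t cell_index = mark_bit_index >> kBitsPerCellLog2;  // which 32-bit cell
  uint32_t bit_in_cell = mark_bit_index & kBitIndexMask;     // which bit inside it

  // 0x230 / 8 = 70  ->  cell 2, bit 6
  std::printf("bit=%u cell=%u in-cell=%u\n", mark_bit_index, cell_index, bit_in_cell);
  return 0;
}
```
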
| 3168 bool MarkCompactCollector::IsSlotInBlackObjectSlow(Page* p, Address slot) { | 3101 bool MarkCompactCollector::IsSlotInBlackObjectSlow(Page* p, Address slot) { |
| 3169 // This function does not support large objects right now. | 3102 // This function does not support large objects right now. |
| 3170 Space* owner = p->owner(); | 3103 Space* owner = p->owner(); |
| 3171 if (owner == heap_->lo_space() || owner == NULL) return true; | 3104 if (owner == heap_->lo_space() || owner == NULL) return true; |
| (...skipping 21 matching lines...) | |
| 3193 } | 3126 } |
| 3194 | 3127 |
| 3195 offset++; | 3128 offset++; |
| 3196 current_cell >>= 1; | 3129 current_cell >>= 1; |
| 3197 } | 3130 } |
| 3198 } | 3131 } |
| 3199 return false; | 3132 return false; |
| 3200 } | 3133 } |
| 3201 | 3134 |
| 3202 | 3135 |
| 3203 bool MarkCompactCollector::IsSlotInLiveObject(Address slot) { | 3136 bool MarkCompactCollector::IsSlotInLiveObject(HeapObject** address, |
| 3204 HeapObject* object = NULL; | 3137 HeapObject* object) { |
| 3138 // If the target object is not black, the source slot must be part |
| 3139 // of a non-black (dead) object. |
| 3140 if (!Marking::IsBlack(Marking::MarkBitFrom(object))) { |
| 3141 return false; |
| 3142 } |
| 3143 |
| 3205 // The target object is black but we don't know if the source slot is black. | 3144 // The target object is black but we don't know if the source slot is black. |
| 3206 // The source object could have died and the slot could be part of a free | 3145 // The source object could have died and the slot could be part of a free |
| 3207 // space. Find out based on mark bits if the slot is part of a live object. | 3146 // space. Find out based on mark bits if the slot is part of a live object. |
| 3208 if (!IsSlotInBlackObject(Page::FromAddress(slot), slot, &object)) { | 3147 if (!IsSlotInBlackObject( |
| 3148 Page::FromAddress(reinterpret_cast<Address>(address)), |
| 3149 reinterpret_cast<Address>(address))) { |
| 3209 return false; | 3150 return false; |
| 3210 } | 3151 } |
| 3211 | 3152 |
| 3212 #if V8_DOUBLE_FIELDS_UNBOXING | |
| 3213 // |object| is NULL only when the slot belongs to large object space. | |
| 3214 DCHECK(object != NULL || | |
| 3215 Page::FromAnyPointerAddress(heap_, slot)->owner() == | |
| 3216 heap_->lo_space()); | |
| 3217 // We don't need to check large objects' layout descriptor since it can't | |
| 3218 // contain in-object fields anyway. | |
| 3219 if (object != NULL) { | |
| 3220 // Filter out slots that happen to point to unboxed double fields. | |
| 3221 LayoutDescriptorHelper helper(object->map()); | |
| 3222 bool has_only_tagged_fields = helper.all_fields_tagged(); | |
| 3223 if (!has_only_tagged_fields && | |
| 3224 !helper.IsTagged(static_cast<int>(slot - object->address()))) { | |
| 3225 return false; | |
| 3226 } | |
| 3227 } | |
| 3228 #endif | |
| 3229 | |
| 3230 return true; | 3153 return true; |
| 3231 } | 3154 } |
| 3232 | 3155 |
| 3233 | 3156 |
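
The V8_DOUBLE_FIELDS_UNBOXING block removed above filters out slots that land on unboxed double fields: such a field holds a raw double, so its contents must never be followed as a tagged pointer. The toy sketch below shows the idea with a per-object bitmap where a set bit means "tagged field"; this encoding and the helper name are illustrative stand-ins and do not match V8's LayoutDescriptor.

```cpp
// Toy model of the "is this in-object field tagged?" check; not V8's encoding.
#include <cstdint>
#include <cstdio>

constexpr uintptr_t kPointerSize = 8;  // assumption: 64-bit build

bool IsTaggedField(uint32_t tagged_bitmap, uintptr_t object_start, uintptr_t slot) {
  uint32_t field_index = static_cast<uint32_t>((slot - object_start) / kPointerSize);
  return (tagged_bitmap >> field_index) & 1u;  // set bit = tagged, in this toy encoding
}

int main() {
  uintptr_t object_start = 0x4000;
  uint32_t tagged_bitmap = 0b1011;  // fields 0, 1, 3 tagged; field 2 is an unboxed double

  std::printf("%d\n", IsTaggedField(tagged_bitmap, object_start, object_start + 8));   // 1
  std::printf("%d\n", IsTaggedField(tagged_bitmap, object_start, object_start + 16));  // 0
  return 0;
}
```
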
| 3234 void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot, | 3157 void MarkCompactCollector::VerifyIsSlotInLiveObject(HeapObject** address, |
| 3235 HeapObject* object) { | 3158 HeapObject* object) { |
| 3236 // The target object has to be black. | 3159 // The target object has to be black. |
| 3237 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 3160 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
| 3238 | 3161 |
| 3239 // The target object is black but we don't know if the source slot is black. | 3162 // The target object is black but we don't know if the source slot is black. |
| 3240 // The source object could have died and the slot could be part of a free | 3163 // The source object could have died and the slot could be part of a free |
| 3241 // space. Use the mark bit iterator to find out about liveness of the slot. | 3164 // space. Use the mark bit iterator to find out about liveness of the slot. |
| 3242 CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot)); | 3165 CHECK(IsSlotInBlackObjectSlow( |
| 3166 Page::FromAddress(reinterpret_cast<Address>(address)), |
| 3167 reinterpret_cast<Address>(address))); |
| 3243 } | 3168 } |
| 3244 | 3169 |
| 3245 | 3170 |
| 3246 void MarkCompactCollector::EvacuateNewSpace() { | 3171 void MarkCompactCollector::EvacuateNewSpace() { |
| 3247 // There are soft limits in the allocation code, designed to trigger a mark | 3172 // There are soft limits in the allocation code, designed to trigger a mark |
| 3248 // sweep collection by failing allocations. But since we are already in | 3173 // sweep collection by failing allocations. But since we are already in |
| 3249 // a mark-sweep allocation, there is no sense in trying to trigger one. | 3174 // a mark-sweep allocation, there is no sense in trying to trigger one. |
| 3250 AlwaysAllocateScope scope(isolate()); | 3175 AlwaysAllocateScope scope(isolate()); |
| 3251 | 3176 |
| 3252 NewSpace* new_space = heap()->new_space(); | 3177 NewSpace* new_space = heap()->new_space(); |
| (...skipping 1298 matching lines...) | |
| 4551 buffer = allocator->AllocateBuffer(buffer); | 4476 buffer = allocator->AllocateBuffer(buffer); |
| 4552 *buffer_address = buffer; | 4477 *buffer_address = buffer; |
| 4553 } | 4478 } |
| 4554 DCHECK(buffer->HasSpaceForTypedSlot()); | 4479 DCHECK(buffer->HasSpaceForTypedSlot()); |
| 4555 buffer->Add(reinterpret_cast<ObjectSlot>(type)); | 4480 buffer->Add(reinterpret_cast<ObjectSlot>(type)); |
| 4556 buffer->Add(reinterpret_cast<ObjectSlot>(addr)); | 4481 buffer->Add(reinterpret_cast<ObjectSlot>(addr)); |
| 4557 return true; | 4482 return true; |
| 4558 } | 4483 } |
| 4559 | 4484 |
| 4560 | 4485 |
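
The AddTo path above records a typed slot as two consecutive buffer entries: first the slot type, then the address. That pairing is why the removed scanning loops below advance the index an extra step whenever IsTypedSlot() fires. The standalone sketch models the convention with a small enum value standing in for a type entry; the types, constants, and the predicate are simplified assumptions, not the V8 definitions.

```cpp
// Toy model of the (type, addr) pairing; a small value marks a type entry here.
#include <cstdint>
#include <cstdio>
#include <vector>

enum SlotType : uintptr_t { EMBEDDED_OBJECT_SLOT, CODE_TARGET_SLOT, NUMBER_OF_SLOT_TYPES };

using Entry = uintptr_t;  // either a slot address or a SlotType value

bool IsTypedSlot(Entry e) { return e < NUMBER_OF_SLOT_TYPES; }

int main() {
  std::vector<Entry> buffer;
  buffer.push_back(0x20000);           // plain (untyped) slot address
  buffer.push_back(CODE_TARGET_SLOT);  // typed slot: type entry ...
  buffer.push_back(0x30008);           // ... immediately followed by its address

  for (size_t i = 0; i < buffer.size(); ++i) {
    if (IsTypedSlot(buffer[i])) {
      std::printf("typed slot: type=%zu addr=%#zx\n",
                  static_cast<size_t>(buffer[i]), static_cast<size_t>(buffer[i + 1]));
      ++i;  // skip the paired address entry, as the scanning loops do
    } else {
      std::printf("plain slot: addr=%#zx\n", static_cast<size_t>(buffer[i]));
    }
  }
  return 0;
}
```
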
| 4561 static Object* g_smi_slot = NULL; | |
| 4562 | |
| 4563 | |
| 4564 void SlotsBuffer::RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer) { | |
| 4565 DCHECK_EQ(Smi::FromInt(0), g_smi_slot); | |
| 4566 | |
| 4567 // Remove entries by replacing them with a dummy slot containing a smi. | |
| 4568 const ObjectSlot kRemovedEntry = &g_smi_slot; | |
| 4569 | |
| 4570 while (buffer != NULL) { | |
| 4571 SlotsBuffer::ObjectSlot* slots = buffer->slots_; | |
| 4572 intptr_t slots_count = buffer->idx_; | |
| 4573 | |
| 4574 for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) { | |
| 4575 ObjectSlot slot = slots[slot_idx]; | |
| 4576 if (!IsTypedSlot(slot)) { | |
| 4577 Object* object = *slot; | |
| 4578 if (object->IsHeapObject()) { | |
| 4579 if (heap->InNewSpace(object) || | |
| 4580 !heap->mark_compact_collector()->IsSlotInLiveObject( | |
| 4581 reinterpret_cast<Address>(slot))) { | |
| 4582 slots[slot_idx] = kRemovedEntry; | |
| 4583 } | |
| 4584 } | |
| 4585 } else { | |
| 4586 ++slot_idx; | |
| 4587 DCHECK(slot_idx < slots_count); | |
| 4588 } | |
| 4589 } | |
| 4590 buffer = buffer->next(); | |
| 4591 } | |
| 4592 } | |
| 4593 | |
| 4594 | |
| 4595 void SlotsBuffer::VerifySlots(Heap* heap, SlotsBuffer* buffer) { | |
| 4596 DCHECK_EQ(Smi::FromInt(0), g_smi_slot); | |
| 4597 | |
| 4598 while (buffer != NULL) { | |
| 4599 SlotsBuffer::ObjectSlot* slots = buffer->slots_; | |
| 4600 intptr_t slots_count = buffer->idx_; | |
| 4601 | |
| 4602 for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) { | |
| 4603 ObjectSlot slot = slots[slot_idx]; | |
| 4604 if (!IsTypedSlot(slot)) { | |
| 4605 Object* object = *slot; | |
| 4606 if (object->IsHeapObject()) { | |
| 4607 CHECK(!heap->InNewSpace(object)); | |
| 4608 CHECK(heap->mark_compact_collector()->IsSlotInLiveObject( | |
| 4609 reinterpret_cast<Address>(slot))); | |
| 4610 } | |
| 4611 } else { | |
| 4612 ++slot_idx; | |
| 4613 DCHECK(slot_idx < slots_count); | |
| 4614 } | |
| 4615 } | |
| 4616 buffer = buffer->next(); | |
| 4617 } | |
| 4618 } | |
| 4619 | |
| 4620 | |
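
RemoveInvalidSlots above never compacts the buffer; it invalidates an entry in place by redirecting it to g_smi_slot, a cell whose value is a smi, so a later pass that dereferences the entry finds a smi rather than a heap-object pointer and has nothing to update. The sketch below imitates that trick with low-bit tagging standing in for V8's smi/heap-object distinction; the names and the tagging rule are simplified assumptions.

```cpp
// Toy model of in-place invalidation via a dummy "smi" cell; not the V8 types.
#include <cstdint>
#include <cstdio>
#include <vector>

using Tagged = uintptr_t;      // toy rule: low bit 1 = "heap object", low bit 0 = "smi"
static Tagged g_smi_slot = 0;  // dummy cell holding a smi

int main() {
  Tagged a = 0x1001, b = 0x2001;  // two "heap object" values (low bit set)
  std::vector<Tagged*> buffer = {&a, &b};

  buffer[1] = &g_smi_slot;  // invalidate the second entry in place

  for (Tagged* slot : buffer) {
    if ((*slot & 1) == 0) continue;  // smi found: nothing to update, skip the entry
    std::printf("would update slot at %p\n", static_cast<void*>(slot));
  }
  return 0;
}
```
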
| 4621 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) { | 4486 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) { |
| 4622 if (RelocInfo::IsCodeTarget(rmode)) { | 4487 if (RelocInfo::IsCodeTarget(rmode)) { |
| 4623 return SlotsBuffer::CODE_TARGET_SLOT; | 4488 return SlotsBuffer::CODE_TARGET_SLOT; |
| 4624 } else if (RelocInfo::IsEmbeddedObject(rmode)) { | 4489 } else if (RelocInfo::IsEmbeddedObject(rmode)) { |
| 4625 return SlotsBuffer::EMBEDDED_OBJECT_SLOT; | 4490 return SlotsBuffer::EMBEDDED_OBJECT_SLOT; |
| 4626 } else if (RelocInfo::IsDebugBreakSlot(rmode)) { | 4491 } else if (RelocInfo::IsDebugBreakSlot(rmode)) { |
| 4627 return SlotsBuffer::DEBUG_TARGET_SLOT; | 4492 return SlotsBuffer::DEBUG_TARGET_SLOT; |
| 4628 } else if (RelocInfo::IsJSReturn(rmode)) { | 4493 } else if (RelocInfo::IsJSReturn(rmode)) { |
| 4629 return SlotsBuffer::JS_RETURN_SLOT; | 4494 return SlotsBuffer::JS_RETURN_SLOT; |
| 4630 } | 4495 } |
| (...skipping 121 matching lines...) | |
| 4752 SlotsBuffer* buffer = *buffer_address; | 4617 SlotsBuffer* buffer = *buffer_address; |
| 4753 while (buffer != NULL) { | 4618 while (buffer != NULL) { |
| 4754 SlotsBuffer* next_buffer = buffer->next(); | 4619 SlotsBuffer* next_buffer = buffer->next(); |
| 4755 DeallocateBuffer(buffer); | 4620 DeallocateBuffer(buffer); |
| 4756 buffer = next_buffer; | 4621 buffer = next_buffer; |
| 4757 } | 4622 } |
| 4758 *buffer_address = NULL; | 4623 *buffer_address = NULL; |
| 4759 } | 4624 } |
| 4760 } | 4625 } |
| 4761 } // namespace v8::internal | 4626 } // namespace v8::internal |