Chromium Code Reviews
| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 74 matching lines...) | |
| 85 for (Object** current = start; current < end; current++) { | 85 for (Object** current = start; current < end; current++) { |
| 86 if ((*current)->IsHeapObject()) { | 86 if ((*current)->IsHeapObject()) { |
| 87 HeapObject* object = HeapObject::cast(*current); | 87 HeapObject* object = HeapObject::cast(*current); |
| 88 CHECK(heap_->mark_compact_collector()->IsMarked(object)); | 88 CHECK(heap_->mark_compact_collector()->IsMarked(object)); |
| 89 } | 89 } |
| 90 } | 90 } |
| 91 } | 91 } |
| 92 | 92 |
| 93 void VisitEmbeddedPointer(RelocInfo* rinfo) { | 93 void VisitEmbeddedPointer(RelocInfo* rinfo) { |
| 94 ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); | 94 ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); |
| 95 if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps || | 95 if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(), |
| 96 rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION || | 96 rinfo->target_object())) { |
| 97 !rinfo->target_object()->IsMap() || | |
| 98 !Map::cast(rinfo->target_object())->CanTransition()) { | |
| 99 VisitPointer(rinfo->target_object_address()); | 97 VisitPointer(rinfo->target_object_address()); |
| 100 } | 98 } |
| 101 } | 99 } |
| 102 | 100 |
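The hunk above folds a four-clause inline test into a single call to Code::IsWeakEmbeddedObject. Reading only the deleted clauses, the predicate should hold exactly when both flags are on, the host code is an optimized function, and the embedded object is a map that can transition. Below is a standalone sketch of that logic with stand-in types, since the real declaration is not part of this diff:

```cpp
#include <cassert>

// Stand-ins for V8-internal types; only the predicate logic is modeled here.
enum class CodeKind { OPTIMIZED_FUNCTION, STUB };

struct Object {
  bool is_map = false;
  bool can_transition = false;  // Map::CanTransition() in the deleted clause
};

// Mirror --weak-embedded-maps-in-optimized-code and --collect-maps.
static bool flag_weak_embedded_maps_in_optimized_code = true;
static bool flag_collect_maps = true;

// An embedded pointer is treated as weak only when every clause of the old
// inline test holds; otherwise VisitEmbeddedPointer keeps it strong.
bool IsWeakEmbeddedObject(CodeKind kind, const Object& target) {
  return flag_weak_embedded_maps_in_optimized_code && flag_collect_maps &&
         kind == CodeKind::OPTIMIZED_FUNCTION && target.is_map &&
         target.can_transition;
}

int main() {
  Object transitioning_map{true, true};
  assert(IsWeakEmbeddedObject(CodeKind::OPTIMIZED_FUNCTION, transitioning_map));
  assert(!IsWeakEmbeddedObject(CodeKind::STUB, transitioning_map));
  return 0;
}
```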
| 103 private: | 101 private: |
| 104 Heap* heap_; | 102 Heap* heap_; |
| 105 }; | 103 }; |
| 106 | 104 |
| 107 | 105 |
| 108 static void VerifyMarking(Heap* heap, Address bottom, Address top) { | 106 static void VerifyMarking(Heap* heap, Address bottom, Address top) { |
| (...skipping 316 matching lines...) | |
| 425 | 423 |
| 426 if (!FLAG_collect_maps) ReattachInitialMaps(); | 424 if (!FLAG_collect_maps) ReattachInitialMaps(); |
| 427 | 425 |
| 428 #ifdef DEBUG | 426 #ifdef DEBUG |
| 429 if (FLAG_verify_native_context_separation) { | 427 if (FLAG_verify_native_context_separation) { |
| 430 VerifyNativeContextSeparation(heap_); | 428 VerifyNativeContextSeparation(heap_); |
| 431 } | 429 } |
| 432 #endif | 430 #endif |
| 433 | 431 |
| 434 #ifdef VERIFY_HEAP | 432 #ifdef VERIFY_HEAP |
| 435 if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_optimized_code && | 433 if (heap()->weak_embedded_maps_verification_enabled()) { |
Hannes Payer (out of office), 2013/10/01 13:13:50: This property should be renamed to weak_embedded_objects_verification_enabled.
ulan, 2013/10/01 14:50:29: Can we instead rename the method to VerifyWeakEmbeddedObjectsInOptimizedCode?
Hannes Payer (out of office), 2013/10/02 12:21:47: I think object is general enough to also reflect maps.
ulan, 2013/10/02 12:36:49: Done.
| 436 heap()->weak_embedded_maps_verification_enabled()) { | 434 VerifyWeakEmbeddedObjectsInOptimizedCode(); |
| 437 VerifyWeakEmbeddedMapsInOptimizedCode(); | |
| 438 } | 435 } |
| 439 if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) { | 436 if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) { |
| 440 VerifyOmittedMapChecks(); | 437 VerifyOmittedMapChecks(); |
| 441 } | 438 } |
| 442 #endif | 439 #endif |
| 443 | 440 |
| 444 Finish(); | 441 Finish(); |
| 445 | 442 |
| 446 if (marking_parity_ == EVEN_MARKING_PARITY) { | 443 if (marking_parity_ == EVEN_MARKING_PARITY) { |
| 447 marking_parity_ = ODD_MARKING_PARITY; | 444 marking_parity_ = ODD_MARKING_PARITY; |
| (...skipping 40 matching lines...) | |
| 488 | 485 |
| 489 LargeObjectIterator it(heap_->lo_space()); | 486 LargeObjectIterator it(heap_->lo_space()); |
| 490 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 487 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 491 MarkBit mark_bit = Marking::MarkBitFrom(obj); | 488 MarkBit mark_bit = Marking::MarkBitFrom(obj); |
| 492 CHECK(Marking::IsWhite(mark_bit)); | 489 CHECK(Marking::IsWhite(mark_bit)); |
| 493 CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes()); | 490 CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes()); |
| 494 } | 491 } |
| 495 } | 492 } |
| 496 | 493 |
| 497 | 494 |
| 498 void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() { | 495 void MarkCompactCollector::VerifyWeakEmbeddedObjectsInOptimizedCode() { |
| 499 HeapObjectIterator code_iterator(heap()->code_space()); | 496 HeapObjectIterator code_iterator(heap()->code_space()); |
| 500 for (HeapObject* obj = code_iterator.Next(); | 497 for (HeapObject* obj = code_iterator.Next(); |
| 501 obj != NULL; | 498 obj != NULL; |
| 502 obj = code_iterator.Next()) { | 499 obj = code_iterator.Next()) { |
| 503 Code* code = Code::cast(obj); | 500 Code* code = Code::cast(obj); |
| 504 if (code->kind() != Code::OPTIMIZED_FUNCTION) continue; | 501 if (code->kind() != Code::OPTIMIZED_FUNCTION) continue; |
| 505 if (WillBeDeoptimized(code)) continue; | 502 if (WillBeDeoptimized(code)) continue; |
| 506 code->VerifyEmbeddedMapsDependency(); | 503 code->VerifyEmbeddedObjectsDependency(); |
| 507 } | 504 } |
| 508 } | 505 } |
| 509 | 506 |
| 510 | 507 |
| 511 void MarkCompactCollector::VerifyOmittedMapChecks() { | 508 void MarkCompactCollector::VerifyOmittedMapChecks() { |
| 512 HeapObjectIterator iterator(heap()->map_space()); | 509 HeapObjectIterator iterator(heap()->map_space()); |
| 513 for (HeapObject* obj = iterator.Next(); | 510 for (HeapObject* obj = iterator.Next(); |
| 514 obj != NULL; | 511 obj != NULL; |
| 515 obj = iterator.Next()) { | 512 obj = iterator.Next()) { |
| 516 Map* map = Map::cast(obj); | 513 Map* map = Map::cast(obj); |
| (...skipping 957 matching lines...) | |
| 1474 JSWeakCollection::kTableOffset); | 1471 JSWeakCollection::kTableOffset); |
| 1475 BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers( | 1472 BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers( |
| 1476 map->GetHeap(), | 1473 map->GetHeap(), |
| 1477 object, | 1474 object, |
| 1478 JSWeakCollection::kTableOffset + kPointerSize, | 1475 JSWeakCollection::kTableOffset + kPointerSize, |
| 1479 object_size); | 1476 object_size); |
| 1480 | 1477 |
| 1481 // Mark the backing hash table without pushing it on the marking stack. | 1478 // Mark the backing hash table without pushing it on the marking stack. |
| 1482 Object* table_object = weak_collection->table(); | 1479 Object* table_object = weak_collection->table(); |
| 1483 if (!table_object->IsHashTable()) return; | 1480 if (!table_object->IsHashTable()) return; |
| 1484 ObjectHashTable* table = ObjectHashTable::cast(table_object); | 1481 WeakHashTable* table = WeakHashTable::cast(table_object); |
| 1485 Object** table_slot = | 1482 Object** table_slot = |
| 1486 HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset); | 1483 HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset); |
| 1487 MarkBit table_mark = Marking::MarkBitFrom(table); | 1484 MarkBit table_mark = Marking::MarkBitFrom(table); |
| 1488 collector->RecordSlot(table_slot, table_slot, table); | 1485 collector->RecordSlot(table_slot, table_slot, table); |
| 1489 if (!table_mark.Get()) collector->SetMark(table, table_mark); | 1486 if (!table_mark.Get()) collector->SetMark(table, table_mark); |
| 1490 // Recording the map slot can be skipped, because maps are not compacted. | 1487 // Recording the map slot can be skipped, because maps are not compacted. |
| 1491 collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map())); | 1488 collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map())); |
| 1492 ASSERT(MarkCompactCollector::IsMarked(table->map())); | 1489 ASSERT(MarkCompactCollector::IsMarked(table->map())); |
| 1493 } | 1490 } |
| 1494 | 1491 |
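The comment in the hunk above names a deliberate pattern: the backing hash table of a JSWeakCollection is marked live directly, without being pushed on the marking deque, so the table object survives the GC while its entries are never traced strongly and stay weak. A self-contained toy model of that distinction (the types are stand-ins, not V8 code):

```cpp
#include <cassert>
#include <vector>

// Toy heap objects with an explicit mark bit.
struct Obj { bool marked = false; };

// A weak table: the table object itself must survive, but marking it must
// not trace its entries, or the "weak" values would be kept alive.
struct WeakTable : Obj {
  std::vector<Obj*> entries;
};

// Model of the pattern above: set the mark bit directly instead of pushing
// the table on the marking deque, so its entries are never visited.
void MarkTableWithoutTracing(WeakTable* table) {
  if (!table->marked) table->marked = true;
}

int main() {
  WeakTable table;
  Obj weak_value;
  table.entries.push_back(&weak_value);
  MarkTableWithoutTracing(&table);
  assert(table.marked);        // the table survives the collection
  assert(!weak_value.marked);  // its entries stay unmarked, i.e. weak
  return 0;
}
```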
| (...skipping 614 matching lines...) | |
| 2109 | 2106 |
| 2110 | 2107 |
| 2111 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { | 2108 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { |
| 2112 // Mark the heap roots including global variables, stack variables, | 2109 // Mark the heap roots including global variables, stack variables, |
| 2113 // etc., and all objects reachable from them. | 2110 // etc., and all objects reachable from them. |
| 2114 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG); | 2111 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG); |
| 2115 | 2112 |
| 2116 // Handle the string table specially. | 2113 // Handle the string table specially. |
| 2117 MarkStringTable(visitor); | 2114 MarkStringTable(visitor); |
| 2118 | 2115 |
| 2116 MarkWeakObjectToCode(visitor); | |
| 2117 | |
| 2119 // There may be overflowed objects in the heap. Visit them now. | 2118 // There may be overflowed objects in the heap. Visit them now. |
| 2120 while (marking_deque_.overflowed()) { | 2119 while (marking_deque_.overflowed()) { |
| 2121 RefillMarkingDeque(); | 2120 RefillMarkingDeque(); |
| 2122 EmptyMarkingDeque(); | 2121 EmptyMarkingDeque(); |
| 2123 } | 2122 } |
| 2124 } | 2123 } |
| 2125 | 2124 |
| 2126 | 2125 |
| 2127 void MarkCompactCollector::MarkImplicitRefGroups() { | 2126 void MarkCompactCollector::MarkImplicitRefGroups() { |
| 2128 List<ImplicitRefGroup*>* ref_groups = | 2127 List<ImplicitRefGroup*>* ref_groups = |
| (...skipping 20 matching lines...) | |
| 2149 } | 2148 } |
| 2150 | 2149 |
| 2151 // Once the entire group has been marked, dispose it because it's | 2150 // Once the entire group has been marked, dispose it because it's |
| 2152 // not needed anymore. | 2151 // not needed anymore. |
| 2153 delete entry; | 2152 delete entry; |
| 2154 } | 2153 } |
| 2155 ref_groups->Rewind(last); | 2154 ref_groups->Rewind(last); |
| 2156 } | 2155 } |
| 2157 | 2156 |
| 2158 | 2157 |
| 2158 void MarkCompactCollector::MarkWeakObjectToCode(RootMarkingVisitor* visitor) { | |
Hannes Payer (out of office), 2013/10/01 13:13:50: Visitor is not needed for this function.
ulan, 2013/10/01 14:50:29: Done.
| 2159 HeapObject* weak_object_to_code = | |
| 2160 HeapObject::cast(heap()->weak_object_to_code()); | |
| 2161 if (!IsMarked(weak_object_to_code)) { | |
| 2162 MarkBit mark = Marking::MarkBitFrom(weak_object_to_code); | |
| 2163 SetMark(weak_object_to_code, mark); | |
| 2164 } | |
| 2165 } | |
| 2166 | |
| 2167 | |
| 2159 // Mark all objects reachable from the objects on the marking stack. | 2168 // Mark all objects reachable from the objects on the marking stack. |
| 2160 // Before: the marking stack contains zero or more heap object pointers. | 2169 // Before: the marking stack contains zero or more heap object pointers. |
| 2161 // After: the marking stack is empty, and all objects reachable from the | 2170 // After: the marking stack is empty, and all objects reachable from the |
| 2162 // marking stack have been marked, or are overflowed in the heap. | 2171 // marking stack have been marked, or are overflowed in the heap. |
| 2163 void MarkCompactCollector::EmptyMarkingDeque() { | 2172 void MarkCompactCollector::EmptyMarkingDeque() { |
| 2164 while (!marking_deque_.IsEmpty()) { | 2173 while (!marking_deque_.IsEmpty()) { |
| 2165 HeapObject* object = marking_deque_.Pop(); | 2174 HeapObject* object = marking_deque_.Pop(); |
| 2166 ASSERT(object->IsHeapObject()); | 2175 ASSERT(object->IsHeapObject()); |
| 2167 ASSERT(heap()->Contains(object)); | 2176 ASSERT(heap()->Contains(object)); |
| 2168 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); | 2177 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); |
| (...skipping 347 matching lines...) | |
| 2516 // Since it survived the GC, reattach it now. | 2525 // Since it survived the GC, reattach it now. |
| 2517 JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map); | 2526 JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map); |
| 2518 } | 2527 } |
| 2519 | 2528 |
| 2520 ClearNonLivePrototypeTransitions(map); | 2529 ClearNonLivePrototypeTransitions(map); |
| 2521 ClearNonLiveMapTransitions(map, map_mark); | 2530 ClearNonLiveMapTransitions(map, map_mark); |
| 2522 | 2531 |
| 2523 if (map_mark.Get()) { | 2532 if (map_mark.Get()) { |
| 2524 ClearNonLiveDependentCode(map->dependent_code()); | 2533 ClearNonLiveDependentCode(map->dependent_code()); |
| 2525 } else { | 2534 } else { |
| 2526 ClearAndDeoptimizeDependentCode(map); | 2535 ClearAndDeoptimizeDependentCode(map->dependent_code()); |
| 2536 map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array())); | |
| 2527 } | 2537 } |
| 2528 } | 2538 } |
| 2529 | 2539 |
| 2530 // Iterate over property cell space, removing dependent code that is not | 2540 // Iterate over property cell space, removing dependent code that is not |
| 2531 // otherwise kept alive by strong references. | 2541 // otherwise kept alive by strong references. |
| 2532 HeapObjectIterator cell_iterator(heap_->property_cell_space()); | 2542 HeapObjectIterator cell_iterator(heap_->property_cell_space()); |
| 2533 for (HeapObject* cell = cell_iterator.Next(); | 2543 for (HeapObject* cell = cell_iterator.Next(); |
| 2534 cell != NULL; | 2544 cell != NULL; |
| 2535 cell = cell_iterator.Next()) { | 2545 cell = cell_iterator.Next()) { |
| 2536 if (IsMarked(cell)) { | 2546 if (IsMarked(cell)) { |
| 2537 ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code()); | 2547 ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code()); |
| 2538 } | 2548 } |
| 2539 } | 2549 } |
| 2550 | |
| 2551 if (heap_->weak_object_to_code()->IsHashTable()) { | |
| 2552 WeakHashTable* table = WeakHashTable::cast(heap_->weak_object_to_code()); | |
| 2553 uint32_t capacity = table->Capacity(); | |
| 2554 for (uint32_t i = 0; i < capacity; i++) { | |
| 2555 uint32_t key_index = table->EntryToIndex(i); | |
| 2556 Object* key = table->get(key_index); | |
| 2557 if (!table->IsKey(key)) continue; | |
| 2558 uint32_t value_index = table->EntryToValueIndex(i); | |
| 2559 Object* value = table->get(value_index); | |
| 2560 if (IsMarked(key)) { | |
| 2561 if (!IsMarked(value)) { | |
| 2562 HeapObject* obj = HeapObject::cast(value); | |
| 2563 MarkBit mark = Marking::MarkBitFrom(obj); | |
| 2564 SetMark(obj, mark); | |
| 2565 } | |
| 2566 ClearNonLiveDependentCode(DependentCode::cast(value)); | |
| 2567 } else { | |
| 2568 ClearAndDeoptimizeDependentCode(DependentCode::cast(value)); | |
| 2569 table->set(key_index, heap_->the_hole_value()); | |
| 2570 table->set(value_index, heap_->the_hole_value()); | |
| 2571 } | |
| 2572 } | |
| 2573 } | |
| 2540 } | 2574 } |
| 2541 | 2575 |
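The block added to ClearNonLiveReferences above sweeps weak_object_to_code entry by entry: a marked key keeps its value (marking it if necessary, then pruning dead code via ClearNonLiveDependentCode), while a dead key has its dependent code deoptimized and both slots overwritten with the hole. A standalone model of that sweep, using an ordinary map in place of the V8 WeakHashTable:

```cpp
#include <iostream>
#include <unordered_map>
#include <vector>

struct Code { bool marked_for_deoptimization = false; };
struct Key { bool marked = false; };  // mark bit set during the marking phase

using DependentCode = std::vector<Code*>;

// Model of the weak_object_to_code sweep: entries whose key survived marking
// keep their dependent code; entries with a dead key deoptimize it and are
// cleared (the real table writes the_hole into both slots instead).
void SweepWeakObjectToCode(std::unordered_map<Key*, DependentCode>& table) {
  for (auto it = table.begin(); it != table.end();) {
    if (it->first->marked) {
      ++it;  // live key: ClearNonLiveDependentCode would prune dead entries
    } else {
      for (Code* code : it->second) code->marked_for_deoptimization = true;
      it = table.erase(it);  // clear the key and value slots
    }
  }
}

int main() {
  Key live_key{true}, dead_key{false};
  Code kept, deoptimized;
  std::unordered_map<Key*, DependentCode> table = {
      {&live_key, {&kept}}, {&dead_key, {&deoptimized}}};
  SweepWeakObjectToCode(table);
  std::cout << table.size() << " "                     // prints: 1
            << deoptimized.marked_for_deoptimization;  // prints: 1
  return 0;
}
```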
| 2542 | 2576 |
| 2543 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) { | 2577 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) { |
| 2544 int number_of_transitions = map->NumberOfProtoTransitions(); | 2578 int number_of_transitions = map->NumberOfProtoTransitions(); |
| 2545 FixedArray* prototype_transitions = map->GetPrototypeTransitions(); | 2579 FixedArray* prototype_transitions = map->GetPrototypeTransitions(); |
| 2546 | 2580 |
| 2547 int new_number_of_transitions = 0; | 2581 int new_number_of_transitions = 0; |
| 2548 const int header = Map::kProtoTransitionHeaderSize; | 2582 const int header = Map::kProtoTransitionHeaderSize; |
| 2549 const int proto_offset = header + Map::kProtoTransitionPrototypeOffset; | 2583 const int proto_offset = header + Map::kProtoTransitionPrototypeOffset; |
| (...skipping 45 matching lines...) | |
| 2595 // Follow back pointer, check whether we are dealing with a map transition | 2629 // Follow back pointer, check whether we are dealing with a map transition |
| 2596 // from a live map to a dead path and in case clear transitions of parent. | 2630 // from a live map to a dead path and in case clear transitions of parent. |
| 2597 bool current_is_alive = map_mark.Get(); | 2631 bool current_is_alive = map_mark.Get(); |
| 2598 bool parent_is_alive = Marking::MarkBitFrom(parent).Get(); | 2632 bool parent_is_alive = Marking::MarkBitFrom(parent).Get(); |
| 2599 if (!current_is_alive && parent_is_alive) { | 2633 if (!current_is_alive && parent_is_alive) { |
| 2600 parent->ClearNonLiveTransitions(heap()); | 2634 parent->ClearNonLiveTransitions(heap()); |
| 2601 } | 2635 } |
| 2602 } | 2636 } |
| 2603 | 2637 |
| 2604 | 2638 |
| 2605 void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) { | 2639 void MarkCompactCollector::ClearAndDeoptimizeDependentCode( |
| 2640 DependentCode* entries) { | |
| 2606 DisallowHeapAllocation no_allocation; | 2641 DisallowHeapAllocation no_allocation; |
| 2607 DependentCode* entries = map->dependent_code(); | |
| 2608 DependentCode::GroupStartIndexes starts(entries); | 2642 DependentCode::GroupStartIndexes starts(entries); |
| 2609 int number_of_entries = starts.number_of_entries(); | 2643 int number_of_entries = starts.number_of_entries(); |
| 2610 if (number_of_entries == 0) return; | 2644 if (number_of_entries == 0) return; |
| 2611 for (int i = 0; i < number_of_entries; i++) { | 2645 for (int i = 0; i < number_of_entries; i++) { |
| 2612 // If the entry is compilation info then the map must be alive, | 2646 // If the entry is compilation info then the map must be alive, |
| 2613 // and ClearAndDeoptimizeDependentCode shouldn't be called. | 2647 // and ClearAndDeoptimizeDependentCode shouldn't be called. |
| 2614 ASSERT(entries->is_code_at(i)); | 2648 ASSERT(entries->is_code_at(i)); |
| 2615 Code* code = entries->code_at(i); | 2649 Code* code = entries->code_at(i); |
| 2616 | 2650 |
| 2617 if (IsMarked(code) && !code->marked_for_deoptimization()) { | 2651 if (IsMarked(code) && !code->marked_for_deoptimization()) { |
| 2618 code->set_marked_for_deoptimization(true); | 2652 code->set_marked_for_deoptimization(true); |
| 2619 have_code_to_deoptimize_ = true; | 2653 have_code_to_deoptimize_ = true; |
| 2620 } | 2654 } |
| 2621 entries->clear_at(i); | 2655 entries->clear_at(i); |
| 2622 } | 2656 } |
| 2623 map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array())); | |
| 2624 } | 2657 } |
| 2625 | 2658 |
| 2626 | 2659 |
| 2627 void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) { | 2660 void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) { |
| 2628 DisallowHeapAllocation no_allocation; | 2661 DisallowHeapAllocation no_allocation; |
| 2629 DependentCode::GroupStartIndexes starts(entries); | 2662 DependentCode::GroupStartIndexes starts(entries); |
| 2630 int number_of_entries = starts.number_of_entries(); | 2663 int number_of_entries = starts.number_of_entries(); |
| 2631 if (number_of_entries == 0) return; | 2664 if (number_of_entries == 0) return; |
| 2632 int new_number_of_entries = 0; | 2665 int new_number_of_entries = 0; |
| 2633 // Go through all groups, remove dead codes and compact. | 2666 // Go through all groups, remove dead codes and compact. |
| (...skipping 818 matching lines...) | |
| 3452 cell = js_global_property_cell_iterator.Next()) { | 3485 cell = js_global_property_cell_iterator.Next()) { |
| 3453 if (cell->IsPropertyCell()) { | 3486 if (cell->IsPropertyCell()) { |
| 3454 PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor); | 3487 PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor); |
| 3455 } | 3488 } |
| 3456 } | 3489 } |
| 3457 | 3490 |
| 3458 // Update the head of the native contexts list in the heap. | 3491 // Update the head of the native contexts list in the heap. |
| 3459 updating_visitor.VisitPointer(heap_->native_contexts_list_address()); | 3492 updating_visitor.VisitPointer(heap_->native_contexts_list_address()); |
| 3460 | 3493 |
| 3461 heap_->string_table()->Iterate(&updating_visitor); | 3494 heap_->string_table()->Iterate(&updating_visitor); |
| 3495 updating_visitor.VisitPointer(heap_->weak_object_to_code_address()); | |
| 3496 if (heap_->weak_object_to_code()->IsHashTable()) { | |
| 3497 WeakHashTable* table = WeakHashTable::cast(heap_->weak_object_to_code()); | |
| 3498 table->Iterate(&updating_visitor); | |
| 3499 table->Rehash(heap_->undefined_value()); | |
| 3500 } | |
| 3462 | 3501 |
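The hunk above rehashes weak_object_to_code after its pointers are updated, presumably because entries are bucketed by key identity and compaction can move the keys, leaving lookups stale; the diff itself only shows the Iterate and Rehash calls, so the address-based-bucketing premise is an assumption. A toy demonstration under that assumption:

```cpp
#include <cassert>
#include <unordered_map>

// Toy model of why Rehash follows pointer updates: if entries are bucketed
// by key address and "compaction" moves a key to a new address, lookups
// under the new address miss until the table is rebuilt.
int main() {
  int old_location = 0, new_location = 0;  // two addresses for "one" object
  std::unordered_map<int*, const char*> table;
  table[&old_location] = "dependent code";

  // After compaction the object lives at new_location; the old slot is dead.
  assert(table.find(&new_location) == table.end());  // stale lookup misses

  // "Rehash": re-bucket the entries under the updated key addresses.
  std::unordered_map<int*, const char*> rehashed;
  rehashed[&new_location] = table[&old_location];
  table.swap(rehashed);
  assert(table.find(&new_location) != table.end());  // lookups work again
  return 0;
}
```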
| 3463 // Update pointers from external string table. | 3502 // Update pointers from external string table. |
| 3464 heap_->UpdateReferencesInExternalStringTable( | 3503 heap_->UpdateReferencesInExternalStringTable( |
| 3465 &UpdateReferenceInExternalStringTableEntry); | 3504 &UpdateReferenceInExternalStringTableEntry); |
| 3466 | 3505 |
| 3467 if (!FLAG_watch_ic_patching) { | 3506 if (!FLAG_watch_ic_patching) { |
| 3468 // Update JSFunction pointers from the runtime profiler. | 3507 // Update JSFunction pointers from the runtime profiler. |
| 3469 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact( | 3508 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact( |
| 3470 &updating_visitor); | 3509 &updating_visitor); |
| 3471 } | 3510 } |
| (...skipping 843 matching lines...) | |
| 4315 while (buffer != NULL) { | 4354 while (buffer != NULL) { |
| 4316 SlotsBuffer* next_buffer = buffer->next(); | 4355 SlotsBuffer* next_buffer = buffer->next(); |
| 4317 DeallocateBuffer(buffer); | 4356 DeallocateBuffer(buffer); |
| 4318 buffer = next_buffer; | 4357 buffer = next_buffer; |
| 4319 } | 4358 } |
| 4320 *buffer_address = NULL; | 4359 *buffer_address = NULL; |
| 4321 } | 4360 } |
| 4322 | 4361 |
| 4323 | 4362 |
| 4324 } } // namespace v8::internal | 4363 } } // namespace v8::internal |