Chromium Code Reviews

Side by Side Diff: src/mark-compact.cc

Issue 131363008: A64: Synchronize with r15922. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 10 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 55 matching lines...)
66 abort_incremental_marking_(false), 66 abort_incremental_marking_(false),
67 marking_parity_(ODD_MARKING_PARITY), 67 marking_parity_(ODD_MARKING_PARITY),
68 compacting_(false), 68 compacting_(false),
69 was_marked_incrementally_(false), 69 was_marked_incrementally_(false),
70 sweeping_pending_(false), 70 sweeping_pending_(false),
71 sequential_sweeping_(false), 71 sequential_sweeping_(false),
72 tracer_(NULL), 72 tracer_(NULL),
73 migration_slots_buffer_(NULL), 73 migration_slots_buffer_(NULL),
74 heap_(NULL), 74 heap_(NULL),
75 code_flusher_(NULL), 75 code_flusher_(NULL),
76 encountered_weak_collections_(NULL) { } 76 encountered_weak_collections_(NULL),
77 77 code_to_deoptimize_(NULL) { }
78 78
79 #ifdef VERIFY_HEAP 79 #ifdef VERIFY_HEAP
80 class VerifyMarkingVisitor: public ObjectVisitor { 80 class VerifyMarkingVisitor: public ObjectVisitor {
81 public: 81 public:
82 void VisitPointers(Object** start, Object** end) { 82 void VisitPointers(Object** start, Object** end) {
83 for (Object** current = start; current < end; current++) { 83 for (Object** current = start; current < end; current++) {
84 if ((*current)->IsHeapObject()) { 84 if ((*current)->IsHeapObject()) {
85 HeapObject* object = HeapObject::cast(*current); 85 HeapObject* object = HeapObject::cast(*current);
86 CHECK(HEAP->mark_compact_collector()->IsMarked(object)); 86 CHECK(HEAP->mark_compact_collector()->IsMarked(object));
87 } 87 }
(...skipping 397 matching lines...)
485 } 485 }
486 486
487 487
488 void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() { 488 void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() {
489 HeapObjectIterator code_iterator(heap()->code_space()); 489 HeapObjectIterator code_iterator(heap()->code_space());
490 for (HeapObject* obj = code_iterator.Next(); 490 for (HeapObject* obj = code_iterator.Next();
491 obj != NULL; 491 obj != NULL;
492 obj = code_iterator.Next()) { 492 obj = code_iterator.Next()) {
493 Code* code = Code::cast(obj); 493 Code* code = Code::cast(obj);
494 if (code->kind() != Code::OPTIMIZED_FUNCTION) continue; 494 if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
495 if (code->marked_for_deoptimization()) continue; 495 if (WillBeDeoptimized(code)) continue;
496 code->VerifyEmbeddedMapsDependency(); 496 code->VerifyEmbeddedMapsDependency();
497 } 497 }
498 } 498 }
499 499
500 500
501 void MarkCompactCollector::VerifyOmittedPrototypeChecks() { 501 void MarkCompactCollector::VerifyOmittedPrototypeChecks() {
502 HeapObjectIterator iterator(heap()->map_space()); 502 HeapObjectIterator iterator(heap()->map_space());
503 for (HeapObject* obj = iterator.Next(); 503 for (HeapObject* obj = iterator.Next();
504 obj != NULL; 504 obj != NULL;
505 obj = iterator.Next()) { 505 obj = iterator.Next()) {
(...skipping 432 matching lines...)
938 } 938 }
939 939
940 #ifdef VERIFY_HEAP 940 #ifdef VERIFY_HEAP
941 if (!was_marked_incrementally_ && FLAG_verify_heap) { 941 if (!was_marked_incrementally_ && FLAG_verify_heap) {
942 VerifyMarkbitsAreClean(); 942 VerifyMarkbitsAreClean();
943 } 943 }
944 #endif 944 #endif
945 } 945 }
946 946
947 947
948 class DeoptimizeMarkedCodeFilter : public OptimizedFunctionFilter {
949 public:
950 virtual bool TakeFunction(JSFunction* function) {
951 return function->code()->marked_for_deoptimization();
952 }
953 };
954
955
956 void MarkCompactCollector::Finish() { 948 void MarkCompactCollector::Finish() {
957 #ifdef DEBUG 949 #ifdef DEBUG
958 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); 950 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
959 state_ = IDLE; 951 state_ = IDLE;
960 #endif 952 #endif
961 // The stub cache is not traversed during GC; clear the cache to 953 // The stub cache is not traversed during GC; clear the cache to
962 // force lazy re-initialization of it. This must be done after the 954 // force lazy re-initialization of it. This must be done after the
963 // GC, because it relies on the new address of certain old space 955 // GC, because it relies on the new address of certain old space
964 // objects (empty string, illegal builtin). 956 // objects (empty string, illegal builtin).
965 isolate()->stub_cache()->Clear(); 957 isolate()->stub_cache()->Clear();
966 958
967 DeoptimizeMarkedCodeFilter filter; 959 if (code_to_deoptimize_ != Smi::FromInt(0)) {
968 Deoptimizer::DeoptimizeAllFunctionsWith(isolate(), &filter); 960 // Convert the linked list of Code objects into a ZoneList.
961 Zone zone(isolate());
962 ZoneList<Code*> codes(4, &zone);
963
964 Object* list = code_to_deoptimize_;
965 while (list->IsCode()) {
966 Code* code = Code::cast(list);
967 list = code->code_to_deoptimize_link();
968 codes.Add(code, &zone);
969 // Destroy the link and don't ever try to deoptimize this code again.
970 code->set_code_to_deoptimize_link(Smi::FromInt(0));
971 }
972 code_to_deoptimize_ = Smi::FromInt(0);
973
974 Deoptimizer::DeoptimizeCodeList(isolate(), &codes);
975 }
969 } 976 }
970 977
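The new Finish() logic above drains an intrusive singly linked list that is threaded through the Code objects themselves. Below is a minimal standalone sketch of that pattern; CodeNode, kNotLinked, and DrainDeoptList are hypothetical stand-ins for V8's Code, the Smi 0 terminator, and the ZoneList collection, not real V8 API.

#include <cassert>
#include <vector>

struct CodeNode {
  int id;
  CodeNode* deopt_link;  // next list element, or kNotLinked once drained
};

// nullptr plays the role of the Smi::FromInt(0) terminator.
CodeNode* const kNotLinked = nullptr;

// Walk the intrusive list, collect every node, and sever each link so a
// node can never be queued for deoptimization twice.
std::vector<CodeNode*> DrainDeoptList(CodeNode* head) {
  std::vector<CodeNode*> codes;
  for (CodeNode* node = head; node != kNotLinked; ) {
    CodeNode* next = node->deopt_link;
    node->deopt_link = kNotLinked;  // destroy the link, as in Finish()
    codes.push_back(node);
    node = next;
  }
  return codes;
}

int main() {
  CodeNode c{3, kNotLinked}, b{2, &c}, a{1, &b};
  std::vector<CodeNode*> codes = DrainDeoptList(&a);
  assert(codes.size() == 3 && codes[0]->id == 1);
  assert(a.deopt_link == kNotLinked);  // links are gone after draining
  return 0;
}

The property the patch relies on, mirrored here, is that every link is destroyed during traversal, so the same Code object cannot be submitted for deoptimization twice.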
971 978
972 // ------------------------------------------------------------------------- 979 // -------------------------------------------------------------------------
973 // Phase 1: tracing and marking live objects. 980 // Phase 1: tracing and marking live objects.
974 // before: all objects are in normal state. 981 // before: all objects are in normal state.
975 // after: a live object's map pointer is marked as '00'. 982 // after: a live object's map pointer is marked as '00'.
976 983
977 // Marking all live objects in the heap as part of mark-sweep or mark-compact 984 // Marking all live objects in the heap as part of mark-sweep or mark-compact
978 // collection. Before marking, all objects are in their normal state. After 985 // collection. Before marking, all objects are in their normal state. After
(...skipping 1410 matching lines...)
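The phase-1 comment above describes the marking pass. As a rough illustration only (HeapObj and MarkLive are hypothetical; real V8 keeps mark bits in a side bitmap indexed by address and uses the map-pointer encoding mentioned above, not a field on the object):

#include <vector>

struct HeapObj {
  bool marked = false;             // stand-in for the object's mark bit
  std::vector<HeapObj*> children;  // outgoing pointers
};

// Depth-first marking from the roots: flip each reachable object's mark
// bit exactly once and push its children for tracing.
void MarkLive(const std::vector<HeapObj*>& roots) {
  std::vector<HeapObj*> worklist(roots.begin(), roots.end());
  while (!worklist.empty()) {
    HeapObj* obj = worklist.back();
    worklist.pop_back();
    if (obj == nullptr || obj->marked) continue;
    obj->marked = true;
    for (HeapObj* child : obj->children) worklist.push_back(child);
  }
  // Anything still unmarked afterwards is dead and can be swept.
}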
2389 2396
2390 // Prune the string table removing all strings only pointed to by the 2397 // Prune the string table removing all strings only pointed to by the
2391 // string table. Cannot use string_table() here because the string 2398 // string table. Cannot use string_table() here because the string
2392 // table is marked. 2399 // table is marked.
2393 StringTable* string_table = heap()->string_table(); 2400 StringTable* string_table = heap()->string_table();
2394 StringTableCleaner v(heap()); 2401 StringTableCleaner v(heap());
2395 string_table->IterateElements(&v); 2402 string_table->IterateElements(&v);
2396 string_table->ElementsRemoved(v.PointersRemoved()); 2403 string_table->ElementsRemoved(v.PointersRemoved());
2397 heap()->external_string_table_.Iterate(&v); 2404 heap()->external_string_table_.Iterate(&v);
2398 heap()->external_string_table_.CleanUp(); 2405 heap()->external_string_table_.CleanUp();
2399 heap()->error_object_list_.RemoveUnmarked(heap());
2400 2406
2401 // Process the weak references. 2407 // Process the weak references.
2402 MarkCompactWeakObjectRetainer mark_compact_object_retainer; 2408 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2403 heap()->ProcessWeakReferences(&mark_compact_object_retainer); 2409 heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2404 2410
2405 // Remove object groups after marking phase. 2411 // Remove object groups after marking phase.
2406 heap()->isolate()->global_handles()->RemoveObjectGroups(); 2412 heap()->isolate()->global_handles()->RemoveObjectGroups();
2407 heap()->isolate()->global_handles()->RemoveImplicitRefGroups(); 2413 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2408 2414
2409 // Flush code from collected candidates. 2415 // Flush code from collected candidates.
(...skipping 194 matching lines...)
2604 DisallowHeapAllocation no_allocation; 2610 DisallowHeapAllocation no_allocation;
2605 DependentCode* entries = map->dependent_code(); 2611 DependentCode* entries = map->dependent_code();
2606 DependentCode::GroupStartIndexes starts(entries); 2612 DependentCode::GroupStartIndexes starts(entries);
2607 int number_of_entries = starts.number_of_entries(); 2613 int number_of_entries = starts.number_of_entries();
2608 if (number_of_entries == 0) return; 2614 if (number_of_entries == 0) return;
2609 for (int i = 0; i < number_of_entries; i++) { 2615 for (int i = 0; i < number_of_entries; i++) {
2610 // If the entry is compilation info then the map must be alive, 2616 // If the entry is compilation info then the map must be alive,
2611 // and ClearAndDeoptimizeDependentCode shouldn't be called. 2617 // and ClearAndDeoptimizeDependentCode shouldn't be called.
2612 ASSERT(entries->is_code_at(i)); 2618 ASSERT(entries->is_code_at(i));
2613 Code* code = entries->code_at(i); 2619 Code* code = entries->code_at(i);
2614 if (IsMarked(code) && !code->marked_for_deoptimization()) { 2620
2615 code->set_marked_for_deoptimization(true); 2621 if (IsMarked(code) && !WillBeDeoptimized(code)) {
2622 // Insert the code into the code_to_deoptimize linked list.
2623 Object* next = code_to_deoptimize_;
2624 if (next != Smi::FromInt(0)) {
2625 // Record the slot so that it is updated.
2626 Object** slot = code->code_to_deoptimize_link_slot();
2627 RecordSlot(slot, slot, next);
2628 }
2629 code->set_code_to_deoptimize_link(next);
2630 code_to_deoptimize_ = code;
2616 } 2631 }
2617 entries->clear_at(i); 2632 entries->clear_at(i);
2618 } 2633 }
2619 map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array())); 2634 map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
2620 } 2635 }
2621 2636
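The insertion in ClearAndDeoptimizeDependentCode above is a constant-time push-front onto an intrusive list headed by code_to_deoptimize_. The same shape with hypothetical names (nullptr standing in for the Smi 0 terminator, slot recording omitted):

struct DeoptNode {
  DeoptNode* link;
};

// Mirrors: code->set_code_to_deoptimize_link(next);
//          code_to_deoptimize_ = code;
void PushFront(DeoptNode** head, DeoptNode* node) {
  node->link = *head;
  *head = node;
}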
2622 2637
2623 void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) { 2638 void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
2624 DisallowHeapAllocation no_allocation; 2639 DisallowHeapAllocation no_allocation;
2625 DependentCode::GroupStartIndexes starts(entries); 2640 DependentCode::GroupStartIndexes starts(entries);
2626 int number_of_entries = starts.number_of_entries(); 2641 int number_of_entries = starts.number_of_entries();
2627 if (number_of_entries == 0) return; 2642 if (number_of_entries == 0) return;
2628 int new_number_of_entries = 0; 2643 int new_number_of_entries = 0;
2629 // Go through all groups, remove dead codes and compact. 2644 // Go through all groups, remove dead codes and compact.
2630 for (int g = 0; g < DependentCode::kGroupCount; g++) { 2645 for (int g = 0; g < DependentCode::kGroupCount; g++) {
2631 int group_number_of_entries = 0; 2646 int group_number_of_entries = 0;
2632 for (int i = starts.at(g); i < starts.at(g + 1); i++) { 2647 for (int i = starts.at(g); i < starts.at(g + 1); i++) {
2633 Object* obj = entries->object_at(i); 2648 Object* obj = entries->object_at(i);
2634 ASSERT(obj->IsCode() || IsMarked(obj)); 2649 ASSERT(obj->IsCode() || IsMarked(obj));
2635 if (IsMarked(obj) && 2650 if (IsMarked(obj) &&
2636 (!obj->IsCode() || !Code::cast(obj)->marked_for_deoptimization())) { 2651 (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
2637 if (new_number_of_entries + group_number_of_entries != i) { 2652 if (new_number_of_entries + group_number_of_entries != i) {
2638 entries->set_object_at( 2653 entries->set_object_at(
2639 new_number_of_entries + group_number_of_entries, obj); 2654 new_number_of_entries + group_number_of_entries, obj);
2640 } 2655 }
2641 Object** slot = entries->slot_at(new_number_of_entries + 2656 Object** slot = entries->slot_at(new_number_of_entries +
2642 group_number_of_entries); 2657 group_number_of_entries);
2643 RecordSlot(slot, slot, obj); 2658 RecordSlot(slot, slot, obj);
2644 group_number_of_entries++; 2659 group_number_of_entries++;
2645 } 2660 }
2646 } 2661 }
(...skipping 69 matching lines...)
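ClearNonLiveDependentCode above keeps surviving entries by sliding them left over dead ones, so each dependence group stays contiguous. A simplified sketch of that in-place compaction (hypothetical types; the per-group bookkeeping and slot recording are omitted):

#include <cstddef>
#include <vector>

// nullptr stands in for "unmarked, or will be deoptimized"; survivors
// are compacted toward the front and the new length is returned.
std::size_t CompactInPlace(std::vector<void*>& entries) {
  std::size_t kept = 0;
  for (std::size_t i = 0; i < entries.size(); i++) {
    if (entries[i] != nullptr) {
      if (kept != i) entries[kept] = entries[i];  // slide survivor left
      kept++;
    }
  }
  return kept;  // analogous to new_number_of_entries
}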
2716 // to encounter pointers to dead new space objects during traversal of pointers 2731 // to encounter pointers to dead new space objects during traversal of pointers
2717 // to new space. We should clear them to avoid encountering them during next 2732 // to new space. We should clear them to avoid encountering them during next
2718 // pointer iteration. This is an issue if the store buffer overflows and we 2733 // pointer iteration. This is an issue if the store buffer overflows and we
2719 // have to scan the entire old space, including dead objects, looking for 2734 // have to scan the entire old space, including dead objects, looking for
2720 // pointers to new space. 2735 // pointers to new space.
2721 void MarkCompactCollector::MigrateObject(Address dst, 2736 void MarkCompactCollector::MigrateObject(Address dst,
2722 Address src, 2737 Address src,
2723 int size, 2738 int size,
2724 AllocationSpace dest) { 2739 AllocationSpace dest) {
2725 HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst)); 2740 HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
2726 if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) { 2741 // TODO(hpayer): Replace that check with an assert.
2742 CHECK(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
2743 if (dest == OLD_POINTER_SPACE) {
2744 // TODO(hpayer): Replace this check with an assert.
2745 HeapObject* heap_object = HeapObject::FromAddress(src);
2746 CHECK(heap_->TargetSpace(heap_object) == heap_->old_pointer_space());
2727 Address src_slot = src; 2747 Address src_slot = src;
2728 Address dst_slot = dst; 2748 Address dst_slot = dst;
2729 ASSERT(IsAligned(size, kPointerSize)); 2749 ASSERT(IsAligned(size, kPointerSize));
2730 2750
2731 for (int remaining = size / kPointerSize; remaining > 0; remaining--) { 2751 for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2732 Object* value = Memory::Object_at(src_slot); 2752 Object* value = Memory::Object_at(src_slot);
2733 2753
2734 Memory::Object_at(dst_slot) = value; 2754 Memory::Object_at(dst_slot) = value;
2735 2755
2736 if (heap_->InNewSpace(value)) { 2756 if (heap_->InNewSpace(value)) {
(...skipping 25 matching lines...)
2762 PROFILE(isolate(), CodeMoveEvent(src, dst)); 2782 PROFILE(isolate(), CodeMoveEvent(src, dst));
2763 heap()->MoveBlock(dst, src, size); 2783 heap()->MoveBlock(dst, src, size);
2764 SlotsBuffer::AddTo(&slots_buffer_allocator_, 2784 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2765 &migration_slots_buffer_, 2785 &migration_slots_buffer_,
2766 SlotsBuffer::RELOCATED_CODE_OBJECT, 2786 SlotsBuffer::RELOCATED_CODE_OBJECT,
2767 dst, 2787 dst,
2768 SlotsBuffer::IGNORE_OVERFLOW); 2788 SlotsBuffer::IGNORE_OVERFLOW);
2769 Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src); 2789 Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
2770 } else { 2790 } else {
2771 ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE); 2791 ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2792 // Objects in old data space can just be moved by compaction to a different
2793 // page in old data space.
2794 // TODO(hpayer): Replace the following check with an assert.
2795 CHECK(!heap_->old_data_space()->Contains(src) ||
2796 (heap_->old_data_space()->Contains(dst) &&
2797 heap_->TargetSpace(HeapObject::FromAddress(src)) ==
2798 heap_->old_data_space()));
2772 heap()->MoveBlock(dst, src, size); 2799 heap()->MoveBlock(dst, src, size);
2773 } 2800 }
2774 Memory::Address_at(src) = dst; 2801 Memory::Address_at(src) = dst;
2775 } 2802 }
2776 2803
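The old-pointer-space branch of MigrateObject above copies the object one word at a time and records every destination slot that now holds a new-space pointer, so those slots can be revisited after evacuation. A minimal sketch of that loop; Word, InNewSpace, and MigrateWords are hypothetical stand-ins for Memory::Object_at, Heap::InNewSpace, and the slots-buffer machinery:

#include <cstddef>
#include <cstdint>
#include <vector>

using Word = std::uintptr_t;

// Hypothetical new-space test: any nonzero value below a fixed boundary.
const Word kNewSpaceLimit = 0x10000;
bool InNewSpace(Word value) { return value != 0 && value < kNewSpaceLimit; }

// Copy count pointer-sized slots from src to dst; remember each
// destination slot whose copied value points into new space.
void MigrateWords(Word* dst, const Word* src, std::size_t count,
                  std::vector<Word*>* recorded_slots) {
  for (std::size_t i = 0; i < count; i++) {
    dst[i] = src[i];
    if (InNewSpace(dst[i])) recorded_slots->push_back(&dst[i]);
  }
}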
2777 2804
2778 // Visitor for updating pointers from live objects in old spaces to new space. 2805 // Visitor for updating pointers from live objects in old spaces to new space.
2779 // It does not expect to encounter pointers to dead objects. 2806 // It does not expect to encounter pointers to dead objects.
2780 class PointersUpdatingVisitor: public ObjectVisitor { 2807 class PointersUpdatingVisitor: public ObjectVisitor {
2781 public: 2808 public:
(...skipping 106 matching lines...)
2888 if (map_word.IsForwardingAddress()) { 2915 if (map_word.IsForwardingAddress()) {
2889 return String::cast(map_word.ToForwardingAddress()); 2916 return String::cast(map_word.ToForwardingAddress());
2890 } 2917 }
2891 2918
2892 return String::cast(*p); 2919 return String::cast(*p);
2893 } 2920 }
2894 2921
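The snippet above chases a forwarding address: once an object has been evacuated, its old copy's map word encodes the new location, so updating a reference means following that pointer when present. A simplified sketch (Obj and UpdateSlot are hypothetical; V8 packs the forwarding address into the map word rather than using a separate field):

struct Obj {
  Obj* forwarding = nullptr;  // set once the object has been moved
};

// If the referenced object was moved, redirect the slot to its new copy;
// otherwise leave the slot untouched.
void UpdateSlot(Obj** slot) {
  Obj* obj = *slot;
  if (obj != nullptr && obj->forwarding != nullptr) {
    *slot = obj->forwarding;
  }
}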
2895 2922
2896 bool MarkCompactCollector::TryPromoteObject(HeapObject* object, 2923 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
2897 int object_size) { 2924 int object_size) {
2925 // TODO(hpayer): Replace that check with an assert.
2926 CHECK(object_size <= Page::kMaxNonCodeHeapObjectSize);
2927
2928 OldSpace* target_space = heap()->TargetSpace(object);
2929
2930 ASSERT(target_space == heap()->old_pointer_space() ||
2931 target_space == heap()->old_data_space());
2898 Object* result; 2932 Object* result;
2899 2933 MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
2900 if (object_size > Page::kMaxNonCodeHeapObjectSize) { 2934 if (maybe_result->ToObject(&result)) {
2901 MaybeObject* maybe_result = 2935 HeapObject* target = HeapObject::cast(result);
2902 heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE); 2936 MigrateObject(target->address(),
2903 if (maybe_result->ToObject(&result)) { 2937 object->address(),
2904 HeapObject* target = HeapObject::cast(result); 2938 object_size,
2905 MigrateObject(target->address(), 2939 target_space->identity());
2906 object->address(), 2940 heap()->mark_compact_collector()->tracer()->
2907 object_size, 2941 increment_promoted_objects_size(object_size);
2908 LO_SPACE); 2942 return true;
2909 heap()->mark_compact_collector()->tracer()->
2910 increment_promoted_objects_size(object_size);
2911 return true;
2912 }
2913 } else {
2914 OldSpace* target_space = heap()->TargetSpace(object);
2915
2916 ASSERT(target_space == heap()->old_pointer_space() ||
2917 target_space == heap()->old_data_space());
2918 MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
2919 if (maybe_result->ToObject(&result)) {
2920 HeapObject* target = HeapObject::cast(result);
2921 MigrateObject(target->address(),
2922 object->address(),
2923 object_size,
2924 target_space->identity());
2925 heap()->mark_compact_collector()->tracer()->
2926 increment_promoted_objects_size(object_size);
2927 return true;
2928 }
2929 } 2943 }
2930 2944
2931 return false; 2945 return false;
2932 } 2946 }
2933 2947
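The rewritten TryPromoteObject above is a try-allocate-then-migrate pattern: a raw allocation is attempted in the computed target space, and on failure the function returns false so the caller can leave the object in new space. A rough sketch under those assumptions; Space and Promote are hypothetical simplifications of OldSpace::AllocateRaw and the MaybeObject handshake:

#include <cstddef>
#include <cstring>

struct Space {
  char* top;
  char* limit;
  // Fallible bump allocation: nullptr signals failure, much as a
  // retry-after-GC MaybeObject would.
  void* AllocateRaw(std::size_t size) {
    if (top + size > limit) return nullptr;
    void* result = top;
    top += size;
    return result;
  }
};

bool Promote(Space* target, const void* object, std::size_t object_size) {
  void* dst = target->AllocateRaw(object_size);
  if (dst == nullptr) return false;       // caller keeps the object put
  std::memcpy(dst, object, object_size);  // stands in for MigrateObject()
  return true;
}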
2934 2948
2935 void MarkCompactCollector::EvacuateNewSpace() { 2949 void MarkCompactCollector::EvacuateNewSpace() {
2936 // There are soft limits in the allocation code, designed to trigger a mark 2950 // There are soft limits in the allocation code, designed to trigger a mark
2937 // sweep collection by failing allocations. But since we are already in 2951 // sweep collection by failing allocations. But since we are already in
2938 // a mark-sweep allocation, there is no sense in trying to trigger one. 2952 // a mark-sweep allocation, there is no sense in trying to trigger one.
(...skipping 325 matching lines...)
3264 3278
3265 // If the object is white then no slots were recorded on it yet. 3279 // If the object is white then no slots were recorded on it yet.
3266 MarkBit mark_bit = Marking::MarkBitFrom(code); 3280 MarkBit mark_bit = Marking::MarkBitFrom(code);
3267 if (Marking::IsWhite(mark_bit)) return; 3281 if (Marking::IsWhite(mark_bit)) return;
3268 3282
3269 invalidated_code_.Add(code); 3283 invalidated_code_.Add(code);
3270 } 3284 }
3271 } 3285 }
3272 3286
3273 3287
3288 // Return true if the given code is deoptimized or will be deoptimized.
3289 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3290 // We assume the code_to_deoptimize_link is initialized to undefined.
3291 // If it is 0, or refers to another Code object, then this code
3292 // is already linked, or was already linked into the list.
3293 return code->code_to_deoptimize_link() != heap()->undefined_value()
3294 || code->marked_for_deoptimization();
3295 }
3296
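WillBeDeoptimized above uses the link field as a tri-state membership flag: undefined means the code was never linked, while the Smi 0 terminator or a pointer to another Code object means it is (or was) on the deoptimization list. A small sketch of that sentinel idea with hypothetical types:

#include <cassert>

enum class Link { kUndefined, kTerminator, kNextNode };

struct FakeCode {
  Link link = Link::kUndefined;   // freshly created: never linked
  bool marked_for_deopt = false;
};

bool WillBeDeoptimized(const FakeCode& code) {
  return code.link != Link::kUndefined || code.marked_for_deopt;
}

int main() {
  FakeCode fresh;
  FakeCode tail;
  tail.link = Link::kTerminator;  // linked as the last list element
  assert(!WillBeDeoptimized(fresh));
  assert(WillBeDeoptimized(tail));
  return 0;
}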
3297
3274 bool MarkCompactCollector::MarkInvalidatedCode() { 3298 bool MarkCompactCollector::MarkInvalidatedCode() {
3275 bool code_marked = false; 3299 bool code_marked = false;
3276 3300
3277 int length = invalidated_code_.length(); 3301 int length = invalidated_code_.length();
3278 for (int i = 0; i < length; i++) { 3302 for (int i = 0; i < length; i++) {
3279 Code* code = invalidated_code_[i]; 3303 Code* code = invalidated_code_[i];
3280 3304
3281 if (SetMarkBitsUnderInvalidatedCode(code, true)) { 3305 if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3282 code_marked = true; 3306 code_marked = true;
3283 } 3307 }
(...skipping 163 matching lines...)
3447 HeapObjectIterator js_global_property_cell_iterator( 3471 HeapObjectIterator js_global_property_cell_iterator(
3448 heap_->property_cell_space()); 3472 heap_->property_cell_space());
3449 for (HeapObject* cell = js_global_property_cell_iterator.Next(); 3473 for (HeapObject* cell = js_global_property_cell_iterator.Next();
3450 cell != NULL; 3474 cell != NULL;
3451 cell = js_global_property_cell_iterator.Next()) { 3475 cell = js_global_property_cell_iterator.Next()) {
3452 if (cell->IsPropertyCell()) { 3476 if (cell->IsPropertyCell()) {
3453 PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor); 3477 PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3454 } 3478 }
3455 } 3479 }
3456 3480
3457 // Update pointer from the native contexts list. 3481 // Update the heads of the native contexts list and the code to deoptimize list.
3458 updating_visitor.VisitPointer(heap_->native_contexts_list_address()); 3482 updating_visitor.VisitPointer(heap_->native_contexts_list_address());
3483 updating_visitor.VisitPointer(&code_to_deoptimize_);
3459 3484
3460 heap_->string_table()->Iterate(&updating_visitor); 3485 heap_->string_table()->Iterate(&updating_visitor);
3461 3486
3462 // Update pointers from external string table. 3487 // Update pointers from external string table.
3463 heap_->UpdateReferencesInExternalStringTable( 3488 heap_->UpdateReferencesInExternalStringTable(
3464 &UpdateReferenceInExternalStringTableEntry); 3489 &UpdateReferenceInExternalStringTableEntry);
3465 3490
3466 // Update pointers in the new error object list.
3467 heap_->error_object_list()->UpdateReferences();
3468
3469 if (!FLAG_watch_ic_patching) { 3491 if (!FLAG_watch_ic_patching) {
3470 // Update JSFunction pointers from the runtime profiler. 3492 // Update JSFunction pointers from the runtime profiler.
3471 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact( 3493 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
3472 &updating_visitor); 3494 &updating_visitor);
3473 } 3495 }
3474 3496
3475 EvacuationWeakObjectRetainer evacuation_object_retainer; 3497 EvacuationWeakObjectRetainer evacuation_object_retainer;
3476 heap()->ProcessWeakReferences(&evacuation_object_retainer); 3498 heap()->ProcessWeakReferences(&evacuation_object_retainer);
3477 3499
3478 // Visit invalidated code (we ignored all slots on it) and clear mark-bits 3500 // Visit invalidated code (we ignored all slots on it) and clear mark-bits
(...skipping 836 matching lines...)
4315 while (buffer != NULL) { 4337 while (buffer != NULL) {
4316 SlotsBuffer* next_buffer = buffer->next(); 4338 SlotsBuffer* next_buffer = buffer->next();
4317 DeallocateBuffer(buffer); 4339 DeallocateBuffer(buffer);
4318 buffer = next_buffer; 4340 buffer = next_buffer;
4319 } 4341 }
4320 *buffer_address = NULL; 4342 *buffer_address = NULL;
4321 } 4343 }
4322 4344
4323 4345
4324 } } // namespace v8::internal 4346 } } // namespace v8::internal