Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 55 matching lines...) | |
| 66 abort_incremental_marking_(false), | 66 abort_incremental_marking_(false), |
| 67 marking_parity_(ODD_MARKING_PARITY), | 67 marking_parity_(ODD_MARKING_PARITY), |
| 68 compacting_(false), | 68 compacting_(false), |
| 69 was_marked_incrementally_(false), | 69 was_marked_incrementally_(false), |
| 70 sweeping_pending_(false), | 70 sweeping_pending_(false), |
| 71 sequential_sweeping_(false), | 71 sequential_sweeping_(false), |
| 72 tracer_(NULL), | 72 tracer_(NULL), |
| 73 migration_slots_buffer_(NULL), | 73 migration_slots_buffer_(NULL), |
| 74 heap_(NULL), | 74 heap_(NULL), |
| 75 code_flusher_(NULL), | 75 code_flusher_(NULL), |
| 76 encountered_weak_maps_(NULL) { } | 76 encountered_weak_maps_(NULL), |
| 77 code_to_deoptimize_(NULL) { } | |
| 77 | 78 |
| 78 | 79 |
| 79 #ifdef VERIFY_HEAP | 80 #ifdef VERIFY_HEAP |
| 80 class VerifyMarkingVisitor: public ObjectVisitor { | 81 class VerifyMarkingVisitor: public ObjectVisitor { |
| 81 public: | 82 public: |
| 82 void VisitPointers(Object** start, Object** end) { | 83 void VisitPointers(Object** start, Object** end) { |
| 83 for (Object** current = start; current < end; current++) { | 84 for (Object** current = start; current < end; current++) { |
| 84 if ((*current)->IsHeapObject()) { | 85 if ((*current)->IsHeapObject()) { |
| 85 HeapObject* object = HeapObject::cast(*current); | 86 HeapObject* object = HeapObject::cast(*current); |
| 86 CHECK(HEAP->mark_compact_collector()->IsMarked(object)); | 87 CHECK(HEAP->mark_compact_collector()->IsMarked(object)); |
| (...skipping 391 matching lines...) | |
| 478 | 479 |
| 479 LargeObjectIterator it(heap_->lo_space()); | 480 LargeObjectIterator it(heap_->lo_space()); |
| 480 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 481 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 481 MarkBit mark_bit = Marking::MarkBitFrom(obj); | 482 MarkBit mark_bit = Marking::MarkBitFrom(obj); |
| 482 CHECK(Marking::IsWhite(mark_bit)); | 483 CHECK(Marking::IsWhite(mark_bit)); |
| 483 CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes()); | 484 CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes()); |
| 484 } | 485 } |
| 485 } | 486 } |
| 486 | 487 |
| 487 | 488 |
| 489 // Return true if the given code is deoptimized or will be deoptimized. | |
| 490 static bool WillBeDeoptimized(Code* code) { | |
| 492 // The gc_metadata field is used as a linked list of code to deopt. | |

ulan (2013/07/22 09:23:08):
Maybe assert that code->gc_metadata() is optimized

titzer (2013/07/23 12:41:00):
I've had to rework this function because I was not

| 492 // != 0 implies the code is in the list, undefined means at the end. | |
| 493 return code->gc_metadata() != Smi::FromInt(0) | |
| 494 || code->marked_for_deoptimization(); | |
| 495 } | |
| 496 | |
| 497 | |
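
For readers outside V8: the new helper relies on the spare gc_metadata word of each Code object doubling as an intrusive singly linked list link. Smi 0 means "not queued", the undefined value marks the tail, and anything else points at the next queued Code object. A minimal standalone sketch of that convention (hypothetical names, not the V8 API):

```cpp
#include <cassert>
#include <cstdio>

// Each object carries a spare link field that encodes list membership,
// mirroring how gc_metadata is used by WillBeDeoptimized() above.
struct CodeStub {
  CodeStub* link = nullptr;   // nullptr plays the role of Smi 0: "not queued".
  bool marked_for_deopt = false;
};

// Distinguished sentinel standing in for the heap's undefined value; it marks
// the tail, so a queued object never carries a nullptr link.
static CodeStub kEndOfList;

// Rough equivalent of WillBeDeoptimized(): queued, or already flagged.
bool WillBeProcessed(const CodeStub* code) {
  return code->link != nullptr || code->marked_for_deopt;
}

int main() {
  CodeStub a, b;
  assert(!WillBeProcessed(&a));
  a.link = &kEndOfList;   // enqueue a as the only element
  b.link = &a;            // prepend b; its link points at the next element
  assert(WillBeProcessed(&a) && WillBeProcessed(&b));
  std::printf("both queued\n");
}
```
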
| 488 void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() { | 498 void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() { |
| 489 HeapObjectIterator code_iterator(heap()->code_space()); | 499 HeapObjectIterator code_iterator(heap()->code_space()); |
| 490 for (HeapObject* obj = code_iterator.Next(); | 500 for (HeapObject* obj = code_iterator.Next(); |
| 491 obj != NULL; | 501 obj != NULL; |
| 492 obj = code_iterator.Next()) { | 502 obj = code_iterator.Next()) { |
| 493 Code* code = Code::cast(obj); | 503 Code* code = Code::cast(obj); |
| 494 if (code->kind() != Code::OPTIMIZED_FUNCTION) continue; | 504 if (code->kind() != Code::OPTIMIZED_FUNCTION) continue; |
| 495 if (code->marked_for_deoptimization()) continue; | 505 if (WillBeDeoptimized(code)) continue; |
| 496 code->VerifyEmbeddedMapsDependency(); | 506 code->VerifyEmbeddedMapsDependency(); |
| 497 } | 507 } |
| 498 } | 508 } |
| 499 | 509 |
| 500 | 510 |
| 501 void MarkCompactCollector::VerifyOmittedPrototypeChecks() { | 511 void MarkCompactCollector::VerifyOmittedPrototypeChecks() { |
| 502 HeapObjectIterator iterator(heap()->map_space()); | 512 HeapObjectIterator iterator(heap()->map_space()); |
| 503 for (HeapObject* obj = iterator.Next(); | 513 for (HeapObject* obj = iterator.Next(); |
| 504 obj != NULL; | 514 obj != NULL; |
| 505 obj = iterator.Next()) { | 515 obj = iterator.Next()) { |
| (...skipping 432 matching lines...) | |
| 938 } | 948 } |
| 939 | 949 |
| 940 #ifdef VERIFY_HEAP | 950 #ifdef VERIFY_HEAP |
| 941 if (!was_marked_incrementally_ && FLAG_verify_heap) { | 951 if (!was_marked_incrementally_ && FLAG_verify_heap) { |
| 942 VerifyMarkbitsAreClean(); | 952 VerifyMarkbitsAreClean(); |
| 943 } | 953 } |
| 944 #endif | 954 #endif |
| 945 } | 955 } |
| 946 | 956 |
| 947 | 957 |
| 948 class DeoptimizeMarkedCodeFilter : public OptimizedFunctionFilter { | |
| 949 public: | |
| 950 virtual bool TakeFunction(JSFunction* function) { | |
| 951 return function->code()->marked_for_deoptimization(); | |
| 952 } | |
| 953 }; | |
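
The class removed here implemented the old approach: a predicate handed to Deoptimizer::DeoptimizeAllFunctionsWith(), which walked every optimized function and deoptimized the ones whose code was marked. A rough sketch of that filter pattern in plain C++ (simplified stand-in types, not the real V8 classes), shown only for contrast with the explicit list this CL introduces:

```cpp
#include <cstdio>
#include <vector>

// Stand-in for the real heap object; only the marked flag matters here.
struct Function { bool marked_for_deopt = false; const char* name = ""; };

// Predicate interface: the caller supplies the selection logic.
struct OptimizedFunctionFilter {
  virtual ~OptimizedFunctionFilter() = default;
  virtual bool TakeFunction(Function* f) = 0;
};

struct DeoptimizeMarkedCodeFilter : OptimizedFunctionFilter {
  bool TakeFunction(Function* f) override { return f->marked_for_deopt; }
};

// Full scan over all optimized functions, as the removed code path did.
void DeoptimizeAllFunctionsWith(std::vector<Function*>& all,
                                OptimizedFunctionFilter* filter) {
  for (Function* f : all) {
    if (filter->TakeFunction(f)) std::printf("deoptimizing %s\n", f->name);
  }
}

int main() {
  Function f1, f2;
  f1.name = "f1";
  f2.name = "f2"; f2.marked_for_deopt = true;
  std::vector<Function*> all = {&f1, &f2};
  DeoptimizeMarkedCodeFilter filter;
  DeoptimizeAllFunctionsWith(all, &filter);   // prints only "deoptimizing f2"
}
```

The replacement in the new Finish() below avoids this whole-heap scan: only the Code objects actually threaded onto code_to_deoptimize_ are touched.
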
| 954 | |
| 955 | |
| 956 void MarkCompactCollector::Finish() { | 958 void MarkCompactCollector::Finish() { |
| 957 #ifdef DEBUG | 959 #ifdef DEBUG |
| 958 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); | 960 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); |
| 959 state_ = IDLE; | 961 state_ = IDLE; |
| 960 #endif | 962 #endif |
| 961 // The stub cache is not traversed during GC; clear the cache to | 963 // The stub cache is not traversed during GC; clear the cache to |
| 962 // force lazy re-initialization of it. This must be done after the | 964 // force lazy re-initialization of it. This must be done after the |
| 963 // GC, because it relies on the new address of certain old space | 965 // GC, because it relies on the new address of certain old space |
| 964 // objects (empty string, illegal builtin). | 966 // objects (empty string, illegal builtin). |
| 965 isolate()->stub_cache()->Clear(); | 967 isolate()->stub_cache()->Clear(); |
| 966 | 968 |
| 967 DeoptimizeMarkedCodeFilter filter; | 969 if (code_to_deoptimize_ != Smi::FromInt(0)) { |
| 968 Deoptimizer::DeoptimizeAllFunctionsWith(isolate(), &filter); | 970 // Convert the linked list of Code objects into a ZoneList. |
| 971 Zone zone(isolate()); | |
| 972 ZoneList<Code*> codes(4, &zone); | |
| 973 | |
| 974 Object* list = code_to_deoptimize_; | |
| 975 while (list->IsCode()) { | |
| 976 Code* code = Code::cast(list); | |
| 977 list = code->gc_metadata(); | |
| 978 codes.Add(code, &zone); | |
| 979 code->set_gc_metadata(Smi::FromInt(0)); | |
| 980 } | |
| 981 code_to_deoptimize_ = Smi::FromInt(0); | |
| 982 | |
| 983 Deoptimizer::DeoptimizeCodeList(isolate(), &codes); | |
| 984 } | |
| 969 } | 985 } |
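
The new Finish() code above drains the intrusive list in one pass: follow the gc_metadata links, collect each Code object into a ZoneList, reset the link back to Smi 0, then hand the whole batch to Deoptimizer::DeoptimizeCodeList(). A self-contained sketch of the same drain-and-batch step (hypothetical types, not the V8 API):

```cpp
#include <cstdio>
#include <vector>

struct CodeStub {
  CodeStub* link = nullptr;   // nullptr ~ Smi 0, kEnd ~ undefined_value()
  int id = 0;
};
static CodeStub kEnd;         // tail sentinel

// Walk the list, unlink every element, and process them as one batch,
// mirroring the conversion to a ZoneList followed by DeoptimizeCodeList().
void DrainAndDeoptimize(CodeStub*& head) {
  if (head == nullptr) return;            // nothing was queued this cycle
  std::vector<CodeStub*> batch;
  for (CodeStub* cur = head; cur != &kEnd; ) {
    CodeStub* next = cur->link;
    cur->link = nullptr;                  // like set_gc_metadata(Smi 0)
    batch.push_back(cur);
    cur = next;
  }
  head = nullptr;                         // reset the list head
  for (CodeStub* code : batch) std::printf("deoptimize #%d\n", code->id);
}

int main() {
  CodeStub a, b;
  a.id = 1; a.link = &kEnd;     // a is the tail
  b.id = 2; b.link = &a;        // b is the head
  CodeStub* head = &b;
  DrainAndDeoptimize(head);     // prints "deoptimize #2", then "deoptimize #1"
}
```
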
| 970 | 986 |
| 971 | 987 |
| 972 // ------------------------------------------------------------------------- | 988 // ------------------------------------------------------------------------- |
| 973 // Phase 1: tracing and marking live objects. | 989 // Phase 1: tracing and marking live objects. |
| 974 // before: all objects are in normal state. | 990 // before: all objects are in normal state. |
| 975 // after: a live object's map pointer is marked as '00'. | 991 // after: a live object's map pointer is marked as '00'. |
| 976 | 992 |
| 977 // Marking all live objects in the heap as part of mark-sweep or mark-compact | 993 // Marking all live objects in the heap as part of mark-sweep or mark-compact |
| 978 // collection. Before marking, all objects are in their normal state. After | 994 // collection. Before marking, all objects are in their normal state. After |
| (...skipping 1624 matching lines...) | |
| 2603 DisallowHeapAllocation no_allocation; | 2619 DisallowHeapAllocation no_allocation; |
| 2604 DependentCode* entries = map->dependent_code(); | 2620 DependentCode* entries = map->dependent_code(); |
| 2605 DependentCode::GroupStartIndexes starts(entries); | 2621 DependentCode::GroupStartIndexes starts(entries); |
| 2606 int number_of_entries = starts.number_of_entries(); | 2622 int number_of_entries = starts.number_of_entries(); |
| 2607 if (number_of_entries == 0) return; | 2623 if (number_of_entries == 0) return; |
| 2608 for (int i = 0; i < number_of_entries; i++) { | 2624 for (int i = 0; i < number_of_entries; i++) { |
| 2609 // If the entry is compilation info then the map must be alive, | 2625 // If the entry is compilation info then the map must be alive, |
| 2610 // and ClearAndDeoptimizeDependentCode shouldn't be called. | 2626 // and ClearAndDeoptimizeDependentCode shouldn't be called. |
| 2611 ASSERT(entries->is_code_at(i)); | 2627 ASSERT(entries->is_code_at(i)); |
| 2612 Code* code = entries->code_at(i); | 2628 Code* code = entries->code_at(i); |
| 2613 if (IsMarked(code) && !code->marked_for_deoptimization()) { | 2629 |
| 2614 code->set_marked_for_deoptimization(true); | 2630 if (IsMarked(code) && !WillBeDeoptimized(code)) { |
| 2631 // Insert the code into the code_to_deoptimize linked list. | |
| 2632 Object* next; | |
| 2633 if (code_to_deoptimize_ == Smi::FromInt(0)) { | |
| 2634 // First entry; undefined indicates the end of the list. | |
| 2635 next = isolate()->heap()->undefined_value(); | |
| 2636 } else { | |
| 2637 // Link the rest of the list off this code object. | |
| 2638 next = code_to_deoptimize_; | |
| 2639 } | |
| 2640 Object** slot = HeapObject::RawField(code, Code::kGCMetadataOffset); | |
| 2641 code->set_gc_metadata(next); | |
| 2642 RecordSlot(slot, slot, next); // Record this slot so the pointer survives compaction. | |
| 2643 code_to_deoptimize_ = code; // This code is the new head of the list. | |
| 2615 } | 2644 } |
| 2616 entries->clear_at(i); | 2645 entries->clear_at(i); |
| 2617 } | 2646 } |
| 2618 map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array())); | 2647 map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array())); |
| 2619 } | 2648 } |
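
The block added above threads a Code object onto the front of code_to_deoptimize_: the very first element gets the undefined value as its successor, later ones get the previous head, and the written slot is recorded so compaction keeps the pointer valid. A short sketch of just the prepend step (hypothetical types; this sketch has no moving GC, so there is no slot recording):

```cpp
struct CodeStub { CodeStub* link = nullptr; };
static CodeStub kEnd;   // stands in for undefined_value(), the tail marker

// Push `code` onto the front of the intrusive list rooted at `head`.
void EnqueueForDeoptimization(CodeStub*& head, CodeStub* code) {
  // First element links to the sentinel; later elements link to the old head.
  code->link = (head == nullptr) ? &kEnd : head;
  head = code;   // the new element becomes the head, as in the CL
}

int main() {
  CodeStub* head = nullptr;              // empty list, like Smi 0
  CodeStub a, b;
  EnqueueForDeoptimization(head, &a);    // a.link == &kEnd
  EnqueueForDeoptimization(head, &b);    // b.link == &a, head == &b
}
```
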
| 2620 | 2649 |
| 2621 | 2650 |
| 2622 void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) { | 2651 void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) { |
| 2623 DisallowHeapAllocation no_allocation; | 2652 DisallowHeapAllocation no_allocation; |
| 2624 DependentCode::GroupStartIndexes starts(entries); | 2653 DependentCode::GroupStartIndexes starts(entries); |
| 2625 int number_of_entries = starts.number_of_entries(); | 2654 int number_of_entries = starts.number_of_entries(); |
| 2626 if (number_of_entries == 0) return; | 2655 if (number_of_entries == 0) return; |
| 2627 int new_number_of_entries = 0; | 2656 int new_number_of_entries = 0; |
| 2628 // Go through all groups, remove dead codes and compact. | 2657 // Go through all groups, remove dead codes and compact. |
| 2629 for (int g = 0; g < DependentCode::kGroupCount; g++) { | 2658 for (int g = 0; g < DependentCode::kGroupCount; g++) { |
| 2630 int group_number_of_entries = 0; | 2659 int group_number_of_entries = 0; |
| 2631 for (int i = starts.at(g); i < starts.at(g + 1); i++) { | 2660 for (int i = starts.at(g); i < starts.at(g + 1); i++) { |
| 2632 Object* obj = entries->object_at(i); | 2661 Object* obj = entries->object_at(i); |
| 2633 ASSERT(obj->IsCode() || IsMarked(obj)); | 2662 ASSERT(obj->IsCode() || IsMarked(obj)); |
| 2634 if (IsMarked(obj) && | 2663 if (IsMarked(obj) && |
| 2635 (!obj->IsCode() || !Code::cast(obj)->marked_for_deoptimization())) { | 2664 (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) { |
| 2636 if (new_number_of_entries + group_number_of_entries != i) { | 2665 if (new_number_of_entries + group_number_of_entries != i) { |
| 2637 entries->set_object_at( | 2666 entries->set_object_at( |
| 2638 new_number_of_entries + group_number_of_entries, obj); | 2667 new_number_of_entries + group_number_of_entries, obj); |
| 2639 } | 2668 } |
| 2640 Object** slot = entries->slot_at(new_number_of_entries + | 2669 Object** slot = entries->slot_at(new_number_of_entries + |
| 2641 group_number_of_entries); | 2670 group_number_of_entries); |
| 2642 RecordSlot(slot, slot, obj); | 2671 RecordSlot(slot, slot, obj); |
| 2643 group_number_of_entries++; | 2672 group_number_of_entries++; |
| 2644 } | 2673 } |
| 2645 } | 2674 } |
| (...skipping 796 matching lines...) | |
| 3442 HeapObjectIterator js_global_property_cell_iterator( | 3471 HeapObjectIterator js_global_property_cell_iterator( |
| 3443 heap_->property_cell_space()); | 3472 heap_->property_cell_space()); |
| 3444 for (HeapObject* cell = js_global_property_cell_iterator.Next(); | 3473 for (HeapObject* cell = js_global_property_cell_iterator.Next(); |
| 3445 cell != NULL; | 3474 cell != NULL; |
| 3446 cell = js_global_property_cell_iterator.Next()) { | 3475 cell = js_global_property_cell_iterator.Next()) { |
| 3447 if (cell->IsPropertyCell()) { | 3476 if (cell->IsPropertyCell()) { |
| 3448 PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor); | 3477 PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor); |
| 3449 } | 3478 } |
| 3450 } | 3479 } |
| 3451 | 3480 |
| 3452 // Update pointer from the native contexts list. | 3481 // Update the heads of the native contexts list and the code to deoptimize list. |
| 3453 updating_visitor.VisitPointer(heap_->native_contexts_list_address()); | 3482 updating_visitor.VisitPointer(heap_->native_contexts_list_address()); |
| 3483 updating_visitor.VisitPointer(&code_to_deoptimize_); | |
| 3454 | 3484 |
| 3455 heap_->string_table()->Iterate(&updating_visitor); | 3485 heap_->string_table()->Iterate(&updating_visitor); |
| 3456 | 3486 |
| 3457 // Update pointers from external string table. | 3487 // Update pointers from external string table. |
| 3458 heap_->UpdateReferencesInExternalStringTable( | 3488 heap_->UpdateReferencesInExternalStringTable( |
| 3459 &UpdateReferenceInExternalStringTableEntry); | 3489 &UpdateReferenceInExternalStringTableEntry); |
| 3460 | 3490 |
| 3461 // Update pointers in the new error object list. | 3491 // Update pointers in the new error object list. |
| 3462 heap_->error_object_list()->UpdateReferences(); | 3492 heap_->error_object_list()->UpdateReferences(); |
| 3463 | 3493 |
| (...skipping 846 matching lines...) | |
| 4310 while (buffer != NULL) { | 4340 while (buffer != NULL) { |
| 4311 SlotsBuffer* next_buffer = buffer->next(); | 4341 SlotsBuffer* next_buffer = buffer->next(); |
| 4312 DeallocateBuffer(buffer); | 4342 DeallocateBuffer(buffer); |
| 4313 buffer = next_buffer; | 4343 buffer = next_buffer; |
| 4314 } | 4344 } |
| 4315 *buffer_address = NULL; | 4345 *buffer_address = NULL; |
| 4316 } | 4346 } |
| 4317 | 4347 |
| 4318 | 4348 |
| 4319 } } // namespace v8::internal | 4349 } } // namespace v8::internal |