| OLD | NEW |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 687 matching lines...) |
| 698 } | 698 } |
| 699 | 699 |
| 700 survival_rate_ = survival_rate; | 700 survival_rate_ = survival_rate; |
| 701 } | 701 } |
| 702 | 702 |
| 703 bool Heap::PerformGarbageCollection(GarbageCollector collector, | 703 bool Heap::PerformGarbageCollection(GarbageCollector collector, |
| 704 GCTracer* tracer) { | 704 GCTracer* tracer) { |
| 705 bool next_gc_likely_to_collect_more = false; | 705 bool next_gc_likely_to_collect_more = false; |
| 706 | 706 |
| 707 if (collector != SCAVENGER) { | 707 if (collector != SCAVENGER) { |
| 708 PROFILE(CodeMovingGCEvent()); | 708 PROFILE(isolate_, CodeMovingGCEvent()); |
| 709 } | 709 } |
| 710 | 710 |
| 711 VerifySymbolTable(); | 711 VerifySymbolTable(); |
| 712 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) { | 712 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) { |
| 713 ASSERT(!allocation_allowed_); | 713 ASSERT(!allocation_allowed_); |
| 714 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); | 714 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); |
| 715 global_gc_prologue_callback_(); | 715 global_gc_prologue_callback_(); |
| 716 } | 716 } |
| 717 | 717 |
| 718 GCType gc_type = | 718 GCType gc_type = |
| (...skipping 78 matching lines...) |
| 797 global_gc_epilogue_callback_(); | 797 global_gc_epilogue_callback_(); |
| 798 } | 798 } |
| 799 VerifySymbolTable(); | 799 VerifySymbolTable(); |
| 800 | 800 |
| 801 return next_gc_likely_to_collect_more; | 801 return next_gc_likely_to_collect_more; |
| 802 } | 802 } |
| 803 | 803 |
| 804 | 804 |
| 805 void Heap::MarkCompact(GCTracer* tracer) { | 805 void Heap::MarkCompact(GCTracer* tracer) { |
| 806 gc_state_ = MARK_COMPACT; | 806 gc_state_ = MARK_COMPACT; |
| 807 LOG(ResourceEvent("markcompact", "begin")); | 807 LOG(isolate_, ResourceEvent("markcompact", "begin")); |
| 808 | 808 |
| 809 mark_compact_collector_.Prepare(tracer); | 809 mark_compact_collector_.Prepare(tracer); |
| 810 | 810 |
| 811 bool is_compacting = mark_compact_collector_.IsCompacting(); | 811 bool is_compacting = mark_compact_collector_.IsCompacting(); |
| 812 | 812 |
| 813 if (is_compacting) { | 813 if (is_compacting) { |
| 814 mc_count_++; | 814 mc_count_++; |
| 815 } else { | 815 } else { |
| 816 ms_count_++; | 816 ms_count_++; |
| 817 } | 817 } |
| 818 tracer->set_full_gc_count(mc_count_ + ms_count_); | 818 tracer->set_full_gc_count(mc_count_ + ms_count_); |
| 819 | 819 |
| 820 MarkCompactPrologue(is_compacting); | 820 MarkCompactPrologue(is_compacting); |
| 821 | 821 |
| 822 is_safe_to_read_maps_ = false; | 822 is_safe_to_read_maps_ = false; |
| 823 mark_compact_collector_.CollectGarbage(); | 823 mark_compact_collector_.CollectGarbage(); |
| 824 is_safe_to_read_maps_ = true; | 824 is_safe_to_read_maps_ = true; |
| 825 | 825 |
| 826 LOG(ResourceEvent("markcompact", "end")); | 826 LOG(isolate_, ResourceEvent("markcompact", "end")); |
| 827 | 827 |
| 828 gc_state_ = NOT_IN_GC; | 828 gc_state_ = NOT_IN_GC; |
| 829 | 829 |
| 830 Shrink(); | 830 Shrink(); |
| 831 | 831 |
| 832 isolate_->counters()->objs_since_last_full()->Set(0); | 832 isolate_->counters()->objs_since_last_full()->Set(0); |
| 833 | 833 |
| 834 contexts_disposed_ = 0; | 834 contexts_disposed_ = 0; |
| 835 } | 835 } |
| 836 | 836 |
| (...skipping 108 matching lines...) |
| 945 | 945 |
| 946 // We do not update the allocation watermark of the top page during linear | 946 // We do not update the allocation watermark of the top page during linear |
| 947 // allocation to avoid overhead. So to maintain the watermark invariant | 947 // allocation to avoid overhead. So to maintain the watermark invariant |
| 948 // we have to manually cache the watermark and mark the top page as having an | 948 // we have to manually cache the watermark and mark the top page as having an |
| 949 // invalid watermark. This guarantees that dirty regions iteration will use a | 949 // invalid watermark. This guarantees that dirty regions iteration will use a |
| 950 // correct watermark even if a linear allocation happens. | 950 // correct watermark even if a linear allocation happens. |
| 951 old_pointer_space_->FlushTopPageWatermark(); | 951 old_pointer_space_->FlushTopPageWatermark(); |
| 952 map_space_->FlushTopPageWatermark(); | 952 map_space_->FlushTopPageWatermark(); |
| 953 | 953 |
| 954 // Implements Cheney's copying algorithm. | 954 // Implements Cheney's copying algorithm. |
| 955 LOG(ResourceEvent("scavenge", "begin")); | 955 LOG(isolate_, ResourceEvent("scavenge", "begin")); |
| 956 | 956 |
| 957 // Clear descriptor cache. | 957 // Clear descriptor cache. |
| 958 isolate_->descriptor_lookup_cache()->Clear(); | 958 isolate_->descriptor_lookup_cache()->Clear(); |
| 959 | 959 |
| 960 // Used for updating survived_since_last_expansion_ at function end. | 960 // Used for updating survived_since_last_expansion_ at function end. |
| 961 intptr_t survived_watermark = PromotedSpaceSize(); | 961 intptr_t survived_watermark = PromotedSpaceSize(); |
| 962 | 962 |
| 963 CheckNewSpaceExpansionCriteria(); | 963 CheckNewSpaceExpansionCriteria(); |
| 964 | 964 |
| 965 // Flip the semispaces. After flipping, to space is empty, from space has | 965 // Flip the semispaces. After flipping, to space is empty, from space has |
| (...skipping 67 matching lines...) |
| 1033 | 1033 |
| 1034 is_safe_to_read_maps_ = true; | 1034 is_safe_to_read_maps_ = true; |
| 1035 | 1035 |
| 1036 // Set age mark. | 1036 // Set age mark. |
| 1037 new_space_.set_age_mark(new_space_.top()); | 1037 new_space_.set_age_mark(new_space_.top()); |
| 1038 | 1038 |
| 1039 // Update how much has survived scavenge. | 1039 // Update how much has survived scavenge. |
| 1040 IncrementYoungSurvivorsCounter(static_cast<int>( | 1040 IncrementYoungSurvivorsCounter(static_cast<int>( |
| 1041 (PromotedSpaceSize() - survived_watermark) + new_space_.Size())); | 1041 (PromotedSpaceSize() - survived_watermark) + new_space_.Size())); |
| 1042 | 1042 |
| 1043 LOG(ResourceEvent("scavenge", "end")); | 1043 LOG(isolate_, ResourceEvent("scavenge", "end")); |
| 1044 | 1044 |
| 1045 gc_state_ = NOT_IN_GC; | 1045 gc_state_ = NOT_IN_GC; |
| 1046 } | 1046 } |
| 1047 | 1047 |
| 1048 | 1048 |
| 1049 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap, | 1049 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap, |
| 1050 Object** p) { | 1050 Object** p) { |
| 1051 MapWord first_word = HeapObject::cast(*p)->map_word(); | 1051 MapWord first_word = HeapObject::cast(*p)->map_word(); |
| 1052 | 1052 |
| 1053 if (!first_word.IsForwardingAddress()) { | 1053 if (!first_word.IsForwardingAddress()) { |
| (...skipping 258 matching lines...) |
| 1312 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 1312 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
| 1313 // Update NewSpace stats if necessary. | 1313 // Update NewSpace stats if necessary. |
| 1314 RecordCopiedObject(heap, target); | 1314 RecordCopiedObject(heap, target); |
| 1315 #endif | 1315 #endif |
| 1316 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address())); | 1316 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address())); |
| 1317 #if defined(ENABLE_LOGGING_AND_PROFILING) | 1317 #if defined(ENABLE_LOGGING_AND_PROFILING) |
| 1318 Isolate* isolate = heap->isolate(); | 1318 Isolate* isolate = heap->isolate(); |
| 1319 if (isolate->logger()->is_logging() || | 1319 if (isolate->logger()->is_logging() || |
| 1320 isolate->cpu_profiler()->is_profiling()) { | 1320 isolate->cpu_profiler()->is_profiling()) { |
| 1321 if (target->IsSharedFunctionInfo()) { | 1321 if (target->IsSharedFunctionInfo()) { |
| 1322 PROFILE(SharedFunctionInfoMoveEvent( | 1322 PROFILE(isolate, SharedFunctionInfoMoveEvent( |
| 1323 source->address(), target->address())); | 1323 source->address(), target->address())); |
| 1324 } | 1324 } |
| 1325 } | 1325 } |
| 1326 #endif | 1326 #endif |
| 1327 return target; | 1327 return target; |
| 1328 } | 1328 } |
| 1329 | 1329 |
| 1330 | 1330 |
| 1331 template<ObjectContents object_contents, SizeRestriction size_restriction> | 1331 template<ObjectContents object_contents, SizeRestriction size_restriction> |
| 1332 static inline void EvacuateObject(Map* map, | 1332 static inline void EvacuateObject(Map* map, |
| (...skipping 3491 matching lines...) |
| 4824 // Create initial maps. | 4824 // Create initial maps. |
| 4825 if (!CreateInitialMaps()) return false; | 4825 if (!CreateInitialMaps()) return false; |
| 4826 if (!CreateApiObjects()) return false; | 4826 if (!CreateApiObjects()) return false; |
| 4827 | 4827 |
| 4828 // Create initial objects | 4828 // Create initial objects |
| 4829 if (!CreateInitialObjects()) return false; | 4829 if (!CreateInitialObjects()) return false; |
| 4830 | 4830 |
| 4831 global_contexts_list_ = undefined_value(); | 4831 global_contexts_list_ = undefined_value(); |
| 4832 } | 4832 } |
| 4833 | 4833 |
| 4834 LOG(IntPtrTEvent("heap-capacity", Capacity())); | 4834 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity())); |
| 4835 LOG(IntPtrTEvent("heap-available", Available())); | 4835 LOG(isolate_, IntPtrTEvent("heap-available", Available())); |
| 4836 | 4836 |
| 4837 #ifdef ENABLE_LOGGING_AND_PROFILING | 4837 #ifdef ENABLE_LOGGING_AND_PROFILING |
| 4838 // This should be called only after initial objects have been created. | 4838 // This should be called only after initial objects have been created. |
| 4839 isolate_->producer_heap_profile()->Setup(); | 4839 isolate_->producer_heap_profile()->Setup(); |
| 4840 #endif | 4840 #endif |
| 4841 | 4841 |
| 4842 return true; | 4842 return true; |
| 4843 } | 4843 } |
| 4844 | 4844 |
| 4845 | 4845 |
| (...skipping 916 matching lines...) |
| 5762 } | 5762 } |
| 5763 | 5763 |
| 5764 | 5764 |
| 5765 void ExternalStringTable::TearDown() { | 5765 void ExternalStringTable::TearDown() { |
| 5766 new_space_strings_.Free(); | 5766 new_space_strings_.Free(); |
| 5767 old_space_strings_.Free(); | 5767 old_space_strings_.Free(); |
| 5768 } | 5768 } |
| 5769 | 5769 |
| 5770 | 5770 |
| 5771 } } // namespace v8::internal | 5771 } } // namespace v8::internal |
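Two reviewer notes on the hunks above.

First, every change in this CL follows one pattern: the `LOG(...)` and `PROFILE(...)` macros now take the owning isolate (`isolate_` or `isolate`) as an explicit first argument instead of resolving a process-wide singleton, so each isolate logs and profiles through its own `Logger`. Below is a minimal sketch of the shape such a macro can take; the toy `Logger` and `Isolate` classes are illustrative stand-ins, not V8's API, and the expansion mirrors the general shape rather than V8's exact text:

```cpp
#include <cstdio>

// Toy stand-ins for v8::internal types; the names are illustrative only.
class Logger {
 public:
  bool is_logging() const { return logging_; }
  void set_logging(bool v) { logging_ = v; }
  void ResourceEvent(const char* name, const char* phase) {
    std::printf("resource,%s,%s\n", name, phase);
  }
 private:
  bool logging_ = false;
};

class Isolate {
 public:
  Logger* logger() { return &logger_; }
 private:
  Logger logger_;
};

// The pattern this CL applies: the macro receives the isolate explicitly,
// fetches that isolate's logger, and evaluates the event call only when
// logging is enabled.
#define LOG(isolate, Call)                  \
  do {                                      \
    Logger* logger = (isolate)->logger();   \
    if (logger->is_logging()) logger->Call; \
  } while (false)

int main() {
  Isolate isolate;
  isolate.logger()->set_logging(true);
  // Shaped like the call sites in the diff: LOG(isolate_, ResourceEvent(...)).
  LOG(&isolate, ResourceEvent("markcompact", "begin"));
  LOG(&isolate, ResourceEvent("markcompact", "end"));
  return 0;
}
```

The call sites in the diff expand the same way: fetch the logger from the isolate that was passed in, check `is_logging()`, and only then evaluate the event call.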
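Second, the `Heap::Scavenge` hunk is annotated "Implements Cheney's copying algorithm." For reference, here is a minimal, self-contained sketch of the classic algorithm under an assumed toy object layout; V8's scavenger layers promotion, watermarks, and map handling on top of this core, so none of the names below are V8's:

```cpp
#include <cstddef>
#include <vector>

// Hypothetical object layout: a forwarding slot plus up to four child
// pointers. A real scavenger reads sizes and pointer offsets from the
// object's map; this sketch hard-codes them to keep the loop visible.
struct Obj {
  Obj* forward = nullptr;  // set once the object has been copied
  std::size_t num_fields = 0;
  Obj* fields[4] = {nullptr, nullptr, nullptr, nullptr};
};

class Semispaces {
 public:
  // Ensure an object has been copied to to-space; if it already was,
  // return its forwarding address instead of copying twice.
  Obj* Copy(Obj* obj) {
    if (obj == nullptr) return nullptr;
    if (obj->forward != nullptr) return obj->forward;  // already moved
    to_space_.push_back(new Obj(*obj));  // fields still point to from-space
    obj->forward = to_space_.back();
    return obj->forward;
  }

  // Cheney scan: a single index walks to-space. Everything behind the index
  // has had its fields updated; everything ahead still holds from-space
  // pointers. Copying appends to to_space_, so the traversal is breadth-first
  // and needs no explicit worklist or recursion.
  void Scavenge(std::vector<Obj*>& roots) {
    for (Obj*& root : roots) root = Copy(root);
    for (std::size_t scan = 0; scan < to_space_.size(); ++scan) {
      Obj* obj = to_space_[scan];
      for (std::size_t i = 0; i < obj->num_fields; ++i)
        obj->fields[i] = Copy(obj->fields[i]);
    }
  }

 private:
  std::vector<Obj*> to_space_;  // stands in for the to-space bump allocator
};

int main() {
  Obj a, b;
  a.num_fields = 1; a.fields[0] = &b;
  b.num_fields = 1; b.fields[0] = &a;  // a cycle; forwarding handles it
  std::vector<Obj*> roots = {&a};
  Semispaces heap;
  heap.Scavenge(roots);  // roots now point at the to-space copies
  return 0;
}
```

The single scan index is the point of the algorithm: allocation in to-space appends, scanning advances one slot at a time, and collection is done when the two meet, which is why the hunk above needs no auxiliary mark stack during a scavenge.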