OLD | NEW |
1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 308 matching lines...)
319 unflattened_strings_length_ = 0; | 319 unflattened_strings_length_ = 0; |
320 #ifdef DEBUG | 320 #ifdef DEBUG |
321 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC); | 321 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC); |
322 allow_allocation(false); | 322 allow_allocation(false); |
323 | 323 |
324 if (FLAG_verify_heap) { | 324 if (FLAG_verify_heap) { |
325 Verify(); | 325 Verify(); |
326 } | 326 } |
327 | 327 |
328 if (FLAG_gc_verbose) Print(); | 328 if (FLAG_gc_verbose) Print(); |
| 329 |
| 330 if (FLAG_print_rset) { |
| 331 // Not all spaces have remembered set bits that we care about. |
| 332 old_pointer_space_->PrintRSet(); |
| 333 map_space_->PrintRSet(); |
| 334 lo_space_->PrintRSet(); |
| 335 } |
329 #endif | 336 #endif |
330 | 337 |
331 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 338 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
332 ReportStatisticsBeforeGC(); | 339 ReportStatisticsBeforeGC(); |
333 #endif | 340 #endif |
334 } | 341 } |
335 | 342 |
336 int Heap::SizeOfObjects() { | 343 int Heap::SizeOfObjects() { |
337 int total = 0; | 344 int total = 0; |
338 AllSpaces spaces; | 345 AllSpaces spaces; |
(...skipping 166 matching lines...)
505 gc_performed = true; | 512 gc_performed = true; |
506 } | 513 } |
507 if (!(map_space->ReserveSpace(map_space_size))) { | 514 if (!(map_space->ReserveSpace(map_space_size))) { |
508 Heap::CollectGarbage(map_space_size, MAP_SPACE); | 515 Heap::CollectGarbage(map_space_size, MAP_SPACE); |
509 gc_performed = true; | 516 gc_performed = true; |
510 } | 517 } |
511 if (!(cell_space->ReserveSpace(cell_space_size))) { | 518 if (!(cell_space->ReserveSpace(cell_space_size))) { |
512 Heap::CollectGarbage(cell_space_size, CELL_SPACE); | 519 Heap::CollectGarbage(cell_space_size, CELL_SPACE); |
513 gc_performed = true; | 520 gc_performed = true; |
514 } | 521 } |
515 // We add a slack-factor of 2 in order to have space for a series of | 522 // We add a slack-factor of 2 in order to have space for the remembered |
516 // large-object allocations that are only just larger than the page size. | 523 // set and a series of large-object allocations that are only just larger |
| 524 // than the page size. |
517 large_object_size *= 2; | 525 large_object_size *= 2; |
518 // The ReserveSpace method on the large object space checks how much | 526 // The ReserveSpace method on the large object space checks how much |
519 // we can expand the old generation. This includes expansion caused by | 527 // we can expand the old generation. This includes expansion caused by |
520 // allocation in the other spaces. | 528 // allocation in the other spaces. |
521 large_object_size += cell_space_size + map_space_size + code_space_size + | 529 large_object_size += cell_space_size + map_space_size + code_space_size + |
522 data_space_size + pointer_space_size; | 530 data_space_size + pointer_space_size; |
523 if (!(lo_space->ReserveSpace(large_object_size))) { | 531 if (!(lo_space->ReserveSpace(large_object_size))) { |
524 Heap::CollectGarbage(large_object_size, LO_SPACE); | 532 Heap::CollectGarbage(large_object_size, LO_SPACE); |
525 gc_performed = true; | 533 gc_performed = true; |
526 } | 534 } |
(...skipping 30 matching lines...)
557 }; | 565 }; |
558 | 566 |
559 | 567 |
560 void Heap::ClearJSFunctionResultCaches() { | 568 void Heap::ClearJSFunctionResultCaches() { |
561 if (Bootstrapper::IsActive()) return; | 569 if (Bootstrapper::IsActive()) return; |
562 ClearThreadJSFunctionResultCachesVisitor visitor; | 570 ClearThreadJSFunctionResultCachesVisitor visitor; |
563 ThreadManager::IterateThreads(&visitor); | 571 ThreadManager::IterateThreads(&visitor); |
564 } | 572 } |
565 | 573 |
566 | 574 |
567 #ifdef DEBUG | |
568 | |
569 enum PageWatermarkValidity { | |
570 ALL_VALID, | |
571 ALL_INVALID | |
572 }; | |
573 | |
574 static void VerifyPageWatermarkValidity(PagedSpace* space, | |
575 PageWatermarkValidity validity) { | |
576 PageIterator it(space, PageIterator::PAGES_IN_USE); | |
577 bool expected_value = (validity == ALL_VALID); | |
578 while (it.has_next()) { | |
579 Page* page = it.next(); | |
580 ASSERT(page->IsWatermarkValid() == expected_value); | |
581 } | |
582 } | |
583 #endif | |
584 | |
585 | |
586 void Heap::PerformGarbageCollection(AllocationSpace space, | 575 void Heap::PerformGarbageCollection(AllocationSpace space, |
587 GarbageCollector collector, | 576 GarbageCollector collector, |
588 GCTracer* tracer) { | 577 GCTracer* tracer) { |
589 VerifySymbolTable(); | 578 VerifySymbolTable(); |
590 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) { | 579 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) { |
591 ASSERT(!allocation_allowed_); | 580 ASSERT(!allocation_allowed_); |
592 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); | 581 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); |
593 global_gc_prologue_callback_(); | 582 global_gc_prologue_callback_(); |
594 } | 583 } |
595 | 584 |
(...skipping 224 matching lines...)
820 } | 809 } |
821 | 810 |
822 | 811 |
823 void Heap::Scavenge() { | 812 void Heap::Scavenge() { |
824 #ifdef DEBUG | 813 #ifdef DEBUG |
825 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); | 814 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); |
826 #endif | 815 #endif |
827 | 816 |
828 gc_state_ = SCAVENGE; | 817 gc_state_ = SCAVENGE; |
829 | 818 |
830 Page::FlipMeaningOfInvalidatedWatermarkFlag(); | |
831 #ifdef DEBUG | |
832 VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID); | |
833 VerifyPageWatermarkValidity(map_space_, ALL_VALID); | |
834 #endif | |
835 | |
836 // We do not update an allocation watermark of the top page during linear | |
837 // allocation to avoid overhead. So to maintain the watermark invariant | |
838 // we have to manually cache the watermark and mark the top page as having an | |
839 // invalid watermark. This guarantees that dirty regions iteration will use a | |
840 // correct watermark even if a linear allocation happens. | |
841 old_pointer_space_->FlushTopPageWatermark(); | |
842 map_space_->FlushTopPageWatermark(); | |
843 | |
844 // Implements Cheney's copying algorithm | 819 // Implements Cheney's copying algorithm |
845 LOG(ResourceEvent("scavenge", "begin")); | 820 LOG(ResourceEvent("scavenge", "begin")); |
846 | 821 |
847 // Clear descriptor cache. | 822 // Clear descriptor cache. |
848 DescriptorLookupCache::Clear(); | 823 DescriptorLookupCache::Clear(); |
849 | 824 |
850 // Used for updating survived_since_last_expansion_ at function end. | 825 // Used for updating survived_since_last_expansion_ at function end. |
851 int survived_watermark = PromotedSpaceSize(); | 826 int survived_watermark = PromotedSpaceSize(); |
852 | 827 |
853 CheckNewSpaceExpansionCriteria(); | 828 CheckNewSpaceExpansionCriteria(); |
(...skipping 22 matching lines...)
876 // objects are at least one pointer in size. | 851 // objects are at least one pointer in size. |
877 Address new_space_front = new_space_.ToSpaceLow(); | 852 Address new_space_front = new_space_.ToSpaceLow(); |
878 promotion_queue.Initialize(new_space_.ToSpaceHigh()); | 853 promotion_queue.Initialize(new_space_.ToSpaceHigh()); |
879 | 854 |
880 ScavengeVisitor scavenge_visitor; | 855 ScavengeVisitor scavenge_visitor; |
881 // Copy roots. | 856 // Copy roots. |
882 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE); | 857 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE); |
883 | 858 |
884 // Copy objects reachable from the old generation. By definition, | 859 // Copy objects reachable from the old generation. By definition, |
885 // there are no intergenerational pointers in code or data spaces. | 860 // there are no intergenerational pointers in code or data spaces. |
886 IterateDirtyRegions(old_pointer_space_, | 861 IterateRSet(old_pointer_space_, &ScavengePointer); |
887 &IteratePointersInDirtyRegion, | 862 IterateRSet(map_space_, &ScavengePointer); |
888 &ScavengePointer, | 863 lo_space_->IterateRSet(&ScavengePointer); |
889 WATERMARK_CAN_BE_INVALID); | |
890 | |
891 IterateDirtyRegions(map_space_, | |
892 &IteratePointersInDirtyMapsRegion, | |
893 &ScavengePointer, | |
894 WATERMARK_CAN_BE_INVALID); | |
895 | |
896 lo_space_->IterateDirtyRegions(&ScavengePointer); | |
897 | 864 |
898 // Copy objects reachable from cells by scavenging cell values directly. | 865 // Copy objects reachable from cells by scavenging cell values directly. |
899 HeapObjectIterator cell_iterator(cell_space_); | 866 HeapObjectIterator cell_iterator(cell_space_); |
900 for (HeapObject* cell = cell_iterator.next(); | 867 for (HeapObject* cell = cell_iterator.next(); |
901 cell != NULL; cell = cell_iterator.next()) { | 868 cell != NULL; cell = cell_iterator.next()) { |
902 if (cell->IsJSGlobalPropertyCell()) { | 869 if (cell->IsJSGlobalPropertyCell()) { |
903 Address value_address = | 870 Address value_address = |
904 reinterpret_cast<Address>(cell) + | 871 reinterpret_cast<Address>(cell) + |
905 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); | 872 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); |
906 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); | 873 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); |
(...skipping 82 matching lines...)
989 } | 956 } |
990 | 957 |
991 // Promote and process all the to-be-promoted objects. | 958 // Promote and process all the to-be-promoted objects. |
992 while (!promotion_queue.is_empty()) { | 959 while (!promotion_queue.is_empty()) { |
993 HeapObject* source; | 960 HeapObject* source; |
994 Map* map; | 961 Map* map; |
995 promotion_queue.remove(&source, &map); | 962 promotion_queue.remove(&source, &map); |
996 // Copy the from-space object to its new location (given by the | 963 // Copy the from-space object to its new location (given by the |
997 // forwarding address) and fix its map. | 964 // forwarding address) and fix its map. |
998 HeapObject* target = source->map_word().ToForwardingAddress(); | 965 HeapObject* target = source->map_word().ToForwardingAddress(); |
999 int size = source->SizeFromMap(map); | 966 CopyBlock(reinterpret_cast<Object**>(target->address()), |
1000 CopyBlock(target->address(), source->address(), size); | 967 reinterpret_cast<Object**>(source->address()), |
| 968 source->SizeFromMap(map)); |
1001 target->set_map(map); | 969 target->set_map(map); |
1002 | 970 |
1003 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 971 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
1004 // Update NewSpace stats if necessary. | 972 // Update NewSpace stats if necessary. |
1005 RecordCopiedObject(target); | 973 RecordCopiedObject(target); |
1006 #endif | 974 #endif |
1007 // Visit the newly copied object for pointers to new space. | 975 // Visit the newly copied object for pointers to new space. |
1008 ASSERT(!target->IsMap()); | 976 target->Iterate(scavenge_visitor); |
1009 IterateAndMarkPointersToNewSpace(target->address(), | 977 UpdateRSet(target); |
1010 target->address() + size, | |
1011 &ScavengePointer); | |
1012 } | 978 } |
1013 | 979 |
1014 // Take another spin if there are now unswept objects in new space | 980 // Take another spin if there are now unswept objects in new space |
1015 // (there are currently no more unswept promoted objects). | 981 // (there are currently no more unswept promoted objects). |
1016 } while (new_space_front < new_space_.top()); | 982 } while (new_space_front < new_space_.top()); |
1017 | 983 |
1018 return new_space_front; | 984 return new_space_front; |
1019 } | 985 } |
1020 | 986 |
1021 | 987 |
| 988 void Heap::ClearRSetRange(Address start, int size_in_bytes) { |
| 989 uint32_t start_bit; |
| 990 Address start_word_address = |
| 991 Page::ComputeRSetBitPosition(start, 0, &start_bit); |
| 992 uint32_t end_bit; |
| 993 Address end_word_address = |
| 994 Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize, |
| 995 0, |
| 996 &end_bit); |
| 997 |
| 998 // We want to clear the bits in the starting word starting with the |
| 999 // first bit, and in the ending word up to and including the last |
| 1000 // bit. Build a pair of bitmasks to do that. |
| 1001 uint32_t start_bitmask = start_bit - 1; |
| 1002 uint32_t end_bitmask = ~((end_bit << 1) - 1); |
| 1003 |
| 1004 // If the start address and end address are the same, we mask that |
| 1005 // word once, otherwise mask the starting and ending word |
| 1006 // separately and all the ones in between. |
| 1007 if (start_word_address == end_word_address) { |
| 1008 Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask); |
| 1009 } else { |
| 1010 Memory::uint32_at(start_word_address) &= start_bitmask; |
| 1011 Memory::uint32_at(end_word_address) &= end_bitmask; |
| 1012 start_word_address += kIntSize; |
| 1013 memset(start_word_address, 0, end_word_address - start_word_address); |
| 1014 } |
| 1015 } |
| 1016 |
| 1017 |
| 1018 class UpdateRSetVisitor: public ObjectVisitor { |
| 1019 public: |
| 1020 |
| 1021 void VisitPointer(Object** p) { |
| 1022 UpdateRSet(p); |
| 1023 } |
| 1024 |
| 1025 void VisitPointers(Object** start, Object** end) { |
| 1026 // Update a store into slots [start, end), used (a) to update remembered |
| 1027 // set when promoting a young object to old space or (b) to rebuild |
| 1028 // remembered sets after a mark-compact collection. |
| 1029 for (Object** p = start; p < end; p++) UpdateRSet(p); |
| 1030 } |
| 1031 private: |
| 1032 |
| 1033 void UpdateRSet(Object** p) { |
| 1034 // The remembered set should not be set. It should be clear for objects |
| 1035 // newly copied to old space, and it is cleared before rebuilding in the |
| 1036 // mark-compact collector. |
| 1037 ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0)); |
| 1038 if (Heap::InNewSpace(*p)) { |
| 1039 Page::SetRSet(reinterpret_cast<Address>(p), 0); |
| 1040 } |
| 1041 } |
| 1042 }; |
| 1043 |
| 1044 |
| 1045 int Heap::UpdateRSet(HeapObject* obj) { |
| 1046 ASSERT(!InNewSpace(obj)); |
| 1047 // Special handling of fixed arrays to iterate the body based on the start |
| 1048 // address and offset. Just iterating the pointers as in UpdateRSetVisitor |
| 1049 // will not work because Page::SetRSet needs to have the start of the |
| 1050 // object for large object pages. |
| 1051 if (obj->IsFixedArray()) { |
| 1052 FixedArray* array = FixedArray::cast(obj); |
| 1053 int length = array->length(); |
| 1054 for (int i = 0; i < length; i++) { |
| 1055 int offset = FixedArray::kHeaderSize + i * kPointerSize; |
| 1056 ASSERT(!Page::IsRSetSet(obj->address(), offset)); |
| 1057 if (Heap::InNewSpace(array->get(i))) { |
| 1058 Page::SetRSet(obj->address(), offset); |
| 1059 } |
| 1060 } |
| 1061 } else if (!obj->IsCode()) { |
| 1062 // Skip code object, we know it does not contain inter-generational |
| 1063 // pointers. |
| 1064 UpdateRSetVisitor v; |
| 1065 obj->Iterate(&v); |
| 1066 } |
| 1067 return obj->Size(); |
| 1068 } |
| 1069 |
| 1070 |
| 1071 void Heap::RebuildRSets() { |
| 1072 // By definition, we do not care about remembered set bits in code, |
| 1073 // data, or cell spaces. |
| 1074 map_space_->ClearRSet(); |
| 1075 RebuildRSets(map_space_); |
| 1076 |
| 1077 old_pointer_space_->ClearRSet(); |
| 1078 RebuildRSets(old_pointer_space_); |
| 1079 |
| 1080 Heap::lo_space_->ClearRSet(); |
| 1081 RebuildRSets(lo_space_); |
| 1082 } |
| 1083 |
| 1084 |
| 1085 void Heap::RebuildRSets(PagedSpace* space) { |
| 1086 HeapObjectIterator it(space); |
| 1087 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) |
| 1088 Heap::UpdateRSet(obj); |
| 1089 } |
| 1090 |
| 1091 |
| 1092 void Heap::RebuildRSets(LargeObjectSpace* space) { |
| 1093 LargeObjectIterator it(space); |
| 1094 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) |
| 1095 Heap::UpdateRSet(obj); |
| 1096 } |
| 1097 |
| 1098 |
1022 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 1099 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
1023 void Heap::RecordCopiedObject(HeapObject* obj) { | 1100 void Heap::RecordCopiedObject(HeapObject* obj) { |
1024 bool should_record = false; | 1101 bool should_record = false; |
1025 #ifdef DEBUG | 1102 #ifdef DEBUG |
1026 should_record = FLAG_heap_stats; | 1103 should_record = FLAG_heap_stats; |
1027 #endif | 1104 #endif |
1028 #ifdef ENABLE_LOGGING_AND_PROFILING | 1105 #ifdef ENABLE_LOGGING_AND_PROFILING |
1029 should_record = should_record || FLAG_log_gc; | 1106 should_record = should_record || FLAG_log_gc; |
1030 #endif | 1107 #endif |
1031 if (should_record) { | 1108 if (should_record) { |
1032 if (new_space_.Contains(obj)) { | 1109 if (new_space_.Contains(obj)) { |
1033 new_space_.RecordAllocation(obj); | 1110 new_space_.RecordAllocation(obj); |
1034 } else { | 1111 } else { |
1035 new_space_.RecordPromotion(obj); | 1112 new_space_.RecordPromotion(obj); |
1036 } | 1113 } |
1037 } | 1114 } |
1038 } | 1115 } |
1039 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 1116 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
1040 | 1117 |
1041 | 1118 |
1042 | 1119 |
1043 HeapObject* Heap::MigrateObject(HeapObject* source, | 1120 HeapObject* Heap::MigrateObject(HeapObject* source, |
1044 HeapObject* target, | 1121 HeapObject* target, |
1045 int size) { | 1122 int size) { |
1046 // Copy the content of source to target. | 1123 // Copy the content of source to target. |
1047 CopyBlock(target->address(), source->address(), size); | 1124 CopyBlock(reinterpret_cast<Object**>(target->address()), |
| 1125 reinterpret_cast<Object**>(source->address()), |
| 1126 size); |
1048 | 1127 |
1049 // Set the forwarding address. | 1128 // Set the forwarding address. |
1050 source->set_map_word(MapWord::FromForwardingAddress(target)); | 1129 source->set_map_word(MapWord::FromForwardingAddress(target)); |
1051 | 1130 |
1052 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 1131 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
1053 // Update NewSpace stats if necessary. | 1132 // Update NewSpace stats if necessary. |
1054 RecordCopiedObject(target); | 1133 RecordCopiedObject(target); |
1055 #endif | 1134 #endif |
1056 | 1135 |
1057 return target; | 1136 return target; |
(...skipping 34 matching lines...)
1092 // so we can store the from-space address and map pointer of promoted | 1171 // so we can store the from-space address and map pointer of promoted |
1093 // objects in the to space. | 1172 // objects in the to space. |
1094 ASSERT(object_size >= 2 * kPointerSize); | 1173 ASSERT(object_size >= 2 * kPointerSize); |
1095 | 1174 |
1096 // If the object should be promoted, we try to copy it to old space. | 1175 // If the object should be promoted, we try to copy it to old space. |
1097 if (ShouldBePromoted(object->address(), object_size)) { | 1176 if (ShouldBePromoted(object->address(), object_size)) { |
1098 Object* result; | 1177 Object* result; |
1099 if (object_size > MaxObjectSizeInPagedSpace()) { | 1178 if (object_size > MaxObjectSizeInPagedSpace()) { |
1100 result = lo_space_->AllocateRawFixedArray(object_size); | 1179 result = lo_space_->AllocateRawFixedArray(object_size); |
1101 if (!result->IsFailure()) { | 1180 if (!result->IsFailure()) { |
| 1181 // Save the from-space object pointer and its map pointer at the |
| 1182 // top of the to space to be swept and copied later. Write the |
| 1183 // forwarding address over the map word of the from-space |
| 1184 // object. |
1102 HeapObject* target = HeapObject::cast(result); | 1185 HeapObject* target = HeapObject::cast(result); |
| 1186 promotion_queue.insert(object, first_word.ToMap()); |
| 1187 object->set_map_word(MapWord::FromForwardingAddress(target)); |
1103 | 1188 |
1104 if (object->IsFixedArray()) { | 1189 // Give the space allocated for the result a proper map by |
1105 // Save the from-space object pointer and its map pointer at the | 1190 // treating it as a free list node (not linked into the free |
1106 // top of the to space to be swept and copied later. Write the | 1191 // list). |
1107 // forwarding address over the map word of the from-space | 1192 FreeListNode* node = FreeListNode::FromAddress(target->address()); |
1108 // object. | 1193 node->set_size(object_size); |
1109 promotion_queue.insert(object, first_word.ToMap()); | |
1110 object->set_map_word(MapWord::FromForwardingAddress(target)); | |
1111 | 1194 |
1112 // Give the space allocated for the result a proper map by | 1195 *p = target; |
1113 // treating it as a free list node (not linked into the free | |
1114 // list). | |
1115 FreeListNode* node = FreeListNode::FromAddress(target->address()); | |
1116 node->set_size(object_size); | |
1117 | |
1118 *p = target; | |
1119 } else { | |
1120 // In large object space only fixed arrays might possibly contain | |
1121 // intergenerational references. | |
1122 // All other objects can be copied immediately and not revisited. | |
1123 *p = MigrateObject(object, target, object_size); | |
1124 } | |
1125 | |
1126 tracer()->increment_promoted_objects_size(object_size); | 1196 tracer()->increment_promoted_objects_size(object_size); |
1127 return; | 1197 return; |
1128 } | 1198 } |
1129 } else { | 1199 } else { |
1130 OldSpace* target_space = Heap::TargetSpace(object); | 1200 OldSpace* target_space = Heap::TargetSpace(object); |
1131 ASSERT(target_space == Heap::old_pointer_space_ || | 1201 ASSERT(target_space == Heap::old_pointer_space_ || |
1132 target_space == Heap::old_data_space_); | 1202 target_space == Heap::old_data_space_); |
1133 result = target_space->AllocateRaw(object_size); | 1203 result = target_space->AllocateRaw(object_size); |
1134 if (!result->IsFailure()) { | 1204 if (!result->IsFailure()) { |
1135 HeapObject* target = HeapObject::cast(result); | 1205 HeapObject* target = HeapObject::cast(result); |
(...skipping 469 matching lines...)
1605 if (obj->IsFailure()) return false; | 1675 if (obj->IsFailure()) return false; |
1606 roots_[constant_symbol_table[i].index] = String::cast(obj); | 1676 roots_[constant_symbol_table[i].index] = String::cast(obj); |
1607 } | 1677 } |
1608 | 1678 |
1609 // Allocate the hidden symbol which is used to identify the hidden properties | 1679 // Allocate the hidden symbol which is used to identify the hidden properties |
1610 // in JSObjects. The hash code has a special value so that it will not match | 1680 // in JSObjects. The hash code has a special value so that it will not match |
1611 // the empty string when searching for the property. It cannot be part of the | 1681 // the empty string when searching for the property. It cannot be part of the |
1612 // loop above because it needs to be allocated manually with the special | 1682 // loop above because it needs to be allocated manually with the special |
1613 // hash code in place. The hash code for the hidden_symbol is zero to ensure | 1683 // hash code in place. The hash code for the hidden_symbol is zero to ensure |
1614 // that it will always be at the first entry in property descriptors. | 1684 // that it will always be at the first entry in property descriptors. |
1615 obj = AllocateSymbol(CStrVector(""), 0, String::kZeroHash); | 1685 obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask); |
1616 if (obj->IsFailure()) return false; | 1686 if (obj->IsFailure()) return false; |
1617 hidden_symbol_ = String::cast(obj); | 1687 hidden_symbol_ = String::cast(obj); |
1618 | 1688 |
1619 // Allocate the proxy for __proto__. | 1689 // Allocate the proxy for __proto__. |
1620 obj = AllocateProxy((Address) &Accessors::ObjectPrototype); | 1690 obj = AllocateProxy((Address) &Accessors::ObjectPrototype); |
1621 if (obj->IsFailure()) return false; | 1691 if (obj->IsFailure()) return false; |
1622 set_prototype_accessors(Proxy::cast(obj)); | 1692 set_prototype_accessors(Proxy::cast(obj)); |
1623 | 1693 |
1624 // Allocate the code_stubs dictionary. The initial size is set to avoid | 1694 // Allocate the code_stubs dictionary. The initial size is set to avoid |
1625 // expanding the dictionary during bootstrapping. | 1695 // expanding the dictionary during bootstrapping. |
(...skipping 215 matching lines...)
1841 share->set_formal_parameter_count(0); | 1911 share->set_formal_parameter_count(0); |
1842 share->set_instance_class_name(Object_symbol()); | 1912 share->set_instance_class_name(Object_symbol()); |
1843 share->set_function_data(undefined_value()); | 1913 share->set_function_data(undefined_value()); |
1844 share->set_script(undefined_value()); | 1914 share->set_script(undefined_value()); |
1845 share->set_start_position_and_type(0); | 1915 share->set_start_position_and_type(0); |
1846 share->set_debug_info(undefined_value()); | 1916 share->set_debug_info(undefined_value()); |
1847 share->set_inferred_name(empty_string()); | 1917 share->set_inferred_name(empty_string()); |
1848 share->set_compiler_hints(0); | 1918 share->set_compiler_hints(0); |
1849 share->set_this_property_assignments_count(0); | 1919 share->set_this_property_assignments_count(0); |
1850 share->set_this_property_assignments(undefined_value()); | 1920 share->set_this_property_assignments(undefined_value()); |
1851 share->set_num_literals(0); | |
1852 share->set_end_position(0); | |
1853 share->set_function_token_position(0); | |
1854 return result; | 1921 return result; |
1855 } | 1922 } |
1856 | 1923 |
1857 | 1924 |
1858 // Returns true for a character in a range. Both limits are inclusive. | 1925 // Returns true for a character in a range. Both limits are inclusive. |
1859 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) { | 1926 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) { |
1860 // This makes use of the unsigned wraparound. | 1927 // This makes use of the unsigned wraparound. |
1861 return character - from <= to - from; | 1928 return character - from <= to - from; |
1862 } | 1929 } |
1863 | 1930 |
(...skipping 241 matching lines...)
2105 } | 2172 } |
2106 if (pretenure == NOT_TENURED) { | 2173 if (pretenure == NOT_TENURED) { |
2107 return AllocateByteArray(length); | 2174 return AllocateByteArray(length); |
2108 } | 2175 } |
2109 int size = ByteArray::SizeFor(length); | 2176 int size = ByteArray::SizeFor(length); |
2110 Object* result = (size <= MaxObjectSizeInPagedSpace()) | 2177 Object* result = (size <= MaxObjectSizeInPagedSpace()) |
2111 ? old_data_space_->AllocateRaw(size) | 2178 ? old_data_space_->AllocateRaw(size) |
2112 : lo_space_->AllocateRaw(size); | 2179 : lo_space_->AllocateRaw(size); |
2113 if (result->IsFailure()) return result; | 2180 if (result->IsFailure()) return result; |
2114 | 2181 |
2115 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map()); | 2182 reinterpret_cast<Array*>(result)->set_map(byte_array_map()); |
2116 reinterpret_cast<ByteArray*>(result)->set_length(length); | 2183 reinterpret_cast<Array*>(result)->set_length(length); |
2117 return result; | 2184 return result; |
2118 } | 2185 } |
2119 | 2186 |
2120 | 2187 |
2121 Object* Heap::AllocateByteArray(int length) { | 2188 Object* Heap::AllocateByteArray(int length) { |
2122 if (length < 0 || length > ByteArray::kMaxLength) { | 2189 if (length < 0 || length > ByteArray::kMaxLength) { |
2123 return Failure::OutOfMemoryException(); | 2190 return Failure::OutOfMemoryException(); |
2124 } | 2191 } |
2125 int size = ByteArray::SizeFor(length); | 2192 int size = ByteArray::SizeFor(length); |
2126 AllocationSpace space = | 2193 AllocationSpace space = |
2127 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE; | 2194 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE; |
2128 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE); | 2195 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE); |
2129 if (result->IsFailure()) return result; | 2196 if (result->IsFailure()) return result; |
2130 | 2197 |
2131 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map()); | 2198 reinterpret_cast<Array*>(result)->set_map(byte_array_map()); |
2132 reinterpret_cast<ByteArray*>(result)->set_length(length); | 2199 reinterpret_cast<Array*>(result)->set_length(length); |
2133 return result; | 2200 return result; |
2134 } | 2201 } |
2135 | 2202 |
2136 | 2203 |
2137 void Heap::CreateFillerObjectAt(Address addr, int size) { | 2204 void Heap::CreateFillerObjectAt(Address addr, int size) { |
2138 if (size == 0) return; | 2205 if (size == 0) return; |
2139 HeapObject* filler = HeapObject::FromAddress(addr); | 2206 HeapObject* filler = HeapObject::FromAddress(addr); |
2140 if (size == kPointerSize) { | 2207 if (size == kPointerSize) { |
2141 filler->set_map(one_pointer_filler_map()); | 2208 filler->set_map(one_pointer_filler_map()); |
2142 } else if (size == 2 * kPointerSize) { | 2209 } else if (size == 2 * kPointerSize) { |
(...skipping 95 matching lines...)
2238 result = lo_space_->AllocateRawCode(obj_size); | 2305 result = lo_space_->AllocateRawCode(obj_size); |
2239 } else { | 2306 } else { |
2240 result = code_space_->AllocateRaw(obj_size); | 2307 result = code_space_->AllocateRaw(obj_size); |
2241 } | 2308 } |
2242 | 2309 |
2243 if (result->IsFailure()) return result; | 2310 if (result->IsFailure()) return result; |
2244 | 2311 |
2245 // Copy code object. | 2312 // Copy code object. |
2246 Address old_addr = code->address(); | 2313 Address old_addr = code->address(); |
2247 Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); | 2314 Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); |
2248 CopyBlock(new_addr, old_addr, obj_size); | 2315 CopyBlock(reinterpret_cast<Object**>(new_addr), |
| 2316 reinterpret_cast<Object**>(old_addr), |
| 2317 obj_size); |
2249 // Relocate the copy. | 2318 // Relocate the copy. |
2250 Code* new_code = Code::cast(result); | 2319 Code* new_code = Code::cast(result); |
2251 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address())); | 2320 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address())); |
2252 new_code->Relocate(new_addr - old_addr); | 2321 new_code->Relocate(new_addr - old_addr); |
2253 return new_code; | 2322 return new_code; |
2254 } | 2323 } |
2255 | 2324 |
2256 | 2325 |
2257 Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) { | 2326 Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) { |
2258 int new_body_size = RoundUp(code->instruction_size() + reloc_info.length(), | 2327 int new_body_size = RoundUp(code->instruction_size() + reloc_info.length(), |
(...skipping 125 matching lines...)
2384 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size()); | 2453 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size()); |
2385 | 2454 |
2386 // Do the allocation. | 2455 // Do the allocation. |
2387 Object* result = | 2456 Object* result = |
2388 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE); | 2457 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE); |
2389 if (result->IsFailure()) return result; | 2458 if (result->IsFailure()) return result; |
2390 | 2459 |
2391 // Copy the content. The arguments boilerplate doesn't have any | 2460 // Copy the content. The arguments boilerplate doesn't have any |
2392 // fields that point to new space so it's safe to skip the write | 2461 // fields that point to new space so it's safe to skip the write |
2393 // barrier here. | 2462 // barrier here. |
2394 CopyBlock(HeapObject::cast(result)->address(), | 2463 CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()), |
2395 boilerplate->address(), | 2464 reinterpret_cast<Object**>(boilerplate->address()), |
2396 kArgumentsObjectSize); | 2465 kArgumentsObjectSize); |
2397 | 2466 |
2398 // Set the two properties. | 2467 // Set the two properties. |
2399 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index, | 2468 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index, |
2400 callee); | 2469 callee); |
2401 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index, | 2470 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index, |
2402 Smi::FromInt(length), | 2471 Smi::FromInt(length), |
2403 SKIP_WRITE_BARRIER); | 2472 SKIP_WRITE_BARRIER); |
2404 | 2473 |
2405 // Check the state of the object | 2474 // Check the state of the object |
(...skipping 201 matching lines...)
2607 Map* map = source->map(); | 2676 Map* map = source->map(); |
2608 int object_size = map->instance_size(); | 2677 int object_size = map->instance_size(); |
2609 Object* clone; | 2678 Object* clone; |
2610 | 2679 |
2611 // If we're forced to always allocate, we use the general allocation | 2680 // If we're forced to always allocate, we use the general allocation |
2612 // functions which may leave us with an object in old space. | 2681 // functions which may leave us with an object in old space. |
2613 if (always_allocate()) { | 2682 if (always_allocate()) { |
2614 clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE); | 2683 clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE); |
2615 if (clone->IsFailure()) return clone; | 2684 if (clone->IsFailure()) return clone; |
2616 Address clone_address = HeapObject::cast(clone)->address(); | 2685 Address clone_address = HeapObject::cast(clone)->address(); |
2617 CopyBlock(clone_address, | 2686 CopyBlock(reinterpret_cast<Object**>(clone_address), |
2618 source->address(), | 2687 reinterpret_cast<Object**>(source->address()), |
2619 object_size); | 2688 object_size); |
2620 // Update write barrier for all fields that lie beyond the header. | 2689 // Update write barrier for all fields that lie beyond the header. |
2621 RecordWrites(clone_address, | 2690 RecordWrites(clone_address, |
2622 JSObject::kHeaderSize, | 2691 JSObject::kHeaderSize, |
2623 (object_size - JSObject::kHeaderSize) / kPointerSize); | 2692 (object_size - JSObject::kHeaderSize) / kPointerSize); |
2624 } else { | 2693 } else { |
2625 clone = new_space_.AllocateRaw(object_size); | 2694 clone = new_space_.AllocateRaw(object_size); |
2626 if (clone->IsFailure()) return clone; | 2695 if (clone->IsFailure()) return clone; |
2627 ASSERT(Heap::InNewSpace(clone)); | 2696 ASSERT(Heap::InNewSpace(clone)); |
2628 // Since we know the clone is allocated in new space, we can copy | 2697 // Since we know the clone is allocated in new space, we can copy |
2629 // the contents without worrying about updating the write barrier. | 2698 // the contents without worrying about updating the write barrier. |
2630 CopyBlock(HeapObject::cast(clone)->address(), | 2699 CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()), |
2631 source->address(), | 2700 reinterpret_cast<Object**>(source->address()), |
2632 object_size); | 2701 object_size); |
2633 } | 2702 } |
2634 | 2703 |
2635 FixedArray* elements = FixedArray::cast(source->elements()); | 2704 FixedArray* elements = FixedArray::cast(source->elements()); |
2636 FixedArray* properties = FixedArray::cast(source->properties()); | 2705 FixedArray* properties = FixedArray::cast(source->properties()); |
2637 // Update elements if necessary. | 2706 // Update elements if necessary. |
2638 if (elements->length() > 0) { | 2707 if (elements->length() > 0) { |
2639 Object* elem = CopyFixedArray(elements); | 2708 Object* elem = CopyFixedArray(elements); |
2640 if (elem->IsFailure()) return elem; | 2709 if (elem->IsFailure()) return elem; |
2641 JSObject::cast(clone)->set_elements(FixedArray::cast(elem)); | 2710 JSObject::cast(clone)->set_elements(FixedArray::cast(elem)); |
(...skipping 250 matching lines...)
2892 ASSERT_EQ(size, HeapObject::cast(result)->Size()); | 2961 ASSERT_EQ(size, HeapObject::cast(result)->Size()); |
2893 return result; | 2962 return result; |
2894 } | 2963 } |
2895 | 2964 |
2896 | 2965 |
2897 Object* Heap::AllocateEmptyFixedArray() { | 2966 Object* Heap::AllocateEmptyFixedArray() { |
2898 int size = FixedArray::SizeFor(0); | 2967 int size = FixedArray::SizeFor(0); |
2899 Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); | 2968 Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); |
2900 if (result->IsFailure()) return result; | 2969 if (result->IsFailure()) return result; |
2901 // Initialize the object. | 2970 // Initialize the object. |
2902 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map()); | 2971 reinterpret_cast<Array*>(result)->set_map(fixed_array_map()); |
2903 reinterpret_cast<FixedArray*>(result)->set_length(0); | 2972 reinterpret_cast<Array*>(result)->set_length(0); |
2904 return result; | 2973 return result; |
2905 } | 2974 } |
2906 | 2975 |
2907 | 2976 |
2908 Object* Heap::AllocateRawFixedArray(int length) { | 2977 Object* Heap::AllocateRawFixedArray(int length) { |
2909 if (length < 0 || length > FixedArray::kMaxLength) { | 2978 if (length < 0 || length > FixedArray::kMaxLength) { |
2910 return Failure::OutOfMemoryException(); | 2979 return Failure::OutOfMemoryException(); |
2911 } | 2980 } |
2912 // Use the general function if we're forced to always allocate. | 2981 // Use the general function if we're forced to always allocate. |
2913 if (always_allocate()) return AllocateFixedArray(length, TENURED); | 2982 if (always_allocate()) return AllocateFixedArray(length, TENURED); |
2914 // Allocate the raw data for a fixed array. | 2983 // Allocate the raw data for a fixed array. |
2915 int size = FixedArray::SizeFor(length); | 2984 int size = FixedArray::SizeFor(length); |
2916 return size <= kMaxObjectSizeInNewSpace | 2985 return size <= kMaxObjectSizeInNewSpace |
2917 ? new_space_.AllocateRaw(size) | 2986 ? new_space_.AllocateRaw(size) |
2918 : lo_space_->AllocateRawFixedArray(size); | 2987 : lo_space_->AllocateRawFixedArray(size); |
2919 } | 2988 } |
2920 | 2989 |
2921 | 2990 |
2922 Object* Heap::CopyFixedArray(FixedArray* src) { | 2991 Object* Heap::CopyFixedArray(FixedArray* src) { |
2923 int len = src->length(); | 2992 int len = src->length(); |
2924 Object* obj = AllocateRawFixedArray(len); | 2993 Object* obj = AllocateRawFixedArray(len); |
2925 if (obj->IsFailure()) return obj; | 2994 if (obj->IsFailure()) return obj; |
2926 if (Heap::InNewSpace(obj)) { | 2995 if (Heap::InNewSpace(obj)) { |
2927 HeapObject* dst = HeapObject::cast(obj); | 2996 HeapObject* dst = HeapObject::cast(obj); |
2928 CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len)); | 2997 CopyBlock(reinterpret_cast<Object**>(dst->address()), |
| 2998 reinterpret_cast<Object**>(src->address()), |
| 2999 FixedArray::SizeFor(len)); |
2929 return obj; | 3000 return obj; |
2930 } | 3001 } |
2931 HeapObject::cast(obj)->set_map(src->map()); | 3002 HeapObject::cast(obj)->set_map(src->map()); |
2932 FixedArray* result = FixedArray::cast(obj); | 3003 FixedArray* result = FixedArray::cast(obj); |
2933 result->set_length(len); | 3004 result->set_length(len); |
2934 | 3005 |
2935 // Copy the content | 3006 // Copy the content |
2936 AssertNoAllocation no_gc; | 3007 AssertNoAllocation no_gc; |
2937 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc); | 3008 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc); |
2938 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode); | 3009 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode); |
2939 return result; | 3010 return result; |
2940 } | 3011 } |
2941 | 3012 |
2942 | 3013 |
2943 Object* Heap::AllocateFixedArray(int length) { | 3014 Object* Heap::AllocateFixedArray(int length) { |
2944 ASSERT(length >= 0); | 3015 ASSERT(length >= 0); |
2945 if (length == 0) return empty_fixed_array(); | 3016 if (length == 0) return empty_fixed_array(); |
2946 Object* result = AllocateRawFixedArray(length); | 3017 Object* result = AllocateRawFixedArray(length); |
2947 if (!result->IsFailure()) { | 3018 if (!result->IsFailure()) { |
2948 // Initialize header. | 3019 // Initialize header. |
2949 FixedArray* array = reinterpret_cast<FixedArray*>(result); | 3020 reinterpret_cast<Array*>(result)->set_map(fixed_array_map()); |
2950 array->set_map(fixed_array_map()); | 3021 FixedArray* array = FixedArray::cast(result); |
2951 array->set_length(length); | 3022 array->set_length(length); |
2952 // Initialize body. | 3023 // Initialize body. |
2953 ASSERT(!Heap::InNewSpace(undefined_value())); | 3024 ASSERT(!Heap::InNewSpace(undefined_value())); |
2954 MemsetPointer(array->data_start(), undefined_value(), length); | 3025 MemsetPointer(array->data_start(), undefined_value(), length); |
2955 } | 3026 } |
2956 return result; | 3027 return result; |
2957 } | 3028 } |
2958 | 3029 |
2959 | 3030 |
2960 Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) { | 3031 Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) { |
2961 if (length < 0 || length > FixedArray::kMaxLength) { | 3032 if (length < 0 || length > FixedArray::kMaxLength) { |
2962 return Failure::OutOfMemoryException(); | 3033 return Failure::OutOfMemoryException(); |
2963 } | 3034 } |
2964 | 3035 |
2965 AllocationSpace space = | 3036 AllocationSpace space = |
2966 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE; | 3037 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE; |
2967 int size = FixedArray::SizeFor(length); | 3038 int size = FixedArray::SizeFor(length); |
2968 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) { | 3039 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) { |
2969 // Too big for new space. | 3040 // Too big for new space. |
2970 space = LO_SPACE; | 3041 space = LO_SPACE; |
2971 } else if (space == OLD_POINTER_SPACE && | 3042 } else if (space == OLD_POINTER_SPACE && |
2972 size > MaxObjectSizeInPagedSpace()) { | 3043 size > MaxObjectSizeInPagedSpace()) { |
2973 // Too big for old pointer space. | 3044 // Too big for old pointer space. |
2974 space = LO_SPACE; | 3045 space = LO_SPACE; |
2975 } | 3046 } |
2976 | 3047 |
2977 AllocationSpace retry_space = | 3048 // Specialize allocation for the space. |
2978 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE; | 3049 Object* result = Failure::OutOfMemoryException(); |
2979 | 3050 if (space == NEW_SPACE) { |
2980 return AllocateRaw(size, space, retry_space); | 3051 // We cannot use Heap::AllocateRaw() because it will not properly |
| 3052 // allocate extra remembered set bits if always_allocate() is true and |
| 3053 // new space allocation fails. |
| 3054 result = new_space_.AllocateRaw(size); |
| 3055 if (result->IsFailure() && always_allocate()) { |
| 3056 if (size <= MaxObjectSizeInPagedSpace()) { |
| 3057 result = old_pointer_space_->AllocateRaw(size); |
| 3058 } else { |
| 3059 result = lo_space_->AllocateRawFixedArray(size); |
| 3060 } |
| 3061 } |
| 3062 } else if (space == OLD_POINTER_SPACE) { |
| 3063 result = old_pointer_space_->AllocateRaw(size); |
| 3064 } else { |
| 3065 ASSERT(space == LO_SPACE); |
| 3066 result = lo_space_->AllocateRawFixedArray(size); |
| 3067 } |
| 3068 return result; |
2981 } | 3069 } |
2982 | 3070 |
2983 | 3071 |
2984 static Object* AllocateFixedArrayWithFiller(int length, | 3072 static Object* AllocateFixedArrayWithFiller(int length, |
2985 PretenureFlag pretenure, | 3073 PretenureFlag pretenure, |
2986 Object* filler) { | 3074 Object* filler) { |
2987 ASSERT(length >= 0); | 3075 ASSERT(length >= 0); |
2988 ASSERT(Heap::empty_fixed_array()->IsFixedArray()); | 3076 ASSERT(Heap::empty_fixed_array()->IsFixedArray()); |
2989 if (length == 0) return Heap::empty_fixed_array(); | 3077 if (length == 0) return Heap::empty_fixed_array(); |
2990 | 3078 |
(...skipping 27 matching lines...)
3018 | 3106 |
3019 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map()); | 3107 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map()); |
3020 FixedArray::cast(obj)->set_length(length); | 3108 FixedArray::cast(obj)->set_length(length); |
3021 return obj; | 3109 return obj; |
3022 } | 3110 } |
3023 | 3111 |
3024 | 3112 |
3025 Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) { | 3113 Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) { |
3026 Object* result = Heap::AllocateFixedArray(length, pretenure); | 3114 Object* result = Heap::AllocateFixedArray(length, pretenure); |
3027 if (result->IsFailure()) return result; | 3115 if (result->IsFailure()) return result; |
3028 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map()); | 3116 reinterpret_cast<Array*>(result)->set_map(hash_table_map()); |
3029 ASSERT(result->IsHashTable()); | 3117 ASSERT(result->IsHashTable()); |
3030 return result; | 3118 return result; |
3031 } | 3119 } |
3032 | 3120 |
3033 | 3121 |
3034 Object* Heap::AllocateGlobalContext() { | 3122 Object* Heap::AllocateGlobalContext() { |
3035 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS); | 3123 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS); |
3036 if (result->IsFailure()) return result; | 3124 if (result->IsFailure()) return result; |
3037 Context* context = reinterpret_cast<Context*>(result); | 3125 Context* context = reinterpret_cast<Context*>(result); |
3038 context->set_map(global_context_map()); | 3126 context->set_map(global_context_map()); |
(...skipping 231 matching lines...)
3270 return cell_space_->Contains(addr); | 3358 return cell_space_->Contains(addr); |
3271 case LO_SPACE: | 3359 case LO_SPACE: |
3272 return lo_space_->SlowContains(addr); | 3360 return lo_space_->SlowContains(addr); |
3273 } | 3361 } |
3274 | 3362 |
3275 return false; | 3363 return false; |
3276 } | 3364 } |
3277 | 3365 |
3278 | 3366 |
3279 #ifdef DEBUG | 3367 #ifdef DEBUG |
3280 static void DummyScavengePointer(HeapObject** p) { | |
3281 } | |
3282 | |
3283 | |
3284 static void VerifyPointersUnderWatermark( | |
3285 PagedSpace* space, | |
3286 DirtyRegionCallback visit_dirty_region) { | |
3287 PageIterator it(space, PageIterator::PAGES_IN_USE); | |
3288 | |
3289 while (it.has_next()) { | |
3290 Page* page = it.next(); | |
3291 Address start = page->ObjectAreaStart(); | |
3292 Address end = page->AllocationWatermark(); | |
3293 | |
3294 Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks, | |
3295 start, | |
3296 end, | |
3297 visit_dirty_region, | |
3298 &DummyScavengePointer); | |
3299 } | |
3300 } | |
3301 | |
3302 | |
3303 static void VerifyPointersUnderWatermark(LargeObjectSpace* space) { | |
3304 LargeObjectIterator it(space); | |
3305 for (HeapObject* object = it.next(); object != NULL; object = it.next()) { | |
3306 if (object->IsFixedArray()) { | |
3307 Address slot_address = object->address(); | |
3308 Address end = object->address() + object->Size(); | |
3309 | |
3310 while (slot_address < end) { | |
3311 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address); | |
3312 // When we are not in GC the Heap::InNewSpace() predicate | |
3313 // checks that pointers which satisfy predicate point into | |
3314 // the active semispace. | |
3315 Heap::InNewSpace(*slot); | |
3316 slot_address += kPointerSize; | |
3317 } | |
3318 } | |
3319 } | |
3320 } | |
3321 | |
3322 | |
3323 void Heap::Verify() { | 3368 void Heap::Verify() { |
3324 ASSERT(HasBeenSetup()); | 3369 ASSERT(HasBeenSetup()); |
3325 | 3370 |
3326 VerifyPointersVisitor visitor; | 3371 VerifyPointersVisitor visitor; |
3327 IterateRoots(&visitor, VISIT_ONLY_STRONG); | 3372 IterateRoots(&visitor, VISIT_ONLY_STRONG); |
3328 | 3373 |
3329 new_space_.Verify(); | 3374 new_space_.Verify(); |
3330 | 3375 |
3331 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor; | 3376 VerifyPointersAndRSetVisitor rset_visitor; |
3332 old_pointer_space_->Verify(&dirty_regions_visitor); | 3377 old_pointer_space_->Verify(&rset_visitor); |
3333 map_space_->Verify(&dirty_regions_visitor); | 3378 map_space_->Verify(&rset_visitor); |
3334 | 3379 |
3335 VerifyPointersUnderWatermark(old_pointer_space_, | 3380 VerifyPointersVisitor no_rset_visitor; |
3336 &IteratePointersInDirtyRegion); | 3381 old_data_space_->Verify(&no_rset_visitor); |
3337 VerifyPointersUnderWatermark(map_space_, | 3382 code_space_->Verify(&no_rset_visitor); |
3338 &IteratePointersInDirtyMapsRegion); | 3383 cell_space_->Verify(&no_rset_visitor); |
3339 VerifyPointersUnderWatermark(lo_space_); | |
3340 | |
3341 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID); | |
3342 VerifyPageWatermarkValidity(map_space_, ALL_INVALID); | |
3343 | |
3344 VerifyPointersVisitor no_dirty_regions_visitor; | |
3345 old_data_space_->Verify(&no_dirty_regions_visitor); | |
3346 code_space_->Verify(&no_dirty_regions_visitor); | |
3347 cell_space_->Verify(&no_dirty_regions_visitor); | |
3348 | 3384 |
3349 lo_space_->Verify(); | 3385 lo_space_->Verify(); |
3350 } | 3386 } |
3351 #endif // DEBUG | 3387 #endif // DEBUG |
3352 | 3388 |
3353 | 3389 |
3354 Object* Heap::LookupSymbol(Vector<const char> string) { | 3390 Object* Heap::LookupSymbol(Vector<const char> string) { |
3355 Object* symbol = NULL; | 3391 Object* symbol = NULL; |
3356 Object* new_table = symbol_table()->LookupSymbol(string, &symbol); | 3392 Object* new_table = symbol_table()->LookupSymbol(string, &symbol); |
3357 if (new_table->IsFailure()) return new_table; | 3393 if (new_table->IsFailure()) return new_table; |
(...skipping 32 matching lines...)
3390 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject()); | 3426 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject()); |
3391 for (Address a = new_space_.FromSpaceLow(); | 3427 for (Address a = new_space_.FromSpaceLow(); |
3392 a < new_space_.FromSpaceHigh(); | 3428 a < new_space_.FromSpaceHigh(); |
3393 a += kPointerSize) { | 3429 a += kPointerSize) { |
3394 Memory::Address_at(a) = kFromSpaceZapValue; | 3430 Memory::Address_at(a) = kFromSpaceZapValue; |
3395 } | 3431 } |
3396 } | 3432 } |
3397 #endif // DEBUG | 3433 #endif // DEBUG |
3398 | 3434 |
3399 | 3435 |
3400 bool Heap::IteratePointersInDirtyRegion(Address start, | 3436 int Heap::IterateRSetRange(Address object_start, |
3401 Address end, | 3437 Address object_end, |
3402 ObjectSlotCallback copy_object_func) { | 3438 Address rset_start, |
3403 Address slot_address = start; | 3439 ObjectSlotCallback copy_object_func) { |
3404 bool pointers_to_new_space_found = false; | 3440 Address object_address = object_start; |
| 3441 Address rset_address = rset_start; |
| 3442 int set_bits_count = 0; |
3405 | 3443 |
3406 while (slot_address < end) { | 3444 // Loop over all the pointers in [object_start, object_end). |
3407 Object** slot = reinterpret_cast<Object**>(slot_address); | 3445 while (object_address < object_end) { |
3408 if (Heap::InNewSpace(*slot)) { | 3446 uint32_t rset_word = Memory::uint32_at(rset_address); |
3409 ASSERT((*slot)->IsHeapObject()); | 3447 if (rset_word != 0) { |
3410 copy_object_func(reinterpret_cast<HeapObject**>(slot)); | 3448 uint32_t result_rset = rset_word; |
3411 if (Heap::InNewSpace(*slot)) { | 3449 for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) { |
3412 ASSERT((*slot)->IsHeapObject()); | 3450 // Do not dereference pointers at or past object_end. |
3413 pointers_to_new_space_found = true; | 3451 if ((rset_word & bitmask) != 0 && object_address < object_end) { |
| 3452 Object** object_p = reinterpret_cast<Object**>(object_address); |
| 3453 if (Heap::InNewSpace(*object_p)) { |
| 3454 copy_object_func(reinterpret_cast<HeapObject**>(object_p)); |
| 3455 } |
| 3456 // If this pointer does not need to be remembered anymore, clear |
| 3457 // the remembered set bit. |
| 3458 if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask; |
| 3459 set_bits_count++; |
| 3460 } |
| 3461 object_address += kPointerSize; |
3414 } | 3462 } |
| 3463 // Update the remembered set if it has changed. |
| 3464 if (result_rset != rset_word) { |
| 3465 Memory::uint32_at(rset_address) = result_rset; |
| 3466 } |
| 3467 } else { |
| 3468 // No bits in the word were set. This is the common case. |
| 3469 object_address += kPointerSize * kBitsPerInt; |
3415 } | 3470 } |
3416 slot_address += kPointerSize; | 3471 rset_address += kIntSize; |
3417 } | 3472 } |
3418 return pointers_to_new_space_found; | 3473 return set_bits_count; |
3419 } | 3474 } |
3420 | 3475 |
3421 | 3476 |
3422 // Compute start address of the first map following given addr. | 3477 void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) { |
3423 static inline Address MapStartAlign(Address addr) { | 3478 ASSERT(Page::is_rset_in_use()); |
3424 Address page = Page::FromAddress(addr)->ObjectAreaStart(); | 3479 ASSERT(space == old_pointer_space_ || space == map_space_); |
3425 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize); | |
3426 } | |
3427 | 3480 |
3428 | 3481 static void* paged_rset_histogram = StatsTable::CreateHistogram( |
3429 // Compute end address of the first map preceding given addr. | 3482 "V8.RSetPaged", |
3430 static inline Address MapEndAlign(Address addr) { | 3483 0, |
3431 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart(); | 3484 Page::kObjectAreaSize / kPointerSize, |
3432 return page + ((addr - page) / Map::kSize * Map::kSize); | 3485 30); |
3433 } | |
3434 | |
3435 | |
3436 static bool IteratePointersInDirtyMaps(Address start, | |
3437 Address end, | |
3438 ObjectSlotCallback copy_object_func) { | |
3439 ASSERT(MapStartAlign(start) == start); | |
3440 ASSERT(MapEndAlign(end) == end); | |
3441 | |
3442 Address map_address = start; | |
3443 bool pointers_to_new_space_found = false; | |
3444 | |
3445 while (map_address < end) { | |
3446 ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address))); | |
3447 ASSERT(Memory::Object_at(map_address)->IsMap()); | |
3448 | |
3449 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset; | |
3450 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset; | |
3451 | |
3452 if (Heap::IteratePointersInDirtyRegion(pointer_fields_start, | |
3453 pointer_fields_end, | |
3454 copy_object_func)) { | |
3455 pointers_to_new_space_found = true; | |
3456 } | |
3457 | |
3458 map_address += Map::kSize; | |
3459 } | |
3460 | |
3461 return pointers_to_new_space_found; | |
3462 } | |
3463 | |
3464 | |
3465 bool Heap::IteratePointersInDirtyMapsRegion( | |
3466 Address start, | |
3467 Address end, | |
3468 ObjectSlotCallback copy_object_func) { | |
3469 Address map_aligned_start = MapStartAlign(start); | |
3470 Address map_aligned_end = MapEndAlign(end); | |
3471 | |
3472 bool contains_pointers_to_new_space = false; | |
3473 | |
3474 if (map_aligned_start != start) { | |
3475 Address prev_map = map_aligned_start - Map::kSize; | |
3476 ASSERT(Memory::Object_at(prev_map)->IsMap()); | |
3477 | |
3478 Address pointer_fields_start = | |
3479 Max(start, prev_map + Map::kPointerFieldsBeginOffset); | |
3480 | |
3481 Address pointer_fields_end = | |
3482 Min(prev_map + Map::kCodeCacheOffset + kPointerSize, end); | |
3483 | |
3484 contains_pointers_to_new_space = | |
3485 IteratePointersInDirtyRegion(pointer_fields_start, | |
3486 pointer_fields_end, | |
3487 copy_object_func) | |
3488 || contains_pointers_to_new_space; | |
3489 } | |
3490 | |
3491 contains_pointers_to_new_space = | |
3492 IteratePointersInDirtyMaps(map_aligned_start, | |
3493 map_aligned_end, | |
3494 copy_object_func) | |
3495 || contains_pointers_to_new_space; | |
3496 | |
3497 if (map_aligned_end != end) { | |
3498 ASSERT(Memory::Object_at(map_aligned_end)->IsMap()); | |
3499 | |
3500 Address pointer_fields_start = map_aligned_end + Map::kPrototypeOffset; | |
3501 | |
3502 Address pointer_fields_end = | |
3503 Min(end, map_aligned_end + Map::kCodeCacheOffset + kPointerSize); | |
3504 | |
3505 contains_pointers_to_new_space = | |
3506 IteratePointersInDirtyRegion(pointer_fields_start, | |
3507 pointer_fields_end, | |
3508 copy_object_func) | |
3509 || contains_pointers_to_new_space; | |
3510 } | |
3511 | |
3512 return contains_pointers_to_new_space; | |
3513 } | |
3514 | |
3515 | |
3516 void Heap::IterateAndMarkPointersToNewSpace(Address start, | |
3517 Address end, | |
3518 ObjectSlotCallback callback) { | |
3519 Address slot_address = start; | |
3520 Page* page = Page::FromAddress(start); | |
3521 | |
3522 uint32_t marks = page->GetRegionMarks(); | |
3523 | |
3524 while (slot_address < end) { | |
3525 Object** slot = reinterpret_cast<Object**>(slot_address); | |
3526 if (Heap::InNewSpace(*slot)) { | |
3527 ASSERT((*slot)->IsHeapObject()); | |
3528 callback(reinterpret_cast<HeapObject**>(slot)); | |
3529 if (Heap::InNewSpace(*slot)) { | |
3530 ASSERT((*slot)->IsHeapObject()); | |
3531 marks |= page->GetRegionMaskForAddress(slot_address); | |
3532 } | |
3533 } | |
3534 slot_address += kPointerSize; | |
3535 } | |
3536 | |
3537 page->SetRegionMarks(marks); | |
3538 } | |
3539 | |
3540 | |
3541 uint32_t Heap::IterateDirtyRegions( | |
3542 uint32_t marks, | |
3543 Address area_start, | |
3544 Address area_end, | |
3545 DirtyRegionCallback visit_dirty_region, | |
3546 ObjectSlotCallback copy_object_func) { | |
3547 uint32_t newmarks = 0; | |
3548 uint32_t mask = 1; | |
3549 | |
3550 if (area_start >= area_end) { | |
3551 return newmarks; | |
3552 } | |
3553 | |
3554 Address region_start = area_start; | |
3555 | |
3556 // area_start does not necessarily coincide with start of the first region. | |
3557 // Thus to calculate the beginning of the next region we have to align | |
3558 // area_start by Page::kRegionSize. | |
3559 Address second_region = | |
3560 reinterpret_cast<Address>( | |
3561 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) & | |
3562 ~Page::kRegionAlignmentMask); | |
3563 | |
3564 // Next region might be beyond area_end. | |
3565 Address region_end = Min(second_region, area_end); | |
3566 | |
3567 if (marks & mask) { | |
3568 if (visit_dirty_region(region_start, region_end, copy_object_func)) { | |
3569 newmarks |= mask; | |
3570 } | |
3571 } | |
3572 mask <<= 1; | |
3573 | |
3574 // Iterate subsequent regions which fully lay inside [area_start, area_end[. | |
3575 region_start = region_end; | |
3576 region_end = region_start + Page::kRegionSize; | |
3577 | |
3578 while (region_end <= area_end) { | |
3579 if (marks & mask) { | |
3580 if (visit_dirty_region(region_start, region_end, copy_object_func)) { | |
3581 newmarks |= mask; | |
3582 } | |
3583 } | |
3584 | |
3585 region_start = region_end; | |
3586 region_end = region_start + Page::kRegionSize; | |
3587 | |
3588 mask <<= 1; | |
3589 } | |
3590 | |
3591 if (region_start != area_end) { | |
3592 // A small piece of area left uniterated because area_end does not coincide | |
3593 // with region end. Check whether region covering last part of area is | |
3594 // dirty. | |
3595 if (marks & mask) { | |
3596 if (visit_dirty_region(region_start, area_end, copy_object_func)) { | |
3597 newmarks |= mask; | |
3598 } | |
3599 } | |
3600 } | |
3601 | |
3602 return newmarks; | |
3603 } | |
3604 | |
3605 | |
3606 | |
3607 void Heap::IterateDirtyRegions( | |
3608 PagedSpace* space, | |
3609 DirtyRegionCallback visit_dirty_region, | |
3610 ObjectSlotCallback copy_object_func, | |
3611 ExpectedPageWatermarkState expected_page_watermark_state) { | |
3612 | 3486 |
3613 PageIterator it(space, PageIterator::PAGES_IN_USE); | 3487 PageIterator it(space, PageIterator::PAGES_IN_USE); |
3614 | |
3615 while (it.has_next()) { | 3488 while (it.has_next()) { |
3616 Page* page = it.next(); | 3489 Page* page = it.next(); |
3617 uint32_t marks = page->GetRegionMarks(); | 3490 int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(), |
3618 | 3491 page->RSetStart(), copy_object_func); |
3619 if (marks != Page::kAllRegionsCleanMarks) { | 3492 if (paged_rset_histogram != NULL) { |
3620 Address start = page->ObjectAreaStart(); | 3493 StatsTable::AddHistogramSample(paged_rset_histogram, count); |
3621 | |
3622 // Do not try to visit pointers beyond page allocation watermark. | |
3623 // Page can contain garbage pointers there. | |
3624 Address end; | |
3625 | |
3626 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) || | |
3627 page->IsWatermarkValid()) { | |
3628 end = page->AllocationWatermark(); | |
3629 } else { | |
3630 end = page->CachedAllocationWatermark(); | |
3631 } | |
3632 | |
3633 ASSERT(space == old_pointer_space_ || | |
3634 (space == map_space_ && | |
3635 ((page->ObjectAreaStart() - end) % Map::kSize == 0))); | |
3636 | |
3637 page->SetRegionMarks(IterateDirtyRegions(marks, | |
3638 start, | |
3639 end, | |
3640 visit_dirty_region, | |
3641 copy_object_func)); | |
3642 } | 3494 } |
3643 | |
3644 // Mark page watermark as invalid to maintain watermark validity invariant. | |
3645 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details. | |
3646 page->InvalidateWatermark(true); | |
3647 } | 3495 } |
3648 } | 3496 } |
3649 | 3497 |
3650 | 3498 |
3651 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { | 3499 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { |
3652 IterateStrongRoots(v, mode); | 3500 IterateStrongRoots(v, mode); |
3653 IterateWeakRoots(v, mode); | 3501 IterateWeakRoots(v, mode); |
3654 } | 3502 } |
3655 | 3503 |
3656 | 3504 |
(...skipping 996 matching lines...)
4653 void ExternalStringTable::TearDown() { | 4501 void ExternalStringTable::TearDown() { |
4654 new_space_strings_.Free(); | 4502 new_space_strings_.Free(); |
4655 old_space_strings_.Free(); | 4503 old_space_strings_.Free(); |
4656 } | 4504 } |
4657 | 4505 |
4658 | 4506 |
4659 List<Object*> ExternalStringTable::new_space_strings_; | 4507 List<Object*> ExternalStringTable::new_space_strings_; |
4660 List<Object*> ExternalStringTable::old_space_strings_; | 4508 List<Object*> ExternalStringTable::old_space_strings_; |
4661 | 4509 |
4662 } } // namespace v8::internal | 4510 } } // namespace v8::internal |