OLD | NEW |
---|---|
1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 308 matching lines...)
319 unflattened_strings_length_ = 0; | 319 unflattened_strings_length_ = 0; |
320 #ifdef DEBUG | 320 #ifdef DEBUG |
321 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC); | 321 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC); |
322 allow_allocation(false); | 322 allow_allocation(false); |
323 | 323 |
324 if (FLAG_verify_heap) { | 324 if (FLAG_verify_heap) { |
325 Verify(); | 325 Verify(); |
326 } | 326 } |
327 | 327 |
328 if (FLAG_gc_verbose) Print(); | 328 if (FLAG_gc_verbose) Print(); |
329 | |
330 if (FLAG_print_rset) { | |
331 // Not all spaces have remembered set bits that we care about. | |
332 old_pointer_space_->PrintRSet(); | |
333 map_space_->PrintRSet(); | |
334 lo_space_->PrintRSet(); | |
335 } | |
336 #endif | 329 #endif |
337 | 330 |
338 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 331 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
339 ReportStatisticsBeforeGC(); | 332 ReportStatisticsBeforeGC(); |
340 #endif | 333 #endif |
341 } | 334 } |
342 | 335 |
343 int Heap::SizeOfObjects() { | 336 int Heap::SizeOfObjects() { |
344 int total = 0; | 337 int total = 0; |
345 AllSpaces spaces; | 338 AllSpaces spaces; |
(...skipping 166 matching lines...)
512 gc_performed = true; | 505 gc_performed = true; |
513 } | 506 } |
514 if (!(map_space->ReserveSpace(map_space_size))) { | 507 if (!(map_space->ReserveSpace(map_space_size))) { |
515 Heap::CollectGarbage(map_space_size, MAP_SPACE); | 508 Heap::CollectGarbage(map_space_size, MAP_SPACE); |
516 gc_performed = true; | 509 gc_performed = true; |
517 } | 510 } |
518 if (!(cell_space->ReserveSpace(cell_space_size))) { | 511 if (!(cell_space->ReserveSpace(cell_space_size))) { |
519 Heap::CollectGarbage(cell_space_size, CELL_SPACE); | 512 Heap::CollectGarbage(cell_space_size, CELL_SPACE); |
520 gc_performed = true; | 513 gc_performed = true; |
521 } | 514 } |
522 // We add a slack-factor of 2 in order to have space for the remembered | 515 // We add a slack-factor of 2 in order to have space for a series of |
523 // set and a series of large-object allocations that are only just larger | 516 // large-object allocations that are only just larger than the page size. |
524 // than the page size. | |
525 large_object_size *= 2; | 517 large_object_size *= 2; |
526 // The ReserveSpace method on the large object space checks how much | 518 // The ReserveSpace method on the large object space checks how much |
527 // we can expand the old generation. This includes expansion caused by | 519 // we can expand the old generation. This includes expansion caused by |
528 // allocation in the other spaces. | 520 // allocation in the other spaces. |
529 large_object_size += cell_space_size + map_space_size + code_space_size + | 521 large_object_size += cell_space_size + map_space_size + code_space_size + |
530 data_space_size + pointer_space_size; | 522 data_space_size + pointer_space_size; |
531 if (!(lo_space->ReserveSpace(large_object_size))) { | 523 if (!(lo_space->ReserveSpace(large_object_size))) { |
532 Heap::CollectGarbage(large_object_size, LO_SPACE); | 524 Heap::CollectGarbage(large_object_size, LO_SPACE); |
533 gc_performed = true; | 525 gc_performed = true; |
534 } | 526 } |
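To get a feel for the reservation arithmetic in the hunk above: doubling leaves room for a series of large objects that are each only just over a page, and the other spaces' reservations are added on because large-object growth shares old-generation headroom with them. A rough standalone check, with made-up sizes standing in for the real V8 constants:

```cpp
#include <cstdio>

int main() {
  const int kPage = 8 * 1024;  // assumed page size, illustration only
  int cell_space_size = kPage;
  int map_space_size = 2 * kPage;
  int code_space_size = 4 * kPage;
  int data_space_size = 4 * kPage;
  int pointer_space_size = 4 * kPage;
  int large_object_size = 3 * kPage;

  large_object_size *= 2;  // slack for objects only just larger than a page
  large_object_size += cell_space_size + map_space_size + code_space_size +
                       data_space_size + pointer_space_size;

  std::printf("reserve %d bytes in LO space\n", large_object_size);  // 172032
  return 0;
}
```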
(...skipping 30 matching lines...)
565 }; | 557 }; |
566 | 558 |
567 | 559 |
568 void Heap::ClearJSFunctionResultCaches() { | 560 void Heap::ClearJSFunctionResultCaches() { |
569 if (Bootstrapper::IsActive()) return; | 561 if (Bootstrapper::IsActive()) return; |
570 ClearThreadJSFunctionResultCachesVisitor visitor; | 562 ClearThreadJSFunctionResultCachesVisitor visitor; |
571 ThreadManager::IterateThreads(&visitor); | 563 ThreadManager::IterateThreads(&visitor); |
572 } | 564 } |
573 | 565 |
574 | 566 |
567 #ifdef DEBUG | |
568 | |
569 enum PageWatermarkValidity { | |
570 ALL_VALID, | |
571 ALL_INVALID | |
572 }; | |
573 | |
574 static void VerifyPageWatermarkValidity(PagedSpace* space, | |
575 PageWatermarkValidity validity) { | |
576 PageIterator it(space, PageIterator::PAGES_IN_USE); | |
577 bool expected_value = (validity == ALL_VALID); | |
578 while (it.has_next()) { | |
579 Page* page = it.next(); | |
580 ASSERT(page->IsWatermarkValid() == expected_value); | |
581 } | |
582 } | |
583 #endif | |
584 | |
585 | |
575 void Heap::PerformGarbageCollection(AllocationSpace space, | 586 void Heap::PerformGarbageCollection(AllocationSpace space, |
576 GarbageCollector collector, | 587 GarbageCollector collector, |
577 GCTracer* tracer) { | 588 GCTracer* tracer) { |
578 VerifySymbolTable(); | 589 VerifySymbolTable(); |
579 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) { | 590 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) { |
580 ASSERT(!allocation_allowed_); | 591 ASSERT(!allocation_allowed_); |
581 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); | 592 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); |
582 global_gc_prologue_callback_(); | 593 global_gc_prologue_callback_(); |
583 } | 594 } |
584 | 595 |
(...skipping 224 matching lines...)
809 } | 820 } |
810 | 821 |
811 | 822 |
812 void Heap::Scavenge() { | 823 void Heap::Scavenge() { |
813 #ifdef DEBUG | 824 #ifdef DEBUG |
814 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); | 825 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); |
815 #endif | 826 #endif |
816 | 827 |
817 gc_state_ = SCAVENGE; | 828 gc_state_ = SCAVENGE; |
818 | 829 |
830 Page::FlipMeaningOfInvalidatedWatermarkFlag(); | |
831 #ifdef DEBUG | |
832 VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID); | |
833 VerifyPageWatermarkValidity(map_space_, ALL_VALID); | |
834 #endif | |
835 | |
836 // We do not update the allocation watermark of the top page during linear | 
837 // allocation, to avoid overhead. So to maintain the watermark invariant | 
838 // we have to manually cache the watermark and mark the top page as having an | 
839 // invalid watermark. This guarantees that dirty-region iteration will use a | 
840 // correct watermark even if a linear allocation happens. | 
841 old_pointer_space_->FlushTopPageWatermark(); | |
842 map_space_->FlushTopPageWatermark(); | |
843 | |
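The comment above is the heart of the watermark scheme. A minimal sketch of the invariant, assuming hypothetical stand-ins for the real Page and PagedSpace types: linear allocation bumps a top pointer with no per-page bookkeeping, so before dirty-region iteration the space caches the true boundary and flags the watermark as invalid, making iterators fall back to the cached value.

```cpp
// Hypothetical model; field and method names are assumptions, not the V8 API.
struct PageModel {
  char* object_area_start;
  char* cached_watermark;  // last flushed allocation boundary
  bool watermark_valid;    // false => iterators must use cached_watermark
};

struct PagedSpaceModel {
  PageModel* top_page;
  char* top;  // linear allocation pointer, bumped without bookkeeping

  void FlushTopPageWatermark() {
    top_page->cached_watermark = top;   // record where allocation really ends
    top_page->watermark_valid = false;  // dirty-region iteration uses cache
  }
};

int main() {
  static char arena[1024];
  PageModel page = {arena, arena, true};
  PagedSpaceModel space = {&page, arena + 512};  // 512 bytes allocated so far
  space.FlushTopPageWatermark();
  // Dirty-region iteration would now stop at page.cached_watermark.
  return page.watermark_valid ? 1 : 0;
}
```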
819 // Implements Cheney's copying algorithm | 844 // Implements Cheney's copying algorithm |
820 LOG(ResourceEvent("scavenge", "begin")); | 845 LOG(ResourceEvent("scavenge", "begin")); |
821 | 846 |
822 // Clear descriptor cache. | 847 // Clear descriptor cache. |
823 DescriptorLookupCache::Clear(); | 848 DescriptorLookupCache::Clear(); |
824 | 849 |
825 // Used for updating survived_since_last_expansion_ at function end. | 850 // Used for updating survived_since_last_expansion_ at function end. |
826 int survived_watermark = PromotedSpaceSize(); | 851 int survived_watermark = PromotedSpaceSize(); |
827 | 852 |
828 CheckNewSpaceExpansionCriteria(); | 853 CheckNewSpaceExpansionCriteria(); |
(...skipping 22 matching lines...)
851 // objects are at least one pointer in size. | 876 // objects are at least one pointer in size. |
852 Address new_space_front = new_space_.ToSpaceLow(); | 877 Address new_space_front = new_space_.ToSpaceLow(); |
853 promotion_queue.Initialize(new_space_.ToSpaceHigh()); | 878 promotion_queue.Initialize(new_space_.ToSpaceHigh()); |
854 | 879 |
855 ScavengeVisitor scavenge_visitor; | 880 ScavengeVisitor scavenge_visitor; |
856 // Copy roots. | 881 // Copy roots. |
857 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE); | 882 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE); |
858 | 883 |
859 // Copy objects reachable from the old generation. By definition, | 884 // Copy objects reachable from the old generation. By definition, |
860 // there are no intergenerational pointers in code or data spaces. | 885 // there are no intergenerational pointers in code or data spaces. |
861 IterateRSet(old_pointer_space_, &ScavengePointer); | 886 IterateDirtyRegions(old_pointer_space_, |
862 IterateRSet(map_space_, &ScavengePointer); | 887 &IteratePointersInDirtyRegion, |
863 lo_space_->IterateRSet(&ScavengePointer); | 888 &ScavengePointer, |
889 WATERMARK_CAN_BE_INVALID); | |
890 | |
891 IterateDirtyRegions(map_space_, | |
892 &IteratePointersInDirtyMapsRegion, | |
893 &ScavengePointer, | |
894 WATERMARK_CAN_BE_INVALID); | |
895 | |
896 lo_space_->IterateDirtyRegions(&ScavengePointer); | |
864 | 897 |
865 // Copy objects reachable from cells by scavenging cell values directly. | 898 // Copy objects reachable from cells by scavenging cell values directly. |
866 HeapObjectIterator cell_iterator(cell_space_); | 899 HeapObjectIterator cell_iterator(cell_space_); |
867 for (HeapObject* cell = cell_iterator.next(); | 900 for (HeapObject* cell = cell_iterator.next(); |
868 cell != NULL; cell = cell_iterator.next()) { | 901 cell != NULL; cell = cell_iterator.next()) { |
869 if (cell->IsJSGlobalPropertyCell()) { | 902 if (cell->IsJSGlobalPropertyCell()) { |
870 Address value_address = | 903 Address value_address = |
871 reinterpret_cast<Address>(cell) + | 904 reinterpret_cast<Address>(cell) + |
872 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); | 905 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); |
873 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); | 906 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); |
(...skipping 82 matching lines...)
956 } | 989 } |
957 | 990 |
958 // Promote and process all the to-be-promoted objects. | 991 // Promote and process all the to-be-promoted objects. |
959 while (!promotion_queue.is_empty()) { | 992 while (!promotion_queue.is_empty()) { |
960 HeapObject* source; | 993 HeapObject* source; |
961 Map* map; | 994 Map* map; |
962 promotion_queue.remove(&source, &map); | 995 promotion_queue.remove(&source, &map); |
963 // Copy the from-space object to its new location (given by the | 996 // Copy the from-space object to its new location (given by the |
964 // forwarding address) and fix its map. | 997 // forwarding address) and fix its map. |
965 HeapObject* target = source->map_word().ToForwardingAddress(); | 998 HeapObject* target = source->map_word().ToForwardingAddress(); |
966 CopyBlock(reinterpret_cast<Object**>(target->address()), | 999 int size = source->SizeFromMap(map); |
967 reinterpret_cast<Object**>(source->address()), | 1000 CopyBlock(target->address(), source->address(), size); |
968 source->SizeFromMap(map)); | |
969 target->set_map(map); | 1001 target->set_map(map); |
970 | 1002 |
971 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 1003 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
972 // Update NewSpace stats if necessary. | 1004 // Update NewSpace stats if necessary. |
973 RecordCopiedObject(target); | 1005 RecordCopiedObject(target); |
974 #endif | 1006 #endif |
975 // Visit the newly copied object for pointers to new space. | 1007 // Visit the newly copied object for pointers to new space. |
976 target->Iterate(scavenge_visitor); | 1008 ASSERT(!target->IsMap()); |
977 UpdateRSet(target); | 1009 IterateAndMarkPointersToNewSpace(target->address(), |
1010 target->address() + size, | |
1011 &ScavengePointer); | |
978 } | 1012 } |
979 | 1013 |
980 // Take another spin if there are now unswept objects in new space | 1014 // Take another spin if there are now unswept objects in new space |
981 // (there are currently no more unswept promoted objects). | 1015 // (there are currently no more unswept promoted objects). |
982 } while (new_space_front < new_space_.top()); | 1016 } while (new_space_front < new_space_.top()); |
983 | 1017 |
984 return new_space_front; | 1018 return new_space_front; |
985 } | 1019 } |
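The do-while above is the scan phase of Cheney's algorithm: visiting an object can evacuate more objects into to-space, moving the allocation top, so the scan pointer keeps chasing it. A toy, self-contained model of that termination argument (plain C++, no V8 types):

```cpp
#include <cstddef>
#include <vector>

int main() {
  // Each entry says how many "children" visiting it evacuates into to-space.
  std::vector<int> to_space = {3, 2, 0};
  std::size_t front = 0;                 // scan pointer
  while (front < to_space.size()) {      // size() plays the allocation top
    int children = to_space[front++];    // "visit" one unswept object
    for (int i = 0; i < children; i++) {
      to_space.push_back(0);             // evacuate a child with no children
    }
  }
  return 0;  // terminates once no unswept objects remain
}
```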
986 | 1020 |
987 | 1021 |
988 void Heap::ClearRSetRange(Address start, int size_in_bytes) { | |
989 uint32_t start_bit; | |
990 Address start_word_address = | |
991 Page::ComputeRSetBitPosition(start, 0, &start_bit); | |
992 uint32_t end_bit; | |
993 Address end_word_address = | |
994 Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize, | |
995 0, | |
996 &end_bit); | |
997 | |
998 // We want to clear the bits in the starting word starting with the | |
999 // first bit, and in the ending word up to and including the last | |
1000 // bit. Build a pair of bitmasks to do that. | |
1001 uint32_t start_bitmask = start_bit - 1; | |
1002 uint32_t end_bitmask = ~((end_bit << 1) - 1); | |
1003 | |
1004 // If the start address and end address are the same, we mask that | |
1005 // word once, otherwise mask the starting and ending word | |
1006 // separately and all the ones in between. | |
1007 if (start_word_address == end_word_address) { | |
1008 Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask); | |
1009 } else { | |
1010 Memory::uint32_at(start_word_address) &= start_bitmask; | |
1011 Memory::uint32_at(end_word_address) &= end_bitmask; | |
1012 start_word_address += kIntSize; | |
1013 memset(start_word_address, 0, end_word_address - start_word_address); | |
1014 } | |
1015 } | |
1016 | |
1017 | |
1018 class UpdateRSetVisitor: public ObjectVisitor { | |
1019 public: | |
1020 | |
1021 void VisitPointer(Object** p) { | |
1022 UpdateRSet(p); | |
1023 } | |
1024 | |
1025 void VisitPointers(Object** start, Object** end) { | |
1026 // Update a store into slots [start, end), used (a) to update remembered | |
1027 // set when promoting a young object to old space or (b) to rebuild | |
1028 // remembered sets after a mark-compact collection. | |
1029 for (Object** p = start; p < end; p++) UpdateRSet(p); | |
1030 } | |
1031 private: | |
1032 | |
1033 void UpdateRSet(Object** p) { | |
1034 // The remembered set should not be set. It should be clear for objects | |
1035 // newly copied to old space, and it is cleared before rebuilding in the | |
1036 // mark-compact collector. | |
1037 ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0)); | |
1038 if (Heap::InNewSpace(*p)) { | |
1039 Page::SetRSet(reinterpret_cast<Address>(p), 0); | |
1040 } | |
1041 } | |
1042 }; | |
1043 | |
1044 | |
1045 int Heap::UpdateRSet(HeapObject* obj) { | |
1046 ASSERT(!InNewSpace(obj)); | |
1047 // Special handling of fixed arrays to iterate the body based on the start | |
1048 // address and offset. Just iterating the pointers as in UpdateRSetVisitor | |
1049 // will not work because Page::SetRSet needs to have the start of the | |
1050 // object for large object pages. | |
1051 if (obj->IsFixedArray()) { | |
1052 FixedArray* array = FixedArray::cast(obj); | |
1053 int length = array->length(); | |
1054 for (int i = 0; i < length; i++) { | |
1055 int offset = FixedArray::kHeaderSize + i * kPointerSize; | |
1056 ASSERT(!Page::IsRSetSet(obj->address(), offset)); | |
1057 if (Heap::InNewSpace(array->get(i))) { | |
1058 Page::SetRSet(obj->address(), offset); | |
1059 } | |
1060 } | |
1061 } else if (!obj->IsCode()) { | |
1062 // Skip code object, we know it does not contain inter-generational | |
1063 // pointers. | |
1064 UpdateRSetVisitor v; | |
1065 obj->Iterate(&v); | |
1066 } | |
1067 return obj->Size(); | |
1068 } | |
1069 | |
1070 | |
1071 void Heap::RebuildRSets() { | |
1072 // By definition, we do not care about remembered set bits in code, | |
1073 // data, or cell spaces. | |
1074 map_space_->ClearRSet(); | |
1075 RebuildRSets(map_space_); | |
1076 | |
1077 old_pointer_space_->ClearRSet(); | |
1078 RebuildRSets(old_pointer_space_); | |
1079 | |
1080 Heap::lo_space_->ClearRSet(); | |
1081 RebuildRSets(lo_space_); | |
1082 } | |
1083 | |
1084 | |
1085 void Heap::RebuildRSets(PagedSpace* space) { | |
1086 HeapObjectIterator it(space); | |
1087 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) | |
1088 Heap::UpdateRSet(obj); | |
1089 } | |
1090 | |
1091 | |
1092 void Heap::RebuildRSets(LargeObjectSpace* space) { | |
1093 LargeObjectIterator it(space); | |
1094 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) | |
1095 Heap::UpdateRSet(obj); | |
1096 } | |
1097 | |
1098 | |
1099 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 1022 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
1100 void Heap::RecordCopiedObject(HeapObject* obj) { | 1023 void Heap::RecordCopiedObject(HeapObject* obj) { |
1101 bool should_record = false; | 1024 bool should_record = false; |
1102 #ifdef DEBUG | 1025 #ifdef DEBUG |
1103 should_record = FLAG_heap_stats; | 1026 should_record = FLAG_heap_stats; |
1104 #endif | 1027 #endif |
1105 #ifdef ENABLE_LOGGING_AND_PROFILING | 1028 #ifdef ENABLE_LOGGING_AND_PROFILING |
1106 should_record = should_record || FLAG_log_gc; | 1029 should_record = should_record || FLAG_log_gc; |
1107 #endif | 1030 #endif |
1108 if (should_record) { | 1031 if (should_record) { |
1109 if (new_space_.Contains(obj)) { | 1032 if (new_space_.Contains(obj)) { |
1110 new_space_.RecordAllocation(obj); | 1033 new_space_.RecordAllocation(obj); |
1111 } else { | 1034 } else { |
1112 new_space_.RecordPromotion(obj); | 1035 new_space_.RecordPromotion(obj); |
1113 } | 1036 } |
1114 } | 1037 } |
1115 } | 1038 } |
1116 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 1039 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
1117 | 1040 |
1118 | 1041 |
1119 | 1042 |
1120 HeapObject* Heap::MigrateObject(HeapObject* source, | 1043 HeapObject* Heap::MigrateObject(HeapObject* source, |
1121 HeapObject* target, | 1044 HeapObject* target, |
1122 int size) { | 1045 int size) { |
1123 // Copy the content of source to target. | 1046 // Copy the content of source to target. |
1124 CopyBlock(reinterpret_cast<Object**>(target->address()), | 1047 CopyBlock(target->address(), source->address(), size); |
1125 reinterpret_cast<Object**>(source->address()), | |
1126 size); | |
1127 | 1048 |
1128 // Set the forwarding address. | 1049 // Set the forwarding address. |
1129 source->set_map_word(MapWord::FromForwardingAddress(target)); | 1050 source->set_map_word(MapWord::FromForwardingAddress(target)); |
1130 | 1051 |
1131 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 1052 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
1132 // Update NewSpace stats if necessary. | 1053 // Update NewSpace stats if necessary. |
1133 RecordCopiedObject(target); | 1054 RecordCopiedObject(target); |
1134 #endif | 1055 #endif |
1135 | 1056 |
1136 return target; | 1057 return target; |
(...skipping 538 matching lines...)
1675 if (obj->IsFailure()) return false; | 1596 if (obj->IsFailure()) return false; |
1676 roots_[constant_symbol_table[i].index] = String::cast(obj); | 1597 roots_[constant_symbol_table[i].index] = String::cast(obj); |
1677 } | 1598 } |
1678 | 1599 |
1679 // Allocate the hidden symbol which is used to identify the hidden properties | 1600 // Allocate the hidden symbol which is used to identify the hidden properties |
1680 // in JSObjects. The hash code has a special value so that it will not match | 1601 // in JSObjects. The hash code has a special value so that it will not match |
1681 // the empty string when searching for the property. It cannot be part of the | 1602 // the empty string when searching for the property. It cannot be part of the |
1682 // loop above because it needs to be allocated manually with the special | 1603 // loop above because it needs to be allocated manually with the special |
1683 // hash code in place. The hash code for the hidden_symbol is zero to ensure | 1604 // hash code in place. The hash code for the hidden_symbol is zero to ensure |
1684 // that it will always be at the first entry in property descriptors. | 1605 // that it will always be at the first entry in property descriptors. |
1685 obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask); | 1606 obj = AllocateSymbol(CStrVector(""), 0, String::kZeroHash); |
1686 if (obj->IsFailure()) return false; | 1607 if (obj->IsFailure()) return false; |
1687 hidden_symbol_ = String::cast(obj); | 1608 hidden_symbol_ = String::cast(obj); |
1688 | 1609 |
1689 // Allocate the proxy for __proto__. | 1610 // Allocate the proxy for __proto__. |
1690 obj = AllocateProxy((Address) &Accessors::ObjectPrototype); | 1611 obj = AllocateProxy((Address) &Accessors::ObjectPrototype); |
1691 if (obj->IsFailure()) return false; | 1612 if (obj->IsFailure()) return false; |
1692 set_prototype_accessors(Proxy::cast(obj)); | 1613 set_prototype_accessors(Proxy::cast(obj)); |
1693 | 1614 |
1694 // Allocate the code_stubs dictionary. The initial size is set to avoid | 1615 // Allocate the code_stubs dictionary. The initial size is set to avoid |
1695 // expanding the dictionary during bootstrapping. | 1616 // expanding the dictionary during bootstrapping. |
(...skipping 215 matching lines...)
1911 share->set_formal_parameter_count(0); | 1832 share->set_formal_parameter_count(0); |
1912 share->set_instance_class_name(Object_symbol()); | 1833 share->set_instance_class_name(Object_symbol()); |
1913 share->set_function_data(undefined_value()); | 1834 share->set_function_data(undefined_value()); |
1914 share->set_script(undefined_value()); | 1835 share->set_script(undefined_value()); |
1915 share->set_start_position_and_type(0); | 1836 share->set_start_position_and_type(0); |
1916 share->set_debug_info(undefined_value()); | 1837 share->set_debug_info(undefined_value()); |
1917 share->set_inferred_name(empty_string()); | 1838 share->set_inferred_name(empty_string()); |
1918 share->set_compiler_hints(0); | 1839 share->set_compiler_hints(0); |
1919 share->set_this_property_assignments_count(0); | 1840 share->set_this_property_assignments_count(0); |
1920 share->set_this_property_assignments(undefined_value()); | 1841 share->set_this_property_assignments(undefined_value()); |
1842 share->set_num_literals(0); | |
1843 share->set_end_position(0); | |
1844 share->set_function_token_position(0); | |
1921 return result; | 1845 return result; |
1922 } | 1846 } |
1923 | 1847 |
1924 | 1848 |
1925 // Returns true for a character in a range. Both limits are inclusive. | 1849 // Returns true for a character in a range. Both limits are inclusive. |
1926 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) { | 1850 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) { |
1927 // This makes use of the unsigned wraparound. | 1851 // This makes use of the unsigned wraparound. |
1928 return character - from <= to - from; | 1852 return character - from <= to - from; |
1929 } | 1853 } |
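The single unsigned comparison in Between covers both bounds because a character below from wraps around to a huge unsigned value. A self-contained check of that trick (standard C++ only):

```cpp
#include <cassert>
#include <cstdint>

static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
  return character - from <= to - from;
}

int main() {
  assert(Between('5', '0', '9'));
  assert(!Between('a', '0', '9'));  // above the range: 'a' - '0' == 49 > 9
  assert(!Between('/', '0', '9'));  // below the range: wraps to 2^32 - 1
  return 0;
}
```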
1930 | 1854 |
(...skipping 241 matching lines...)
2172 } | 2096 } |
2173 if (pretenure == NOT_TENURED) { | 2097 if (pretenure == NOT_TENURED) { |
2174 return AllocateByteArray(length); | 2098 return AllocateByteArray(length); |
2175 } | 2099 } |
2176 int size = ByteArray::SizeFor(length); | 2100 int size = ByteArray::SizeFor(length); |
2177 Object* result = (size <= MaxObjectSizeInPagedSpace()) | 2101 Object* result = (size <= MaxObjectSizeInPagedSpace()) |
2178 ? old_data_space_->AllocateRaw(size) | 2102 ? old_data_space_->AllocateRaw(size) |
2179 : lo_space_->AllocateRaw(size); | 2103 : lo_space_->AllocateRaw(size); |
2180 if (result->IsFailure()) return result; | 2104 if (result->IsFailure()) return result; |
2181 | 2105 |
2182 reinterpret_cast<Array*>(result)->set_map(byte_array_map()); | 2106 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map()); |
2183 reinterpret_cast<Array*>(result)->set_length(length); | 2107 reinterpret_cast<ByteArray*>(result)->set_length(length); |
2184 return result; | 2108 return result; |
2185 } | 2109 } |
2186 | 2110 |
2187 | 2111 |
2188 Object* Heap::AllocateByteArray(int length) { | 2112 Object* Heap::AllocateByteArray(int length) { |
2189 if (length < 0 || length > ByteArray::kMaxLength) { | 2113 if (length < 0 || length > ByteArray::kMaxLength) { |
2190 return Failure::OutOfMemoryException(); | 2114 return Failure::OutOfMemoryException(); |
2191 } | 2115 } |
2192 int size = ByteArray::SizeFor(length); | 2116 int size = ByteArray::SizeFor(length); |
2193 AllocationSpace space = | 2117 AllocationSpace space = |
2194 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE; | 2118 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE; |
2195 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE); | 2119 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE); |
2196 if (result->IsFailure()) return result; | 2120 if (result->IsFailure()) return result; |
2197 | 2121 |
2198 reinterpret_cast<Array*>(result)->set_map(byte_array_map()); | 2122 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map()); |
2199 reinterpret_cast<Array*>(result)->set_length(length); | 2123 reinterpret_cast<ByteArray*>(result)->set_length(length); |
2200 return result; | 2124 return result; |
2201 } | 2125 } |
2202 | 2126 |
2203 | 2127 |
2204 void Heap::CreateFillerObjectAt(Address addr, int size) { | 2128 void Heap::CreateFillerObjectAt(Address addr, int size) { |
2205 if (size == 0) return; | 2129 if (size == 0) return; |
2206 HeapObject* filler = HeapObject::FromAddress(addr); | 2130 HeapObject* filler = HeapObject::FromAddress(addr); |
2207 if (size == kPointerSize) { | 2131 if (size == kPointerSize) { |
2208 filler->set_map(one_pointer_filler_map()); | 2132 filler->set_map(one_pointer_filler_map()); |
2209 } else if (size == 2 * kPointerSize) { | 2133 } else if (size == 2 * kPointerSize) { |
(...skipping 95 matching lines...)
2305 result = lo_space_->AllocateRawCode(obj_size); | 2229 result = lo_space_->AllocateRawCode(obj_size); |
2306 } else { | 2230 } else { |
2307 result = code_space_->AllocateRaw(obj_size); | 2231 result = code_space_->AllocateRaw(obj_size); |
2308 } | 2232 } |
2309 | 2233 |
2310 if (result->IsFailure()) return result; | 2234 if (result->IsFailure()) return result; |
2311 | 2235 |
2312 // Copy code object. | 2236 // Copy code object. |
2313 Address old_addr = code->address(); | 2237 Address old_addr = code->address(); |
2314 Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); | 2238 Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); |
2315 CopyBlock(reinterpret_cast<Object**>(new_addr), | 2239 CopyBlock(new_addr, old_addr, obj_size); |
2316 reinterpret_cast<Object**>(old_addr), | |
2317 obj_size); | |
2318 // Relocate the copy. | 2240 // Relocate the copy. |
2319 Code* new_code = Code::cast(result); | 2241 Code* new_code = Code::cast(result); |
2320 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address())); | 2242 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address())); |
2321 new_code->Relocate(new_addr - old_addr); | 2243 new_code->Relocate(new_addr - old_addr); |
2322 return new_code; | 2244 return new_code; |
2323 } | 2245 } |
2324 | 2246 |
2325 | 2247 |
2326 Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) { | 2248 Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) { |
2327 int new_body_size = RoundUp(code->instruction_size() + reloc_info.length(), | 2249 int new_body_size = RoundUp(code->instruction_size() + reloc_info.length(), |
(...skipping 125 matching lines...)
2453 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size()); | 2375 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size()); |
2454 | 2376 |
2455 // Do the allocation. | 2377 // Do the allocation. |
2456 Object* result = | 2378 Object* result = |
2457 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE); | 2379 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE); |
2458 if (result->IsFailure()) return result; | 2380 if (result->IsFailure()) return result; |
2459 | 2381 |
2460 // Copy the content. The arguments boilerplate doesn't have any | 2382 // Copy the content. The arguments boilerplate doesn't have any |
2461 // fields that point to new space so it's safe to skip the write | 2383 // fields that point to new space so it's safe to skip the write |
2462 // barrier here. | 2384 // barrier here. |
2463 CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()), | 2385 CopyBlock(HeapObject::cast(result)->address(), |
2464 reinterpret_cast<Object**>(boilerplate->address()), | 2386 boilerplate->address(), |
2465 kArgumentsObjectSize); | 2387 kArgumentsObjectSize); |
2466 | 2388 |
2467 // Set the two properties. | 2389 // Set the two properties. |
2468 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index, | 2390 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index, |
2469 callee); | 2391 callee); |
2470 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index, | 2392 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index, |
2471 Smi::FromInt(length), | 2393 Smi::FromInt(length), |
2472 SKIP_WRITE_BARRIER); | 2394 SKIP_WRITE_BARRIER); |
2473 | 2395 |
2474 // Check the state of the object | 2396 // Check the state of the object |
(...skipping 201 matching lines...)
2676 Map* map = source->map(); | 2598 Map* map = source->map(); |
2677 int object_size = map->instance_size(); | 2599 int object_size = map->instance_size(); |
2678 Object* clone; | 2600 Object* clone; |
2679 | 2601 |
2680 // If we're forced to always allocate, we use the general allocation | 2602 // If we're forced to always allocate, we use the general allocation |
2681 // functions which may leave us with an object in old space. | 2603 // functions which may leave us with an object in old space. |
2682 if (always_allocate()) { | 2604 if (always_allocate()) { |
2683 clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE); | 2605 clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE); |
2684 if (clone->IsFailure()) return clone; | 2606 if (clone->IsFailure()) return clone; |
2685 Address clone_address = HeapObject::cast(clone)->address(); | 2607 Address clone_address = HeapObject::cast(clone)->address(); |
2686 CopyBlock(reinterpret_cast<Object**>(clone_address), | 2608 CopyBlock(clone_address, |
2687 reinterpret_cast<Object**>(source->address()), | 2609 source->address(), |
2688 object_size); | 2610 object_size); |
2689 // Update write barrier for all fields that lie beyond the header. | 2611 // Update write barrier for all fields that lie beyond the header. |
2690 RecordWrites(clone_address, | 2612 RecordWrites(clone_address, |
2691 JSObject::kHeaderSize, | 2613 JSObject::kHeaderSize, |
2692 (object_size - JSObject::kHeaderSize) / kPointerSize); | 2614 (object_size - JSObject::kHeaderSize) / kPointerSize); |
2693 } else { | 2615 } else { |
2694 clone = new_space_.AllocateRaw(object_size); | 2616 clone = new_space_.AllocateRaw(object_size); |
2695 if (clone->IsFailure()) return clone; | 2617 if (clone->IsFailure()) return clone; |
2696 ASSERT(Heap::InNewSpace(clone)); | 2618 ASSERT(Heap::InNewSpace(clone)); |
2697 // Since we know the clone is allocated in new space, we can copy | 2619 // Since we know the clone is allocated in new space, we can copy |
2698 // the contents without worrying about updating the write barrier. | 2620 // the contents without worrying about updating the write barrier. |
2699 CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()), | 2621 CopyBlock(HeapObject::cast(clone)->address(), |
2700 reinterpret_cast<Object**>(source->address()), | 2622 source->address(), |
2701 object_size); | 2623 object_size); |
2702 } | 2624 } |
2703 | 2625 |
2704 FixedArray* elements = FixedArray::cast(source->elements()); | 2626 FixedArray* elements = FixedArray::cast(source->elements()); |
2705 FixedArray* properties = FixedArray::cast(source->properties()); | 2627 FixedArray* properties = FixedArray::cast(source->properties()); |
2706 // Update elements if necessary. | 2628 // Update elements if necessary. |
2707 if (elements->length() > 0) { | 2629 if (elements->length() > 0) { |
2708 Object* elem = CopyFixedArray(elements); | 2630 Object* elem = CopyFixedArray(elements); |
2709 if (elem->IsFailure()) return elem; | 2631 if (elem->IsFailure()) return elem; |
2710 JSObject::cast(clone)->set_elements(FixedArray::cast(elem)); | 2632 JSObject::cast(clone)->set_elements(FixedArray::cast(elem)); |
(...skipping 250 matching lines...)
2961 ASSERT_EQ(size, HeapObject::cast(result)->Size()); | 2883 ASSERT_EQ(size, HeapObject::cast(result)->Size()); |
2962 return result; | 2884 return result; |
2963 } | 2885 } |
2964 | 2886 |
2965 | 2887 |
2966 Object* Heap::AllocateEmptyFixedArray() { | 2888 Object* Heap::AllocateEmptyFixedArray() { |
2967 int size = FixedArray::SizeFor(0); | 2889 int size = FixedArray::SizeFor(0); |
2968 Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); | 2890 Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); |
2969 if (result->IsFailure()) return result; | 2891 if (result->IsFailure()) return result; |
2970 // Initialize the object. | 2892 // Initialize the object. |
2971 reinterpret_cast<Array*>(result)->set_map(fixed_array_map()); | 2893 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map()); |
2972 reinterpret_cast<Array*>(result)->set_length(0); | 2894 reinterpret_cast<FixedArray*>(result)->set_length(0); |
2973 return result; | 2895 return result; |
2974 } | 2896 } |
2975 | 2897 |
2976 | 2898 |
2977 Object* Heap::AllocateRawFixedArray(int length) { | 2899 Object* Heap::AllocateRawFixedArray(int length) { |
2978 if (length < 0 || length > FixedArray::kMaxLength) { | 2900 if (length < 0 || length > FixedArray::kMaxLength) { |
2979 return Failure::OutOfMemoryException(); | 2901 return Failure::OutOfMemoryException(); |
2980 } | 2902 } |
2981 // Use the general function if we're forced to always allocate. | 2903 // Use the general function if we're forced to always allocate. |
2982 if (always_allocate()) return AllocateFixedArray(length, TENURED); | 2904 if (always_allocate()) return AllocateFixedArray(length, TENURED); |
2983 // Allocate the raw data for a fixed array. | 2905 // Allocate the raw data for a fixed array. |
2984 int size = FixedArray::SizeFor(length); | 2906 int size = FixedArray::SizeFor(length); |
2985 return size <= kMaxObjectSizeInNewSpace | 2907 return size <= kMaxObjectSizeInNewSpace |
2986 ? new_space_.AllocateRaw(size) | 2908 ? new_space_.AllocateRaw(size) |
2987 : lo_space_->AllocateRawFixedArray(size); | 2909 : lo_space_->AllocateRawFixedArray(size); |
2988 } | 2910 } |
2989 | 2911 |
2990 | 2912 |
2991 Object* Heap::CopyFixedArray(FixedArray* src) { | 2913 Object* Heap::CopyFixedArray(FixedArray* src) { |
2992 int len = src->length(); | 2914 int len = src->length(); |
2993 Object* obj = AllocateRawFixedArray(len); | 2915 Object* obj = AllocateRawFixedArray(len); |
2994 if (obj->IsFailure()) return obj; | 2916 if (obj->IsFailure()) return obj; |
2995 if (Heap::InNewSpace(obj)) { | 2917 if (Heap::InNewSpace(obj)) { |
2996 HeapObject* dst = HeapObject::cast(obj); | 2918 HeapObject* dst = HeapObject::cast(obj); |
2997 CopyBlock(reinterpret_cast<Object**>(dst->address()), | 2919 CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len)); |
2998 reinterpret_cast<Object**>(src->address()), | |
2999 FixedArray::SizeFor(len)); | |
3000 return obj; | 2920 return obj; |
3001 } | 2921 } |
3002 HeapObject::cast(obj)->set_map(src->map()); | 2922 HeapObject::cast(obj)->set_map(src->map()); |
3003 FixedArray* result = FixedArray::cast(obj); | 2923 FixedArray* result = FixedArray::cast(obj); |
3004 result->set_length(len); | 2924 result->set_length(len); |
3005 | 2925 |
3006 // Copy the content | 2926 // Copy the content |
3007 AssertNoAllocation no_gc; | 2927 AssertNoAllocation no_gc; |
3008 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc); | 2928 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc); |
3009 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode); | 2929 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode); |
3010 return result; | 2930 return result; |
3011 } | 2931 } |
3012 | 2932 |
3013 | 2933 |
3014 Object* Heap::AllocateFixedArray(int length) { | 2934 Object* Heap::AllocateFixedArray(int length) { |
3015 ASSERT(length >= 0); | 2935 ASSERT(length >= 0); |
3016 if (length == 0) return empty_fixed_array(); | 2936 if (length == 0) return empty_fixed_array(); |
3017 Object* result = AllocateRawFixedArray(length); | 2937 Object* result = AllocateRawFixedArray(length); |
3018 if (!result->IsFailure()) { | 2938 if (!result->IsFailure()) { |
3019 // Initialize header. | 2939 // Initialize header. |
3020 reinterpret_cast<Array*>(result)->set_map(fixed_array_map()); | 2940 FixedArray* array = reinterpret_cast<FixedArray*>(result); |
3021 FixedArray* array = FixedArray::cast(result); | 2941 array->set_map(fixed_array_map()); |
3022 array->set_length(length); | 2942 array->set_length(length); |
3023 // Initialize body. | 2943 // Initialize body. |
3024 ASSERT(!Heap::InNewSpace(undefined_value())); | 2944 ASSERT(!Heap::InNewSpace(undefined_value())); |
3025 MemsetPointer(array->data_start(), undefined_value(), length); | 2945 MemsetPointer(array->data_start(), undefined_value(), length); |
3026 } | 2946 } |
3027 return result; | 2947 return result; |
3028 } | 2948 } |
3029 | 2949 |
3030 | 2950 |
3031 Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) { | 2951 Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) { |
3032 if (length < 0 || length > FixedArray::kMaxLength) { | 2952 if (length < 0 || length > FixedArray::kMaxLength) { |
3033 return Failure::OutOfMemoryException(); | 2953 return Failure::OutOfMemoryException(); |
3034 } | 2954 } |
3035 | 2955 |
3036 AllocationSpace space = | 2956 AllocationSpace space = |
3037 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE; | 2957 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE; |
3038 int size = FixedArray::SizeFor(length); | 2958 int size = FixedArray::SizeFor(length); |
3039 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) { | 2959 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) { |
3040 // Too big for new space. | 2960 // Too big for new space. |
3041 space = LO_SPACE; | 2961 space = LO_SPACE; |
3042 } else if (space == OLD_POINTER_SPACE && | 2962 } else if (space == OLD_POINTER_SPACE && |
3043 size > MaxObjectSizeInPagedSpace()) { | 2963 size > MaxObjectSizeInPagedSpace()) { |
3044 // Too big for old pointer space. | 2964 // Too big for old pointer space. |
3045 space = LO_SPACE; | 2965 space = LO_SPACE; |
3046 } | 2966 } |
3047 | 2967 |
3048 // Specialize allocation for the space. | 2968 AllocationSpace retry_space = |
3049 Object* result = Failure::OutOfMemoryException(); | 2969 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE; |
3050 if (space == NEW_SPACE) { | 2970 |
3051 // We cannot use Heap::AllocateRaw() because it will not properly | 2971 return AllocateRaw(size, space, retry_space); |
3052 // allocate extra remembered set bits if always_allocate() is true and | |
3053 // new space allocation fails. | |
3054 result = new_space_.AllocateRaw(size); | |
3055 if (result->IsFailure() && always_allocate()) { | |
3056 if (size <= MaxObjectSizeInPagedSpace()) { | |
3057 result = old_pointer_space_->AllocateRaw(size); | |
3058 } else { | |
3059 result = lo_space_->AllocateRawFixedArray(size); | |
3060 } | |
3061 } | |
3062 } else if (space == OLD_POINTER_SPACE) { | |
3063 result = old_pointer_space_->AllocateRaw(size); | |
3064 } else { | |
3065 ASSERT(space == LO_SPACE); | |
3066 result = lo_space_->AllocateRawFixedArray(size); | |
3067 } | |
3068 return result; | |
3069 } | 2972 } |
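The rewrite above replaces the hand-rolled per-space fallback with AllocateRaw's retry parameter: pick a retry space up front that is guaranteed to fit the object, then let one helper do the try-and-fall-back. A hypothetical model of that pattern (TryAllocate and the size threshold are made up for illustration):

```cpp
#include <cstdio>
#include <optional>

enum class Space { kNewSpace, kOldPointerSpace, kLoSpace };

// Stand-in allocator: pretend new space cannot hold objects over 512K.
std::optional<int> TryAllocate(int size, Space space) {
  if (space == Space::kNewSpace && size > 512 * 1024) return std::nullopt;
  return size;  // placeholder for an address
}

// Try the preferred space; on failure, retry in a space chosen to fit.
std::optional<int> AllocateWithRetry(int size, Space space, Space retry) {
  auto result = TryAllocate(size, space);
  if (!result) result = TryAllocate(size, retry);
  return result;
}

int main() {
  auto r = AllocateWithRetry(700 * 1024, Space::kNewSpace, Space::kLoSpace);
  std::printf("allocated: %s\n", r ? "yes" : "no");
  return 0;
}
```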
3070 | 2973 |
3071 | 2974 |
3072 static Object* AllocateFixedArrayWithFiller(int length, | 2975 static Object* AllocateFixedArrayWithFiller(int length, |
3073 PretenureFlag pretenure, | 2976 PretenureFlag pretenure, |
3074 Object* filler) { | 2977 Object* filler) { |
3075 ASSERT(length >= 0); | 2978 ASSERT(length >= 0); |
3076 ASSERT(Heap::empty_fixed_array()->IsFixedArray()); | 2979 ASSERT(Heap::empty_fixed_array()->IsFixedArray()); |
3077 if (length == 0) return Heap::empty_fixed_array(); | 2980 if (length == 0) return Heap::empty_fixed_array(); |
3078 | 2981 |
(...skipping 27 matching lines...)
3106 | 3009 |
3107 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map()); | 3010 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map()); |
3108 FixedArray::cast(obj)->set_length(length); | 3011 FixedArray::cast(obj)->set_length(length); |
3109 return obj; | 3012 return obj; |
3110 } | 3013 } |
3111 | 3014 |
3112 | 3015 |
3113 Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) { | 3016 Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) { |
3114 Object* result = Heap::AllocateFixedArray(length, pretenure); | 3017 Object* result = Heap::AllocateFixedArray(length, pretenure); |
3115 if (result->IsFailure()) return result; | 3018 if (result->IsFailure()) return result; |
3116 reinterpret_cast<Array*>(result)->set_map(hash_table_map()); | 3019 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map()); |
3117 ASSERT(result->IsHashTable()); | 3020 ASSERT(result->IsHashTable()); |
3118 return result; | 3021 return result; |
3119 } | 3022 } |
3120 | 3023 |
3121 | 3024 |
3122 Object* Heap::AllocateGlobalContext() { | 3025 Object* Heap::AllocateGlobalContext() { |
3123 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS); | 3026 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS); |
3124 if (result->IsFailure()) return result; | 3027 if (result->IsFailure()) return result; |
3125 Context* context = reinterpret_cast<Context*>(result); | 3028 Context* context = reinterpret_cast<Context*>(result); |
3126 context->set_map(global_context_map()); | 3029 context->set_map(global_context_map()); |
(...skipping 231 matching lines...)
3358 return cell_space_->Contains(addr); | 3261 return cell_space_->Contains(addr); |
3359 case LO_SPACE: | 3262 case LO_SPACE: |
3360 return lo_space_->SlowContains(addr); | 3263 return lo_space_->SlowContains(addr); |
3361 } | 3264 } |
3362 | 3265 |
3363 return false; | 3266 return false; |
3364 } | 3267 } |
3365 | 3268 |
3366 | 3269 |
3367 #ifdef DEBUG | 3270 #ifdef DEBUG |
3271 static void DummyScavengePointer(HeapObject** p) { | |
3272 } | |
3273 | |
3274 | |
3275 static void VerifyPointersUnderWatermark( | |
3276 PagedSpace* space, | |
3277 DirtyRegionCallback visit_dirty_region) { | |
3278 PageIterator it(space, PageIterator::PAGES_IN_USE); | |
3279 | |
3280 while (it.has_next()) { | |
3281 Page* page = it.next(); | |
3282 Address start = page->ObjectAreaStart(); | |
3283 Address end = page->AllocationWatermark(); | |
3284 | |
3285 Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks, | |
3286 start, | |
3287 end, | |
3288 visit_dirty_region, | |
3289 &DummyScavengePointer); | |
3290 } | |
3291 } | |
3292 | |
3293 | |
3294 static void VerifyPointersUnderWatermark(LargeObjectSpace* space) { | |
3295 LargeObjectIterator it(space); | |
3296 for (HeapObject* object = it.next(); object != NULL; object = it.next()) { | |
3297 if (object->IsFixedArray()) { | |
3298 Address slot_address = object->address(); | |
3299 Address end = object->address() + object->Size(); | |
3300 | |
3301 while (slot_address < end) { | |
3302 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address); | |
3303 // When we are not in GC, the Heap::InNewSpace() predicate | 
3304 // asserts that any pointer satisfying it points into | 
3305 // the active semispace. | 
3306 Heap::InNewSpace(*slot); | |
3307 slot_address += kPointerSize; | |
3308 } | |
3309 } | |
3310 } | |
3311 } | |
3312 | |
3313 | |
3368 void Heap::Verify() { | 3314 void Heap::Verify() { |
3369 ASSERT(HasBeenSetup()); | 3315 ASSERT(HasBeenSetup()); |
3370 | 3316 |
3371 VerifyPointersVisitor visitor; | 3317 VerifyPointersVisitor visitor; |
3372 IterateRoots(&visitor, VISIT_ONLY_STRONG); | 3318 IterateRoots(&visitor, VISIT_ONLY_STRONG); |
3373 | 3319 |
3374 new_space_.Verify(); | 3320 new_space_.Verify(); |
3375 | 3321 |
3376 VerifyPointersAndRSetVisitor rset_visitor; | 3322 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor; |
3377 old_pointer_space_->Verify(&rset_visitor); | 3323 old_pointer_space_->Verify(&dirty_regions_visitor); |
3378 map_space_->Verify(&rset_visitor); | 3324 map_space_->Verify(&dirty_regions_visitor); |
3379 | 3325 |
3380 VerifyPointersVisitor no_rset_visitor; | 3326 VerifyPointersUnderWatermark(old_pointer_space_, |
3381 old_data_space_->Verify(&no_rset_visitor); | 3327 &IteratePointersInDirtyRegion); |
3382 code_space_->Verify(&no_rset_visitor); | 3328 VerifyPointersUnderWatermark(map_space_, |
3383 cell_space_->Verify(&no_rset_visitor); | 3329 &IteratePointersInDirtyMapsRegion); |
3330 VerifyPointersUnderWatermark(lo_space_); | |
3331 | |
Vyacheslav Egorov (Chromium), 2010/05/21 11:12:26:
Improved heap verification here. Now all regions a
| |
3332 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID); | |
3333 VerifyPageWatermarkValidity(map_space_, ALL_INVALID); | |
3334 | |
3335 VerifyPointersVisitor no_dirty_regions_visitor; | |
3336 old_data_space_->Verify(&no_dirty_regions_visitor); | |
3337 code_space_->Verify(&no_dirty_regions_visitor); | |
3338 cell_space_->Verify(&no_dirty_regions_visitor); | |
3384 | 3339 |
3385 lo_space_->Verify(); | 3340 lo_space_->Verify(); |
3386 } | 3341 } |
3387 #endif // DEBUG | 3342 #endif // DEBUG |
3388 | 3343 |
3389 | 3344 |
3390 Object* Heap::LookupSymbol(Vector<const char> string) { | 3345 Object* Heap::LookupSymbol(Vector<const char> string) { |
3391 Object* symbol = NULL; | 3346 Object* symbol = NULL; |
3392 Object* new_table = symbol_table()->LookupSymbol(string, &symbol); | 3347 Object* new_table = symbol_table()->LookupSymbol(string, &symbol); |
3393 if (new_table->IsFailure()) return new_table; | 3348 if (new_table->IsFailure()) return new_table; |
(...skipping 32 matching lines...)
3426 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject()); | 3381 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject()); |
3427 for (Address a = new_space_.FromSpaceLow(); | 3382 for (Address a = new_space_.FromSpaceLow(); |
3428 a < new_space_.FromSpaceHigh(); | 3383 a < new_space_.FromSpaceHigh(); |
3429 a += kPointerSize) { | 3384 a += kPointerSize) { |
3430 Memory::Address_at(a) = kFromSpaceZapValue; | 3385 Memory::Address_at(a) = kFromSpaceZapValue; |
3431 } | 3386 } |
3432 } | 3387 } |
3433 #endif // DEBUG | 3388 #endif // DEBUG |
3434 | 3389 |
3435 | 3390 |
3436 int Heap::IterateRSetRange(Address object_start, | 3391 bool Heap::IteratePointersInDirtyRegion(Address start, |
3437 Address object_end, | 3392 Address end, |
3438 Address rset_start, | 3393 ObjectSlotCallback copy_object_func) { |
3439 ObjectSlotCallback copy_object_func) { | 3394 Address slot_address = start; |
3440 Address object_address = object_start; | 3395 bool pointers_to_new_space_found = false; |
3441 Address rset_address = rset_start; | 3396 |
3442 int set_bits_count = 0; | 3397 while (slot_address < end) { |
3443 | 3398 Object** slot = reinterpret_cast<Object**>(slot_address); |
3444 // Loop over all the pointers in [object_start, object_end). | 3399 if (Heap::InNewSpace(*slot)) { |
3445 while (object_address < object_end) { | 3400 ASSERT((*slot)->IsHeapObject()); |
3446 uint32_t rset_word = Memory::uint32_at(rset_address); | 3401 copy_object_func(reinterpret_cast<HeapObject**>(slot)); |
3447 if (rset_word != 0) { | 3402 if (Heap::InNewSpace(*slot)) { |
3448 uint32_t result_rset = rset_word; | 3403 ASSERT((*slot)->IsHeapObject()); |
3449 for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) { | 3404 pointers_to_new_space_found = true; |
3450 // Do not dereference pointers at or past object_end. | 3405 } |
3451 if ((rset_word & bitmask) != 0 && object_address < object_end) { | 3406 } |
3452 Object** object_p = reinterpret_cast<Object**>(object_address); | 3407 slot_address += kPointerSize; |
3453 if (Heap::InNewSpace(*object_p)) { | 3408 } |
3454 copy_object_func(reinterpret_cast<HeapObject**>(object_p)); | 3409 return pointers_to_new_space_found; |
3455 } | 3410 } |
3456 // If this pointer does not need to be remembered anymore, clear | 3411 |
3457 // the remembered set bit. | 3412 |
3458 if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask; | 3413 // Compute start address of the first map following given addr. |
3459 set_bits_count++; | 3414 static inline Address MapStartAlign(Address addr) { |
3460 } | 3415 Address page = Page::FromAddress(addr)->ObjectAreaStart(); |
3461 object_address += kPointerSize; | 3416 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize); |
3462 } | 3417 } |
3463 // Update the remembered set if it has changed. | 3418 |
3464 if (result_rset != rset_word) { | 3419 |
3465 Memory::uint32_at(rset_address) = result_rset; | 3420 // Compute end address of the first map preceding given addr. |
3466 } | 3421 static inline Address MapEndAlign(Address addr) { |
3467 } else { | 3422 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart(); |
3468 // No bits in the word were set. This is the common case. | 3423 return page + ((addr - page) / Map::kSize * Map::kSize); |
3469 object_address += kPointerSize * kBitsPerInt; | 3424 } |
3470 } | 3425 |
3471 rset_address += kIntSize; | 3426 |
3472 } | 3427 static bool IteratePointersInDirtyMaps(Address start, |
3473 return set_bits_count; | 3428 Address end, |
3474 } | 3429 ObjectSlotCallback copy_object_func) { |
3475 | 3430 ASSERT(MapStartAlign(start) == start); |
3476 | 3431 ASSERT(MapEndAlign(end) == end); |
3477 void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) { | 3432 |
3478 ASSERT(Page::is_rset_in_use()); | 3433 Address map_address = start; |
3479 ASSERT(space == old_pointer_space_ || space == map_space_); | 3434 bool pointers_to_new_space_found = false; |
3480 | 3435 |
3481 static void* paged_rset_histogram = StatsTable::CreateHistogram( | 3436 while (map_address < end) { |
3482 "V8.RSetPaged", | 3437 ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address))); |
3483 0, | 3438 ASSERT(Memory::Object_at(map_address)->IsMap()); |
3484 Page::kObjectAreaSize / kPointerSize, | 3439 |
3485 30); | 3440 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset; |
3441 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset; | |
3442 | |
3443 if (Heap::IteratePointersInDirtyRegion(pointer_fields_start, | |
3444 pointer_fields_end, | |
3445 copy_object_func)) { | |
3446 pointers_to_new_space_found = true; | |
3447 } | |
3448 | |
3449 map_address += Map::kSize; | |
3450 } | |
3451 | |
3452 return pointers_to_new_space_found; | |
3453 } | |
3454 | |
3455 | |
3456 bool Heap::IteratePointersInDirtyMapsRegion( | |
3457 Address start, | |
3458 Address end, | |
3459 ObjectSlotCallback copy_object_func) { | |
3460 Address map_aligned_start = MapStartAlign(start); | |
3461 Address map_aligned_end = MapEndAlign(end); | |
3462 | |
3463 bool contains_pointers_to_new_space = false; | |
3464 | |
3465 if (map_aligned_start != start) { | |
3466 Address prev_map = map_aligned_start - Map::kSize; | |
3467 ASSERT(Memory::Object_at(prev_map)->IsMap()); | |
3468 | |
3469 Address pointer_fields_start = | |
3470 Max(start, prev_map + Map::kPointerFieldsBeginOffset); | |
3471 | |
3472 Address pointer_fields_end = | |
3473 Min(prev_map + Map::kCodeCacheOffset + kPointerSize, end); | |
3474 | |
3475 contains_pointers_to_new_space = | |
3476 IteratePointersInDirtyRegion(pointer_fields_start, | |
3477 pointer_fields_end, | |
3478 copy_object_func) | |
3479 || contains_pointers_to_new_space; | |
3480 } | |
3481 | |
3482 contains_pointers_to_new_space = | |
3483 IteratePointersInDirtyMaps(map_aligned_start, | |
3484 map_aligned_end, | |
3485 copy_object_func) | |
3486 || contains_pointers_to_new_space; | |
3487 | |
3488 if (map_aligned_end != end) { | |
3489 ASSERT(Memory::Object_at(map_aligned_end)->IsMap()); | |
3490 | |
3491 Address pointer_fields_start = map_aligned_end + Map::kPrototypeOffset; | |
3492 | |
3493 Address pointer_fields_end = | |
3494 Min(end, map_aligned_end + Map::kCodeCacheOffset + kPointerSize); | |
3495 | |
3496 contains_pointers_to_new_space = | |
3497 IteratePointersInDirtyRegion(pointer_fields_start, | |
3498 pointer_fields_end, | |
3499 copy_object_func) | |
3500 || contains_pointers_to_new_space; | |
3501 } | |
3502 | |
3503 return contains_pointers_to_new_space; | |
3504 } | |
3505 | |
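A quick numeric check of the alignment arithmetic behind the MapStartAlign and MapEndAlign helpers used above: round an address up, respectively down, to a map boundary relative to the page's object area. The map size here is an arbitrary illustration value, not the real Map::kSize.

```cpp
#include <cassert>
#include <cstdint>

const uintptr_t kMapSize = 40;  // assumption for the example

uintptr_t MapStartAlign(uintptr_t page_start, uintptr_t addr) {
  return page_start +
         (((addr - page_start) + (kMapSize - 1)) / kMapSize * kMapSize);
}

uintptr_t MapEndAlign(uintptr_t page_start, uintptr_t addr) {
  return page_start + ((addr - page_start) / kMapSize * kMapSize);
}

int main() {
  assert(MapStartAlign(0, 0) == 0);   // already on a map boundary
  assert(MapStartAlign(0, 1) == 40);  // round up to the next map
  assert(MapEndAlign(0, 79) == 40);   // round down to a map start
  assert(MapEndAlign(0, 80) == 80);
  return 0;
}
```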
3506 | |
3507 void Heap::IterateAndMarkPointersToNewSpace(Address start, | |
3508 Address end, | |
3509 ObjectSlotCallback callback) { | |
3510 Address slot_address = start; | |
3511 Page* page = Page::FromAddress(start); | |
3512 | |
3513 uint32_t marks = page->GetRegionMarks(); | |
3514 | |
3515 while (slot_address < end) { | |
3516 Object** slot = reinterpret_cast<Object**>(slot_address); | |
3517 if (Heap::InNewSpace(*slot)) { | |
3518 ASSERT((*slot)->IsHeapObject()); | |
3519 callback(reinterpret_cast<HeapObject**>(slot)); | |
3520 if (Heap::InNewSpace(*slot)) { | |
3521 ASSERT((*slot)->IsHeapObject()); | |
3522 marks |= page->GetRegionMaskForAddress(slot_address); | |
3523 } | |
3524 } | |
3525 slot_address += kPointerSize; | |
3526 } | |
3527 | |
3528 page->SetRegionMarks(marks); | |
3529 } | |
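The marks |= page->GetRegionMaskForAddress(slot_address) update above relies on a fixed mapping from a slot address to one bit of the page's marks word. A plausible sketch under assumed constants (an 8K page split into 32 regions of 256 bytes; the real V8 values may differ):

```cpp
#include <cstdint>

const uintptr_t kPageAlignmentMask = 0x1FFF;  // assumed 8K pages
const uintptr_t kRegionSizeLog2 = 8;          // assumed 256-byte regions

uint32_t GetRegionMaskForAddress(uintptr_t addr) {
  uintptr_t offset = addr & kPageAlignmentMask;  // offset within the page
  return 1u << (offset >> kRegionSizeLog2);      // one bit per region
}

int main() {
  // Slots at offsets 0x000 and 0x1F0 fall into regions 0 and 1.
  return (GetRegionMaskForAddress(0x000) == 1u &&
          GetRegionMaskForAddress(0x1F0) == 2u) ? 0 : 1;
}
```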
3530 | |
3531 | |
3532 uint32_t Heap::IterateDirtyRegions( | |
3533 uint32_t marks, | |
3534 Address area_start, | |
3535 Address area_end, | |
3536 DirtyRegionCallback visit_dirty_region, | |
3537 ObjectSlotCallback copy_object_func) { | |
3538 uint32_t newmarks = 0; | |
3539 uint32_t mask = 1; | |
3540 | |
3541 if (area_start >= area_end) { | |
3542 return newmarks; | |
3543 } | |
3544 | |
3545 Address region_start = area_start; | |
3546 | |
3547 // area_start does not necessarily coincide with the start of the first | 
3548 // region. Thus, to calculate the beginning of the next region we round | 
3549 // area_start up to a multiple of Page::kRegionSize. | 
3550 Address second_region = | |
3551 reinterpret_cast<Address>( | |
3552 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) & | |
3553 ~Page::kRegionAlignmentMask); | |
3554 | |
3555 // The next region might extend beyond area_end. | 
3556 Address region_end = Min(second_region, area_end); | |
3557 | |
3558 if (marks & mask) { | |
3559 if (visit_dirty_region(region_start, region_end, copy_object_func)) { | |
3560 newmarks |= mask; | |
3561 } | |
3562 } | |
3563 mask <<= 1; | |
3564 | |
3565 // Iterate over subsequent regions which lie fully inside [area_start, area_end). | 
3566 region_start = region_end; | |
3567 region_end = region_start + Page::kRegionSize; | |
3568 | |
3569 while (region_end <= area_end) { | |
3570 if (marks & mask) { | |
3571 if (visit_dirty_region(region_start, region_end, copy_object_func)) { | |
3572 newmarks |= mask; | |
3573 } | |
3574 } | |
3575 | |
3576 region_start = region_end; | |
3577 region_end = region_start + Page::kRegionSize; | |
3578 | |
3579 mask <<= 1; | |
3580 } | |
3581 | |
3582 if (region_start != area_end) { | |
3583 // A small piece of the area is left unvisited because area_end does not | 
3584 // coincide with a region end. Check whether the region covering the last | 
3585 // part of the area is dirty. | 
3586 if (marks & mask) { | |
3587 if (visit_dirty_region(region_start, area_end, copy_object_func)) { | |
3588 newmarks |= mask; | |
3589 } | |
3590 } | |
3591 } | |
3592 | |
3593 return newmarks; | |
3594 } | |
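A numeric check of the boundary handling at the top of this function, with 256 bytes standing in for Page::kRegionSize: the first region visited may be partial, and rounding area_start up yields where the second region begins.

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kRegionSize = 256;  // assumption for the example
  const uintptr_t kRegionAlignmentMask = kRegionSize - 1;

  uintptr_t area_start = 0x1060;  // not region-aligned
  uintptr_t second_region = (area_start + kRegionSize) & ~kRegionAlignmentMask;

  assert(second_region == 0x1100);             // next region boundary
  assert(second_region - area_start == 0xA0);  // first region is partial
  return 0;
}
```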
3595 | |
3596 | |
3597 | |
3598 void Heap::IterateDirtyRegions( | |
3599 PagedSpace* space, | |
3600 DirtyRegionCallback visit_dirty_region, | |
3601 ObjectSlotCallback copy_object_func, | |
3602 ExpectedPageWatermarkState expected_page_watermark_state) { | |
3486 | 3603 |
3487 PageIterator it(space, PageIterator::PAGES_IN_USE); | 3604 PageIterator it(space, PageIterator::PAGES_IN_USE); |
3605 | |
3488 while (it.has_next()) { | 3606 while (it.has_next()) { |
3489 Page* page = it.next(); | 3607 Page* page = it.next(); |
3490 int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(), | 3608 uint32_t marks = page->GetRegionMarks(); |
3491 page->RSetStart(), copy_object_func); | 3609 |
3492 if (paged_rset_histogram != NULL) { | 3610 if (marks != Page::kAllRegionsCleanMarks) { |
3493 StatsTable::AddHistogramSample(paged_rset_histogram, count); | 3611 Address start = page->ObjectAreaStart(); |
3494 } | 3612 |
3495 } | 3613 // Do not try to visit pointers beyond page allocation watermark. |
3496 } | 3614 // Page can contain garbage pointers there. |
3497 | 3615 Address end; |
3616 | |
3617 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) || | |
3618 page->IsWatermarkValid()) { | |
3619 end = page->AllocationWatermark(); | |
3620 } else { | |
3621 end = page->CachedAllocationWatermark(); | |
3622 } | |
3623 | |
3624 ASSERT(space == old_pointer_space_ || | |
3625 (space == map_space_ && | |
3626 ((page->ObjectAreaStart() - end) % Map::kSize == 0))); | |
3627 | |
3628 page->SetRegionMarks(IterateDirtyRegions(marks, | |
3629 start, | |
3630 end, | |
3631 visit_dirty_region, | |
3632 copy_object_func)); | |
3633 } | |
3634 | |
3635 // Mark the page watermark as invalid to maintain the watermark validity | 
3636 // invariant. See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details. | 
3637 page->InvalidateWatermark(true); | |
3638 } | |
3639 } | |
3640 | |
3498 | 3641 |
3499 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { | 3642 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { |
3500 IterateStrongRoots(v, mode); | 3643 IterateStrongRoots(v, mode); |
3501 IterateWeakRoots(v, mode); | 3644 IterateWeakRoots(v, mode); |
3502 } | 3645 } |
3503 | 3646 |
3504 | 3647 |
3505 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) { | 3648 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) { |
3506 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex])); | 3649 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex])); |
3507 v->Synchronize("symbol_table"); | 3650 v->Synchronize("symbol_table"); |
(...skipping 993 matching lines...)
4501 void ExternalStringTable::TearDown() { | 4644 void ExternalStringTable::TearDown() { |
4502 new_space_strings_.Free(); | 4645 new_space_strings_.Free(); |
4503 old_space_strings_.Free(); | 4646 old_space_strings_.Free(); |
4504 } | 4647 } |
4505 | 4648 |
4506 | 4649 |
4507 List<Object*> ExternalStringTable::new_space_strings_; | 4650 List<Object*> ExternalStringTable::new_space_strings_; |
4508 List<Object*> ExternalStringTable::old_space_strings_; | 4651 List<Object*> ExternalStringTable::old_space_strings_; |
4509 | 4652 |
4510 } } // namespace v8::internal | 4653 } } // namespace v8::internal |