Chromium Code Reviews

Side by Side Diff: src/heap.cc

Issue 2144006: Cardmarking writebarrier. (Closed)
Patch Set: change NewSpace and SemiSpace Contains to match HasHeapObjectTag (created 10 years, 7 months ago)
OLD | NEW
1 // Copyright 2009 the V8 project authors. All rights reserved. 1 // Copyright 2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 308 matching lines...)
319 unflattened_strings_length_ = 0; 319 unflattened_strings_length_ = 0;
320 #ifdef DEBUG 320 #ifdef DEBUG
321 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC); 321 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
322 allow_allocation(false); 322 allow_allocation(false);
323 323
324 if (FLAG_verify_heap) { 324 if (FLAG_verify_heap) {
325 Verify(); 325 Verify();
326 } 326 }
327 327
328 if (FLAG_gc_verbose) Print(); 328 if (FLAG_gc_verbose) Print();
329
330 if (FLAG_print_rset) {
331 // Not all spaces have remembered set bits that we care about.
332 old_pointer_space_->PrintRSet();
333 map_space_->PrintRSet();
334 lo_space_->PrintRSet();
335 }
336 #endif 329 #endif
337 330
338 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 331 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
339 ReportStatisticsBeforeGC(); 332 ReportStatisticsBeforeGC();
340 #endif 333 #endif
341 } 334 }
342 335
343 int Heap::SizeOfObjects() { 336 int Heap::SizeOfObjects() {
344 int total = 0; 337 int total = 0;
345 AllSpaces spaces; 338 AllSpaces spaces;
(...skipping 166 matching lines...)
512 gc_performed = true; 505 gc_performed = true;
513 } 506 }
514 if (!(map_space->ReserveSpace(map_space_size))) { 507 if (!(map_space->ReserveSpace(map_space_size))) {
515 Heap::CollectGarbage(map_space_size, MAP_SPACE); 508 Heap::CollectGarbage(map_space_size, MAP_SPACE);
516 gc_performed = true; 509 gc_performed = true;
517 } 510 }
518 if (!(cell_space->ReserveSpace(cell_space_size))) { 511 if (!(cell_space->ReserveSpace(cell_space_size))) {
519 Heap::CollectGarbage(cell_space_size, CELL_SPACE); 512 Heap::CollectGarbage(cell_space_size, CELL_SPACE);
520 gc_performed = true; 513 gc_performed = true;
521 } 514 }
522 // We add a slack-factor of 2 in order to have space for the remembered 515 // We add a slack-factor of 2 in order to have space for a series of
523 // set and a series of large-object allocations that are only just larger 516 // large-object allocations that are only just larger than the page size.
524 // than the page size.
525 large_object_size *= 2; 517 large_object_size *= 2;
526 // The ReserveSpace method on the large object space checks how much 518 // The ReserveSpace method on the large object space checks how much
527 // we can expand the old generation. This includes expansion caused by 519 // we can expand the old generation. This includes expansion caused by
528 // allocation in the other spaces. 520 // allocation in the other spaces.
529 large_object_size += cell_space_size + map_space_size + code_space_size + 521 large_object_size += cell_space_size + map_space_size + code_space_size +
530 data_space_size + pointer_space_size; 522 data_space_size + pointer_space_size;
531 if (!(lo_space->ReserveSpace(large_object_size))) { 523 if (!(lo_space->ReserveSpace(large_object_size))) {
532 Heap::CollectGarbage(large_object_size, LO_SPACE); 524 Heap::CollectGarbage(large_object_size, LO_SPACE);
533 gc_performed = true; 525 gc_performed = true;
534 } 526 }
(...skipping 30 matching lines...)
565 }; 557 };
566 558
567 559
568 void Heap::ClearJSFunctionResultCaches() { 560 void Heap::ClearJSFunctionResultCaches() {
569 if (Bootstrapper::IsActive()) return; 561 if (Bootstrapper::IsActive()) return;
570 ClearThreadJSFunctionResultCachesVisitor visitor; 562 ClearThreadJSFunctionResultCachesVisitor visitor;
571 ThreadManager::IterateThreads(&visitor); 563 ThreadManager::IterateThreads(&visitor);
572 } 564 }
573 565
574 566
567 #ifdef DEBUG
568
569 enum PageWatermarkValidity {
570 ALL_VALID,
571 ALL_INVALID
572 };
573
574 static void VerifyPageWatermarkValidity(PagedSpace* space,
575 PageWatermarkValidity validity) {
576 PageIterator it(space, PageIterator::PAGES_IN_USE);
577 bool expected_value = (validity == ALL_VALID);
578 while (it.has_next()) {
579 Page* page = it.next();
580 ASSERT(page->IsWatermarkValid() == expected_value);
581 }
582 }
583 #endif
584
585
575 void Heap::PerformGarbageCollection(AllocationSpace space, 586 void Heap::PerformGarbageCollection(AllocationSpace space,
576 GarbageCollector collector, 587 GarbageCollector collector,
577 GCTracer* tracer) { 588 GCTracer* tracer) {
578 VerifySymbolTable(); 589 VerifySymbolTable();
579 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) { 590 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
580 ASSERT(!allocation_allowed_); 591 ASSERT(!allocation_allowed_);
581 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); 592 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
582 global_gc_prologue_callback_(); 593 global_gc_prologue_callback_();
583 } 594 }
584 595
(...skipping 224 matching lines...)
809 } 820 }
810 821
811 822
812 void Heap::Scavenge() { 823 void Heap::Scavenge() {
813 #ifdef DEBUG 824 #ifdef DEBUG
814 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); 825 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
815 #endif 826 #endif
816 827
817 gc_state_ = SCAVENGE; 828 gc_state_ = SCAVENGE;
818 829
830 Page::FlipMeaningOfInvalidatedWatermarkFlag();
831 #ifdef DEBUG
832 VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
833 VerifyPageWatermarkValidity(map_space_, ALL_VALID);
834 #endif
835
836 // We do not update the allocation watermark of the top page during linear
837 // allocation, to avoid overhead. So to maintain the watermark invariant
838 // we have to manually cache the watermark and mark the top page as having
839 // an invalid watermark. This guarantees that dirty region iteration uses a
840 // correct watermark even if a linear allocation happens.
841 old_pointer_space_->FlushTopPageWatermark();
842 map_space_->FlushTopPageWatermark();
843
819 // Implements Cheney's copying algorithm 844 // Implements Cheney's copying algorithm
820 LOG(ResourceEvent("scavenge", "begin")); 845 LOG(ResourceEvent("scavenge", "begin"));
821 846
822 // Clear descriptor cache. 847 // Clear descriptor cache.
823 DescriptorLookupCache::Clear(); 848 DescriptorLookupCache::Clear();
824 849
825 // Used for updating survived_since_last_expansion_ at function end. 850 // Used for updating survived_since_last_expansion_ at function end.
826 int survived_watermark = PromotedSpaceSize(); 851 int survived_watermark = PromotedSpaceSize();
827 852
828 CheckNewSpaceExpansionCriteria(); 853 CheckNewSpaceExpansionCriteria();
(...skipping 22 matching lines...)
851 // objects are at least one pointer in size. 876 // objects are at least one pointer in size.
852 Address new_space_front = new_space_.ToSpaceLow(); 877 Address new_space_front = new_space_.ToSpaceLow();
853 promotion_queue.Initialize(new_space_.ToSpaceHigh()); 878 promotion_queue.Initialize(new_space_.ToSpaceHigh());
854 879
855 ScavengeVisitor scavenge_visitor; 880 ScavengeVisitor scavenge_visitor;
856 // Copy roots. 881 // Copy roots.
857 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE); 882 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
858 883
859 // Copy objects reachable from the old generation. By definition, 884 // Copy objects reachable from the old generation. By definition,
860 // there are no intergenerational pointers in code or data spaces. 885 // there are no intergenerational pointers in code or data spaces.
861 IterateRSet(old_pointer_space_, &ScavengePointer); 886 IterateDirtyRegions(old_pointer_space_,
862 IterateRSet(map_space_, &ScavengePointer); 887 &IteratePointersInDirtyRegion,
863 lo_space_->IterateRSet(&ScavengePointer); 888 &ScavengePointer,
889 WATERMARK_CAN_BE_INVALID);
890
891 IterateDirtyRegions(map_space_,
892 &IteratePointersInDirtyMapsRegion,
893 &ScavengePointer,
894 WATERMARK_CAN_BE_INVALID);
895
896 lo_space_->IterateDirtyRegions(&ScavengePointer);
864 897
865 // Copy objects reachable from cells by scavenging cell values directly. 898 // Copy objects reachable from cells by scavenging cell values directly.
866 HeapObjectIterator cell_iterator(cell_space_); 899 HeapObjectIterator cell_iterator(cell_space_);
867 for (HeapObject* cell = cell_iterator.next(); 900 for (HeapObject* cell = cell_iterator.next();
868 cell != NULL; cell = cell_iterator.next()) { 901 cell != NULL; cell = cell_iterator.next()) {
869 if (cell->IsJSGlobalPropertyCell()) { 902 if (cell->IsJSGlobalPropertyCell()) {
870 Address value_address = 903 Address value_address =
871 reinterpret_cast<Address>(cell) + 904 reinterpret_cast<Address>(cell) +
872 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); 905 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
873 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); 906 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
(...skipping 82 matching lines...)
956 } 989 }
957 990
958 // Promote and process all the to-be-promoted objects. 991 // Promote and process all the to-be-promoted objects.
959 while (!promotion_queue.is_empty()) { 992 while (!promotion_queue.is_empty()) {
960 HeapObject* source; 993 HeapObject* source;
961 Map* map; 994 Map* map;
962 promotion_queue.remove(&source, &map); 995 promotion_queue.remove(&source, &map);
963 // Copy the from-space object to its new location (given by the 996 // Copy the from-space object to its new location (given by the
964 // forwarding address) and fix its map. 997 // forwarding address) and fix its map.
965 HeapObject* target = source->map_word().ToForwardingAddress(); 998 HeapObject* target = source->map_word().ToForwardingAddress();
966 CopyBlock(reinterpret_cast<Object**>(target->address()), 999 int size = source->SizeFromMap(map);
967 reinterpret_cast<Object**>(source->address()), 1000 CopyBlock(target->address(), source->address(), size);
968 source->SizeFromMap(map));
969 target->set_map(map); 1001 target->set_map(map);
970 1002
971 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 1003 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
972 // Update NewSpace stats if necessary. 1004 // Update NewSpace stats if necessary.
973 RecordCopiedObject(target); 1005 RecordCopiedObject(target);
974 #endif 1006 #endif
975 // Visit the newly copied object for pointers to new space. 1007 // Visit the newly copied object for pointers to new space.
976 target->Iterate(scavenge_visitor); 1008 ASSERT(!target->IsMap());
977 UpdateRSet(target); 1009 IterateAndMarkPointersToNewSpace(target->address(),
1010 target->address() + size,
1011 &ScavengePointer);
978 } 1012 }
979 1013
980 // Take another spin if there are now unswept objects in new space 1014 // Take another spin if there are now unswept objects in new space
981 // (there are currently no more unswept promoted objects). 1015 // (there are currently no more unswept promoted objects).
982 } while (new_space_front < new_space_.top()); 1016 } while (new_space_front < new_space_.top());
983 1017
984 return new_space_front; 1018 return new_space_front;
985 } 1019 }
986 1020
987 1021
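The promotion-queue loop above is the "take another spin" half of Cheney's copying algorithm named at the top of Scavenge: everything between new_space_front and the allocation top has been copied but not yet scanned. Below is a minimal, self-contained sketch of that structure using a toy object model instead of V8's maps and map words; all names in it are illustrative, not V8 API.

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

// Toy object: two outgoing pointers and a payload. The 'forward' field plays
// the role of V8's forwarding map word.
struct Node {
  Node* child[2] = {nullptr, nullptr};
  Node* forward = nullptr;   // set once the node has been evacuated
  int payload = 0;
};

class ToyScavenger {
 public:
  explicit ToyScavenger(size_t capacity) : to_space_(capacity), top_(0) {}

  // Evacuate the referent of *slot into to-space (unless already done) and
  // redirect the slot to the copy.
  void ScavengeSlot(Node** slot) {
    Node* obj = *slot;
    if (obj == nullptr) return;
    if (obj->forward == nullptr) {
      assert(top_ < to_space_.size());
      to_space_[top_] = *obj;              // shallow copy into to-space
      obj->forward = &to_space_[top_++];   // leave a forwarding pointer behind
    }
    *slot = obj->forward;
  }

  // Cheney loop: objects between 'scan' and 'top_' are copied but unscanned;
  // scanning them may evacuate more objects and push top_ further out, which
  // is exactly the "take another spin" structure above.
  size_t Collect(std::vector<Node*>* roots) {
    for (Node*& root : *roots) ScavengeSlot(&root);
    for (size_t scan = 0; scan < top_; ++scan) {
      ScavengeSlot(&to_space_[scan].child[0]);
      ScavengeSlot(&to_space_[scan].child[1]);
    }
    return top_;   // number of live objects copied
  }

 private:
  std::vector<Node> to_space_;   // pre-sized so copies never move
  size_t top_;
};

int main() {
  Node a, b, c;
  a.child[0] = &b; b.child[1] = &c; c.child[0] = &a;   // a cycle of three live nodes
  std::vector<Node*> roots = {&a};
  ToyScavenger gc(16);
  std::printf("copied %zu objects\n", gc.Collect(&roots));   // prints: copied 3 objects
}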
988 void Heap::ClearRSetRange(Address start, int size_in_bytes) {
989 uint32_t start_bit;
990 Address start_word_address =
991 Page::ComputeRSetBitPosition(start, 0, &start_bit);
992 uint32_t end_bit;
993 Address end_word_address =
994 Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
995 0,
996 &end_bit);
997
998 // We want to clear the bits in the starting word starting with the
999 // first bit, and in the ending word up to and including the last
1000 // bit. Build a pair of bitmasks to do that.
1001 uint32_t start_bitmask = start_bit - 1;
1002 uint32_t end_bitmask = ~((end_bit << 1) - 1);
1003
1004 // If the start address and end address are the same, we mask that
1005 // word once, otherwise mask the starting and ending word
1006 // separately and all the ones in between.
1007 if (start_word_address == end_word_address) {
1008 Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
1009 } else {
1010 Memory::uint32_at(start_word_address) &= start_bitmask;
1011 Memory::uint32_at(end_word_address) &= end_bitmask;
1012 start_word_address += kIntSize;
1013 memset(start_word_address, 0, end_word_address - start_word_address);
1014 }
1015 }
1016
1017
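The start_bitmask/end_bitmask arithmetic in ClearRSetRange above (old code, removed by this patch) is the standard pattern for clearing a run of bits that spans several 32-bit words. A standalone sketch of the same idea, written with plain bit indices rather than the single-bit masks produced by Page::ComputeRSetBitPosition:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Clear bits [start_bit, end_bit] (inclusive) in an array of 32-bit words.
void ClearBitRange(uint32_t* words, size_t start_bit, size_t end_bit) {
  assert(start_bit <= end_bit);
  size_t start_word = start_bit / 32;
  size_t end_word = end_bit / 32;
  // Bits we keep in the boundary words: everything below the start bit, and
  // everything above the end bit (mirrors start_bitmask / end_bitmask above).
  uint32_t keep_below_start = (1u << (start_bit % 32)) - 1;
  uint32_t keep_above_end = ~((2u << (end_bit % 32)) - 1);
  if (start_word == end_word) {
    words[start_word] &= (keep_below_start | keep_above_end);
  } else {
    words[start_word] &= keep_below_start;
    words[end_word] &= keep_above_end;
    // Whole words strictly between the two boundaries are zeroed wholesale.
    std::memset(words + start_word + 1, 0,
                (end_word - start_word - 1) * sizeof(uint32_t));
  }
}

int main() {
  uint32_t words[3] = {0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu};
  ClearBitRange(words, 4, 67);   // the range spans all three words
  std::printf("%08x %08x %08x\n", words[0], words[1], words[2]);
  // Prints: 0000000f 00000000 fffffff0
}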
1018 class UpdateRSetVisitor: public ObjectVisitor {
1019 public:
1020
1021 void VisitPointer(Object** p) {
1022 UpdateRSet(p);
1023 }
1024
1025 void VisitPointers(Object** start, Object** end) {
1026 // Update a store into slots [start, end), used (a) to update remembered
1027 // set when promoting a young object to old space or (b) to rebuild
1028 // remembered sets after a mark-compact collection.
1029 for (Object** p = start; p < end; p++) UpdateRSet(p);
1030 }
1031 private:
1032
1033 void UpdateRSet(Object** p) {
1034 // The remembered set should not be set. It should be clear for objects
1035 // newly copied to old space, and it is cleared before rebuilding in the
1036 // mark-compact collector.
1037 ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
1038 if (Heap::InNewSpace(*p)) {
1039 Page::SetRSet(reinterpret_cast<Address>(p), 0);
1040 }
1041 }
1042 };
1043
1044
1045 int Heap::UpdateRSet(HeapObject* obj) {
1046 ASSERT(!InNewSpace(obj));
1047 // Special handling of fixed arrays to iterate the body based on the start
1048 // address and offset. Just iterating the pointers as in UpdateRSetVisitor
1049 // will not work because Page::SetRSet needs to have the start of the
1050 // object for large object pages.
1051 if (obj->IsFixedArray()) {
1052 FixedArray* array = FixedArray::cast(obj);
1053 int length = array->length();
1054 for (int i = 0; i < length; i++) {
1055 int offset = FixedArray::kHeaderSize + i * kPointerSize;
1056 ASSERT(!Page::IsRSetSet(obj->address(), offset));
1057 if (Heap::InNewSpace(array->get(i))) {
1058 Page::SetRSet(obj->address(), offset);
1059 }
1060 }
1061 } else if (!obj->IsCode()) {
1062 // Skip code object, we know it does not contain inter-generational
1063 // pointers.
1064 UpdateRSetVisitor v;
1065 obj->Iterate(&v);
1066 }
1067 return obj->Size();
1068 }
1069
1070
1071 void Heap::RebuildRSets() {
1072 // By definition, we do not care about remembered set bits in code,
1073 // data, or cell spaces.
1074 map_space_->ClearRSet();
1075 RebuildRSets(map_space_);
1076
1077 old_pointer_space_->ClearRSet();
1078 RebuildRSets(old_pointer_space_);
1079
1080 Heap::lo_space_->ClearRSet();
1081 RebuildRSets(lo_space_);
1082 }
1083
1084
1085 void Heap::RebuildRSets(PagedSpace* space) {
1086 HeapObjectIterator it(space);
1087 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1088 Heap::UpdateRSet(obj);
1089 }
1090
1091
1092 void Heap::RebuildRSets(LargeObjectSpace* space) {
1093 LargeObjectIterator it(space);
1094 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1095 Heap::UpdateRSet(obj);
1096 }
1097
1098
1099 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 1022 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1100 void Heap::RecordCopiedObject(HeapObject* obj) { 1023 void Heap::RecordCopiedObject(HeapObject* obj) {
1101 bool should_record = false; 1024 bool should_record = false;
1102 #ifdef DEBUG 1025 #ifdef DEBUG
1103 should_record = FLAG_heap_stats; 1026 should_record = FLAG_heap_stats;
1104 #endif 1027 #endif
1105 #ifdef ENABLE_LOGGING_AND_PROFILING 1028 #ifdef ENABLE_LOGGING_AND_PROFILING
1106 should_record = should_record || FLAG_log_gc; 1029 should_record = should_record || FLAG_log_gc;
1107 #endif 1030 #endif
1108 if (should_record) { 1031 if (should_record) {
1109 if (new_space_.Contains(obj)) { 1032 if (new_space_.Contains(obj)) {
1110 new_space_.RecordAllocation(obj); 1033 new_space_.RecordAllocation(obj);
1111 } else { 1034 } else {
1112 new_space_.RecordPromotion(obj); 1035 new_space_.RecordPromotion(obj);
1113 } 1036 }
1114 } 1037 }
1115 } 1038 }
1116 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 1039 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1117 1040
1118 1041
1119 1042
1120 HeapObject* Heap::MigrateObject(HeapObject* source, 1043 HeapObject* Heap::MigrateObject(HeapObject* source,
1121 HeapObject* target, 1044 HeapObject* target,
1122 int size) { 1045 int size) {
1123 // Copy the content of source to target. 1046 // Copy the content of source to target.
1124 CopyBlock(reinterpret_cast<Object**>(target->address()), 1047 CopyBlock(target->address(), source->address(), size);
1125 reinterpret_cast<Object**>(source->address()),
1126 size);
1127 1048
1128 // Set the forwarding address. 1049 // Set the forwarding address.
1129 source->set_map_word(MapWord::FromForwardingAddress(target)); 1050 source->set_map_word(MapWord::FromForwardingAddress(target));
1130 1051
1131 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 1052 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1132 // Update NewSpace stats if necessary. 1053 // Update NewSpace stats if necessary.
1133 RecordCopiedObject(target); 1054 RecordCopiedObject(target);
1134 #endif 1055 #endif
1135 1056
1136 return target; 1057 return target;
(...skipping 34 matching lines...)
1171 // so we can store the from-space address and map pointer of promoted 1092 // so we can store the from-space address and map pointer of promoted
1172 // objects in the to space. 1093 // objects in the to space.
1173 ASSERT(object_size >= 2 * kPointerSize); 1094 ASSERT(object_size >= 2 * kPointerSize);
1174 1095
1175 // If the object should be promoted, we try to copy it to old space. 1096 // If the object should be promoted, we try to copy it to old space.
1176 if (ShouldBePromoted(object->address(), object_size)) { 1097 if (ShouldBePromoted(object->address(), object_size)) {
1177 Object* result; 1098 Object* result;
1178 if (object_size > MaxObjectSizeInPagedSpace()) { 1099 if (object_size > MaxObjectSizeInPagedSpace()) {
1179 result = lo_space_->AllocateRawFixedArray(object_size); 1100 result = lo_space_->AllocateRawFixedArray(object_size);
1180 if (!result->IsFailure()) { 1101 if (!result->IsFailure()) {
1181 // Save the from-space object pointer and its map pointer at the
1182 // top of the to space to be swept and copied later. Write the
1183 // forwarding address over the map word of the from-space
1184 // object.
1185 HeapObject* target = HeapObject::cast(result); 1102 HeapObject* target = HeapObject::cast(result);
1186 promotion_queue.insert(object, first_word.ToMap());
1187 object->set_map_word(MapWord::FromForwardingAddress(target));
1188 1103
1189 // Give the space allocated for the result a proper map by 1104 if (object->IsFixedArray()) {
1190 // treating it as a free list node (not linked into the free 1105 // Save the from-space object pointer and its map pointer at the
1191 // list). 1106 // top of the to space to be swept and copied later. Write the
1192 FreeListNode* node = FreeListNode::FromAddress(target->address()); 1107 // forwarding address over the map word of the from-space
1193 node->set_size(object_size); 1108 // object.
1109 promotion_queue.insert(object, first_word.ToMap());
1110 object->set_map_word(MapWord::FromForwardingAddress(target));
1194 1111
1195 *p = target; 1112 // Give the space allocated for the result a proper map by
1113 // treating it as a free list node (not linked into the free
1114 // list).
1115 FreeListNode* node = FreeListNode::FromAddress(target->address());
1116 node->set_size(object_size);
1117
1118 *p = target;
1119 } else {
1120 // In large object space only fixed arrays might possibly contain
1121 // intergenerational references.
1122 // All other objects can be copied immediately and not revisited.
1123 *p = MigrateObject(object, target, object_size);
1124 }
1125
1196 tracer()->increment_promoted_objects_size(object_size); 1126 tracer()->increment_promoted_objects_size(object_size);
1197 return; 1127 return;
1198 } 1128 }
1199 } else { 1129 } else {
1200 OldSpace* target_space = Heap::TargetSpace(object); 1130 OldSpace* target_space = Heap::TargetSpace(object);
1201 ASSERT(target_space == Heap::old_pointer_space_ || 1131 ASSERT(target_space == Heap::old_pointer_space_ ||
1202 target_space == Heap::old_data_space_); 1132 target_space == Heap::old_data_space_);
1203 result = target_space->AllocateRaw(object_size); 1133 result = target_space->AllocateRaw(object_size);
1204 if (!result->IsFailure()) { 1134 if (!result->IsFailure()) {
1205 HeapObject* target = HeapObject::cast(result); 1135 HeapObject* target = HeapObject::cast(result);
(...skipping 469 matching lines...)
1675 if (obj->IsFailure()) return false; 1605 if (obj->IsFailure()) return false;
1676 roots_[constant_symbol_table[i].index] = String::cast(obj); 1606 roots_[constant_symbol_table[i].index] = String::cast(obj);
1677 } 1607 }
1678 1608
1679 // Allocate the hidden symbol which is used to identify the hidden properties 1609 // Allocate the hidden symbol which is used to identify the hidden properties
1680 // in JSObjects. The hash code has a special value so that it will not match 1610 // in JSObjects. The hash code has a special value so that it will not match
1681 // the empty string when searching for the property. It cannot be part of the 1611 // the empty string when searching for the property. It cannot be part of the
1682 // loop above because it needs to be allocated manually with the special 1612 // loop above because it needs to be allocated manually with the special
1683 // hash code in place. The hash code for the hidden_symbol is zero to ensure 1613 // hash code in place. The hash code for the hidden_symbol is zero to ensure
1684 // that it will always be at the first entry in property descriptors. 1614 // that it will always be at the first entry in property descriptors.
1685 obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask); 1615 obj = AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
1686 if (obj->IsFailure()) return false; 1616 if (obj->IsFailure()) return false;
1687 hidden_symbol_ = String::cast(obj); 1617 hidden_symbol_ = String::cast(obj);
1688 1618
1689 // Allocate the proxy for __proto__. 1619 // Allocate the proxy for __proto__.
1690 obj = AllocateProxy((Address) &Accessors::ObjectPrototype); 1620 obj = AllocateProxy((Address) &Accessors::ObjectPrototype);
1691 if (obj->IsFailure()) return false; 1621 if (obj->IsFailure()) return false;
1692 set_prototype_accessors(Proxy::cast(obj)); 1622 set_prototype_accessors(Proxy::cast(obj));
1693 1623
1694 // Allocate the code_stubs dictionary. The initial size is set to avoid 1624 // Allocate the code_stubs dictionary. The initial size is set to avoid
1695 // expanding the dictionary during bootstrapping. 1625 // expanding the dictionary during bootstrapping.
(...skipping 215 matching lines...)
1911 share->set_formal_parameter_count(0); 1841 share->set_formal_parameter_count(0);
1912 share->set_instance_class_name(Object_symbol()); 1842 share->set_instance_class_name(Object_symbol());
1913 share->set_function_data(undefined_value()); 1843 share->set_function_data(undefined_value());
1914 share->set_script(undefined_value()); 1844 share->set_script(undefined_value());
1915 share->set_start_position_and_type(0); 1845 share->set_start_position_and_type(0);
1916 share->set_debug_info(undefined_value()); 1846 share->set_debug_info(undefined_value());
1917 share->set_inferred_name(empty_string()); 1847 share->set_inferred_name(empty_string());
1918 share->set_compiler_hints(0); 1848 share->set_compiler_hints(0);
1919 share->set_this_property_assignments_count(0); 1849 share->set_this_property_assignments_count(0);
1920 share->set_this_property_assignments(undefined_value()); 1850 share->set_this_property_assignments(undefined_value());
1851 share->set_num_literals(0);
1852 share->set_end_position(0);
1853 share->set_function_token_position(0);
1921 return result; 1854 return result;
1922 } 1855 }
1923 1856
1924 1857
1925 // Returns true for a character in a range. Both limits are inclusive. 1858 // Returns true for a character in a range. Both limits are inclusive.
1926 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) { 1859 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
1927 // This makes use of the unsigned wraparound. 1860
1928 return character - from <= to - from; 1861 return character - from <= to - from;
1929 } 1862 }
1930 1863
(...skipping 241 matching lines...)
2172 } 2105 }
2173 if (pretenure == NOT_TENURED) { 2106 if (pretenure == NOT_TENURED) {
2174 return AllocateByteArray(length); 2107 return AllocateByteArray(length);
2175 } 2108 }
2176 int size = ByteArray::SizeFor(length); 2109 int size = ByteArray::SizeFor(length);
2177 Object* result = (size <= MaxObjectSizeInPagedSpace()) 2110 Object* result = (size <= MaxObjectSizeInPagedSpace())
2178 ? old_data_space_->AllocateRaw(size) 2111 ? old_data_space_->AllocateRaw(size)
2179 : lo_space_->AllocateRaw(size); 2112 : lo_space_->AllocateRaw(size);
2180 if (result->IsFailure()) return result; 2113 if (result->IsFailure()) return result;
2181 2114
2182 reinterpret_cast<Array*>(result)->set_map(byte_array_map()); 2115 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2183 reinterpret_cast<Array*>(result)->set_length(length); 2116 reinterpret_cast<ByteArray*>(result)->set_length(length);
2184 return result; 2117 return result;
2185 } 2118 }
2186 2119
2187 2120
2188 Object* Heap::AllocateByteArray(int length) { 2121 Object* Heap::AllocateByteArray(int length) {
2189 if (length < 0 || length > ByteArray::kMaxLength) { 2122 if (length < 0 || length > ByteArray::kMaxLength) {
2190 return Failure::OutOfMemoryException(); 2123 return Failure::OutOfMemoryException();
2191 } 2124 }
2192 int size = ByteArray::SizeFor(length); 2125 int size = ByteArray::SizeFor(length);
2193 AllocationSpace space = 2126 AllocationSpace space =
2194 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE; 2127 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
2195 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE); 2128 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
2196 if (result->IsFailure()) return result; 2129 if (result->IsFailure()) return result;
2197 2130
2198 reinterpret_cast<Array*>(result)->set_map(byte_array_map()); 2131 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2199 reinterpret_cast<Array*>(result)->set_length(length); 2132 reinterpret_cast<ByteArray*>(result)->set_length(length);
2200 return result; 2133 return result;
2201 } 2134 }
2202 2135
2203 2136
2204 void Heap::CreateFillerObjectAt(Address addr, int size) { 2137 void Heap::CreateFillerObjectAt(Address addr, int size) {
2205 if (size == 0) return; 2138 if (size == 0) return;
2206 HeapObject* filler = HeapObject::FromAddress(addr); 2139 HeapObject* filler = HeapObject::FromAddress(addr);
2207 if (size == kPointerSize) { 2140 if (size == kPointerSize) {
2208 filler->set_map(one_pointer_filler_map()); 2141 filler->set_map(one_pointer_filler_map());
2209 } else if (size == 2 * kPointerSize) { 2142 } else if (size == 2 * kPointerSize) {
(...skipping 95 matching lines...)
2305 result = lo_space_->AllocateRawCode(obj_size); 2238 result = lo_space_->AllocateRawCode(obj_size);
2306 } else { 2239 } else {
2307 result = code_space_->AllocateRaw(obj_size); 2240 result = code_space_->AllocateRaw(obj_size);
2308 } 2241 }
2309 2242
2310 if (result->IsFailure()) return result; 2243 if (result->IsFailure()) return result;
2311 2244
2312 // Copy code object. 2245 // Copy code object.
2313 Address old_addr = code->address(); 2246 Address old_addr = code->address();
2314 Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); 2247 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2315 CopyBlock(reinterpret_cast<Object**>(new_addr), 2248 CopyBlock(new_addr, old_addr, obj_size);
2316 reinterpret_cast<Object**>(old_addr),
2317 obj_size);
2318 // Relocate the copy. 2249 // Relocate the copy.
2319 Code* new_code = Code::cast(result); 2250 Code* new_code = Code::cast(result);
2320 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address())); 2251 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2321 new_code->Relocate(new_addr - old_addr); 2252 new_code->Relocate(new_addr - old_addr);
2322 return new_code; 2253 return new_code;
2323 } 2254 }
2324 2255
2325 2256
2326 Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) { 2257 Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
2327 int new_body_size = RoundUp(code->instruction_size() + reloc_info.length(), 2258 int new_body_size = RoundUp(code->instruction_size() + reloc_info.length(),
(...skipping 125 matching lines...)
2453 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size()); 2384 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());
2454 2385
2455 // Do the allocation. 2386 // Do the allocation.
2456 Object* result = 2387 Object* result =
2457 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE); 2388 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
2458 if (result->IsFailure()) return result; 2389 if (result->IsFailure()) return result;
2459 2390
2460 // Copy the content. The arguments boilerplate doesn't have any 2391 // Copy the content. The arguments boilerplate doesn't have any
2461 // fields that point to new space so it's safe to skip the write 2392 // fields that point to new space so it's safe to skip the write
2462 // barrier here. 2393 // barrier here.
2463 CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()), 2394 CopyBlock(HeapObject::cast(result)->address(),
2464 reinterpret_cast<Object**>(boilerplate->address()), 2395 boilerplate->address(),
2465 kArgumentsObjectSize); 2396 kArgumentsObjectSize);
2466 2397
2467 // Set the two properties. 2398 // Set the two properties.
2468 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index, 2399 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
2469 callee); 2400 callee);
2470 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index, 2401 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
2471 Smi::FromInt(length), 2402 Smi::FromInt(length),
2472 SKIP_WRITE_BARRIER); 2403 SKIP_WRITE_BARRIER);
2473 2404
2474 // Check the state of the object 2405 // Check the state of the object
(...skipping 201 matching lines...)
2676 Map* map = source->map(); 2607 Map* map = source->map();
2677 int object_size = map->instance_size(); 2608 int object_size = map->instance_size();
2678 Object* clone; 2609 Object* clone;
2679 2610
2680 // If we're forced to always allocate, we use the general allocation 2611 // If we're forced to always allocate, we use the general allocation
2681 // functions which may leave us with an object in old space. 2612 // functions which may leave us with an object in old space.
2682 if (always_allocate()) { 2613 if (always_allocate()) {
2683 clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE); 2614 clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
2684 if (clone->IsFailure()) return clone; 2615 if (clone->IsFailure()) return clone;
2685 Address clone_address = HeapObject::cast(clone)->address(); 2616 Address clone_address = HeapObject::cast(clone)->address();
2686 CopyBlock(reinterpret_cast<Object**>(clone_address), 2617 CopyBlock(clone_address,
2687 reinterpret_cast<Object**>(source->address()), 2618 source->address(),
2688 object_size); 2619 object_size);
2689 // Update write barrier for all fields that lie beyond the header. 2620 // Update write barrier for all fields that lie beyond the header.
2690 RecordWrites(clone_address, 2621 RecordWrites(clone_address,
2691 JSObject::kHeaderSize, 2622 JSObject::kHeaderSize,
2692 (object_size - JSObject::kHeaderSize) / kPointerSize); 2623 (object_size - JSObject::kHeaderSize) / kPointerSize);
2693 } else { 2624 } else {
2694 clone = new_space_.AllocateRaw(object_size); 2625 clone = new_space_.AllocateRaw(object_size);
2695 if (clone->IsFailure()) return clone; 2626 if (clone->IsFailure()) return clone;
2696 ASSERT(Heap::InNewSpace(clone)); 2627 ASSERT(Heap::InNewSpace(clone));
2697 // Since we know the clone is allocated in new space, we can copy 2628 // Since we know the clone is allocated in new space, we can copy
2698 // the contents without worrying about updating the write barrier. 2629 // the contents without worrying about updating the write barrier.
2699 CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()), 2630 CopyBlock(HeapObject::cast(clone)->address(),
2700 reinterpret_cast<Object**>(source->address()), 2631 source->address(),
2701 object_size); 2632 object_size);
2702 } 2633 }
2703 2634
2704 FixedArray* elements = FixedArray::cast(source->elements()); 2635 FixedArray* elements = FixedArray::cast(source->elements());
2705 FixedArray* properties = FixedArray::cast(source->properties()); 2636 FixedArray* properties = FixedArray::cast(source->properties());
2706 // Update elements if necessary. 2637 // Update elements if necessary.
2707 if (elements->length() > 0) { 2638 if (elements->length() > 0) {
2708 Object* elem = CopyFixedArray(elements); 2639 Object* elem = CopyFixedArray(elements);
2709 if (elem->IsFailure()) return elem; 2640 if (elem->IsFailure()) return elem;
2710 JSObject::cast(clone)->set_elements(FixedArray::cast(elem)); 2641 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
(...skipping 250 matching lines...)
2961 ASSERT_EQ(size, HeapObject::cast(result)->Size()); 2892 ASSERT_EQ(size, HeapObject::cast(result)->Size());
2962 return result; 2893 return result;
2963 } 2894 }
2964 2895
2965 2896
2966 Object* Heap::AllocateEmptyFixedArray() { 2897 Object* Heap::AllocateEmptyFixedArray() {
2967 int size = FixedArray::SizeFor(0); 2898 int size = FixedArray::SizeFor(0);
2968 Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); 2899 Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
2969 if (result->IsFailure()) return result; 2900 if (result->IsFailure()) return result;
2970 // Initialize the object. 2901 // Initialize the object.
2971 reinterpret_cast<Array*>(result)->set_map(fixed_array_map()); 2902 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
2972 reinterpret_cast<Array*>(result)->set_length(0); 2903 reinterpret_cast<FixedArray*>(result)->set_length(0);
2973 return result; 2904 return result;
2974 } 2905 }
2975 2906
2976 2907
2977 Object* Heap::AllocateRawFixedArray(int length) { 2908 Object* Heap::AllocateRawFixedArray(int length) {
2978 if (length < 0 || length > FixedArray::kMaxLength) { 2909 if (length < 0 || length > FixedArray::kMaxLength) {
2979 return Failure::OutOfMemoryException(); 2910 return Failure::OutOfMemoryException();
2980 } 2911 }
2981 // Use the general function if we're forced to always allocate. 2912 // Use the general function if we're forced to always allocate.
2982 if (always_allocate()) return AllocateFixedArray(length, TENURED); 2913 if (always_allocate()) return AllocateFixedArray(length, TENURED);
2983 // Allocate the raw data for a fixed array. 2914 // Allocate the raw data for a fixed array.
2984 int size = FixedArray::SizeFor(length); 2915 int size = FixedArray::SizeFor(length);
2985 return size <= kMaxObjectSizeInNewSpace 2916 return size <= kMaxObjectSizeInNewSpace
2986 ? new_space_.AllocateRaw(size) 2917 ? new_space_.AllocateRaw(size)
2987 : lo_space_->AllocateRawFixedArray(size); 2918 : lo_space_->AllocateRawFixedArray(size);
2988 } 2919 }
2989 2920
2990 2921
2991 Object* Heap::CopyFixedArray(FixedArray* src) { 2922 Object* Heap::CopyFixedArray(FixedArray* src) {
2992 int len = src->length(); 2923 int len = src->length();
2993 Object* obj = AllocateRawFixedArray(len); 2924 Object* obj = AllocateRawFixedArray(len);
2994 if (obj->IsFailure()) return obj; 2925 if (obj->IsFailure()) return obj;
2995 if (Heap::InNewSpace(obj)) { 2926 if (Heap::InNewSpace(obj)) {
2996 HeapObject* dst = HeapObject::cast(obj); 2927 HeapObject* dst = HeapObject::cast(obj);
2997 CopyBlock(reinterpret_cast<Object**>(dst->address()), 2928 CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len));
2998 reinterpret_cast<Object**>(src->address()),
2999 FixedArray::SizeFor(len));
3000 return obj; 2929 return obj;
3001 } 2930 }
3002 HeapObject::cast(obj)->set_map(src->map()); 2931 HeapObject::cast(obj)->set_map(src->map());
3003 FixedArray* result = FixedArray::cast(obj); 2932 FixedArray* result = FixedArray::cast(obj);
3004 result->set_length(len); 2933 result->set_length(len);
3005 2934
3006 // Copy the content 2935 // Copy the content
3007 AssertNoAllocation no_gc; 2936 AssertNoAllocation no_gc;
3008 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc); 2937 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3009 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode); 2938 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3010 return result; 2939 return result;
3011 } 2940 }
3012 2941
3013 2942
3014 Object* Heap::AllocateFixedArray(int length) { 2943 Object* Heap::AllocateFixedArray(int length) {
3015 ASSERT(length >= 0); 2944 ASSERT(length >= 0);
3016 if (length == 0) return empty_fixed_array(); 2945 if (length == 0) return empty_fixed_array();
3017 Object* result = AllocateRawFixedArray(length); 2946 Object* result = AllocateRawFixedArray(length);
3018 if (!result->IsFailure()) { 2947 if (!result->IsFailure()) {
3019 // Initialize header. 2948 // Initialize header.
3020 reinterpret_cast<Array*>(result)->set_map(fixed_array_map()); 2949 FixedArray* array = reinterpret_cast<FixedArray*>(result);
3021 FixedArray* array = FixedArray::cast(result); 2950 array->set_map(fixed_array_map());
3022 array->set_length(length); 2951 array->set_length(length);
3023 // Initialize body. 2952 // Initialize body.
3024 ASSERT(!Heap::InNewSpace(undefined_value())); 2953 ASSERT(!Heap::InNewSpace(undefined_value()));
3025 MemsetPointer(array->data_start(), undefined_value(), length); 2954 MemsetPointer(array->data_start(), undefined_value(), length);
3026 } 2955 }
3027 return result; 2956 return result;
3028 } 2957 }
3029 2958
3030 2959
3031 Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) { 2960 Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
3032 if (length < 0 || length > FixedArray::kMaxLength) { 2961 if (length < 0 || length > FixedArray::kMaxLength) {
3033 return Failure::OutOfMemoryException(); 2962 return Failure::OutOfMemoryException();
3034 } 2963 }
3035 2964
3036 AllocationSpace space = 2965 AllocationSpace space =
3037 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE; 2966 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3038 int size = FixedArray::SizeFor(length); 2967 int size = FixedArray::SizeFor(length);
3039 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) { 2968 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
3040 // Too big for new space. 2969 // Too big for new space.
3041 space = LO_SPACE; 2970 space = LO_SPACE;
3042 } else if (space == OLD_POINTER_SPACE && 2971 } else if (space == OLD_POINTER_SPACE &&
3043 size > MaxObjectSizeInPagedSpace()) { 2972 size > MaxObjectSizeInPagedSpace()) {
3044 // Too big for old pointer space. 2973 // Too big for old pointer space.
3045 space = LO_SPACE; 2974 space = LO_SPACE;
3046 } 2975 }
3047 2976
3048 // Specialize allocation for the space. 2977 AllocationSpace retry_space =
3049 Object* result = Failure::OutOfMemoryException(); 2978 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
3050 if (space == NEW_SPACE) { 2979
3051 // We cannot use Heap::AllocateRaw() because it will not properly 2980 return AllocateRaw(size, space, retry_space);
3052 // allocate extra remembered set bits if always_allocate() is true and
3053 // new space allocation fails.
3054 result = new_space_.AllocateRaw(size);
3055 if (result->IsFailure() && always_allocate()) {
3056 if (size <= MaxObjectSizeInPagedSpace()) {
3057 result = old_pointer_space_->AllocateRaw(size);
3058 } else {
3059 result = lo_space_->AllocateRawFixedArray(size);
3060 }
3061 }
3062 } else if (space == OLD_POINTER_SPACE) {
3063 result = old_pointer_space_->AllocateRaw(size);
3064 } else {
3065 ASSERT(space == LO_SPACE);
3066 result = lo_space_->AllocateRawFixedArray(size);
3067 }
3068 return result;
3069 } 2981 }
3070 2982
3071 2983
3072 static Object* AllocateFixedArrayWithFiller(int length, 2984 static Object* AllocateFixedArrayWithFiller(int length,
3073 PretenureFlag pretenure, 2985 PretenureFlag pretenure,
3074 Object* filler) { 2986 Object* filler) {
3075 ASSERT(length >= 0); 2987 ASSERT(length >= 0);
3076 ASSERT(Heap::empty_fixed_array()->IsFixedArray()); 2988 ASSERT(Heap::empty_fixed_array()->IsFixedArray());
3077 if (length == 0) return Heap::empty_fixed_array(); 2989 if (length == 0) return Heap::empty_fixed_array();
3078 2990
(...skipping 27 matching lines...)
3106 3018
3107 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map()); 3019 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
3108 FixedArray::cast(obj)->set_length(length); 3020 FixedArray::cast(obj)->set_length(length);
3109 return obj; 3021 return obj;
3110 } 3022 }
3111 3023
3112 3024
3113 Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) { 3025 Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3114 Object* result = Heap::AllocateFixedArray(length, pretenure); 3026 Object* result = Heap::AllocateFixedArray(length, pretenure);
3115 if (result->IsFailure()) return result; 3027 if (result->IsFailure()) return result;
3116 reinterpret_cast<Array*>(result)->set_map(hash_table_map()); 3028 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
3117 ASSERT(result->IsHashTable()); 3029 ASSERT(result->IsHashTable());
3118 return result; 3030 return result;
3119 } 3031 }
3120 3032
3121 3033
3122 Object* Heap::AllocateGlobalContext() { 3034 Object* Heap::AllocateGlobalContext() {
3123 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS); 3035 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
3124 if (result->IsFailure()) return result; 3036 if (result->IsFailure()) return result;
3125 Context* context = reinterpret_cast<Context*>(result); 3037 Context* context = reinterpret_cast<Context*>(result);
3126 context->set_map(global_context_map()); 3038 context->set_map(global_context_map());
(...skipping 231 matching lines...)
3358 return cell_space_->Contains(addr); 3270 return cell_space_->Contains(addr);
3359 case LO_SPACE: 3271 case LO_SPACE:
3360 return lo_space_->SlowContains(addr); 3272 return lo_space_->SlowContains(addr);
3361 } 3273 }
3362 3274
3363 return false; 3275 return false;
3364 } 3276 }
3365 3277
3366 3278
3367 #ifdef DEBUG 3279 #ifdef DEBUG
3280 static void DummyScavengePointer(HeapObject** p) {
3281 }
3282
3283
3284 static void VerifyPointersUnderWatermark(
3285 PagedSpace* space,
3286 DirtyRegionCallback visit_dirty_region) {
3287 PageIterator it(space, PageIterator::PAGES_IN_USE);
3288
3289 while (it.has_next()) {
3290 Page* page = it.next();
3291 Address start = page->ObjectAreaStart();
3292 Address end = page->AllocationWatermark();
3293
3294 Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
3295 start,
3296 end,
3297 visit_dirty_region,
3298 &DummyScavengePointer);
3299 }
3300 }
3301
3302
3303 static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
3304 LargeObjectIterator it(space);
3305 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
3306 if (object->IsFixedArray()) {
3307 Address slot_address = object->address();
3308 Address end = object->address() + object->Size();
3309
3310 while (slot_address < end) {
3311 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
3312 // When we are not in GC the Heap::InNewSpace() predicate
3313 // checks that pointers which satisfy the predicate point into
3314 // the active semispace.
3315 Heap::InNewSpace(*slot);
3316 slot_address += kPointerSize;
3317 }
3318 }
3319 }
3320 }
3321
3322
3368 void Heap::Verify() { 3323 void Heap::Verify() {
3369 ASSERT(HasBeenSetup()); 3324 ASSERT(HasBeenSetup());
3370 3325
3371 VerifyPointersVisitor visitor; 3326 VerifyPointersVisitor visitor;
3372 IterateRoots(&visitor, VISIT_ONLY_STRONG); 3327 IterateRoots(&visitor, VISIT_ONLY_STRONG);
3373 3328
3374 new_space_.Verify(); 3329 new_space_.Verify();
3375 3330
3376 VerifyPointersAndRSetVisitor rset_visitor; 3331 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
3377 old_pointer_space_->Verify(&rset_visitor); 3332 old_pointer_space_->Verify(&dirty_regions_visitor);
3378 map_space_->Verify(&rset_visitor); 3333 map_space_->Verify(&dirty_regions_visitor);
3379 3334
3380 VerifyPointersVisitor no_rset_visitor; 3335 VerifyPointersUnderWatermark(old_pointer_space_,
3381 old_data_space_->Verify(&no_rset_visitor); 3336 &IteratePointersInDirtyRegion);
3382 code_space_->Verify(&no_rset_visitor); 3337 VerifyPointersUnderWatermark(map_space_,
3383 cell_space_->Verify(&no_rset_visitor); 3338 &IteratePointersInDirtyMapsRegion);
3339 VerifyPointersUnderWatermark(lo_space_);
3340
3341 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
3342 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
3343
3344 VerifyPointersVisitor no_dirty_regions_visitor;
3345 old_data_space_->Verify(&no_dirty_regions_visitor);
3346 code_space_->Verify(&no_dirty_regions_visitor);
3347 cell_space_->Verify(&no_dirty_regions_visitor);
3384 3348
3385 lo_space_->Verify(); 3349 lo_space_->Verify();
3386 } 3350 }
3387 #endif // DEBUG 3351 #endif // DEBUG
3388 3352
3389 3353
3390 Object* Heap::LookupSymbol(Vector<const char> string) { 3354 Object* Heap::LookupSymbol(Vector<const char> string) {
3391 Object* symbol = NULL; 3355 Object* symbol = NULL;
3392 Object* new_table = symbol_table()->LookupSymbol(string, &symbol); 3356 Object* new_table = symbol_table()->LookupSymbol(string, &symbol);
3393 if (new_table->IsFailure()) return new_table; 3357 if (new_table->IsFailure()) return new_table;
(...skipping 32 matching lines...)
3426 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject()); 3390 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
3427 for (Address a = new_space_.FromSpaceLow(); 3391 for (Address a = new_space_.FromSpaceLow();
3428 a < new_space_.FromSpaceHigh(); 3392 a < new_space_.FromSpaceHigh();
3429 a += kPointerSize) { 3393 a += kPointerSize) {
3430 Memory::Address_at(a) = kFromSpaceZapValue; 3394 Memory::Address_at(a) = kFromSpaceZapValue;
3431 } 3395 }
3432 } 3396 }
3433 #endif // DEBUG 3397 #endif // DEBUG
3434 3398
3435 3399
3436 int Heap::IterateRSetRange(Address object_start, 3400 bool Heap::IteratePointersInDirtyRegion(Address start,
3437 Address object_end, 3401 Address end,
3438 Address rset_start, 3402 ObjectSlotCallback copy_object_func) {
3439 ObjectSlotCallback copy_object_func) { 3403 Address slot_address = start;
3440 Address object_address = object_start; 3404 bool pointers_to_new_space_found = false;
3441 Address rset_address = rset_start; 3405
3442 int set_bits_count = 0; 3406 while (slot_address < end) {
3443 3407 Object** slot = reinterpret_cast<Object**>(slot_address);
3444 // Loop over all the pointers in [object_start, object_end). 3408 if (Heap::InNewSpace(*slot)) {
3445 while (object_address < object_end) { 3409 ASSERT((*slot)->IsHeapObject());
3446 uint32_t rset_word = Memory::uint32_at(rset_address); 3410 copy_object_func(reinterpret_cast<HeapObject**>(slot));
3447 if (rset_word != 0) { 3411 if (Heap::InNewSpace(*slot)) {
3448 uint32_t result_rset = rset_word; 3412 ASSERT((*slot)->IsHeapObject());
3449 for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) { 3413 pointers_to_new_space_found = true;
3450 // Do not dereference pointers at or past object_end. 3414 }
3451 if ((rset_word & bitmask) != 0 && object_address < object_end) { 3415 }
3452 Object** object_p = reinterpret_cast<Object**>(object_address); 3416 slot_address += kPointerSize;
3453 if (Heap::InNewSpace(*object_p)) { 3417 }
3454 copy_object_func(reinterpret_cast<HeapObject**>(object_p)); 3418 return pointers_to_new_space_found;
3455 } 3419 }
3456 // If this pointer does not need to be remembered anymore, clear 3420
3457 // the remembered set bit. 3421
3458 if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask; 3422 // Compute start address of the first map following given addr.
3459 set_bits_count++; 3423 static inline Address MapStartAlign(Address addr) {
3460 } 3424 Address page = Page::FromAddress(addr)->ObjectAreaStart();
3461 object_address += kPointerSize; 3425 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
3462 } 3426 }
3463 // Update the remembered set if it has changed. 3427
3464 if (result_rset != rset_word) { 3428
3465 Memory::uint32_at(rset_address) = result_rset; 3429 // Compute end address of the first map preceding given addr.
3466 } 3430 static inline Address MapEndAlign(Address addr) {
3467 } else { 3431 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
3468 // No bits in the word were set. This is the common case. 3432 return page + ((addr - page) / Map::kSize * Map::kSize);
3469 object_address += kPointerSize * kBitsPerInt; 3433 }
3470 } 3434
3471 rset_address += kIntSize; 3435
3472 } 3436 static bool IteratePointersInDirtyMaps(Address start,
3473 return set_bits_count; 3437 Address end,
3474 } 3438 ObjectSlotCallback copy_object_func) {
3475 3439 ASSERT(MapStartAlign(start) == start);
3476 3440 ASSERT(MapEndAlign(end) == end);
3477 void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) { 3441
3478 ASSERT(Page::is_rset_in_use()); 3442 Address map_address = start;
3479 ASSERT(space == old_pointer_space_ || space == map_space_); 3443 bool pointers_to_new_space_found = false;
3480 3444
3481 static void* paged_rset_histogram = StatsTable::CreateHistogram( 3445 while (map_address < end) {
3482 "V8.RSetPaged", 3446 ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
3483 0, 3447 ASSERT(Memory::Object_at(map_address)->IsMap());
3484 Page::kObjectAreaSize / kPointerSize, 3448
3485 30); 3449 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
3450 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
3451
3452 if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
3453 pointer_fields_end,
3454 copy_object_func)) {
3455 pointers_to_new_space_found = true;
3456 }
3457
3458 map_address += Map::kSize;
3459 }
3460
3461 return pointers_to_new_space_found;
3462 }
3463
3464
3465 bool Heap::IteratePointersInDirtyMapsRegion(
3466 Address start,
3467 Address end,
3468 ObjectSlotCallback copy_object_func) {
3469 Address map_aligned_start = MapStartAlign(start);
3470 Address map_aligned_end = MapEndAlign(end);
3471
3472 bool contains_pointers_to_new_space = false;
3473
3474 if (map_aligned_start != start) {
3475 Address prev_map = map_aligned_start - Map::kSize;
3476 ASSERT(Memory::Object_at(prev_map)->IsMap());
3477
3478 Address pointer_fields_start =
3479 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
3480
3481 Address pointer_fields_end =
3482 Min(prev_map + Map::kCodeCacheOffset + kPointerSize, end);
3483
3484 contains_pointers_to_new_space =
3485 IteratePointersInDirtyRegion(pointer_fields_start,
3486 pointer_fields_end,
3487 copy_object_func)
3488 || contains_pointers_to_new_space;
3489 }
3490
3491 contains_pointers_to_new_space =
3492 IteratePointersInDirtyMaps(map_aligned_start,
3493 map_aligned_end,
3494 copy_object_func)
3495 || contains_pointers_to_new_space;
3496
3497 if (map_aligned_end != end) {
3498 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
3499
3500 Address pointer_fields_start = map_aligned_end + Map::kPrototypeOffset;
3501
3502 Address pointer_fields_end =
3503 Min(end, map_aligned_end + Map::kCodeCacheOffset + kPointerSize);
3504
3505 contains_pointers_to_new_space =
3506 IteratePointersInDirtyRegion(pointer_fields_start,
3507 pointer_fields_end,
3508 copy_object_func)
3509 || contains_pointers_to_new_space;
3510 }
3511
3512 return contains_pointers_to_new_space;
3513 }
3514
3515
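Map space is filled with fixed-size Map objects laid out back to back, so a dirty-region boundary can fall in the middle of a map; the MapStartAlign/MapEndAlign helpers above round the region boundaries to whole maps before the pointer fields are visited. A minimal sketch of that rounding, with a made-up object size and page start rather than the real Page and Map layout:

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kMapSize = 88;   // assumed fixed object size, multiple of a pointer

// Round 'addr' up to the start of the first whole map at or after it,
// measuring from the start of the object area of its page.
inline uintptr_t MapStartAlign(uintptr_t addr, uintptr_t object_area_start) {
  uintptr_t offset = addr - object_area_start;
  return object_area_start + ((offset + kMapSize - 1) / kMapSize) * kMapSize;
}

// Round 'addr' down to the end of the last whole map at or before it.
inline uintptr_t MapEndAlign(uintptr_t addr, uintptr_t object_area_start) {
  uintptr_t offset = addr - object_area_start;
  return object_area_start + (offset / kMapSize) * kMapSize;
}

int main() {
  uintptr_t area = 0x1000;
  // A region boundary that falls 10 bytes into the second map:
  uintptr_t boundary = area + kMapSize + 10;
  std::printf("up: +%lu, down: +%lu\n",
              static_cast<unsigned long>(MapStartAlign(boundary, area) - area),
              static_cast<unsigned long>(MapEndAlign(boundary, area) - area));
  // Prints "up: +176, down: +88": the partial map at each boundary is handled
  // separately, just as IteratePointersInDirtyMapsRegion does above.
}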
3516 void Heap::IterateAndMarkPointersToNewSpace(Address start,
3517 Address end,
3518 ObjectSlotCallback callback) {
3519 Address slot_address = start;
3520 Page* page = Page::FromAddress(start);
3521
3522 uint32_t marks = page->GetRegionMarks();
3523
3524 while (slot_address < end) {
3525 Object** slot = reinterpret_cast<Object**>(slot_address);
3526 if (Heap::InNewSpace(*slot)) {
3527 ASSERT((*slot)->IsHeapObject());
3528 callback(reinterpret_cast<HeapObject**>(slot));
3529 if (Heap::InNewSpace(*slot)) {
3530 ASSERT((*slot)->IsHeapObject());
3531 marks |= page->GetRegionMaskForAddress(slot_address);
3532 }
3533 }
3534 slot_address += kPointerSize;
3535 }
3536
3537 page->SetRegionMarks(marks);
3538 }
3539
3540
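IterateAndMarkPointersToNewSpace above records the per-region dirty bits that the card-marking scheme in this patch relies on: each page keeps one dirty bit per fixed-size region, and a slot that still points into new space after scavenging must leave its region's bit set. A simplified sketch of the address-to-bit mapping; the 8 KB page and 32 regions per page are illustrative assumptions, not the exact V8 constants:

#include <cstdint>
#include <cstdio>

// Toy card-marking layout: one 32-bit dirty mask per page, one bit per region.
constexpr uintptr_t kPageSize = 8 * 1024;                       // assumed
constexpr uintptr_t kRegionsPerPage = 32;                       // one bit each
constexpr uintptr_t kRegionSize = kPageSize / kRegionsPerPage;  // 256 bytes

// Which bit of the page's dirty mask covers the given slot address?
inline uint32_t RegionMaskForAddress(uintptr_t slot_address) {
  uintptr_t offset_in_page = slot_address & (kPageSize - 1);
  return 1u << (offset_in_page / kRegionSize);
}

// Mark the region containing slot_address dirty: "this region may hold a
// pointer into new space and must be rescanned on the next scavenge".
inline void MarkRegionDirty(uint32_t* page_dirty_mask, uintptr_t slot_address) {
  *page_dirty_mask |= RegionMaskForAddress(slot_address);
}

int main() {
  uint32_t dirty_mask = 0;   // all regions of the page start out clean
  uintptr_t page_start = 16 * kPageSize;
  MarkRegionDirty(&dirty_mask, page_start + 5 * kRegionSize + 16);
  std::printf("dirty mask: %08x\n", dirty_mask);   // prints: dirty mask: 00000020
}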
3541 uint32_t Heap::IterateDirtyRegions(
3542 uint32_t marks,
3543 Address area_start,
3544 Address area_end,
3545 DirtyRegionCallback visit_dirty_region,
3546 ObjectSlotCallback copy_object_func) {
3547 uint32_t newmarks = 0;
3548 uint32_t mask = 1;
3549
3550 if (area_start >= area_end) {
3551 return newmarks;
3552 }
3553
3554 Address region_start = area_start;
3555
3556 // area_start does not necessarily coincide with the start of the first
3557 // region. Thus to calculate the beginning of the next region we have to
3558 // align area_start to Page::kRegionSize.
3559 Address second_region =
3560 reinterpret_cast<Address>(
3561 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
3562 ~Page::kRegionAlignmentMask);
3563
3564 // Next region might be beyond area_end.
3565 Address region_end = Min(second_region, area_end);
3566
3567 if (marks & mask) {
3568 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
3569 newmarks |= mask;
3570 }
3571 }
3572 mask <<= 1;
3573
3574 // Iterate subsequent regions that lie fully inside [area_start, area_end).
3575 region_start = region_end;
3576 region_end = region_start + Page::kRegionSize;
3577
3578 while (region_end <= area_end) {
3579 if (marks & mask) {
3580 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
3581 newmarks |= mask;
3582 }
3583 }
3584
3585 region_start = region_end;
3586 region_end = region_start + Page::kRegionSize;
3587
3588 mask <<= 1;
3589 }
3590
3591 if (region_start != area_end) {
3592 // A small piece of the area was left unvisited because area_end does not
3593 // coincide with the region end. Check whether the region covering the
3594 // last part of the area is dirty.
3595 if (marks & mask) {
3596 if (visit_dirty_region(region_start, area_end, copy_object_func)) {
3597 newmarks |= mask;
3598 }
3599 }
3600 }
3601
3602 return newmarks;
3603 }
3604
3605
3606
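The loop in the uint32_t overload of IterateDirtyRegions above visits only the regions whose bit is set and rebuilds the mask from what the visitor reports, which is how stale dirty bits get dropped for free. A compact sketch of that pattern, reusing the toy one-mask-per-page bookkeeping from the previous sketch; the callback signature and constants are illustrative, not the real Page API:

#include <cstdint>
#include <cstdio>
#include <functional>

// Walk one page's 32-bit dirty mask: visit only dirty regions, and keep a
// bit set only if the visitor still found a pointer into new space there.
// The real code also handles regions that straddle area_start and area_end.
uint32_t VisitDirtyRegions(uint32_t marks, uintptr_t page_start,
                           uintptr_t region_size,
                           const std::function<bool(uintptr_t, uintptr_t)>& visit) {
  uint32_t new_marks = 0;
  for (int i = 0; i < 32; ++i) {
    uint32_t bit = 1u << i;
    if ((marks & bit) == 0) continue;                 // clean region: skip it
    uintptr_t start = page_start + i * region_size;
    if (visit(start, start + region_size)) new_marks |= bit;
  }
  return new_marks;
}

int main() {
  // Regions 1 and 7 start out dirty; pretend only region 7 still holds a
  // new-space pointer after scavenging, so region 1 gets cleaned up.
  uint32_t marks = (1u << 1) | (1u << 7);
  uint32_t new_marks = VisitDirtyRegions(
      marks, 0x10000, 256,
      [](uintptr_t start, uintptr_t) { return start == 0x10000 + 7 * 256; });
  std::printf("old %08x -> new %08x\n", marks, new_marks);
  // Prints: old 00000082 -> new 00000080
}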
3607 void Heap::IterateDirtyRegions(
3608 PagedSpace* space,
3609 DirtyRegionCallback visit_dirty_region,
3610 ObjectSlotCallback copy_object_func,
3611 ExpectedPageWatermarkState expected_page_watermark_state) {
3486 3612
3487 PageIterator it(space, PageIterator::PAGES_IN_USE); 3613 PageIterator it(space, PageIterator::PAGES_IN_USE);
3614
3488 while (it.has_next()) { 3615 while (it.has_next()) {
3489 Page* page = it.next(); 3616 Page* page = it.next();
3490 int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(), 3617 uint32_t marks = page->GetRegionMarks();
3491 page->RSetStart(), copy_object_func); 3618
3492 if (paged_rset_histogram != NULL) { 3619 if (marks != Page::kAllRegionsCleanMarks) {
3493 StatsTable::AddHistogramSample(paged_rset_histogram, count); 3620 Address start = page->ObjectAreaStart();
3494 } 3621
3495 } 3622 // Do not try to visit pointers beyond page allocation watermark.
3496 } 3623 // Page can contain garbage pointers there.
3497 3624 Address end;
3625
3626 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
3627 page->IsWatermarkValid()) {
3628 end = page->AllocationWatermark();
3629 } else {
3630 end = page->CachedAllocationWatermark();
3631 }
3632
3633 ASSERT(space == old_pointer_space_ ||
3634 (space == map_space_ &&
3635 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
3636
3637 page->SetRegionMarks(IterateDirtyRegions(marks,
3638 start,
3639 end,
3640 visit_dirty_region,
3641 copy_object_func));
3642 }
3643
3644 // Mark page watermark as invalid to maintain watermark validity invariant.
3645 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
3646 page->InvalidateWatermark(true);
3647 }
3648 }
3649
3498 3650
3499 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { 3651 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
3500 IterateStrongRoots(v, mode); 3652 IterateStrongRoots(v, mode);
3501 IterateWeakRoots(v, mode); 3653 IterateWeakRoots(v, mode);
3502 } 3654 }
3503 3655
3504 3656
3505 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) { 3657 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
3506 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex])); 3658 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
3507 v->Synchronize("symbol_table"); 3659 v->Synchronize("symbol_table");
(...skipping 993 matching lines...)
4501 void ExternalStringTable::TearDown() { 4653 void ExternalStringTable::TearDown() {
4502 new_space_strings_.Free(); 4654 new_space_strings_.Free();
4503 old_space_strings_.Free(); 4655 old_space_strings_.Free();
4504 } 4656 }
4505 4657
4506 4658
4507 List<Object*> ExternalStringTable::new_space_strings_; 4659 List<Object*> ExternalStringTable::new_space_strings_;
4508 List<Object*> ExternalStringTable::old_space_strings_; 4660 List<Object*> ExternalStringTable::old_space_strings_;
4509 4661
4510 } } // namespace v8::internal 4662 } } // namespace v8::internal