Chromium Code Reviews

Side by Side Diff: src/heap.cc

Issue 2073018: Reverting r4703. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 10 years, 7 months ago
1 // Copyright 2009 the V8 project authors. All rights reserved. 1 // Copyright 2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 308 matching lines...)
319 unflattened_strings_length_ = 0; 319 unflattened_strings_length_ = 0;
320 #ifdef DEBUG 320 #ifdef DEBUG
321 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC); 321 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
322 allow_allocation(false); 322 allow_allocation(false);
323 323
324 if (FLAG_verify_heap) { 324 if (FLAG_verify_heap) {
325 Verify(); 325 Verify();
326 } 326 }
327 327
328 if (FLAG_gc_verbose) Print(); 328 if (FLAG_gc_verbose) Print();
329
330 if (FLAG_print_rset) {
331 // Not all spaces have remembered set bits that we care about.
332 old_pointer_space_->PrintRSet();
333 map_space_->PrintRSet();
334 lo_space_->PrintRSet();
335 }
329 #endif 336 #endif
330 337
331 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 338 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
332 ReportStatisticsBeforeGC(); 339 ReportStatisticsBeforeGC();
333 #endif 340 #endif
334 } 341 }
335 342
336 int Heap::SizeOfObjects() { 343 int Heap::SizeOfObjects() {
337 int total = 0; 344 int total = 0;
338 AllSpaces spaces; 345 AllSpaces spaces;
(...skipping 166 matching lines...)
505 gc_performed = true; 512 gc_performed = true;
506 } 513 }
507 if (!(map_space->ReserveSpace(map_space_size))) { 514 if (!(map_space->ReserveSpace(map_space_size))) {
508 Heap::CollectGarbage(map_space_size, MAP_SPACE); 515 Heap::CollectGarbage(map_space_size, MAP_SPACE);
509 gc_performed = true; 516 gc_performed = true;
510 } 517 }
511 if (!(cell_space->ReserveSpace(cell_space_size))) { 518 if (!(cell_space->ReserveSpace(cell_space_size))) {
512 Heap::CollectGarbage(cell_space_size, CELL_SPACE); 519 Heap::CollectGarbage(cell_space_size, CELL_SPACE);
513 gc_performed = true; 520 gc_performed = true;
514 } 521 }
515 // We add a slack-factor of 2 in order to have space for a series of 522 // We add a slack-factor of 2 in order to have space for the remembered
516 // large-object allocations that are only just larger than the page size. 523 // set and a series of large-object allocations that are only just larger
524 // than the page size.
517 large_object_size *= 2; 525 large_object_size *= 2;
518 // The ReserveSpace method on the large object space checks how much 526 // The ReserveSpace method on the large object space checks how much
519 // we can expand the old generation. This includes expansion caused by 527 // we can expand the old generation. This includes expansion caused by
520 // allocation in the other spaces. 528 // allocation in the other spaces.
521 large_object_size += cell_space_size + map_space_size + code_space_size + 529 large_object_size += cell_space_size + map_space_size + code_space_size +
522 data_space_size + pointer_space_size; 530 data_space_size + pointer_space_size;
523 if (!(lo_space->ReserveSpace(large_object_size))) { 531 if (!(lo_space->ReserveSpace(large_object_size))) {
524 Heap::CollectGarbage(large_object_size, LO_SPACE); 532 Heap::CollectGarbage(large_object_size, LO_SPACE);
525 gc_performed = true; 533 gc_performed = true;
526 } 534 }
(...skipping 30 matching lines...)
557 }; 565 };
558 566
559 567
560 void Heap::ClearJSFunctionResultCaches() { 568 void Heap::ClearJSFunctionResultCaches() {
561 if (Bootstrapper::IsActive()) return; 569 if (Bootstrapper::IsActive()) return;
562 ClearThreadJSFunctionResultCachesVisitor visitor; 570 ClearThreadJSFunctionResultCachesVisitor visitor;
563 ThreadManager::IterateThreads(&visitor); 571 ThreadManager::IterateThreads(&visitor);
564 } 572 }
565 573
566 574
567 #ifdef DEBUG
568
569 enum PageWatermarkValidity {
570 ALL_VALID,
571 ALL_INVALID
572 };
573
574 static void VerifyPageWatermarkValidity(PagedSpace* space,
575 PageWatermarkValidity validity) {
576 PageIterator it(space, PageIterator::PAGES_IN_USE);
577 bool expected_value = (validity == ALL_VALID);
578 while (it.has_next()) {
579 Page* page = it.next();
580 ASSERT(page->IsWatermarkValid() == expected_value);
581 }
582 }
583 #endif
584
585
586 void Heap::PerformGarbageCollection(AllocationSpace space, 575 void Heap::PerformGarbageCollection(AllocationSpace space,
587 GarbageCollector collector, 576 GarbageCollector collector,
588 GCTracer* tracer) { 577 GCTracer* tracer) {
589 VerifySymbolTable(); 578 VerifySymbolTable();
590 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) { 579 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
591 ASSERT(!allocation_allowed_); 580 ASSERT(!allocation_allowed_);
592 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); 581 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
593 global_gc_prologue_callback_(); 582 global_gc_prologue_callback_();
594 } 583 }
595 584
(...skipping 224 matching lines...)
820 } 809 }
821 810
822 811
823 void Heap::Scavenge() { 812 void Heap::Scavenge() {
824 #ifdef DEBUG 813 #ifdef DEBUG
825 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); 814 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
826 #endif 815 #endif
827 816
828 gc_state_ = SCAVENGE; 817 gc_state_ = SCAVENGE;
829 818
830 Page::FlipMeaningOfInvalidatedWatermarkFlag();
831 #ifdef DEBUG
832 VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
833 VerifyPageWatermarkValidity(map_space_, ALL_VALID);
834 #endif
835
836 // We do not update an allocation watermark of the top page during linear
837 // allocation to avoid overhead. So to maintain the watermark invariant
838 // we have to manually cache the watermark and mark the top page as having an
839 // invalid watermark. This guarantees that dirty regions iteration will use a
840 // correct watermark even if a linear allocation happens.
841 old_pointer_space_->FlushTopPageWatermark();
842 map_space_->FlushTopPageWatermark();
843
844 // Implements Cheney's copying algorithm 819 // Implements Cheney's copying algorithm
845 LOG(ResourceEvent("scavenge", "begin")); 820 LOG(ResourceEvent("scavenge", "begin"));
846 821
847 // Clear descriptor cache. 822 // Clear descriptor cache.
848 DescriptorLookupCache::Clear(); 823 DescriptorLookupCache::Clear();
849 824
850 // Used for updating survived_since_last_expansion_ at function end. 825 // Used for updating survived_since_last_expansion_ at function end.
851 int survived_watermark = PromotedSpaceSize(); 826 int survived_watermark = PromotedSpaceSize();
852 827
853 CheckNewSpaceExpansionCriteria(); 828 CheckNewSpaceExpansionCriteria();
(...skipping 22 matching lines...)
876 // objects are at least one pointer in size. 851 // objects are at least one pointer in size.
877 Address new_space_front = new_space_.ToSpaceLow(); 852 Address new_space_front = new_space_.ToSpaceLow();
878 promotion_queue.Initialize(new_space_.ToSpaceHigh()); 853 promotion_queue.Initialize(new_space_.ToSpaceHigh());
879 854
880 ScavengeVisitor scavenge_visitor; 855 ScavengeVisitor scavenge_visitor;
881 // Copy roots. 856 // Copy roots.
882 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE); 857 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
883 858
884 // Copy objects reachable from the old generation. By definition, 859 // Copy objects reachable from the old generation. By definition,
885 // there are no intergenerational pointers in code or data spaces. 860 // there are no intergenerational pointers in code or data spaces.
886 IterateDirtyRegions(old_pointer_space_, 861 IterateRSet(old_pointer_space_, &ScavengePointer);
887 &IteratePointersInDirtyRegion, 862 IterateRSet(map_space_, &ScavengePointer);
888 &ScavengePointer, 863 lo_space_->IterateRSet(&ScavengePointer);
889 WATERMARK_CAN_BE_INVALID);
890
891 IterateDirtyRegions(map_space_,
892 &IteratePointersInDirtyMapsRegion,
893 &ScavengePointer,
894 WATERMARK_CAN_BE_INVALID);
895
896 lo_space_->IterateDirtyRegions(&ScavengePointer);
897 864
898 // Copy objects reachable from cells by scavenging cell values directly. 865 // Copy objects reachable from cells by scavenging cell values directly.
899 HeapObjectIterator cell_iterator(cell_space_); 866 HeapObjectIterator cell_iterator(cell_space_);
900 for (HeapObject* cell = cell_iterator.next(); 867 for (HeapObject* cell = cell_iterator.next();
901 cell != NULL; cell = cell_iterator.next()) { 868 cell != NULL; cell = cell_iterator.next()) {
902 if (cell->IsJSGlobalPropertyCell()) { 869 if (cell->IsJSGlobalPropertyCell()) {
903 Address value_address = 870 Address value_address =
904 reinterpret_cast<Address>(cell) + 871 reinterpret_cast<Address>(cell) +
905 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); 872 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
906 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); 873 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
(...skipping 82 matching lines...)
989 } 956 }
990 957
991 // Promote and process all the to-be-promoted objects. 958 // Promote and process all the to-be-promoted objects.
992 while (!promotion_queue.is_empty()) { 959 while (!promotion_queue.is_empty()) {
993 HeapObject* source; 960 HeapObject* source;
994 Map* map; 961 Map* map;
995 promotion_queue.remove(&source, &map); 962 promotion_queue.remove(&source, &map);
996 // Copy the from-space object to its new location (given by the 963 // Copy the from-space object to its new location (given by the
997 // forwarding address) and fix its map. 964 // forwarding address) and fix its map.
998 HeapObject* target = source->map_word().ToForwardingAddress(); 965 HeapObject* target = source->map_word().ToForwardingAddress();
999 int size = source->SizeFromMap(map); 966 CopyBlock(reinterpret_cast<Object**>(target->address()),
1000 CopyBlock(target->address(), source->address(), size); 967 reinterpret_cast<Object**>(source->address()),
968 source->SizeFromMap(map));
1001 target->set_map(map); 969 target->set_map(map);
1002 970
1003 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 971 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1004 // Update NewSpace stats if necessary. 972 // Update NewSpace stats if necessary.
1005 RecordCopiedObject(target); 973 RecordCopiedObject(target);
1006 #endif 974 #endif
1007 // Visit the newly copied object for pointers to new space. 975 // Visit the newly copied object for pointers to new space.
1008 ASSERT(!target->IsMap()); 976 target->Iterate(scavenge_visitor);
1009 IterateAndMarkPointersToNewSpace(target->address(), 977 UpdateRSet(target);
1010 target->address() + size,
1011 &ScavengePointer);
1012 } 978 }
1013 979
1014 // Take another spin if there are now unswept objects in new space 980 // Take another spin if there are now unswept objects in new space
1015 // (there are currently no more unswept promoted objects). 981 // (there are currently no more unswept promoted objects).
1016 } while (new_space_front < new_space_.top()); 982 } while (new_space_front < new_space_.top());
1017 983
1018 return new_space_front; 984 return new_space_front;
1019 } 985 }
1020 986
1021 987
988 void Heap::ClearRSetRange(Address start, int size_in_bytes) {
989 uint32_t start_bit;
990 Address start_word_address =
991 Page::ComputeRSetBitPosition(start, 0, &start_bit);
992 uint32_t end_bit;
993 Address end_word_address =
994 Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
995 0,
996 &end_bit);
997
998 // We want to clear the bits in the starting word starting with the
999 // first bit, and in the ending word up to and including the last
1000 // bit. Build a pair of bitmasks to do that.
1001 uint32_t start_bitmask = start_bit - 1;
1002 uint32_t end_bitmask = ~((end_bit << 1) - 1);
1003
1004 // If the start address and end address are the same, we mask that
1005 // word once, otherwise mask the starting and ending word
1006 // separately and all the ones in between.
1007 if (start_word_address == end_word_address) {
1008 Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
1009 } else {
1010 Memory::uint32_at(start_word_address) &= start_bitmask;
1011 Memory::uint32_at(end_word_address) &= end_bitmask;
1012 start_word_address += kIntSize;
1013 memset(start_word_address, 0, end_word_address - start_word_address);
1014 }
1015 }
1016
1017
1018 class UpdateRSetVisitor: public ObjectVisitor {
1019 public:
1020
1021 void VisitPointer(Object** p) {
1022 UpdateRSet(p);
1023 }
1024
1025 void VisitPointers(Object** start, Object** end) {
1026 // Update a store into slots [start, end), used (a) to update remembered
1027 // set when promoting a young object to old space or (b) to rebuild
1028 // remembered sets after a mark-compact collection.
1029 for (Object** p = start; p < end; p++) UpdateRSet(p);
1030 }
1031 private:
1032
1033 void UpdateRSet(Object** p) {
1034 // The remembered set should not be set. It should be clear for objects
1035 // newly copied to old space, and it is cleared before rebuilding in the
1036 // mark-compact collector.
1037 ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
1038 if (Heap::InNewSpace(*p)) {
1039 Page::SetRSet(reinterpret_cast<Address>(p), 0);
1040 }
1041 }
1042 };
1043
1044
1045 int Heap::UpdateRSet(HeapObject* obj) {
1046 ASSERT(!InNewSpace(obj));
1047 // Special handling of fixed arrays to iterate the body based on the start
1048 // address and offset. Just iterating the pointers as in UpdateRSetVisitor
1049 // will not work because Page::SetRSet needs to have the start of the
1050 // object for large object pages.
1051 if (obj->IsFixedArray()) {
1052 FixedArray* array = FixedArray::cast(obj);
1053 int length = array->length();
1054 for (int i = 0; i < length; i++) {
1055 int offset = FixedArray::kHeaderSize + i * kPointerSize;
1056 ASSERT(!Page::IsRSetSet(obj->address(), offset));
1057 if (Heap::InNewSpace(array->get(i))) {
1058 Page::SetRSet(obj->address(), offset);
1059 }
1060 }
1061 } else if (!obj->IsCode()) {
1062 // Skip code object, we know it does not contain inter-generational
1063 // pointers.
1064 UpdateRSetVisitor v;
1065 obj->Iterate(&v);
1066 }
1067 return obj->Size();
1068 }
1069
1070
1071 void Heap::RebuildRSets() {
1072 // By definition, we do not care about remembered set bits in code,
1073 // data, or cell spaces.
1074 map_space_->ClearRSet();
1075 RebuildRSets(map_space_);
1076
1077 old_pointer_space_->ClearRSet();
1078 RebuildRSets(old_pointer_space_);
1079
1080 Heap::lo_space_->ClearRSet();
1081 RebuildRSets(lo_space_);
1082 }
1083
1084
1085 void Heap::RebuildRSets(PagedSpace* space) {
1086 HeapObjectIterator it(space);
1087 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1088 Heap::UpdateRSet(obj);
1089 }
1090
1091
1092 void Heap::RebuildRSets(LargeObjectSpace* space) {
1093 LargeObjectIterator it(space);
1094 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1095 Heap::UpdateRSet(obj);
1096 }
1097
1098
1022 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 1099 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1023 void Heap::RecordCopiedObject(HeapObject* obj) { 1100 void Heap::RecordCopiedObject(HeapObject* obj) {
1024 bool should_record = false; 1101 bool should_record = false;
1025 #ifdef DEBUG 1102 #ifdef DEBUG
1026 should_record = FLAG_heap_stats; 1103 should_record = FLAG_heap_stats;
1027 #endif 1104 #endif
1028 #ifdef ENABLE_LOGGING_AND_PROFILING 1105 #ifdef ENABLE_LOGGING_AND_PROFILING
1029 should_record = should_record || FLAG_log_gc; 1106 should_record = should_record || FLAG_log_gc;
1030 #endif 1107 #endif
1031 if (should_record) { 1108 if (should_record) {
1032 if (new_space_.Contains(obj)) { 1109 if (new_space_.Contains(obj)) {
1033 new_space_.RecordAllocation(obj); 1110 new_space_.RecordAllocation(obj);
1034 } else { 1111 } else {
1035 new_space_.RecordPromotion(obj); 1112 new_space_.RecordPromotion(obj);
1036 } 1113 }
1037 } 1114 }
1038 } 1115 }
1039 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 1116 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1040 1117
1041 1118
1042 1119
1043 HeapObject* Heap::MigrateObject(HeapObject* source, 1120 HeapObject* Heap::MigrateObject(HeapObject* source,
1044 HeapObject* target, 1121 HeapObject* target,
1045 int size) { 1122 int size) {
1046 // Copy the content of source to target. 1123 // Copy the content of source to target.
1047 CopyBlock(target->address(), source->address(), size); 1124 CopyBlock(reinterpret_cast<Object**>(target->address()),
1125 reinterpret_cast<Object**>(source->address()),
1126 size);
1048 1127
1049 // Set the forwarding address. 1128 // Set the forwarding address.
1050 source->set_map_word(MapWord::FromForwardingAddress(target)); 1129 source->set_map_word(MapWord::FromForwardingAddress(target));
1051 1130
1052 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 1131 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1053 // Update NewSpace stats if necessary. 1132 // Update NewSpace stats if necessary.
1054 RecordCopiedObject(target); 1133 RecordCopiedObject(target);
1055 #endif 1134 #endif
1056 1135
1057 return target; 1136 return target;
(...skipping 538 matching lines...)
1596 if (obj->IsFailure()) return false; 1675 if (obj->IsFailure()) return false;
1597 roots_[constant_symbol_table[i].index] = String::cast(obj); 1676 roots_[constant_symbol_table[i].index] = String::cast(obj);
1598 } 1677 }
1599 1678
1600 // Allocate the hidden symbol which is used to identify the hidden properties 1679 // Allocate the hidden symbol which is used to identify the hidden properties
1601 // in JSObjects. The hash code has a special value so that it will not match 1680 // in JSObjects. The hash code has a special value so that it will not match
1602 // the empty string when searching for the property. It cannot be part of the 1681 // the empty string when searching for the property. It cannot be part of the
1603 // loop above because it needs to be allocated manually with the special 1682 // loop above because it needs to be allocated manually with the special
1604 // hash code in place. The hash code for the hidden_symbol is zero to ensure 1683 // hash code in place. The hash code for the hidden_symbol is zero to ensure
1605 // that it will always be at the first entry in property descriptors. 1684 // that it will always be at the first entry in property descriptors.
1606 obj = AllocateSymbol(CStrVector(""), 0, String::kZeroHash); 1685 obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask);
1607 if (obj->IsFailure()) return false; 1686 if (obj->IsFailure()) return false;
1608 hidden_symbol_ = String::cast(obj); 1687 hidden_symbol_ = String::cast(obj);
1609 1688
1610 // Allocate the proxy for __proto__. 1689 // Allocate the proxy for __proto__.
1611 obj = AllocateProxy((Address) &Accessors::ObjectPrototype); 1690 obj = AllocateProxy((Address) &Accessors::ObjectPrototype);
1612 if (obj->IsFailure()) return false; 1691 if (obj->IsFailure()) return false;
1613 set_prototype_accessors(Proxy::cast(obj)); 1692 set_prototype_accessors(Proxy::cast(obj));
1614 1693
1615 // Allocate the code_stubs dictionary. The initial size is set to avoid 1694 // Allocate the code_stubs dictionary. The initial size is set to avoid
1616 // expanding the dictionary during bootstrapping. 1695 // expanding the dictionary during bootstrapping.
(...skipping 215 matching lines...)
1832 share->set_formal_parameter_count(0); 1911 share->set_formal_parameter_count(0);
1833 share->set_instance_class_name(Object_symbol()); 1912 share->set_instance_class_name(Object_symbol());
1834 share->set_function_data(undefined_value()); 1913 share->set_function_data(undefined_value());
1835 share->set_script(undefined_value()); 1914 share->set_script(undefined_value());
1836 share->set_start_position_and_type(0); 1915 share->set_start_position_and_type(0);
1837 share->set_debug_info(undefined_value()); 1916 share->set_debug_info(undefined_value());
1838 share->set_inferred_name(empty_string()); 1917 share->set_inferred_name(empty_string());
1839 share->set_compiler_hints(0); 1918 share->set_compiler_hints(0);
1840 share->set_this_property_assignments_count(0); 1919 share->set_this_property_assignments_count(0);
1841 share->set_this_property_assignments(undefined_value()); 1920 share->set_this_property_assignments(undefined_value());
1842 share->set_num_literals(0);
1843 share->set_end_position(0);
1844 share->set_function_token_position(0);
1845 return result; 1921 return result;
1846 } 1922 }
1847 1923
1848 1924
1849 // Returns true for a character in a range. Both limits are inclusive. 1925 // Returns true for a character in a range. Both limits are inclusive.
1850 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) { 1926 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
1851 // This makes use of the unsigned wraparound. 1927 // This makes use of the unsigned wraparound.
1852 return character - from <= to - from; 1928 return character - from <= to - from;
1853 } 1929 }
1854 1930
(...skipping 241 matching lines...)
2096 } 2172 }
2097 if (pretenure == NOT_TENURED) { 2173 if (pretenure == NOT_TENURED) {
2098 return AllocateByteArray(length); 2174 return AllocateByteArray(length);
2099 } 2175 }
2100 int size = ByteArray::SizeFor(length); 2176 int size = ByteArray::SizeFor(length);
2101 Object* result = (size <= MaxObjectSizeInPagedSpace()) 2177 Object* result = (size <= MaxObjectSizeInPagedSpace())
2102 ? old_data_space_->AllocateRaw(size) 2178 ? old_data_space_->AllocateRaw(size)
2103 : lo_space_->AllocateRaw(size); 2179 : lo_space_->AllocateRaw(size);
2104 if (result->IsFailure()) return result; 2180 if (result->IsFailure()) return result;
2105 2181
2106 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map()); 2182 reinterpret_cast<Array*>(result)->set_map(byte_array_map());
2107 reinterpret_cast<ByteArray*>(result)->set_length(length); 2183 reinterpret_cast<Array*>(result)->set_length(length);
2108 return result; 2184 return result;
2109 } 2185 }
2110 2186
2111 2187
2112 Object* Heap::AllocateByteArray(int length) { 2188 Object* Heap::AllocateByteArray(int length) {
2113 if (length < 0 || length > ByteArray::kMaxLength) { 2189 if (length < 0 || length > ByteArray::kMaxLength) {
2114 return Failure::OutOfMemoryException(); 2190 return Failure::OutOfMemoryException();
2115 } 2191 }
2116 int size = ByteArray::SizeFor(length); 2192 int size = ByteArray::SizeFor(length);
2117 AllocationSpace space = 2193 AllocationSpace space =
2118 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE; 2194 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
2119 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE); 2195 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
2120 if (result->IsFailure()) return result; 2196 if (result->IsFailure()) return result;
2121 2197
2122 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map()); 2198 reinterpret_cast<Array*>(result)->set_map(byte_array_map());
2123 reinterpret_cast<ByteArray*>(result)->set_length(length); 2199 reinterpret_cast<Array*>(result)->set_length(length);
2124 return result; 2200 return result;
2125 } 2201 }
2126 2202
2127 2203
2128 void Heap::CreateFillerObjectAt(Address addr, int size) { 2204 void Heap::CreateFillerObjectAt(Address addr, int size) {
2129 if (size == 0) return; 2205 if (size == 0) return;
2130 HeapObject* filler = HeapObject::FromAddress(addr); 2206 HeapObject* filler = HeapObject::FromAddress(addr);
2131 if (size == kPointerSize) { 2207 if (size == kPointerSize) {
2132 filler->set_map(one_pointer_filler_map()); 2208 filler->set_map(one_pointer_filler_map());
2133 } else if (size == 2 * kPointerSize) { 2209 } else if (size == 2 * kPointerSize) {
(...skipping 95 matching lines...)
2229 result = lo_space_->AllocateRawCode(obj_size); 2305 result = lo_space_->AllocateRawCode(obj_size);
2230 } else { 2306 } else {
2231 result = code_space_->AllocateRaw(obj_size); 2307 result = code_space_->AllocateRaw(obj_size);
2232 } 2308 }
2233 2309
2234 if (result->IsFailure()) return result; 2310 if (result->IsFailure()) return result;
2235 2311
2236 // Copy code object. 2312 // Copy code object.
2237 Address old_addr = code->address(); 2313 Address old_addr = code->address();
2238 Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); 2314 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2239 CopyBlock(new_addr, old_addr, obj_size); 2315 CopyBlock(reinterpret_cast<Object**>(new_addr),
2316 reinterpret_cast<Object**>(old_addr),
2317 obj_size);
2240 // Relocate the copy. 2318 // Relocate the copy.
2241 Code* new_code = Code::cast(result); 2319 Code* new_code = Code::cast(result);
2242 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address())); 2320 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2243 new_code->Relocate(new_addr - old_addr); 2321 new_code->Relocate(new_addr - old_addr);
2244 return new_code; 2322 return new_code;
2245 } 2323 }
2246 2324
2247 2325
2248 Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) { 2326 Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
2249 int new_body_size = RoundUp(code->instruction_size() + reloc_info.length(), 2327 int new_body_size = RoundUp(code->instruction_size() + reloc_info.length(),
(...skipping 125 matching lines...)
2375 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size()); 2453 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());
2376 2454
2377 // Do the allocation. 2455 // Do the allocation.
2378 Object* result = 2456 Object* result =
2379 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE); 2457 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
2380 if (result->IsFailure()) return result; 2458 if (result->IsFailure()) return result;
2381 2459
2382 // Copy the content. The arguments boilerplate doesn't have any 2460 // Copy the content. The arguments boilerplate doesn't have any
2383 // fields that point to new space so it's safe to skip the write 2461 // fields that point to new space so it's safe to skip the write
2384 // barrier here. 2462 // barrier here.
2385 CopyBlock(HeapObject::cast(result)->address(), 2463 CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
2386 boilerplate->address(), 2464 reinterpret_cast<Object**>(boilerplate->address()),
2387 kArgumentsObjectSize); 2465 kArgumentsObjectSize);
2388 2466
2389 // Set the two properties. 2467 // Set the two properties.
2390 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index, 2468 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
2391 callee); 2469 callee);
2392 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index, 2470 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
2393 Smi::FromInt(length), 2471 Smi::FromInt(length),
2394 SKIP_WRITE_BARRIER); 2472 SKIP_WRITE_BARRIER);
2395 2473
2396 // Check the state of the object 2474 // Check the state of the object
(...skipping 201 matching lines...)
2598 Map* map = source->map(); 2676 Map* map = source->map();
2599 int object_size = map->instance_size(); 2677 int object_size = map->instance_size();
2600 Object* clone; 2678 Object* clone;
2601 2679
2602 // If we're forced to always allocate, we use the general allocation 2680 // If we're forced to always allocate, we use the general allocation
2603 // functions which may leave us with an object in old space. 2681 // functions which may leave us with an object in old space.
2604 if (always_allocate()) { 2682 if (always_allocate()) {
2605 clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE); 2683 clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
2606 if (clone->IsFailure()) return clone; 2684 if (clone->IsFailure()) return clone;
2607 Address clone_address = HeapObject::cast(clone)->address(); 2685 Address clone_address = HeapObject::cast(clone)->address();
2608 CopyBlock(clone_address, 2686 CopyBlock(reinterpret_cast<Object**>(clone_address),
2609 source->address(), 2687 reinterpret_cast<Object**>(source->address()),
2610 object_size); 2688 object_size);
2611 // Update write barrier for all fields that lie beyond the header. 2689 // Update write barrier for all fields that lie beyond the header.
2612 RecordWrites(clone_address, 2690 RecordWrites(clone_address,
2613 JSObject::kHeaderSize, 2691 JSObject::kHeaderSize,
2614 (object_size - JSObject::kHeaderSize) / kPointerSize); 2692 (object_size - JSObject::kHeaderSize) / kPointerSize);
2615 } else { 2693 } else {
2616 clone = new_space_.AllocateRaw(object_size); 2694 clone = new_space_.AllocateRaw(object_size);
2617 if (clone->IsFailure()) return clone; 2695 if (clone->IsFailure()) return clone;
2618 ASSERT(Heap::InNewSpace(clone)); 2696 ASSERT(Heap::InNewSpace(clone));
2619 // Since we know the clone is allocated in new space, we can copy 2697 // Since we know the clone is allocated in new space, we can copy
2620 // the contents without worrying about updating the write barrier. 2698 // the contents without worrying about updating the write barrier.
2621 CopyBlock(HeapObject::cast(clone)->address(), 2699 CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
2622 source->address(), 2700 reinterpret_cast<Object**>(source->address()),
2623 object_size); 2701 object_size);
2624 } 2702 }
2625 2703
2626 FixedArray* elements = FixedArray::cast(source->elements()); 2704 FixedArray* elements = FixedArray::cast(source->elements());
2627 FixedArray* properties = FixedArray::cast(source->properties()); 2705 FixedArray* properties = FixedArray::cast(source->properties());
2628 // Update elements if necessary. 2706 // Update elements if necessary.
2629 if (elements->length() > 0) { 2707 if (elements->length() > 0) {
2630 Object* elem = CopyFixedArray(elements); 2708 Object* elem = CopyFixedArray(elements);
2631 if (elem->IsFailure()) return elem; 2709 if (elem->IsFailure()) return elem;
2632 JSObject::cast(clone)->set_elements(FixedArray::cast(elem)); 2710 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
(...skipping 250 matching lines...)
2883 ASSERT_EQ(size, HeapObject::cast(result)->Size()); 2961 ASSERT_EQ(size, HeapObject::cast(result)->Size());
2884 return result; 2962 return result;
2885 } 2963 }
2886 2964
2887 2965
2888 Object* Heap::AllocateEmptyFixedArray() { 2966 Object* Heap::AllocateEmptyFixedArray() {
2889 int size = FixedArray::SizeFor(0); 2967 int size = FixedArray::SizeFor(0);
2890 Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); 2968 Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
2891 if (result->IsFailure()) return result; 2969 if (result->IsFailure()) return result;
2892 // Initialize the object. 2970 // Initialize the object.
2893 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map()); 2971 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2894 reinterpret_cast<FixedArray*>(result)->set_length(0); 2972 reinterpret_cast<Array*>(result)->set_length(0);
2895 return result; 2973 return result;
2896 } 2974 }
2897 2975
2898 2976
2899 Object* Heap::AllocateRawFixedArray(int length) { 2977 Object* Heap::AllocateRawFixedArray(int length) {
2900 if (length < 0 || length > FixedArray::kMaxLength) { 2978 if (length < 0 || length > FixedArray::kMaxLength) {
2901 return Failure::OutOfMemoryException(); 2979 return Failure::OutOfMemoryException();
2902 } 2980 }
2903 // Use the general function if we're forced to always allocate. 2981 // Use the general function if we're forced to always allocate.
2904 if (always_allocate()) return AllocateFixedArray(length, TENURED); 2982 if (always_allocate()) return AllocateFixedArray(length, TENURED);
2905 // Allocate the raw data for a fixed array. 2983 // Allocate the raw data for a fixed array.
2906 int size = FixedArray::SizeFor(length); 2984 int size = FixedArray::SizeFor(length);
2907 return size <= kMaxObjectSizeInNewSpace 2985 return size <= kMaxObjectSizeInNewSpace
2908 ? new_space_.AllocateRaw(size) 2986 ? new_space_.AllocateRaw(size)
2909 : lo_space_->AllocateRawFixedArray(size); 2987 : lo_space_->AllocateRawFixedArray(size);
2910 } 2988 }
2911 2989
2912 2990
2913 Object* Heap::CopyFixedArray(FixedArray* src) { 2991 Object* Heap::CopyFixedArray(FixedArray* src) {
2914 int len = src->length(); 2992 int len = src->length();
2915 Object* obj = AllocateRawFixedArray(len); 2993 Object* obj = AllocateRawFixedArray(len);
2916 if (obj->IsFailure()) return obj; 2994 if (obj->IsFailure()) return obj;
2917 if (Heap::InNewSpace(obj)) { 2995 if (Heap::InNewSpace(obj)) {
2918 HeapObject* dst = HeapObject::cast(obj); 2996 HeapObject* dst = HeapObject::cast(obj);
2919 CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len)); 2997 CopyBlock(reinterpret_cast<Object**>(dst->address()),
2998 reinterpret_cast<Object**>(src->address()),
2999 FixedArray::SizeFor(len));
2920 return obj; 3000 return obj;
2921 } 3001 }
2922 HeapObject::cast(obj)->set_map(src->map()); 3002 HeapObject::cast(obj)->set_map(src->map());
2923 FixedArray* result = FixedArray::cast(obj); 3003 FixedArray* result = FixedArray::cast(obj);
2924 result->set_length(len); 3004 result->set_length(len);
2925 3005
2926 // Copy the content 3006 // Copy the content
2927 AssertNoAllocation no_gc; 3007 AssertNoAllocation no_gc;
2928 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc); 3008 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
2929 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode); 3009 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
2930 return result; 3010 return result;
2931 } 3011 }
2932 3012
2933 3013
2934 Object* Heap::AllocateFixedArray(int length) { 3014 Object* Heap::AllocateFixedArray(int length) {
2935 ASSERT(length >= 0); 3015 ASSERT(length >= 0);
2936 if (length == 0) return empty_fixed_array(); 3016 if (length == 0) return empty_fixed_array();
2937 Object* result = AllocateRawFixedArray(length); 3017 Object* result = AllocateRawFixedArray(length);
2938 if (!result->IsFailure()) { 3018 if (!result->IsFailure()) {
2939 // Initialize header. 3019 // Initialize header.
2940 FixedArray* array = reinterpret_cast<FixedArray*>(result); 3020 reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
2941 array->set_map(fixed_array_map()); 3021 FixedArray* array = FixedArray::cast(result);
2942 array->set_length(length); 3022 array->set_length(length);
2943 // Initialize body. 3023 // Initialize body.
2944 ASSERT(!Heap::InNewSpace(undefined_value())); 3024 ASSERT(!Heap::InNewSpace(undefined_value()));
2945 MemsetPointer(array->data_start(), undefined_value(), length); 3025 MemsetPointer(array->data_start(), undefined_value(), length);
2946 } 3026 }
2947 return result; 3027 return result;
2948 } 3028 }
2949 3029
2950 3030
2951 Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) { 3031 Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
2952 if (length < 0 || length > FixedArray::kMaxLength) { 3032 if (length < 0 || length > FixedArray::kMaxLength) {
2953 return Failure::OutOfMemoryException(); 3033 return Failure::OutOfMemoryException();
2954 } 3034 }
2955 3035
2956 AllocationSpace space = 3036 AllocationSpace space =
2957 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE; 3037 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
2958 int size = FixedArray::SizeFor(length); 3038 int size = FixedArray::SizeFor(length);
2959 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) { 3039 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
2960 // Too big for new space. 3040 // Too big for new space.
2961 space = LO_SPACE; 3041 space = LO_SPACE;
2962 } else if (space == OLD_POINTER_SPACE && 3042 } else if (space == OLD_POINTER_SPACE &&
2963 size > MaxObjectSizeInPagedSpace()) { 3043 size > MaxObjectSizeInPagedSpace()) {
2964 // Too big for old pointer space. 3044 // Too big for old pointer space.
2965 space = LO_SPACE; 3045 space = LO_SPACE;
2966 } 3046 }
2967 3047
2968 AllocationSpace retry_space = 3048 // Specialize allocation for the space.
2969 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE; 3049 Object* result = Failure::OutOfMemoryException();
2970 3050 if (space == NEW_SPACE) {
2971 return AllocateRaw(size, space, retry_space); 3051 // We cannot use Heap::AllocateRaw() because it will not properly
3052 // allocate extra remembered set bits if always_allocate() is true and
3053 // new space allocation fails.
3054 result = new_space_.AllocateRaw(size);
3055 if (result->IsFailure() && always_allocate()) {
3056 if (size <= MaxObjectSizeInPagedSpace()) {
3057 result = old_pointer_space_->AllocateRaw(size);
3058 } else {
3059 result = lo_space_->AllocateRawFixedArray(size);
3060 }
3061 }
3062 } else if (space == OLD_POINTER_SPACE) {
3063 result = old_pointer_space_->AllocateRaw(size);
3064 } else {
3065 ASSERT(space == LO_SPACE);
3066 result = lo_space_->AllocateRawFixedArray(size);
3067 }
3068 return result;
2972 } 3069 }
2973 3070
2974 3071
2975 static Object* AllocateFixedArrayWithFiller(int length, 3072 static Object* AllocateFixedArrayWithFiller(int length,
2976 PretenureFlag pretenure, 3073 PretenureFlag pretenure,
2977 Object* filler) { 3074 Object* filler) {
2978 ASSERT(length >= 0); 3075 ASSERT(length >= 0);
2979 ASSERT(Heap::empty_fixed_array()->IsFixedArray()); 3076 ASSERT(Heap::empty_fixed_array()->IsFixedArray());
2980 if (length == 0) return Heap::empty_fixed_array(); 3077 if (length == 0) return Heap::empty_fixed_array();
2981 3078
(...skipping 27 matching lines...)
3009 3106
3010 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map()); 3107 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
3011 FixedArray::cast(obj)->set_length(length); 3108 FixedArray::cast(obj)->set_length(length);
3012 return obj; 3109 return obj;
3013 } 3110 }
3014 3111
3015 3112
3016 Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) { 3113 Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3017 Object* result = Heap::AllocateFixedArray(length, pretenure); 3114 Object* result = Heap::AllocateFixedArray(length, pretenure);
3018 if (result->IsFailure()) return result; 3115 if (result->IsFailure()) return result;
3019 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map()); 3116 reinterpret_cast<Array*>(result)->set_map(hash_table_map());
3020 ASSERT(result->IsHashTable()); 3117 ASSERT(result->IsHashTable());
3021 return result; 3118 return result;
3022 } 3119 }
3023 3120
3024 3121
3025 Object* Heap::AllocateGlobalContext() { 3122 Object* Heap::AllocateGlobalContext() {
3026 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS); 3123 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
3027 if (result->IsFailure()) return result; 3124 if (result->IsFailure()) return result;
3028 Context* context = reinterpret_cast<Context*>(result); 3125 Context* context = reinterpret_cast<Context*>(result);
3029 context->set_map(global_context_map()); 3126 context->set_map(global_context_map());
(...skipping 231 matching lines...)
3261 return cell_space_->Contains(addr); 3358 return cell_space_->Contains(addr);
3262 case LO_SPACE: 3359 case LO_SPACE:
3263 return lo_space_->SlowContains(addr); 3360 return lo_space_->SlowContains(addr);
3264 } 3361 }
3265 3362
3266 return false; 3363 return false;
3267 } 3364 }
3268 3365
3269 3366
3270 #ifdef DEBUG 3367 #ifdef DEBUG
3271 static void DummyScavengePointer(HeapObject** p) {
3272 }
3273
3274
3275 static void VerifyPointersUnderWatermark(
3276 PagedSpace* space,
3277 DirtyRegionCallback visit_dirty_region) {
3278 PageIterator it(space, PageIterator::PAGES_IN_USE);
3279
3280 while (it.has_next()) {
3281 Page* page = it.next();
3282 Address start = page->ObjectAreaStart();
3283 Address end = page->AllocationWatermark();
3284
3285 Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
3286 start,
3287 end,
3288 visit_dirty_region,
3289 &DummyScavengePointer);
3290 }
3291 }
3292
3293
3294 static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
3295 LargeObjectIterator it(space);
3296 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
3297 if (object->IsFixedArray()) {
3298 Address slot_address = object->address();
3299 Address end = object->address() + object->Size();
3300
3301 while (slot_address < end) {
3302 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
3303 // When we are not in GC the Heap::InNewSpace() predicate
3304 // checks that pointers which satisfy predicate point into
3305 // the active semispace.
3306 Heap::InNewSpace(*slot);
3307 slot_address += kPointerSize;
3308 }
3309 }
3310 }
3311 }
3312
3313
3314 void Heap::Verify() { 3368 void Heap::Verify() {
3315 ASSERT(HasBeenSetup()); 3369 ASSERT(HasBeenSetup());
3316 3370
3317 VerifyPointersVisitor visitor; 3371 VerifyPointersVisitor visitor;
3318 IterateRoots(&visitor, VISIT_ONLY_STRONG); 3372 IterateRoots(&visitor, VISIT_ONLY_STRONG);
3319 3373
3320 new_space_.Verify(); 3374 new_space_.Verify();
3321 3375
3322 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor; 3376 VerifyPointersAndRSetVisitor rset_visitor;
3323 old_pointer_space_->Verify(&dirty_regions_visitor); 3377 old_pointer_space_->Verify(&rset_visitor);
3324 map_space_->Verify(&dirty_regions_visitor); 3378 map_space_->Verify(&rset_visitor);
3325 3379
3326 VerifyPointersUnderWatermark(old_pointer_space_, 3380 VerifyPointersVisitor no_rset_visitor;
3327 &IteratePointersInDirtyRegion); 3381 old_data_space_->Verify(&no_rset_visitor);
3328 VerifyPointersUnderWatermark(map_space_, 3382 code_space_->Verify(&no_rset_visitor);
3329 &IteratePointersInDirtyMapsRegion); 3383 cell_space_->Verify(&no_rset_visitor);
3330 VerifyPointersUnderWatermark(lo_space_);
3331
3332 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
3333 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
3334
3335 VerifyPointersVisitor no_dirty_regions_visitor;
3336 old_data_space_->Verify(&no_dirty_regions_visitor);
3337 code_space_->Verify(&no_dirty_regions_visitor);
3338 cell_space_->Verify(&no_dirty_regions_visitor);
3339 3384
3340 lo_space_->Verify(); 3385 lo_space_->Verify();
3341 } 3386 }
3342 #endif // DEBUG 3387 #endif // DEBUG
3343 3388
3344 3389
3345 Object* Heap::LookupSymbol(Vector<const char> string) { 3390 Object* Heap::LookupSymbol(Vector<const char> string) {
3346 Object* symbol = NULL; 3391 Object* symbol = NULL;
3347 Object* new_table = symbol_table()->LookupSymbol(string, &symbol); 3392 Object* new_table = symbol_table()->LookupSymbol(string, &symbol);
3348 if (new_table->IsFailure()) return new_table; 3393 if (new_table->IsFailure()) return new_table;
(...skipping 32 matching lines...)
3381 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject()); 3426 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
3382 for (Address a = new_space_.FromSpaceLow(); 3427 for (Address a = new_space_.FromSpaceLow();
3383 a < new_space_.FromSpaceHigh(); 3428 a < new_space_.FromSpaceHigh();
3384 a += kPointerSize) { 3429 a += kPointerSize) {
3385 Memory::Address_at(a) = kFromSpaceZapValue; 3430 Memory::Address_at(a) = kFromSpaceZapValue;
3386 } 3431 }
3387 } 3432 }
3388 #endif // DEBUG 3433 #endif // DEBUG
3389 3434
3390 3435
3391 bool Heap::IteratePointersInDirtyRegion(Address start, 3436 int Heap::IterateRSetRange(Address object_start,
3392 Address end, 3437 Address object_end,
3393 ObjectSlotCallback copy_object_func) { 3438 Address rset_start,
3394 Address slot_address = start; 3439 ObjectSlotCallback copy_object_func) {
3395 bool pointers_to_new_space_found = false; 3440 Address object_address = object_start;
3441 Address rset_address = rset_start;
3442 int set_bits_count = 0;
3396 3443
3397 while (slot_address < end) { 3444 // Loop over all the pointers in [object_start, object_end).
3398 Object** slot = reinterpret_cast<Object**>(slot_address); 3445 while (object_address < object_end) {
3399 if (Heap::InNewSpace(*slot)) { 3446 uint32_t rset_word = Memory::uint32_at(rset_address);
3400 ASSERT((*slot)->IsHeapObject()); 3447 if (rset_word != 0) {
3401 copy_object_func(reinterpret_cast<HeapObject**>(slot)); 3448 uint32_t result_rset = rset_word;
3402 if (Heap::InNewSpace(*slot)) { 3449 for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) {
3403 ASSERT((*slot)->IsHeapObject()); 3450 // Do not dereference pointers at or past object_end.
3404 pointers_to_new_space_found = true; 3451 if ((rset_word & bitmask) != 0 && object_address < object_end) {
3452 Object** object_p = reinterpret_cast<Object**>(object_address);
3453 if (Heap::InNewSpace(*object_p)) {
3454 copy_object_func(reinterpret_cast<HeapObject**>(object_p));
3455 }
3456 // If this pointer does not need to be remembered anymore, clear
3457 // the remembered set bit.
3458 if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
3459 set_bits_count++;
3460 }
3461 object_address += kPointerSize;
3405 } 3462 }
3463 // Update the remembered set if it has changed.
3464 if (result_rset != rset_word) {
3465 Memory::uint32_at(rset_address) = result_rset;
3466 }
3467 } else {
3468 // No bits in the word were set. This is the common case.
3469 object_address += kPointerSize * kBitsPerInt;
3406 } 3470 }
3407 slot_address += kPointerSize; 3471 rset_address += kIntSize;
3408 } 3472 }
3409 return pointers_to_new_space_found; 3473 return set_bits_count;
3410 } 3474 }
3411 3475
3412 3476
3413 // Compute start address of the first map following given addr. 3477 void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
3414 static inline Address MapStartAlign(Address addr) { 3478 ASSERT(Page::is_rset_in_use());
3415 Address page = Page::FromAddress(addr)->ObjectAreaStart(); 3479 ASSERT(space == old_pointer_space_ || space == map_space_);
3416 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
3417 }
3418 3480
3419 3481 static void* paged_rset_histogram = StatsTable::CreateHistogram(
3420 // Compute end address of the first map preceding given addr. 3482 "V8.RSetPaged",
3421 static inline Address MapEndAlign(Address addr) { 3483 0,
3422 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart(); 3484 Page::kObjectAreaSize / kPointerSize,
3423 return page + ((addr - page) / Map::kSize * Map::kSize); 3485 30);
3424 }
3425
3426
3427 static bool IteratePointersInDirtyMaps(Address start,
3428 Address end,
3429 ObjectSlotCallback copy_object_func) {
3430 ASSERT(MapStartAlign(start) == start);
3431 ASSERT(MapEndAlign(end) == end);
3432
3433 Address map_address = start;
3434 bool pointers_to_new_space_found = false;
3435
3436 while (map_address < end) {
3437 ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
3438 ASSERT(Memory::Object_at(map_address)->IsMap());
3439
3440 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
3441 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
3442
3443 if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
3444 pointer_fields_end,
3445 copy_object_func)) {
3446 pointers_to_new_space_found = true;
3447 }
3448
3449 map_address += Map::kSize;
3450 }
3451
3452 return pointers_to_new_space_found;
3453 }
3454
3455
3456 bool Heap::IteratePointersInDirtyMapsRegion(
3457 Address start,
3458 Address end,
3459 ObjectSlotCallback copy_object_func) {
3460 Address map_aligned_start = MapStartAlign(start);
3461 Address map_aligned_end = MapEndAlign(end);
3462
3463 bool contains_pointers_to_new_space = false;
3464
3465 if (map_aligned_start != start) {
3466 Address prev_map = map_aligned_start - Map::kSize;
3467 ASSERT(Memory::Object_at(prev_map)->IsMap());
3468
3469 Address pointer_fields_start =
3470 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
3471
3472 Address pointer_fields_end =
3473 Min(prev_map + Map::kCodeCacheOffset + kPointerSize, end);
3474
3475 contains_pointers_to_new_space =
3476 IteratePointersInDirtyRegion(pointer_fields_start,
3477 pointer_fields_end,
3478 copy_object_func)
3479 || contains_pointers_to_new_space;
3480 }
3481
3482 contains_pointers_to_new_space =
3483 IteratePointersInDirtyMaps(map_aligned_start,
3484 map_aligned_end,
3485 copy_object_func)
3486 || contains_pointers_to_new_space;
3487
3488 if (map_aligned_end != end) {
3489 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
3490
3491 Address pointer_fields_start = map_aligned_end + Map::kPrototypeOffset;
3492
3493 Address pointer_fields_end =
3494 Min(end, map_aligned_end + Map::kCodeCacheOffset + kPointerSize);
3495
3496 contains_pointers_to_new_space =
3497 IteratePointersInDirtyRegion(pointer_fields_start,
3498 pointer_fields_end,
3499 copy_object_func)
3500 || contains_pointers_to_new_space;
3501 }
3502
3503 return contains_pointers_to_new_space;
3504 }
3505
3506
3507 void Heap::IterateAndMarkPointersToNewSpace(Address start,
3508 Address end,
3509 ObjectSlotCallback callback) {
3510 Address slot_address = start;
3511 Page* page = Page::FromAddress(start);
3512
3513 uint32_t marks = page->GetRegionMarks();
3514
3515 while (slot_address < end) {
3516 Object** slot = reinterpret_cast<Object**>(slot_address);
3517 if (Heap::InNewSpace(*slot)) {
3518 ASSERT((*slot)->IsHeapObject());
3519 callback(reinterpret_cast<HeapObject**>(slot));
3520 if (Heap::InNewSpace(*slot)) {
3521 ASSERT((*slot)->IsHeapObject());
3522 marks |= page->GetRegionMaskForAddress(slot_address);
3523 }
3524 }
3525 slot_address += kPointerSize;
3526 }
3527
3528 page->SetRegionMarks(marks);
3529 }
3530
3531
3532 uint32_t Heap::IterateDirtyRegions(
3533 uint32_t marks,
3534 Address area_start,
3535 Address area_end,
3536 DirtyRegionCallback visit_dirty_region,
3537 ObjectSlotCallback copy_object_func) {
3538 uint32_t newmarks = 0;
3539 uint32_t mask = 1;
3540
3541 if (area_start >= area_end) {
3542 return newmarks;
3543 }
3544
3545 Address region_start = area_start;
3546
3547 // area_start does not necessarily coincide with start of the first region.
3548 // Thus to calculate the beginning of the next region we have to align
3549 // area_start by Page::kRegionSize.
3550 Address second_region =
3551 reinterpret_cast<Address>(
3552 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
3553 ~Page::kRegionAlignmentMask);
3554
3555 // Next region might be beyond area_end.
3556 Address region_end = Min(second_region, area_end);
3557
3558 if (marks & mask) {
3559 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
3560 newmarks |= mask;
3561 }
3562 }
3563 mask <<= 1;
3564
3565 // Iterate subsequent regions which lie fully inside [area_start, area_end[.
3566 region_start = region_end;
3567 region_end = region_start + Page::kRegionSize;
3568
3569 while (region_end <= area_end) {
3570 if (marks & mask) {
3571 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
3572 newmarks |= mask;
3573 }
3574 }
3575
3576 region_start = region_end;
3577 region_end = region_start + Page::kRegionSize;
3578
3579 mask <<= 1;
3580 }
3581
3582 if (region_start != area_end) {
3583 // A small piece of the area is left un-iterated because area_end does not coincide
3584 // with region end. Check whether region covering last part of area is
3585 // dirty.
3586 if (marks & mask) {
3587 if (visit_dirty_region(region_start, area_end, copy_object_func)) {
3588 newmarks |= mask;
3589 }
3590 }
3591 }
3592
3593 return newmarks;
3594 }
3595
3596
3597
3598 void Heap::IterateDirtyRegions(
3599 PagedSpace* space,
3600 DirtyRegionCallback visit_dirty_region,
3601 ObjectSlotCallback copy_object_func,
3602 ExpectedPageWatermarkState expected_page_watermark_state) {
3603 3486
3604 PageIterator it(space, PageIterator::PAGES_IN_USE); 3487 PageIterator it(space, PageIterator::PAGES_IN_USE);
3605
3606 while (it.has_next()) { 3488 while (it.has_next()) {
3607 Page* page = it.next(); 3489 Page* page = it.next();
3608 uint32_t marks = page->GetRegionMarks(); 3490 int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
3609 3491 page->RSetStart(), copy_object_func);
3610 if (marks != Page::kAllRegionsCleanMarks) { 3492 if (paged_rset_histogram != NULL) {
3611 Address start = page->ObjectAreaStart(); 3493 StatsTable::AddHistogramSample(paged_rset_histogram, count);
3612
3613 // Do not try to visit pointers beyond page allocation watermark.
3614 // Page can contain garbage pointers there.
3615 Address end;
3616
3617 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
3618 page->IsWatermarkValid()) {
3619 end = page->AllocationWatermark();
3620 } else {
3621 end = page->CachedAllocationWatermark();
3622 }
3623
3624 ASSERT(space == old_pointer_space_ ||
3625 (space == map_space_ &&
3626 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
3627
3628 page->SetRegionMarks(IterateDirtyRegions(marks,
3629 start,
3630 end,
3631 visit_dirty_region,
3632 copy_object_func));
3633 } 3494 }
3634
3635 // Mark page watermark as invalid to maintain watermark validity invariant.
3636 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
3637 page->InvalidateWatermark(true);
3638 } 3495 }
3639 } 3496 }
3640 3497
3641 3498
3642 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { 3499 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
3643 IterateStrongRoots(v, mode); 3500 IterateStrongRoots(v, mode);
3644 IterateWeakRoots(v, mode); 3501 IterateWeakRoots(v, mode);
3645 } 3502 }
3646 3503
3647 3504
(...skipping 996 matching lines...)
4644 void ExternalStringTable::TearDown() { 4501 void ExternalStringTable::TearDown() {
4645 new_space_strings_.Free(); 4502 new_space_strings_.Free();
4646 old_space_strings_.Free(); 4503 old_space_strings_.Free();
4647 } 4504 }
4648 4505
4649 4506
4650 List<Object*> ExternalStringTable::new_space_strings_; 4507 List<Object*> ExternalStringTable::new_space_strings_;
4651 List<Object*> ExternalStringTable::old_space_strings_; 4508 List<Object*> ExternalStringTable::old_space_strings_;
4652 4509
4653 } } // namespace v8::internal 4510 } } // namespace v8::internal