Chromium Code Reviews

Side by Side Diff: src/heap.cc

Issue 2101002: Cardmarking writebarrier. (Closed)
Patch Set: fixed review comments (created 10 years, 7 months ago)
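For readers coming to the patch cold: it replaces V8's word-granularity remembered set with per-page region ("card") marks that the write barrier sets and the scavenger scans. The sketch below is a minimal, self-contained illustration of that idea only; the page size, region size, and helper names (kPageSize, kRegionSizeLog2, RecordWrite) are assumptions chosen for easy arithmetic, not the declarations this patch actually adds.

#include <cstdint>
#include <cstdio>

// Illustrative geometry: an 8 KB page split into 256-byte regions gives 32
// regions, so one uint32_t of dirty marks per page (the diff below does use a
// uint32_t of region marks; the sizes here are made up).
const uintptr_t kPageSize = 8 * 1024;
const uintptr_t kRegionSizeLog2 = 8;                 // 256-byte regions
const uintptr_t kPageAlignmentMask = kPageSize - 1;

struct Page {
  uint32_t region_marks;  // one "dirty" bit per region

  static uint32_t RegionMaskForOffset(uintptr_t offset_in_page) {
    return 1u << (offset_in_page >> kRegionSizeLog2);
  }
};

// Write barrier: after storing a pointer to a new-space object into the slot
// at slot_address, mark the region holding the slot dirty. A later scavenge
// rescans only dirty regions instead of consulting a per-word remembered set.
void RecordWrite(Page* page, uintptr_t slot_address) {
  page->region_marks |= Page::RegionMaskForOffset(slot_address & kPageAlignmentMask);
}

int main() {
  Page page = { 0 };
  RecordWrite(&page, 0x10428);  // slot at page offset 0x428 lands in region 4
  std::printf("%08x\n", (unsigned)page.region_marks);  // prints 00000010
  return 0;
}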
1 // Copyright 2009 the V8 project authors. All rights reserved. 1 // Copyright 2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 300 matching lines...)
311 unflattened_strings_length_ = 0; 311 unflattened_strings_length_ = 0;
312 #ifdef DEBUG 312 #ifdef DEBUG
313 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC); 313 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
314 allow_allocation(false); 314 allow_allocation(false);
315 315
316 if (FLAG_verify_heap) { 316 if (FLAG_verify_heap) {
317 Verify(); 317 Verify();
318 } 318 }
319 319
320 if (FLAG_gc_verbose) Print(); 320 if (FLAG_gc_verbose) Print();
321
322 if (FLAG_print_rset) {
323 // Not all spaces have remembered set bits that we care about.
324 old_pointer_space_->PrintRSet();
325 map_space_->PrintRSet();
326 lo_space_->PrintRSet();
327 }
328 #endif 321 #endif
329 322
330 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 323 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
331 ReportStatisticsBeforeGC(); 324 ReportStatisticsBeforeGC();
332 #endif 325 #endif
333 } 326 }
334 327
335 int Heap::SizeOfObjects() { 328 int Heap::SizeOfObjects() {
336 int total = 0; 329 int total = 0;
337 AllSpaces spaces; 330 AllSpaces spaces;
(...skipping 166 matching lines...)
504 gc_performed = true; 497 gc_performed = true;
505 } 498 }
506 if (!(map_space->ReserveSpace(map_space_size))) { 499 if (!(map_space->ReserveSpace(map_space_size))) {
507 Heap::CollectGarbage(map_space_size, MAP_SPACE); 500 Heap::CollectGarbage(map_space_size, MAP_SPACE);
508 gc_performed = true; 501 gc_performed = true;
509 } 502 }
510 if (!(cell_space->ReserveSpace(cell_space_size))) { 503 if (!(cell_space->ReserveSpace(cell_space_size))) {
511 Heap::CollectGarbage(cell_space_size, CELL_SPACE); 504 Heap::CollectGarbage(cell_space_size, CELL_SPACE);
512 gc_performed = true; 505 gc_performed = true;
513 } 506 }
514 // We add a slack-factor of 2 in order to have space for the remembered 507 // We add a slack-factor of 2 in order to have space for a series of
515 // set and a series of large-object allocations that are only just larger 508 // large-object allocations that are only just larger than the page size.
516 // than the page size.
517 large_object_size *= 2; 509 large_object_size *= 2;
518 // The ReserveSpace method on the large object space checks how much 510 // The ReserveSpace method on the large object space checks how much
519 // we can expand the old generation. This includes expansion caused by 511 // we can expand the old generation. This includes expansion caused by
520 // allocation in the other spaces. 512 // allocation in the other spaces.
521 large_object_size += cell_space_size + map_space_size + code_space_size + 513 large_object_size += cell_space_size + map_space_size + code_space_size +
522 data_space_size + pointer_space_size; 514 data_space_size + pointer_space_size;
523 if (!(lo_space->ReserveSpace(large_object_size))) { 515 if (!(lo_space->ReserveSpace(large_object_size))) {
524 Heap::CollectGarbage(large_object_size, LO_SPACE); 516 Heap::CollectGarbage(large_object_size, LO_SPACE);
525 gc_performed = true; 517 gc_performed = true;
526 } 518 }
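To make the sizing above concrete (illustrative numbers only): if large_object_size starts at 64 KB and each of the five other spaces has reserved 8 KB, the large object space is asked to reserve 2 * 64 KB + 5 * 8 KB = 168 KB, which leaves headroom both for a series of objects just over the page size and for old-generation expansion caused by allocation in the other spaces.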
(...skipping 30 matching lines...)
557 }; 549 };
558 550
559 551
560 void Heap::ClearJSFunctionResultCaches() { 552 void Heap::ClearJSFunctionResultCaches() {
561 if (Bootstrapper::IsActive()) return; 553 if (Bootstrapper::IsActive()) return;
562 ClearThreadJSFunctionResultCachesVisitor visitor; 554 ClearThreadJSFunctionResultCachesVisitor visitor;
563 ThreadManager::IterateThreads(&visitor); 555 ThreadManager::IterateThreads(&visitor);
564 } 556 }
565 557
566 558
559 #ifdef DEBUG
560
561 enum PageWatermarkValidity {
562 ALL_VALID,
563 ALL_INVALID
564 };
565
566 static void VerifyPageWatermarkValidity(PagedSpace* space,
567 PageWatermarkValidity validity) {
568 PageIterator it(space, PageIterator::PAGES_IN_USE);
569 bool expected_value = (validity == ALL_VALID);
570 while (it.has_next()) {
571 Page* page = it.next();
572 ASSERT(page->IsWatermarkValid() == expected_value);
573 }
574 }
575 #endif
576
577
567 void Heap::PerformGarbageCollection(AllocationSpace space, 578 void Heap::PerformGarbageCollection(AllocationSpace space,
568 GarbageCollector collector, 579 GarbageCollector collector,
569 GCTracer* tracer) { 580 GCTracer* tracer) {
570 VerifySymbolTable(); 581 VerifySymbolTable();
571 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) { 582 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
572 ASSERT(!allocation_allowed_); 583 ASSERT(!allocation_allowed_);
573 GCTracer::ExternalScope scope(tracer); 584 GCTracer::ExternalScope scope(tracer);
574 global_gc_prologue_callback_(); 585 global_gc_prologue_callback_();
575 } 586 }
576 587
(...skipping 218 matching lines...)
795 } 806 }
796 807
797 808
798 void Heap::Scavenge() { 809 void Heap::Scavenge() {
799 #ifdef DEBUG 810 #ifdef DEBUG
800 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); 811 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
801 #endif 812 #endif
802 813
803 gc_state_ = SCAVENGE; 814 gc_state_ = SCAVENGE;
804 815
816 Page::FlipMeaningOfInvalidatedWatermarkFlag();
817 #ifdef DEBUG
818 VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
819 VerifyPageWatermarkValidity(map_space_, ALL_VALID);
820 #endif
821
 822 // We do not update the allocation watermark of the top page during linear
 823 // allocation, to avoid overhead. So, to maintain the watermark invariant,
 824 // we manually cache the watermark and mark the top page as having an
 825 // invalid watermark. This guarantees that dirty-region iteration will use a
 826 // correct watermark even if a linear allocation happens.
827 old_pointer_space_->FlushTopPageWatermark();
828 map_space_->FlushTopPageWatermark();
829
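A minimal sketch of what the watermark bookkeeping above amounts to, with placeholder types rather than the real Page/PagedSpace API: flushing records how far linear allocation has actually progressed and then flags the live watermark invalid, so the dirty-region iteration shown further down falls back to the cached value (the WATERMARK_CAN_BE_INVALID case).

#include <cstdint>

struct PageSketch {
  uintptr_t cached_watermark;  // snapshot used while the live watermark is invalid
  bool watermark_valid;
};

struct PagedSpaceSketch {
  PageSketch* top_page;
  uintptr_t allocation_top;    // bump pointer, not written back on every store

  void FlushTopPageWatermark() {
    top_page->cached_watermark = allocation_top;  // cache the true allocation front
    top_page->watermark_valid = false;            // force readers to use the cache
  }
};

int main() {
  PageSketch page = { 0, true };
  PagedSpaceSketch space = { &page, 0x1000 };
  space.FlushTopPageWatermark();
  return page.watermark_valid ? 1 : 0;  // 0: watermark is now flagged invalid
}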
805 // Implements Cheney's copying algorithm 830 // Implements Cheney's copying algorithm
806 LOG(ResourceEvent("scavenge", "begin")); 831 LOG(ResourceEvent("scavenge", "begin"));
807 832
808 // Clear descriptor cache. 833 // Clear descriptor cache.
809 DescriptorLookupCache::Clear(); 834 DescriptorLookupCache::Clear();
810 835
811 // Used for updating survived_since_last_expansion_ at function end. 836 // Used for updating survived_since_last_expansion_ at function end.
812 int survived_watermark = PromotedSpaceSize(); 837 int survived_watermark = PromotedSpaceSize();
813 838
814 CheckNewSpaceExpansionCriteria(); 839 CheckNewSpaceExpansionCriteria();
(...skipping 22 matching lines...)
837 // objects are at least one pointer in size. 862 // objects are at least one pointer in size.
838 Address new_space_front = new_space_.ToSpaceLow(); 863 Address new_space_front = new_space_.ToSpaceLow();
839 promotion_queue.Initialize(new_space_.ToSpaceHigh()); 864 promotion_queue.Initialize(new_space_.ToSpaceHigh());
840 865
841 ScavengeVisitor scavenge_visitor; 866 ScavengeVisitor scavenge_visitor;
842 // Copy roots. 867 // Copy roots.
843 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE); 868 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
844 869
845 // Copy objects reachable from the old generation. By definition, 870 // Copy objects reachable from the old generation. By definition,
846 // there are no intergenerational pointers in code or data spaces. 871 // there are no intergenerational pointers in code or data spaces.
847 IterateRSet(old_pointer_space_, &ScavengePointer); 872 IterateDirtyRegions(old_pointer_space_,
848 IterateRSet(map_space_, &ScavengePointer); 873 &IteratePointersInDirtyRegion,
849 lo_space_->IterateRSet(&ScavengePointer); 874 &ScavengePointer,
875 WATERMARK_CAN_BE_INVALID);
876
877 IterateDirtyRegions(map_space_,
878 &IteratePointersInDirtyMapsRegion,
879 &ScavengePointer,
880 WATERMARK_CAN_BE_INVALID);
881
882 lo_space_->IterateDirtyRegions(&ScavengePointer);
850 883
851 // Copy objects reachable from cells by scavenging cell values directly. 884 // Copy objects reachable from cells by scavenging cell values directly.
852 HeapObjectIterator cell_iterator(cell_space_); 885 HeapObjectIterator cell_iterator(cell_space_);
853 for (HeapObject* cell = cell_iterator.next(); 886 for (HeapObject* cell = cell_iterator.next();
854 cell != NULL; cell = cell_iterator.next()) { 887 cell != NULL; cell = cell_iterator.next()) {
855 if (cell->IsJSGlobalPropertyCell()) { 888 if (cell->IsJSGlobalPropertyCell()) {
856 Address value_address = 889 Address value_address =
857 reinterpret_cast<Address>(cell) + 890 reinterpret_cast<Address>(cell) +
858 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); 891 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
859 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); 892 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
(...skipping 82 matching lines...)
942 } 975 }
943 976
944 // Promote and process all the to-be-promoted objects. 977 // Promote and process all the to-be-promoted objects.
945 while (!promotion_queue.is_empty()) { 978 while (!promotion_queue.is_empty()) {
946 HeapObject* source; 979 HeapObject* source;
947 Map* map; 980 Map* map;
948 promotion_queue.remove(&source, &map); 981 promotion_queue.remove(&source, &map);
949 // Copy the from-space object to its new location (given by the 982 // Copy the from-space object to its new location (given by the
950 // forwarding address) and fix its map. 983 // forwarding address) and fix its map.
951 HeapObject* target = source->map_word().ToForwardingAddress(); 984 HeapObject* target = source->map_word().ToForwardingAddress();
952 CopyBlock(reinterpret_cast<Object**>(target->address()), 985 int size = source->SizeFromMap(map);
953 reinterpret_cast<Object**>(source->address()), 986 CopyBlock(target->address(), source->address(), size);
954 source->SizeFromMap(map));
955 target->set_map(map); 987 target->set_map(map);
956 988
957 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 989 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
958 // Update NewSpace stats if necessary. 990 // Update NewSpace stats if necessary.
959 RecordCopiedObject(target); 991 RecordCopiedObject(target);
960 #endif 992 #endif
961 // Visit the newly copied object for pointers to new space. 993 // Visit the newly copied object for pointers to new space.
962 target->Iterate(scavenge_visitor); 994 ASSERT(!target->IsMap());
963 UpdateRSet(target); 995 IterateAndMarkPointersToNewSpace(target->address(),
996 target->address() + size,
997 &ScavengePointer);
964 } 998 }
965 999
966 // Take another spin if there are now unswept objects in new space 1000 // Take another spin if there are now unswept objects in new space
967 // (there are currently no more unswept promoted objects). 1001 // (there are currently no more unswept promoted objects).
968 } while (new_space_front < new_space_.top()); 1002 } while (new_space_front < new_space_.top());
969 1003
970 return new_space_front; 1004 return new_space_front;
971 } 1005 }
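Since the do-while above is the heart of the scavenge, here is a self-contained toy version of the Cheney scan it implements: evacuated objects are scanned in allocation order, scanning may evacuate more objects and push the to-space top further, and the loop ends when the scan front catches up. None of this is V8's object model; the types are invented for the example, and the toy leaks its copies to stay short.

#include <cstdio>
#include <vector>

struct ToyObject {
  int num_slots;
  ToyObject* slots[2];     // up to two outgoing pointers
  ToyObject* forwarding;   // set once the object has been evacuated
};

struct ToySpace {
  std::vector<ToyObject*> objects;  // allocation order stands in for addresses
  size_t top() const { return objects.size(); }
};

static ToyObject* Evacuate(ToyObject* obj, ToySpace* to_space) {
  if (obj == nullptr) return nullptr;
  if (obj->forwarding != nullptr) return obj->forwarding;  // already copied
  ToyObject* copy = new ToyObject(*obj);                   // "copy into to-space"
  obj->forwarding = copy;
  to_space->objects.push_back(copy);
  return copy;
}

int main() {
  // From-space graph: a -> b -> c.
  ToyObject c = {0, {nullptr, nullptr}, nullptr};
  ToyObject b = {1, {&c, nullptr}, nullptr};
  ToyObject a = {1, {&b, nullptr}, nullptr};

  ToySpace to_space;
  Evacuate(&a, &to_space);  // roots are evacuated first

  // Cheney scan: objects between front and top() are unswept; scanning them
  // may evacuate more objects, which bumps top() and keeps the loop running.
  for (size_t front = 0; front < to_space.top(); front++) {
    ToyObject* obj = to_space.objects[front];
    for (int i = 0; i < obj->num_slots; i++) {
      obj->slots[i] = Evacuate(obj->slots[i], &to_space);
    }
  }

  std::printf("copied %zu objects\n", to_space.top());  // copied 3 objects
  return 0;
}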
972 1006
973 1007
974 void Heap::ClearRSetRange(Address start, int size_in_bytes) {
975 uint32_t start_bit;
976 Address start_word_address =
977 Page::ComputeRSetBitPosition(start, 0, &start_bit);
978 uint32_t end_bit;
979 Address end_word_address =
980 Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
981 0,
982 &end_bit);
983
984 // We want to clear the bits in the starting word starting with the
985 // first bit, and in the ending word up to and including the last
986 // bit. Build a pair of bitmasks to do that.
987 uint32_t start_bitmask = start_bit - 1;
988 uint32_t end_bitmask = ~((end_bit << 1) - 1);
989
990 // If the start address and end address are the same, we mask that
991 // word once, otherwise mask the starting and ending word
992 // separately and all the ones in between.
993 if (start_word_address == end_word_address) {
994 Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
995 } else {
996 Memory::uint32_at(start_word_address) &= start_bitmask;
997 Memory::uint32_at(end_word_address) &= end_bitmask;
998 start_word_address += kIntSize;
999 memset(start_word_address, 0, end_word_address - start_word_address);
1000 }
1001 }
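The start and end masks built in the removed ClearRSetRange above are easier to follow with concrete values. The snippet assumes, as the arithmetic implies, that ComputeRSetBitPosition hands back a one-hot mask for the slot's bit; the particular bits are arbitrary.

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t start_bit = 1u << 3;                    // bit of the first slot to clear
  uint32_t end_bit   = 1u << 5;                    // bit of the last slot to clear
  uint32_t start_bitmask = start_bit - 1;          // 0x00000007: keeps bits below start
  uint32_t end_bitmask   = ~((end_bit << 1) - 1);  // 0xffffffc0: keeps bits above end

  // Same-word case from ClearRSetRange: clear exactly bits 3..5, keep the rest.
  uint32_t word = 0xffffffffu;
  std::printf("%08x\n", (unsigned)(word & (start_bitmask | end_bitmask)));  // ffffffc7
  return 0;
}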
1002
1003
1004 class UpdateRSetVisitor: public ObjectVisitor {
1005 public:
1006
1007 void VisitPointer(Object** p) {
1008 UpdateRSet(p);
1009 }
1010
1011 void VisitPointers(Object** start, Object** end) {
1012 // Update a store into slots [start, end), used (a) to update remembered
1013 // set when promoting a young object to old space or (b) to rebuild
1014 // remembered sets after a mark-compact collection.
1015 for (Object** p = start; p < end; p++) UpdateRSet(p);
1016 }
1017 private:
1018
1019 void UpdateRSet(Object** p) {
1020 // The remembered set should not be set. It should be clear for objects
1021 // newly copied to old space, and it is cleared before rebuilding in the
1022 // mark-compact collector.
1023 ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
1024 if (Heap::InNewSpace(*p)) {
1025 Page::SetRSet(reinterpret_cast<Address>(p), 0);
1026 }
1027 }
1028 };
1029
1030
1031 int Heap::UpdateRSet(HeapObject* obj) {
1032 ASSERT(!InNewSpace(obj));
1033 // Special handling of fixed arrays to iterate the body based on the start
1034 // address and offset. Just iterating the pointers as in UpdateRSetVisitor
1035 // will not work because Page::SetRSet needs to have the start of the
1036 // object for large object pages.
1037 if (obj->IsFixedArray()) {
1038 FixedArray* array = FixedArray::cast(obj);
1039 int length = array->length();
1040 for (int i = 0; i < length; i++) {
1041 int offset = FixedArray::kHeaderSize + i * kPointerSize;
1042 ASSERT(!Page::IsRSetSet(obj->address(), offset));
1043 if (Heap::InNewSpace(array->get(i))) {
1044 Page::SetRSet(obj->address(), offset);
1045 }
1046 }
1047 } else if (!obj->IsCode()) {
1048 // Skip code object, we know it does not contain inter-generational
1049 // pointers.
1050 UpdateRSetVisitor v;
1051 obj->Iterate(&v);
1052 }
1053 return obj->Size();
1054 }
1055
1056
1057 void Heap::RebuildRSets() {
1058 // By definition, we do not care about remembered set bits in code,
1059 // data, or cell spaces.
1060 map_space_->ClearRSet();
1061 RebuildRSets(map_space_);
1062
1063 old_pointer_space_->ClearRSet();
1064 RebuildRSets(old_pointer_space_);
1065
1066 Heap::lo_space_->ClearRSet();
1067 RebuildRSets(lo_space_);
1068 }
1069
1070
1071 void Heap::RebuildRSets(PagedSpace* space) {
1072 HeapObjectIterator it(space);
1073 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1074 Heap::UpdateRSet(obj);
1075 }
1076
1077
1078 void Heap::RebuildRSets(LargeObjectSpace* space) {
1079 LargeObjectIterator it(space);
1080 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1081 Heap::UpdateRSet(obj);
1082 }
1083
1084
1085 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 1008 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1086 void Heap::RecordCopiedObject(HeapObject* obj) { 1009 void Heap::RecordCopiedObject(HeapObject* obj) {
1087 bool should_record = false; 1010 bool should_record = false;
1088 #ifdef DEBUG 1011 #ifdef DEBUG
1089 should_record = FLAG_heap_stats; 1012 should_record = FLAG_heap_stats;
1090 #endif 1013 #endif
1091 #ifdef ENABLE_LOGGING_AND_PROFILING 1014 #ifdef ENABLE_LOGGING_AND_PROFILING
1092 should_record = should_record || FLAG_log_gc; 1015 should_record = should_record || FLAG_log_gc;
1093 #endif 1016 #endif
1094 if (should_record) { 1017 if (should_record) {
1095 if (new_space_.Contains(obj)) { 1018 if (new_space_.Contains(obj)) {
1096 new_space_.RecordAllocation(obj); 1019 new_space_.RecordAllocation(obj);
1097 } else { 1020 } else {
1098 new_space_.RecordPromotion(obj); 1021 new_space_.RecordPromotion(obj);
1099 } 1022 }
1100 } 1023 }
1101 } 1024 }
1102 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 1025 #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1103 1026
1104 1027
1105 1028
1106 HeapObject* Heap::MigrateObject(HeapObject* source, 1029 HeapObject* Heap::MigrateObject(HeapObject* source,
1107 HeapObject* target, 1030 HeapObject* target,
1108 int size) { 1031 int size) {
1109 // Copy the content of source to target. 1032 // Copy the content of source to target.
1110 CopyBlock(reinterpret_cast<Object**>(target->address()), 1033 CopyBlock(target->address(), source->address(), size);
1111 reinterpret_cast<Object**>(source->address()),
1112 size);
1113 1034
1114 // Set the forwarding address. 1035 // Set the forwarding address.
1115 source->set_map_word(MapWord::FromForwardingAddress(target)); 1036 source->set_map_word(MapWord::FromForwardingAddress(target));
1116 1037
1117 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 1038 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1118 // Update NewSpace stats if necessary. 1039 // Update NewSpace stats if necessary.
1119 RecordCopiedObject(target); 1040 RecordCopiedObject(target);
1120 #endif 1041 #endif
1121 1042
1122 return target; 1043 return target;
(...skipping 536 matching lines...)
1659 if (obj->IsFailure()) return false; 1580 if (obj->IsFailure()) return false;
1660 roots_[constant_symbol_table[i].index] = String::cast(obj); 1581 roots_[constant_symbol_table[i].index] = String::cast(obj);
1661 } 1582 }
1662 1583
1663 // Allocate the hidden symbol which is used to identify the hidden properties 1584 // Allocate the hidden symbol which is used to identify the hidden properties
1664 // in JSObjects. The hash code has a special value so that it will not match 1585 // in JSObjects. The hash code has a special value so that it will not match
1665 // the empty string when searching for the property. It cannot be part of the 1586 // the empty string when searching for the property. It cannot be part of the
1666 // loop above because it needs to be allocated manually with the special 1587 // loop above because it needs to be allocated manually with the special
1667 // hash code in place. The hash code for the hidden_symbol is zero to ensure 1588 // hash code in place. The hash code for the hidden_symbol is zero to ensure
1668 // that it will always be at the first entry in property descriptors. 1589 // that it will always be at the first entry in property descriptors.
1669 obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask); 1590 obj = AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
1670 if (obj->IsFailure()) return false; 1591 if (obj->IsFailure()) return false;
1671 hidden_symbol_ = String::cast(obj); 1592 hidden_symbol_ = String::cast(obj);
1672 1593
1673 // Allocate the proxy for __proto__. 1594 // Allocate the proxy for __proto__.
1674 obj = AllocateProxy((Address) &Accessors::ObjectPrototype); 1595 obj = AllocateProxy((Address) &Accessors::ObjectPrototype);
1675 if (obj->IsFailure()) return false; 1596 if (obj->IsFailure()) return false;
1676 set_prototype_accessors(Proxy::cast(obj)); 1597 set_prototype_accessors(Proxy::cast(obj));
1677 1598
1678 // Allocate the code_stubs dictionary. The initial size is set to avoid 1599 // Allocate the code_stubs dictionary. The initial size is set to avoid
1679 // expanding the dictionary during bootstrapping. 1600 // expanding the dictionary during bootstrapping.
(...skipping 215 matching lines...)
1895 share->set_formal_parameter_count(0); 1816 share->set_formal_parameter_count(0);
1896 share->set_instance_class_name(Object_symbol()); 1817 share->set_instance_class_name(Object_symbol());
1897 share->set_function_data(undefined_value()); 1818 share->set_function_data(undefined_value());
1898 share->set_script(undefined_value()); 1819 share->set_script(undefined_value());
1899 share->set_start_position_and_type(0); 1820 share->set_start_position_and_type(0);
1900 share->set_debug_info(undefined_value()); 1821 share->set_debug_info(undefined_value());
1901 share->set_inferred_name(empty_string()); 1822 share->set_inferred_name(empty_string());
1902 share->set_compiler_hints(0); 1823 share->set_compiler_hints(0);
1903 share->set_this_property_assignments_count(0); 1824 share->set_this_property_assignments_count(0);
1904 share->set_this_property_assignments(undefined_value()); 1825 share->set_this_property_assignments(undefined_value());
1826 share->set_num_literals(0);
1827 share->set_end_position(0);
1828 share->set_function_token_position(0);
1905 return result; 1829 return result;
1906 } 1830 }
1907 1831
1908 1832
1909 // Returns true for a character in a range. Both limits are inclusive. 1833 // Returns true for a character in a range. Both limits are inclusive.
1910 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) { 1834 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
1911 // This makes use of the unsigned wraparound. 1835
1912 return character - from <= to - from; 1836 return character - from <= to - from;
1913 } 1837 }
1914 1838
(...skipping 241 matching lines...)
2156 } 2080 }
2157 if (pretenure == NOT_TENURED) { 2081 if (pretenure == NOT_TENURED) {
2158 return AllocateByteArray(length); 2082 return AllocateByteArray(length);
2159 } 2083 }
2160 int size = ByteArray::SizeFor(length); 2084 int size = ByteArray::SizeFor(length);
2161 Object* result = (size <= MaxObjectSizeInPagedSpace()) 2085 Object* result = (size <= MaxObjectSizeInPagedSpace())
2162 ? old_data_space_->AllocateRaw(size) 2086 ? old_data_space_->AllocateRaw(size)
2163 : lo_space_->AllocateRaw(size); 2087 : lo_space_->AllocateRaw(size);
2164 if (result->IsFailure()) return result; 2088 if (result->IsFailure()) return result;
2165 2089
2166 reinterpret_cast<Array*>(result)->set_map(byte_array_map()); 2090 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2167 reinterpret_cast<Array*>(result)->set_length(length); 2091 reinterpret_cast<ByteArray*>(result)->set_length(length);
2168 return result; 2092 return result;
2169 } 2093 }
2170 2094
2171 2095
2172 Object* Heap::AllocateByteArray(int length) { 2096 Object* Heap::AllocateByteArray(int length) {
2173 if (length < 0 || length > ByteArray::kMaxLength) { 2097 if (length < 0 || length > ByteArray::kMaxLength) {
2174 return Failure::OutOfMemoryException(); 2098 return Failure::OutOfMemoryException();
2175 } 2099 }
2176 int size = ByteArray::SizeFor(length); 2100 int size = ByteArray::SizeFor(length);
2177 AllocationSpace space = 2101 AllocationSpace space =
2178 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE; 2102 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
2179 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE); 2103 Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
2180 if (result->IsFailure()) return result; 2104 if (result->IsFailure()) return result;
2181 2105
2182 reinterpret_cast<Array*>(result)->set_map(byte_array_map()); 2106 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2183 reinterpret_cast<Array*>(result)->set_length(length); 2107 reinterpret_cast<ByteArray*>(result)->set_length(length);
2184 return result; 2108 return result;
2185 } 2109 }
2186 2110
2187 2111
2188 void Heap::CreateFillerObjectAt(Address addr, int size) { 2112 void Heap::CreateFillerObjectAt(Address addr, int size) {
2189 if (size == 0) return; 2113 if (size == 0) return;
2190 HeapObject* filler = HeapObject::FromAddress(addr); 2114 HeapObject* filler = HeapObject::FromAddress(addr);
2191 if (size == kPointerSize) { 2115 if (size == kPointerSize) {
2192 filler->set_map(one_pointer_filler_map()); 2116 filler->set_map(one_pointer_filler_map());
2193 } else if (size == 2 * kPointerSize) { 2117 } else if (size == 2 * kPointerSize) {
(...skipping 95 matching lines...)
2289 result = lo_space_->AllocateRawCode(obj_size); 2213 result = lo_space_->AllocateRawCode(obj_size);
2290 } else { 2214 } else {
2291 result = code_space_->AllocateRaw(obj_size); 2215 result = code_space_->AllocateRaw(obj_size);
2292 } 2216 }
2293 2217
2294 if (result->IsFailure()) return result; 2218 if (result->IsFailure()) return result;
2295 2219
2296 // Copy code object. 2220 // Copy code object.
2297 Address old_addr = code->address(); 2221 Address old_addr = code->address();
2298 Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); 2222 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2299 CopyBlock(reinterpret_cast<Object**>(new_addr), 2223 CopyBlock(new_addr, old_addr, obj_size);
2300 reinterpret_cast<Object**>(old_addr),
2301 obj_size);
2302 // Relocate the copy. 2224 // Relocate the copy.
2303 Code* new_code = Code::cast(result); 2225 Code* new_code = Code::cast(result);
2304 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address())); 2226 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2305 new_code->Relocate(new_addr - old_addr); 2227 new_code->Relocate(new_addr - old_addr);
2306 return new_code; 2228 return new_code;
2307 } 2229 }
2308 2230
2309 2231
2310 Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) { 2232 Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
2311 int new_body_size = RoundUp(code->instruction_size() + reloc_info.length(), 2233 int new_body_size = RoundUp(code->instruction_size() + reloc_info.length(),
(...skipping 125 matching lines...)
2437 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size()); 2359 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());
2438 2360
2439 // Do the allocation. 2361 // Do the allocation.
2440 Object* result = 2362 Object* result =
2441 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE); 2363 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
2442 if (result->IsFailure()) return result; 2364 if (result->IsFailure()) return result;
2443 2365
2444 // Copy the content. The arguments boilerplate doesn't have any 2366 // Copy the content. The arguments boilerplate doesn't have any
2445 // fields that point to new space so it's safe to skip the write 2367 // fields that point to new space so it's safe to skip the write
2446 // barrier here. 2368 // barrier here.
2447 CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()), 2369 CopyBlock(HeapObject::cast(result)->address(),
2448 reinterpret_cast<Object**>(boilerplate->address()), 2370 boilerplate->address(),
2449 kArgumentsObjectSize); 2371 kArgumentsObjectSize);
2450 2372
2451 // Set the two properties. 2373 // Set the two properties.
2452 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index, 2374 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
2453 callee); 2375 callee);
2454 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index, 2376 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
2455 Smi::FromInt(length), 2377 Smi::FromInt(length),
2456 SKIP_WRITE_BARRIER); 2378 SKIP_WRITE_BARRIER);
2457 2379
2458 // Check the state of the object 2380 // Check the state of the object
(...skipping 201 matching lines...)
2660 Map* map = source->map(); 2582 Map* map = source->map();
2661 int object_size = map->instance_size(); 2583 int object_size = map->instance_size();
2662 Object* clone; 2584 Object* clone;
2663 2585
2664 // If we're forced to always allocate, we use the general allocation 2586 // If we're forced to always allocate, we use the general allocation
2665 // functions which may leave us with an object in old space. 2587 // functions which may leave us with an object in old space.
2666 if (always_allocate()) { 2588 if (always_allocate()) {
2667 clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE); 2589 clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
2668 if (clone->IsFailure()) return clone; 2590 if (clone->IsFailure()) return clone;
2669 Address clone_address = HeapObject::cast(clone)->address(); 2591 Address clone_address = HeapObject::cast(clone)->address();
2670 CopyBlock(reinterpret_cast<Object**>(clone_address), 2592 CopyBlock(clone_address,
2671 reinterpret_cast<Object**>(source->address()), 2593 source->address(),
2672 object_size); 2594 object_size);
2673 // Update write barrier for all fields that lie beyond the header. 2595 // Update write barrier for all fields that lie beyond the header.
2674 RecordWrites(clone_address, 2596 RecordWrites(clone_address,
2675 JSObject::kHeaderSize, 2597 JSObject::kHeaderSize,
2676 (object_size - JSObject::kHeaderSize) / kPointerSize); 2598 (object_size - JSObject::kHeaderSize) / kPointerSize);
2677 } else { 2599 } else {
2678 clone = new_space_.AllocateRaw(object_size); 2600 clone = new_space_.AllocateRaw(object_size);
2679 if (clone->IsFailure()) return clone; 2601 if (clone->IsFailure()) return clone;
2680 ASSERT(Heap::InNewSpace(clone)); 2602 ASSERT(Heap::InNewSpace(clone));
2681 // Since we know the clone is allocated in new space, we can copy 2603 // Since we know the clone is allocated in new space, we can copy
2682 // the contents without worrying about updating the write barrier. 2604 // the contents without worrying about updating the write barrier.
2683 CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()), 2605 CopyBlock(HeapObject::cast(clone)->address(),
2684 reinterpret_cast<Object**>(source->address()), 2606 source->address(),
2685 object_size); 2607 object_size);
2686 } 2608 }
2687 2609
2688 FixedArray* elements = FixedArray::cast(source->elements()); 2610 FixedArray* elements = FixedArray::cast(source->elements());
2689 FixedArray* properties = FixedArray::cast(source->properties()); 2611 FixedArray* properties = FixedArray::cast(source->properties());
2690 // Update elements if necessary. 2612 // Update elements if necessary.
2691 if (elements->length() > 0) { 2613 if (elements->length() > 0) {
2692 Object* elem = CopyFixedArray(elements); 2614 Object* elem = CopyFixedArray(elements);
2693 if (elem->IsFailure()) return elem; 2615 if (elem->IsFailure()) return elem;
2694 JSObject::cast(clone)->set_elements(FixedArray::cast(elem)); 2616 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
(...skipping 250 matching lines...)
2945 ASSERT_EQ(size, HeapObject::cast(result)->Size()); 2867 ASSERT_EQ(size, HeapObject::cast(result)->Size());
2946 return result; 2868 return result;
2947 } 2869 }
2948 2870
2949 2871
2950 Object* Heap::AllocateEmptyFixedArray() { 2872 Object* Heap::AllocateEmptyFixedArray() {
2951 int size = FixedArray::SizeFor(0); 2873 int size = FixedArray::SizeFor(0);
2952 Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); 2874 Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
2953 if (result->IsFailure()) return result; 2875 if (result->IsFailure()) return result;
2954 // Initialize the object. 2876 // Initialize the object.
2955 reinterpret_cast<Array*>(result)->set_map(fixed_array_map()); 2877 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
2956 reinterpret_cast<Array*>(result)->set_length(0); 2878 reinterpret_cast<FixedArray*>(result)->set_length(0);
2957 return result; 2879 return result;
2958 } 2880 }
2959 2881
2960 2882
2961 Object* Heap::AllocateRawFixedArray(int length) { 2883 Object* Heap::AllocateRawFixedArray(int length) {
2962 if (length < 0 || length > FixedArray::kMaxLength) { 2884 if (length < 0 || length > FixedArray::kMaxLength) {
2963 return Failure::OutOfMemoryException(); 2885 return Failure::OutOfMemoryException();
2964 } 2886 }
2965 // Use the general function if we're forced to always allocate. 2887 // Use the general function if we're forced to always allocate.
2966 if (always_allocate()) return AllocateFixedArray(length, TENURED); 2888 if (always_allocate()) return AllocateFixedArray(length, TENURED);
2967 // Allocate the raw data for a fixed array. 2889 // Allocate the raw data for a fixed array.
2968 int size = FixedArray::SizeFor(length); 2890 int size = FixedArray::SizeFor(length);
2969 return size <= kMaxObjectSizeInNewSpace 2891 return size <= kMaxObjectSizeInNewSpace
2970 ? new_space_.AllocateRaw(size) 2892 ? new_space_.AllocateRaw(size)
2971 : lo_space_->AllocateRawFixedArray(size); 2893 : lo_space_->AllocateRawFixedArray(size);
2972 } 2894 }
2973 2895
2974 2896
2975 Object* Heap::CopyFixedArray(FixedArray* src) { 2897 Object* Heap::CopyFixedArray(FixedArray* src) {
2976 int len = src->length(); 2898 int len = src->length();
2977 Object* obj = AllocateRawFixedArray(len); 2899 Object* obj = AllocateRawFixedArray(len);
2978 if (obj->IsFailure()) return obj; 2900 if (obj->IsFailure()) return obj;
2979 if (Heap::InNewSpace(obj)) { 2901 if (Heap::InNewSpace(obj)) {
2980 HeapObject* dst = HeapObject::cast(obj); 2902 HeapObject* dst = HeapObject::cast(obj);
2981 CopyBlock(reinterpret_cast<Object**>(dst->address()), 2903 CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len));
2982 reinterpret_cast<Object**>(src->address()),
2983 FixedArray::SizeFor(len));
2984 return obj; 2904 return obj;
2985 } 2905 }
2986 HeapObject::cast(obj)->set_map(src->map()); 2906 HeapObject::cast(obj)->set_map(src->map());
2987 FixedArray* result = FixedArray::cast(obj); 2907 FixedArray* result = FixedArray::cast(obj);
2988 result->set_length(len); 2908 result->set_length(len);
2989 2909
2990 // Copy the content 2910 // Copy the content
2991 AssertNoAllocation no_gc; 2911 AssertNoAllocation no_gc;
2992 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc); 2912 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
2993 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode); 2913 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
2994 return result; 2914 return result;
2995 } 2915 }
2996 2916
2997 2917
2998 Object* Heap::AllocateFixedArray(int length) { 2918 Object* Heap::AllocateFixedArray(int length) {
2999 ASSERT(length >= 0); 2919 ASSERT(length >= 0);
3000 if (length == 0) return empty_fixed_array(); 2920 if (length == 0) return empty_fixed_array();
3001 Object* result = AllocateRawFixedArray(length); 2921 Object* result = AllocateRawFixedArray(length);
3002 if (!result->IsFailure()) { 2922 if (!result->IsFailure()) {
3003 // Initialize header. 2923 // Initialize header.
3004 reinterpret_cast<Array*>(result)->set_map(fixed_array_map()); 2924 FixedArray* array = reinterpret_cast<FixedArray*>(result);
3005 FixedArray* array = FixedArray::cast(result); 2925 array->set_map(fixed_array_map());
3006 array->set_length(length); 2926 array->set_length(length);
3007 // Initialize body. 2927 // Initialize body.
3008 ASSERT(!Heap::InNewSpace(undefined_value())); 2928 ASSERT(!Heap::InNewSpace(undefined_value()));
3009 MemsetPointer(array->data_start(), undefined_value(), length); 2929 MemsetPointer(array->data_start(), undefined_value(), length);
3010 } 2930 }
3011 return result; 2931 return result;
3012 } 2932 }
3013 2933
3014 2934
3015 Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) { 2935 Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
3016 if (length < 0 || length > FixedArray::kMaxLength) { 2936 if (length < 0 || length > FixedArray::kMaxLength) {
3017 return Failure::OutOfMemoryException(); 2937 return Failure::OutOfMemoryException();
3018 } 2938 }
3019 2939
3020 AllocationSpace space = 2940 AllocationSpace space =
3021 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE; 2941 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3022 int size = FixedArray::SizeFor(length); 2942 int size = FixedArray::SizeFor(length);
3023 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) { 2943 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
3024 // Too big for new space. 2944 // Too big for new space.
3025 space = LO_SPACE; 2945 space = LO_SPACE;
3026 } else if (space == OLD_POINTER_SPACE && 2946 } else if (space == OLD_POINTER_SPACE &&
3027 size > MaxObjectSizeInPagedSpace()) { 2947 size > MaxObjectSizeInPagedSpace()) {
3028 // Too big for old pointer space. 2948 // Too big for old pointer space.
3029 space = LO_SPACE; 2949 space = LO_SPACE;
3030 } 2950 }
3031 2951
3032 // Specialize allocation for the space. 2952 AllocationSpace retry_space =
3033 Object* result = Failure::OutOfMemoryException(); 2953 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
3034 if (space == NEW_SPACE) { 2954
3035 // We cannot use Heap::AllocateRaw() because it will not properly 2955 return AllocateRaw(size, space, retry_space);
3036 // allocate extra remembered set bits if always_allocate() is true and
3037 // new space allocation fails.
3038 result = new_space_.AllocateRaw(size);
3039 if (result->IsFailure() && always_allocate()) {
3040 if (size <= MaxObjectSizeInPagedSpace()) {
3041 result = old_pointer_space_->AllocateRaw(size);
3042 } else {
3043 result = lo_space_->AllocateRawFixedArray(size);
3044 }
3045 }
3046 } else if (space == OLD_POINTER_SPACE) {
3047 result = old_pointer_space_->AllocateRaw(size);
3048 } else {
3049 ASSERT(space == LO_SPACE);
3050 result = lo_space_->AllocateRawFixedArray(size);
3051 }
3052 return result;
3053 } 2956 }
3054 2957
3055 2958
3056 static Object* AllocateFixedArrayWithFiller(int length, 2959 static Object* AllocateFixedArrayWithFiller(int length,
3057 PretenureFlag pretenure, 2960 PretenureFlag pretenure,
3058 Object* filler) { 2961 Object* filler) {
3059 ASSERT(length >= 0); 2962 ASSERT(length >= 0);
3060 ASSERT(Heap::empty_fixed_array()->IsFixedArray()); 2963 ASSERT(Heap::empty_fixed_array()->IsFixedArray());
3061 if (length == 0) return Heap::empty_fixed_array(); 2964 if (length == 0) return Heap::empty_fixed_array();
3062 2965
(...skipping 27 matching lines...)
3090 2993
3091 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map()); 2994 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
3092 FixedArray::cast(obj)->set_length(length); 2995 FixedArray::cast(obj)->set_length(length);
3093 return obj; 2996 return obj;
3094 } 2997 }
3095 2998
3096 2999
3097 Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) { 3000 Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3098 Object* result = Heap::AllocateFixedArray(length, pretenure); 3001 Object* result = Heap::AllocateFixedArray(length, pretenure);
3099 if (result->IsFailure()) return result; 3002 if (result->IsFailure()) return result;
3100 reinterpret_cast<Array*>(result)->set_map(hash_table_map()); 3003 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
3101 ASSERT(result->IsHashTable()); 3004 ASSERT(result->IsHashTable());
3102 return result; 3005 return result;
3103 } 3006 }
3104 3007
3105 3008
3106 Object* Heap::AllocateGlobalContext() { 3009 Object* Heap::AllocateGlobalContext() {
3107 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS); 3010 Object* result = Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
3108 if (result->IsFailure()) return result; 3011 if (result->IsFailure()) return result;
3109 Context* context = reinterpret_cast<Context*>(result); 3012 Context* context = reinterpret_cast<Context*>(result);
3110 context->set_map(global_context_map()); 3013 context->set_map(global_context_map());
(...skipping 231 matching lines...)
3342 return cell_space_->Contains(addr); 3245 return cell_space_->Contains(addr);
3343 case LO_SPACE: 3246 case LO_SPACE:
3344 return lo_space_->SlowContains(addr); 3247 return lo_space_->SlowContains(addr);
3345 } 3248 }
3346 3249
3347 return false; 3250 return false;
3348 } 3251 }
3349 3252
3350 3253
3351 #ifdef DEBUG 3254 #ifdef DEBUG
3255
3256
3257 static bool VerifyPointersInDirtyRegion(Address start,
3258 Address end,
3259 ObjectSlotCallback copy_object_func
3260 ) {
3261 Address slot_address = start;
3262
3263 while (slot_address < end) {
3264 Object** slot = reinterpret_cast<Object**>(slot_address);
3265 if (Heap::InNewSpace(*slot)) {
3266 ASSERT(Heap::InToSpace(*slot));
3267 }
3268 slot_address += kPointerSize;
3269 }
3270
3271 return true;
3272 }
3273
3274
3275 static void DummyScavengePointer(HeapObject** p) {
3276 }
3277
3278
3352 void Heap::Verify() { 3279 void Heap::Verify() {
3353 ASSERT(HasBeenSetup()); 3280 ASSERT(HasBeenSetup());
3354 3281
3355 VerifyPointersVisitor visitor; 3282 VerifyPointersVisitor visitor;
3356 IterateRoots(&visitor, VISIT_ONLY_STRONG); 3283 IterateRoots(&visitor, VISIT_ONLY_STRONG);
3357 3284
3358 new_space_.Verify(); 3285 new_space_.Verify();
3359 3286
3360 VerifyPointersAndRSetVisitor rset_visitor; 3287 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
3361 old_pointer_space_->Verify(&rset_visitor); 3288 old_pointer_space_->Verify(&dirty_regions_visitor);
3362 map_space_->Verify(&rset_visitor); 3289 map_space_->Verify(&dirty_regions_visitor);
3363 3290
3364 VerifyPointersVisitor no_rset_visitor; 3291 IterateDirtyRegions(old_pointer_space_,
3365 old_data_space_->Verify(&no_rset_visitor); 3292 &VerifyPointersInDirtyRegion,
3366 code_space_->Verify(&no_rset_visitor); 3293 &DummyScavengePointer,
3367 cell_space_->Verify(&no_rset_visitor); 3294 WATERMARK_SHOULD_BE_VALID);
3295
3296 IterateDirtyRegions(map_space_,
3297 &VerifyPointersInDirtyRegion,
3298 &DummyScavengePointer,
3299 WATERMARK_SHOULD_BE_VALID);
3300
3301
3302 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
3303 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
3304
3305 VerifyPointersVisitor no_dirty_regions_visitor;
3306 old_data_space_->Verify(&no_dirty_regions_visitor);
3307 code_space_->Verify(&no_dirty_regions_visitor);
3308 cell_space_->Verify(&no_dirty_regions_visitor);
3368 3309
3369 lo_space_->Verify(); 3310 lo_space_->Verify();
3370 } 3311 }
3371 #endif // DEBUG 3312 #endif // DEBUG
3372 3313
3373 3314
3374 Object* Heap::LookupSymbol(Vector<const char> string) { 3315 Object* Heap::LookupSymbol(Vector<const char> string) {
3375 Object* symbol = NULL; 3316 Object* symbol = NULL;
3376 Object* new_table = symbol_table()->LookupSymbol(string, &symbol); 3317 Object* new_table = symbol_table()->LookupSymbol(string, &symbol);
3377 if (new_table->IsFailure()) return new_table; 3318 if (new_table->IsFailure()) return new_table;
(...skipping 32 matching lines...)
3410 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject()); 3351 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
3411 for (Address a = new_space_.FromSpaceLow(); 3352 for (Address a = new_space_.FromSpaceLow();
3412 a < new_space_.FromSpaceHigh(); 3353 a < new_space_.FromSpaceHigh();
3413 a += kPointerSize) { 3354 a += kPointerSize) {
3414 Memory::Address_at(a) = kFromSpaceZapValue; 3355 Memory::Address_at(a) = kFromSpaceZapValue;
3415 } 3356 }
3416 } 3357 }
3417 #endif // DEBUG 3358 #endif // DEBUG
3418 3359
3419 3360
3420 int Heap::IterateRSetRange(Address object_start, 3361 bool Heap::IteratePointersInDirtyRegion(Address start,
3421 Address object_end, 3362 Address end,
3422 Address rset_start, 3363 ObjectSlotCallback copy_object_func) {
3423 ObjectSlotCallback copy_object_func) { 3364 Address slot_address = start;
3424 Address object_address = object_start; 3365 bool pointers_to_new_space_found = false;
3425 Address rset_address = rset_start; 3366
3426 int set_bits_count = 0; 3367 while (slot_address < end) {
3427 3368 Object** slot = reinterpret_cast<Object**>(slot_address);
3428 // Loop over all the pointers in [object_start, object_end). 3369 if (Heap::InNewSpace(*slot)) {
3429 while (object_address < object_end) { 3370 ASSERT((*slot)->IsHeapObject());
3430 uint32_t rset_word = Memory::uint32_at(rset_address); 3371 copy_object_func(reinterpret_cast<HeapObject**>(slot));
3431 if (rset_word != 0) { 3372 if (Heap::InNewSpace(*slot)) {
3432 uint32_t result_rset = rset_word; 3373 ASSERT((*slot)->IsHeapObject());
3433 for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) { 3374 pointers_to_new_space_found = true;
3434 // Do not dereference pointers at or past object_end. 3375 }
3435 if ((rset_word & bitmask) != 0 && object_address < object_end) { 3376 }
3436 Object** object_p = reinterpret_cast<Object**>(object_address); 3377 slot_address += kPointerSize;
3437 if (Heap::InNewSpace(*object_p)) { 3378 }
3438 copy_object_func(reinterpret_cast<HeapObject**>(object_p)); 3379 return pointers_to_new_space_found;
3439 } 3380 }
3440 // If this pointer does not need to be remembered anymore, clear 3381
3441 // the remembered set bit. 3382
3442 if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask; 3383 // Compute start address of the first map following given addr.
3443 set_bits_count++; 3384 static inline Address MapStartAlign(Address addr) {
3444 } 3385 Address page = Page::FromAddress(addr)->ObjectAreaStart();
3445 object_address += kPointerSize; 3386 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
3446 } 3387 }
3447 // Update the remembered set if it has changed. 3388
3448 if (result_rset != rset_word) { 3389
3449 Memory::uint32_at(rset_address) = result_rset; 3390 // Compute end address of the first map preceding given addr.
3450 } 3391 static inline Address MapEndAlign(Address addr) {
3451 } else { 3392 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
3452 // No bits in the word were set. This is the common case. 3393 return page + ((addr - page) / Map::kSize * Map::kSize);
3453 object_address += kPointerSize * kBitsPerInt; 3394 }
3454 } 3395
3455 rset_address += kIntSize; 3396
3456 } 3397 static bool IteratePointersInDirtyMaps(Address start,
3457 return set_bits_count; 3398 Address end,
3458 } 3399 ObjectSlotCallback copy_object_func) {
3459 3400 ASSERT(MapStartAlign(start) == start);
3460 3401 ASSERT(MapEndAlign(end) == end);
3461 void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) { 3402
3462 ASSERT(Page::is_rset_in_use()); 3403 Address map_address = start;
3463 ASSERT(space == old_pointer_space_ || space == map_space_); 3404 bool pointers_to_new_space_found = false;
3464 3405
3465 static void* paged_rset_histogram = StatsTable::CreateHistogram( 3406 while (map_address < end) {
3466 "V8.RSetPaged", 3407 ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
3467 0, 3408 ASSERT(Memory::Object_at(map_address)->IsMap());
3468 Page::kObjectAreaSize / kPointerSize, 3409
3469 30); 3410 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
3411 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
3412
3413 if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
3414 pointer_fields_end,
3415 copy_object_func)) {
3416 pointers_to_new_space_found = true;
3417 }
3418
3419 map_address += Map::kSize;
3420 }
3421
3422 return pointers_to_new_space_found;
3423 }
3424
3425
3426 bool Heap::IteratePointersInDirtyMapsRegion(
3427 Address start,
3428 Address end,
3429 ObjectSlotCallback copy_object_func) {
3430 Address map_aligned_start = MapStartAlign(start);
3431 Address map_aligned_end = MapEndAlign(end);
3432
3433 bool contains_pointers_to_new_space = false;
3434
3435 if (map_aligned_start != start) {
3436 Address prev_map = map_aligned_start - Map::kSize;
3437 ASSERT(Memory::Object_at(prev_map)->IsMap());
3438
3439 Address pointer_fields_start =
3440 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
3441
3442 Address pointer_fields_end =
3443 Min(prev_map + Map::kCodeCacheOffset + kPointerSize, end);
3444
3445 contains_pointers_to_new_space =
3446 IteratePointersInDirtyRegion(pointer_fields_start,
3447 pointer_fields_end,
3448 copy_object_func)
3449 || contains_pointers_to_new_space;
3450 }
3451
3452 contains_pointers_to_new_space =
3453 IteratePointersInDirtyMaps(map_aligned_start,
3454 map_aligned_end,
3455 copy_object_func)
3456 || contains_pointers_to_new_space;
3457
3458 if (map_aligned_end != end) {
3459 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
3460
3461 Address pointer_fields_start = map_aligned_end + Map::kPrototypeOffset;
3462
3463 Address pointer_fields_end =
3464 Min(end, map_aligned_end + Map::kCodeCacheOffset + kPointerSize);
3465
3466 contains_pointers_to_new_space =
3467 IteratePointersInDirtyRegion(pointer_fields_start,
3468 pointer_fields_end,
3469 copy_object_func)
3470 || contains_pointers_to_new_space;
3471 }
3472
3473 return contains_pointers_to_new_space;
3474 }
3475
3476
3477 void Heap::IterateAndMarkPointersToNewSpace(Address start,
3478 Address end,
3479 ObjectSlotCallback callback) {
3480 Address slot_address = start;
3481 Page* page = Page::FromAddress(start);
3482
3483 uint32_t marks = page->GetRegionMarks();
3484
3485 while (slot_address < end) {
3486 Object** slot = reinterpret_cast<Object**>(slot_address);
3487 if (Heap::InNewSpace(*slot)) {
3488 ASSERT((*slot)->IsHeapObject());
3489 callback(reinterpret_cast<HeapObject**>(slot));
3490 if (Heap::InNewSpace(*slot)) {
3491 ASSERT((*slot)->IsHeapObject());
3492 marks |= page->GetRegionMaskForAddress(slot_address);
3493 }
3494 }
3495 slot_address += kPointerSize;
3496 }
3497
3498 page->SetRegionMarks(marks);
3499 }
3500
3501
3502 uint32_t Heap::IterateDirtyRegions(
3503 uint32_t marks,
3504 Address area_start,
3505 Address area_end,
3506 DirtyRegionCallback visit_dirty_region,
3507 ObjectSlotCallback copy_object_func) {
3508 uint32_t newmarks = 0;
3509 uint32_t mask = 1;
3510
3511 if (area_start >= area_end) {
3512 return newmarks;
3513 }
3514
3515 Address region_start = area_start;
3516
 3517 // area_start does not necessarily coincide with the start of the first region.
 3518 // To find the beginning of the next region we therefore round area_start up
 3519 // to a Page::kRegionSize boundary.
3520 Address second_region =
3521 reinterpret_cast<Address>(
3522 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
3523 ~Page::kRegionAlignmentMask);
3524
3525 // Next region might be beyond area_end.
3526 Address region_end = Min(second_region, area_end);
3527
3528 if (marks & mask) {
3529 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
3530 newmarks |= mask;
3531 }
3532 }
3533 mask <<= 1;
3534
 3535 // Iterate subsequent regions which lie fully inside [area_start, area_end).
3536 region_start = region_end;
3537 region_end = region_start + Page::kRegionSize;
3538
3539 while (region_end <= area_end) {
3540 if (marks & mask) {
3541 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
3542 newmarks |= mask;
3543 }
3544 }
3545
3546 region_start = region_end;
3547 region_end = region_start + Page::kRegionSize;
3548
3549 mask <<= 1;
3550 }
3551
3552 if (region_start != area_end) {
 3553 // A small piece of the area was left unvisited because area_end does not
 3554 // coincide with a region boundary. Check whether the region covering the
 3555 // last part of the area is dirty.
3556 if (marks & mask) {
3557 if (visit_dirty_region(region_start, area_end, copy_object_func)) {
3558 newmarks |= mask;
3559 }
3560 }
3561 }
3562
3563 return newmarks;
3564 }
3565
3566
3567
3568 void Heap::IterateDirtyRegions(
3569 PagedSpace* space,
3570 DirtyRegionCallback visit_dirty_region,
3571 ObjectSlotCallback copy_object_func,
3572 ExpectedPageWatermarkState expected_page_watermark_state) {
3470 3573
3471 PageIterator it(space, PageIterator::PAGES_IN_USE); 3574 PageIterator it(space, PageIterator::PAGES_IN_USE);
3575
3472 while (it.has_next()) { 3576 while (it.has_next()) {
3473 Page* page = it.next(); 3577 Page* page = it.next();
3474 int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(), 3578 uint32_t marks = page->GetRegionMarks();
3475 page->RSetStart(), copy_object_func); 3579
3476 if (paged_rset_histogram != NULL) { 3580 if (marks != Page::kAllRegionsCleanMarks) {
3477 StatsTable::AddHistogramSample(paged_rset_histogram, count); 3581 Address start = page->ObjectAreaStart();
3478 } 3582
3479 } 3583 // Do not try to visit pointers beyond page allocation watermark.
3480 } 3584 // Page can contain garbage pointers there.
3481 3585 Address end;
3586
3587 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
3588 page->IsWatermarkValid()) {
3589 end = page->AllocationWatermark();
3590 } else {
3591 end = page->CachedAllocationWatermark();
3592 }
3593
3594 ASSERT(space == old_pointer_space_ ||
3595 (space == map_space_ &&
3596 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
3597
3598 page->SetRegionMarks(IterateDirtyRegions(marks,
3599 start,
3600 end,
3601 visit_dirty_region,
3602 copy_object_func));
3603 }
3604
3605 // Mark page watermark as invalid to maintain watermark validity invariant.
3606 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
3607 page->InvalidateWatermark(true);
3608 }
3609 }
3610
3482 3611
3483 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { 3612 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
3484 IterateStrongRoots(v, mode); 3613 IterateStrongRoots(v, mode);
3485 IterateWeakRoots(v, mode); 3614 IterateWeakRoots(v, mode);
3486 } 3615 }
3487 3616
3488 3617
3489 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) { 3618 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
3490 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex])); 3619 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
3491 v->Synchronize("symbol_table"); 3620 v->Synchronize("symbol_table");
(...skipping 901 matching lines...)
4393 void ExternalStringTable::TearDown() { 4522 void ExternalStringTable::TearDown() {
4394 new_space_strings_.Free(); 4523 new_space_strings_.Free();
4395 old_space_strings_.Free(); 4524 old_space_strings_.Free();
4396 } 4525 }
4397 4526
4398 4527
4399 List<Object*> ExternalStringTable::new_space_strings_; 4528 List<Object*> ExternalStringTable::new_space_strings_;
4400 List<Object*> ExternalStringTable::old_space_strings_; 4529 List<Object*> ExternalStringTable::old_space_strings_;
4401 4530
4402 } } // namespace v8::internal 4531 } } // namespace v8::internal