Chromium Code Reviews

Unified Diff: src/heap/heap.cc

Issue 1259613006: Change RecordSlot interface. Make it more robust by replacing anchor slot with actual object. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 4 months ago
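For orientation, the interface change named in the issue title can be read off the call sites below. A minimal sketch of the before/after declaration, assuming the surrounding V8 types; the parameter names and the default addition mode are inferred from the call sites in this patch, not quoted from mark-compact.h:

// Before: a slot is anchored by another slot (often itself), which goes
// stale as soon as the containing object moves.
void RecordSlot(Object** anchor_slot, Object** slot, Object* object,
                SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW);

// After: a slot is anchored by the HeapObject that contains it, so the
// collector can always re-derive a valid page and offset for the slot.
void RecordSlot(HeapObject* object, Object** slot, Object* target,
                SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW);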
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/v8.h"
 
 #include "src/accessors.h"
 #include "src/api.h"
 #include "src/base/bits.h"
 #include "src/base/once.h"
(...skipping 2066 matching lines...)
       }
 #if V8_DOUBLE_FIELDS_UNBOXING
       LayoutDescriptorHelper helper(target->map());
       bool has_only_tagged_fields = helper.all_fields_tagged();
 
       if (!has_only_tagged_fields) {
         for (int offset = 0; offset < size;) {
           int end_of_region_offset;
           if (helper.IsTagged(offset, size, &end_of_region_offset)) {
-            IterateAndMarkPointersToFromSpace(
-                record_slots, obj_address + offset,
-                obj_address + end_of_region_offset, &ScavengeObject);
+            IterateAndMarkPointersToFromSpace(
+                target, obj_address + offset,
+                obj_address + end_of_region_offset, record_slots,
+                &ScavengeObject);
           }
           offset = end_of_region_offset;
         }
       } else {
 #endif
-        IterateAndMarkPointersToFromSpace(
-            record_slots, obj_address, obj_address + size, &ScavengeObject);
+        IterateAndMarkPointersToFromSpace(target, obj_address,
+                                          obj_address + size, record_slots,
+                                          &ScavengeObject);
 #if V8_DOUBLE_FIELDS_UNBOXING
       }
 #endif
     }
   }
 
     // Take another spin if there are now unswept objects in new space
     // (there are currently no more unswept promoted objects).
   } while (new_space_front != new_space_.top());
 
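The hunk above only reorders arguments at the call sites; the matching signature change to IterateAndMarkPointersToFromSpace appears further down in this same file. Condensed, with names as they appear in the new code:

// Old call shape:
IterateAndMarkPointersToFromSpace(record_slots, start, end, &ScavengeObject);
// New call shape: the promoted object comes first, so any evacuation-candidate
// slot found in [start, end) can be recorded against its containing object.
IterateAndMarkPointersToFromSpace(target, start, end, record_slots,
                                  &ScavengeObject);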
(...skipping 305 matching lines...)
     MarkBit mark_bit = Marking::MarkBitFrom(target);
     if (Marking::IsBlack(mark_bit)) {
       // This object is black and it might not be rescanned by marker.
       // We should explicitly record code entry slot for compaction because
       // promotion queue processing (IterateAndMarkPointersToFromSpace) will
       // miss it as it is not HeapObject-tagged.
       Address code_entry_slot =
           target->address() + JSFunction::kCodeEntryOffset;
       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
       map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
-          code_entry_slot, code);
+          target, code_entry_slot, code);
     }
   }
 
 
   static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
                                         HeapObject* object) {
     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
     EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
                                                  object_size);
   }
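RecordCodeEntrySlot follows the same pattern: the code-entry field of a JSFunction holds a raw Address rather than a tagged pointer, so the function object itself now anchors the recorded slot. A sketch of the implied declaration change; the parameter names are assumptions inferred from this call site:

// Before: only the raw slot address and the Code target were recorded.
void RecordCodeEntrySlot(Address slot, Code* target);

// After: the object containing the code-entry field anchors the slot.
void RecordCodeEntrySlot(HeapObject* object, Address slot, Code* target);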
(...skipping 1143 matching lines...)
     allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
                                        site, SKIP_WRITE_BARRIER);
     Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
         allocation_sites_scratchpad_length_);
 
     if (mode == RECORD_SCRATCHPAD_SLOT) {
       // We need to allow slots buffer overflow here since the evacuation
       // candidates are not part of the global list of old space pages and
       // releasing an evacuation candidate due to a slots buffer overflow
       // results in lost pages.
-      mark_compact_collector()->RecordSlot(slot, slot, *slot,
-                                           SlotsBuffer::IGNORE_OVERFLOW);
+      mark_compact_collector()->RecordSlot(allocation_sites_scratchpad(), slot,
+                                           *slot, SlotsBuffer::IGNORE_OVERFLOW);
     }
     allocation_sites_scratchpad_length_++;
   }
 }
 
 
 
 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
   return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
 }
(...skipping 1528 matching lines...)
   while (it.has_next()) {
     NewSpacePage* page = it.next();
     for (Address cursor = page->area_start(), limit = page->area_end();
          cursor < limit; cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
     }
   }
 }
 
 
-void Heap::IterateAndMarkPointersToFromSpace(bool record_slots, Address start,
-                                             Address end,
+void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
+                                             Address end, bool record_slots,
                                              ObjectSlotCallback callback) {
   Address slot_address = start;
 
   while (slot_address < end) {
     Object** slot = reinterpret_cast<Object**>(slot_address);
-    Object* object = *slot;
+    Object* target = *slot;
     // If the store buffer becomes overfull we mark pages as being exempt from
     // the store buffer. These pages are scanned to find pointers that point
     // to the new space. In that case we may hit newly promoted objects and
     // fix the pointers before the promotion queue gets to them. Thus the 'if'.
-    if (object->IsHeapObject()) {
-      if (Heap::InFromSpace(object)) {
+    if (target->IsHeapObject()) {
+      if (Heap::InFromSpace(target)) {
         callback(reinterpret_cast<HeapObject**>(slot),
-                 HeapObject::cast(object));
-        Object* new_object = *slot;
-        if (InNewSpace(new_object)) {
-          SLOW_DCHECK(Heap::InToSpace(new_object));
-          SLOW_DCHECK(new_object->IsHeapObject());
+                 HeapObject::cast(target));
+        Object* new_target = *slot;
+        if (InNewSpace(new_target)) {
+          SLOW_DCHECK(Heap::InToSpace(new_target));
+          SLOW_DCHECK(new_target->IsHeapObject());
           store_buffer_.EnterDirectlyIntoStoreBuffer(
               reinterpret_cast<Address>(slot));
         }
-        SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
+        SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
       } else if (record_slots &&
-                 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
-        mark_compact_collector()->RecordSlot(slot, slot, object);
+                 MarkCompactCollector::IsOnEvacuationCandidate(target)) {
+        mark_compact_collector()->RecordSlot(object, slot, target);
       }
     }
     slot_address += kPointerSize;
   }
 }
 
 
 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
   IterateStrongRoots(v, mode);
   IterateWeakRoots(v, mode);
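To summarize the rewritten helper: `object` now names the heap object whose body is being scanned, while `target` names the pointee found in each slot. An annotated restatement of the new signature; the comments are editorial, not from the patch:

void Heap::IterateAndMarkPointersToFromSpace(
    HeapObject* object,            // object whose body spans [start, end);
                                   // used as the anchor for RecordSlot
    Address start, Address end,    // half-open range of tagged slots to visit
    bool record_slots,             // record pointers to evacuation candidates?
    ObjectSlotCallback callback);  // applied to pointers still in from-space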
(...skipping 1690 matching lines...)
     *object_type = "CODE_TYPE";           \
     *object_sub_type = "CODE_AGE/" #name; \
     return true;
     CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
 #undef COMPARE_AND_RETURN_NAME
   }
   return false;
 }
 }  // namespace internal
 }  // namespace v8
