Chromium Code Reviews

Side by Side Diff: src/heap/heap.cc

Issue 1420423009: [heap] Black allocation. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 10 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/heap.h" 5 #include "src/heap/heap.h"
6 6
7 #include "src/accessors.h" 7 #include "src/accessors.h"
8 #include "src/api.h" 8 #include "src/api.h"
9 #include "src/ast/scopeinfo.h" 9 #include "src/ast/scopeinfo.h"
10 #include "src/base/bits.h" 10 #include "src/base/bits.h"
(...skipping 1127 matching lines...)
1138 } 1138 }
1139 #endif // VERIFY_HEAP 1139 #endif // VERIFY_HEAP
1140 1140
1141 1141
1142 bool Heap::ReserveSpace(Reservation* reservations) { 1142 bool Heap::ReserveSpace(Reservation* reservations) {
1143 bool gc_performed = true; 1143 bool gc_performed = true;
1144 int counter = 0; 1144 int counter = 0;
1145 static const int kThreshold = 20; 1145 static const int kThreshold = 20;
1146 while (gc_performed && counter++ < kThreshold) { 1146 while (gc_performed && counter++ < kThreshold) {
1147 gc_performed = false; 1147 gc_performed = false;
1148 bool black_allocation = incremental_marking_->black_allocation();
1148 for (int space = NEW_SPACE; space < Serializer::kNumberOfSpaces; space++) { 1149 for (int space = NEW_SPACE; space < Serializer::kNumberOfSpaces; space++) {
1149 Reservation* reservation = &reservations[space]; 1150 Reservation* reservation = &reservations[space];
1150 DCHECK_LE(1, reservation->length()); 1151 DCHECK_LE(1, reservation->length());
1151 if (reservation->at(0).size == 0) continue; 1152 if (reservation->at(0).size == 0) continue;
1152 bool perform_gc = false; 1153 bool perform_gc = false;
1153 if (space == LO_SPACE) { 1154 if (space == LO_SPACE) {
1154 DCHECK_EQ(1, reservation->length()); 1155 DCHECK_EQ(1, reservation->length());
1155 perform_gc = !CanExpandOldGeneration(reservation->at(0).size); 1156 perform_gc = !CanExpandOldGeneration(reservation->at(0).size);
1156 } else { 1157 } else {
1157 for (auto& chunk : *reservation) { 1158 for (auto& chunk : *reservation) {
1158 AllocationResult allocation; 1159 AllocationResult allocation;
1159 int size = chunk.size; 1160 int size = chunk.size;
1160 DCHECK_LE(size, MemoryAllocator::PageAreaSize( 1161 DCHECK_LE(size, MemoryAllocator::PageAreaSize(
1161 static_cast<AllocationSpace>(space))); 1162 static_cast<AllocationSpace>(space)));
1162 if (space == NEW_SPACE) { 1163 if (space == NEW_SPACE) {
1163 allocation = new_space()->AllocateRawUnaligned(size); 1164 allocation = new_space()->AllocateRawUnaligned(size);
1164 } else { 1165 } else {
1165 allocation = paged_space(space)->AllocateRawUnaligned(size); 1166 allocation = paged_space(space)->AllocateRawUnaligned(size);
1166 } 1167 }
1167 HeapObject* free_space = nullptr; 1168 HeapObject* free_space = nullptr;
1168 if (allocation.To(&free_space)) { 1169 if (allocation.To(&free_space)) {
1169 // Mark with a free list node, in case we have a GC before 1170 // Mark with a free list node, in case we have a GC before
1170 // deserializing. 1171 // deserializing.
1171 Address free_space_address = free_space->address(); 1172 Address free_space_address = free_space->address();
1172 CreateFillerObjectAt(free_space_address, size); 1173 CreateFillerObjectAt(free_space_address, size);
1173 DCHECK(space < Serializer::kNumberOfPreallocatedSpaces); 1174 DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
1174 chunk.start = free_space_address; 1175 chunk.start = free_space_address;
1175 chunk.end = free_space_address + size; 1176 chunk.end = free_space_address + size;
 1177 // We have to make sure that either all pages are black pages or
 1178 // none are. If black allocation was turned on while we were allocating
 1179 // reservations for different spaces, we have to abort.
1180 if (black_allocation != incremental_marking_->black_allocation()) {
1181 perform_gc = true;
ulan 2016/02/10 10:27:39 How about not starting black allocation if deseria
Hannes Payer (out of office) 2016/02/11 18:18:07 I don't think it would be an issue since there sho
1182 }
1176 } else { 1183 } else {
1177 perform_gc = true; 1184 perform_gc = true;
1178 break; 1185 break;
1179 } 1186 }
1180 } 1187 }
1181 } 1188 }
1182 if (perform_gc) { 1189 if (perform_gc) {
1183 if (space == NEW_SPACE) { 1190 if (space == NEW_SPACE) {
1184 CollectGarbage(NEW_SPACE, "failed to reserve space in the new space"); 1191 CollectGarbage(NEW_SPACE, "failed to reserve space in the new space");
1185 } else { 1192 } else {
(...skipping 768 matching lines...)
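
The ReserveSpace hunk above snapshots incremental_marking_->black_allocation() before walking the spaces and forces a retry (via GC) if the flag flips mid-way, so that deserializer reservations end up either entirely on black pages or entirely on non-black pages. A minimal sketch of that snapshot-and-retry pattern, assuming hypothetical names and callbacks (not V8 code):

#include <cstddef>
#include <vector>

struct Chunk {
  size_t size = 0;
  bool allocated = false;
};

// Reserve every chunk under a consistent black-allocation mode, or give up
// after kThreshold attempts. All callbacks are hypothetical stand-ins.
bool ReserveAll(std::vector<Chunk>& chunks, bool (*black_allocation)(),
                bool (*try_allocate)(Chunk&), void (*collect_garbage)()) {
  static const int kThreshold = 20;
  int attempts = 0;
  bool retry = true;
  while (retry && attempts++ < kThreshold) {
    retry = false;
    const bool mode_at_start = black_allocation();  // snapshot the flag
    for (Chunk& chunk : chunks) {
      if (!try_allocate(chunk)) {
        retry = true;  // out of space: collect and start over
        break;
      }
      // If the flag flipped while we were allocating, some reservations sit
      // on black pages and some do not; discard them and retry after a GC.
      if (black_allocation() != mode_at_start) {
        retry = true;
        break;
      }
    }
    if (retry) collect_garbage();
  }
  return !retry;
}
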
1954 HeapObject* target; 1961 HeapObject* target;
1955 int size; 1962 int size;
1956 promotion_queue()->remove(&target, &size); 1963 promotion_queue()->remove(&target, &size);
1957 1964
 1958 // A promoted object might already be partially visited 1965 // A promoted object might already be partially visited
 1959 // during old space pointer iteration. Thus we search specifically 1966 // during old space pointer iteration. Thus we search specifically
 1960 // for pointers to the from-semispace instead of looking for pointers 1967 // for pointers to the from-semispace instead of looking for pointers
 1961 // to new space. 1968 // to new space.
1962 DCHECK(!target->IsMap()); 1969 DCHECK(!target->IsMap());
1963 1970
1964 IteratePointersToFromSpace(target, size, &Scavenger::ScavengeObject); 1971 IteratePromotedObject(target, size, &Scavenger::ScavengeObject);
1965 } 1972 }
1966 } 1973 }
1967 1974
1968 // Take another spin if there are now unswept objects in new space 1975 // Take another spin if there are now unswept objects in new space
1969 // (there are currently no more unswept promoted objects). 1976 // (there are currently no more unswept promoted objects).
1970 } while (new_space_front != new_space_.top()); 1977 } while (new_space_front != new_space_.top());
1971 1978
1972 return new_space_front; 1979 return new_space_front;
1973 } 1980 }
1974 1981
(...skipping 1335 matching lines...)
3310 return elements; 3317 return elements;
3311 } 3318 }
3312 3319
3313 3320
3314 AllocationResult Heap::AllocateCode(int object_size, bool immovable) { 3321 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3315 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment)); 3322 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3316 AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE); 3323 AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);
3317 3324
3318 HeapObject* result = nullptr; 3325 HeapObject* result = nullptr;
3319 if (!allocation.To(&result)) return allocation; 3326 if (!allocation.To(&result)) return allocation;
3320
3321 if (immovable) { 3327 if (immovable) {
3322 Address address = result->address(); 3328 Address address = result->address();
3323 // Code objects which should stay at a fixed address are allocated either 3329 // Code objects which should stay at a fixed address are allocated either
3324 // in the first page of code space (objects on the first page of each space 3330 // in the first page of code space (objects on the first page of each space
3325 // are never moved) or in large object space. 3331 // are never moved) or in large object space.
3326 if (!code_space_->FirstPage()->Contains(address) && 3332 if (!code_space_->FirstPage()->Contains(address) &&
3327 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) { 3333 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3328 // Discard the first code allocation, which was on a page where it could 3334 // Discard the first code allocation, which was on a page where it could
3329 // be moved. 3335 // be moved.
3330 CreateFillerObjectAt(result->address(), object_size); 3336 CreateFillerObjectAt(result->address(), object_size);
(...skipping 29 matching lines...)
3360 Address new_addr = result->address(); 3366 Address new_addr = result->address();
3361 CopyBlock(new_addr, old_addr, obj_size); 3367 CopyBlock(new_addr, old_addr, obj_size);
3362 Code* new_code = Code::cast(result); 3368 Code* new_code = Code::cast(result);
3363 3369
3364 // Relocate the copy. 3370 // Relocate the copy.
3365 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment)); 3371 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
3366 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() || 3372 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3367 isolate_->code_range()->contains(code->address()) || 3373 isolate_->code_range()->contains(code->address()) ||
3368 obj_size <= code_space()->AreaSize()); 3374 obj_size <= code_space()->AreaSize());
3369 new_code->Relocate(new_addr - old_addr); 3375 new_code->Relocate(new_addr - old_addr);
3376 // We have to iterate over the object and process its pointers when black
3377 // allocation is on.
3378 if (incremental_marking()->black_allocation() &&
3379 Page::FromAddress(new_code->address())->IsFlagSet(Page::BLACK_PAGE)) {
3380 incremental_marking()->IterateBlackCode(new_code);
ulan 2016/02/10 10:27:39 How about making this more generic "allocation bar
Hannes Payer (out of office) 2016/02/11 18:18:07 I like it. It will be also called when processing
3381 }
3370 return new_code; 3382 return new_code;
3371 } 3383 }
3372 3384
3373 3385
3374 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) { 3386 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3375 // Allocate ByteArray before the Code object, so that we do not risk 3387 // Allocate ByteArray before the Code object, so that we do not risk
 3376 // leaving an uninitialized Code object (and breaking the heap). 3388 // leaving an uninitialized Code object (and breaking the heap).
3377 ByteArray* reloc_info_array = nullptr; 3389 ByteArray* reloc_info_array = nullptr;
3378 { 3390 {
3379 AllocationResult allocation = 3391 AllocationResult allocation =
(...skipping 27 matching lines...)
3407 CopyBytes(new_code->relocation_start(), reloc_info.start(), 3419 CopyBytes(new_code->relocation_start(), reloc_info.start(),
3408 static_cast<size_t>(reloc_info.length())); 3420 static_cast<size_t>(reloc_info.length()));
3409 3421
3410 // Relocate the copy. 3422 // Relocate the copy.
3411 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment)); 3423 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
3412 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() || 3424 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3413 isolate_->code_range()->contains(code->address()) || 3425 isolate_->code_range()->contains(code->address()) ||
3414 new_obj_size <= code_space()->AreaSize()); 3426 new_obj_size <= code_space()->AreaSize());
3415 3427
3416 new_code->Relocate(new_addr - old_addr); 3428 new_code->Relocate(new_addr - old_addr);
 3417 3429 // We have to iterate over the object and process its pointers when
3430 // black allocation is on.
3431 if (incremental_marking()->black_allocation() &&
3432 Page::FromAddress(new_code->address())->IsFlagSet(Page::BLACK_PAGE)) {
3433 incremental_marking()->IterateBlackCode(new_code);
3434 }
3418 #ifdef VERIFY_HEAP 3435 #ifdef VERIFY_HEAP
3419 if (FLAG_verify_heap) code->ObjectVerify(); 3436 if (FLAG_verify_heap) code->ObjectVerify();
3420 #endif 3437 #endif
3421 return new_code; 3438 return new_code;
3422 } 3439 }
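
Both CopyCode variants above call IterateBlackCode when the copy lands on a BLACK_PAGE. The reasoning: under black allocation, objects on such pages count as already marked and will never be rescanned, so their referents have to be fed to the marker at copy time. A minimal sketch of that idea, with hypothetical types rather than the V8 API:

#include <deque>
#include <vector>

struct Obj {
  std::vector<Obj*> refs;
  bool on_black_page = false;
};

// After copying an object onto a black page, push everything it references
// onto the marking worklist, since the copy itself will never be revisited.
void ProcessCopiedObject(Obj* copy, bool black_allocation,
                         std::deque<Obj*>* marking_worklist) {
  if (!black_allocation || !copy->on_black_page) return;
  for (Obj* target : copy->refs) marking_worklist->push_back(target);
}
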
3423 3440
3424 3441
3425 void Heap::InitializeAllocationMemento(AllocationMemento* memento, 3442 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3426 AllocationSite* allocation_site) { 3443 AllocationSite* allocation_site) {
3427 memento->set_map_no_write_barrier(allocation_memento_map()); 3444 memento->set_map_no_write_barrier(allocation_memento_map());
(...skipping 1043 matching lines...)
4471 new_space_.FromSpaceEnd()); 4488 new_space_.FromSpaceEnd());
4472 while (it.has_next()) { 4489 while (it.has_next()) {
4473 NewSpacePage* page = it.next(); 4490 NewSpacePage* page = it.next();
4474 for (Address cursor = page->area_start(), limit = page->area_end(); 4491 for (Address cursor = page->area_start(), limit = page->area_end();
4475 cursor < limit; cursor += kPointerSize) { 4492 cursor < limit; cursor += kPointerSize) {
4476 Memory::Address_at(cursor) = kFromSpaceZapValue; 4493 Memory::Address_at(cursor) = kFromSpaceZapValue;
4477 } 4494 }
4478 } 4495 }
4479 } 4496 }
4480 4497
4481 4498 void Heap::IteratePromotedObjectPointers(HeapObject* object, Address start,
4482 void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start, 4499 Address end, bool record_slots,
4483 Address end, bool record_slots, 4500 ObjectSlotCallback callback) {
4484 ObjectSlotCallback callback) {
4485 Address slot_address = start; 4501 Address slot_address = start;
4486 4502
4487 while (slot_address < end) { 4503 while (slot_address < end) {
4488 Object** slot = reinterpret_cast<Object**>(slot_address); 4504 Object** slot = reinterpret_cast<Object**>(slot_address);
4489 Object* target = *slot; 4505 Object* target = *slot;
4490 // If the store buffer becomes overfull we mark pages as being exempt from 4506 // If the store buffer becomes overfull we mark pages as being exempt from
4491 // the store buffer. These pages are scanned to find pointers that point 4507 // the store buffer. These pages are scanned to find pointers that point
4492 // to the new space. In that case we may hit newly promoted objects and 4508 // to the new space. In that case we may hit newly promoted objects and
4493 // fix the pointers before the promotion queue gets to them. Thus the 'if'. 4509 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
4494 if (target->IsHeapObject()) { 4510 if (target->IsHeapObject()) {
4495 if (Heap::InFromSpace(target)) { 4511 if (Heap::InFromSpace(target)) {
4496 callback(reinterpret_cast<HeapObject**>(slot), 4512 callback(reinterpret_cast<HeapObject**>(slot),
4497 HeapObject::cast(target)); 4513 HeapObject::cast(target));
4498 Object* new_target = *slot; 4514 Object* new_target = *slot;
4499 if (InNewSpace(new_target)) { 4515 if (InNewSpace(new_target)) {
4500 SLOW_DCHECK(Heap::InToSpace(new_target)); 4516 SLOW_DCHECK(Heap::InToSpace(new_target));
4501 SLOW_DCHECK(new_target->IsHeapObject()); 4517 SLOW_DCHECK(new_target->IsHeapObject());
4502 store_buffer_.EnterDirectlyIntoStoreBuffer( 4518 store_buffer_.EnterDirectlyIntoStoreBuffer(
4503 reinterpret_cast<Address>(slot)); 4519 reinterpret_cast<Address>(slot));
4504 } 4520 }
4505 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target)); 4521 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
4506 } else if (record_slots && 4522 } else if (record_slots &&
4507 MarkCompactCollector::IsOnEvacuationCandidate(target)) { 4523 MarkCompactCollector::IsOnEvacuationCandidate(target)) {
4508 mark_compact_collector()->RecordSlot(object, slot, target); 4524 mark_compact_collector()->RecordSlot(object, slot, target);
4509 } 4525 }
 4526 // Black allocation requires us to mark objects referenced by promoted
 4527 // objects. If the object got forwarded to *slot,
 4528 // UpdateMarkingDequeAfterScavenge will take care of the color
 4529 // transition.
4530 if (incremental_marking()->black_allocation()) {
4531 IncrementalMarking::MarkObject(this, HeapObject::cast(target));
ulan 2016/02/10 10:27:39 This ignores all the special handling that the mar
Hannes Payer (out of office) 2016/02/11 18:18:07 Weakness during black allocation is special, i.e.
4532 }
4510 } 4533 }
4511 slot_address += kPointerSize; 4534 slot_address += kPointerSize;
4512 } 4535 }
4513 } 4536 }
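
The new block at the end of IteratePromotedObjectPointers preserves the tri-color invariant: a promoted object may already be black, and a black object must never point at a white one, so referents discovered while fixing up its slots are marked. A minimal sketch of that invariant, using hypothetical names (the special handling of weak references raised in the review discussion is not modeled here):

enum class Color { kWhite, kGrey, kBlack };

struct HeapObj {
  Color color = Color::kWhite;
};

// Greying the referent is enough: the incremental marker will scan it later,
// blacken it, and visit its own referents in turn.
void MarkReferent(HeapObj* target, bool black_allocation) {
  if (black_allocation && target->color == Color::kWhite) {
    target->color = Color::kGrey;
  }
}
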
4514 4537
4515 4538 class IteratePromotedObjectsVisitor final : public ObjectVisitor {
4516 class IteratePointersToFromSpaceVisitor final : public ObjectVisitor {
4517 public: 4539 public:
4518 IteratePointersToFromSpaceVisitor(Heap* heap, HeapObject* target, 4540 IteratePromotedObjectsVisitor(Heap* heap, HeapObject* target,
4519 bool record_slots, 4541 bool record_slots, ObjectSlotCallback callback)
4520 ObjectSlotCallback callback)
4521 : heap_(heap), 4542 : heap_(heap),
4522 target_(target), 4543 target_(target),
4523 record_slots_(record_slots), 4544 record_slots_(record_slots),
4524 callback_(callback) {} 4545 callback_(callback) {}
4525 4546
4526 V8_INLINE void VisitPointers(Object** start, Object** end) override { 4547 V8_INLINE void VisitPointers(Object** start, Object** end) override {
4527 heap_->IterateAndMarkPointersToFromSpace( 4548 heap_->IteratePromotedObjectPointers(
4528 target_, reinterpret_cast<Address>(start), 4549 target_, reinterpret_cast<Address>(start),
4529 reinterpret_cast<Address>(end), record_slots_, callback_); 4550 reinterpret_cast<Address>(end), record_slots_, callback_);
4530 } 4551 }
4531 4552
4532 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {} 4553 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
4554 // Black allocation requires us to process objects referenced by
4555 // promoted objects.
4556 if (heap_->incremental_marking()->black_allocation()) {
4557 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
4558 IncrementalMarking::MarkObject(heap_, code);
4559 }
4560 }
4533 4561
4534 private: 4562 private:
4535 Heap* heap_; 4563 Heap* heap_;
4536 HeapObject* target_; 4564 HeapObject* target_;
4537 bool record_slots_; 4565 bool record_slots_;
4538 ObjectSlotCallback callback_; 4566 ObjectSlotCallback callback_;
4539 }; 4567 };
4540 4568
4541 4569 void Heap::IteratePromotedObject(HeapObject* target, int size,
4542 void Heap::IteratePointersToFromSpace(HeapObject* target, int size, 4570 ObjectSlotCallback callback) {
4543 ObjectSlotCallback callback) {
 4544 // We are not collecting slots on new space objects during mutation, 4571 // We are not collecting slots on new space objects during mutation,
 4545 // thus we have to scan for pointers to evacuation candidates when we 4572 // thus we have to scan for pointers to evacuation candidates when we
 4546 // promote objects. But we should not record any slots in non-black 4573 // promote objects. But we should not record any slots in non-black
 4547 // objects. Grey objects' slots would be rescanned. 4574 // objects. Grey objects' slots would be rescanned.
 4548 // A white object might not survive until the end of the collection, so 4575 // A white object might not survive until the end of the collection, so
 4549 // it would be a violation of the invariant to record its slots. 4576 // it would be a violation of the invariant to record its slots.
4550 bool record_slots = false; 4577 bool record_slots = false;
4551 if (incremental_marking()->IsCompacting()) { 4578 if (incremental_marking()->IsCompacting()) {
4552 MarkBit mark_bit = Marking::MarkBitFrom(target); 4579 MarkBit mark_bit = Marking::MarkBitFrom(target);
4553 record_slots = Marking::IsBlack(mark_bit); 4580 record_slots = Marking::IsBlack(mark_bit);
4554 } 4581 }
4555 4582
4556 IteratePointersToFromSpaceVisitor visitor(this, target, record_slots, 4583 IteratePromotedObjectsVisitor visitor(this, target, record_slots, callback);
4557 callback);
4558 target->IterateBody(target->map()->instance_type(), size, &visitor); 4584 target->IterateBody(target->map()->instance_type(), size, &visitor);
4585
4586 // Black allocation requires us to process objects referenced by
4587 // promoted objects.
4588 if (incremental_marking()->black_allocation()) {
4589 Map* map = target->map();
4590 IncrementalMarking::MarkObject(this, map);
4591 }
4559 } 4592 }
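
The record_slots logic at the top of IteratePromotedObject follows the same color reasoning: only a black holder gets its evacuation-candidate slots recorded, because a grey holder will be rescanned anyway and a white holder may not survive the collection. As a tiny hedged sketch with hypothetical names:

enum class Color { kWhite, kGrey, kBlack };

// Record slots for the compactor only when the promoted holder is black.
bool ShouldRecordSlots(Color holder_color, bool compacting) {
  return compacting && holder_color == Color::kBlack;
}
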
4560 4593
4561 4594
4562 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { 4595 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4563 IterateStrongRoots(v, mode); 4596 IterateStrongRoots(v, mode);
4564 IterateWeakRoots(v, mode); 4597 IterateWeakRoots(v, mode);
4565 } 4598 }
4566 4599
4567 4600
4568 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) { 4601 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
(...skipping 1653 matching lines...)
6222 } 6255 }
6223 6256
6224 6257
6225 // static 6258 // static
6226 int Heap::GetStaticVisitorIdForMap(Map* map) { 6259 int Heap::GetStaticVisitorIdForMap(Map* map) {
6227 return StaticVisitorBase::GetVisitorId(map); 6260 return StaticVisitorBase::GetVisitorId(map);
6228 } 6261 }
6229 6262
6230 } // namespace internal 6263 } // namespace internal
6231 } // namespace v8 6264 } // namespace v8