Chromium Code Reviews

Side by Side Diff: src/heap/heap.cc

Issue 1420423009: [heap] Black allocation. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 9 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/heap.h" 5 #include "src/heap/heap.h"
6 6
7 #include "src/accessors.h" 7 #include "src/accessors.h"
8 #include "src/api.h" 8 #include "src/api.h"
9 #include "src/ast/scopeinfo.h" 9 #include "src/ast/scopeinfo.h"
10 #include "src/base/bits.h" 10 #include "src/base/bits.h"
(...skipping 1559 matching lines...)
1570 1570
1571 void PromotionQueue::RelocateQueueHead() { 1571 void PromotionQueue::RelocateQueueHead() {
1572 DCHECK(emergency_stack_ == NULL); 1572 DCHECK(emergency_stack_ == NULL);
1573 1573
1574 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_)); 1574 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1575 struct Entry* head_start = rear_; 1575 struct Entry* head_start = rear_;
1576 struct Entry* head_end = 1576 struct Entry* head_end =
1577 Min(front_, reinterpret_cast<struct Entry*>(p->area_end())); 1577 Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
1578 1578
1579 int entries_count = 1579 int entries_count =
- 1580         static_cast<int>(head_end - head_start) / kEntrySizeInWords;
+ 1580         static_cast<int>(head_end - head_start) / sizeof(struct Entry);
1581 1581
1582 emergency_stack_ = new List<Entry>(2 * entries_count); 1582 emergency_stack_ = new List<Entry>(2 * entries_count);
1583 1583
1584 while (head_start != head_end) { 1584 while (head_start != head_end) {
1585 struct Entry* entry = head_start++; 1585 struct Entry* entry = head_start++;
1586 // New space allocation in SemiSpaceCopyObject marked the region 1586 // New space allocation in SemiSpaceCopyObject marked the region
1587 // overlapping with promotion queue as uninitialized. 1587 // overlapping with promotion queue as uninitialized.
1588 MSAN_MEMORY_IS_INITIALIZED(&entry->size_, sizeof(size)); 1588 MSAN_MEMORY_IS_INITIALIZED(&entry->size_, sizeof(size));
1589 MSAN_MEMORY_IS_INITIALIZED(&entry->obj_, sizeof(obj)); 1589 MSAN_MEMORY_IS_INITIALIZED(&entry->obj_, sizeof(obj));
1590 emergency_stack_->Add(*entry); 1590 emergency_stack_->Add(*entry);
(...skipping 347 matching lines...)
1938 } else { 1938 } else {
1939 new_space_front = 1939 new_space_front =
1940 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start(); 1940 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1941 } 1941 }
1942 } 1942 }
1943 1943
1944 // Promote and process all the to-be-promoted objects. 1944 // Promote and process all the to-be-promoted objects.
1945 { 1945 {
1946 while (!promotion_queue()->is_empty()) { 1946 while (!promotion_queue()->is_empty()) {
1947 HeapObject* target; 1947 HeapObject* target;
- 1948       intptr_t size;
- 1949       promotion_queue()->remove(&target, &size);
+ 1948       int32_t size;
+ 1949       bool was_marked_black;
+ 1950       promotion_queue()->remove(&target, &size, &was_marked_black);
1950 1951
1951 // Promoted object might be already partially visited 1952 // Promoted object might be already partially visited
1952 // during old space pointer iteration. Thus we search specifically 1953 // during old space pointer iteration. Thus we search specifically
1953 // for pointers to from semispace instead of looking for pointers 1954 // for pointers to from semispace instead of looking for pointers
1954 // to new space. 1955 // to new space.
1955 DCHECK(!target->IsMap()); 1956 DCHECK(!target->IsMap());
1956 1957
- 1957       IteratePointersToFromSpace(target, static_cast<int>(size),
- 1958                                  &Scavenger::ScavengeObject);
+ 1958       IteratePromotedObject(target, static_cast<int>(size), was_marked_black,
+ 1959                             &Scavenger::ScavengeObject);
1959 } 1960 }
1960 } 1961 }
1961 1962
1962 // Take another spin if there are now unswept objects in new space 1963 // Take another spin if there are now unswept objects in new space
1963 // (there are currently no more unswept promoted objects). 1964 // (there are currently no more unswept promoted objects).
1964 } while (new_space_front != new_space_.top()); 1965 } while (new_space_front != new_space_.top());
1965 1966
1966 return new_space_front; 1967 return new_space_front;
1967 } 1968 }
1968 1969
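The loop above now pops a third field, was_marked_black, alongside the promoted object and its size, so the scavenger knows whether the object had already been marked black before promotion. A minimal sketch of a promotion queue whose entries carry that extra bit (hypothetical names and layout, not V8's actual PromotionQueue, which packs its entries into the unused end of to-space):

#include <cstdint>
#include <vector>

// Illustrative stand-in for a promotion queue entry: object, size, and the
// marking state captured at the time the entry was pushed.
struct PromotedEntry {
  void* object;
  int32_t size;
  bool was_marked_black;
};

class SimplePromotionQueue {
 public:
  void insert(void* object, int32_t size, bool was_marked_black) {
    entries_.push_back({object, size, was_marked_black});
  }
  bool is_empty() const { return entries_.empty(); }
  void remove(void** object, int32_t* size, bool* was_marked_black) {
    PromotedEntry entry = entries_.back();
    entries_.pop_back();
    *object = entry.object;
    *size = entry.size;
    *was_marked_black = entry.was_marked_black;
  }

 private:
  std::vector<PromotedEntry> entries_;
};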
(...skipping 577 matching lines...)
2546 DCHECK(capacity > 0); 2547 DCHECK(capacity > 0);
2547 HeapObject* raw_array = nullptr; 2548 HeapObject* raw_array = nullptr;
2548 { 2549 {
2549 AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED); 2550 AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
2550 if (!allocation.To(&raw_array)) return allocation; 2551 if (!allocation.To(&raw_array)) return allocation;
2551 } 2552 }
2552 raw_array->set_map_no_write_barrier(transition_array_map()); 2553 raw_array->set_map_no_write_barrier(transition_array_map());
2553 TransitionArray* array = TransitionArray::cast(raw_array); 2554 TransitionArray* array = TransitionArray::cast(raw_array);
2554 array->set_length(capacity); 2555 array->set_length(capacity);
2555 MemsetPointer(array->data_start(), undefined_value(), capacity); 2556 MemsetPointer(array->data_start(), undefined_value(), capacity);
2557 // Transition arrays are tenured. When black allocation is on we have to
2558 // add the transition array to the list of encountered_transition_arrays.
2559 if (incremental_marking()->black_allocation()) {
2560 array->set_next_link(encountered_transition_arrays(),
2561 UPDATE_WEAK_WRITE_BARRIER);
2562 set_encountered_transition_arrays(array);
2563 } else {
2564 array->set_next_link(undefined_value(), SKIP_WRITE_BARRIER);
2565 }
2556 return array; 2566 return array;
2557 } 2567 }
2558 2568
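For context, the new branch above threads a freshly allocated (and, under black allocation, already marked) transition array onto the weak encountered_transition_arrays list through its next_link field, so the marker can still process it later. A bare-bones sketch of that intrusive-list push, with hypothetical types rather than V8's TransitionArray:

// Each element embeds its own link; pushing makes the new element point at the
// current head and then become the head itself.
struct EncounteredArray {
  EncounteredArray* next_link = nullptr;
};

struct EncounteredList {
  EncounteredArray* head = nullptr;

  void Push(EncounteredArray* array) {
    array->next_link = head;
    head = array;
  }
};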
2559 2569
2560 void Heap::CreateApiObjects() { 2570 void Heap::CreateApiObjects() {
2561 HandleScope scope(isolate()); 2571 HandleScope scope(isolate());
2562 Factory* factory = isolate()->factory(); 2572 Factory* factory = isolate()->factory();
2563 Handle<Map> new_neander_map = 2573 Handle<Map> new_neander_map =
2564 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); 2574 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2565 2575
(...skipping 760 matching lines...)
3326 return elements; 3336 return elements;
3327 } 3337 }
3328 3338
3329 3339
3330 AllocationResult Heap::AllocateCode(int object_size, bool immovable) { 3340 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3331 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment)); 3341 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3332 AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE); 3342 AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);
3333 3343
3334 HeapObject* result = nullptr; 3344 HeapObject* result = nullptr;
3335 if (!allocation.To(&result)) return allocation; 3345 if (!allocation.To(&result)) return allocation;
- 3336
3337 if (immovable) { 3346 if (immovable) {
3338 Address address = result->address(); 3347 Address address = result->address();
3339 // Code objects which should stay at a fixed address are allocated either 3348 // Code objects which should stay at a fixed address are allocated either
3340 // in the first page of code space (objects on the first page of each space 3349 // in the first page of code space (objects on the first page of each space
3341 // are never moved) or in large object space. 3350 // are never moved) or in large object space.
3342 if (!code_space_->FirstPage()->Contains(address) && 3351 if (!code_space_->FirstPage()->Contains(address) &&
3343 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) { 3352 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3344 // Discard the first code allocation, which was on a page where it could 3353 // Discard the first code allocation, which was on a page where it could
3345 // be moved. 3354 // be moved.
3346 CreateFillerObjectAt(result->address(), object_size, 3355 CreateFillerObjectAt(result->address(), object_size,
(...skipping 30 matching lines...)
3377 Address new_addr = result->address(); 3386 Address new_addr = result->address();
3378 CopyBlock(new_addr, old_addr, obj_size); 3387 CopyBlock(new_addr, old_addr, obj_size);
3379 Code* new_code = Code::cast(result); 3388 Code* new_code = Code::cast(result);
3380 3389
3381 // Relocate the copy. 3390 // Relocate the copy.
3382 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment)); 3391 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
3383 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() || 3392 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3384 isolate_->code_range()->contains(code->address()) || 3393 isolate_->code_range()->contains(code->address()) ||
3385 obj_size <= code_space()->AreaSize()); 3394 obj_size <= code_space()->AreaSize());
3386 new_code->Relocate(new_addr - old_addr); 3395 new_code->Relocate(new_addr - old_addr);
3396 // We have to iterate over the object and process its pointers when black
3397 // allocation is on.
3398 incremental_marking()->IterateBlackObject(new_code);
3387 return new_code; 3399 return new_code;
3388 } 3400 }
3389 3401
3390 AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) { 3402 AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
3391 int size = BytecodeArray::SizeFor(bytecode_array->length()); 3403 int size = BytecodeArray::SizeFor(bytecode_array->length());
3392 HeapObject* result = nullptr; 3404 HeapObject* result = nullptr;
3393 { 3405 {
3394 AllocationResult allocation = AllocateRaw(size, OLD_SPACE); 3406 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3395 if (!allocation.To(&result)) return allocation; 3407 if (!allocation.To(&result)) return allocation;
3396 } 3408 }
(...skipping 46 matching lines...)
3443 CopyBytes(new_code->relocation_start(), reloc_info.start(), 3455 CopyBytes(new_code->relocation_start(), reloc_info.start(),
3444 static_cast<size_t>(reloc_info.length())); 3456 static_cast<size_t>(reloc_info.length()));
3445 3457
3446 // Relocate the copy. 3458 // Relocate the copy.
3447 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment)); 3459 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
3448 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() || 3460 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3449 isolate_->code_range()->contains(code->address()) || 3461 isolate_->code_range()->contains(code->address()) ||
3450 new_obj_size <= code_space()->AreaSize()); 3462 new_obj_size <= code_space()->AreaSize());
3451 3463
3452 new_code->Relocate(new_addr - old_addr); 3464 new_code->Relocate(new_addr - old_addr);
- 3453
+ 3465   // We have to iterate over the object and process its pointers when
+ 3466   // black allocation is on.
+ 3467   incremental_marking()->IterateBlackObject(new_code);
3454 #ifdef VERIFY_HEAP 3468 #ifdef VERIFY_HEAP
3455 if (FLAG_verify_heap) code->ObjectVerify(); 3469 if (FLAG_verify_heap) code->ObjectVerify();
3456 #endif 3470 #endif
3457 return new_code; 3471 return new_code;
3458 } 3472 }
3459 3473
3460 3474
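The IterateBlackObject calls added after the two code-copying paths above follow the central rule of black allocation: an object allocated on a black page counts as already marked, so incremental marking will never scan it on its own, and its pointer fields must be visited once the object is fully initialized. A simplified sketch of that rule, using hypothetical types rather than V8's IncrementalMarking API:

#include <vector>

// Hypothetical heap object: a mark bit plus the objects it references.
struct SketchObject {
  bool marked = false;
  std::vector<SketchObject*> references;
};

// With black allocation on, obj is black from birth, so we visit its fields
// here and mark everything it points at; otherwise it starts white and the
// regular marking pass will scan it.
inline void IterateBlackObject(bool black_allocation, SketchObject* obj) {
  if (!black_allocation) return;
  obj->marked = true;
  for (SketchObject* ref : obj->references) {
    if (ref != nullptr) ref->marked = true;
  }
}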
3461 void Heap::InitializeAllocationMemento(AllocationMemento* memento, 3475 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3462 AllocationSite* allocation_site) { 3476 AllocationSite* allocation_site) {
3463 memento->set_map_no_write_barrier(allocation_memento_map()); 3477 memento->set_map_no_write_barrier(allocation_memento_map());
(...skipping 762 matching lines...)
4226 gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact( 4240 gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
4227 static_cast<size_t>(idle_time_in_ms), size_of_objects, 4241 static_cast<size_t>(idle_time_in_ms), size_of_objects,
4228 final_incremental_mark_compact_speed_in_bytes_per_ms))) { 4242 final_incremental_mark_compact_speed_in_bytes_per_ms))) {
4229 CollectAllGarbage(current_gc_flags_, 4243 CollectAllGarbage(current_gc_flags_,
4230 "idle notification: finalize incremental marking"); 4244 "idle notification: finalize incremental marking");
4231 return true; 4245 return true;
4232 } 4246 }
4233 return false; 4247 return false;
4234 } 4248 }
4235 4249
4250 void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
4251 // TODO(hpayer): We do not have to iterate reservations on black objects
4252 // for marking. We just have to execute the special visiting side effect
4253 // code that adds objects to global data structures, e.g. for array buffers.
4254 if (incremental_marking()->black_allocation()) {
4255 for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
4256 const Heap::Reservation& res = reservations[i];
4257 for (auto& chunk : res) {
4258 Address addr = chunk.start;
4259 while (addr < chunk.end) {
4260 HeapObject* obj = HeapObject::FromAddress(addr);
4261 incremental_marking()->IterateBlackObject(obj);
4262 addr += obj->Size();
4263 }
4264 }
4265 }
4266 }
4267 }
4236 4268
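RegisterReservationsForBlackAllocation uses the standard heap-walk idiom: deserialized objects lie back to back inside each reserved chunk, so advancing the address by each object's size visits every object exactly once. A generic sketch of that walk (the object layout and Size accessor are assumptions for illustration only):

#include <cstddef>
#include <cstdint>

// Assumed layout: the first word of every object records its total size in
// bytes, so the next object starts exactly Size() bytes later.
struct PackedObject {
  size_t size_in_bytes;
  size_t Size() const { return size_in_bytes; }
};

template <typename Visitor>
void VisitObjectsInChunk(uint8_t* start, uint8_t* end, Visitor&& visit) {
  uint8_t* addr = start;
  while (addr < end) {
    PackedObject* obj = reinterpret_cast<PackedObject*>(addr);
    visit(obj);           // e.g. hand the object to the incremental marker
    addr += obj->Size();
  }
}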
4237 GCIdleTimeHeapState Heap::ComputeHeapState() { 4269 GCIdleTimeHeapState Heap::ComputeHeapState() {
4238 GCIdleTimeHeapState heap_state; 4270 GCIdleTimeHeapState heap_state;
4239 heap_state.contexts_disposed = contexts_disposed_; 4271 heap_state.contexts_disposed = contexts_disposed_;
4240 heap_state.contexts_disposal_rate = 4272 heap_state.contexts_disposal_rate =
4241 tracer()->ContextDisposalRateInMilliseconds(); 4273 tracer()->ContextDisposalRateInMilliseconds();
4242 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects()); 4274 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4243 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped(); 4275 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
4244 return heap_state; 4276 return heap_state;
4245 } 4277 }
(...skipping 326 matching lines...)
4572 new_space_.FromSpaceEnd()); 4604 new_space_.FromSpaceEnd());
4573 while (it.has_next()) { 4605 while (it.has_next()) {
4574 NewSpacePage* page = it.next(); 4606 NewSpacePage* page = it.next();
4575 for (Address cursor = page->area_start(), limit = page->area_end(); 4607 for (Address cursor = page->area_start(), limit = page->area_end();
4576 cursor < limit; cursor += kPointerSize) { 4608 cursor < limit; cursor += kPointerSize) {
4577 Memory::Address_at(cursor) = kFromSpaceZapValue; 4609 Memory::Address_at(cursor) = kFromSpaceZapValue;
4578 } 4610 }
4579 } 4611 }
4580 } 4612 }
4581 4613
- 4582
- 4583 void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
- 4584                                              Address end, bool record_slots,
- 4585                                              ObjectSlotCallback callback) {
+ 4614 void Heap::IteratePromotedObjectPointers(HeapObject* object, Address start,
+ 4615                                          Address end, bool record_slots,
+ 4616                                          ObjectSlotCallback callback) {
4586 Address slot_address = start; 4617 Address slot_address = start;
4587 Page* page = Page::FromAddress(start); 4618 Page* page = Page::FromAddress(start);
4588 4619
4589 while (slot_address < end) { 4620 while (slot_address < end) {
4590 Object** slot = reinterpret_cast<Object**>(slot_address); 4621 Object** slot = reinterpret_cast<Object**>(slot_address);
4591 Object* target = *slot; 4622 Object* target = *slot;
4592 if (target->IsHeapObject()) { 4623 if (target->IsHeapObject()) {
4593 if (Heap::InFromSpace(target)) { 4624 if (Heap::InFromSpace(target)) {
4594 callback(reinterpret_cast<HeapObject**>(slot), 4625 callback(reinterpret_cast<HeapObject**>(slot),
4595 HeapObject::cast(target)); 4626 HeapObject::cast(target));
4596 Object* new_target = *slot; 4627 Object* new_target = *slot;
4597 if (InNewSpace(new_target)) { 4628 if (InNewSpace(new_target)) {
4598 SLOW_DCHECK(Heap::InToSpace(new_target)); 4629 SLOW_DCHECK(Heap::InToSpace(new_target));
4599 SLOW_DCHECK(new_target->IsHeapObject()); 4630 SLOW_DCHECK(new_target->IsHeapObject());
4600 RememberedSet<OLD_TO_NEW>::Insert(page, slot_address); 4631 RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
4601 } 4632 }
4602 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target)); 4633 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
4603 } else if (record_slots && 4634 } else if (record_slots &&
4604 MarkCompactCollector::IsOnEvacuationCandidate(target)) { 4635 MarkCompactCollector::IsOnEvacuationCandidate(target)) {
4605 mark_compact_collector()->RecordSlot(object, slot, target); 4636 mark_compact_collector()->RecordSlot(object, slot, target);
4606 } 4637 }
4607 } 4638 }
4608 slot_address += kPointerSize; 4639 slot_address += kPointerSize;
4609 } 4640 }
4610 } 4641 }
4611 4642
- 4612
- 4613 class IteratePointersToFromSpaceVisitor final : public ObjectVisitor {
+ 4643 class IteratePromotedObjectsVisitor final : public ObjectVisitor {
4614 public: 4644 public:
- 4615   IteratePointersToFromSpaceVisitor(Heap* heap, HeapObject* target,
- 4616                                     bool record_slots,
- 4617                                     ObjectSlotCallback callback)
+ 4645   IteratePromotedObjectsVisitor(Heap* heap, HeapObject* target,
+ 4646                                 bool record_slots, ObjectSlotCallback callback)
4618 : heap_(heap), 4647 : heap_(heap),
4619 target_(target), 4648 target_(target),
4620 record_slots_(record_slots), 4649 record_slots_(record_slots),
4621 callback_(callback) {} 4650 callback_(callback) {}
4622 4651
4623 V8_INLINE void VisitPointers(Object** start, Object** end) override { 4652 V8_INLINE void VisitPointers(Object** start, Object** end) override {
- 4624     heap_->IterateAndMarkPointersToFromSpace(
+ 4653     heap_->IteratePromotedObjectPointers(
4625 target_, reinterpret_cast<Address>(start), 4654 target_, reinterpret_cast<Address>(start),
4626 reinterpret_cast<Address>(end), record_slots_, callback_); 4655 reinterpret_cast<Address>(end), record_slots_, callback_);
4627 } 4656 }
4628 4657
- 4629   V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}
+ 4658   V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
4659 // Black allocation requires us to process objects referenced by
4660 // promoted objects.
4661 if (heap_->incremental_marking()->black_allocation()) {
4662 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
4663 IncrementalMarking::MarkObject(heap_, code);
4664 }
4665 }
4630 4666
4631 private: 4667 private:
4632 Heap* heap_; 4668 Heap* heap_;
4633 HeapObject* target_; 4669 HeapObject* target_;
4634 bool record_slots_; 4670 bool record_slots_;
4635 ObjectSlotCallback callback_; 4671 ObjectSlotCallback callback_;
4636 }; 4672 };
4637 4673
- 4638
- 4639 void Heap::IteratePointersToFromSpace(HeapObject* target, int size,
- 4640                                       ObjectSlotCallback callback) {
+ 4674 void Heap::IteratePromotedObject(HeapObject* target, int size,
+ 4675                                  bool was_marked_black,
+ 4676                                  ObjectSlotCallback callback) {
4641 // We are not collecting slots on new space objects during mutation 4677 // We are not collecting slots on new space objects during mutation
4642 // thus we have to scan for pointers to evacuation candidates when we 4678 // thus we have to scan for pointers to evacuation candidates when we
4643 // promote objects. But we should not record any slots in non-black 4679 // promote objects. But we should not record any slots in non-black
4644 // objects. Grey object's slots would be rescanned. 4680 // objects. Grey object's slots would be rescanned.
4645 // White object might not survive until the end of collection 4681 // White object might not survive until the end of collection
4646 // it would be a violation of the invariant to record it's slots. 4682 // it would be a violation of the invariant to record it's slots.
4647 bool record_slots = false; 4683 bool record_slots = false;
4648 if (incremental_marking()->IsCompacting()) { 4684 if (incremental_marking()->IsCompacting()) {
4649 MarkBit mark_bit = Marking::MarkBitFrom(target); 4685 MarkBit mark_bit = Marking::MarkBitFrom(target);
4650 record_slots = Marking::IsBlack(mark_bit); 4686 record_slots = Marking::IsBlack(mark_bit);
4651 } 4687 }
4652 4688
- 4653   IteratePointersToFromSpaceVisitor visitor(this, target, record_slots,
- 4654                                              callback);
+ 4689   IteratePromotedObjectsVisitor visitor(this, target, record_slots, callback);
4655 target->IterateBody(target->map()->instance_type(), size, &visitor); 4690 target->IterateBody(target->map()->instance_type(), size, &visitor);
4691
4692 // When black allocation is on, we have to visit new space objects that were
4693 // not already marked black and were promoted to black pages, to keep their
4694 // references alive.
4695 // TODO(hpayer): Implement a special promotion visitor that incorporates
4696 // regular visiting and IteratePromotedObjectPointers.
4697 if (!was_marked_black) {
4698 if (incremental_marking()->black_allocation()) {
4699 Map* map = target->map();
4700 IncrementalMarking::MarkObject(this, map);
4701 }
4702 incremental_marking()->IterateBlackObject(target);
4703 }
4656 } 4704 }
4657 4705
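The record_slots decision in IteratePromotedObject encodes a marking invariant spelled out in the comment above: evacuation-candidate slots are only worth recording when the promoted holder is already black, since grey holders will be rescanned anyway and white holders may not survive the cycle. Roughly, as a simplified stand-in for V8's mark bits:

enum class MarkColor { kWhite, kGrey, kBlack };

// Record evacuation-candidate slots only while compacting and only for black
// holders; grey holders are rescanned later and white holders may die.
inline bool ShouldRecordSlots(bool compacting, MarkColor holder_color) {
  return compacting && holder_color == MarkColor::kBlack;
}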
4658 4706
4659 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { 4707 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4660 IterateStrongRoots(v, mode); 4708 IterateStrongRoots(v, mode);
4661 IterateWeakRoots(v, mode); 4709 IterateWeakRoots(v, mode);
4662 } 4710 }
4663 4711
4664 4712
4665 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) { 4713 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
(...skipping 1684 matching lines...)
6350 } 6398 }
6351 6399
6352 6400
6353 // static 6401 // static
6354 int Heap::GetStaticVisitorIdForMap(Map* map) { 6402 int Heap::GetStaticVisitorIdForMap(Map* map) {
6355 return StaticVisitorBase::GetVisitorId(map); 6403 return StaticVisitorBase::GetVisitorId(map);
6356 } 6404 }
6357 6405
6358 } // namespace internal 6406 } // namespace internal
6359 } // namespace v8 6407 } // namespace v8
