Chromium Code Reviews

Diff: src/heap/heap.cc

Issue 1420423009: [heap] Black allocation. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 9 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/heap.h"

 #include "src/accessors.h"
 #include "src/api.h"
 #include "src/ast/scopeinfo.h"
 #include "src/base/bits.h"
(...skipping 1559 matching lines...)

 void PromotionQueue::RelocateQueueHead() {
   DCHECK(emergency_stack_ == NULL);

   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
   struct Entry* head_start = rear_;
   struct Entry* head_end =
       Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));

   int entries_count =
-      static_cast<int>(head_end - head_start) / kEntrySizeInWords;
+      static_cast<int>(head_end - head_start) / sizeof(struct Entry);

   emergency_stack_ = new List<Entry>(2 * entries_count);

   while (head_start != head_end) {
     struct Entry* entry = head_start++;
     // New space allocation in SemiSpaceCopyObject marked the region
     // overlapping with promotion queue as uninitialized.
     MSAN_MEMORY_IS_INITIALIZED(entry, sizeof(struct Entry));
     emergency_stack_->Add(*entry);
   }
(...skipping 346 matching lines...)
       } else {
         new_space_front =
             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
       }
     }

     // Promote and process all the to-be-promoted objects.
     {
       while (!promotion_queue()->is_empty()) {
         HeapObject* target;
-        intptr_t size;
-        promotion_queue()->remove(&target, &size);
+        int32_t size;
+        bool was_marked_black;
+        promotion_queue()->remove(&target, &size, &was_marked_black);

         // Promoted object might be already partially visited
         // during old space pointer iteration. Thus we search specifically
         // for pointers to from semispace instead of looking for pointers
         // to new space.
         DCHECK(!target->IsMap());

-        IteratePointersToFromSpace(target, static_cast<int>(size),
-                                   &Scavenger::ScavengeObject);
+        IteratePromotedObject(target, static_cast<int>(size), was_marked_black,
+                              &Scavenger::ScavengeObject);
       }
     }

     // Take another spin if there are now unswept objects in new space
     // (there are currently no more unswept promoted objects).
   } while (new_space_front != new_space_.top());

   return new_space_front;
 }

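The loop above now pulls three values out of the promotion queue: the promoted object, its size (narrowed from intptr_t to int32_t), and a was_marked_black bit telling the scavenger whether the object had already been marked black when it was queued. The following minimal, self-contained sketch shows one way such an entry could be packed and consumed; the struct layout and names are illustrative assumptions, not the actual PromotionQueue implementation.

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical entry layout: a 31-bit size leaves exactly one bit to remember
// the marking state, which would explain the narrowed size type above.
struct Entry {
  void* object;
  int32_t size : 31;
  uint32_t was_marked_black : 1;
};

class ToyPromotionQueue {
 public:
  void insert(void* object, int32_t size, bool was_marked_black) {
    Entry e;
    e.object = object;
    e.size = size;
    e.was_marked_black = was_marked_black ? 1 : 0;
    entries_.push_back(e);
  }

  bool is_empty() const { return entries_.empty(); }

  // Mirrors the three-out-parameter remove() used in the scavenge loop above.
  void remove(void** object, int32_t* size, bool* was_marked_black) {
    Entry e = entries_.back();
    entries_.pop_back();
    *object = e.object;
    *size = e.size;
    *was_marked_black = e.was_marked_black != 0;
  }

 private:
  std::vector<Entry> entries_;
};

int main() {
  ToyPromotionQueue queue;
  int dummy = 0;
  queue.insert(&dummy, 64, true);

  void* target;
  int32_t size;
  bool was_marked_black;
  queue.remove(&target, &size, &was_marked_black);
  assert(target == &dummy && size == 64 && was_marked_black);
  assert(queue.is_empty());
  return 0;
}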
(...skipping 577 matching lines...)
   DCHECK(capacity > 0);
   HeapObject* raw_array = nullptr;
   {
     AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
     if (!allocation.To(&raw_array)) return allocation;
   }
   raw_array->set_map_no_write_barrier(transition_array_map());
   TransitionArray* array = TransitionArray::cast(raw_array);
   array->set_length(capacity);
   MemsetPointer(array->data_start(), undefined_value(), capacity);
+  // Transition arrays are tenured. When black allocation is on we have to
+  // add the transition array to the list of encountered_transition_arrays.
+  if (incremental_marking()->black_allocation()) {
+    array->set_next_link(encountered_transition_arrays(),
+                         UPDATE_WEAK_WRITE_BARRIER);
+    set_encountered_transition_arrays(array);
+  } else {
+    array->set_next_link(undefined_value(), SKIP_WRITE_BARRIER);
+  }
   return array;
 }
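The branch added above threads a transition array that was allocated black onto the encountered_transition_arrays list through its next_link field, so the incremental marker can still find and process it even though the array will never turn grey on its own. A simplified, self-contained illustration of that bookkeeping follows; it uses toy types and no write barriers, and is not V8's API.

#include <cassert>

struct ToyTransitionArray {
  ToyTransitionArray* next_link = nullptr;
};

struct ToyHeap {
  bool black_allocation = false;
  ToyTransitionArray* encountered_transition_arrays = nullptr;

  ToyTransitionArray* AllocateTransitionArray() {
    ToyTransitionArray* array = new ToyTransitionArray();
    if (black_allocation) {
      // The array was allocated black, so the marker will not scan it on its
      // own; prepend it to the list of arrays that must be revisited.
      array->next_link = encountered_transition_arrays;
      encountered_transition_arrays = array;
    }
    return array;
  }
};

int main() {
  ToyHeap heap;
  heap.AllocateTransitionArray();  // black allocation off: not linked
  heap.black_allocation = true;
  heap.AllocateTransitionArray();  // linked
  heap.AllocateTransitionArray();  // linked, becomes the new list head

  int linked = 0;
  for (ToyTransitionArray* a = heap.encountered_transition_arrays; a != nullptr;
       a = a->next_link) {
    ++linked;
  }
  assert(linked == 2);
  return 0;
}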


 void Heap::CreateApiObjects() {
   HandleScope scope(isolate());
   Factory* factory = isolate()->factory();
   Handle<Map> new_neander_map =
       factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);

(...skipping 760 matching lines...)
   return elements;
 }


 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
   DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
   AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);

   HeapObject* result = nullptr;
   if (!allocation.To(&result)) return allocation;
-
   if (immovable) {
     Address address = result->address();
     // Code objects which should stay at a fixed address are allocated either
     // in the first page of code space (objects on the first page of each space
     // are never moved) or in large object space.
     if (!code_space_->FirstPage()->Contains(address) &&
         MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
       // Discard the first code allocation, which was on a page where it could
       // be moved.
       CreateFillerObjectAt(result->address(), object_size,
(...skipping 30 matching lines...)
   Address new_addr = result->address();
   CopyBlock(new_addr, old_addr, obj_size);
   Code* new_code = Code::cast(result);

   // Relocate the copy.
   DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
   DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
          isolate_->code_range()->contains(code->address()) ||
          obj_size <= code_space()->AreaSize());
   new_code->Relocate(new_addr - old_addr);
+  // We have to iterate over the object and process its pointers when black
+  // allocation is on.
+  incremental_marking()->IterateBlackObject(new_code);
   return new_code;
 }

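The IterateBlackObject() call added after Relocate() exists because a code object copied under black allocation is born black: the marker treats it as already scanned, so pointers written into it by the raw CopyBlock() would otherwise never be traced. A minimal tri-color sketch of that hazard and of the repair step follows; the types are toys, not V8 code.

#include <cassert>
#include <vector>

enum class Color { kWhite, kGrey, kBlack };

struct Obj {
  Color color = Color::kWhite;
  std::vector<Obj*> fields;
};

// Rough analogue of IncrementalMarking::IterateBlackObject: visit the fields
// of an already-black object and grey any white referents so the marker will
// eventually process them.
void IterateBlackObject(Obj* obj, std::vector<Obj*>* marking_worklist) {
  if (obj->color != Color::kBlack) return;
  for (Obj* target : obj->fields) {
    if (target->color == Color::kWhite) {
      target->color = Color::kGrey;
      marking_worklist->push_back(target);
    }
  }
}

int main() {
  Obj referent;                      // still white
  Obj copy;
  copy.color = Color::kBlack;        // allocated black (black allocation on)
  copy.fields.push_back(&referent);  // the raw byte copy wrote this pointer

  // Without this pass the marker would never look inside `copy` again and
  // `referent` would stay white, i.e. appear dead.
  std::vector<Obj*> worklist;
  IterateBlackObject(&copy, &worklist);
  assert(referent.color == Color::kGrey);
  assert(worklist.size() == 1);
  return 0;
}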
 AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
   int size = BytecodeArray::SizeFor(bytecode_array->length());
   HeapObject* result = nullptr;
   {
     AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
(...skipping 46 matching lines...)
   CopyBytes(new_code->relocation_start(), reloc_info.start(),
             static_cast<size_t>(reloc_info.length()));

   // Relocate the copy.
   DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
   DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
          isolate_->code_range()->contains(code->address()) ||
          new_obj_size <= code_space()->AreaSize());

   new_code->Relocate(new_addr - old_addr);
-
+  // We have to iterate over the object and process its pointers when
+  // black allocation is on.
+  incremental_marking()->IterateBlackObject(new_code);
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) code->ObjectVerify();
 #endif
   return new_code;
 }


 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
                                        AllocationSite* allocation_site) {
   memento->set_map_no_write_barrier(allocation_memento_map());
(...skipping 762 matching lines...)
           gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
               static_cast<size_t>(idle_time_in_ms), size_of_objects,
               final_incremental_mark_compact_speed_in_bytes_per_ms))) {
     CollectAllGarbage(current_gc_flags_,
                       "idle notification: finalize incremental marking");
     return true;
   }
   return false;
 }

+void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
+  // TODO(hpayer): We do not have to iterate reservations on black objects
+  // for marking. We just have to execute the special visiting side effect
+  // code that adds objects to global data structures, e.g. for array buffers.
+  if (incremental_marking()->black_allocation()) {
+    for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+      const Heap::Reservation& res = reservations[i];
+      for (auto& chunk : res) {
+        Address addr = chunk.start;
+        while (addr < chunk.end) {
+          HeapObject* obj = HeapObject::FromAddress(addr);
+          incremental_marking()->IterateBlackObject(obj);
+          addr += obj->Size();
+        }
+      }
+    }
+  }
+}

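RegisterReservationsForBlackAllocation() walks every chunk of a reservation object by object, advancing by each object's size. A self-contained sketch of that linear walk follows; it uses a toy 4-byte size header in place of V8's map-derived object sizes, so the names and layout are illustrative only.

#include <cassert>
#include <cstdint>
#include <cstring>

// Toy "heap object": a 4-byte size field (total size, header included)
// followed by payload bytes.
uint32_t ReadSize(const uint8_t* addr) {
  uint32_t size;
  std::memcpy(&size, addr, sizeof(size));
  return size;
}

// Walk [start, end) the way the chunk loop above does: visit the object at
// addr, then advance by its size.
int WalkChunk(const uint8_t* start, const uint8_t* end) {
  int visited = 0;
  const uint8_t* addr = start;
  while (addr < end) {
    ++visited;  // stand-in for incremental_marking()->IterateBlackObject(obj)
    addr += ReadSize(addr);
  }
  return visited;
}

int main() {
  uint8_t chunk[64] = {0};
  // Lay out three objects of 16, 24 and 24 bytes, back to back.
  const uint32_t sizes[] = {16, 24, 24};
  uint8_t* p = chunk;
  for (uint32_t s : sizes) {
    std::memcpy(p, &s, sizeof(s));
    p += s;
  }
  assert(WalkChunk(chunk, chunk + sizeof(chunk)) == 3);
  return 0;
}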
 GCIdleTimeHeapState Heap::ComputeHeapState() {
   GCIdleTimeHeapState heap_state;
   heap_state.contexts_disposed = contexts_disposed_;
   heap_state.contexts_disposal_rate =
       tracer()->ContextDisposalRateInMilliseconds();
   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
   return heap_state;
 }
(...skipping 326 matching lines...)
                          new_space_.FromSpaceEnd());
   while (it.has_next()) {
     NewSpacePage* page = it.next();
     for (Address cursor = page->area_start(), limit = page->area_end();
          cursor < limit; cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
     }
   }
 }

-
-void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
-                                             Address end, bool record_slots,
-                                             ObjectSlotCallback callback) {
+void Heap::IteratePromotedObjectPointers(HeapObject* object, Address start,
+                                         Address end, bool record_slots,
+                                         ObjectSlotCallback callback) {
   Address slot_address = start;
   Page* page = Page::FromAddress(start);

   while (slot_address < end) {
     Object** slot = reinterpret_cast<Object**>(slot_address);
     Object* target = *slot;
     if (target->IsHeapObject()) {
       if (Heap::InFromSpace(target)) {
         callback(reinterpret_cast<HeapObject**>(slot),
                  HeapObject::cast(target));
         Object* new_target = *slot;
         if (InNewSpace(new_target)) {
           SLOW_DCHECK(Heap::InToSpace(new_target));
           SLOW_DCHECK(new_target->IsHeapObject());
           RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
         }
         SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
       } else if (record_slots &&
                  MarkCompactCollector::IsOnEvacuationCandidate(target)) {
         mark_compact_collector()->RecordSlot(object, slot, target);
       }
     }
     slot_address += kPointerSize;
   }
 }

-
-class IteratePointersToFromSpaceVisitor final : public ObjectVisitor {
+class IteratePromotedObjectsVisitor final : public ObjectVisitor {
  public:
-  IteratePointersToFromSpaceVisitor(Heap* heap, HeapObject* target,
-                                    bool record_slots,
-                                    ObjectSlotCallback callback)
+  IteratePromotedObjectsVisitor(Heap* heap, HeapObject* target,
+                                bool record_slots, ObjectSlotCallback callback)
       : heap_(heap),
         target_(target),
         record_slots_(record_slots),
         callback_(callback) {}

   V8_INLINE void VisitPointers(Object** start, Object** end) override {
-    heap_->IterateAndMarkPointersToFromSpace(
+    heap_->IteratePromotedObjectPointers(
         target_, reinterpret_cast<Address>(start),
         reinterpret_cast<Address>(end), record_slots_, callback_);
   }

-  V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}
+  V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
+    // Black allocation requires us to process objects referenced by
+    // promoted objects.
+    if (heap_->incremental_marking()->black_allocation()) {
+      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
+      IncrementalMarking::MarkObject(heap_, code);
+    }
+  }

  private:
   Heap* heap_;
   HeapObject* target_;
   bool record_slots_;
   ObjectSlotCallback callback_;
 };

-
-void Heap::IteratePointersToFromSpace(HeapObject* target, int size,
-                                      ObjectSlotCallback callback) {
+void Heap::IteratePromotedObject(HeapObject* target, int size,
+                                 bool was_marked_black,
+                                 ObjectSlotCallback callback) {
   // We are not collecting slots on new space objects during mutation,
   // thus we have to scan for pointers to evacuation candidates when we
   // promote objects. But we should not record any slots in non-black
   // objects. Grey objects' slots would be rescanned. White objects might
   // not survive until the end of the collection, so it would be a violation
   // of the invariant to record their slots.
   bool record_slots = false;
   if (incremental_marking()->IsCompacting()) {
     MarkBit mark_bit = Marking::MarkBitFrom(target);
     record_slots = Marking::IsBlack(mark_bit);
   }

-  IteratePointersToFromSpaceVisitor visitor(this, target, record_slots,
-                                            callback);
+  IteratePromotedObjectsVisitor visitor(this, target, record_slots, callback);
   target->IterateBody(target->map()->instance_type(), size, &visitor);
+
+  // When black allocation is on, we have to visit objects (in new space) that
+  // were not already marked black and got promoted to black pages, to keep
+  // their references alive.
+  // TODO(hpayer): Implement a special promotion visitor that incorporates
+  // regular visiting and IteratePromotedObjectPointers.
+  if (!was_marked_black) {
+    if (incremental_marking()->black_allocation()) {
+      Map* map = target->map();
+      IncrementalMarking::MarkObject(this, map);
+    }
+    incremental_marking()->IterateBlackObject(target);
+  }
 }

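The promoted-object scan above does two related things: IteratePromotedObject only records slots when the compacting marker has already marked the promoted object black (grey objects will be rescanned anyway, and white objects may not survive), and IteratePromotedObjectPointers adds every slot that still points into new space to the old-to-new remembered set so later scavenges can find it without rescanning the whole object. A simplified sketch of that slot scan follows; it omits the scavenge callback and evacuation-candidate handling, and the names are illustrative, not V8's API.

#include <cassert>
#include <cstdint>
#include <set>

struct ToySlotScanner {
  uintptr_t new_space_start = 0;
  uintptr_t new_space_end = 0;
  std::set<void**> old_to_new;  // analogue of RememberedSet<OLD_TO_NEW>

  bool InNewSpace(void* p) const {
    uintptr_t a = reinterpret_cast<uintptr_t>(p);
    return a >= new_space_start && a < new_space_end;
  }

  // Treat [start, end) as an array of pointer-sized slots belonging to a
  // freshly promoted (old-space) object.
  void ScanPromotedObject(void** start, void** end) {
    for (void** slot = start; slot < end; ++slot) {
      if (*slot != nullptr && InNewSpace(*slot)) {
        // Record the slot so a later scavenge finds this old-to-new pointer
        // without rescanning the whole promoted object.
        old_to_new.insert(slot);
      }
    }
  }
};

int main() {
  static char new_space[4096];  // pretend this range is the young generation

  ToySlotScanner scanner;
  scanner.new_space_start = reinterpret_cast<uintptr_t>(new_space);
  scanner.new_space_end = scanner.new_space_start + sizeof(new_space);

  void* promoted_fields[3] = {nullptr, new_space + 128, nullptr};
  scanner.ScanPromotedObject(promoted_fields, promoted_fields + 3);
  assert(scanner.old_to_new.size() == 1);
  return 0;
}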

 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
   IterateStrongRoots(v, mode);
   IterateWeakRoots(v, mode);
 }


 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
(...skipping 1684 matching lines...)
 }


 // static
 int Heap::GetStaticVisitorIdForMap(Map* map) {
   return StaticVisitorBase::GetVisitorId(map);
 }

 }  // namespace internal
 }  // namespace v8
