Chromium Code Reviews

Side by Side Diff: src/heap/heap.cc

Issue 1608583002: New page local store buffer. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 11 months ago
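
For orientation: the diff below removes the global store-buffer plumbing (GCPrologue/GCEpilogue calls, StoreBufferRebuilder, scan-on-scavenge page counting) and records old-to-new slots on the page that contains the slot instead. The following is only a minimal standalone sketch of that idea, using assumed names and an assumed page size, not the V8 implementation:

    // Minimal sketch of a page-local store buffer (assumed names and page
    // size, not the V8 code): each page owns the set of "old-to-new" slot
    // addresses that lie on it, so recording and releasing slots is local
    // to one page.
    #include <cstdint>
    #include <set>
    #include <unordered_map>

    using Address = uintptr_t;
    constexpr Address kPageSize = Address{1} << 19;  // assumption: 512 KB pages
    constexpr Address kPageMask = ~(kPageSize - 1);

    struct PageSlots {
      std::set<Address> old_to_new;  // slots on this page pointing to new space
    };

    class PageLocalStoreBuffer {
     public:
      // Record a slot that now holds a pointer into the young generation.
      void Mark(Address slot) {
        pages_[slot & kPageMask].old_to_new.insert(slot);
      }

      // Drop every slot recorded for one page, e.g. when the page is freed.
      void ReleasePage(Address page_start) {
        pages_.erase(page_start & kPageMask);
      }

      // Visit all recorded old-to-new slots (the scavenger's input).
      template <typename Callback>
      void IteratePointersToNewSpace(Callback callback) {
        for (auto& entry : pages_)
          for (Address slot : entry.second.old_to_new) callback(slot);
      }

     private:
      std::unordered_map<Address, PageSlots> pages_;  // keyed by page start
    };

In that spirit, the hunk in Heap::IterateAndMarkPointersToFromSpace below now calls store_buffer_.Mark(slot) when a promoted object still references new space, instead of entering the slot into a global buffer.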
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/heap/heap.h"
 
 #include "src/accessors.h"
 #include "src/api.h"
 #include "src/ast/scopeinfo.h"
 #include "src/base/bits.h"
(...skipping 76 matching lines...)
       // ConfigureHeap.
       // Will be 4 * reserved_semispace_size_ to ensure that young
       // generation can be aligned to its size.
       maximum_committed_(0),
       survived_since_last_expansion_(0),
       survived_last_scavenge_(0),
       always_allocate_scope_count_(0),
       contexts_disposed_(0),
       number_of_disposed_maps_(0),
       global_ic_age_(0),
-      scan_on_scavenge_pages_(0),
       new_space_(this),
       old_space_(NULL),
       code_space_(NULL),
       map_space_(NULL),
       lo_space_(NULL),
       gc_state_(NOT_IN_GC),
       gc_post_processing_depth_(0),
       allocations_count_(0),
       raw_allocations_hash_(0),
       ms_count_(0),
       gc_count_(0),
       remembered_unmapped_pages_index_(0),
 #ifdef DEBUG
       allocation_timeout_(0),
 #endif  // DEBUG
       old_generation_allocation_limit_(initial_old_generation_size_),
       old_gen_exhausted_(false),
       optimize_for_memory_usage_(false),
       inline_allocation_disabled_(false),
-      store_buffer_rebuilder_(store_buffer()),
       total_regexp_code_generated_(0),
       tracer_(nullptr),
       high_survival_rate_period_length_(0),
       promoted_objects_size_(0),
       promotion_ratio_(0),
       semi_space_copied_object_size_(0),
       previous_semi_space_copied_object_size_(0),
       semi_space_copied_rate_(0),
       nodes_died_in_new_space_(0),
       nodes_copied_in_new_space_(0),
(...skipping 319 matching lines...)
   UpdateMaximumCommitted();
 
 #ifdef DEBUG
   DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
 
   if (FLAG_gc_verbose) Print();
 
   ReportStatisticsBeforeGC();
 #endif  // DEBUG
 
-  store_buffer()->GCPrologue();
-
   if (isolate()->concurrent_osr_enabled()) {
     isolate()->optimizing_compile_dispatcher()->AgeBufferedOsrJobs();
   }
 
   if (new_space_.IsAtMaximumCapacity()) {
     maximum_size_scavenges_++;
   } else {
     maximum_size_scavenges_ = 0;
   }
   CheckNewSpaceExpansionCriteria();
(...skipping 163 matching lines...)
           isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
       site->set_deopt_dependent_code(false);
     }
     list_element = site->weak_next();
   }
   Deoptimizer::DeoptimizeMarkedCode(isolate_);
 }
 
 
 void Heap::GarbageCollectionEpilogue() {
-  store_buffer()->GCEpilogue();
-
   // In release mode, we only zap the from space under heap verification.
   if (Heap::ShouldZapGarbage()) {
     ZapFromSpace();
   }
 
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
   }
 #endif
(...skipping 891 matching lines...)
   JSFunction* constructor = JSFunction::cast(obj_constructor);
   if (!constructor->shared()->IsApiFunction()) return false;
   if (constructor != nullptr &&
       constructor->initial_map() == heap_object->map()) {
     return true;
   }
   return false;
 }
 
 
-void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
-                                       StoreBufferEvent event) {
-  heap->store_buffer_rebuilder_.Callback(page, event);
-}
-
-
 void PromotionQueue::Initialize() {
   // The last to-space page may be used for promotion queue. On promotion
   // conflict, we use the emergency stack.
   DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
          0);
   front_ = rear_ =
       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
   limit_ = reinterpret_cast<intptr_t*>(
       Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
   emergency_stack_ = NULL;
(...skipping 112 matching lines...)
   {
     // Copy roots.
     GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
     IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
   }
 
   {
     // Copy objects reachable from the old generation.
     GCTracer::Scope gc_scope(tracer(),
                              GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
-    StoreBufferRebuildScope scope(this, store_buffer(),
-                                  &ScavengeStoreBufferCallback);
     store_buffer()->IteratePointersToNewSpace(&Scavenger::ScavengeObject);
   }
 
   {
     GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
     // Copy objects reachable from the encountered weak collections list.
     scavenge_visitor.VisitPointer(&encountered_weak_collections_);
     // Copy objects reachable from the encountered weak cells.
     scavenge_visitor.VisitPointer(&encountered_weak_cells_);
   }
(...skipping 234 matching lines...)
         new_space_front +=
             StaticScavengeVisitor::IterateBody(object->map(), object);
       } else {
         new_space_front =
             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
       }
     }
 
     // Promote and process all the to-be-promoted objects.
     {
-      StoreBufferRebuildScope scope(this, store_buffer(),
-                                    &ScavengeStoreBufferCallback);
       while (!promotion_queue()->is_empty()) {
         HeapObject* target;
         int size;
         promotion_queue()->remove(&target, &size);
 
         // Promoted object might be already partially visited
         // during old space pointer iteration. Thus we search specifically
         // for pointers to from semispace instead of looking for pointers
         // to new space.
         DCHECK(!target->IsMap());
(...skipping 2527 matching lines...)
     // to the new space. In that case we may hit newly promoted objects and
     // fix the pointers before the promotion queue gets to them. Thus the 'if'.
     if (target->IsHeapObject()) {
       if (Heap::InFromSpace(target)) {
         callback(reinterpret_cast<HeapObject**>(slot),
                  HeapObject::cast(target));
         Object* new_target = *slot;
         if (InNewSpace(new_target)) {
           SLOW_DCHECK(Heap::InToSpace(new_target));
           SLOW_DCHECK(new_target->IsHeapObject());
-          store_buffer_.EnterDirectlyIntoStoreBuffer(
-              reinterpret_cast<Address>(slot));
+          store_buffer_.Mark(reinterpret_cast<Address>(slot));
         }
         SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
       } else if (record_slots &&
                  MarkCompactCollector::IsOnEvacuationCandidate(target)) {
         mark_compact_collector()->RecordSlot(object, slot, target);
       }
     }
     slot_address += kPointerSize;
   }
 }
(...skipping 1583 matching lines...)
   // PreFree logically frees the memory chunk. However, the actual freeing
   // will happen on a separate thread sometime later.
   isolate_->memory_allocator()->PreFreeMemory(chunk);
 
   // The chunks added to this queue will be freed by a concurrent thread.
   chunk->set_next_chunk(chunks_queued_for_free_);
   chunks_queued_for_free_ = chunk;
 }
 
 
 void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
Hannes Payer (out of office) 2016/01/20 19:43:00 This method should not be needed anymore because:
ulan 2016/01/28 19:07:21 Done.
   if (chunks_queued_for_free_ == NULL) return;
   MemoryChunk* next;
   MemoryChunk* chunk;
   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
     next = chunk->next_chunk();
+    chunk->ReleaseOldToNewSlots();
Hannes Payer (out of office) 2016/01/20 19:43:00 They should be released in MemoryChunk::ReleaseAll
ulan 2016/01/28 19:07:22 Done.
     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
Hannes Payer (out of office) 2016/01/20 19:43:00 ABOUT_TO_BE_FREED should not be needed anymore.
ulan 2016/01/28 19:07:21 Done.
   }
-  store_buffer()->Compact();
-  store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
 }
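
As the review comments above indicate, this filtering pass and the ABOUT_TO_BE_FREED flag should no longer be needed once the old-to-new slots are owned by the chunk itself: releasing the chunk's slot set (ReleaseOldToNewSlots in the hunk above, to be moved as suggested) discards every remembered-set entry for that page, so there is no global store buffer left to Compact() or Filter(). A minimal sketch of that ownership, with hypothetical types rather than the V8 ones:

    // Hypothetical sketch only: remembered-set entries are owned by the
    // chunk, so they disappear together with it and no separate filtering of
    // a global store buffer is needed before the chunk is handed off for
    // freeing.
    #include <cstdint>
    #include <memory>
    #include <set>

    struct SlotSet {
      std::set<uintptr_t> slots;  // recorded old-to-new slot addresses
    };

    struct ChunkSketch {
      std::unique_ptr<SlotSet> old_to_new_slots = std::make_unique<SlotSet>();

      // Rough counterpart of ReleaseOldToNewSlots(): dropping the set is all
      // the cleanup the remembered set needs.
      void ReleaseOldToNewSlots() { old_to_new_slots.reset(); }
    };

    // Queueing a chunk for freeing no longer touches any global store buffer.
    inline void PreFreeChunk(ChunkSketch* chunk) {
      chunk->ReleaseOldToNewSlots();
      // ... hand the chunk memory to a background unmapper thread ...
    }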
 
 
 void Heap::FreeQueuedChunks() {
   if (chunks_queued_for_free_ != NULL) {
     if (FLAG_concurrent_sweeping) {
       V8::GetCurrentPlatform()->CallOnBackgroundThread(
           new UnmapFreeMemoryTask(this, chunks_queued_for_free_),
           v8::Platform::kShortRunningTask);
     } else {
(...skipping 120 matching lines...)
 }
 
 
 // static
 int Heap::GetStaticVisitorIdForMap(Map* map) {
   return StaticVisitorBase::GetVisitorId(map);
 }
 
 }  // namespace internal
 }  // namespace v8