Chromium Code Reviews

Side by Side Diff: src/heap/heap.cc

Issue 1608583002: New page local store buffer. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Rebase and fix signed/unsigned conversion. Created 4 years, 10 months ago
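Reviewer summary (a hedged reading of this patch set, not the author's own description): the global store-buffer machinery that was rebuilt around every scavenge is removed. The GCPrologue/GCEpilogue calls, the scan_on_scavenge_pages_ counter, the store_buffer_rebuilder_ / ScavengeStoreBufferCallback / StoreBufferRebuildScope plumbing, and Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages all go away; slots that still point into new space are instead recorded directly on the now page-local store buffer via StoreBuffer::Mark. A minimal sketch of the new slot-recording path, lifted from the change below that replaces EnterDirectlyIntoStoreBuffer with Mark (the surrounding loop and the heap/slot variable names are assumptions for illustration only):

    // After a slot in a promoted object has been updated, re-record it if the
    // updated value still points into new space. This replaces the old
    // EnterDirectlyIntoStoreBuffer call; no StoreBufferRebuildScope is needed.
    Object* new_target = *slot;
    if (heap->InNewSpace(new_target)) {
      SLOW_DCHECK(Heap::InToSpace(new_target));
      heap->store_buffer()->Mark(reinterpret_cast<Address>(slot));
    }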
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/heap.h" 5 #include "src/heap/heap.h"
6 6
7 #include "src/accessors.h" 7 #include "src/accessors.h"
8 #include "src/api.h" 8 #include "src/api.h"
9 #include "src/ast/scopeinfo.h" 9 #include "src/ast/scopeinfo.h"
10 #include "src/base/bits.h" 10 #include "src/base/bits.h"
(...skipping 75 matching lines...)
86 // ConfigureHeap. 86 // ConfigureHeap.
87 // Will be 4 * reserved_semispace_size_ to ensure that young 87 // Will be 4 * reserved_semispace_size_ to ensure that young
88 // generation can be aligned to its size. 88 // generation can be aligned to its size.
89 maximum_committed_(0), 89 maximum_committed_(0),
90 survived_since_last_expansion_(0), 90 survived_since_last_expansion_(0),
91 survived_last_scavenge_(0), 91 survived_last_scavenge_(0),
92 always_allocate_scope_count_(0), 92 always_allocate_scope_count_(0),
93 contexts_disposed_(0), 93 contexts_disposed_(0),
94 number_of_disposed_maps_(0), 94 number_of_disposed_maps_(0),
95 global_ic_age_(0), 95 global_ic_age_(0),
96 scan_on_scavenge_pages_(0),
97 new_space_(this), 96 new_space_(this),
98 old_space_(NULL), 97 old_space_(NULL),
99 code_space_(NULL), 98 code_space_(NULL),
100 map_space_(NULL), 99 map_space_(NULL),
101 lo_space_(NULL), 100 lo_space_(NULL),
102 gc_state_(NOT_IN_GC), 101 gc_state_(NOT_IN_GC),
103 gc_post_processing_depth_(0), 102 gc_post_processing_depth_(0),
104 allocations_count_(0), 103 allocations_count_(0),
105 raw_allocations_hash_(0), 104 raw_allocations_hash_(0),
106 ms_count_(0), 105 ms_count_(0),
107 gc_count_(0), 106 gc_count_(0),
108 remembered_unmapped_pages_index_(0), 107 remembered_unmapped_pages_index_(0),
109 #ifdef DEBUG 108 #ifdef DEBUG
110 allocation_timeout_(0), 109 allocation_timeout_(0),
111 #endif // DEBUG 110 #endif // DEBUG
112 old_generation_allocation_limit_(initial_old_generation_size_), 111 old_generation_allocation_limit_(initial_old_generation_size_),
113 old_gen_exhausted_(false), 112 old_gen_exhausted_(false),
114 optimize_for_memory_usage_(false), 113 optimize_for_memory_usage_(false),
115 inline_allocation_disabled_(false), 114 inline_allocation_disabled_(false),
116 store_buffer_rebuilder_(store_buffer()),
117 total_regexp_code_generated_(0), 115 total_regexp_code_generated_(0),
118 tracer_(nullptr), 116 tracer_(nullptr),
119 high_survival_rate_period_length_(0), 117 high_survival_rate_period_length_(0),
120 promoted_objects_size_(0), 118 promoted_objects_size_(0),
121 promotion_ratio_(0), 119 promotion_ratio_(0),
122 semi_space_copied_object_size_(0), 120 semi_space_copied_object_size_(0),
123 previous_semi_space_copied_object_size_(0), 121 previous_semi_space_copied_object_size_(0),
124 semi_space_copied_rate_(0), 122 semi_space_copied_rate_(0),
125 nodes_died_in_new_space_(0), 123 nodes_died_in_new_space_(0),
126 nodes_copied_in_new_space_(0), 124 nodes_copied_in_new_space_(0),
(...skipping 319 matching lines...)
446 UpdateMaximumCommitted(); 444 UpdateMaximumCommitted();
447 445
448 #ifdef DEBUG 446 #ifdef DEBUG
449 DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC); 447 DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
450 448
451 if (FLAG_gc_verbose) Print(); 449 if (FLAG_gc_verbose) Print();
452 450
453 ReportStatisticsBeforeGC(); 451 ReportStatisticsBeforeGC();
454 #endif // DEBUG 452 #endif // DEBUG
455 453
456 store_buffer()->GCPrologue();
457
458 if (isolate()->concurrent_osr_enabled()) { 454 if (isolate()->concurrent_osr_enabled()) {
459 isolate()->optimizing_compile_dispatcher()->AgeBufferedOsrJobs(); 455 isolate()->optimizing_compile_dispatcher()->AgeBufferedOsrJobs();
460 } 456 }
461 457
462 if (new_space_.IsAtMaximumCapacity()) { 458 if (new_space_.IsAtMaximumCapacity()) {
463 maximum_size_scavenges_++; 459 maximum_size_scavenges_++;
464 } else { 460 } else {
465 maximum_size_scavenges_ = 0; 461 maximum_size_scavenges_ = 0;
466 } 462 }
467 CheckNewSpaceExpansionCriteria(); 463 CheckNewSpaceExpansionCriteria();
(...skipping 167 matching lines...)
635 isolate_, DependentCode::kAllocationSiteTenuringChangedGroup); 631 isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
636 site->set_deopt_dependent_code(false); 632 site->set_deopt_dependent_code(false);
637 } 633 }
638 list_element = site->weak_next(); 634 list_element = site->weak_next();
639 } 635 }
640 Deoptimizer::DeoptimizeMarkedCode(isolate_); 636 Deoptimizer::DeoptimizeMarkedCode(isolate_);
641 } 637 }
642 638
643 639
644 void Heap::GarbageCollectionEpilogue() { 640 void Heap::GarbageCollectionEpilogue() {
645 store_buffer()->GCEpilogue();
646
647 // In release mode, we only zap the from space under heap verification. 641 // In release mode, we only zap the from space under heap verification.
648 if (Heap::ShouldZapGarbage()) { 642 if (Heap::ShouldZapGarbage()) {
649 ZapFromSpace(); 643 ZapFromSpace();
650 } 644 }
651 645
652 #ifdef VERIFY_HEAP 646 #ifdef VERIFY_HEAP
653 if (FLAG_verify_heap) { 647 if (FLAG_verify_heap) {
654 Verify(); 648 Verify();
655 } 649 }
656 #endif 650 #endif
(...skipping 890 matching lines...)
1547 JSFunction* constructor = JSFunction::cast(obj_constructor); 1541 JSFunction* constructor = JSFunction::cast(obj_constructor);
1548 if (!constructor->shared()->IsApiFunction()) return false; 1542 if (!constructor->shared()->IsApiFunction()) return false;
1549 if (constructor != nullptr && 1543 if (constructor != nullptr &&
1550 constructor->initial_map() == heap_object->map()) { 1544 constructor->initial_map() == heap_object->map()) {
1551 return true; 1545 return true;
1552 } 1546 }
1553 return false; 1547 return false;
1554 } 1548 }
1555 1549
1556 1550
1557 void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
1558 StoreBufferEvent event) {
1559 heap->store_buffer_rebuilder_.Callback(page, event);
1560 }
1561
1562
1563 void PromotionQueue::Initialize() { 1551 void PromotionQueue::Initialize() {
1564 // The last to-space page may be used for promotion queue. On promotion 1552 // The last to-space page may be used for promotion queue. On promotion
1565 // conflict, we use the emergency stack. 1553 // conflict, we use the emergency stack.
1566 DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) == 1554 DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
1567 0); 1555 0);
1568 front_ = rear_ = 1556 front_ = rear_ =
1569 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd()); 1557 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1570 limit_ = reinterpret_cast<intptr_t*>( 1558 limit_ = reinterpret_cast<intptr_t*>(
1571 Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start()); 1559 Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
1572 emergency_stack_ = NULL; 1560 emergency_stack_ = NULL;
(...skipping 112 matching lines...)
1685 { 1673 {
1686 // Copy roots. 1674 // Copy roots.
1687 GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_ROOTS); 1675 GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
1688 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE); 1676 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1689 } 1677 }
1690 1678
1691 { 1679 {
1692 // Copy objects reachable from the old generation. 1680 // Copy objects reachable from the old generation.
1693 GCTracer::Scope gc_scope(tracer(), 1681 GCTracer::Scope gc_scope(tracer(),
1694 GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS); 1682 GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
1695 StoreBufferRebuildScope scope(this, store_buffer(),
1696 &ScavengeStoreBufferCallback);
1697 store_buffer()->IteratePointersToNewSpace(&Scavenger::ScavengeObject); 1683 store_buffer()->IteratePointersToNewSpace(&Scavenger::ScavengeObject);
1698 } 1684 }
1699 1685
1700 { 1686 {
1701 GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_WEAK); 1687 GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
1702 // Copy objects reachable from the encountered weak collections list. 1688 // Copy objects reachable from the encountered weak collections list.
1703 scavenge_visitor.VisitPointer(&encountered_weak_collections_); 1689 scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1704 // Copy objects reachable from the encountered weak cells. 1690 // Copy objects reachable from the encountered weak cells.
1705 scavenge_visitor.VisitPointer(&encountered_weak_cells_); 1691 scavenge_visitor.VisitPointer(&encountered_weak_cells_);
1706 } 1692 }
(...skipping 234 matching lines...)
1941 new_space_front += 1927 new_space_front +=
1942 StaticScavengeVisitor::IterateBody(object->map(), object); 1928 StaticScavengeVisitor::IterateBody(object->map(), object);
1943 } else { 1929 } else {
1944 new_space_front = 1930 new_space_front =
1945 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start(); 1931 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1946 } 1932 }
1947 } 1933 }
1948 1934
1949 // Promote and process all the to-be-promoted objects. 1935 // Promote and process all the to-be-promoted objects.
1950 { 1936 {
1951 StoreBufferRebuildScope scope(this, store_buffer(),
1952 &ScavengeStoreBufferCallback);
1953 while (!promotion_queue()->is_empty()) { 1937 while (!promotion_queue()->is_empty()) {
1954 HeapObject* target; 1938 HeapObject* target;
1955 int size; 1939 int size;
1956 promotion_queue()->remove(&target, &size); 1940 promotion_queue()->remove(&target, &size);
1957 1941
1958 // Promoted object might be already partially visited 1942 // Promoted object might be already partially visited
1959 // during old space pointer iteration. Thus we search specifically 1943 // during old space pointer iteration. Thus we search specifically
1960 // for pointers to from semispace instead of looking for pointers 1944 // for pointers to from semispace instead of looking for pointers
1961 // to new space. 1945 // to new space.
1962 DCHECK(!target->IsMap()); 1946 DCHECK(!target->IsMap());
(...skipping 2517 matching lines...)
4480 // to the new space. In that case we may hit newly promoted objects and 4464 // to the new space. In that case we may hit newly promoted objects and
4481 // fix the pointers before the promotion queue gets to them. Thus the 'if'. 4465 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
4482 if (target->IsHeapObject()) { 4466 if (target->IsHeapObject()) {
4483 if (Heap::InFromSpace(target)) { 4467 if (Heap::InFromSpace(target)) {
4484 callback(reinterpret_cast<HeapObject**>(slot), 4468 callback(reinterpret_cast<HeapObject**>(slot),
4485 HeapObject::cast(target)); 4469 HeapObject::cast(target));
4486 Object* new_target = *slot; 4470 Object* new_target = *slot;
4487 if (InNewSpace(new_target)) { 4471 if (InNewSpace(new_target)) {
4488 SLOW_DCHECK(Heap::InToSpace(new_target)); 4472 SLOW_DCHECK(Heap::InToSpace(new_target));
4489 SLOW_DCHECK(new_target->IsHeapObject()); 4473 SLOW_DCHECK(new_target->IsHeapObject());
4490 store_buffer_.EnterDirectlyIntoStoreBuffer( 4474 store_buffer_.Mark(reinterpret_cast<Address>(slot));
4491 reinterpret_cast<Address>(slot));
4492 } 4475 }
4493 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target)); 4476 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
4494 } else if (record_slots && 4477 } else if (record_slots &&
4495 MarkCompactCollector::IsOnEvacuationCandidate(target)) { 4478 MarkCompactCollector::IsOnEvacuationCandidate(target)) {
4496 mark_compact_collector()->RecordSlot(object, slot, target); 4479 mark_compact_collector()->RecordSlot(object, slot, target);
4497 } 4480 }
4498 } 4481 }
4499 slot_address += kPointerSize; 4482 slot_address += kPointerSize;
4500 } 4483 }
4501 } 4484 }
(...skipping 1560 matching lines...)
6062 // PreFree logically frees the memory chunk. However, the actual freeing 6045 // PreFree logically frees the memory chunk. However, the actual freeing
6063 // will happen on a separate thread sometime later. 6046 // will happen on a separate thread sometime later.
6064 isolate_->memory_allocator()->PreFreeMemory(chunk); 6047 isolate_->memory_allocator()->PreFreeMemory(chunk);
6065 6048
6066 // The chunks added to this queue will be freed by a concurrent thread. 6049 // The chunks added to this queue will be freed by a concurrent thread.
6067 chunk->set_next_chunk(chunks_queued_for_free_); 6050 chunk->set_next_chunk(chunks_queued_for_free_);
6068 chunks_queued_for_free_ = chunk; 6051 chunks_queued_for_free_ = chunk;
6069 } 6052 }
6070 6053
6071 6054
6072 void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
6073 if (chunks_queued_for_free_ == NULL) return;
6074 MemoryChunk* next;
6075 MemoryChunk* chunk;
6076 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6077 next = chunk->next_chunk();
6078 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6079 }
6080 store_buffer()->Compact();
6081 store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6082 }
6083
6084
6085 void Heap::FreeQueuedChunks() { 6055 void Heap::FreeQueuedChunks() {
6086 if (chunks_queued_for_free_ != NULL) { 6056 if (chunks_queued_for_free_ != NULL) {
6087 if (FLAG_concurrent_sweeping) { 6057 if (FLAG_concurrent_sweeping) {
6088 V8::GetCurrentPlatform()->CallOnBackgroundThread( 6058 V8::GetCurrentPlatform()->CallOnBackgroundThread(
6089 new UnmapFreeMemoryTask(this, chunks_queued_for_free_), 6059 new UnmapFreeMemoryTask(this, chunks_queued_for_free_),
6090 v8::Platform::kShortRunningTask); 6060 v8::Platform::kShortRunningTask);
6091 } else { 6061 } else {
6092 FreeQueuedChunks(chunks_queued_for_free_); 6062 FreeQueuedChunks(chunks_queued_for_free_);
6093 pending_unmapping_tasks_semaphore_.Signal(); 6063 pending_unmapping_tasks_semaphore_.Signal();
6094 } 6064 }
(...skipping 117 matching lines...)
6212 } 6182 }
6213 6183
6214 6184
6215 // static 6185 // static
6216 int Heap::GetStaticVisitorIdForMap(Map* map) { 6186 int Heap::GetStaticVisitorIdForMap(Map* map) {
6217 return StaticVisitorBase::GetVisitorId(map); 6187 return StaticVisitorBase::GetVisitorId(map);
6218 } 6188 }
6219 6189
6220 } // namespace internal 6190 } // namespace internal
6221 } // namespace v8 6191 } // namespace v8