Chromium Code Reviews

Unified Diff: src/heap/heap.cc

Issue 2407153002: [heap] Use RAIL mode for initial heap sizing (Closed)
Patch Set: comments | Created 4 years, 2 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/heap.h"

 #include "src/accessors.h"
 #include "src/api.h"
 #include "src/ast/context-slot-cache.h"
 #include "src/base/bits.h"
(...skipping 62 matching lines...)
     : external_memory_(0),
       external_memory_limit_(kExternalAllocationSoftLimit),
       external_memory_at_last_mark_compact_(0),
       isolate_(nullptr),
       code_range_size_(0),
       // semispace_size_ should be a power of 2 and old_generation_size_ should
       // be a multiple of Page::kPageSize.
       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
       initial_semispace_size_(MB),
       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
-      initial_old_generation_size_(max_old_generation_size_ /
-                                   kInitalOldGenerationLimitFactor),
-      old_generation_size_configured_(false),
       max_executable_size_(256ul * (kPointerSize / 4) * MB),
       // Variables set based on semispace_size_ and old_generation_size_ in
       // ConfigureHeap.
       // Will be 4 * reserved_semispace_size_ to ensure that young
       // generation can be aligned to its size.
       maximum_committed_(0),
       survived_since_last_expansion_(0),
       survived_last_scavenge_(0),
       always_allocate_scope_count_(0),
       memory_pressure_level_(MemoryPressureLevel::kNone),
       contexts_disposed_(0),
       number_of_disposed_maps_(0),
       global_ic_age_(0),
       new_space_(nullptr),
       old_space_(NULL),
       code_space_(NULL),
       map_space_(NULL),
       lo_space_(NULL),
       gc_state_(NOT_IN_GC),
       gc_post_processing_depth_(0),
       allocations_count_(0),
       raw_allocations_hash_(0),
       ms_count_(0),
       gc_count_(0),
       remembered_unmapped_pages_index_(0),
 #ifdef DEBUG
       allocation_timeout_(0),
 #endif  // DEBUG
-      old_generation_allocation_limit_(initial_old_generation_size_),
+      old_generation_allocation_limit_(0),
       inline_allocation_disabled_(false),
       total_regexp_code_generated_(0),
       tracer_(nullptr),
       promoted_objects_size_(0),
       promotion_ratio_(0),
       semi_space_copied_object_size_(0),
       previous_semi_space_copied_object_size_(0),
       semi_space_copied_rate_(0),
       nodes_died_in_new_space_(0),
       nodes_copied_in_new_space_(0),
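
Note: with initial_old_generation_size_ and old_generation_size_configured_ removed, the constructor now seeds old_generation_allocation_limit_ with 0, which this patch treats as a "not yet computed" sentinel; OldGenerationSpaceAvailable() (further down in this patch) fills the limit in lazily on first use. A minimal sketch of that sentinel pattern, using stand-in names rather than the real Heap members:

    // Sketch only: sentinel-based lazy initialization of a size limit.
    // LazyLimit, live_bytes, and growing_factor are illustrative names.
    #include <cstdint>

    struct LazyLimit {
      int64_t value = 0;  // 0 means "not yet computed"
      int64_t Get(int64_t live_bytes, double growing_factor) {
        if (value == 0) {  // first use: derive the limit from the live size
          value = static_cast<int64_t>(live_bytes * growing_factor);
        }
        return value;
      }
    };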
(...skipping 915 matching lines...)
                    kNoGCCallbackFlags);
   }

   return next_gc_likely_to_collect_more;
 }


 int Heap::NotifyContextDisposed(bool dependant_context) {
   if (!dependant_context) {
     tracer()->ResetSurvivalEvents();
-    old_generation_size_configured_ = false;
     MemoryReducer::Event event;
     event.type = MemoryReducer::kPossibleGarbage;
     event.time_ms = MonotonicallyIncreasingTimeInMs();
     memory_reducer_->NotifyPossibleGarbage(event);
   }
   if (isolate()->concurrent_recompilation_enabled()) {
     // Flush the queued recompilation tasks.
     isolate()->optimizing_compile_dispatcher()->Flush();
   }
   AgeInlineCaches();
(...skipping 243 matching lines...)

   int start_new_space_size = static_cast<int>(Heap::new_space()->Size());

   {
     Heap::PretenuringScope pretenuring_scope(this);

     if (collector == MARK_COMPACTOR) {
       UpdateOldGenerationAllocationCounter();
       // Perform mark-sweep with optional compaction.
       MarkCompact();
-      old_generation_size_configured_ = true;
       // This should be updated before PostGarbageCollectionProcessing, which
       // can cause another GC. Take into account the objects promoted during GC.
       old_generation_allocation_counter_at_last_gc_ +=
           static_cast<size_t>(promoted_objects_size_);
       old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
     } else {
       Scavenge();
     }

     ProcessPretenuringFeedback();
   }

   UpdateSurvivalStatistics(start_new_space_size);
-  ConfigureInitialOldGenerationSize();

   isolate_->counters()->objs_since_last_young()->Set(0);

   gc_post_processing_depth_++;
   {
     AllowHeapAllocation allow_allocation;
     TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_WEAK_GLOBAL_HANDLES);
     freed_global_handles =
         isolate_->global_handles()->PostGarbageCollectionProcessing(
             collector, gc_callback_flags);
   }
   gc_post_processing_depth_--;

   isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);

   // Update relocatables.
   Relocatable::PostGarbageCollectionProcessing(isolate_);

   double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
   double mutator_speed =
       tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
   intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
     external_memory_at_last_mark_compact_ = external_memory_;
     external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
     SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
-  } else if (HasLowYoungGenerationAllocationRate() &&
-             old_generation_size_configured_) {
+  } else if (HasLowYoungGenerationAllocationRate()) {
     DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
   }

   {
     GCCallbacksScope scope(this);
     if (scope.CheckReenter()) {
       AllowHeapAllocation allow_allocation;
       TRACE_GC(tracer(), collector == MARK_COMPACTOR
                              ? GCTracer::Scope::MC_EXTERNAL_EPILOGUE
                              : GCTracer::Scope::SCAVENGER_EXTERNAL_EPILOGUE);
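
Note: after this change the post-GC limit update no longer consults old_generation_size_configured_: a full mark-compact recomputes the limit from measured GC and mutator speeds, while a scavenge merely dampens (shrinks) the limit when the young-generation allocation rate is low. A condensed sketch of that branch structure; the two helpers below are stand-ins for the real Heap methods of similar names:

    // Sketch of the post-GC limit update. SetLimit / DampenLimit stand in
    // for Heap::SetOldGenerationAllocationLimit and
    // Heap::DampenOldGenerationAllocationLimit.
    #include <cstdint>

    void SetLimit(int64_t, double, double) {}     // stub
    void DampenLimit(int64_t, double, double) {}  // stub

    void UpdateLimitAfterGC(bool was_mark_compact, bool low_young_gen_alloc_rate,
                            int64_t old_gen_size, double gc_speed,
                            double mutator_speed) {
      if (was_mark_compact) {
        // Full GC: recompute the limit from measured throughput.
        SetLimit(old_gen_size, gc_speed, mutator_speed);
      } else if (low_young_gen_alloc_rate) {
        // Scavenge with an idle mutator: only allow the limit to shrink.
        DampenLimit(old_gen_size, gc_speed, mutator_speed);
      }
    }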
(...skipping 647 matching lines...)
 void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
   ArrayBufferTracker::RegisterNew(this, buffer);
 }


 void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
   ArrayBufferTracker::Unregister(this, buffer);
 }


-void Heap::ConfigureInitialOldGenerationSize() {
-  if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
-    old_generation_allocation_limit_ =
-        Max(MinimumAllocationLimitGrowingStep(),
-            static_cast<intptr_t>(
-                static_cast<double>(old_generation_allocation_limit_) *
-                (tracer()->AverageSurvivalRatio() / 100)));
-  }
-}
-
-
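Note: the deleted helper shrank the initial limit once survival data existed, clamping at the minimum growing step: new_limit = Max(MinimumAllocationLimitGrowingStep(), old_limit * AverageSurvivalRatio() / 100). This patch drops that early shrinking in favor of the lazily computed limit plus the RAIL load-mode checks added below. A standalone restatement of the removed arithmetic, with an illustrative signature:

    // What the removed helper computed (stand-in parameter names).
    #include <algorithm>
    #include <cstdint>

    int64_t ShrinkInitialLimit(int64_t old_limit, double avg_survival_ratio_pct,
                               int64_t min_growing_step) {
      int64_t scaled =
          static_cast<int64_t>(old_limit * (avg_survival_ratio_pct / 100.0));
      return std::max(min_growing_step, scaled);  // Max(...) in V8 terms
    }
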
 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
                                           int instance_size) {
   Object* result = nullptr;
   AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
   if (!allocation.To(&result)) return allocation;

   // Map::cast cannot be used due to uninitialized map field.
   reinterpret_cast<Map*>(result)->set_map(
       reinterpret_cast<Map*>(root(kMetaMapRootIndex)));
   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
(...skipping 3029 matching lines...)
   max_old_generation_size_ =
       Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
           max_old_generation_size_);

   // The max executable size must be less than or equal to the max old
   // generation size.
   if (max_executable_size_ > max_old_generation_size_) {
     max_executable_size_ = max_old_generation_size_;
   }

-  if (FLAG_initial_old_space_size > 0) {
-    initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
-  } else {
-    initial_old_generation_size_ =
-        max_old_generation_size_ / kInitalOldGenerationLimitFactor;
-  }
-  old_generation_allocation_limit_ = initial_old_generation_size_;
-
   // We rely on being able to allocate new arrays in paged spaces.
   DCHECK(kMaxRegularHeapObjectSize >=
          (JSArray::kSize +
           FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
           AllocationMemento::kSize));

   code_range_size_ = code_range_size * MB;

   configured_ = true;
   return true;
(...skipping 219 matching lines...)
5322 "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR 5296 "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR
5323 " KB, " 5297 " KB, "
5324 "new limit: %" V8PRIdPTR " KB (%.1f)\n", 5298 "new limit: %" V8PRIdPTR " KB (%.1f)\n",
5325 old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB, 5299 old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB,
5326 factor); 5300 factor);
5327 } 5301 }
5328 old_generation_allocation_limit_ = limit; 5302 old_generation_allocation_limit_ = limit;
5329 } 5303 }
5330 } 5304 }
5331 5305
5306 intptr_t Heap::OldGenerationSpaceAvailable() {
5307 if (old_generation_allocation_limit_ == 0) {
5308 // Lazy initialization of allocation limit.
5309 old_generation_allocation_limit_ = CalculateOldGenerationAllocationLimit(
5310 kConservativeHeapGrowingFactor, PromotedSpaceSizeOfObjects());
5311 }
5312 return old_generation_allocation_limit_ - PromotedTotalSize();
5313 }
5314
5315 bool Heap::ShouldOptimizeForLoadTime() {
5316 return isolate()->rail_mode() == PERFORMANCE_LOAD &&
5317 PromotedTotalSize() <
5318 max_old_generation_size_ / kInitalOldGenerationLimitFactor &&
5319 MonotonicallyIncreasingTimeInMs() <
5320 isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
5321 }
5322
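Note: ShouldOptimizeForLoadTime() is the core of the RAIL-based sizing. RAIL is Chrome's Response, Animation, Idle, Load performance model; while the embedder reports the Load phase (PERFORMANCE_LOAD), the promoted size is still below the usual initial-limit fraction of the maximum, and the load began less than kMaxLoadTimeMs ago, V8 prefers growing the heap over collecting. A self-contained sketch of the same three-way check; the struct, the field names, and the constant values are stand-ins, not the real V8 declarations (kMaxLoadTimeMs is defined in heap.h and not shown in this diff):

    // Sketch only: the three conditions behind ShouldOptimizeForLoadTime().
    #include <cstdint>

    enum RAILMode { PERFORMANCE_ANIMATION, PERFORMANCE_IDLE, PERFORMANCE_LOAD };

    struct HeapSnapshot {               // stand-in for isolate/heap/tracer state
      RAILMode rail_mode;
      int64_t promoted_total_size;      // bytes promoted so far
      int64_t max_old_generation_size;  // configured maximum, bytes
      double now_ms;                    // monotonic time
      double load_start_time_ms;        // when the current load began
    };

    constexpr int kInitalOldGenerationLimitFactor = 2;  // assumed value
    constexpr double kMaxLoadTimeMs = 3000;             // assumed value

    bool ShouldOptimizeForLoadTime(const HeapSnapshot& s) {
      return s.rail_mode == PERFORMANCE_LOAD &&
             s.promoted_total_size <
                 s.max_old_generation_size / kInitalOldGenerationLimitFactor &&
             s.now_ms < s.load_start_time_ms + kMaxLoadTimeMs;
    }

The size and time bounds cap how long the heuristic can defer collection, so a page that allocates heavily or loads slowly still gets a GC.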
 // This predicate is called when an old generation space cannot allocate from
 // the free list and is about to add a new page. Returning false will cause a
 // major GC. It happens when the old generation allocation limit is reached and
 // - either we need to optimize for memory usage,
 // - or the incremental marking is not in progress and we cannot start it.
 bool Heap::ShouldExpandOldGenerationOnAllocationFailure() {
   if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
   // We reached the old generation allocation limit.

   if (ShouldOptimizeForMemoryUsage()) return false;

+  if (ShouldOptimizeForLoadTime()) return true;
+
   if (incremental_marking()->IsStopped() &&
       IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
     // We cannot start incremental marking.
     return false;
   }
   return true;
 }
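
Note: the order of the early returns encodes a priority: staying under the limit (or always_allocate) wins, memory pressure then forces a GC, the new load-time check then allows growth, and only after that does the marking state decide. A condensed sketch of that decision order, with the predicates passed in as plain booleans:

    // Sketch of the decision order when a paged space wants a new page.
    #include <cstdint>

    bool ShouldExpandOnAllocationFailure(bool always_allocate,
                                         int64_t old_gen_available,
                                         bool optimize_for_memory,
                                         bool optimize_for_load,
                                         bool marking_running_or_startable) {
      if (always_allocate || old_gen_available > 0) return true;  // under limit
      if (optimize_for_memory) return false;  // prefer a major GC
      if (optimize_for_load) return true;     // new: keep growing during load
      // Otherwise expand only if incremental marking is running or can start.
      return marking_running_or_startable;
    }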

 // This function returns either kNoLimit, kSoftLimit, or kHardLimit.
 // The kNoLimit means that either incremental marking is disabled or it is too
 // early to start incremental marking.
 // The kSoftLimit means that incremental marking should be started soon.
 // The kHardLimit means that incremental marking should be started immediately.
 Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
   if (!incremental_marking()->CanBeActivated() ||
       PromotedSpaceSizeOfObjects() < IncrementalMarking::kActivationThreshold) {
     // Incremental marking is disabled or it is too early to start.
     return IncrementalMarkingLimit::kNoLimit;
   }
   if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
       HighMemoryPressure()) {
     // If there is high memory pressure or stress testing is enabled, then
     // start marking immediately.
     return IncrementalMarkingLimit::kHardLimit;
   }
   intptr_t old_generation_space_available = OldGenerationSpaceAvailable();
   if (old_generation_space_available > new_space_->Capacity()) {
     return IncrementalMarkingLimit::kNoLimit;
   }
+
+  if (ShouldOptimizeForLoadTime()) return IncrementalMarkingLimit::kNoLimit;
+
   // We are close to the allocation limit.
   // Choose between the hard and the soft limits.
   if (old_generation_space_available <= 0 || ShouldOptimizeForMemoryUsage()) {
     return IncrementalMarkingLimit::kHardLimit;
   }
   return IncrementalMarkingLimit::kSoftLimit;
 }
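
Note: read top to bottom, the function is a decision ladder, and the position of the new ShouldOptimizeForLoadTime() check matters: it suppresses the soft and hard limits only after the activation and memory-pressure checks, so high memory pressure still starts marking immediately even during a load. A condensed restatement of the ladder; only the ordering is taken from the code above, the parameters are stand-ins:

    // Sketch of the incremental-marking limit ladder.
    #include <cstdint>

    enum class MarkingLimit { kNoLimit, kSoftLimit, kHardLimit };

    MarkingLimit LimitReached(bool can_activate, bool stress_or_high_pressure,
                              int64_t old_gen_available,
                              int64_t new_space_capacity,
                              bool optimize_for_load, bool optimize_for_memory) {
      if (!can_activate) return MarkingLimit::kNoLimit;  // disabled / too early
      if (stress_or_high_pressure) return MarkingLimit::kHardLimit;
      if (old_gen_available > new_space_capacity) return MarkingLimit::kNoLimit;
      if (optimize_for_load) return MarkingLimit::kNoLimit;  // new in this patch
      if (old_gen_available <= 0 || optimize_for_memory)
        return MarkingLimit::kHardLimit;
      return MarkingLimit::kSoftLimit;
    }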

 void Heap::EnableInlineAllocation() {
   if (!inline_allocation_disabled_) return;
(...skipping 1121 matching lines...)
 }


 // static
 int Heap::GetStaticVisitorIdForMap(Map* map) {
   return StaticVisitorBase::GetVisitorId(map);
 }

 }  // namespace internal
 }  // namespace v8