Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 3528c93f4162e26078fbff411692d0e32c21cee6..194f1cbaf7883ff0c1a6282d1ea45d3c0f801554 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -179,14 +179,13 @@ Heap::Heap()
   RememberUnmappedPage(NULL, false);
 }
 
-
-intptr_t Heap::Capacity() {
+size_t Heap::Capacity() {
   if (!HasBeenSetUp()) return 0;
 
   return new_space_->Capacity() + OldGenerationCapacity();
 }
 
-intptr_t Heap::OldGenerationCapacity() {
+size_t Heap::OldGenerationCapacity() {
   if (!HasBeenSetUp()) return 0;
 
   return old_space_->Capacity() + code_space_->Capacity() +
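
The hunk above is representative of the whole CL: byte counts that can never be negative move from intptr_t to size_t. The main hazard of the migration is that unsigned subtraction wraps around instead of going negative, which is why later hunks guard or reorder every difference. A minimal standalone sketch of that behavior (illustrative only, not V8 code):

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t used = 8;
      size_t committed = 16;
      // Fine: committed >= used, so the difference is meaningful.
      std::printf("%zu\n", committed - used);  // prints 8
      // Wraps: a "negative" unsigned difference becomes a huge value.
      std::printf("%zu\n", used - committed);  // prints SIZE_MAX - 7
      return 0;
    }
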
@@ -233,11 +232,10 @@ void Heap::UpdateMaximumCommitted() {
   }
 }
 
-
-intptr_t Heap::Available() {
+size_t Heap::Available() {
   if (!HasBeenSetUp()) return 0;
 
-  intptr_t total = 0;
+  size_t total = 0;
   AllSpaces spaces(this);
   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
     total += space->Available();
@@ -275,8 +273,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
   // and does not count available bytes already in the old space or code
   // space. Undercounting is safe---we may get an unrequested full GC when
   // a scavenge would have succeeded.
-  if (static_cast<intptr_t>(memory_allocator()->MaxAvailable()) <=
-      new_space_->Size()) {
+  if (memory_allocator()->MaxAvailable() <= new_space_->Size()) {
     isolate_->counters()
         ->gc_compactor_caused_by_oldspace_exhaustion()
         ->Increment();
@@ -316,55 +313,55 @@ void Heap::ReportStatisticsBeforeGC() {
 
 void Heap::PrintShortHeapStatistics() {
   if (!FLAG_trace_gc_verbose) return;
-  PrintIsolate(isolate_,
-               "Memory allocator, used: %6zu KB,"
-               " available: %6zu KB\n",
+  PrintIsolate(isolate_, "Memory allocator, used: %6" PRIuS
+                         " KB,"
+                         " available: %6" PRIuS " KB\n",
               memory_allocator()->Size() / KB,
               memory_allocator()->Available() / KB);
-  PrintIsolate(isolate_, "New space, used: %6" V8PRIdPTR
+  PrintIsolate(isolate_, "New space, used: %6" PRIuS
                          " KB"
-                         ", available: %6" V8PRIdPTR
+                         ", available: %6" PRIuS
                          " KB"
-                         ", committed: %6zu KB\n",
+                         ", committed: %6" PRIuS " KB\n",
               new_space_->Size() / KB, new_space_->Available() / KB,
               new_space_->CommittedMemory() / KB);
-  PrintIsolate(isolate_, "Old space, used: %6" V8PRIdPTR
+  PrintIsolate(isolate_, "Old space, used: %6" PRIuS
                          " KB"
-                         ", available: %6" V8PRIdPTR
+                         ", available: %6" PRIuS
                          " KB"
-                         ", committed: %6zu KB\n",
+                         ", committed: %6" PRIuS " KB\n",
               old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
               old_space_->CommittedMemory() / KB);
-  PrintIsolate(isolate_, "Code space, used: %6" V8PRIdPTR
+  PrintIsolate(isolate_, "Code space, used: %6" PRIuS
                          " KB"
-                         ", available: %6" V8PRIdPTR
+                         ", available: %6" PRIuS
                          " KB"
-                         ", committed: %6zu KB\n",
+                         ", committed: %6" PRIuS " KB\n",
               code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
               code_space_->CommittedMemory() / KB);
-  PrintIsolate(isolate_, "Map space, used: %6" V8PRIdPTR
+  PrintIsolate(isolate_, "Map space, used: %6" PRIuS
                          " KB"
-                         ", available: %6" V8PRIdPTR
+                         ", available: %6" PRIuS
                          " KB"
-                         ", committed: %6zu KB\n",
+                         ", committed: %6" PRIuS " KB\n",
               map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
               map_space_->CommittedMemory() / KB);
-  PrintIsolate(isolate_, "Large object space, used: %6" V8PRIdPTR
+  PrintIsolate(isolate_, "Large object space, used: %6" PRIuS
                          " KB"
-                         ", available: %6" V8PRIdPTR
+                         ", available: %6" PRIuS
                          " KB"
-                         ", committed: %6zu KB\n",
+                         ", committed: %6" PRIuS " KB\n",
               lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
               lo_space_->CommittedMemory() / KB);
-  PrintIsolate(isolate_, "All spaces, used: %6" V8PRIdPTR
+  PrintIsolate(isolate_, "All spaces, used: %6" PRIuS
                          " KB"
-                         ", available: %6" V8PRIdPTR
+                         ", available: %6" PRIuS
                          " KB"
-                         ", committed: %6zu KB\n",
+                         ", committed: %6" PRIuS " KB\n",
               this->SizeOfObjects() / KB, this->Available() / KB,
               this->CommittedMemory() / KB);
-  PrintIsolate(isolate_, "External memory reported: %6" V8PRIdPTR " KB\n",
-               static_cast<intptr_t>(external_memory_ / KB));
+  PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
+               external_memory_ / KB);
   PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
                total_gc_time_ms_);
 }
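
PRIuS is the printf length specifier V8 uses for size_t values, replacing the V8PRIdPTR/%zu mix that assumed pointer-sized signed integers. A sketch of how such a macro is typically defined (the exact definition and header used in V8 are not quoted here; this is an assumption for illustration):

    #include <cstddef>
    #include <cstdio>

    // Older MSVC runtimes lacked C99's %zu, hence the fallback spelling.
    #if defined(_MSC_VER)
    #define PRIuS "Iu"
    #else
    #define PRIuS "zu"
    #endif

    int main() {
      size_t committed = 4096 * 1024;
      std::printf("committed: %6" PRIuS " KB\n", committed / 1024);
      return 0;
    }

Adjacent string literals concatenate at compile time, which is why "%6" PRIuS " KB\n" forms a single format string.
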
@@ -442,9 +439,8 @@ void Heap::GarbageCollectionPrologue() {
   store_buffer()->MoveAllEntriesToRememberedSet();
 }
 
-
-intptr_t Heap::SizeOfObjects() {
-  intptr_t total = 0;
+size_t Heap::SizeOfObjects() {
+  size_t total = 0;
   AllSpaces spaces(this);
   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
     total += space->SizeOfObjects();
@@ -977,7 +973,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
   }
 
   bool next_gc_likely_to_collect_more = false;
-  intptr_t committed_memory_before = 0;
+  size_t committed_memory_before = 0;
 
   if (collector == MARK_COMPACTOR) {
     committed_memory_before = CommittedOldGenerationMemory();
@@ -1004,8 +1000,8 @@ bool Heap::CollectGarbage(GarbageCollector collector,
   }
 
   if (collector == MARK_COMPACTOR) {
-    intptr_t committed_memory_after = CommittedOldGenerationMemory();
-    intptr_t used_memory_after = PromotedSpaceSizeOfObjects();
+    size_t committed_memory_after = CommittedOldGenerationMemory();
+    size_t used_memory_after = PromotedSpaceSizeOfObjects();
     MemoryReducer::Event event;
     event.type = MemoryReducer::kMarkCompact;
     event.time_ms = MonotonicallyIncreasingTimeInMs();
@@ -1014,7 +1010,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
     // - there is high fragmentation,
     // - there are live detached contexts.
     event.next_gc_likely_to_collect_more =
-        (committed_memory_before - committed_memory_after) > MB ||
+        (committed_memory_before > committed_memory_after + MB) ||
         HasHighFragmentation(used_memory_after, committed_memory_after) ||
         (detached_contexts()->length() > 0);
     if (deserialization_complete_) {
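
Rewriting (before - after) > MB as before > after + MB is the standard idiom for unsigned comparisons: if committed memory grew during the GC, the old subtraction would wrap to a huge value and report a large "shrinkage". A sketch, under the assumption that after + MB itself cannot overflow (true for realistic heap sizes):

    #include <cstddef>

    constexpr size_t MB = 1024 * 1024;

    // True iff the heap shrank by more than 1 MB. Correct even when
    // after > before, where (before - after) would wrap around.
    bool ShrankByOverOneMB(size_t before, size_t after) {
      return before > after + MB;
    }
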
@@ -1355,7 +1351,7 @@ bool Heap::PerformGarbageCollection(
   double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
   double mutator_speed =
       tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
-  intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
+  size_t old_gen_size = PromotedSpaceSizeOfObjects();
   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
     external_memory_at_last_mark_compact_ = external_memory_;
@@ -1605,7 +1601,7 @@ void Heap::Scavenge() {
   LOG(isolate_, ResourceEvent("scavenge", "begin"));
 
   // Used for updating survived_since_last_expansion_ at function end.
-  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
+  size_t survived_watermark = PromotedSpaceSizeOfObjects();
 
   scavenge_collector_->SelectScavengingVisitorsTable();
 
@@ -1717,9 +1713,9 @@ void Heap::Scavenge() {
   ArrayBufferTracker::FreeDeadInNewSpace(this);
 
   // Update how much has survived scavenge.
-  IncrementYoungSurvivorsCounter(
-      static_cast<int>((PromotedSpaceSizeOfObjects() - survived_watermark) +
-                       new_space_->Size()));
+  DCHECK_GE(PromotedSpaceSizeOfObjects(), survived_watermark);
+  IncrementYoungSurvivorsCounter(PromotedSpaceSizeOfObjects() +
+                                 new_space_->Size() - survived_watermark);
 
   LOG(isolate_, ResourceEvent("scavenge", "end"));
 
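
The survivor accounting drops its static_cast<int> and regroups the expression so the only subtraction is the one backed by the new DCHECK: promoted space can only grow during a scavenge, so the addition happens first and the minuend is as large as possible before survived_watermark is taken off. A sketch of the shape (names illustrative, not V8's):

    #include <cassert>
    #include <cstddef>

    size_t SurvivedBytes(size_t promoted_now, size_t promoted_watermark,
                         size_t new_space_size) {
      assert(promoted_now >= promoted_watermark);  // mirrors the DCHECK
      // Add first, subtract last: every intermediate stays non-negative.
      return promoted_now + new_space_size - promoted_watermark;
    }
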
@@ -2007,7 +2003,7 @@ void Heap::ConfigureInitialOldGenerationSize() {
   if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
     old_generation_allocation_limit_ =
         Max(MinimumAllocationLimitGrowingStep(),
-            static_cast<intptr_t>(
+            static_cast<size_t>(
                 static_cast<double>(old_generation_allocation_limit_) *
                 (tracer()->AverageSurvivalRatio() / 100)));
   }
@@ -2909,12 +2905,13 @@ int Heap::FullSizeNumberStringCacheLength() {
   // Compute the size of the number string cache based on the max newspace size.
   // The number string cache has a minimum size based on twice the initial cache
   // size to ensure that it is bigger after being made 'full size'.
-  int number_string_cache_size = max_semi_space_size_ / 512;
-  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
-                                 Min(0x4000, number_string_cache_size));
+  size_t number_string_cache_size = max_semi_space_size_ / 512;
+  number_string_cache_size =
+      Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
+          Min<size_t>(0x4000u, number_string_cache_size));
   // There is a string and a number per entry so the length is twice the number
   // of entries.
-  return number_string_cache_size * 2;
+  return static_cast<int>(number_string_cache_size * 2);
 }
 
 
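
The explicit static_cast<size_t> and Min<size_t> are needed because V8's Max and Min are single-type templates, so mixing an int constant with a size_t variable fails template argument deduction. A self-contained sketch of the constraint:

    #include <cstddef>

    template <typename T>
    T Max(T a, T b) { return a < b ? b : a; }
    template <typename T>
    T Min(T a, T b) { return a < b ? a : b; }

    size_t CacheSize(size_t max_semi_space_size) {
      const int kInitial = 128;  // illustrative stand-in for the V8 constant
      size_t size = max_semi_space_size / 512;
      // Max(kInitial * 2, Min(0x4000, size)) would not compile: T would have
      // to be both int and size_t. Pinning T resolves the ambiguity:
      return Max(static_cast<size_t>(kInitial * 2), Min<size_t>(0x4000u, size));
    }
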
@@ -4104,16 +4101,16 @@ bool Heap::HasLowAllocationRate() {
 
 
 bool Heap::HasHighFragmentation() {
-  intptr_t used = PromotedSpaceSizeOfObjects();
-  intptr_t committed = CommittedOldGenerationMemory();
+  size_t used = PromotedSpaceSizeOfObjects();
+  size_t committed = CommittedOldGenerationMemory();
   return HasHighFragmentation(used, committed);
 }
 
-
-bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) {
-  const intptr_t kSlack = 16 * MB;
+bool Heap::HasHighFragmentation(size_t used, size_t committed) {
+  const size_t kSlack = 16 * MB;
   // Fragmentation is high if committed > 2 * used + kSlack.
   // Rewrite the expression to avoid overflow.
+  DCHECK_GE(committed, used);
   return committed - used > used + kSlack;
 }
 
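
The new DCHECK makes the precondition of the rewritten predicate explicit. For committed >= used, subtracting used from both sides of committed > 2 * used + kSlack gives committed - used > used + kSlack; the rewritten form never computes 2 * used, which could overflow, and its one subtraction cannot wrap. As a compact sketch:

    #include <cstddef>

    // Precondition: committed >= used (DCHECKed in the patch).
    // Equivalent to committed > 2 * used + kSlack, without the doubling.
    bool HasHighFragmentation(size_t used, size_t committed) {
      const size_t kSlack = 16u * 1024u * 1024u;  // 16 MB
      return committed - used > used + kSlack;
    }
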
@@ -4970,31 +4967,31 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
 // TODO(1236194): Since the heap size is configurable on the command line
 // and through the API, we should gracefully handle the case that the heap
 // size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
-                         int max_executable_size, size_t code_range_size) {
+bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
+                         size_t max_executable_size, size_t code_range_size) {
   if (HasBeenSetUp()) return false;
 
   // Overwrite default configuration.
-  if (max_semi_space_size > 0) {
+  if (max_semi_space_size != 0) {
     max_semi_space_size_ = max_semi_space_size * MB;
   }
-  if (max_old_space_size > 0) {
-    max_old_generation_size_ = static_cast<intptr_t>(max_old_space_size) * MB;
+  if (max_old_space_size != 0) {
+    max_old_generation_size_ = max_old_space_size * MB;
   }
-  if (max_executable_size > 0) {
-    max_executable_size_ = static_cast<intptr_t>(max_executable_size) * MB;
+  if (max_executable_size != 0) {
+    max_executable_size_ = max_executable_size * MB;
   }
 
   // If max space size flags are specified overwrite the configuration.
   if (FLAG_max_semi_space_size > 0) {
-    max_semi_space_size_ = FLAG_max_semi_space_size * MB;
+    max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
   }
   if (FLAG_max_old_space_size > 0) {
     max_old_generation_size_ =
-        static_cast<intptr_t>(FLAG_max_old_space_size) * MB;
+        static_cast<size_t>(FLAG_max_old_space_size) * MB;
   }
   if (FLAG_max_executable_size > 0) {
-    max_executable_size_ = static_cast<intptr_t>(FLAG_max_executable_size) * MB;
+    max_executable_size_ = static_cast<size_t>(FLAG_max_executable_size) * MB;
   }
 
   if (Page::kPageSize > MB) {
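
All three FLAG_* values remain plain ints, so the casts move the multiplication by MB into size_t before it can overflow int arithmetic: a flag of 4096 (MB) needs 33 bits once scaled to bytes. A sketch of the difference, assuming an int-typed MB constant as in V8's globals at the time (on 32-bit targets size_t is itself 32 bits, so genuinely huge flags would still need a separate range check, not shown):

    #include <cstddef>

    constexpr int MB = 1024 * 1024;  // int-typed, mirroring V8's constant

    size_t FlagMBToBytes(int flag_mb) {
      // flag_mb * MB would be evaluated in int and overflow for
      // flag_mb >= 2048; widening first performs the multiply in size_t.
      return static_cast<size_t>(flag_mb) * MB;
    }
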
@@ -5011,17 +5008,18 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
 
   // The new space size must be a power of two to support single-bit testing
   // for containment.
-  max_semi_space_size_ =
-      base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
+  max_semi_space_size_ = base::bits::RoundUpToPowerOfTwo32(
+      static_cast<uint32_t>(max_semi_space_size_));
 
   if (FLAG_min_semi_space_size > 0) {
-    int initial_semispace_size = FLAG_min_semi_space_size * MB;
+    size_t initial_semispace_size =
+        static_cast<size_t>(FLAG_min_semi_space_size) * MB;
     if (initial_semispace_size > max_semi_space_size_) {
       initial_semispace_size_ = max_semi_space_size_;
       if (FLAG_trace_gc) {
         PrintIsolate(isolate_,
                      "Min semi-space size cannot be more than the maximum "
-                     "semi-space size of %d MB\n",
+                     "semi-space size of %" PRIuS " MB\n",
                      max_semi_space_size_ / MB);
       }
     } else {
@@ -5039,7 +5037,7 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
   // The old generation is paged and needs at least one page for each space.
   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
   max_old_generation_size_ =
-      Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
+      Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
          max_old_generation_size_);
 
   // The max executable size must be less than or equal to the max old
@@ -5138,16 +5136,15 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
   }
 }
 
-
-intptr_t Heap::PromotedSpaceSizeOfObjects() {
+size_t Heap::PromotedSpaceSizeOfObjects() {
   return old_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
          map_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
 }
 
-
-int64_t Heap::PromotedExternalMemorySize() {
+uint64_t Heap::PromotedExternalMemorySize() {
   if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
-  return external_memory_ - external_memory_at_last_mark_compact_;
+  return static_cast<uint64_t>(external_memory_ -
+                               external_memory_at_last_mark_compact_);
 }
 
 
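
external_memory_ itself stays signed: embedders adjust it both up and down, so the current value can legitimately sit below the mark-compact watermark. The promoted amount is therefore clamped at zero before the now-unsigned return value is produced. The shape, as a sketch:

    #include <cstdint>

    uint64_t PromotedExternalBytes(int64_t now, int64_t at_last_mark_compact) {
      if (now <= at_last_mark_compact) return 0;  // clamp before converting
      return static_cast<uint64_t>(now - at_last_mark_compact);
    }
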
@@ -5215,29 +5212,29 @@ double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed) {
   return factor;
 }
 
-
-intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
-                                                     intptr_t old_gen_size) {
+size_t Heap::CalculateOldGenerationAllocationLimit(double factor,
+                                                   size_t old_gen_size) {
   CHECK(factor > 1.0);
   CHECK(old_gen_size > 0);
-  intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
-  limit = Max(limit, old_gen_size + MinimumAllocationLimitGrowingStep());
+  uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
+  limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
+                         MinimumAllocationLimitGrowingStep());
   limit += new_space_->Capacity();
-  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
-  return Min(limit, halfway_to_the_max);
+  uint64_t halfway_to_the_max =
+      (static_cast<uint64_t>(old_gen_size) + max_old_generation_size_) / 2;
+  return static_cast<size_t>(Min(limit, halfway_to_the_max));
 }
 
-intptr_t Heap::MinimumAllocationLimitGrowingStep() {
-  const double kRegularAllocationLimitGrowingStep = 8;
-  const double kLowMemoryAllocationLimitGrowingStep = 2;
-  intptr_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
+size_t Heap::MinimumAllocationLimitGrowingStep() {
+  const size_t kRegularAllocationLimitGrowingStep = 8;
+  const size_t kLowMemoryAllocationLimitGrowingStep = 2;
+  size_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
   return limit * (ShouldOptimizeForMemoryUsage()
                       ? kLowMemoryAllocationLimitGrowingStep
                       : kRegularAllocationLimitGrowingStep);
 }
 
-void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
-                                           double gc_speed,
+void Heap::SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
                                            double mutator_speed) {
   double factor = HeapGrowingFactor(gc_speed, mutator_speed);
 
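
The limit math now runs in uint64_t and is narrowed only at the end: on a 32-bit host, old_gen_size * factor can exceed SIZE_MAX, but taking the Min against halfway_to_the_max, which is bounded by max_old_generation_size_ (itself a size_t), guarantees the final cast preserves the value. A condensed sketch of that argument (names illustrative):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    size_t GrowLimit(size_t old_gen_size, double factor, size_t max_old_gen) {
      uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
      uint64_t halfway =
          (static_cast<uint64_t>(old_gen_size) + max_old_gen) / 2;
      // Assuming old_gen_size <= max_old_gen, halfway <= max_old_gen fits in
      // size_t, so the narrowing cast below is value-preserving.
      return static_cast<size_t>(std::min(limit, halfway));
    }
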
@@ -5270,24 +5267,23 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
       CalculateOldGenerationAllocationLimit(factor, old_gen_size);
 
   if (FLAG_trace_gc_verbose) {
-    isolate_->PrintWithTimestamp("Grow: old size: %" V8PRIdPTR
-                                 " KB, new limit: %" V8PRIdPTR " KB (%.1f)\n",
-                                 old_gen_size / KB,
-                                 old_generation_allocation_limit_ / KB, factor);
+    isolate_->PrintWithTimestamp(
+        "Grow: old size: %" PRIuS " KB, new limit: %" PRIuS " KB (%.1f)\n",
+        old_gen_size / KB, old_generation_allocation_limit_ / KB, factor);
   }
 }
 
-void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
+void Heap::DampenOldGenerationAllocationLimit(size_t old_gen_size,
                                               double gc_speed,
                                               double mutator_speed) {
   double factor = HeapGrowingFactor(gc_speed, mutator_speed);
-  intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
+  size_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
   if (limit < old_generation_allocation_limit_) {
     if (FLAG_trace_gc_verbose) {
       isolate_->PrintWithTimestamp(
-          "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR
+          "Dampen: old size: %" PRIuS " KB, old limit: %" PRIuS
           " KB, "
-          "new limit: %" V8PRIdPTR " KB (%.1f)\n",
+          "new limit: %" PRIuS " KB (%.1f)\n",
          old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB,
          factor);
     }
@@ -5321,7 +5317,8 @@ bool Heap::ShouldExpandOldGenerationOnAllocationFailure() {
 // The kHardLimit means that incremental marking should be started immediately.
 Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
   if (!incremental_marking()->CanBeActivated() ||
-      PromotedSpaceSizeOfObjects() < IncrementalMarking::kActivationThreshold) {
+      PromotedSpaceSizeOfObjects() <=
+          IncrementalMarking::kActivationThreshold) {
     // Incremental marking is disabled or it is too early to start.
     return IncrementalMarkingLimit::kNoLimit;
   }
@@ -5331,13 +5328,13 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
     // start marking immediately.
     return IncrementalMarkingLimit::kHardLimit;
   }
-  intptr_t old_generation_space_available = OldGenerationSpaceAvailable();
+  size_t old_generation_space_available = OldGenerationSpaceAvailable();
   if (old_generation_space_available > new_space_->Capacity()) {
     return IncrementalMarkingLimit::kNoLimit;
   }
   // We are close to the allocation limit.
   // Choose between the hard and the soft limits.
-  if (old_generation_space_available <= 0 || ShouldOptimizeForMemoryUsage()) {
+  if (old_generation_space_available == 0 || ShouldOptimizeForMemoryUsage()) {
    return IncrementalMarkingLimit::kHardLimit;
   }
   return IncrementalMarkingLimit::kSoftLimit;
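
This last hunk is the semantic counterpart of the type change: once OldGenerationSpaceAvailable() returns size_t, "available <= 0" can only mean "available == 0", and the explicit spelling avoids tautological-comparison warnings. In sketch form:

    #include <cstddef>

    bool AtHardLimit(size_t old_generation_space_available) {
      // An unsigned value is never < 0, so <= 0 collapses to == 0.
      return old_generation_space_available == 0;
    }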