Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index f9fb0ff5aa419abf0ca1b1f26c97f983d9919ea0..ab52719fd8ed293df0df459a867a2784e0571695 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -48,9 +48,6 @@
   Object** end;
   StrongRootsList* next;
 };
-
-
-DEFINE_OPERATORS_FOR_FLAGS(Heap::GCFlags)
 
 
 Heap::Heap()
@@ -136,7 +133,7 @@
       ring_buffer_end_(0),
       promotion_queue_(this),
       configured_(false),
-      current_gc_flags_(kNoGCFlags),
+      current_gc_flags_(Heap::kNoGCFlags),
       current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
       external_string_table_(this),
       chunks_queued_for_free_(NULL),
@@ -753,7 +750,7 @@
 void Heap::HandleGCRequest() {
   if (incremental_marking()->request_type() ==
       IncrementalMarking::COMPLETE_MARKING) {
-    CollectAllGarbage("GC interrupt", current_gc_flags_,
+    CollectAllGarbage(current_gc_flags_, "GC interrupt",
                       current_gc_callback_flags_);
     return;
   }
@@ -797,12 +794,14 @@
 }
 
 
-void Heap::CollectAllGarbage(const char* gc_reason, const GCFlags flags,
+void Heap::CollectAllGarbage(int flags, const char* gc_reason,
                              const v8::GCCallbackFlags gc_callback_flags) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
-  CollectGarbage(OLD_SPACE, gc_reason, flags, gc_callback_flags);
+  set_current_gc_flags(flags);
+  CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
+  set_current_gc_flags(kNoGCFlags);
 }
 
 
@@ -824,18 +823,18 @@
     isolate()->optimizing_compile_dispatcher()->Flush();
   }
   isolate()->ClearSerializerData();
-  isolate()->compilation_cache()->Clear();
+  set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
+  isolate_->compilation_cache()->Clear();
   const int kMaxNumberOfAttempts = 7;
   const int kMinNumberOfAttempts = 2;
   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
-    if (!CollectGarbage(OLD_SPACE, gc_reason,
-                        Heap::kAbortIncrementalMarkingMask |
-                            Heap::kReduceMemoryFootprintMask,
-                        kGCCallbackFlagForced) &&
-        ((attempt + 1) >= kMinNumberOfAttempts)) {
+    if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL,
+                        v8::kGCCallbackFlagForced) &&
+        attempt + 1 >= kMinNumberOfAttempts) {
       break;
     }
   }
+  set_current_gc_flags(kNoGCFlags);
   new_space_.Shrink();
   UncommitFromSpace();
 }
@@ -861,7 +860,8 @@
 
 
 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
-                          const char* collector_reason) {
+                          const char* collector_reason,
+                          const v8::GCCallbackFlags gc_callback_flags) {
   // The VM is in the GC state until exiting this function.
   VMState<GC> state(isolate_);
 
@@ -916,7 +916,8 @@
       HistogramTimerScope histogram_timer_scope(
           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
                                    : isolate_->counters()->gc_compactor());
-      next_gc_likely_to_collect_more = PerformGarbageCollection(collector);
+      next_gc_likely_to_collect_more =
+          PerformGarbageCollection(collector, gc_callback_flags);
     }
 
     GarbageCollectionEpilogue();
@@ -947,7 +948,7 @@
   }
 
   if (collector == MARK_COMPACTOR &&
-      (current_gc_callback_flags_ & kGCCallbackFlagForced) != 0) {
+      (gc_callback_flags & kGCCallbackFlagForced) != 0) {
     isolate()->CountUsage(v8::Isolate::kForcedGC);
   }
 
@@ -982,7 +983,7 @@
 }
 
 
-void Heap::StartIncrementalMarking(const GCFlags gc_flags,
+void Heap::StartIncrementalMarking(int gc_flags,
                                    const GCCallbackFlags gc_callback_flags,
                                    const char* reason) {
   DCHECK(incremental_marking()->IsStopped());
@@ -1084,17 +1085,17 @@
       }
       if (perform_gc) {
         if (space == NEW_SPACE) {
-          CollectGarbageNewSpace("failed to reserve space in the new space");
+          CollectGarbage(NEW_SPACE, "failed to reserve space in the new space");
         } else {
           if (counter > 1) {
             CollectAllGarbage(
+                kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
                 "failed to reserve space in paged or large "
-                "object space, trying to reduce memory footprint",
-                kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask);
+                "object space, trying to reduce memory footprint");
           } else {
             CollectAllGarbage(
-                "failed to reserve space in paged or large object space",
-                kAbortIncrementalMarkingMask);
+                kAbortIncrementalMarkingMask,
+                "failed to reserve space in paged or large object space");
           }
         }
         gc_performed = true;
@@ -1163,8 +1164,8 @@
   }
 }
 
-
-bool Heap::PerformGarbageCollection(GarbageCollector collector) {
+bool Heap::PerformGarbageCollection(
+    GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
   int freed_global_handles = 0;
 
   if (collector != SCAVENGER) {
@@ -1241,7 +1242,7 @@
     GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
     freed_global_handles =
         isolate_->global_handles()->PostGarbageCollectionProcessing(
-            collector, current_gc_callback_flags_);
+            collector, gc_callback_flags);
   }
   gc_post_processing_depth_--;
 
@@ -1272,7 +1273,7 @@
       GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
       VMState<EXTERNAL> state(isolate_);
       HandleScope handle_scope(isolate_);
-      CallGCEpilogueCallbacks(gc_type, current_gc_callback_flags_);
+      CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
     }
   }
 
@@ -4536,7 +4537,7 @@
 void Heap::MakeHeapIterable() {
   DCHECK(AllowHeapAllocation::IsAllowed());
   if (!IsHeapIterable()) {
-    CollectAllGarbage("Heap::MakeHeapIterable", kMakeHeapIterableMask);
+    CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
   }
   if (mark_compact_collector()->sweeping_in_progress()) {
     mark_compact_collector()->EnsureSweepingCompleted();
@@ -4661,8 +4662,8 @@
        gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
            static_cast<size_t>(idle_time_in_ms), size_of_objects,
            final_incremental_mark_compact_speed_in_bytes_per_ms))) {
-    CollectAllGarbage("idle notification: finalize incremental",
-                      current_gc_flags_);
+    CollectAllGarbage(current_gc_flags_,
+                      "idle notification: finalize incremental");
     return true;
   }
   return false;
@@ -4745,11 +4746,11 @@
     case DO_FULL_GC: {
       DCHECK(contexts_disposed_ > 0);
       HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage("idle notification: contexts disposed", kNoGCFlags);
+      CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
       break;
     }
     case DO_SCAVENGE:
-      CollectGarbageNewSpace("idle notification: scavenge");
+      CollectGarbage(NEW_SPACE, "idle notification: scavenge");
       break;
     case DO_FINALIZE_SWEEPING:
      mark_compact_collector()->EnsureSweepingCompleted();
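
On the '+' side of this diff, Heap::CollectAllGarbage takes the int flag mask first and the reason string second, and it scopes the mask around the collection with set_current_gc_flags (see the @@ -797 hunk). A minimal sketch of call sites in that style, using only identifiers that appear in the hunks above; the function name and reason strings are illustrative, and this relies on V8-internal declarations, so it is not a standalone compilable unit:

// Sketch only: assumes the Heap API as patched above, i.e. the two-argument
// CollectAllGarbage(flags, gc_reason) form used throughout these hunks and
// the space-based CollectGarbage(space, gc_reason) overload.
void ExampleHeapUser(Heap* heap) {
  // Full GC with no special flags: mask first, then the reason string.
  heap->CollectAllGarbage(Heap::kNoGCFlags, "example: plain full GC");

  // Full GC that also makes the heap iterable. Per the @@ -797 hunk,
  // CollectAllGarbage sets current_gc_flags_ for the duration of the
  // collection and resets it to kNoGCFlags afterwards.
  heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "example: iterable heap");

  // Young-generation collection through the space-based overload.
  heap->CollectGarbage(NEW_SPACE, "example: scavenge");
}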