OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 125 matching lines...) | |
136 max_alive_after_gc_(0), | 136 max_alive_after_gc_(0), |
137 min_in_mutator_(kMaxInt), | 137 min_in_mutator_(kMaxInt), |
138 alive_after_last_gc_(0), | 138 alive_after_last_gc_(0), |
139 last_gc_end_timestamp_(0.0), | 139 last_gc_end_timestamp_(0.0), |
140 store_buffer_(this), | 140 store_buffer_(this), |
141 marking_(this), | 141 marking_(this), |
142 incremental_marking_(this), | 142 incremental_marking_(this), |
143 number_idle_notifications_(0), | 143 number_idle_notifications_(0), |
144 last_idle_notification_gc_count_(0), | 144 last_idle_notification_gc_count_(0), |
145 last_idle_notification_gc_count_init_(false), | 145 last_idle_notification_gc_count_init_(false), |
146 idle_notification_will_schedule_next_gc_(false), | |
146 configured_(false), | 147 configured_(false), |
147 chunks_queued_for_free_(NULL) { | 148 chunks_queued_for_free_(NULL) { |
148 // Allow build-time customization of the max semispace size. Building | 149 // Allow build-time customization of the max semispace size. Building |
149 // V8 with snapshots and a non-default max semispace size is much | 150 // V8 with snapshots and a non-default max semispace size is much |
150 // easier if you can define it as part of the build environment. | 151 // easier if you can define it as part of the build environment. |
151 #if defined(V8_MAX_SEMISPACE_SIZE) | 152 #if defined(V8_MAX_SEMISPACE_SIZE) |
152 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE; | 153 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE; |
153 #endif | 154 #endif |
154 | 155 |
155 intptr_t max_virtual = OS::MaxVirtualMemory(); | 156 intptr_t max_virtual = OS::MaxVirtualMemory(); |
(...skipping 350 matching lines...) | |
506 : isolate_->counters()->gc_compactor(); | 507 : isolate_->counters()->gc_compactor(); |
507 rate->Start(); | 508 rate->Start(); |
508 next_gc_likely_to_collect_more = | 509 next_gc_likely_to_collect_more = |
509 PerformGarbageCollection(collector, &tracer); | 510 PerformGarbageCollection(collector, &tracer); |
510 rate->Stop(); | 511 rate->Stop(); |
511 | 512 |
512 GarbageCollectionEpilogue(); | 513 GarbageCollectionEpilogue(); |
513 } | 514 } |
514 | 515 |
515 ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped()); | 516 ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped()); |
516 if (incremental_marking()->IsStopped()) { | 517 if (incremental_marking()->IsStopped() && |
518 !idle_notification_will_schedule_next_gc()) { | |
Erik Corry 2011/11/10 15:06:55: I think if the heuristics say we need to start a GC, we should just start one instead of suppressing it with this flag.
ulan 2011/11/11 13:27:26: Removed the flag.
517 if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) { | 519 if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) { |
518 incremental_marking()->Start(); | 520 incremental_marking()->Start(); |
519 } | 521 } |
520 } | 522 } |
521 | 523 |
522 return next_gc_likely_to_collect_more; | 524 return next_gc_likely_to_collect_more; |
523 } | 525 } |
524 | 526 |
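For context, here is a minimal self-contained model of the decision the block above makes once the suppression flag is gone: after a non-scavenge collection, the heuristics alone decide whether to start incremental marking. The threshold values below are invented for illustration; only the shape of the check (`WorthActivating` plus `NextGCIsLikelyToBeFull`) follows the patch.

```cpp
// Toy model of the post-GC check in Heap::CollectGarbage (sketch only;
// the thresholds below are made up and are not V8's real heuristics).
#include <cstdio>

struct ToyHeap {
  long committed_bytes;
  long alive_after_gc_bytes;
  bool marking_stopped;

  // "Worth activating": enough live data for incremental marking to pay off.
  bool WorthActivating() const { return alive_after_gc_bytes > 4 * 1024 * 1024; }

  // "Next GC likely full": most of the heap survived the last collection.
  bool NextGCIsLikelyToBeFull() const {
    return alive_after_gc_bytes * 10 > committed_bytes * 7;  // >70% survival
  }

  void AfterFullGC() {
    if (marking_stopped && WorthActivating() && NextGCIsLikelyToBeFull()) {
      marking_stopped = false;  // i.e. incremental_marking()->Start()
      std::printf("starting incremental marking\n");
    }
  }
};

int main() {
  ToyHeap heap = {100L << 20, 80L << 20, true};  // 80MB live of 100MB committed
  heap.AfterFullGC();                            // prints the start message
  return 0;
}
```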
525 | 527 |
526 void Heap::PerformScavenge() { | 528 void Heap::PerformScavenge() { |
(...skipping 3926 matching lines...) | |
4453 | 4455 |
4454 void Heap::EnsureHeapIsIterable() { | 4456 void Heap::EnsureHeapIsIterable() { |
4455 ASSERT(IsAllocationAllowed()); | 4457 ASSERT(IsAllocationAllowed()); |
4456 if (!IsHeapIterable()) { | 4458 if (!IsHeapIterable()) { |
4457 CollectAllGarbage(kMakeHeapIterableMask); | 4459 CollectAllGarbage(kMakeHeapIterableMask); |
4458 } | 4460 } |
4459 ASSERT(IsHeapIterable()); | 4461 ASSERT(IsHeapIterable()); |
4460 } | 4462 } |
4461 | 4463 |
4462 | 4464 |
4463 bool Heap::IdleNotification() { | 4465 bool Heap::IdleNotification() { |
Erik Corry 2011/11/10 15:06:55: A comment here to say what the return value means would be useful.
ulan 2011/11/11 13:27:26: This function is documented in the V8 API. Added a comment.
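The return-value contract ulan refers to is the one in v8.h: the embedder may call `v8::V8::IdleNotification()` from its idle loop, and a true return means V8 has no more useful idle work, so the embedder should stop calling it until real work has been done. A sketch of the embedder side (the message-loop wiring here is hypothetical):

```cpp
// Hypothetical embedder idle handler. v8::V8::IdleNotification() is the
// public wrapper around the Heap::IdleNotification() under review here.
#include <v8.h>

bool idle_round_done = false;

void OnMessageLoopIdle() {
  if (idle_round_done) return;  // V8 said stop; wait for real work first
  // false means "call me again": marking/sweeping is still in progress.
  idle_round_done = v8::V8::IdleNotification();
}

void OnRealWorkDone() {
  idle_round_done = false;  // the mutator ran again, idle GC may pay off anew
}
```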
4464 static const int kIdlesBeforeScavenge = 4; | |
4465 static const int kIdlesBeforeMarkSweep = 7; | |
4466 static const int kIdlesBeforeMarkCompact = 8; | |
4467 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1; | |
4468 static const unsigned int kGCsBetweenCleanup = 4; | |
4469 | |
4470 if (!last_idle_notification_gc_count_init_) { | 4466 if (!last_idle_notification_gc_count_init_) { |
4471 last_idle_notification_gc_count_ = gc_count_; | 4467 last_idle_notification_gc_count_ = gc_count_; |
4472 last_idle_notification_gc_count_init_ = true; | 4468 last_idle_notification_gc_count_init_ = true; |
4469 last_idle_notification_timestamp_ = OS::TimeCurrentMillis(); | |
4473 } | 4470 } |
4474 | 4471 |
4472 if (!FLAG_incremental_marking) return IdleGlobalGC(); | |
4473 | |
4474 // The goal is to perform kMaxIdleCount incremental GC cycles and then | |
4475 // wait until the mutator creates more garbage. | |
Erik Corry 2011/11/10 15:06:55: "Creates more garbage" is rather vague. If this means …
ulan 2011/11/11 13:27:26: Rephrased it to "enough garbage to justify a new round of GCs".
4476 // A GC cycle consists of: | |
4477 // 1. many incremental marking steps, | |
4478 // 2. one old space mark-sweep-compact, | |
4479 // 3. many lazy sweep steps. | |
4480 // The counters from IdleGlobalGC are reused, but have different meaning: | |
Erik Corry 2011/11/10 15:06:55: This seems unnecessarily complicated. Counters are …
ulan 2011/11/11 13:27:26: Done.
4481 // - number_idle_notifications_ counts the GC cycles. | |
Erik Corry 2011/11/10 15:06:55: I do not like the phrase "GC cycles". If we use the …
ulan 2011/11/11 13:27:26: Done.
4482 // - last_idle_notification_gc_count_ stores the gc_count_ after the last | |
4483 // old space mark-sweep-compact. | |
4484 | |
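A round as described in the comment block above can be pictured as a tiny state machine: repeated marking steps, then one mark-sweep-compact, then repeated lazy-sweep steps. The sketch below is illustrative only; the step counts are invented and nothing in it is V8 code.

```cpp
#include <cstdio>

// One idle-notification round: marking steps -> mark-sweep-compact -> sweeping.
enum Phase { MARKING, COLLECT, SWEEPING, DONE };

int main() {
  Phase phase = MARKING;
  int marking_steps = 5;  // stands in for "many incremental marking steps"
  int sweep_steps = 3;    // stands in for "many lazy sweep steps"
  while (phase != DONE) {
    switch (phase) {
      case MARKING:
        std::printf("incremental marking step\n");
        if (--marking_steps == 0) phase = COLLECT;
        break;
      case COLLECT:
        std::printf("one old space mark-sweep-compact\n");
        phase = SWEEPING;
        break;
      case SWEEPING:
        std::printf("lazy sweep step\n");
        if (--sweep_steps == 0) phase = DONE;
        break;
      case DONE:
        break;
    }
  }
  return 0;
}
```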
4485 if (number_idle_notifications_ >= kMaxIdleCount && | |
4486 gc_count_ - last_idle_notification_gc_count_ >= kGCsBetweenCleanup) { | |
4487 // The mutator created more garbage, start new round of GC cycles. | |
4488 number_idle_notifications_ = 0; | |
4489 } | |
4490 | |
4491 intptr_t step_size = IncrementalMarking::kStepFakeAllocatedBytes; | |
4492 | |
4493 double delay = OS::TimeCurrentMillis() - last_idle_notification_timestamp_; | |
4494 last_idle_notification_timestamp_ += delay; | |
4495 | |
4496 if (delay > 400) { | |
4497 // Speed up if idle notifications are rare. | |
Erik Corry 2011/11/10 15:06:55: Time-based things make debugging difficult, and in …
ulan 2011/11/11 13:27:26: As discussed offline, added a "hint" argument to IdleNotification.
4498 step_size *= 10; | |
4499 } | |
4500 | |
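The scaling above is easy to restate on its own: if notifications arrive rarely, each one has to do roughly an order of magnitude more work. A standalone sketch; the base step constant is invented, while the 400 ms threshold and the 10x factor come from the patch:

```cpp
#include <cstdio>

// Sketch of the delay-based step sizing in this version of the patch
// (the review later replaced wall-clock time with an explicit hint argument).
long IdleStepSize(double ms_since_last_notification) {
  const long kBaseStep = 1L << 20;  // illustrative stand-in, not V8's constant
  // Rare notifications mean fewer chances to make progress, so take
  // larger marking/sweeping steps on each one.
  return ms_since_last_notification > 400 ? kBaseStep * 10 : kBaseStep;
}

int main() {
  std::printf("%ld\n", IdleStepSize(100.0));   // frequent idling: base step
  std::printf("%ld\n", IdleStepSize(1000.0));  // rare idling: 10x step
  return 0;
}
```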
4501 if (incremental_marking()->IsStopped()) { | |
4502 if (!old_data_space()->IsSweepingComplete() || | |
4503 !old_pointer_space()->IsSweepingComplete()) { | |
Erik Corry 2011/11/10 15:06:55: It is a little dangerous to name individual spaces …
ulan 2011/11/11 13:27:26: Done.
4504 bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size); | |
4505 sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size); | |
4506 if (!sweeping_complete) { | |
4507 return false; | |
4508 } | |
4509 } | |
4510 if (!WorthStartingGCWhenIdle()) { | |
4511 return true; | |
4512 } | |
4513 incremental_marking()->Start(); | |
4514 } | |
4515 | |
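ulan's "Done" on the space-naming comment plausibly means hiding the per-space calls behind one heap-level helper, so call sites stop enumerating old_data_space() and old_pointer_space(). A sketch of that shape with toy types; the helper name `AdvanceSweepers` and the types are stand-ins, not V8's:

```cpp
#include <vector>

// Toy paged space: sweeping completes after a fixed number of steps.
struct ToyPagedSpace {
  int unswept_pages;
  bool IsSweepingComplete() const { return unswept_pages == 0; }
  bool AdvanceSweeper(long bytes_to_sweep) {
    if (unswept_pages > 0) --unswept_pages;  // pretend `bytes_to_sweep` did this
    return IsSweepingComplete();
  }
};

// One helper advances every lazily swept space, so callers never have to
// list the individual spaces -- the fragility the review points out.
bool AdvanceSweepers(std::vector<ToyPagedSpace*>& spaces, long step) {
  bool complete = true;
  for (ToyPagedSpace* space : spaces) complete &= space->AdvanceSweeper(step);
  return complete;
}

int main() {
  ToyPagedSpace old_data = {3}, old_pointer = {2};
  std::vector<ToyPagedSpace*> spaces = {&old_data, &old_pointer};
  while (!AdvanceSweepers(spaces, 1L << 20)) {
    // keep sweeping in idle steps until every space reports completion
  }
  return 0;
}
```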
4516 // This flag prevents incremental marking from requesting GC via stack guard | |
4517 // and also prevents GC from starting a new incremental marking. | |
4518 idle_notification_will_schedule_next_gc_ = true; | |
4519 | |
4520 incremental_marking()->Step(step_size); | |
Erik Corry 2011/11/10 15:06:55: It seems that we do a step without checking that we …
ulan 2011/11/11 13:27:26: Line 4513 starts marking if it is not started already.
4521 | |
4522 if (incremental_marking()->IsComplete()) { | |
4523 bool uncommit = false; | |
4524 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup / 2) { | |
4525 // Mutator was idle since the last GC caused by IdleNotification. | |
4526 isolate_->compilation_cache()->Clear(); | |
Erik Corry 2011/11/10 15:06:55: We seem to be duplicating a lot of the logic of in…
ulan 2011/11/11 13:27:26: We do it in LowMemoryNotification and in old versions of IdleNotification.
4527 uncommit = true; | |
4528 } | |
4529 CollectAllGarbage(kNoGCFlags); | |
4530 if (uncommit) { | |
4531 new_space_.Shrink(); | |
4532 UncommitFromSpace(); | |
4533 } | |
4534 last_idle_notification_gc_count_ = gc_count_; | |
4535 number_idle_notifications_++; | |
4536 } | |
4537 | |
4538 idle_notification_will_schedule_next_gc_ = false; | |
4539 | |
4540 return !WorthStartingGCWhenIdle() && | |
Erik Corry 2011/11/10 15:06:55: If it is worth starting, should we not just start it?
ulan 2011/11/11 13:27:26: Now I am simply returning false, so the next notification will start it.
4541 incremental_marking()->IsStopped() && | |
4542 old_data_space()->IsSweepingComplete() && | |
Erik Corry 2011/11/10 15:06:55: Listing the lazily swept pages is fragile.
ulan 2011/11/11 13:27:26: Done.
4543 old_pointer_space()->IsSweepingComplete(); | |
4544 } | |
4545 | |
4546 | |
4547 bool Heap::IdleGlobalGC() { | |
4475 bool uncommit = true; | 4548 bool uncommit = true; |
4476 bool finished = false; | 4549 bool finished = false; |
4477 | 4550 |
4478 // Reset the number of idle notifications received when a number of | 4551 // Reset the number of idle notifications received when a number of |
4479 // GCs have taken place. This allows another round of cleanup based | 4552 // GCs have taken place. This allows another round of cleanup based |
4480 // on idle notifications if enough work has been carried out to | 4553 // on idle notifications if enough work has been carried out to |
4481 // provoke a number of garbage collections. | 4554 // provoke a number of garbage collections. |
4482 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) { | 4555 if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) { |
4483 number_idle_notifications_ = | 4556 number_idle_notifications_ = |
4484 Min(number_idle_notifications_ + 1, kMaxIdleCount); | 4557 Min(number_idle_notifications_ + 1, kMaxIdleCount); |
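The counter handling above, together with its reset in the lines skipped below, amounts to a saturating counter that a burst of real GC activity resets. A compact restatement, assuming the reset branch matches the pre-existing IdleGlobalGC logic:

```cpp
#include <algorithm>

const int kMaxIdleCount = 9;           // kIdlesBeforeMarkCompact + 1 in the patch
const unsigned kGCsBetweenCleanup = 4;

// Saturating idle counter: it only advances while the mutator stays quiet.
int OnIdleNotification(int idle_count, unsigned gc_count,
                       unsigned last_idle_gc_count) {
  if (gc_count - last_idle_gc_count < kGCsBetweenCleanup) {
    return std::min(idle_count + 1, kMaxIdleCount);  // still idle: count up
  }
  return 0;  // enough GCs happened: the mutator is busy, start a new round
}

int main() {
  int n = 0;
  n = OnIdleNotification(n, /*gc_count=*/10, /*last_idle_gc_count=*/9);  // -> 1
  n = OnIdleNotification(n, /*gc_count=*/20, /*last_idle_gc_count=*/9);  // -> 0
  return n;
}
```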
(...skipping 1928 matching lines...) | |
6413 isolate_->heap()->store_buffer()->Compact(); | 6486 isolate_->heap()->store_buffer()->Compact(); |
6414 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED); | 6487 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED); |
6415 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) { | 6488 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) { |
6416 next = chunk->next_chunk(); | 6489 next = chunk->next_chunk(); |
6417 isolate_->memory_allocator()->Free(chunk); | 6490 isolate_->memory_allocator()->Free(chunk); |
6418 } | 6491 } |
6419 chunks_queued_for_free_ = NULL; | 6492 chunks_queued_for_free_ = NULL; |
6420 } | 6493 } |
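FreeQueuedChunks above uses the standard idiom for destroying a singly linked list: read the successor pointer before freeing the node, since calling chunk->next_chunk() after Free(chunk) would be a use-after-free. The same pattern in isolation:

```cpp
// Generic version of the "save the successor before freeing" loop above.
struct Node {
  Node* next;
};

void FreeAll(Node* head) {
  Node* next = nullptr;
  for (Node* node = head; node != nullptr; node = next) {
    next = node->next;  // must be read BEFORE the node is released
    delete node;        // after this, node->next would be a use-after-free
  }
}

int main() {
  Node* head = new Node{new Node{new Node{nullptr}}};
  FreeAll(head);
  return 0;
}
```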
6421 | 6494 |
6422 } } // namespace v8::internal | 6495 } } // namespace v8::internal |