OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 551 matching lines...) | |
562 pending_sweeper_tasks_semaphore_.Wait(); | 562 pending_sweeper_tasks_semaphore_.Wait(); |
563 pending_sweeper_tasks_semaphore_.Wait(); | 563 pending_sweeper_tasks_semaphore_.Wait(); |
564 pending_sweeper_tasks_semaphore_.Wait(); | 564 pending_sweeper_tasks_semaphore_.Wait(); |
565 } | 565 } |
566 | 566 |
567 ParallelSweepSpacesComplete(); | 567 ParallelSweepSpacesComplete(); |
568 sweeping_in_progress_ = false; | 568 sweeping_in_progress_ = false; |
569 RefillFreeList(heap()->paged_space(OLD_SPACE)); | 569 RefillFreeList(heap()->paged_space(OLD_SPACE)); |
570 RefillFreeList(heap()->paged_space(CODE_SPACE)); | 570 RefillFreeList(heap()->paged_space(CODE_SPACE)); |
571 RefillFreeList(heap()->paged_space(MAP_SPACE)); | 571 RefillFreeList(heap()->paged_space(MAP_SPACE)); |
572 heap()->paged_space(OLD_SPACE)->ResetUnsweptFreeBytes(); | |
573 heap()->paged_space(CODE_SPACE)->ResetUnsweptFreeBytes(); | |
574 heap()->paged_space(MAP_SPACE)->ResetUnsweptFreeBytes(); | |
575 | 572 |
576 #ifdef VERIFY_HEAP | 573 #ifdef VERIFY_HEAP |
577 if (FLAG_verify_heap && !evacuation()) { | 574 if (FLAG_verify_heap && !evacuation()) { |
578 VerifyEvacuation(heap_); | 575 VerifyEvacuation(heap_); |
579 } | 576 } |
580 #endif | 577 #endif |
581 } | 578 } |
582 | 579 |
583 | 580 |
584 bool MarkCompactCollector::IsSweepingCompleted() { | 581 bool MarkCompactCollector::IsSweepingCompleted() { |
(...skipping 14 matching lines...) | |
599 } else if (space == heap()->code_space()) { | 596 } else if (space == heap()->code_space()) { |
600 free_list = free_list_code_space_.get(); | 597 free_list = free_list_code_space_.get(); |
601 } else if (space == heap()->map_space()) { | 598 } else if (space == heap()->map_space()) { |
602 free_list = free_list_map_space_.get(); | 599 free_list = free_list_map_space_.get(); |
603 } else { | 600 } else { |
604 // Any PagedSpace might invoke RefillFreeLists, so we need to make sure | 601 // Any PagedSpace might invoke RefillFreeLists, so we need to make sure |
605 // to only refill them for the old space. | 602 // to only refill them for the old space. |
606 return; | 603 return; |
607 } | 604 } |
608 | 605 |
609 intptr_t freed_bytes = space->free_list()->Concatenate(free_list); | 606 intptr_t added = space->free_list()->Concatenate(free_list); |
610 space->AddToAccountingStats(freed_bytes); | 607 space->accounting_stats_.IncreaseCapacity(added); |
611 space->DecrementUnsweptFreeBytes(freed_bytes); | |
612 } | 608 } |
613 | 609 |
614 | 610 |
615 void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) { | 611 void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) { |
616 // This is only used when resizing an object. | 612 // This is only used when resizing an object. |
617 DCHECK(MemoryChunk::FromAddress(old_start) == | 613 DCHECK(MemoryChunk::FromAddress(old_start) == |
618 MemoryChunk::FromAddress(new_start)); | 614 MemoryChunk::FromAddress(new_start)); |
619 | 615 |
620 if (!heap->incremental_marking()->IsMarking()) return; | 616 if (!heap->incremental_marking()->IsMarking()) return; |
621 | 617 |
(...skipping 3725 matching lines...) | |
4347 continue; | 4343 continue; |
4348 } | 4344 } |
4349 | 4345 |
4350 // One unused page is kept, all further are released before sweeping them. | 4346 // One unused page is kept, all further are released before sweeping them. |
4351 if (p->LiveBytes() == 0) { | 4347 if (p->LiveBytes() == 0) { |
4352 if (unused_page_present) { | 4348 if (unused_page_present) { |
4353 if (FLAG_gc_verbose) { | 4349 if (FLAG_gc_verbose) { |
4354 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", | 4350 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", |
4355 reinterpret_cast<intptr_t>(p)); | 4351 reinterpret_cast<intptr_t>(p)); |
4356 } | 4352 } |
4357 // Adjust unswept free bytes because releasing a page expects said | 4353 // Adjust bytes allocated because releasing a page expects said |
4358 // counter to be accurate for unswept pages. | 4354 // counter to be accurate for unswept pages. |
4359 space->IncreaseUnsweptFreeBytes(p); | 4355 space->accounting_stats_.AllocateBytes(p->area_size()); |
Hannes Payer (out of office), 2015/10/02 06:52:09:
This seems to be non-logical. Why do we account her…
Michael Lippautz, 2015/10/02 13:04:35:
I agree, the change here is mechanical though. Rel…
Hannes Payer (out of office), 2015/10/05 15:23:26:
The logic is just broken. Let's fix it right away.
Michael Lippautz, 2015/10/07 13:15:39:
Done. I removed the corresponding parts. ReleasePa…
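To make the concern in this thread concrete, here is a minimal stand-alone sketch of the bookkeeping invariant the patch line above (accounting_stats_.AllocateBytes before ReleasePage) appears to preserve. This is not V8 code: AccountingStats, PagedSpaceModel, kPageAreaSize and ReleaseUnsweptEmptyPage() are invented names, and the assumed ReleasePage() contract (deallocate the page's bytes, then shrink capacity by the page area) is only an assumption for illustration.

```cpp
// Toy model (not V8's real classes) of size/capacity accounting.
// Assumption: releasing a page both deallocates its bytes and shrinks
// capacity by the page area, so an unswept page with 0 live bytes must
// first be "allocated" in the stats or size would go negative.
#include <cassert>
#include <cstdio>

constexpr long kPageAreaSize = 512 * 1024;  // hypothetical page area

struct AccountingStats {
  long capacity = 0;  // committed, sweepable memory
  long size = 0;      // bytes considered allocated
  void ExpandSpace(long n) { capacity += n; }
  void ShrinkSpace(long n) { capacity -= n; assert(capacity >= size); }
  void AllocateBytes(long n) { size += n; assert(size <= capacity); }
  void DeallocateBytes(long n) { size -= n; assert(size >= 0); }
};

struct PagedSpaceModel {
  AccountingStats stats;
  void AddPage() { stats.ExpandSpace(kPageAreaSize); }
  // Without the AllocateBytes() call, the DeallocateBytes() below would
  // drive size negative and trip the assert; with it, the release nets out.
  void ReleaseUnsweptEmptyPage() {
    stats.AllocateBytes(kPageAreaSize);    // the step the reviewers question
    stats.DeallocateBytes(kPageAreaSize);  // assumed to happen in ReleasePage()
    stats.ShrinkSpace(kPageAreaSize);      // assumed to happen in ReleasePage()
  }
};

int main() {
  PagedSpaceModel space;
  space.AddPage();
  space.ReleaseUnsweptEmptyPage();
  std::printf("capacity=%ld size=%ld\n", space.stats.capacity, space.stats.size);
  return 0;
}
```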
4360 space->ReleasePage(p); | 4356 space->ReleasePage(p); |
4361 continue; | 4357 continue; |
4362 } | 4358 } |
4363 unused_page_present = true; | 4359 unused_page_present = true; |
4364 } | 4360 } |
4365 | 4361 |
4366 switch (sweeper) { | 4362 switch (sweeper) { |
4367 case CONCURRENT_SWEEPING: | 4363 case CONCURRENT_SWEEPING: |
4368 if (!parallel_sweeping_active) { | 4364 if (!parallel_sweeping_active) { |
4369 if (FLAG_gc_verbose) { | 4365 if (FLAG_gc_verbose) { |
(...skipping 13 matching lines...) | |
4383 IGNORE_FREE_SPACE>(space, NULL, p, NULL); | 4379 IGNORE_FREE_SPACE>(space, NULL, p, NULL); |
4384 } | 4380 } |
4385 pages_swept++; | 4381 pages_swept++; |
4386 parallel_sweeping_active = true; | 4382 parallel_sweeping_active = true; |
4387 } else { | 4383 } else { |
4388 if (FLAG_gc_verbose) { | 4384 if (FLAG_gc_verbose) { |
4389 PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n", | 4385 PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n", |
4390 reinterpret_cast<intptr_t>(p)); | 4386 reinterpret_cast<intptr_t>(p)); |
4391 } | 4387 } |
4392 p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending); | 4388 p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending); |
4393 space->IncreaseUnsweptFreeBytes(p); | 4389 int to_sweep = p->area_size() - p->LiveBytes(); |
4390 space->accounting_stats_.ShrinkSpace(to_sweep); | |
Hannes Payer (out of office), 2015/10/02 06:52:09:
Here we are going to shrink the capacity, a lot. A…
Michael Lippautz, 2015/10/02 13:04:35:
I changed Committed() and MaxCommitted() to be bas…
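The reply above mentions changing Committed() and MaxCommitted(); the exact change is not shown in this diff, so the following is only a hypothetical sketch (ChunkModel, SpaceModel and CommittedFromChunks() are invented names) of why deriving committed memory from the chunks a space owns, rather than from the accounting stats' capacity, keeps the reported value stable while capacity is temporarily shrunk for pages handed to concurrent sweeping.

```cpp
// Hypothetical sketch (not V8 code): a capacity-based Committed() dips
// while a page waits for the concurrent sweeper; a chunk-based view does not.
#include <cstdio>
#include <vector>

struct ChunkModel {
  long committed_size;
  long unswept_free_bytes;  // free bytes not yet returned to the free list
};

struct SpaceModel {
  std::vector<ChunkModel> chunks;
  long stats_capacity = 0;

  void AddChunk(long committed) {
    chunks.push_back({committed, 0});
    stats_capacity += committed;
  }
  // Capacity-based view: affected by sweeping bookkeeping.
  long CommittedFromStats() const { return stats_capacity; }
  // Chunk-based view: depends only on memory the space actually owns.
  long CommittedFromChunks() const {
    long total = 0;
    for (const ChunkModel& c : chunks) total += c.committed_size;
    return total;
  }
  void HandPageToConcurrentSweeper(std::size_t i, long to_sweep) {
    chunks[i].unswept_free_bytes = to_sweep;
    stats_capacity -= to_sweep;  // the shrink the reviewer is worried about
  }
};

int main() {
  SpaceModel space;
  space.AddChunk(512 * 1024);
  space.HandPageToConcurrentSweeper(0, 300 * 1024);
  std::printf("stats-based: %ld, chunk-based: %ld\n",
              space.CommittedFromStats(), space.CommittedFromChunks());
  return 0;
}
```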
4394 } | 4391 } |
4395 space->set_end_of_unswept_pages(p); | 4392 space->set_end_of_unswept_pages(p); |
4396 break; | 4393 break; |
4397 case SEQUENTIAL_SWEEPING: { | 4394 case SEQUENTIAL_SWEEPING: { |
4398 if (FLAG_gc_verbose) { | 4395 if (FLAG_gc_verbose) { |
4399 PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p)); | 4396 PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p)); |
4400 } | 4397 } |
4401 if (space->identity() == CODE_SPACE) { | 4398 if (space->identity() == CODE_SPACE) { |
4402 if (FLAG_zap_code_space) { | 4399 if (FLAG_zap_code_space) { |
4403 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, | 4400 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, |
(...skipping 195 matching lines...) | |
4599 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4596 MarkBit mark_bit = Marking::MarkBitFrom(host); |
4600 if (Marking::IsBlack(mark_bit)) { | 4597 if (Marking::IsBlack(mark_bit)) { |
4601 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); | 4598 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); |
4602 RecordRelocSlot(&rinfo, target); | 4599 RecordRelocSlot(&rinfo, target); |
4603 } | 4600 } |
4604 } | 4601 } |
4605 } | 4602 } |
4606 | 4603 |
4607 } // namespace internal | 4604 } // namespace internal |
4608 } // namespace v8 | 4605 } // namespace v8 |