Chromium Code Reviews

Unified diff: src/spaces.cc

Issue 6970004: Introduce lazy sweeping. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: Created 9 years, 7 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 610 matching lines...)
 
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
 
 PagedSpace::PagedSpace(Heap* heap,
                        intptr_t max_capacity,
                        AllocationSpace id,
                        Executability executable)
     : Space(heap, id, executable),
       free_list_(this),
-      was_swept_conservatively_(false) {
+      was_swept_conservatively_(false),
+      first_unswept_page_(Page::FromAddress(NULL)),
+      last_unswept_page_(Page::FromAddress(NULL)) {
   max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                   * Page::kObjectAreaSize;
   accounting_stats_.Clear();
 
   allocation_info_.top = NULL;
   allocation_info_.limit = NULL;
 
   anchor_.InitializeAsAnchor(this);
 }
 
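The two new members track the window of pages that the last collection marked but did not sweep, using Page::FromAddress(NULL) as an "empty window" sentinel. A minimal sketch of the idiom, with hypothetical stand-in types rather than the real V8 classes:

// Illustrative stand-ins only; not V8's Page/PagedSpace.
struct Page {
  Page* next_page;
  int unswept_bytes;  // bytes a sweep of this page would reclaim
};

// The lazy-sweep window is the inclusive page range [first, last] that is
// still waiting to be swept. V8 encodes "empty" with the sentinel
// Page::FromAddress(NULL); this sketch simply uses nullptr.
struct SweepWindow {
  Page* first = nullptr;
  Page* last = nullptr;
  bool empty() const { return first == nullptr; }
  void clear() { first = last = nullptr; }
};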
(...skipping 915 matching lines...)
 
   ASSERT(new_node_size - size_in_bytes >= 0);  // New linear size.
 
   const int kThreshold = IncrementalMarking::kAllocatedThreshold;
 
   // Memory in the linear allocation area is counted as allocated. We may free
   // a little of this again immediately - see below.
   owner_->Allocate(new_node_size);
 
   if (new_node_size - size_in_bytes > kThreshold &&
-      HEAP->incremental_marking()->IsMarking() &&
+      HEAP->incremental_marking()->IsMarkingIncomplete() &&
       FLAG_incremental_marking_steps) {
     // We don't want to give too large linear areas to the allocator while
     // incremental marking is going on, because we won't check again whether
     // we want to do another increment until the linear area is used up.
     owner_->Free(new_node->address() + size_in_bytes + kThreshold,
                  new_node_size - size_in_bytes - kThreshold);
     owner_->SetTop(new_node->address() + size_in_bytes,
                    new_node->address() + size_in_bytes + kThreshold);
   } else {
     // Normally we give the rest of the node to the allocator as its new
(...skipping 56 matching lines...)
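To make the split in the allocation path above concrete: while marking is incomplete, the allocator keeps only a kThreshold-byte linear area past the requested object, and the tail of the free-list node goes straight back to the free list, so the bump pointer hits its limit soon and incremental marking gets another step. A standalone sketch with hypothetical byte counts (the real threshold is IncrementalMarking::kAllocatedThreshold):

#include <cstdio>

int main() {
  const int kThreshold = 1024;     // hypothetical; see kAllocatedThreshold
  const int new_node_size = 8192;  // size of the free-list node that was found
  const int size_in_bytes = 128;   // size of the current allocation request

  if (new_node_size - size_in_bytes > kThreshold) {
    // The object occupies the first size_in_bytes bytes; the linear
    // allocation area is the next kThreshold bytes; everything after
    // that is returned to the free list immediately.
    int linear_area = kThreshold;
    int freed_back = new_node_size - size_in_bytes - kThreshold;  // 7040
    printf("object %d, linear area %d, freed back %d\n",
           size_in_bytes, linear_area, freed_back);
  }
  return 0;
}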
 
 
 // -----------------------------------------------------------------------------
 // OldSpace implementation
 
 void OldSpace::PrepareForMarkCompact(bool will_compact) {
   ASSERT(!will_compact);
   // Call prepare of the super class.
   PagedSpace::PrepareForMarkCompact(will_compact);
 
+  first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL);
+
   // Clear the free list before a full GC---it will be rebuilt afterward.
+  // TODO(gc): can we avoid resetting free list?
   free_list_.Reset();
 }
 
 
 bool NewSpace::ReserveSpace(int bytes) {
   // We can't reliably unpack a partial snapshot that needs more new space
   // than the minimum NewSpace size.
   ASSERT(bytes <= InitialCapacity());
   Address limit = allocation_info_.limit;
   Address top = allocation_info_.top;
(...skipping 36 matching lines...)
 }
 
 
 // You have to call this last, since the implementation from PagedSpace
 // doesn't know that memory was 'promised' to large object space.
 bool LargeObjectSpace::ReserveSpace(int bytes) {
   return heap()->OldGenerationSpaceAvailable() >= bytes;
 }
 
 
+bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
+  if (IsSweepingComplete()) return true;
+
+  int freed_bytes = 0;
+  Page* last = last_unswept_page_->next_page();
+  Page* p = first_unswept_page_;
+  do {
+    freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
+    p = p->next_page();
+  } while (p != last && freed_bytes < bytes_to_sweep);
+
+  if (p == last) {
+    last_unswept_page_ = first_unswept_page_ = Page::FromAddress(NULL);
+  } else {
+    first_unswept_page_ = p;
+  }
+
+  heap()->LowerOldGenLimits(freed_bytes);
+
+  return IsSweepingComplete();
+}
+
+
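AdvanceSweeper sweeps unswept pages conservatively, front to back, until it has freed at least bytes_to_sweep bytes or exhausted the window, then credits the reclaimed bytes against the old-generation limits. A sketch of the same budget-driven loop, reusing the hypothetical SweepWindow/Page types from the earlier sketch; sweep_page() merely stands in for MarkCompactCollector::SweepConservatively:

// Pretend to reclaim a page's dead space and report the bytes freed.
static int sweep_page(Page* p) { return p->unswept_bytes; }

// Returns whether sweeping is now complete, like the real AdvanceSweeper.
bool AdvanceSweeperSketch(SweepWindow* w, long bytes_to_sweep) {
  if (w->empty()) return true;          // nothing left to sweep
  long freed = 0;
  Page* stop = w->last->next_page;      // one past the end of the window
  Page* p = w->first;
  do {
    freed += sweep_page(p);             // sweep one page, count its bytes
    p = p->next_page;
  } while (p != stop && freed < bytes_to_sweep);
  if (p == stop) {
    w->clear();                         // the whole window has been swept
  } else {
    w->first = p;                       // resume from here next time
  }
  return w->empty();
}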
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
 
   // Free list allocation failed and there is no next page. Fail if we have
   // hit the old generation size limit that should cause a garbage
   // collection.
   if (!heap()->always_allocate() &&
       heap()->OldGenerationAllocationLimitReached()) {
     return NULL;
   }
 
+  // If there are unswept pages, advance the lazy sweeper.
+  if (first_unswept_page_->is_valid()) {
+    AdvanceSweeper(size_in_bytes);
+
+    // Retry the free list allocation.
+    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    if (object != NULL) return object;
+
+    if (!IsSweepingComplete()) {
+      AdvanceSweeper(kMaxInt);
+
+      // Retry the free list allocation.
+      object = free_list_.Allocate(size_in_bytes);
+      if (object != NULL) return object;
+    }
+  }
+
   // Try to expand the space and allocate in the new next page.
   if (Expand()) {
     return free_list_.Allocate(size_in_bytes);
   }
 
   // Finally, fail.
   return NULL;
 }
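The slow path now tries, in order: lazy-sweep roughly size_in_bytes worth of pages and retry the free list; finish sweeping entirely (a kMaxInt budget) and retry; expand the space with a fresh page; and only then fail, which tells the caller a GC is due. A self-contained sketch of that ordering; every helper here is a hypothetical stub, not V8 API:

#include <climits>
#include <cstddef>

// Hypothetical stubs standing in for the corresponding V8 operations.
static bool limit_reached = false;
static bool sweeping_complete = false;
static void* TryFreeList(int size) { (void)size; return NULL; }
static void AdvanceSweeperStub(long budget) {
  if (budget == LONG_MAX) sweeping_complete = true;
}
static bool Expand() { return false; }

void* SlowAllocateSketch(int size_in_bytes) {
  if (limit_reached) return NULL;                // let the caller GC first
  if (!sweeping_complete) {
    AdvanceSweeperStub(size_in_bytes);           // 1. sweep a little, lazily
    if (void* obj = TryFreeList(size_in_bytes)) return obj;
    if (!sweeping_complete) {
      AdvanceSweeperStub(LONG_MAX);              // 2. finish sweeping
      if (void* obj = TryFreeList(size_in_bytes)) return obj;
    }
  }
  if (Expand()) return TryFreeList(size_in_bytes);  // 3. grow the space
  return NULL;                                   // 4. fail; caller must GC
}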
 
 
(...skipping 494 matching lines...)
   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       isolate->code_kind_statistics()[code->kind()] += code->Size();
     }
   }
 }
 #endif  // DEBUG
 
 } }  // namespace v8::internal
