| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 11 matching lines...) |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #include "macro-assembler.h" | 30 #include "macro-assembler.h" |
| 31 #include "mark-compact.h" | 31 #include "mark-compact.h" |
| | 32 #include "msan.h" |
| 32 #include "platform.h" | 33 #include "platform.h" |
| 33 | 34 |
| 34 namespace v8 { | 35 namespace v8 { |
| 35 namespace internal { | 36 namespace internal { |
| 36 | 37 |
| 37 | 38 |
| 38 // ---------------------------------------------------------------------------- | 39 // ---------------------------------------------------------------------------- |
| 39 // HeapObjectIterator | 40 // HeapObjectIterator |
| 40 | 41 |
| 41 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { | 42 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { |
| (...skipping 668 matching lines...) |
| 710 } | 711 } |
| 711 | 712 |
| 712 MemoryChunk* result = MemoryChunk::Initialize(heap, | 713 MemoryChunk* result = MemoryChunk::Initialize(heap, |
| 713 base, | 714 base, |
| 714 chunk_size, | 715 chunk_size, |
| 715 area_start, | 716 area_start, |
| 716 area_end, | 717 area_end, |
| 717 executable, | 718 executable, |
| 718 owner); | 719 owner); |
| 719 result->set_reserved_memory(&reservation); | 720 result->set_reserved_memory(&reservation); |
| | 721 MSAN_MEMORY_IS_INITIALIZED(base, chunk_size); |
| 720 return result; | 722 return result; |
| 721 } | 723 } |
| 722 | 724 |
| 723 | 725 |
| 724 void Page::ResetFreeListStatistics() { | 726 void Page::ResetFreeListStatistics() { |
| 725 non_available_small_blocks_ = 0; | 727 non_available_small_blocks_ = 0; |
| 726 available_in_small_free_list_ = 0; | 728 available_in_small_free_list_ = 0; |
| 727 available_in_medium_free_list_ = 0; | 729 available_in_medium_free_list_ = 0; |
| 728 available_in_large_free_list_ = 0; | 730 available_in_large_free_list_ = 0; |
| 729 available_in_huge_free_list_ = 0; | 731 available_in_huge_free_list_ = 0; |
| (...skipping 341 matching lines...) |
| 1071 size = 8 * kPointerSize * KB; | 1073 size = 8 * kPointerSize * KB; |
| 1072 break; | 1074 break; |
| 1073 case CODE_SPACE: | 1075 case CODE_SPACE: |
| 1074 if (heap()->isolate()->code_range()->exists()) { | 1076 if (heap()->isolate()->code_range()->exists()) { |
| 1075 // When code range exists, code pages are allocated in a special way | 1077 // When code range exists, code pages are allocated in a special way |
| 1076 // (from the reserved code range). That part of the code is not yet | 1078 // (from the reserved code range). That part of the code is not yet |
| 1077 // upgraded to handle small pages. | 1079 // upgraded to handle small pages. |
| 1078 size = AreaSize(); | 1080 size = AreaSize(); |
| 1079 } else { | 1081 } else { |
| 1080 #if V8_TARGET_ARCH_MIPS | 1082 #if V8_TARGET_ARCH_MIPS |
| 1081 // On MIPS, code stubs seem to be quite a bit larger. | 1083 // TODO(plind): Investigate larger code stubs size on MIPS. |
| 1082 // TODO(olivf/MIPS folks): Can we do anything about this? Does it | 1084 size = 480 * KB; |
| 1083 // indicate the presence of a bug? | |
| 1084 size = 464 * KB; | |
| 1085 #else | 1085 #else |
| 1086 size = 416 * KB; | 1086 size = 416 * KB; |
| 1087 #endif | 1087 #endif |
| 1088 } | 1088 } |
| 1089 break; | 1089 break; |
| 1090 default: | 1090 default: |
| 1091 UNREACHABLE(); | 1091 UNREACHABLE(); |
| 1092 } | 1092 } |
| 1093 return Min(size, AreaSize()); | 1093 return Min(size, AreaSize()); |
| 1094 } | 1094 } |
| (...skipping 425 matching lines...) |
| 1520 | 1520 |
| 1521 void SemiSpace::TearDown() { | 1521 void SemiSpace::TearDown() { |
| 1522 start_ = NULL; | 1522 start_ = NULL; |
| 1523 capacity_ = 0; | 1523 capacity_ = 0; |
| 1524 } | 1524 } |
| 1525 | 1525 |
| 1526 | 1526 |
| 1527 bool SemiSpace::Commit() { | 1527 bool SemiSpace::Commit() { |
| 1528 ASSERT(!is_committed()); | 1528 ASSERT(!is_committed()); |
| 1529 int pages = capacity_ / Page::kPageSize; | 1529 int pages = capacity_ / Page::kPageSize; |
| 1530 Address end = start_ + maximum_capacity_; | 1530 if (!heap()->isolate()->memory_allocator()->CommitBlock(start_, |
| 1531 Address start = end - pages * Page::kPageSize; | |
| 1532 if (!heap()->isolate()->memory_allocator()->CommitBlock(start, | |
| 1533 capacity_, | 1531 capacity_, |
| 1534 executable())) { | 1532 executable())) { |
| 1535 return false; | 1533 return false; |
| 1536 } | 1534 } |
| 1537 | 1535 |
| 1538 NewSpacePage* page = anchor(); | 1536 NewSpacePage* current = anchor(); |
| 1539 for (int i = 1; i <= pages; i++) { | 1537 for (int i = 0; i < pages; i++) { |
| 1540 NewSpacePage* new_page = | 1538 NewSpacePage* new_page = |
| 1541 NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this); | 1539 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); |
| 1542 new_page->InsertAfter(page); | 1540 new_page->InsertAfter(current); |
| 1543 page = new_page; | 1541 current = new_page; |
| 1544 } | 1542 } |
| 1545 | 1543 |
| 1546 committed_ = true; | 1544 committed_ = true; |
| 1547 Reset(); | 1545 Reset(); |
| 1548 return true; | 1546 return true; |
| 1549 } | 1547 } |
| 1550 | 1548 |
| 1551 | 1549 |
| 1552 bool SemiSpace::Uncommit() { | 1550 bool SemiSpace::Uncommit() { |
| 1553 ASSERT(is_committed()); | 1551 ASSERT(is_committed()); |
| (...skipping 23 matching lines...) |
| 1577 bool SemiSpace::GrowTo(int new_capacity) { | 1575 bool SemiSpace::GrowTo(int new_capacity) { |
| 1578 if (!is_committed()) { | 1576 if (!is_committed()) { |
| 1579 if (!Commit()) return false; | 1577 if (!Commit()) return false; |
| 1580 } | 1578 } |
| 1581 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); | 1579 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); |
| 1582 ASSERT(new_capacity <= maximum_capacity_); | 1580 ASSERT(new_capacity <= maximum_capacity_); |
| 1583 ASSERT(new_capacity > capacity_); | 1581 ASSERT(new_capacity > capacity_); |
| 1584 int pages_before = capacity_ / Page::kPageSize; | 1582 int pages_before = capacity_ / Page::kPageSize; |
| 1585 int pages_after = new_capacity / Page::kPageSize; | 1583 int pages_after = new_capacity / Page::kPageSize; |
| 1586 | 1584 |
| 1587 Address end = start_ + maximum_capacity_; | |
| 1588 Address start = end - new_capacity; | |
| 1589 size_t delta = new_capacity - capacity_; | 1585 size_t delta = new_capacity - capacity_; |
| 1590 | 1586 |
| 1591 ASSERT(IsAligned(delta, OS::AllocateAlignment())); | 1587 ASSERT(IsAligned(delta, OS::AllocateAlignment())); |
| 1592 if (!heap()->isolate()->memory_allocator()->CommitBlock( | 1588 if (!heap()->isolate()->memory_allocator()->CommitBlock( |
| 1593 start, delta, executable())) { | 1589 start_ + capacity_, delta, executable())) { |
| 1594 return false; | 1590 return false; |
| 1595 } | 1591 } |
| 1596 capacity_ = new_capacity; | 1592 capacity_ = new_capacity; |
| 1597 NewSpacePage* last_page = anchor()->prev_page(); | 1593 NewSpacePage* last_page = anchor()->prev_page(); |
| 1598 ASSERT(last_page != anchor()); | 1594 ASSERT(last_page != anchor()); |
| 1599 for (int i = pages_before + 1; i <= pages_after; i++) { | 1595 for (int i = pages_before; i < pages_after; i++) { |
| 1600 Address page_address = end - i * Page::kPageSize; | 1596 Address page_address = start_ + i * Page::kPageSize; |
| 1601 NewSpacePage* new_page = NewSpacePage::Initialize(heap(), | 1597 NewSpacePage* new_page = NewSpacePage::Initialize(heap(), |
| 1602 page_address, | 1598 page_address, |
| 1603 this); | 1599 this); |
| 1604 new_page->InsertAfter(last_page); | 1600 new_page->InsertAfter(last_page); |
| 1605 Bitmap::Clear(new_page); | 1601 Bitmap::Clear(new_page); |
| 1606 // Duplicate the flags that were set on the old page. | 1602 // Duplicate the flags that were set on the old page. |
| 1607 new_page->SetFlags(last_page->GetFlags(), | 1603 new_page->SetFlags(last_page->GetFlags(), |
| 1608 NewSpacePage::kCopyOnFlipFlagsMask); | 1604 NewSpacePage::kCopyOnFlipFlagsMask); |
| 1609 last_page = new_page; | 1605 last_page = new_page; |
| 1610 } | 1606 } |
| 1611 return true; | 1607 return true; |
| 1612 } | 1608 } |
| 1613 | 1609 |
| 1614 | 1610 |
| 1615 bool SemiSpace::ShrinkTo(int new_capacity) { | 1611 bool SemiSpace::ShrinkTo(int new_capacity) { |
| 1616 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); | 1612 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); |
| 1617 ASSERT(new_capacity >= initial_capacity_); | 1613 ASSERT(new_capacity >= initial_capacity_); |
| 1618 ASSERT(new_capacity < capacity_); | 1614 ASSERT(new_capacity < capacity_); |
| 1619 if (is_committed()) { | 1615 if (is_committed()) { |
| 1620 // Semispaces grow backwards from the end of their allocated capacity, | |
| 1621 // so we find the before and after start addresses relative to the | |
| 1622 // end of the space. | |
| 1623 Address space_end = start_ + maximum_capacity_; | |
| 1624 Address old_start = space_end - capacity_; | |
| 1625 size_t delta = capacity_ - new_capacity; | 1616 size_t delta = capacity_ - new_capacity; |
| 1626 ASSERT(IsAligned(delta, OS::AllocateAlignment())); | 1617 ASSERT(IsAligned(delta, OS::AllocateAlignment())); |
| 1627 | 1618 |
| 1628 MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); | 1619 MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); |
| 1629 if (!allocator->UncommitBlock(old_start, delta)) { | 1620 if (!allocator->UncommitBlock(start_ + new_capacity, delta)) { |
| 1630 return false; | 1621 return false; |
| 1631 } | 1622 } |
| 1632 | 1623 |
| 1633 int pages_after = new_capacity / Page::kPageSize; | 1624 int pages_after = new_capacity / Page::kPageSize; |
| 1634 NewSpacePage* new_last_page = | 1625 NewSpacePage* new_last_page = |
| 1635 NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize); | 1626 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize); |
| 1636 new_last_page->set_next_page(anchor()); | 1627 new_last_page->set_next_page(anchor()); |
| 1637 anchor()->set_prev_page(new_last_page); | 1628 anchor()->set_prev_page(new_last_page); |
| 1638 ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page)); | 1629 ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page)); |
| 1639 } | 1630 } |
| 1640 | 1631 |
| 1641 capacity_ = new_capacity; | 1632 capacity_ = new_capacity; |
| 1642 | 1633 |
| 1643 return true; | 1634 return true; |
| 1644 } | 1635 } |
| 1645 | 1636 |
| 1646 | 1637 |
| 1647 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) { | 1638 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) { |
| 1648 anchor_.set_owner(this); | 1639 anchor_.set_owner(this); |
| (...skipping 1580 matching lines...) |
| 3229 object->ShortPrint(); | 3220 object->ShortPrint(); |
| 3230 PrintF("\n"); | 3221 PrintF("\n"); |
| 3231 } | 3222 } |
| 3232 printf(" --------------------------------------\n"); | 3223 printf(" --------------------------------------\n"); |
| 3233 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3224 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3234 } | 3225 } |
| 3235 | 3226 |
| 3236 #endif // DEBUG | 3227 #endif // DEBUG |
| 3237 | 3228 |
| 3238 } } // namespace v8::internal | 3229 } } // namespace v8::internal |
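
Note on the MSan annotation added by this patch: the new "msan.h" include and the MSAN_MEMORY_IS_INITIALIZED(base, chunk_size) call after MemoryChunk::Initialize mark the freshly mapped chunk as initialized for MemorySanitizer, so later reads of chunk bookkeeping do not trigger false use-of-uninitialized-value reports for memory the tool never saw being written. Below is a minimal sketch of how such an annotation macro is commonly guarded; it is an assumed illustration of the usual MSan pattern (the USE_MSAN guard name is made up here), not the actual contents of V8's msan.h.

    // Sketch only: assumed shape of an MSan annotation macro, not V8's msan.h.
    #include <stddef.h>

    #if defined(__has_feature)
    #if __has_feature(memory_sanitizer)
    #define USE_MSAN 1
    #endif
    #endif

    #ifdef USE_MSAN
    #include <sanitizer/msan_interface.h>
    // Tell MSan that the byte range [start, start + size) holds defined values,
    // e.g. memory handed back by the OS that the tool could not observe being written.
    #define MSAN_MEMORY_IS_INITIALIZED(start, size) \
        __msan_unpoison((start), (size))
    #else
    // In builds without MemorySanitizer the annotation compiles away to nothing.
    #define MSAN_MEMORY_IS_INITIALIZED(start, size) ((void) 0)
    #endif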