| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "v8.h" | 5 #include "v8.h" |
| 6 | 6 |
| 7 #include "full-codegen.h" | 7 #include "full-codegen.h" |
| 8 #include "macro-assembler.h" | 8 #include "macro-assembler.h" |
| 9 #include "mark-compact.h" | 9 #include "mark-compact.h" |
| 10 #include "msan.h" | 10 #include "msan.h" |
| (...skipping 257 matching lines...) |
| 268 capacity_(0), | 268 capacity_(0), |
| 269 capacity_executable_(0), | 269 capacity_executable_(0), |
| 270 size_(0), | 270 size_(0), |
| 271 size_executable_(0), | 271 size_executable_(0), |
| 272 lowest_ever_allocated_(reinterpret_cast<void*>(-1)), | 272 lowest_ever_allocated_(reinterpret_cast<void*>(-1)), |
| 273 highest_ever_allocated_(reinterpret_cast<void*>(0)) { | 273 highest_ever_allocated_(reinterpret_cast<void*>(0)) { |
| 274 } | 274 } |
| 275 | 275 |
| 276 | 276 |
| 277 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { | 277 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { |
| 278 if (FLAG_shadow_pages) { |
| 279 // We need up to twice as much non-executable memory for shadow pages. |
| 280 capacity *= 2; |
| 281 } |
| 278 capacity_ = RoundUp(capacity, Page::kPageSize); | 282 capacity_ = RoundUp(capacity, Page::kPageSize); |
| 279 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); | 283 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); |
| 280 ASSERT_GE(capacity_, capacity_executable_); | 284 ASSERT_GE(capacity_, capacity_executable_); |
| 281 | 285 |
| 282 size_ = 0; | 286 size_ = 0; |
| 283 size_executable_ = 0; | 287 size_executable_ = 0; |
| 284 | 288 |
| 285 return true; | 289 return true; |
| 286 } | 290 } |
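
With --shadow-pages enabled, every non-executable chunk later receives a shadow chunk of the same data size, which is why SetUp() doubles the non-executable budget before rounding. A minimal standalone sketch of that arithmetic, using made-up capacities and a stand-in page size (V8's real limits are configured elsewhere and are not part of this hunk):

  // Sketch only: not part of the patch. Shows the doubling plus RoundUp that
  // SetUp() performs; all values here are hypothetical.
  #include <stdint.h>
  int main() {
    const intptr_t kPageSize = intptr_t(1) << 20;   // stand-in for Page::kPageSize
    intptr_t capacity = 500 * (intptr_t(1) << 20);  // requested: 500 MB
    bool shadow_pages = true;                       // stand-in for FLAG_shadow_pages
    if (shadow_pages) capacity *= 2;                // 1000 MB of non-executable room
    intptr_t rounded = ((capacity + kPageSize - 1) / kPageSize) * kPageSize;
    return rounded == 1000 * (intptr_t(1) << 20) ? 0 : 1;  // already page-aligned
  }
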
| 287 | 291 |
| (...skipping 171 matching lines...) |
| 459 MemoryChunk* chunk = FromAddress(base); | 463 MemoryChunk* chunk = FromAddress(base); |
| 460 | 464 |
| 461 ASSERT(base == chunk->address()); | 465 ASSERT(base == chunk->address()); |
| 462 | 466 |
| 463 chunk->heap_ = heap; | 467 chunk->heap_ = heap; |
| 464 chunk->size_ = size; | 468 chunk->size_ = size; |
| 465 chunk->area_start_ = area_start; | 469 chunk->area_start_ = area_start; |
| 466 chunk->area_end_ = area_end; | 470 chunk->area_end_ = area_end; |
| 467 chunk->flags_ = 0; | 471 chunk->flags_ = 0; |
| 468 chunk->set_owner(owner); | 472 chunk->set_owner(owner); |
| 473 chunk->shadow_chunk_ = NULL; |
| 474 chunk->shadow_data_offset_ = 0; |
| 475 chunk->large_object_shadow_data_ = reinterpret_cast<intptr_t>(kZapValue); |
| 469 chunk->InitializeReservedMemory(); | 476 chunk->InitializeReservedMemory(); |
| 470 chunk->slots_buffer_ = NULL; | 477 chunk->slots_buffer_ = NULL; |
| 471 chunk->skip_list_ = NULL; | 478 chunk->skip_list_ = NULL; |
| 472 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; | 479 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; |
| 473 chunk->progress_bar_ = 0; | 480 chunk->progress_bar_ = 0; |
| 474 chunk->high_water_mark_ = static_cast<int>(area_start - base); | 481 chunk->high_water_mark_ = static_cast<int>(area_start - base); |
| 475 chunk->set_parallel_sweeping(PARALLEL_SWEEPING_DONE); | 482 chunk->set_parallel_sweeping(PARALLEL_SWEEPING_DONE); |
| 476 chunk->available_in_small_free_list_ = 0; | 483 chunk->available_in_small_free_list_ = 0; |
| 477 chunk->available_in_medium_free_list_ = 0; | 484 chunk->available_in_medium_free_list_ = 0; |
| 478 chunk->available_in_large_free_list_ = 0; | 485 chunk->available_in_large_free_list_ = 0; |
| (...skipping 215 matching lines...) |
| 694 area_start, | 701 area_start, |
| 695 area_end, | 702 area_end, |
| 696 executable, | 703 executable, |
| 697 owner); | 704 owner); |
| 698 result->set_reserved_memory(&reservation); | 705 result->set_reserved_memory(&reservation); |
| 699 MSAN_MEMORY_IS_INITIALIZED_IN_JIT(base, chunk_size); | 706 MSAN_MEMORY_IS_INITIALIZED_IN_JIT(base, chunk_size); |
| 700 return result; | 707 return result; |
| 701 } | 708 } |
| 702 | 709 |
| 703 | 710 |
| 711 bool MemoryAllocator::AllocateShadowChunkFor(MemoryChunk* chunk) { |
| 712 ASSERT(FLAG_shadow_pages); |
| 713 ASSERT(chunk->shadow_chunk() == NULL); |
| 714 |
| 715 int size = static_cast<int>( |
| 716 chunk->area_end() - (chunk->address() + MemoryChunk::kObjectStartOffset)); |
| 717 MemoryChunk* shadow = AllocateChunk(size, size, NOT_EXECUTABLE, NULL); |
| 718 if (shadow == NULL) { |
| 719 return false; |
| 720 } |
| 721 ASSERT((chunk->area_end() - chunk->address()) == |
| 722 (shadow->area_end() - shadow->address())); |
| 723 chunk->set_shadow_chunk(shadow); |
| 724 chunk->shadow_data_offset_ = shadow->address() - chunk->address(); |
| 725 ASSERT(IsAligned(chunk->shadow_data_offset_, Page::kPageSize)); |
| 726 |
| 727 return true; |
| 728 } |
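
The page-aligned offset recorded by AllocateShadowChunkFor() is what makes shadow lookups cheap: the shadow chunk mirrors the original chunk's layout, so an address in the data area maps to its shadow slot by adding one constant. A minimal sketch of such a lookup, assuming a hypothetical helper name and that the offset is readable from the call site (the real accessor, if any, is outside this hunk):

  // Sketch only: ShadowDataFor is a made-up name, not part of the patch.
  static Address ShadowDataFor(Address addr) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
    // Both chunks are page-aligned and place their data areas at
    // kObjectStartOffset, so one constant maps data area to data area.
    return addr + chunk->shadow_data_offset_;
  }
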
| 729 |
| 730 |
| 731 void MemoryAllocator::FreeShadowChunkFor(MemoryChunk* chunk) { |
| 732 ASSERT(FLAG_shadow_pages); |
| 733 |
| 734 if (chunk->shadow_chunk() != NULL) { |
| 735 Free(chunk->shadow_chunk()); |
| 736 chunk->set_shadow_chunk(NULL); |
| 737 } |
| 738 } |
| 739 |
| 740 |
| 704 void Page::ResetFreeListStatistics() { | 741 void Page::ResetFreeListStatistics() { |
| 705 non_available_small_blocks_ = 0; | 742 non_available_small_blocks_ = 0; |
| 706 available_in_small_free_list_ = 0; | 743 available_in_small_free_list_ = 0; |
| 707 available_in_medium_free_list_ = 0; | 744 available_in_medium_free_list_ = 0; |
| 708 available_in_large_free_list_ = 0; | 745 available_in_large_free_list_ = 0; |
| 709 available_in_huge_free_list_ = 0; | 746 available_in_huge_free_list_ = 0; |
| 710 } | 747 } |
| 711 | 748 |
| 712 | 749 |
| 713 Page* MemoryAllocator::AllocatePage(intptr_t size, | 750 Page* MemoryAllocator::AllocatePage(intptr_t size, |
| 714 PagedSpace* owner, | 751 PagedSpace* owner, |
| 715 Executability executable) { | 752 Executability executable) { |
| 716 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner); | 753 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner); |
| 717 | 754 |
| 718 if (chunk == NULL) return NULL; | 755 if (chunk == NULL) return NULL; |
| 719 | 756 |
| 720 return Page::Initialize(isolate_->heap(), chunk, executable, owner); | 757 Page* p = Page::Initialize(isolate_->heap(), chunk, executable, owner); |
| 758 if (FLAG_shadow_pages) { |
| 759 if (!AllocateShadowChunkFor(p)) { |
| 760 Free(p); |
| 761 return NULL; |
| 762 } |
| 763 } |
| 764 return p; |
| 721 } | 765 } |
| 722 | 766 |
| 723 | 767 |
| 724 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, | 768 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, |
| 725 Space* owner, | 769 Space* owner, |
| 726 Executability executable) { | 770 Executability executable) { |
| 727 MemoryChunk* chunk = AllocateChunk(object_size, | 771 MemoryChunk* chunk = AllocateChunk(object_size, |
| 728 object_size, | 772 object_size, |
| 729 executable, | 773 executable, |
| 730 owner); | 774 owner); |
| 731 if (chunk == NULL) return NULL; | 775 if (chunk == NULL) return NULL; |
| 732 return LargePage::Initialize(isolate_->heap(), chunk); | 776 |
| 777 LargePage* p = LargePage::Initialize(isolate_->heap(), chunk); |
| 778 |
| 779 if (FLAG_shadow_pages) { |
| 780 // Instead of allocating a shadow chunk we initialize the shadow offset |
| 781 // so that it points directly at p->large_object_shadow_data_. |
| 782 p->shadow_data_offset_ = |
| 783 reinterpret_cast<Address>(&p->large_object_shadow_data_) - |
| 784 p->GetObject()->address(); |
| 785 } |
| 786 return p; |
| 733 } | 787 } |
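
Large pages hold exactly one object, so instead of a separate shadow chunk the patch points the offset at the inline large_object_shadow_data_ word in the page header. A hedged sketch of the invariant this establishes (helper name made up; it also assumes the fields are visible from the call site):

  // Sketch only: with shadow_data_offset_ set as in AllocateLargePage() above,
  // the generic address-plus-offset lookup for the object's start address
  // resolves to the single inline shadow word; no extra memory is reserved.
  static void CheckLargePageShadowSlot(LargePage* page) {
    Address object_start = page->GetObject()->address();
    Address shadow_slot = object_start + page->shadow_data_offset_;
    ASSERT(shadow_slot ==
           reinterpret_cast<Address>(&page->large_object_shadow_data_));
  }
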
| 734 | 788 |
| 735 | 789 |
| 736 void MemoryAllocator::Free(MemoryChunk* chunk) { | 790 void MemoryAllocator::Free(MemoryChunk* chunk) { |
| 737 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); | 791 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); |
| 738 if (chunk->owner() != NULL) { | 792 if (chunk->owner() != NULL) { |
| 739 ObjectSpace space = | 793 ObjectSpace space = |
| 740 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); | 794 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); |
| 741 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); | 795 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); |
| 742 } | 796 } |
| 743 | 797 |
| 744 isolate_->heap()->RememberUnmappedPage( | 798 isolate_->heap()->RememberUnmappedPage( |
| 745 reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate()); | 799 reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate()); |
| 746 | 800 |
| 747 delete chunk->slots_buffer(); | 801 delete chunk->slots_buffer(); |
| 748 delete chunk->skip_list(); | 802 delete chunk->skip_list(); |
| 749 | 803 |
| 804 if (FLAG_shadow_pages) { |
| 805 FreeShadowChunkFor(chunk); |
| 806 } |
| 807 |
| 750 VirtualMemory* reservation = chunk->reserved_memory(); | 808 VirtualMemory* reservation = chunk->reserved_memory(); |
| 751 if (reservation->IsReserved()) { | 809 if (reservation->IsReserved()) { |
| 752 FreeMemory(reservation, chunk->executable()); | 810 FreeMemory(reservation, chunk->executable()); |
| 753 } else { | 811 } else { |
| 754 FreeMemory(chunk->address(), | 812 FreeMemory(chunk->address(), |
| 755 chunk->size(), | 813 chunk->size(), |
| 756 chunk->executable()); | 814 chunk->executable()); |
| 757 } | 815 } |
| 758 } | 816 } |
| 759 | 817 |
| (...skipping 741 matching lines...) |
| 1501 maximum_committed_ = 0; | 1559 maximum_committed_ = 0; |
| 1502 committed_ = false; | 1560 committed_ = false; |
| 1503 start_ = start; | 1561 start_ = start; |
| 1504 address_mask_ = ~(maximum_capacity - 1); | 1562 address_mask_ = ~(maximum_capacity - 1); |
| 1505 object_mask_ = address_mask_ | kHeapObjectTagMask; | 1563 object_mask_ = address_mask_ | kHeapObjectTagMask; |
| 1506 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; | 1564 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; |
| 1507 age_mark_ = start_; | 1565 age_mark_ = start_; |
| 1508 } | 1566 } |
| 1509 | 1567 |
| 1510 | 1568 |
| 1569 bool SemiSpace::AllocateShadowChunksForPages(int start_index, int end_index) { |
| 1570 ASSERT(FLAG_shadow_pages); |
| 1571 |
| 1572 MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); |
| 1573 |
| 1574 for (int i = start_index; i < end_index; i++) { |
| 1575 NewSpacePage* page = |
| 1576 NewSpacePage::FromAddress(start_ + i * Page::kPageSize); |
| 1577 |
| 1578 if (!allocator->AllocateShadowChunkFor(page)) { |
| 1579 return false; |
| 1580 } |
| 1581 } |
| 1582 return true; |
| 1583 } |
| 1584 |
| 1585 |
| 1586 void SemiSpace::FreeShadowChunksForPages(int start_index, int end_index) { |
| 1587 ASSERT(FLAG_shadow_pages); |
| 1588 MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); |
| 1589 |
| 1590 for (int i = start_index; i < end_index; i++) { |
| 1591 NewSpacePage* page = |
| 1592 NewSpacePage::FromAddress(start_ + i * Page::kPageSize); |
| 1593 |
| 1594 allocator->FreeShadowChunkFor(page); |
| 1595 } |
| 1596 } |
| 1597 |
| 1598 |
| 1511 void SemiSpace::TearDown() { | 1599 void SemiSpace::TearDown() { |
| 1600 if (FLAG_shadow_pages && is_committed()) { |
| 1601 int pages = capacity_ / Page::kPageSize; |
| 1602 FreeShadowChunksForPages(0, pages); |
| 1603 } |
| 1512 start_ = NULL; | 1604 start_ = NULL; |
| 1513 capacity_ = 0; | 1605 capacity_ = 0; |
| 1514 } | 1606 } |
| 1515 | 1607 |
| 1516 | 1608 |
| 1517 bool SemiSpace::Commit() { | 1609 bool SemiSpace::Commit() { |
| 1518 ASSERT(!is_committed()); | 1610 ASSERT(!is_committed()); |
| 1519 int pages = capacity_ / Page::kPageSize; | 1611 int pages = capacity_ / Page::kPageSize; |
| 1520 if (!heap()->isolate()->memory_allocator()->CommitBlock(start_, | 1612 if (!heap()->isolate()->memory_allocator()->CommitBlock(start_, |
| 1521 capacity_, | 1613 capacity_, |
| 1522 executable())) { | 1614 executable())) { |
| 1523 return false; | 1615 return false; |
| 1524 } | 1616 } |
| 1525 | 1617 |
| 1526 NewSpacePage* current = anchor(); | 1618 NewSpacePage* current = anchor(); |
| 1527 for (int i = 0; i < pages; i++) { | 1619 for (int i = 0; i < pages; i++) { |
| 1528 NewSpacePage* new_page = | 1620 NewSpacePage* new_page = |
| 1529 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); | 1621 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); |
| 1530 new_page->InsertAfter(current); | 1622 new_page->InsertAfter(current); |
| 1531 current = new_page; | 1623 current = new_page; |
| 1532 } | 1624 } |
| 1625 if (FLAG_shadow_pages) { |
| 1626 if (!AllocateShadowChunksForPages(0, pages)) { |
| 1627 return false; |
| 1628 } |
| 1629 } |
| 1533 | 1630 |
| 1534 SetCapacity(capacity_); | 1631 SetCapacity(capacity_); |
| 1535 committed_ = true; | 1632 committed_ = true; |
| 1536 Reset(); | 1633 Reset(); |
| 1537 return true; | 1634 return true; |
| 1538 } | 1635 } |
| 1539 | 1636 |
| 1540 | 1637 |
| 1541 bool SemiSpace::Uncommit() { | 1638 bool SemiSpace::Uncommit() { |
| 1542 ASSERT(is_committed()); | 1639 ASSERT(is_committed()); |
| 1640 |
| 1641 if (FLAG_shadow_pages) { |
| 1642 int pages_before = capacity_ / Page::kPageSize; |
| 1643 FreeShadowChunksForPages(0, pages_before); |
| 1644 } |
| 1645 |
| 1543 Address start = start_ + maximum_capacity_ - capacity_; | 1646 Address start = start_ + maximum_capacity_ - capacity_; |
| 1544 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) { | 1647 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) { |
| 1545 return false; | 1648 return false; |
| 1546 } | 1649 } |
| 1547 anchor()->set_next_page(anchor()); | 1650 anchor()->set_next_page(anchor()); |
| 1548 anchor()->set_prev_page(anchor()); | 1651 anchor()->set_prev_page(anchor()); |
| 1549 | 1652 |
| 1550 committed_ = false; | 1653 committed_ = false; |
| 1551 return true; | 1654 return true; |
| 1552 } | 1655 } |
| (...skipping 35 matching lines...) |
| 1588 NewSpacePage* new_page = NewSpacePage::Initialize(heap(), | 1691 NewSpacePage* new_page = NewSpacePage::Initialize(heap(), |
| 1589 page_address, | 1692 page_address, |
| 1590 this); | 1693 this); |
| 1591 new_page->InsertAfter(last_page); | 1694 new_page->InsertAfter(last_page); |
| 1592 Bitmap::Clear(new_page); | 1695 Bitmap::Clear(new_page); |
| 1593 // Duplicate the flags that were set on the old page. | 1696 // Duplicate the flags that were set on the old page. |
| 1594 new_page->SetFlags(last_page->GetFlags(), | 1697 new_page->SetFlags(last_page->GetFlags(), |
| 1595 NewSpacePage::kCopyOnFlipFlagsMask); | 1698 NewSpacePage::kCopyOnFlipFlagsMask); |
| 1596 last_page = new_page; | 1699 last_page = new_page; |
| 1597 } | 1700 } |
| 1701 |
| 1702 if (FLAG_shadow_pages) { |
| 1703 if (!AllocateShadowChunksForPages(pages_before, pages_after)) { |
| 1704 return false; |
| 1705 } |
| 1706 } |
| 1707 |
| 1598 return true; | 1708 return true; |
| 1599 } | 1709 } |
| 1600 | 1710 |
| 1601 | 1711 |
| 1602 bool SemiSpace::ShrinkTo(int new_capacity) { | 1712 bool SemiSpace::ShrinkTo(int new_capacity) { |
| 1603 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); | 1713 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); |
| 1604 ASSERT(new_capacity >= initial_capacity_); | 1714 ASSERT(new_capacity >= initial_capacity_); |
| 1605 ASSERT(new_capacity < capacity_); | 1715 ASSERT(new_capacity < capacity_); |
| 1606 if (is_committed()) { | 1716 if (is_committed()) { |
| 1607 size_t delta = capacity_ - new_capacity; | 1717 size_t delta = capacity_ - new_capacity; |
| 1608 ASSERT(IsAligned(delta, OS::AllocateAlignment())); | 1718 ASSERT(IsAligned(delta, OS::AllocateAlignment())); |
| 1609 | 1719 |
| 1610 MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); | 1720 MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); |
| 1721 int pages_after = new_capacity / Page::kPageSize; |
| 1722 |
| 1723 if (FLAG_shadow_pages) { |
| 1724 int pages_before = capacity_ / Page::kPageSize; |
| 1725 FreeShadowChunksForPages(pages_after, pages_before); |
| 1726 } |
| 1727 |
| 1611 if (!allocator->UncommitBlock(start_ + new_capacity, delta)) { | 1728 if (!allocator->UncommitBlock(start_ + new_capacity, delta)) { |
| 1612 return false; | 1729 return false; |
| 1613 } | 1730 } |
| 1614 | 1731 |
| 1615 int pages_after = new_capacity / Page::kPageSize; | |
| 1616 NewSpacePage* new_last_page = | 1732 NewSpacePage* new_last_page = |
| 1617 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize); | 1733 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize); |
| 1618 new_last_page->set_next_page(anchor()); | 1734 new_last_page->set_next_page(anchor()); |
| 1619 anchor()->set_prev_page(new_last_page); | 1735 anchor()->set_prev_page(new_last_page); |
| 1620 ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page)); | 1736 ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page)); |
| 1621 } | 1737 } |
| 1622 | 1738 |
| 1623 SetCapacity(new_capacity); | 1739 SetCapacity(new_capacity); |
| 1624 | 1740 |
| 1625 return true; | 1741 return true; |
| (...skipping 1485 matching lines...) |
| 3111 object->ShortPrint(); | 3227 object->ShortPrint(); |
| 3112 PrintF("\n"); | 3228 PrintF("\n"); |
| 3113 } | 3229 } |
| 3114 printf(" --------------------------------------\n"); | 3230 printf(" --------------------------------------\n"); |
| 3115 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3231 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3116 } | 3232 } |
| 3117 | 3233 |
| 3118 #endif // DEBUG | 3234 #endif // DEBUG |
| 3119 | 3235 |
| 3120 } } // namespace v8::internal | 3236 } } // namespace v8::internal |