OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1104 matching lines...) | |
1115 | 1115 |
1116 void PagedSpace::ResetFreeListStatistics() { | 1116 void PagedSpace::ResetFreeListStatistics() { |
1117 PageIterator page_iterator(this); | 1117 PageIterator page_iterator(this); |
1118 while (page_iterator.has_next()) { | 1118 while (page_iterator.has_next()) { |
1119 Page* page = page_iterator.next(); | 1119 Page* page = page_iterator.next(); |
1120 page->ResetFreeListStatistics(); | 1120 page->ResetFreeListStatistics(); |
1121 } | 1121 } |
1122 } | 1122 } |
1123 | 1123 |
1124 | 1124 |
1125 void PagedSpace::IncreaseCapacity(int size) { | |
1126 accounting_stats_.ExpandSpace(size); | |
1127 heap()->UpdateMaximumCommitted(); | |
Hannes Payer (out of office) 2013/10/22 14:46:07:
Instead of calling heap()->UpdateMaximumCommitted(…
rmcilroy 2013/10/23 14:40:23:
The heap size can be increased when there is no GC…
Hannes Payer (out of office) 2013/10/24 08:12:35:
Beginning of GC is what I meant, that should be fi…
rmcilroy 2013/10/24 16:34:31:
Done.
| |
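For readers outside V8, the pattern debated in the thread above is a committed-memory high-water mark: whenever a space grows, the heap records the largest committed size seen so far. Below is a minimal standalone sketch (hypothetical names, not the V8 implementation) of such tracking, and of why updating only at the start of a GC would miss growth that happens with no GC in between, which is the point rmcilroy raises.

```cpp
// Standalone illustration (hypothetical names, not V8 code): a space that
// tracks the high-water mark of its committed memory.  UpdateMaximumCommitted
// plays the role of the heap()->UpdateMaximumCommitted() call in the patch.
#include <algorithm>
#include <cassert>
#include <cstddef>

class TrackedSpace {
 public:
  TrackedSpace() : committed_(0), maximum_committed_(0) {}

  void IncreaseCapacity(std::size_t bytes) {
    committed_ += bytes;
    UpdateMaximumCommitted();  // record growth even when no GC runs
  }

  void DecreaseCapacity(std::size_t bytes) {
    assert(bytes <= committed_);
    committed_ -= bytes;       // shrinking never lowers the high-water mark
  }

  // Records the largest committed size observed so far.
  void UpdateMaximumCommitted() {
    maximum_committed_ = std::max(maximum_committed_, committed_);
  }

  std::size_t CommittedMemory() const { return committed_; }
  std::size_t MaximumCommittedMemory() const { return maximum_committed_; }

 private:
  std::size_t committed_;
  std::size_t maximum_committed_;
};
```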
1128 } | |
1129 | |
1130 | |
1125 void PagedSpace::ReleasePage(Page* page, bool unlink) { | 1131 void PagedSpace::ReleasePage(Page* page, bool unlink) { |
1126 ASSERT(page->LiveBytes() == 0); | 1132 ASSERT(page->LiveBytes() == 0); |
1127 ASSERT(AreaSize() == page->area_size()); | 1133 ASSERT(AreaSize() == page->area_size()); |
1128 | 1134 |
1129 // Adjust list of unswept pages if the page is the head of the list. | 1135 // Adjust list of unswept pages if the page is the head of the list. |
1130 if (first_unswept_page_ == page) { | 1136 if (first_unswept_page_ == page) { |
1131 first_unswept_page_ = page->next_page(); | 1137 first_unswept_page_ = page->next_page(); |
1132 if (first_unswept_page_ == anchor()) { | 1138 if (first_unswept_page_ == anchor()) { |
1133 first_unswept_page_ = Page::FromAddress(NULL); | 1139 first_unswept_page_ = Page::FromAddress(NULL); |
1134 } | 1140 } |
(...skipping 367 matching lines...) | |
1502 // Creates a space in the young generation. The constructor does not | 1508 // Creates a space in the young generation. The constructor does not |
1503 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of | 1509 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of |
1504 // memory of size 'capacity' when set up, and does not grow or shrink | 1510 // memory of size 'capacity' when set up, and does not grow or shrink |
1505 // otherwise. In the mark-compact collector, the memory region of the from | 1511 // otherwise. In the mark-compact collector, the memory region of the from |
1506 // space is used as the marking stack. It requires contiguous memory | 1512 // space is used as the marking stack. It requires contiguous memory |
1507 // addresses. | 1513 // addresses. |
1508 ASSERT(maximum_capacity >= Page::kPageSize); | 1514 ASSERT(maximum_capacity >= Page::kPageSize); |
1509 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize); | 1515 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize); |
1510 capacity_ = initial_capacity; | 1516 capacity_ = initial_capacity; |
1511 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); | 1517 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); |
1518 maximum_committed_ = 0; | |
1512 committed_ = false; | 1519 committed_ = false; |
1513 start_ = start; | 1520 start_ = start; |
1514 address_mask_ = ~(maximum_capacity - 1); | 1521 address_mask_ = ~(maximum_capacity - 1); |
1515 object_mask_ = address_mask_ | kHeapObjectTagMask; | 1522 object_mask_ = address_mask_ | kHeapObjectTagMask; |
1516 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; | 1523 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; |
1517 age_mark_ = start_; | 1524 age_mark_ = start_; |
1518 } | 1525 } |
1519 | 1526 |
1520 | 1527 |
1521 void SemiSpace::TearDown() { | 1528 void SemiSpace::TearDown() { |
(...skipping 12 matching lines...) | |
1534 } | 1541 } |
1535 | 1542 |
1536 NewSpacePage* current = anchor(); | 1543 NewSpacePage* current = anchor(); |
1537 for (int i = 0; i < pages; i++) { | 1544 for (int i = 0; i < pages; i++) { |
1538 NewSpacePage* new_page = | 1545 NewSpacePage* new_page = |
1539 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); | 1546 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); |
1540 new_page->InsertAfter(current); | 1547 new_page->InsertAfter(current); |
1541 current = new_page; | 1548 current = new_page; |
1542 } | 1549 } |
1543 | 1550 |
1551 if (capacity_ > maximum_committed_) { | |
1552 maximum_committed_ = capacity_; | |
1553 heap()->UpdateMaximumCommitted(); | |
1554 } | |
Hannes Payer (out of office) 2013/10/22 14:46:07:
Instead of duplicating this code, why don't we add…
rmcilroy 2013/10/23 14:40:23:
Done.
| |
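One way to read the "Done" above: the same three-line maximum_committed_ update appears in both SemiSpace::Commit and SemiSpace::GrowTo, and it can be funneled through a single helper. A minimal sketch under that assumption (class and helper names are illustrative, not the shipped code):

```cpp
// Hypothetical consolidation of the duplicated tracking: Commit() and
// GrowTo() both route their capacity change through one helper that
// maintains the high-water mark, instead of repeating the comparison
// at every call site.
#include <algorithm>

class SemiSpaceSketch {
 public:
  SemiSpaceSketch() : capacity_(0), maximum_committed_(0), committed_(false) {}

  bool Commit(int capacity) {
    SetCapacity(capacity);   // the only place maximum_committed_ is updated
    committed_ = true;
    return true;
  }

  bool GrowTo(int new_capacity) {
    if (!committed_ || new_capacity <= capacity_) return false;
    SetCapacity(new_capacity);
    return true;
  }

  int maximum_committed() const { return maximum_committed_; }

 private:
  void SetCapacity(int new_capacity) {
    capacity_ = new_capacity;
    maximum_committed_ = std::max(maximum_committed_, capacity_);
  }

  int capacity_;
  int maximum_committed_;
  bool committed_;
};
```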
1544 committed_ = true; | 1555 committed_ = true; |
1545 Reset(); | 1556 Reset(); |
1546 return true; | 1557 return true; |
1547 } | 1558 } |
1548 | 1559 |
1549 | 1560 |
1550 bool SemiSpace::Uncommit() { | 1561 bool SemiSpace::Uncommit() { |
1551 ASSERT(is_committed()); | 1562 ASSERT(is_committed()); |
1552 Address start = start_ + maximum_capacity_ - capacity_; | 1563 Address start = start_ + maximum_capacity_ - capacity_; |
1553 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) { | 1564 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) { |
(...skipping 43 matching lines...) | |
1597 NewSpacePage* new_page = NewSpacePage::Initialize(heap(), | 1608 NewSpacePage* new_page = NewSpacePage::Initialize(heap(), |
1598 page_address, | 1609 page_address, |
1599 this); | 1610 this); |
1600 new_page->InsertAfter(last_page); | 1611 new_page->InsertAfter(last_page); |
1601 Bitmap::Clear(new_page); | 1612 Bitmap::Clear(new_page); |
1602 // Duplicate the flags that was set on the old page. | 1613 // Duplicate the flags that was set on the old page. |
1603 new_page->SetFlags(last_page->GetFlags(), | 1614 new_page->SetFlags(last_page->GetFlags(), |
1604 NewSpacePage::kCopyOnFlipFlagsMask); | 1615 NewSpacePage::kCopyOnFlipFlagsMask); |
1605 last_page = new_page; | 1616 last_page = new_page; |
1606 } | 1617 } |
1618 if (capacity_ > maximum_committed_) { | |
1619 maximum_committed_ = capacity_; | |
1620 heap()->UpdateMaximumCommitted(); | |
1621 } | |
1607 return true; | 1622 return true; |
1608 } | 1623 } |
1609 | 1624 |
1610 | 1625 |
1611 bool SemiSpace::ShrinkTo(int new_capacity) { | 1626 bool SemiSpace::ShrinkTo(int new_capacity) { |
1612 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); | 1627 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); |
1613 ASSERT(new_capacity >= initial_capacity_); | 1628 ASSERT(new_capacity >= initial_capacity_); |
1614 ASSERT(new_capacity < capacity_); | 1629 ASSERT(new_capacity < capacity_); |
1615 if (is_committed()) { | 1630 if (is_committed()) { |
1616 size_t delta = capacity_ - new_capacity; | 1631 size_t delta = capacity_ - new_capacity; |
(...skipping 1311 matching lines...) | |
2928 first_page_(NULL), | 2943 first_page_(NULL), |
2929 size_(0), | 2944 size_(0), |
2930 page_count_(0), | 2945 page_count_(0), |
2931 objects_size_(0), | 2946 objects_size_(0), |
2932 chunk_map_(ComparePointers, 1024) {} | 2947 chunk_map_(ComparePointers, 1024) {} |
2933 | 2948 |
2934 | 2949 |
2935 bool LargeObjectSpace::SetUp() { | 2950 bool LargeObjectSpace::SetUp() { |
2936 first_page_ = NULL; | 2951 first_page_ = NULL; |
2937 size_ = 0; | 2952 size_ = 0; |
2953 maximum_committed_ = 0; | |
2938 page_count_ = 0; | 2954 page_count_ = 0; |
2939 objects_size_ = 0; | 2955 objects_size_ = 0; |
2940 chunk_map_.Clear(); | 2956 chunk_map_.Clear(); |
2941 return true; | 2957 return true; |
2942 } | 2958 } |
2943 | 2959 |
2944 | 2960 |
2945 void LargeObjectSpace::TearDown() { | 2961 void LargeObjectSpace::TearDown() { |
2946 while (first_page_ != NULL) { | 2962 while (first_page_ != NULL) { |
2947 LargePage* page = first_page_; | 2963 LargePage* page = first_page_; |
(...skipping 26 matching lines...) | |
2974 AllocateLargePage(object_size, this, executable); | 2990 AllocateLargePage(object_size, this, executable); |
2975 if (page == NULL) return Failure::RetryAfterGC(identity()); | 2991 if (page == NULL) return Failure::RetryAfterGC(identity()); |
2976 ASSERT(page->area_size() >= object_size); | 2992 ASSERT(page->area_size() >= object_size); |
2977 | 2993 |
2978 size_ += static_cast<int>(page->size()); | 2994 size_ += static_cast<int>(page->size()); |
2979 objects_size_ += object_size; | 2995 objects_size_ += object_size; |
2980 page_count_++; | 2996 page_count_++; |
2981 page->set_next_page(first_page_); | 2997 page->set_next_page(first_page_); |
2982 first_page_ = page; | 2998 first_page_ = page; |
2983 | 2999 |
3000 if (size_ > maximum_committed_) { | |
3001 maximum_committed_ = size_; | |
3002 heap()->UpdateMaximumCommitted(); | |
3003 } | |
3004 | |
2984 // Register all MemoryChunk::kAlignment-aligned chunks covered by | 3005 // Register all MemoryChunk::kAlignment-aligned chunks covered by |
2985 // this large page in the chunk map. | 3006 // this large page in the chunk map. |
2986 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment; | 3007 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment; |
2987 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment; | 3008 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment; |
2988 for (uintptr_t key = base; key <= limit; key++) { | 3009 for (uintptr_t key = base; key <= limit; key++) { |
2989 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key), | 3010 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key), |
2990 static_cast<uint32_t>(key), | 3011 static_cast<uint32_t>(key), |
2991 true); | 3012 true); |
2992 ASSERT(entry != NULL); | 3013 ASSERT(entry != NULL); |
2993 entry->value = page; | 3014 entry->value = page; |
(...skipping 226 matching lines...) | |
3220 object->ShortPrint(); | 3241 object->ShortPrint(); |
3221 PrintF("\n"); | 3242 PrintF("\n"); |
3222 } | 3243 } |
3223 printf(" --------------------------------------\n"); | 3244 printf(" --------------------------------------\n"); |
3224 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3245 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
3225 } | 3246 } |
3226 | 3247 |
3227 #endif // DEBUG | 3248 #endif // DEBUG |
3228 | 3249 |
3229 } } // namespace v8::internal | 3250 } } // namespace v8::internal |