OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
7 | 7 |
8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
9 #include "src/atomic-utils.h" | 9 #include "src/atomic-utils.h" |
10 #include "src/base/atomicops.h" | 10 #include "src/base/atomicops.h" |
11 #include "src/base/bits.h" | 11 #include "src/base/bits.h" |
12 #include "src/base/platform/mutex.h" | 12 #include "src/base/platform/mutex.h" |
13 #include "src/flags.h" | 13 #include "src/flags.h" |
14 #include "src/hashmap.h" | 14 #include "src/hashmap.h" |
15 #include "src/list.h" | 15 #include "src/list.h" |
16 #include "src/objects.h" | 16 #include "src/objects.h" |
17 #include "src/utils.h" | 17 #include "src/utils.h" |
18 | 18 |
19 namespace v8 { | 19 namespace v8 { |
20 namespace internal { | 20 namespace internal { |
21 | 21 |
22 class AllocationInfo; | 22 class AllocationInfo; |
23 class AllocationObserver; | 23 class AllocationObserver; |
24 class CompactionSpace; | 24 class CompactionSpace; |
25 class CompactionSpaceCollection; | 25 class CompactionSpaceCollection; |
26 class FreeList; | 26 class FreeList; |
27 class Isolate; | 27 class Isolate; |
28 class MemoryAllocator; | 28 class MemoryAllocator; |
29 class MemoryChunk; | 29 class MemoryChunk; |
| 30 class Page; |
30 class PagedSpace; | 31 class PagedSpace; |
31 class SemiSpace; | 32 class SemiSpace; |
32 class SkipList; | 33 class SkipList; |
33 class SlotsBuffer; | 34 class SlotsBuffer; |
34 class SlotSet; | 35 class SlotSet; |
35 class TypedSlotSet; | 36 class TypedSlotSet; |
36 class Space; | 37 class Space; |
37 | 38 |
38 // ----------------------------------------------------------------------------- | 39 // ----------------------------------------------------------------------------- |
39 // Heap structures: | 40 // Heap structures: |
(...skipping 240 matching lines...) |
280 // Clear all cells till the cell containing the last index. | 281 // Clear all cells till the cell containing the last index. |
281 for (uint32_t i = start_cell_index; i < end_cell_index; i++) { | 282 for (uint32_t i = start_cell_index; i < end_cell_index; i++) { |
282 cells()[i] = 0; | 283 cells()[i] = 0; |
283 } | 284 } |
284 // Clear all bits in the last cell till the last bit before index. | 285 // Clear all bits in the last cell till the last bit before index. |
285 uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1); | 286 uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1); |
286 cells()[end_cell_index] &= clear_mask; | 287 cells()[end_cell_index] &= clear_mask; |
287 } | 288 } |
288 }; | 289 }; |
289 | 290 |
| 291 enum FreeListCategoryType { |
| 292 kTiniest, |
| 293 kTiny, |
| 294 kSmall, |
| 295 kMedium, |
| 296 kLarge, |
| 297 kHuge, |
| 298 |
| 299 kFirstCategory = kTiniest, |
| 300 kLastCategory = kHuge, |
| 301 kNumberOfCategories = kLastCategory + 1, |
| 302 kInvalidCategory |
| 303 }; |
| 304 |
| 305 enum FreeMode { kLinkCategory, kDoNotLinkCategory }; |
| 306 |
| 307 // A free list category maintains a linked list of free memory blocks. |
| 308 class FreeListCategory { |
| 309 public: |
| 310 static const int kSize = kIntSize + // FreeListCategoryType type_ |
| 311 kIntSize + // int available_ |
| 312 kPointerSize + // FreeSpace* top_ |
| 313 kPointerSize + // FreeListCategory* prev_ |
| 314 kPointerSize; // FreeListCategory* next_ |
| 315 |
| 316 FreeListCategory() |
| 317 : type_(kInvalidCategory), |
| 318 available_(0), |
| 319 top_(nullptr), |
| 320 prev_(nullptr), |
| 321 next_(nullptr) {} |
| 322 |
| 323 void Initialize(FreeListCategoryType type) { |
| 324 type_ = type; |
| 325 available_ = 0; |
| 326 top_ = nullptr; |
| 327 prev_ = nullptr; |
| 328 next_ = nullptr; |
| 329 } |
| 330 |
| 331 void Invalidate(); |
| 332 |
| 333 void Reset(); |
| 334 |
| 335 void ResetStats() { Reset(); } |
| 336 |
| 337 void RepairFreeList(Heap* heap); |
| 338 |
| 339 // Relinks the category into the currently owning free list. Requires that the |
| 340 // category is currently unlinked. |
| 341 void Relink(); |
| 342 |
| 343 bool Free(FreeSpace* node, int size_in_bytes, FreeMode mode); |
| 344 |
| 345 // Picks a node from the list and stores its size in |node_size|. Returns |
| 346 // nullptr if the category is empty. |
| 347 FreeSpace* PickNodeFromList(int* node_size); |
| 348 |
| 349 // Performs a single try to pick a node of at least |minimum_size| from the |
| 350 // category. Stores the actual size in |node_size|. Returns nullptr if no |
| 351 // node is found. |
| 352 FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size); |
| 353 |
| 354 // Picks a node of at least |minimum_size| from the category. Stores the |
| 355 // actual size in |node_size|. Returns nullptr if no node is found. |
| 356 FreeSpace* SearchForNodeInList(int minimum_size, int* node_size); |
| 357 |
| 358 inline FreeList* owner(); |
| 359 inline bool is_linked(); |
| 360 bool is_empty() { return top() == nullptr; } |
| 361 int available() const { return available_; } |
| 362 |
| 363 #ifdef DEBUG |
| 364 intptr_t SumFreeList(); |
| 365 int FreeListLength(); |
| 366 #endif |
| 367 |
| 368 private: |
| 369 // For debug builds we accurately compute free lists lengths up until |
| 370 // {kVeryLongFreeList} by manually walking the list. |
| 371 static const int kVeryLongFreeList = 500; |
| 372 |
| 373 inline Page* page(); |
| 374 |
| 375 FreeSpace* top() { return top_; } |
| 376 void set_top(FreeSpace* top) { top_ = top; } |
| 377 FreeListCategory* prev() { return prev_; } |
| 378 void set_prev(FreeListCategory* prev) { prev_ = prev; } |
| 379 FreeListCategory* next() { return next_; } |
| 380 void set_next(FreeListCategory* next) { next_ = next; } |
| 381 |
| 382 // |type_|: The type of this free list category. |
| 383 FreeListCategoryType type_; |
| 384 |
| 385 // |available_|: Total available bytes in all blocks of this free list |
| 386 // category. |
| 387 int available_; |
| 388 |
| 389 // |top_|: Points to the top FreeSpace* in the free list category. |
| 390 FreeSpace* top_; |
| 391 |
| 392 FreeListCategory* prev_; |
| 393 FreeListCategory* next_; |
| 394 |
| 395 friend class FreeList; |
| 396 friend class PagedSpace; |
| 397 }; |
290 | 398 |
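The FreeListCategory introduced above keeps a LIFO chain of free blocks plus a running byte total for one size class. A minimal, self-contained sketch of that list discipline (the Block type and method names below are illustrative stand-ins, not the V8 API):

    #include <cstddef>

    // Illustrative stand-in for a free block header (not V8's FreeSpace).
    struct Block {
      size_t size;
      Block* next;
    };

    // Simplified category: push freed blocks, pop them again on allocation.
    class SimpleCategory {
     public:
      void Free(Block* node, size_t size) {
        node->size = size;
        node->next = top_;       // new block becomes the top of the chain
        top_ = node;
        available_ += size;
      }

      // Pops the top block, or returns nullptr if the category is empty.
      Block* PickNode(size_t* node_size) {
        if (top_ == nullptr) return nullptr;
        Block* node = top_;
        top_ = node->next;
        available_ -= node->size;
        *node_size = node->size;
        return node;
      }

      size_t available() const { return available_; }

     private:
      Block* top_ = nullptr;
      size_t available_ = 0;
    };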
291 // MemoryChunk represents a memory region owned by a specific space. | 399 // MemoryChunk represents a memory region owned by a specific space. |
292 // It is divided into the header and the body. Chunk start is always | 400 // It is divided into the header and the body. Chunk start is always |
293 // 1MB aligned. Start of the body is aligned so it can accommodate | 401 // 1MB aligned. Start of the body is aligned so it can accommodate |
294 // any heap object. | 402 // any heap object. |
295 class MemoryChunk { | 403 class MemoryChunk { |
296 public: | 404 public: |
297 enum MemoryChunkFlags { | 405 enum MemoryChunkFlags { |
298 IS_EXECUTABLE, | 406 IS_EXECUTABLE, |
299 POINTERS_TO_HERE_ARE_INTERESTING, | 407 POINTERS_TO_HERE_ARE_INTERESTING, |
(...skipping 90 matching lines...) |
390 + kPointerSize; // SkipList* skip_list_; | 498 + kPointerSize; // SkipList* skip_list_; |
391 | 499 |
392 static const size_t kMinHeaderSize = | 500 static const size_t kMinHeaderSize = |
393 kWriteBarrierCounterOffset + | 501 kWriteBarrierCounterOffset + |
394 kIntptrSize // intptr_t write_barrier_counter_ | 502 kIntptrSize // intptr_t write_barrier_counter_ |
395 + kPointerSize // AtomicValue high_water_mark_ | 503 + kPointerSize // AtomicValue high_water_mark_ |
396 + kPointerSize // base::Mutex* mutex_ | 504 + kPointerSize // base::Mutex* mutex_ |
397 + kPointerSize // base::AtomicWord concurrent_sweeping_ | 505 + kPointerSize // base::AtomicWord concurrent_sweeping_ |
398 + 2 * kPointerSize // AtomicNumber free-list statistics | 506 + 2 * kPointerSize // AtomicNumber free-list statistics |
399 + kPointerSize // AtomicValue next_chunk_ | 507 + kPointerSize // AtomicValue next_chunk_ |
400 + kPointerSize; // AtomicValue prev_chunk_ | 508 + kPointerSize // AtomicValue prev_chunk_ |
| 509 // FreeListCategory categories_[kNumberOfCategories] |
| 510 + FreeListCategory::kSize * kNumberOfCategories; |
401 | 511 |
402 // We add some more space to the computed header size to account for missing | 512 // We add some more space to the computed header size to account for missing |
403 // alignment requirements in our computation. | 513 // alignment requirements in our computation. |
404 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. | 514 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. |
405 static const size_t kHeaderSize = kMinHeaderSize; | 515 static const size_t kHeaderSize = kMinHeaderSize; |
406 | 516 |
407 static const int kBodyOffset = | 517 static const int kBodyOffset = |
408 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); | 518 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); |
409 | 519 |
410 // The start offset of the object area in a page. Aligned to both maps and | 520 // The start offset of the object area in a page. Aligned to both maps and |
(...skipping 159 matching lines...) |
570 | 680 |
571 bool IsEvacuationCandidate() { | 681 bool IsEvacuationCandidate() { |
572 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE))); | 682 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE))); |
573 return IsFlagSet(EVACUATION_CANDIDATE); | 683 return IsFlagSet(EVACUATION_CANDIDATE); |
574 } | 684 } |
575 | 685 |
576 bool CanAllocate() { | 686 bool CanAllocate() { |
577 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); | 687 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); |
578 } | 688 } |
579 | 689 |
580 void MarkEvacuationCandidate() { | |
581 DCHECK(!IsFlagSet(NEVER_EVACUATE)); | |
582 DCHECK_NULL(old_to_old_slots_); | |
583 DCHECK_NULL(typed_old_to_old_slots_); | |
584 SetFlag(EVACUATION_CANDIDATE); | |
585 } | |
586 | |
587 void ClearEvacuationCandidate() { | |
588 DCHECK_NULL(old_to_old_slots_); | |
589 DCHECK_NULL(typed_old_to_old_slots_); | |
590 ClearFlag(EVACUATION_CANDIDATE); | |
591 } | |
592 | |
593 bool ShouldSkipEvacuationSlotRecording() { | 690 bool ShouldSkipEvacuationSlotRecording() { |
594 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; | 691 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; |
595 } | 692 } |
596 | 693 |
597 Executability executable() { | 694 Executability executable() { |
598 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | 695 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
599 } | 696 } |
600 | 697 |
601 bool InNewSpace() { | 698 bool InNewSpace() { |
602 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; | 699 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; |
(...skipping 89 matching lines...) |
692 | 789 |
693 // PagedSpace free-list statistics. | 790 // PagedSpace free-list statistics. |
694 AtomicNumber<intptr_t> available_in_free_list_; | 791 AtomicNumber<intptr_t> available_in_free_list_; |
695 AtomicNumber<intptr_t> wasted_memory_; | 792 AtomicNumber<intptr_t> wasted_memory_; |
696 | 793 |
697 // next_chunk_ holds a pointer of type MemoryChunk | 794 // next_chunk_ holds a pointer of type MemoryChunk |
698 AtomicValue<MemoryChunk*> next_chunk_; | 795 AtomicValue<MemoryChunk*> next_chunk_; |
699 // prev_chunk_ holds a pointer of type MemoryChunk | 796 // prev_chunk_ holds a pointer of type MemoryChunk |
700 AtomicValue<MemoryChunk*> prev_chunk_; | 797 AtomicValue<MemoryChunk*> prev_chunk_; |
701 | 798 |
| 799 FreeListCategory categories_[kNumberOfCategories]; |
| 800 |
702 private: | 801 private: |
703 void InitializeReservedMemory() { reservation_.Reset(); } | 802 void InitializeReservedMemory() { reservation_.Reset(); } |
704 | 803 |
705 friend class MemoryAllocator; | 804 friend class MemoryAllocator; |
706 friend class MemoryChunkValidator; | 805 friend class MemoryChunkValidator; |
707 }; | 806 }; |
708 | 807 |
709 enum FreeListCategoryType { | |
710 kTiniest, | |
711 kTiny, | |
712 kSmall, | |
713 kMedium, | |
714 kLarge, | |
715 kHuge, | |
716 | |
717 kFirstCategory = kTiniest, | |
718 kLastCategory = kHuge, | |
719 kNumberOfCategories = kLastCategory + 1 | |
720 }; | |
721 | |
722 // ----------------------------------------------------------------------------- | 808 // ----------------------------------------------------------------------------- |
723 // A page is a memory chunk of a size 1MB. Large object pages may be larger. | 809 // A page is a memory chunk of a size 1MB. Large object pages may be larger. |
724 // | 810 // |
725 // The only way to get a page pointer is by calling factory methods: | 811 // The only way to get a page pointer is by calling factory methods: |
726 // Page* p = Page::FromAddress(addr); or | 812 // Page* p = Page::FromAddress(addr); or |
727 // Page* p = Page::FromAllocationTop(top); | 813 // Page* p = Page::FromAllocationTop(top); |
728 class Page : public MemoryChunk { | 814 class Page : public MemoryChunk { |
729 public: | 815 public: |
730 // Returns the page containing a given address. The address ranges | 816 // Returns the page containing a given address. The address ranges |
731 // from [page_addr .. page_addr + kPageSize[ | 817 // from [page_addr .. page_addr + kPageSize[ |
(...skipping 83 matching lines...) |
815 return concurrent_sweeping_state().Value() == kSweepingDone; | 901 return concurrent_sweeping_state().Value() == kSweepingDone; |
816 } | 902 } |
817 | 903 |
818 void ResetFreeListStatistics(); | 904 void ResetFreeListStatistics(); |
819 | 905 |
820 int LiveBytesFromFreeList() { | 906 int LiveBytesFromFreeList() { |
821 return static_cast<int>(area_size() - wasted_memory() - | 907 return static_cast<int>(area_size() - wasted_memory() - |
822 available_in_free_list()); | 908 available_in_free_list()); |
823 } | 909 } |
824 | 910 |
| 911 template <typename Callback> |
| 912 inline void ForAllFreeListCategories(Callback callback) { |
| 913 for (int i = kFirstCategory; i < kNumberOfCategories; i++) { |
| 914 callback(&categories_[i]); |
| 915 } |
| 916 } |
| 917 |
| 918 FreeListCategory* free_list_category(FreeListCategoryType type) { |
| 919 return &categories_[type]; |
| 920 } |
| 921 |
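Page now owns one FreeListCategory per size class and exposes a callback-based walk over them. A self-contained sketch of the same traversal pattern, e.g. for totalling the free bytes on a page (PageLike and its members are invented for the example, not the V8 API):

    struct Category { int available = 0; };

    struct PageLike {
      static const int kNumCategories = 6;  // mirrors kNumberOfCategories above
      Category categories[kNumCategories];

      // Applies |callback| to every category, as Page::ForAllFreeListCategories does.
      template <typename Callback>
      void ForAllCategories(Callback callback) {
        for (int i = 0; i < kNumCategories; i++) callback(&categories[i]);
      }
    };

    // Example use: sum the bytes sitting on a page's free-list categories.
    int AvailableOnPage(PageLike* page) {
      int total = 0;
      page->ForAllCategories([&total](Category* c) { total += c->available; });
      return total;
    }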
825 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \ | 922 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \ |
826 type name() { return name##_.Value(); } \ | 923 type name() { return name##_.Value(); } \ |
827 void set_##name(type name) { name##_.SetValue(name); } \ | 924 void set_##name(type name) { name##_.SetValue(name); } \ |
828 void add_##name(type name) { name##_.Increment(name); } | 925 void add_##name(type name) { name##_.Increment(name); } |
829 | 926 |
830 FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory) | 927 FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory) |
831 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list) | 928 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list) |
832 | 929 |
833 #undef FRAGMENTATION_STATS_ACCESSORS | 930 #undef FRAGMENTATION_STATS_ACCESSORS |
834 | 931 |
835 #ifdef DEBUG | 932 #ifdef DEBUG |
836 void Print(); | 933 void Print(); |
837 #endif // DEBUG | 934 #endif // DEBUG |
838 | 935 |
| 936 inline void MarkNeverAllocateForTesting(); |
| 937 inline void MarkEvacuationCandidate(); |
| 938 inline void ClearEvacuationCandidate(); |
| 939 |
| 940 private: |
| 941 inline void InitializeFreeListCategories(); |
| 942 |
839 friend class MemoryAllocator; | 943 friend class MemoryAllocator; |
840 }; | 944 }; |
841 | 945 |
842 | 946 |
843 class LargePage : public MemoryChunk { | 947 class LargePage : public MemoryChunk { |
844 public: | 948 public: |
845 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } | 949 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } |
846 | 950 |
847 inline LargePage* next_page() { | 951 inline LargePage* next_page() { |
848 return static_cast<LargePage*>(next_chunk()); | 952 return static_cast<LargePage*>(next_chunk()); |
(...skipping 619 matching lines...) |
1468 | 1572 |
1469 // Zero out all the allocation statistics (i.e., no capacity). | 1573 // Zero out all the allocation statistics (i.e., no capacity). |
1470 void Clear() { | 1574 void Clear() { |
1471 capacity_ = 0; | 1575 capacity_ = 0; |
1472 max_capacity_ = 0; | 1576 max_capacity_ = 0; |
1473 size_ = 0; | 1577 size_ = 0; |
1474 } | 1578 } |
1475 | 1579 |
1476 void ClearSize() { size_ = capacity_; } | 1580 void ClearSize() { size_ = capacity_; } |
1477 | 1581 |
1478 // Reset the allocation statistics (i.e., available = capacity with no wasted | |
1479 // or allocated bytes). | |
1480 void Reset() { | |
1481 size_ = 0; | |
1482 } | |
1483 | |
1484 // Accessors for the allocation statistics. | 1582 // Accessors for the allocation statistics. |
1485 intptr_t Capacity() { return capacity_; } | 1583 intptr_t Capacity() { return capacity_; } |
1486 intptr_t MaxCapacity() { return max_capacity_; } | 1584 intptr_t MaxCapacity() { return max_capacity_; } |
1487 intptr_t Size() { | 1585 intptr_t Size() { |
1488 CHECK_GE(size_, 0); | 1586 CHECK_GE(size_, 0); |
1489 return size_; | 1587 return size_; |
1490 } | 1588 } |
1491 | 1589 |
1492 // Grow the space by adding available bytes. They are initially marked as | 1590 // Grow the space by adding available bytes. They are initially marked as |
1493 // being in use (part of the size), but will normally be immediately freed, | 1591 // being in use (part of the size), but will normally be immediately freed, |
1494 // putting them on the free list and removing them from size_. | 1592 // putting them on the free list and removing them from size_. |
1495 void ExpandSpace(int size_in_bytes) { | 1593 void ExpandSpace(int size_in_bytes) { |
1496 capacity_ += size_in_bytes; | 1594 capacity_ += size_in_bytes; |
1497 size_ += size_in_bytes; | 1595 size_ += size_in_bytes; |
1498 if (capacity_ > max_capacity_) { | 1596 if (capacity_ > max_capacity_) { |
1499 max_capacity_ = capacity_; | 1597 max_capacity_ = capacity_; |
1500 } | 1598 } |
1501 CHECK(size_ >= 0); | 1599 CHECK(size_ >= 0); |
1502 } | 1600 } |
1503 | 1601 |
1504 // Shrink the space by removing available bytes. Since shrinking is done | 1602 // Shrink the space by removing available bytes. Since shrinking is done |
1505 // during sweeping, bytes have been marked as being in use (part of the size) | 1603 // during sweeping, bytes have been marked as being in use (part of the size) |
1506 // and are hereby freed. | 1604 // and are hereby freed. |
1507 void ShrinkSpace(int size_in_bytes) { | 1605 void ShrinkSpace(int size_in_bytes) { |
1508 capacity_ -= size_in_bytes; | 1606 capacity_ -= size_in_bytes; |
1509 size_ -= size_in_bytes; | 1607 size_ -= size_in_bytes; |
1510 CHECK(size_ >= 0); | 1608 CHECK_GE(size_, 0); |
1511 } | 1609 } |
1512 | 1610 |
1513 // Allocate from available bytes (available -> size). | 1611 // Allocate from available bytes (available -> size). |
1514 void AllocateBytes(intptr_t size_in_bytes) { | 1612 void AllocateBytes(intptr_t size_in_bytes) { |
1515 size_ += size_in_bytes; | 1613 size_ += size_in_bytes; |
1516 CHECK(size_ >= 0); | 1614 CHECK_GE(size_, 0); |
1517 } | 1615 } |
1518 | 1616 |
1519 // Free allocated bytes, making them available (size -> available). | 1617 // Free allocated bytes, making them available (size -> available). |
1520 void DeallocateBytes(intptr_t size_in_bytes) { | 1618 void DeallocateBytes(intptr_t size_in_bytes) { |
1521 size_ -= size_in_bytes; | 1619 size_ -= size_in_bytes; |
1522 CHECK_GE(size_, 0); | 1620 CHECK_GE(size_, 0); |
1523 } | 1621 } |
1524 | 1622 |
1525 // Merge {other} into {this}. | 1623 // Merge {other} into {this}. |
1526 void Merge(const AllocationStats& other) { | 1624 void Merge(const AllocationStats& other) { |
(...skipping 18 matching lines...) |
1545 // bookkeeping structures) currently in the space. | 1643 // bookkeeping structures) currently in the space. |
1546 intptr_t capacity_; | 1644 intptr_t capacity_; |
1547 | 1645 |
1548 // |max_capacity_|: The maximum capacity ever observed. | 1646 // |max_capacity_|: The maximum capacity ever observed. |
1549 intptr_t max_capacity_; | 1647 intptr_t max_capacity_; |
1550 | 1648 |
1551 // |size_|: The number of allocated bytes. | 1649 // |size_|: The number of allocated bytes. |
1552 intptr_t size_; | 1650 intptr_t size_; |
1553 }; | 1651 }; |
1554 | 1652 |
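A short worked example of the AllocationStats flow, using only the methods shown above: expanding by a page counts the bytes as both capacity and size, handing them to the free list moves them back to "available", and a later allocation consumes part of them again (the 4096-byte page size is arbitrary for the illustration):

    AllocationStats stats;
    stats.Clear();               // capacity == 0, size == 0
    stats.ExpandSpace(4096);     // capacity == 4096, size == 4096 (new page, all in use)
    stats.DeallocateBytes(4096); // size == 0 (bytes put on the free list)
    stats.AllocateBytes(256);    // size == 256 (allocated from the free list)
    // Now Capacity() == 4096 and Size() == 256, so 3840 bytes remain available.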
1555 | |
1556 // A free list category maintains a linked list of free memory blocks. | |
1557 class FreeListCategory { | |
1558 public: | |
1559 FreeListCategory() : top_(nullptr), end_(nullptr), available_(0) {} | |
1560 | |
1561 void Initialize(FreeList* owner, FreeListCategoryType type) { | |
1562 owner_ = owner; | |
1563 type_ = type; | |
1564 } | |
1565 | |
1566 // Concatenates {category} into {this}. | |
1567 // | |
1568 // Note: Thread-safe. | |
1569 intptr_t Concatenate(FreeListCategory* category); | |
1570 | |
1571 void Reset(); | |
1572 | |
1573 void Free(FreeSpace* node, int size_in_bytes); | |
1574 | |
1575 // Pick a node from the list. | |
1576 FreeSpace* PickNodeFromList(int* node_size); | |
1577 | |
1578 // Pick a node from the list and compare it against {size_in_bytes}. If the | |
1579 // node's size is greater or equal return the node and null otherwise. | |
1580 FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size); | |
1581 | |
1582 // Search for a node of size {size_in_bytes}. | |
1583 FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size); | |
1584 | |
1585 intptr_t EvictFreeListItemsInList(Page* p); | |
1586 bool ContainsPageFreeListItemsInList(Page* p); | |
1587 | |
1588 void RepairFreeList(Heap* heap); | |
1589 | |
1590 bool IsEmpty() { return top() == nullptr; } | |
1591 | |
1592 FreeList* owner() { return owner_; } | |
1593 int available() const { return available_; } | |
1594 | |
1595 #ifdef DEBUG | |
1596 intptr_t SumFreeList(); | |
1597 int FreeListLength(); | |
1598 bool IsVeryLong(); | |
1599 #endif | |
1600 | |
1601 private: | |
1602 // For debug builds we accurately compute free lists lengths up until | |
1603 // {kVeryLongFreeList} by manually walking the list. | |
1604 static const int kVeryLongFreeList = 500; | |
1605 | |
1606 FreeSpace* top() { return top_.Value(); } | |
1607 void set_top(FreeSpace* top) { top_.SetValue(top); } | |
1608 | |
1609 FreeSpace* end() const { return end_; } | |
1610 void set_end(FreeSpace* end) { end_ = end; } | |
1611 | |
1612 // |type_|: The type of this free list category. | |
1613 FreeListCategoryType type_; | |
1614 | |
1615 // |top_|: Points to the top FreeSpace* in the free list category. | |
1616 AtomicValue<FreeSpace*> top_; | |
1617 | |
1618 // |end_|: Points to the end FreeSpace* in the free list category. | |
1619 FreeSpace* end_; | |
1620 | |
1621 // |available_|: Total available bytes in all blocks of this free list | |
1622 // category. | |
1623 int available_; | |
1624 | |
1625 // |owner_|: The owning free list of this category. | |
1626 FreeList* owner_; | |
1627 }; | |
1628 | |
1629 // A free list maintaining free blocks of memory. The free list is organized in | 1653 // A free list maintaining free blocks of memory. The free list is organized in |
1630 // a way to encourage objects allocated around the same time to be near each | 1654 // a way to encourage objects allocated around the same time to be near each |
1631 // other. The normal way to allocate is intended to be by bumping a 'top' | 1655 // other. The normal way to allocate is intended to be by bumping a 'top' |
1632 // pointer until it hits a 'limit' pointer. When the limit is hit we need to | 1656 // pointer until it hits a 'limit' pointer. When the limit is hit we need to |
1633 // find a new space to allocate from. This is done with the free list, which is | 1657 // find a new space to allocate from. This is done with the free list, which is |
1634 // divided up into rough categories to cut down on waste. Having finer | 1658 // divided up into rough categories to cut down on waste. Having finer |
1635 // categories would scatter allocation more. | 1659 // categories would scatter allocation more. |
1636 | 1660 |
1637 // The free list is organized in categories as follows: | 1661 // The free list is organized in categories as follows: |
1638 // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for | 1662 // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for |
(...skipping 24 matching lines...) |
1663 } else if (maximum_freed <= kMediumListMax) { | 1687 } else if (maximum_freed <= kMediumListMax) { |
1664 return kMediumAllocationMax; | 1688 return kMediumAllocationMax; |
1665 } else if (maximum_freed <= kLargeListMax) { | 1689 } else if (maximum_freed <= kLargeListMax) { |
1666 return kLargeAllocationMax; | 1690 return kLargeAllocationMax; |
1667 } | 1691 } |
1668 return maximum_freed; | 1692 return maximum_freed; |
1669 } | 1693 } |
1670 | 1694 |
1671 explicit FreeList(PagedSpace* owner); | 1695 explicit FreeList(PagedSpace* owner); |
1672 | 1696 |
1673 // The method concatenates {other} into {this} and returns the added bytes, | |
1674 // including waste. | |
1675 // | |
1676 // Note: Thread-safe. | |
1677 intptr_t Concatenate(FreeList* other); | |
1678 | |
1679 // Adds a node on the free list. The block of size {size_in_bytes} starting | 1697 // Adds a node on the free list. The block of size {size_in_bytes} starting |
1680 // at {start} is placed on the free list. The return value is the number of | 1698 // at {start} is placed on the free list. The return value is the number of |
1681 // bytes that were not added to the free list, because the freed memory block | 1699 // bytes that were not added to the free list, because the freed memory block |
1682 // was too small. Bookkeeping information will be written to the block, i.e., | 1700 // was too small. Bookkeeping information will be written to the block, i.e., |
1683 // its contents will be destroyed. The start address should be word aligned, | 1701 // its contents will be destroyed. The start address should be word aligned, |
1684 // and the size should be a non-zero multiple of the word size. | 1702 // and the size should be a non-zero multiple of the word size. |
1685 int Free(Address start, int size_in_bytes); | 1703 int Free(Address start, int size_in_bytes, FreeMode mode); |
1686 | 1704 |
1687 // Allocate a block of size {size_in_bytes} from the free list. The block is | 1705 // Allocate a block of size {size_in_bytes} from the free list. The block is |
1688 // uninitialized. A failure is returned if no block is available. The size | 1706 // uninitialized. A failure is returned if no block is available. The size |
1689 // should be a non-zero multiple of the word size. | 1707 // should be a non-zero multiple of the word size. |
1690 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); | 1708 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); |
1691 | 1709 |
1692 // Clear the free list. | 1710 // Clear the free list. |
1693 void Reset(); | 1711 void Reset(); |
1694 | 1712 |
1695 void ResetStats() { wasted_bytes_ = 0; } | 1713 void ResetStats() { |
| 1714 wasted_bytes_.SetValue(0); |
| 1715 ForAllFreeListCategories( |
| 1716 [](FreeListCategory* category) { category->ResetStats(); }); |
| 1717 } |
1696 | 1718 |
1697 // Return the number of bytes available on the free list. | 1719 // Return the number of bytes available on the free list. |
1698 intptr_t Available() { | 1720 intptr_t Available() { |
1699 intptr_t available = 0; | 1721 intptr_t available = 0; |
1700 for (int i = kFirstCategory; i < kNumberOfCategories; i++) { | 1722 ForAllFreeListCategories([&available](FreeListCategory* category) { |
1701 available += category_[i].available(); | 1723 available += category->available(); |
1702 } | 1724 }); |
1703 return available; | 1725 return available; |
1704 } | 1726 } |
1705 | 1727 |
1706 // The method tries to find a {FreeSpace} node of at least {size_in_bytes} | |
1707 // size in the free list category exactly matching the size. If no suitable | |
1708 // node could be found, the method falls back to retrieving a {FreeSpace} | |
1709 // from the large or huge free list category. | |
1710 // | |
1711 // Can be used concurrently. | |
1712 MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes); | |
1713 | |
1714 bool IsEmpty() { | 1728 bool IsEmpty() { |
1715 for (int i = kFirstCategory; i < kNumberOfCategories; i++) { | 1729 bool empty = true; |
1716 if (!category_[i].IsEmpty()) return false; | 1730 ForAllFreeListCategories([&empty](FreeListCategory* category) { |
1717 } | 1731 if (!category->is_empty()) empty = false; |
1718 return true; | 1732 }); |
| 1733 return empty; |
1719 } | 1734 } |
1720 | 1735 |
1721 // Used after booting the VM. | 1736 // Used after booting the VM. |
1722 void RepairLists(Heap* heap); | 1737 void RepairLists(Heap* heap); |
1723 | 1738 |
1724 intptr_t EvictFreeListItems(Page* p); | 1739 intptr_t EvictFreeListItems(Page* page); |
1725 bool ContainsPageFreeListItems(Page* p); | 1740 bool ContainsPageFreeListItems(Page* page); |
1726 | 1741 |
1727 PagedSpace* owner() { return owner_; } | 1742 PagedSpace* owner() { return owner_; } |
1728 intptr_t wasted_bytes() { return wasted_bytes_; } | 1743 intptr_t wasted_bytes() { return wasted_bytes_.Value(); } |
1729 base::Mutex* mutex() { return &mutex_; } | 1744 |
| 1745 template <typename Callback> |
| 1746 void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) { |
| 1747 FreeListCategory* current = categories_[type]; |
| 1748 while (current != nullptr) { |
| 1749 FreeListCategory* next = current->next(); |
| 1750 callback(current); |
| 1751 current = next; |
| 1752 } |
| 1753 } |
| 1754 |
| 1755 template <typename Callback> |
| 1756 void ForAllFreeListCategories(Callback callback) { |
| 1757 for (int i = kFirstCategory; i < kNumberOfCategories; i++) { |
| 1758 ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback); |
| 1759 } |
| 1760 } |
| 1761 |
| 1762 bool AddCategory(FreeListCategory* category); |
| 1763 void RemoveCategory(FreeListCategory* category); |
| 1764 void PrintCategories(FreeListCategoryType type); |
1730 | 1765 |
1731 #ifdef DEBUG | 1766 #ifdef DEBUG |
1732 void Zap(); | |
1733 intptr_t SumFreeLists(); | 1767 intptr_t SumFreeLists(); |
1734 bool IsVeryLong(); | 1768 bool IsVeryLong(); |
1735 #endif | 1769 #endif |
1736 | 1770 |
1737 private: | 1771 private: |
| 1772 class FreeListCategoryIterator { |
| 1773 public: |
| 1774 FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type) |
| 1775 : current_(free_list->categories_[type]) {} |
| 1776 |
| 1777 bool HasNext() { return current_ != nullptr; } |
| 1778 |
| 1779 FreeListCategory* Next() { |
| 1780 DCHECK(HasNext()); |
| 1781 FreeListCategory* tmp = current_; |
| 1782 current_ = current_->next(); |
| 1783 return tmp; |
| 1784 } |
| 1785 |
| 1786 private: |
| 1787 FreeListCategory* current_; |
| 1788 }; |
| 1789 |
1738 // The size range of blocks, in bytes. | 1790 // The size range of blocks, in bytes. |
1739 static const int kMinBlockSize = 3 * kPointerSize; | 1791 static const int kMinBlockSize = 3 * kPointerSize; |
1740 static const int kMaxBlockSize = Page::kAllocatableMemory; | 1792 static const int kMaxBlockSize = Page::kAllocatableMemory; |
1741 | 1793 |
1742 static const int kTiniestListMax = 0xa * kPointerSize; | 1794 static const int kTiniestListMax = 0xa * kPointerSize; |
1743 static const int kTinyListMax = 0x1f * kPointerSize; | 1795 static const int kTinyListMax = 0x1f * kPointerSize; |
1744 static const int kSmallListMax = 0xff * kPointerSize; | 1796 static const int kSmallListMax = 0xff * kPointerSize; |
1745 static const int kMediumListMax = 0x7ff * kPointerSize; | 1797 static const int kMediumListMax = 0x7ff * kPointerSize; |
1746 static const int kLargeListMax = 0x3fff * kPointerSize; | 1798 static const int kLargeListMax = 0x3fff * kPointerSize; |
1747 static const int kTinyAllocationMax = kTiniestListMax; | 1799 static const int kTinyAllocationMax = kTiniestListMax; |
1748 static const int kSmallAllocationMax = kTinyListMax; | 1800 static const int kSmallAllocationMax = kTinyListMax; |
1749 static const int kMediumAllocationMax = kSmallListMax; | 1801 static const int kMediumAllocationMax = kSmallListMax; |
1750 static const int kLargeAllocationMax = kMediumListMax; | 1802 static const int kLargeAllocationMax = kMediumListMax; |
1751 | 1803 |
1752 FreeSpace* FindNodeFor(int size_in_bytes, int* node_size); | 1804 FreeSpace* FindNodeFor(int size_in_bytes, int* node_size); |
1753 FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size); | |
1754 | 1805 |
1755 FreeListCategory* GetFreeListCategory(FreeListCategoryType category) { | 1806 // Walks all available categories for a given |type| and tries to retrieve |
1756 return &category_[category]; | 1807 // a node. Returns nullptr if the category is empty. |
1757 } | 1808 FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size); |
| 1809 |
| 1810 // Tries to retrieve a node from the first category in a given |type|. |
| 1811 // Returns nullptr if the category is empty. |
| 1812 FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size, |
| 1813 int minimum_size); |
| 1814 |
| 1815 // Searches a given |type| for a node of at least |minimum_size|. |
| 1816 FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size, |
| 1817 int minimum_size); |
1758 | 1818 |
1759 FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) { | 1819 FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) { |
1760 if (size_in_bytes <= kTiniestListMax) { | 1820 if (size_in_bytes <= kTiniestListMax) { |
1761 return kTiniest; | 1821 return kTiniest; |
1762 } else if (size_in_bytes <= kTinyListMax) { | 1822 } else if (size_in_bytes <= kTinyListMax) { |
1763 return kTiny; | 1823 return kTiny; |
1764 } else if (size_in_bytes <= kSmallListMax) { | 1824 } else if (size_in_bytes <= kSmallListMax) { |
1765 return kSmall; | 1825 return kSmall; |
1766 } else if (size_in_bytes <= kMediumListMax) { | 1826 } else if (size_in_bytes <= kMediumListMax) { |
1767 return kMedium; | 1827 return kMedium; |
1768 } else if (size_in_bytes <= kLargeListMax) { | 1828 } else if (size_in_bytes <= kLargeListMax) { |
1769 return kLarge; | 1829 return kLarge; |
1770 } | 1830 } |
1771 return kHuge; | 1831 return kHuge; |
1772 } | 1832 } |
1773 | 1833 |
1774 // The tiny categories are not used for fast allocation. | 1834 // The tiny categories are not used for fast allocation. |
1775 FreeListCategoryType SelectFastAllocationFreeListCategoryType( | 1835 FreeListCategoryType SelectFastAllocationFreeListCategoryType( |
1776 size_t size_in_bytes) { | 1836 size_t size_in_bytes) { |
1777 if (size_in_bytes <= kSmallAllocationMax) { | 1837 if (size_in_bytes <= kSmallAllocationMax) { |
1778 return kSmall; | 1838 return kSmall; |
1779 } else if (size_in_bytes <= kMediumAllocationMax) { | 1839 } else if (size_in_bytes <= kMediumAllocationMax) { |
1780 return kMedium; | 1840 return kMedium; |
1781 } else if (size_in_bytes <= kLargeAllocationMax) { | 1841 } else if (size_in_bytes <= kLargeAllocationMax) { |
1782 return kLarge; | 1842 return kLarge; |
1783 } | 1843 } |
1784 return kHuge; | 1844 return kHuge; |
1785 } | 1845 } |
1786 | 1846 |
| 1847 FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; } |
| 1848 |
1787 PagedSpace* owner_; | 1849 PagedSpace* owner_; |
1788 base::Mutex mutex_; | 1850 AtomicNumber<intptr_t> wasted_bytes_; |
1789 intptr_t wasted_bytes_; | 1851 FreeListCategory* categories_[kNumberOfCategories]; |
1790 FreeListCategory category_[kNumberOfCategories]; | 1852 |
| 1853 friend class FreeListCategory; |
1791 | 1854 |
1792 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); | 1855 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); |
1793 }; | 1856 }; |
1794 | 1857 |
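Both ForAllFreeListCategories(type, callback) and the private FreeListCategoryIterator walk a singly linked chain of same-typed categories; the callback variant caches the next pointer before invoking the callback so the callback may unlink the current category. A self-contained sketch of that walk (Node is a stand-in for FreeListCategory, not the V8 type):

    struct Node {
      Node* next = nullptr;
      int available = 0;
    };

    // Walks the chain; caching |next| first keeps the walk safe even if the
    // callback unlinks or reuses the current node.
    template <typename Callback>
    void ForAll(Node* head, Callback callback) {
      Node* current = head;
      while (current != nullptr) {
        Node* next = current->next;
        callback(current);
        current = next;
      }
    }

    // Example: total the available bytes across one category chain.
    int TotalAvailable(Node* head) {
      int total = 0;
      ForAll(head, [&total](Node* n) { total += n->available; });
      return total;
    }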
1795 | 1858 |
1796 class AllocationResult { | 1859 class AllocationResult { |
1797 public: | 1860 public: |
1798 // Implicit constructor from Object*. | 1861 // Implicit constructor from Object*. |
1799 AllocationResult(Object* object) // NOLINT | 1862 AllocationResult(Object* object) // NOLINT |
1800 : object_(object) { | 1863 : object_(object) { |
(...skipping 81 matching lines...) |
1882 | 1945 |
1883 private: | 1946 private: |
1884 LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info); | 1947 LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info); |
1885 | 1948 |
1886 void Close(); | 1949 void Close(); |
1887 | 1950 |
1888 Heap* heap_; | 1951 Heap* heap_; |
1889 AllocationInfo allocation_info_; | 1952 AllocationInfo allocation_info_; |
1890 }; | 1953 }; |
1891 | 1954 |
1892 | |
1893 class PagedSpace : public Space { | 1955 class PagedSpace : public Space { |
1894 public: | 1956 public: |
1895 static const intptr_t kCompactionMemoryWanted = 500 * KB; | 1957 static const intptr_t kCompactionMemoryWanted = 500 * KB; |
1896 | 1958 |
1897 // Creates a space with an id. | 1959 // Creates a space with an id. |
1898 PagedSpace(Heap* heap, AllocationSpace id, Executability executable); | 1960 PagedSpace(Heap* heap, AllocationSpace id, Executability executable); |
1899 | 1961 |
1900 ~PagedSpace() override { TearDown(); } | 1962 ~PagedSpace() override { TearDown(); } |
1901 | 1963 |
1902 // Set up the space using the given address range of virtual memory (from | 1964 // Set up the space using the given address range of virtual memory (from |
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1939 // The stats are rebuilt during sweeping by adding each page to the | 2001 // The stats are rebuilt during sweeping by adding each page to the |
1940 // capacity and the size when it is encountered. As free spaces are | 2002 // capacity and the size when it is encountered. As free spaces are |
1941 // discovered during the sweeping they are subtracted from the size and added | 2003 // discovered during the sweeping they are subtracted from the size and added |
1942 // to the available and wasted totals. | 2004 // to the available and wasted totals. |
1943 void ClearStats() { | 2005 void ClearStats() { |
1944 accounting_stats_.ClearSize(); | 2006 accounting_stats_.ClearSize(); |
1945 free_list_.ResetStats(); | 2007 free_list_.ResetStats(); |
1946 ResetFreeListStatistics(); | 2008 ResetFreeListStatistics(); |
1947 } | 2009 } |
1948 | 2010 |
1949 // Increases the number of available bytes of that space. | |
1950 void AddToAccountingStats(intptr_t bytes) { | |
1951 accounting_stats_.DeallocateBytes(bytes); | |
1952 } | |
1953 | |
1954 // Available bytes without growing. These are the bytes on the free list. | 2011 // Available bytes without growing. These are the bytes on the free list. |
1955 // The bytes in the linear allocation area are not included in this total | 2012 // The bytes in the linear allocation area are not included in this total |
1956 // because updating the stats would slow down allocation. New pages are | 2013 // because updating the stats would slow down allocation. New pages are |
1957 // immediately added to the free list so they show up here. | 2014 // immediately added to the free list so they show up here. |
1958 intptr_t Available() override { return free_list_.Available(); } | 2015 intptr_t Available() override { return free_list_.Available(); } |
1959 | 2016 |
1960 // Allocated bytes in this space. Garbage bytes that were not found due to | 2017 // Allocated bytes in this space. Garbage bytes that were not found due to |
1961 // concurrent sweeping are counted as being allocated! The bytes in the | 2018 // concurrent sweeping are counted as being allocated! The bytes in the |
1962 // current linear allocation area (between top and limit) are also counted | 2019 // current linear allocation area (between top and limit) are also counted |
1963 // here. | 2020 // here. |
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1999 // Allocate the requested number of bytes in the space and consider allocation | 2056 // Allocate the requested number of bytes in the space and consider allocation |
2000 // alignment if needed. | 2057 // alignment if needed. |
2001 MUST_USE_RESULT inline AllocationResult AllocateRaw( | 2058 MUST_USE_RESULT inline AllocationResult AllocateRaw( |
2002 int size_in_bytes, AllocationAlignment alignment); | 2059 int size_in_bytes, AllocationAlignment alignment); |
2003 | 2060 |
2004 // Give a block of memory to the space's free list. It might be added to | 2061 // Give a block of memory to the space's free list. It might be added to |
2005 // the free list or accounted as waste. | 2062 // the free list or accounted as waste. |
2006 // If add_to_freelist is false then just accounting stats are updated and | 2063 // If add_to_freelist is false then just accounting stats are updated and |
2007 // no attempt to add area to free list is made. | 2064 // no attempt to add area to free list is made. |
2008 int Free(Address start, int size_in_bytes) { | 2065 int Free(Address start, int size_in_bytes) { |
2009 int wasted = free_list_.Free(start, size_in_bytes); | 2066 int wasted = free_list_.Free(start, size_in_bytes, kLinkCategory); |
2010 accounting_stats_.DeallocateBytes(size_in_bytes); | 2067 accounting_stats_.DeallocateBytes(size_in_bytes); |
2011 return size_in_bytes - wasted; | 2068 return size_in_bytes - wasted; |
2012 } | 2069 } |
2013 | 2070 |
| 2071 int UnaccountedFree(Address start, int size_in_bytes) { |
| 2072 int wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory); |
| 2073 return size_in_bytes - wasted; |
| 2074 } |
| 2075 |
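Free() and the new UnaccountedFree() differ only in the FreeMode they pass down: judging from the names, kLinkCategory makes the freed bytes visible to allocation right away (and Free() also updates the accounting stats), while kDoNotLinkCategory records the bytes on the page's category without publishing it until it is relinked later, e.g. via RelinkFreeListCategories(). A rough, self-contained sketch of that link-now vs. link-later pattern (all names below are invented for the illustration):

    #include <vector>

    enum LinkMode { kLinkNow, kLinkLater };  // analogue of kLinkCategory / kDoNotLinkCategory

    struct Category { bool linked = false; int available = 0; };

    struct SpaceLike {
      std::vector<Category*> linked_categories;

      // Publishes a category so allocation can see it; idempotent.
      void Relink(Category* category) {
        if (!category->linked) {
          category->linked = true;
          linked_categories.push_back(category);
        }
      }

      // Frees |bytes| into |category|; with kLinkNow the category becomes
      // allocatable immediately, with kLinkLater a later bulk Relink()
      // (e.g. after sweeping the page) publishes it.
      void Free(Category* category, int bytes, LinkMode mode) {
        category->available += bytes;
        if (mode == kLinkNow) Relink(category);
      }
    };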
2014 void ResetFreeList() { free_list_.Reset(); } | 2076 void ResetFreeList() { free_list_.Reset(); } |
2015 | 2077 |
2016 // Set space allocation info. | 2078 // Set space allocation info. |
2017 void SetTopAndLimit(Address top, Address limit) { | 2079 void SetTopAndLimit(Address top, Address limit) { |
2018 DCHECK(top == limit || | 2080 DCHECK(top == limit || |
2019 Page::FromAddress(top) == Page::FromAddress(limit - 1)); | 2081 Page::FromAddress(top) == Page::FromAddress(limit - 1)); |
2020 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | 2082 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
2021 allocation_info_.Reset(top, limit); | 2083 allocation_info_.Reset(top, limit); |
2022 } | 2084 } |
2023 | 2085 |
2024 // Empty space allocation info, returning unused area to free list. | 2086 // Empty space allocation info, returning unused area to free list. |
2025 void EmptyAllocationInfo() { | 2087 void EmptyAllocationInfo() { |
2026 // Mark the old linear allocation area with a free space map so it can be | 2088 // Mark the old linear allocation area with a free space map so it can be |
2027 // skipped when scanning the heap. | 2089 // skipped when scanning the heap. |
2028 int old_linear_size = static_cast<int>(limit() - top()); | 2090 int old_linear_size = static_cast<int>(limit() - top()); |
2029 Free(top(), old_linear_size); | 2091 Free(top(), old_linear_size); |
2030 SetTopAndLimit(NULL, NULL); | 2092 SetTopAndLimit(NULL, NULL); |
2031 } | 2093 } |
2032 | 2094 |
2033 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); } | 2095 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); } |
2034 | 2096 |
2035 void IncreaseCapacity(int size); | 2097 void IncreaseCapacity(int size); |
2036 | 2098 |
2037 // Releases an unused page and shrinks the space. | 2099 // Releases an unused page and shrinks the space. |
2038 void ReleasePage(Page* page, bool evict_free_list_items); | 2100 void ReleasePage(Page* page); |
2039 | 2101 |
2040 // The dummy page that anchors the linked list of pages. | 2102 // The dummy page that anchors the linked list of pages. |
2041 Page* anchor() { return &anchor_; } | 2103 Page* anchor() { return &anchor_; } |
2042 | 2104 |
2043 #ifdef VERIFY_HEAP | 2105 #ifdef VERIFY_HEAP |
2044 // Verify integrity of this space. | 2106 // Verify integrity of this space. |
2045 virtual void Verify(ObjectVisitor* visitor); | 2107 virtual void Verify(ObjectVisitor* visitor); |
2046 | 2108 |
2047 // Overridden by subclasses to verify space-specific object | 2109 // Overridden by subclasses to verify space-specific object |
2048 // properties (e.g., only maps or free-list nodes are in map space). | 2110 // properties (e.g., only maps or free-list nodes are in map space). |
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2084 virtual bool is_local() { return false; } | 2146 virtual bool is_local() { return false; } |
2085 | 2147 |
2086 // Merges {other} into the current space. Note that this modifies {other}, | 2148 // Merges {other} into the current space. Note that this modifies {other}, |
2087 // e.g., removes its bump pointer area and resets statistics. | 2149 // e.g., removes its bump pointer area and resets statistics. |
2088 void MergeCompactionSpace(CompactionSpace* other); | 2150 void MergeCompactionSpace(CompactionSpace* other); |
2089 | 2151 |
2090 // Refills the free list from the corresponding free list filled by the | 2152 // Refills the free list from the corresponding free list filled by the |
2091 // sweeper. | 2153 // sweeper. |
2092 virtual void RefillFreeList(); | 2154 virtual void RefillFreeList(); |
2093 | 2155 |
| 2156 FreeList* free_list() { return &free_list_; } |
| 2157 |
| 2158 base::Mutex* mutex() { return &space_mutex_; } |
| 2159 |
| 2160 inline void UnlinkFreeListCategories(Page* page); |
| 2161 inline intptr_t RelinkFreeListCategories(Page* page); |
| 2162 |
2094 protected: | 2163 protected: |
2095 void AddMemory(Address start, intptr_t size); | |
2096 | |
2097 void MoveOverFreeMemory(PagedSpace* other); | |
2098 | |
2099 // PagedSpaces that should be included in snapshots have different, i.e., | 2164 // PagedSpaces that should be included in snapshots have different, i.e., |
2100 // smaller, initial pages. | 2165 // smaller, initial pages. |
2101 virtual bool snapshotable() { return true; } | 2166 virtual bool snapshotable() { return true; } |
2102 | 2167 |
2103 FreeList* free_list() { return &free_list_; } | |
2104 | |
2105 bool HasPages() { return anchor_.next_page() != &anchor_; } | 2168 bool HasPages() { return anchor_.next_page() != &anchor_; } |
2106 | 2169 |
2107 // Cleans up the space, frees all pages in this space except those belonging | 2170 // Cleans up the space, frees all pages in this space except those belonging |
2108 // to the initial chunk, uncommits addresses in the initial chunk. | 2171 // to the initial chunk, uncommits addresses in the initial chunk. |
2109 void TearDown(); | 2172 void TearDown(); |
2110 | 2173 |
2111 // Expands the space by allocating a fixed number of pages. Returns false if | 2174 // Expands the space by allocating a fixed number of pages. Returns false if |
2112 // it cannot allocate requested number of pages from OS, or if the hard heap | 2175 // it cannot allocate requested number of pages from OS, or if the hard heap |
2113 // size limit has been hit. | 2176 // size limit has been hit. |
2114 bool Expand(); | 2177 bool Expand(); |
(...skipping 677 matching lines...) |
2792 // ----------------------------------------------------------------------------- | 2855 // ----------------------------------------------------------------------------- |
2793 // Compaction space that is used temporarily during compaction. | 2856 // Compaction space that is used temporarily during compaction. |
2794 | 2857 |
2795 class CompactionSpace : public PagedSpace { | 2858 class CompactionSpace : public PagedSpace { |
2796 public: | 2859 public: |
2797 CompactionSpace(Heap* heap, AllocationSpace id, Executability executable) | 2860 CompactionSpace(Heap* heap, AllocationSpace id, Executability executable) |
2798 : PagedSpace(heap, id, executable) {} | 2861 : PagedSpace(heap, id, executable) {} |
2799 | 2862 |
2800 bool is_local() override { return true; } | 2863 bool is_local() override { return true; } |
2801 | 2864 |
2802 void RefillFreeList() override; | |
2803 | |
2804 protected: | 2865 protected: |
2805 // The space is temporary and not included in any snapshots. | 2866 // The space is temporary and not included in any snapshots. |
2806 bool snapshotable() override { return false; } | 2867 bool snapshotable() override { return false; } |
2807 | 2868 |
2808 MUST_USE_RESULT HeapObject* SweepAndRetryAllocation( | 2869 MUST_USE_RESULT HeapObject* SweepAndRetryAllocation( |
2809 int size_in_bytes) override; | 2870 int size_in_bytes) override; |
2810 }; | 2871 }; |
2811 | 2872 |
2812 | 2873 |
2813 // A collection of |CompactionSpace|s used by a single compaction task. | 2874 // A collection of |CompactionSpace|s used by a single compaction task. |
(...skipping 213 matching lines...) |
3027 count = 0; | 3088 count = 0; |
3028 } | 3089 } |
3029 // Must be small, since an iteration is used for lookup. | 3090 // Must be small, since an iteration is used for lookup. |
3030 static const int kMaxComments = 64; | 3091 static const int kMaxComments = 64; |
3031 }; | 3092 }; |
3032 #endif | 3093 #endif |
3033 } // namespace internal | 3094 } // namespace internal |
3034 } // namespace v8 | 3095 } // namespace v8 |
3035 | 3096 |
3036 #endif // V8_HEAP_SPACES_H_ | 3097 #endif // V8_HEAP_SPACES_H_ |