| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
| 7 | 7 |
| 8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
| 9 #include "src/atomic-utils.h" | 9 #include "src/atomic-utils.h" |
| 10 #include "src/base/atomicops.h" | 10 #include "src/base/atomicops.h" |
| 11 #include "src/base/bits.h" | 11 #include "src/base/bits.h" |
| 12 #include "src/base/platform/mutex.h" | 12 #include "src/base/platform/mutex.h" |
| 13 #include "src/flags.h" | 13 #include "src/flags.h" |
| 14 #include "src/hashmap.h" | 14 #include "src/hashmap.h" |
| 15 #include "src/list.h" | 15 #include "src/list.h" |
| 16 #include "src/objects.h" | 16 #include "src/objects.h" |
| 17 #include "src/utils.h" | 17 #include "src/utils.h" |
| 18 | 18 |
| 19 namespace v8 { | 19 namespace v8 { |
| 20 namespace internal { | 20 namespace internal { |
| 21 | 21 |
| 22 class AllocationInfo; | 22 class AllocationInfo; |
| 23 class AllocationObserver; | 23 class AllocationObserver; |
| 24 class CompactionSpace; | 24 class CompactionSpace; |
| 25 class CompactionSpaceCollection; | 25 class CompactionSpaceCollection; |
| 26 class FreeList; | 26 class FreeList; |
| 27 class Isolate; | 27 class Isolate; |
| 28 class MemoryAllocator; | 28 class MemoryAllocator; |
| 29 class MemoryChunk; | 29 class MemoryChunk; |
| 30 class Page; |
| 30 class PagedSpace; | 31 class PagedSpace; |
| 31 class SemiSpace; | 32 class SemiSpace; |
| 32 class SkipList; | 33 class SkipList; |
| 33 class SlotsBuffer; | 34 class SlotsBuffer; |
| 34 class SlotSet; | 35 class SlotSet; |
| 35 class TypedSlotSet; | 36 class TypedSlotSet; |
| 36 class Space; | 37 class Space; |
| 37 | 38 |
| 38 // ----------------------------------------------------------------------------- | 39 // ----------------------------------------------------------------------------- |
| 39 // Heap structures: | 40 // Heap structures: |
| (...skipping 242 matching lines...) |
| 282 // Clear all cells till the cell containing the last index. | 283 // Clear all cells till the cell containing the last index. |
| 283 for (uint32_t i = start_cell_index; i < end_cell_index; i++) { | 284 for (uint32_t i = start_cell_index; i < end_cell_index; i++) { |
| 284 cells()[i] = 0; | 285 cells()[i] = 0; |
| 285 } | 286 } |
| 286 // Clear all bits in the last cell till the last bit before index. | 287 // Clear all bits in the last cell till the last bit before index. |
| 287 uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1); | 288 uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1); |
| 288 cells()[end_cell_index] &= clear_mask; | 289 cells()[end_cell_index] &= clear_mask; |
| 289 } | 290 } |
| 290 }; | 291 }; |
| 291 | 292 |
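A small worked example of the clearing mask used above; the values are illustrative only and not taken from the source:

    // Illustrative: clear the bits below |index| in the last cell.
    uint32_t index_in_cell = 5;                           // assume IndexInCell(index) == 5
    uint32_t clear_mask = ~((1u << index_in_cell) - 1);   // 0xFFFFFFE0
    uint32_t cell = 0xDEADBEEF;
    cell &= clear_mask;                                    // bits 0..4 cleared, bits 5..31 kept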
| 293 enum FreeListCategoryType { |
| 294 kTiniest, |
| 295 kTiny, |
| 296 kSmall, |
| 297 kMedium, |
| 298 kLarge, |
| 299 kHuge, |
| 300 |
| 301 kFirstCategory = kTiniest, |
| 302 kLastCategory = kHuge, |
| 303 kNumberOfCategories = kLastCategory + 1, |
| 304 kInvalidCategory |
| 305 }; |
| 306 |
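The sentinel values above are what later loops in this header rely on; a minimal sketch of that iteration pattern (illustrative only):

    // Illustrative: visit every real category type via the enum bounds.
    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
      FreeListCategoryType type = static_cast<FreeListCategoryType>(i);
      // ... look up or process the category of this type ...
    }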
| 307 // A free list category maintains a linked list of free memory blocks. |
| 308 class FreeListCategory { |
| 309 public: |
| 310 static const int kSize = kIntSize + // FreeListCategoryType type_ |
| 311 kIntSize + // int available_ |
| 312 kPointerSize + // FreeSpace* top_ |
| 313 kPointerSize + // FreeListCategory* prev_ |
| 314 kPointerSize; // FreeListCategory* next_ |
| 315 |
| 316 FreeListCategory() |
| 317 : type_(kInvalidCategory), |
| 318 available_(0), |
| 319 top_(nullptr), |
| 320 prev_(nullptr), |
| 321 next_(nullptr) {} |
| 322 |
| 323 void Initialize(FreeListCategoryType type) { |
| 324 type_ = type; |
| 325 available_ = 0; |
| 326 top_ = nullptr; |
| 327 prev_ = nullptr; |
| 328 next_ = nullptr; |
| 329 } |
| 330 |
| 331 void Invalidate(); |
| 332 |
| 333 void Reset(); |
| 334 |
| 335 void ResetStats() { Reset(); } |
| 336 |
| 337 void RepairFreeList(Heap* heap); |
| 338 |
| 339 // Relinks the category into the currently owning free list. Requires that the |
| 340 // category is currently unlinked. |
| 341 void Relink(); |
| 342 |
| 343 bool Free(FreeSpace* node, int size_in_bytes, bool keep_local = false); |
| 344 |
| 345 // Picks a node from the list and stores its size in |node_size|. Returns |
| 346 // nullptr if the category is empty. |
| 347 FreeSpace* PickNodeFromList(int* node_size); |
| 348 |
| 349 // Performs a single try to pick a node of at least |minimum_size| from the |
| 350 // category. Stores the actual size in |node_size|. Returns nullptr if no |
| 351 // node is found. |
| 352 FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size); |
| 353 |
| 354 // Picks a node of at least |minimum_size| from the category. Stores the |
| 355 // actual size in |node_size|. Returns nullptr if no node is found. |
| 356 FreeSpace* SearchForNodeInList(int minimum_size, int* node_size); |
| 357 |
| 358 inline FreeList* owner(); |
| 359 inline bool is_linked(); |
| 360 bool is_empty() { return top() == nullptr; } |
| 361 int available() const { return available_; } |
| 362 |
| 363 #ifdef DEBUG |
| 364 intptr_t SumFreeList(); |
| 365 int FreeListLength(); |
| 366 bool IsVeryLong(); |
| 367 #endif |
| 368 |
| 369 private: |
| 370 // For debug builds we accurately compute free list lengths up until |
| 371 // {kVeryLongFreeList} by manually walking the list. |
| 372 static const int kVeryLongFreeList = 500; |
| 373 |
| 374 inline Page* page(); |
| 375 |
| 376 FreeSpace* top() { return top_; } |
| 377 void set_top(FreeSpace* top) { top_ = top; } |
| 378 FreeListCategory* prev() { return prev_; } |
| 379 void set_prev(FreeListCategory* prev) { prev_ = prev; } |
| 380 FreeListCategory* next() { return next_; } |
| 381 void set_next(FreeListCategory* next) { next_ = next; } |
| 382 |
| 383 // |type_|: The type of this free list category. |
| 384 FreeListCategoryType type_; |
| 385 |
| 386 // |available_|: Total available bytes in all blocks of this free list |
| 387 // category. |
| 388 int available_; |
| 389 |
| 390 // |top_|: Points to the top FreeSpace* in the free list category. |
| 391 FreeSpace* top_; |
| 392 |
| 393 FreeListCategory* prev_; |
| 394 FreeListCategory* next_; |
| 395 |
| 396 friend class FreeList; |
| 397 friend class PagedSpace; |
| 398 }; |
| 292 | 399 |
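A hedged sketch of the single-category call pattern declared above, assuming |page|, |free_space|, and |size_in_bytes| are supplied by the caller (those names are illustrative, not part of this header):

    // Illustrative: hand a free block to a page-local category, then take a node back.
    FreeListCategory* category = page->free_list_category(kSmall);
    category->Free(free_space, size_in_bytes);                 // link the node into the category
    int node_size = 0;
    FreeSpace* node = category->PickNodeFromList(&node_size);  // nullptr when the category is empty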
| 293 // MemoryChunk represents a memory region owned by a specific space. | 400 // MemoryChunk represents a memory region owned by a specific space. |
| 294 // It is divided into the header and the body. Chunk start is always | 401 // It is divided into the header and the body. Chunk start is always |
| 295 // 1MB aligned. Start of the body is aligned so it can accommodate | 402 // 1MB aligned. Start of the body is aligned so it can accommodate |
| 296 // any heap object. | 403 // any heap object. |
| 297 class MemoryChunk { | 404 class MemoryChunk { |
| 298 public: | 405 public: |
| 299 enum MemoryChunkFlags { | 406 enum MemoryChunkFlags { |
| 300 IS_EXECUTABLE, | 407 IS_EXECUTABLE, |
| 301 POINTERS_TO_HERE_ARE_INTERESTING, | 408 POINTERS_TO_HERE_ARE_INTERESTING, |
| (...skipping 102 matching lines...) |
| 404 | 511 |
| 405 static const size_t kMinHeaderSize = | 512 static const size_t kMinHeaderSize = |
| 406 kWriteBarrierCounterOffset + | 513 kWriteBarrierCounterOffset + |
| 407 kIntptrSize // intptr_t write_barrier_counter_ | 514 kIntptrSize // intptr_t write_barrier_counter_ |
| 408 + kPointerSize // AtomicValue high_water_mark_ | 515 + kPointerSize // AtomicValue high_water_mark_ |
| 409 + kPointerSize // base::Mutex* mutex_ | 516 + kPointerSize // base::Mutex* mutex_ |
| 410 + kPointerSize // base::AtomicWord parallel_sweeping_ | 517 + kPointerSize // base::AtomicWord parallel_sweeping_ |
| 411 + kPointerSize // AtomicValue parallel_compaction_ | 518 + kPointerSize // AtomicValue parallel_compaction_ |
| 412 + 2 * kPointerSize // AtomicNumber free-list statistics | 519 + 2 * kPointerSize // AtomicNumber free-list statistics |
| 413 + kPointerSize // AtomicValue next_chunk_ | 520 + kPointerSize // AtomicValue next_chunk_ |
| 414 + kPointerSize; // AtomicValue prev_chunk_ | 521 + kPointerSize // AtomicValue prev_chunk_ |
| 522 // FreeListCategory categories_[kNumberOfCategories] |
| 523 + FreeListCategory::kSize * kNumberOfCategories; |
| 415 | 524 |
| 416 // We add some more space to the computed header size to account for missing | 525 // We add some more space to the computed header size to account for missing |
| 417 // alignment requirements in our computation. | 526 // alignment requirements in our computation. |
| 418 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. | 527 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. |
| 419 static const size_t kHeaderSize = kMinHeaderSize; | 528 static const size_t kHeaderSize = kMinHeaderSize; |
| 420 | 529 |
| 421 static const int kBodyOffset = | 530 static const int kBodyOffset = |
| 422 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); | 531 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); |
| 423 | 532 |
| 424 // The start offset of the object area in a page. Aligned to both maps and | 533 // The start offset of the object area in a page. Aligned to both maps and |
| (...skipping 161 matching lines...) |
| 586 | 695 |
| 587 bool IsEvacuationCandidate() { | 696 bool IsEvacuationCandidate() { |
| 588 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE))); | 697 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE))); |
| 589 return IsFlagSet(EVACUATION_CANDIDATE); | 698 return IsFlagSet(EVACUATION_CANDIDATE); |
| 590 } | 699 } |
| 591 | 700 |
| 592 bool CanAllocate() { | 701 bool CanAllocate() { |
| 593 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); | 702 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); |
| 594 } | 703 } |
| 595 | 704 |
| 596 void MarkEvacuationCandidate() { | |
| 597 DCHECK(!IsFlagSet(NEVER_EVACUATE)); | |
| 598 DCHECK_NULL(old_to_old_slots_); | |
| 599 DCHECK_NULL(typed_old_to_old_slots_); | |
| 600 SetFlag(EVACUATION_CANDIDATE); | |
| 601 } | |
| 602 | |
| 603 void ClearEvacuationCandidate() { | |
| 604 DCHECK_NULL(old_to_old_slots_); | |
| 605 DCHECK_NULL(typed_old_to_old_slots_); | |
| 606 ClearFlag(EVACUATION_CANDIDATE); | |
| 607 } | |
| 608 | |
| 609 bool ShouldSkipEvacuationSlotRecording() { | 705 bool ShouldSkipEvacuationSlotRecording() { |
| 610 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; | 706 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; |
| 611 } | 707 } |
| 612 | 708 |
| 613 Executability executable() { | 709 Executability executable() { |
| 614 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | 710 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
| 615 } | 711 } |
| 616 | 712 |
| 617 bool InNewSpace() { | 713 bool InNewSpace() { |
| 618 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; | 714 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; |
| (...skipping 90 matching lines...) |
| 709 | 805 |
| 710 // PagedSpace free-list statistics. | 806 // PagedSpace free-list statistics. |
| 711 AtomicNumber<intptr_t> available_in_free_list_; | 807 AtomicNumber<intptr_t> available_in_free_list_; |
| 712 AtomicNumber<intptr_t> wasted_memory_; | 808 AtomicNumber<intptr_t> wasted_memory_; |
| 713 | 809 |
| 714 // next_chunk_ holds a pointer of type MemoryChunk | 810 // next_chunk_ holds a pointer of type MemoryChunk |
| 715 AtomicValue<MemoryChunk*> next_chunk_; | 811 AtomicValue<MemoryChunk*> next_chunk_; |
| 716 // prev_chunk_ holds a pointer of type MemoryChunk | 812 // prev_chunk_ holds a pointer of type MemoryChunk |
| 717 AtomicValue<MemoryChunk*> prev_chunk_; | 813 AtomicValue<MemoryChunk*> prev_chunk_; |
| 718 | 814 |
| 815 FreeListCategory categories_[kNumberOfCategories]; |
| 816 |
| 719 private: | 817 private: |
| 720 void InitializeReservedMemory() { reservation_.Reset(); } | 818 void InitializeReservedMemory() { reservation_.Reset(); } |
| 721 | 819 |
| 722 friend class MemoryAllocator; | 820 friend class MemoryAllocator; |
| 723 friend class MemoryChunkValidator; | 821 friend class MemoryChunkValidator; |
| 724 }; | 822 }; |
| 725 | 823 |
| 726 enum FreeListCategoryType { | |
| 727 kTiniest, | |
| 728 kTiny, | |
| 729 kSmall, | |
| 730 kMedium, | |
| 731 kLarge, | |
| 732 kHuge, | |
| 733 | |
| 734 kFirstCategory = kTiniest, | |
| 735 kLastCategory = kHuge, | |
| 736 kNumberOfCategories = kLastCategory + 1 | |
| 737 }; | |
| 738 | |
| 739 // ----------------------------------------------------------------------------- | 824 // ----------------------------------------------------------------------------- |
| 740 // A page is a memory chunk of size 1MB. Large object pages may be larger. | 825 // A page is a memory chunk of size 1MB. Large object pages may be larger. |
| 741 // | 826 // |
| 742 // The only way to get a page pointer is by calling factory methods: | 827 // The only way to get a page pointer is by calling factory methods: |
| 743 // Page* p = Page::FromAddress(addr); or | 828 // Page* p = Page::FromAddress(addr); or |
| 744 // Page* p = Page::FromAllocationTop(top); | 829 // Page* p = Page::FromAllocationTop(top); |
| 745 class Page : public MemoryChunk { | 830 class Page : public MemoryChunk { |
| 746 public: | 831 public: |
| 747 // Returns the page containing a given address. The address ranges | 832 // Returns the page containing a given address. The address ranges |
| 748 // from [page_addr .. page_addr + kPageSize[ | 833 // from [page_addr .. page_addr + kPageSize[ |
| (...skipping 83 matching lines...) |
| 832 return concurrent_sweeping_state().Value() == kSweepingDone; | 917 return concurrent_sweeping_state().Value() == kSweepingDone; |
| 833 } | 918 } |
| 834 | 919 |
| 835 void ResetFreeListStatistics(); | 920 void ResetFreeListStatistics(); |
| 836 | 921 |
| 837 int LiveBytesFromFreeList() { | 922 int LiveBytesFromFreeList() { |
| 838 return static_cast<int>(area_size() - wasted_memory() - | 923 return static_cast<int>(area_size() - wasted_memory() - |
| 839 available_in_free_list()); | 924 available_in_free_list()); |
| 840 } | 925 } |
| 841 | 926 |
| 927 template <typename Callback> |
| 928 inline void ForAllFreeListCategories(Callback callback) { |
| 929 for (int i = kFirstCategory; i < kNumberOfCategories; i++) { |
| 930 callback(&categories_[i]); |
| 931 } |
| 932 } |
| 933 |
| 934 FreeListCategory* free_list_category(FreeListCategoryType type) { |
| 935 return &categories_[type]; |
| 936 } |
| 937 |
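For illustration, the per-page iteration above combines naturally with a lambda, mirroring how FreeList uses the same pattern later in this header:

    // Illustrative: sum the free bytes tracked by one page's categories.
    intptr_t free_bytes = 0;
    page->ForAllFreeListCategories([&free_bytes](FreeListCategory* category) {
      free_bytes += category->available();
    });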
| 842 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \ | 938 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \ |
| 843 type name() { return name##_.Value(); } \ | 939 type name() { return name##_.Value(); } \ |
| 844 void set_##name(type name) { name##_.SetValue(name); } \ | 940 void set_##name(type name) { name##_.SetValue(name); } \ |
| 845 void add_##name(type name) { name##_.Increment(name); } | 941 void add_##name(type name) { name##_.Increment(name); } |
| 846 | 942 |
| 847 FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory) | 943 FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory) |
| 848 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list) | 944 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list) |
| 849 | 945 |
| 850 #undef FRAGMENTATION_STATS_ACCESSORS | 946 #undef FRAGMENTATION_STATS_ACCESSORS |
| 851 | 947 |
| 852 #ifdef DEBUG | 948 #ifdef DEBUG |
| 853 void Print(); | 949 void Print(); |
| 854 #endif // DEBUG | 950 #endif // DEBUG |
| 855 | 951 |
| 952 inline void MarkNeverAllocateForTesting(); |
| 953 inline void MarkEvacuationCandidate(); |
| 954 inline void ClearEvacuationCandidate(); |
| 955 |
| 956 private: |
| 957 inline void InitializeFreeListCategories(); |
| 958 |
| 856 friend class MemoryAllocator; | 959 friend class MemoryAllocator; |
| 857 }; | 960 }; |
| 858 | 961 |
| 859 | 962 |
| 860 class LargePage : public MemoryChunk { | 963 class LargePage : public MemoryChunk { |
| 861 public: | 964 public: |
| 862 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } | 965 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } |
| 863 | 966 |
| 864 inline LargePage* next_page() { | 967 inline LargePage* next_page() { |
| 865 return static_cast<LargePage*>(next_chunk()); | 968 return static_cast<LargePage*>(next_chunk()); |
| (...skipping 619 matching lines...) |
| 1485 | 1588 |
| 1486 // Zero out all the allocation statistics (i.e., no capacity). | 1589 // Zero out all the allocation statistics (i.e., no capacity). |
| 1487 void Clear() { | 1590 void Clear() { |
| 1488 capacity_ = 0; | 1591 capacity_ = 0; |
| 1489 max_capacity_ = 0; | 1592 max_capacity_ = 0; |
| 1490 size_ = 0; | 1593 size_ = 0; |
| 1491 } | 1594 } |
| 1492 | 1595 |
| 1493 void ClearSize() { size_ = capacity_; } | 1596 void ClearSize() { size_ = capacity_; } |
| 1494 | 1597 |
| 1495 // Reset the allocation statistics (i.e., available = capacity with no wasted | |
| 1496 // or allocated bytes). | |
| 1497 void Reset() { | |
| 1498 size_ = 0; | |
| 1499 } | |
| 1500 | |
| 1501 // Accessors for the allocation statistics. | 1598 // Accessors for the allocation statistics. |
| 1502 intptr_t Capacity() { return capacity_; } | 1599 intptr_t Capacity() { return capacity_; } |
| 1503 intptr_t MaxCapacity() { return max_capacity_; } | 1600 intptr_t MaxCapacity() { return max_capacity_; } |
| 1504 intptr_t Size() { | 1601 intptr_t Size() { |
| 1505 CHECK_GE(size_, 0); | 1602 CHECK_GE(size_, 0); |
| 1506 return size_; | 1603 return size_; |
| 1507 } | 1604 } |
| 1508 | 1605 |
| 1509 // Grow the space by adding available bytes. They are initially marked as | 1606 // Grow the space by adding available bytes. They are initially marked as |
| 1510 // being in use (part of the size), but will normally be immediately freed, | 1607 // being in use (part of the size), but will normally be immediately freed, |
| 1511 // putting them on the free list and removing them from size_. | 1608 // putting them on the free list and removing them from size_. |
| 1512 void ExpandSpace(int size_in_bytes) { | 1609 void ExpandSpace(int size_in_bytes) { |
| 1513 capacity_ += size_in_bytes; | 1610 capacity_ += size_in_bytes; |
| 1514 size_ += size_in_bytes; | 1611 size_ += size_in_bytes; |
| 1515 if (capacity_ > max_capacity_) { | 1612 if (capacity_ > max_capacity_) { |
| 1516 max_capacity_ = capacity_; | 1613 max_capacity_ = capacity_; |
| 1517 } | 1614 } |
| 1518 CHECK(size_ >= 0); | 1615 CHECK(size_ >= 0); |
| 1519 } | 1616 } |
| 1520 | 1617 |
| 1521 // Shrink the space by removing available bytes. Since shrinking is done | 1618 // Shrink the space by removing available bytes. Since shrinking is done |
| 1522 // during sweeping, bytes have been marked as being in use (part of the size) | 1619 // during sweeping, bytes have been marked as being in use (part of the size) |
| 1523 // and are hereby freed. | 1620 // and are hereby freed. |
| 1524 void ShrinkSpace(int size_in_bytes) { | 1621 void ShrinkSpace(int size_in_bytes) { |
| 1525 capacity_ -= size_in_bytes; | 1622 capacity_ -= size_in_bytes; |
| 1526 size_ -= size_in_bytes; | 1623 size_ -= size_in_bytes; |
| 1527 CHECK(size_ >= 0); | 1624 CHECK_GE(size_, 0); |
| 1528 } | 1625 } |
| 1529 | 1626 |
| 1530 // Allocate from available bytes (available -> size). | 1627 // Allocate from available bytes (available -> size). |
| 1531 void AllocateBytes(intptr_t size_in_bytes) { | 1628 void AllocateBytes(intptr_t size_in_bytes) { |
| 1532 size_ += size_in_bytes; | 1629 size_ += size_in_bytes; |
| 1533 CHECK(size_ >= 0); | 1630 CHECK_GE(size_, 0); |
| 1534 } | 1631 } |
| 1535 | 1632 |
| 1536 // Free allocated bytes, making them available (size -> available). | 1633 // Free allocated bytes, making them available (size -> available). |
| 1537 void DeallocateBytes(intptr_t size_in_bytes) { | 1634 void DeallocateBytes(intptr_t size_in_bytes) { |
| 1538 size_ -= size_in_bytes; | 1635 size_ -= size_in_bytes; |
| 1539 CHECK_GE(size_, 0); | 1636 CHECK_GE(size_, 0); |
| 1540 } | 1637 } |
| 1541 | 1638 |
| 1542 // Merge {other} into {this}. | 1639 // Merge {other} into {this}. |
| 1543 void Merge(const AllocationStats& other) { | 1640 void Merge(const AllocationStats& other) { |
| (...skipping 18 matching lines...) |
| 1562 // bookkeeping structures) currently in the space. | 1659 // bookkeeping structures) currently in the space. |
| 1563 intptr_t capacity_; | 1660 intptr_t capacity_; |
| 1564 | 1661 |
| 1565 // |max_capacity_|: The maximum capacity ever observed. | 1662 // |max_capacity_|: The maximum capacity ever observed. |
| 1566 intptr_t max_capacity_; | 1663 intptr_t max_capacity_; |
| 1567 | 1664 |
| 1568 // |size_|: The number of allocated bytes. | 1665 // |size_|: The number of allocated bytes. |
| 1569 intptr_t size_; | 1666 intptr_t size_; |
| 1570 }; | 1667 }; |
| 1571 | 1668 |
| 1572 | |
| 1573 // A free list category maintains a linked list of free memory blocks. | |
| 1574 class FreeListCategory { | |
| 1575 public: | |
| 1576 FreeListCategory() : top_(nullptr), end_(nullptr), available_(0) {} | |
| 1577 | |
| 1578 void Initialize(FreeList* owner, FreeListCategoryType type) { | |
| 1579 owner_ = owner; | |
| 1580 type_ = type; | |
| 1581 } | |
| 1582 | |
| 1583 // Concatenates {category} into {this}. | |
| 1584 // | |
| 1585 // Note: Thread-safe. | |
| 1586 intptr_t Concatenate(FreeListCategory* category); | |
| 1587 | |
| 1588 void Reset(); | |
| 1589 | |
| 1590 void Free(FreeSpace* node, int size_in_bytes); | |
| 1591 | |
| 1592 // Pick a node from the list. | |
| 1593 FreeSpace* PickNodeFromList(int* node_size); | |
| 1594 | |
| 1595 // Pick a node from the list and compare it against {size_in_bytes}. If the | |
| 1596 // node's size is greater or equal return the node and null otherwise. | |
| 1597 FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size); | |
| 1598 | |
| 1599 // Search for a node of size {size_in_bytes}. | |
| 1600 FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size); | |
| 1601 | |
| 1602 intptr_t EvictFreeListItemsInList(Page* p); | |
| 1603 bool ContainsPageFreeListItemsInList(Page* p); | |
| 1604 | |
| 1605 void RepairFreeList(Heap* heap); | |
| 1606 | |
| 1607 bool IsEmpty() { return top() == nullptr; } | |
| 1608 | |
| 1609 FreeList* owner() { return owner_; } | |
| 1610 int available() const { return available_; } | |
| 1611 | |
| 1612 #ifdef DEBUG | |
| 1613 intptr_t SumFreeList(); | |
| 1614 int FreeListLength(); | |
| 1615 bool IsVeryLong(); | |
| 1616 #endif | |
| 1617 | |
| 1618 private: | |
| 1619 // For debug builds we accurately compute free lists lengths up until | |
| 1620 // {kVeryLongFreeList} by manually walking the list. | |
| 1621 static const int kVeryLongFreeList = 500; | |
| 1622 | |
| 1623 FreeSpace* top() { return top_.Value(); } | |
| 1624 void set_top(FreeSpace* top) { top_.SetValue(top); } | |
| 1625 | |
| 1626 FreeSpace* end() const { return end_; } | |
| 1627 void set_end(FreeSpace* end) { end_ = end; } | |
| 1628 | |
| 1629 // |type_|: The type of this free list category. | |
| 1630 FreeListCategoryType type_; | |
| 1631 | |
| 1632 // |top_|: Points to the top FreeSpace* in the free list category. | |
| 1633 AtomicValue<FreeSpace*> top_; | |
| 1634 | |
| 1635 // |end_|: Points to the end FreeSpace* in the free list category. | |
| 1636 FreeSpace* end_; | |
| 1637 | |
| 1638 // |available_|: Total available bytes in all blocks of this free list | |
| 1639 // category. | |
| 1640 int available_; | |
| 1641 | |
| 1642 // |owner_|: The owning free list of this category. | |
| 1643 FreeList* owner_; | |
| 1644 }; | |
| 1645 | |
| 1646 // A free list maintaining free blocks of memory. The free list is organized in | 1669 // A free list maintaining free blocks of memory. The free list is organized in |
| 1647 // a way to encourage objects allocated around the same time to be near each | 1670 // a way to encourage objects allocated around the same time to be near each |
| 1648 // other. The normal way to allocate is intended to be by bumping a 'top' | 1671 // other. The normal way to allocate is intended to be by bumping a 'top' |
| 1649 // pointer until it hits a 'limit' pointer. When the limit is hit we need to | 1672 // pointer until it hits a 'limit' pointer. When the limit is hit we need to |
| 1650 // find a new space to allocate from. This is done with the free list, which is | 1673 // find a new space to allocate from. This is done with the free list, which is |
| 1651 // divided up into rough categories to cut down on waste. Having finer | 1674 // divided up into rough categories to cut down on waste. Having finer |
| 1652 // categories would scatter allocation more. | 1675 // categories would scatter allocation more. |
| 1653 | 1676 |
| 1654 // The free list is organized in categories as follows: | 1677 // The free list is organized in categories as follows: |
| 1655 // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for | 1678 // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for |
| (...skipping 24 matching lines...) Expand all Loading... |
| 1680 } else if (maximum_freed <= kMediumListMax) { | 1703 } else if (maximum_freed <= kMediumListMax) { |
| 1681 return kMediumAllocationMax; | 1704 return kMediumAllocationMax; |
| 1682 } else if (maximum_freed <= kLargeListMax) { | 1705 } else if (maximum_freed <= kLargeListMax) { |
| 1683 return kLargeAllocationMax; | 1706 return kLargeAllocationMax; |
| 1684 } | 1707 } |
| 1685 return maximum_freed; | 1708 return maximum_freed; |
| 1686 } | 1709 } |
| 1687 | 1710 |
| 1688 explicit FreeList(PagedSpace* owner); | 1711 explicit FreeList(PagedSpace* owner); |
| 1689 | 1712 |
| 1690 // The method concatenates {other} into {this} and returns the added bytes, | |
| 1691 // including waste. | |
| 1692 // | |
| 1693 // Note: Thread-safe. | |
| 1694 intptr_t Concatenate(FreeList* other); | |
| 1695 | |
| 1696 // Adds a node on the free list. The block of size {size_in_bytes} starting | 1713 // Adds a node on the free list. The block of size {size_in_bytes} starting |
| 1697 // at {start} is placed on the free list. The return value is the number of | 1714 // at {start} is placed on the free list. The return value is the number of |
| 1698 // bytes that were not added to the free list, because they freed memory block | 1715 // bytes that were not added to the free list, because they freed memory block |
| 1699 // was too small. Bookkeeping information will be written to the block, i.e., | 1716 // was too small. Bookkeeping information will be written to the block, i.e., |
| 1700 // its contents will be destroyed. The start address should be word aligned, | 1717 // its contents will be destroyed. The start address should be word aligned, |
| 1701 // and the size should be a non-zero multiple of the word size. | 1718 // and the size should be a non-zero multiple of the word size. |
| 1702 int Free(Address start, int size_in_bytes); | 1719 int Free(Address start, int size_in_bytes, bool keep_local = false); |
| 1703 | 1720 |
| 1704 // Allocate a block of size {size_in_bytes} from the free list. The block is | 1721 // Allocate a block of size {size_in_bytes} from the free list. The block is |
| 1705 // uninitialized. A failure is returned if no block is available. The size | 1722 // uninitialized. A failure is returned if no block is available. The size |
| 1706 // should be a non-zero multiple of the word size. | 1723 // should be a non-zero multiple of the word size. |
| 1707 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); | 1724 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); |
| 1708 | 1725 |
| 1709 // Clear the free list. | 1726 // Clear the free list. |
| 1710 void Reset(); | 1727 void Reset(); |
| 1711 | 1728 |
| 1712 void ResetStats() { wasted_bytes_ = 0; } | 1729 void ResetStats() { |
| 1730 wasted_bytes_.SetValue(0); |
| 1731 ForAllFreeListCategories( |
| 1732 [](FreeListCategory* category) { category->ResetStats(); }); |
| 1733 } |
| 1713 | 1734 |
| 1714 // Return the number of bytes available on the free list. | 1735 // Return the number of bytes available on the free list. |
| 1715 intptr_t Available() { | 1736 intptr_t Available() { |
| 1716 intptr_t available = 0; | 1737 intptr_t available = 0; |
| 1717 for (int i = kFirstCategory; i < kNumberOfCategories; i++) { | 1738 ForAllFreeListCategories([&available](FreeListCategory* category) { |
| 1718 available += category_[i].available(); | 1739 available += category->available(); |
| 1719 } | 1740 }); |
| 1720 return available; | 1741 return available; |
| 1721 } | 1742 } |
| 1722 | 1743 |
| 1723 // The method tries to find a {FreeSpace} node of at least {size_in_bytes} | |
| 1724 // size in the free list category exactly matching the size. If no suitable | |
| 1725 // node could be found, the method falls back to retrieving a {FreeSpace} | |
| 1726 // from the large or huge free list category. | |
| 1727 // | |
| 1728 // Can be used concurrently. | |
| 1729 MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes); | |
| 1730 | |
| 1731 bool IsEmpty() { | 1744 bool IsEmpty() { |
| 1732 for (int i = kFirstCategory; i < kNumberOfCategories; i++) { | 1745 bool empty = true; |
| 1733 if (!category_[i].IsEmpty()) return false; | 1746 ForAllFreeListCategories([&empty](FreeListCategory* category) { |
| 1734 } | 1747 if (!category->is_empty()) empty = false; |
| 1735 return true; | 1748 }); |
| 1749 return empty; |
| 1736 } | 1750 } |
| 1737 | 1751 |
| 1738 // Used after booting the VM. | 1752 // Used after booting the VM. |
| 1739 void RepairLists(Heap* heap); | 1753 void RepairLists(Heap* heap); |
| 1740 | 1754 |
| 1741 intptr_t EvictFreeListItems(Page* p); | 1755 intptr_t EvictFreeListItems(Page* page); |
| 1742 bool ContainsPageFreeListItems(Page* p); | 1756 bool ContainsPageFreeListItems(Page* page); |
| 1743 | 1757 |
| 1744 PagedSpace* owner() { return owner_; } | 1758 PagedSpace* owner() { return owner_; } |
| 1745 intptr_t wasted_bytes() { return wasted_bytes_; } | 1759 intptr_t wasted_bytes() { return wasted_bytes_.Value(); } |
| 1746 base::Mutex* mutex() { return &mutex_; } | 1760 |
| 1761 template <typename Callback> |
| 1762 void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) { |
| 1763 FreeListCategory* current = categories_[type]; |
| 1764 while (current != nullptr) { |
| 1765 FreeListCategory* next = current->next(); |
| 1766 callback(current); |
| 1767 current = next; |
| 1768 } |
| 1769 } |
| 1770 |
| 1771 template <typename Callback> |
| 1772 void ForAllFreeListCategories(Callback callback) { |
| 1773 for (int i = kFirstCategory; i < kNumberOfCategories; i++) { |
| 1774 ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback); |
| 1775 } |
| 1776 } |
| 1777 |
| 1778 bool AddCategory(FreeListCategory* category); |
| 1779 void RemoveCategory(FreeListCategory* category); |
| 1780 void PrintCategories(FreeListCategoryType type); |
| 1747 | 1781 |
| 1748 #ifdef DEBUG | 1782 #ifdef DEBUG |
| 1749 void Zap(); | |
| 1750 intptr_t SumFreeLists(); | 1783 intptr_t SumFreeLists(); |
| 1751 bool IsVeryLong(); | 1784 bool IsVeryLong(); |
| 1752 #endif | 1785 #endif |
| 1753 | 1786 |
| 1754 private: | 1787 private: |
| 1788 class FreeListCategoryIterator { |
| 1789 public: |
| 1790 FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type) |
| 1791 : current_(free_list->categories_[type]) {} |
| 1792 |
| 1793 bool HasNext() { return current_ != nullptr; } |
| 1794 |
| 1795 FreeListCategory* Next() { |
| 1796 DCHECK(HasNext()); |
| 1797 FreeListCategory* tmp = current_; |
| 1798 current_ = current_->next(); |
| 1799 return tmp; |
| 1800 } |
| 1801 |
| 1802 private: |
| 1803 FreeListCategory* current_; |
| 1804 }; |
| 1805 |
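A minimal usage sketch for the iterator above; since the class is private it only makes sense inside FreeList member functions, so |this| below denotes the owning FreeList:

    // Illustrative: walk every category currently linked under a given type.
    FreeListCategoryIterator it(this, kSmall);
    while (it.HasNext()) {
      FreeListCategory* current = it.Next();
      // ... e.g. try to pick a node from |current| and stop on success ...
    }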
| 1755 // The size range of blocks, in bytes. | 1806 // The size range of blocks, in bytes. |
| 1756 static const int kMinBlockSize = 3 * kPointerSize; | 1807 static const int kMinBlockSize = 3 * kPointerSize; |
| 1757 static const int kMaxBlockSize = Page::kAllocatableMemory; | 1808 static const int kMaxBlockSize = Page::kAllocatableMemory; |
| 1758 | 1809 |
| 1759 static const int kTiniestListMax = 0xa * kPointerSize; | 1810 static const int kTiniestListMax = 0xa * kPointerSize; |
| 1760 static const int kTinyListMax = 0x1f * kPointerSize; | 1811 static const int kTinyListMax = 0x1f * kPointerSize; |
| 1761 static const int kSmallListMax = 0xff * kPointerSize; | 1812 static const int kSmallListMax = 0xff * kPointerSize; |
| 1762 static const int kMediumListMax = 0x7ff * kPointerSize; | 1813 static const int kMediumListMax = 0x7ff * kPointerSize; |
| 1763 static const int kLargeListMax = 0x3fff * kPointerSize; | 1814 static const int kLargeListMax = 0x3fff * kPointerSize; |
| 1764 static const int kTinyAllocationMax = kTiniestListMax; | 1815 static const int kTinyAllocationMax = kTiniestListMax; |
| 1765 static const int kSmallAllocationMax = kTinyListMax; | 1816 static const int kSmallAllocationMax = kTinyListMax; |
| 1766 static const int kMediumAllocationMax = kSmallListMax; | 1817 static const int kMediumAllocationMax = kSmallListMax; |
| 1767 static const int kLargeAllocationMax = kMediumListMax; | 1818 static const int kLargeAllocationMax = kMediumListMax; |
| 1768 | 1819 |
| 1769 FreeSpace* FindNodeFor(int size_in_bytes, int* node_size); | 1820 FreeSpace* FindNodeFor(int size_in_bytes, int* node_size); |
| 1770 FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size); | |
| 1771 | 1821 |
| 1772 FreeListCategory* GetFreeListCategory(FreeListCategoryType category) { | 1822 // Walks all available categories for a given |type| and tries to retrieve |
| 1773 return &category_[category]; | 1823 // a node. Returns nullptr if the category is empty. |
| 1774 } | 1824 FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size); |
| 1825 |
| 1826 // Tries to retrieve a node from the first category in a given |type|. |
| 1827 // Returns nullptr if the category is empty. |
| 1828 FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size, |
| 1829 int minimum_size); |
| 1830 |
| 1831 // Searches a given |type| for a node of at least |minimum_size|. |
| 1832 FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size, |
| 1833 int minimum_size); |
| 1775 | 1834 |
| 1776 FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) { | 1835 FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) { |
| 1777 if (size_in_bytes <= kTiniestListMax) { | 1836 if (size_in_bytes <= kTiniestListMax) { |
| 1778 return kTiniest; | 1837 return kTiniest; |
| 1779 } else if (size_in_bytes <= kTinyListMax) { | 1838 } else if (size_in_bytes <= kTinyListMax) { |
| 1780 return kTiny; | 1839 return kTiny; |
| 1781 } else if (size_in_bytes <= kSmallListMax) { | 1840 } else if (size_in_bytes <= kSmallListMax) { |
| 1782 return kSmall; | 1841 return kSmall; |
| 1783 } else if (size_in_bytes <= kMediumListMax) { | 1842 } else if (size_in_bytes <= kMediumListMax) { |
| 1784 return kMedium; | 1843 return kMedium; |
| 1785 } else if (size_in_bytes <= kLargeListMax) { | 1844 } else if (size_in_bytes <= kLargeListMax) { |
| 1786 return kLarge; | 1845 return kLarge; |
| 1787 } | 1846 } |
| 1788 return kHuge; | 1847 return kHuge; |
| 1789 } | 1848 } |
| 1790 | 1849 |
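A few concrete mappings implied by the thresholds above, as an illustrative sketch (the helper is private, so these calls are only meaningful inside FreeList):

    // Illustrative: size-to-category mapping, with sizes given in pointer-size words.
    SelectFreeListCategoryType(8 * kPointerSize);       // -> kTiniest  (<= 10 words)
    SelectFreeListCategoryType(24 * kPointerSize);      // -> kTiny     (<= 31 words)
    SelectFreeListCategoryType(0x100 * kPointerSize);   // -> kMedium   (256 words)
    SelectFreeListCategoryType(0x4000 * kPointerSize);  // -> kHuge     (> kLargeListMax)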
| 1791 // The tiny categories are not used for fast allocation. | 1850 // The tiny categories are not used for fast allocation. |
| 1792 FreeListCategoryType SelectFastAllocationFreeListCategoryType( | 1851 FreeListCategoryType SelectFastAllocationFreeListCategoryType( |
| 1793 size_t size_in_bytes) { | 1852 size_t size_in_bytes) { |
| 1794 if (size_in_bytes <= kSmallAllocationMax) { | 1853 if (size_in_bytes <= kSmallAllocationMax) { |
| 1795 return kSmall; | 1854 return kSmall; |
| 1796 } else if (size_in_bytes <= kMediumAllocationMax) { | 1855 } else if (size_in_bytes <= kMediumAllocationMax) { |
| 1797 return kMedium; | 1856 return kMedium; |
| 1798 } else if (size_in_bytes <= kLargeAllocationMax) { | 1857 } else if (size_in_bytes <= kLargeAllocationMax) { |
| 1799 return kLarge; | 1858 return kLarge; |
| 1800 } | 1859 } |
| 1801 return kHuge; | 1860 return kHuge; |
| 1802 } | 1861 } |
| 1803 | 1862 |
| 1863 FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; } |
| 1864 |
| 1804 PagedSpace* owner_; | 1865 PagedSpace* owner_; |
| 1805 base::Mutex mutex_; | 1866 AtomicNumber<intptr_t> wasted_bytes_; |
| 1806 intptr_t wasted_bytes_; | 1867 FreeListCategory* categories_[kNumberOfCategories]; |
| 1807 FreeListCategory category_[kNumberOfCategories]; | 1868 |
| 1869 friend class FreeListCategory; |
| 1808 | 1870 |
| 1809 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); | 1871 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); |
| 1810 }; | 1872 }; |
| 1811 | 1873 |
| 1812 | 1874 |
| 1813 class AllocationResult { | 1875 class AllocationResult { |
| 1814 public: | 1876 public: |
| 1815 // Implicit constructor from Object*. | 1877 // Implicit constructor from Object*. |
| 1816 AllocationResult(Object* object) // NOLINT | 1878 AllocationResult(Object* object) // NOLINT |
| 1817 : object_(object) { | 1879 : object_(object) { |
| (...skipping 138 matching lines...) |
| 1956 // The stats are rebuilt during sweeping by adding each page to the | 2018 // The stats are rebuilt during sweeping by adding each page to the |
| 1957 // capacity and the size when it is encountered. As free spaces are | 2019 // capacity and the size when it is encountered. As free spaces are |
| 1958 // discovered during sweeping, they are subtracted from the size and added | 2020 // discovered during sweeping, they are subtracted from the size and added |
| 1959 // to the available and wasted totals. | 2021 // to the available and wasted totals. |
| 1960 void ClearStats() { | 2022 void ClearStats() { |
| 1961 accounting_stats_.ClearSize(); | 2023 accounting_stats_.ClearSize(); |
| 1962 free_list_.ResetStats(); | 2024 free_list_.ResetStats(); |
| 1963 ResetFreeListStatistics(); | 2025 ResetFreeListStatistics(); |
| 1964 } | 2026 } |
| 1965 | 2027 |
| 1966 // Increases the number of available bytes of that space. | |
| 1967 void AddToAccountingStats(intptr_t bytes) { | |
| 1968 accounting_stats_.DeallocateBytes(bytes); | |
| 1969 } | |
| 1970 | |
| 1971 // Available bytes without growing. These are the bytes on the free list. | 2028 // Available bytes without growing. These are the bytes on the free list. |
| 1972 // The bytes in the linear allocation area are not included in this total | 2029 // The bytes in the linear allocation area are not included in this total |
| 1973 // because updating the stats would slow down allocation. New pages are | 2030 // because updating the stats would slow down allocation. New pages are |
| 1974 // immediately added to the free list so they show up here. | 2031 // immediately added to the free list so they show up here. |
| 1975 intptr_t Available() override { return free_list_.Available(); } | 2032 intptr_t Available() override { return free_list_.Available(); } |
| 1976 | 2033 |
| 1977 // Allocated bytes in this space. Garbage bytes that were not found due to | 2034 // Allocated bytes in this space. Garbage bytes that were not found due to |
| 1978 // concurrent sweeping are counted as being allocated! The bytes in the | 2035 // concurrent sweeping are counted as being allocated! The bytes in the |
| 1979 // current linear allocation area (between top and limit) are also counted | 2036 // current linear allocation area (between top and limit) are also counted |
| 1980 // here. | 2037 // here. |
| (...skipping 34 matching lines...) |
| 2015 | 2072 |
| 2016 // Allocate the requested number of bytes in the space and consider allocation | 2073 // Allocate the requested number of bytes in the space and consider allocation |
| 2017 // alignment if needed. | 2074 // alignment if needed. |
| 2018 MUST_USE_RESULT inline AllocationResult AllocateRaw( | 2075 MUST_USE_RESULT inline AllocationResult AllocateRaw( |
| 2019 int size_in_bytes, AllocationAlignment alignment); | 2076 int size_in_bytes, AllocationAlignment alignment); |
| 2020 | 2077 |
| 2021 // Give a block of memory to the space's free list. It might be added to | 2078 // Give a block of memory to the space's free list. It might be added to |
| 2022 // the free list or accounted as waste. | 2079 // the free list or accounted as waste. |
| 2023 // If add_to_freelist is false then just accounting stats are updated and | 2080 // If add_to_freelist is false then just accounting stats are updated and |
| 2024 // no attempt to add area to free list is made. | 2081 // no attempt to add area to free list is made. |
| 2025 int Free(Address start, int size_in_bytes) { | 2082 int Free(Address start, int size_in_bytes, bool keep_local = false) { |
| 2026 int wasted = free_list_.Free(start, size_in_bytes); | 2083 int wasted = free_list_.Free(start, size_in_bytes, keep_local); |
| 2027 accounting_stats_.DeallocateBytes(size_in_bytes); | 2084 accounting_stats_.DeallocateBytes(size_in_bytes); |
| 2028 return size_in_bytes - wasted; | 2085 return size_in_bytes - wasted; |
| 2029 } | 2086 } |
| 2030 | 2087 |
| 2088 int UnaccountedFree(Address start, int size_in_bytes, |
| 2089 bool keep_local = false) { |
| 2090 int wasted = free_list_.Free(start, size_in_bytes, keep_local); |
| 2091 return size_in_bytes - wasted; |
| 2092 } |
| 2093 |
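A hedged sketch of how the two entry points above differ, assuming |start| and |size_in_bytes| describe a region being returned by a sweeper (names outside this header are illustrative):

    // Illustrative: Free() also updates accounting_stats_, UnaccountedFree() only
    // feeds the free list; both return the usable size (input minus waste).
    int usable = space->Free(start, size_in_bytes);
    int usable_unaccounted = space->UnaccountedFree(start, size_in_bytes);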
| 2031 void ResetFreeList() { free_list_.Reset(); } | 2094 void ResetFreeList() { free_list_.Reset(); } |
| 2032 | 2095 |
| 2033 // Set space allocation info. | 2096 // Set space allocation info. |
| 2034 void SetTopAndLimit(Address top, Address limit) { | 2097 void SetTopAndLimit(Address top, Address limit) { |
| 2035 DCHECK(top == limit || | 2098 DCHECK(top == limit || |
| 2036 Page::FromAddress(top) == Page::FromAddress(limit - 1)); | 2099 Page::FromAddress(top) == Page::FromAddress(limit - 1)); |
| 2037 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | 2100 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
| 2038 allocation_info_.Reset(top, limit); | 2101 allocation_info_.Reset(top, limit); |
| 2039 } | 2102 } |
| 2040 | 2103 |
| 2041 // Empty space allocation info, returning unused area to free list. | 2104 // Empty space allocation info, returning unused area to free list. |
| 2042 void EmptyAllocationInfo() { | 2105 void EmptyAllocationInfo() { |
| 2043 // Mark the old linear allocation area with a free space map so it can be | 2106 // Mark the old linear allocation area with a free space map so it can be |
| 2044 // skipped when scanning the heap. | 2107 // skipped when scanning the heap. |
| 2045 int old_linear_size = static_cast<int>(limit() - top()); | 2108 int old_linear_size = static_cast<int>(limit() - top()); |
| 2046 Free(top(), old_linear_size); | 2109 Free(top(), old_linear_size); |
| 2047 SetTopAndLimit(NULL, NULL); | 2110 SetTopAndLimit(NULL, NULL); |
| 2048 } | 2111 } |
| 2049 | 2112 |
| 2050 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); } | 2113 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); } |
| 2051 | 2114 |
| 2052 void IncreaseCapacity(int size); | 2115 void IncreaseCapacity(int size); |
| 2053 | 2116 |
| 2054 // Releases an unused page and shrinks the space. | 2117 // Releases an unused page and shrinks the space. |
| 2055 void ReleasePage(Page* page, bool evict_free_list_items); | 2118 void ReleasePage(Page* page); |
| 2056 | 2119 |
| 2057 // The dummy page that anchors the linked list of pages. | 2120 // The dummy page that anchors the linked list of pages. |
| 2058 Page* anchor() { return &anchor_; } | 2121 Page* anchor() { return &anchor_; } |
| 2059 | 2122 |
| 2060 #ifdef VERIFY_HEAP | 2123 #ifdef VERIFY_HEAP |
| 2061 // Verify integrity of this space. | 2124 // Verify integrity of this space. |
| 2062 virtual void Verify(ObjectVisitor* visitor); | 2125 virtual void Verify(ObjectVisitor* visitor); |
| 2063 | 2126 |
| 2064 // Overridden by subclasses to verify space-specific object | 2127 // Overridden by subclasses to verify space-specific object |
| 2065 // properties (e.g., only maps or free-list nodes are in map space). | 2128 // properties (e.g., only maps or free-list nodes are in map space). |
| (...skipping 35 matching lines...) |
| 2101 virtual bool is_local() { return false; } | 2164 virtual bool is_local() { return false; } |
| 2102 | 2165 |
| 2103 // Merges {other} into the current space. Note that this modifies {other}, | 2166 // Merges {other} into the current space. Note that this modifies {other}, |
| 2104 // e.g., removes its bump pointer area and resets statistics. | 2167 // e.g., removes its bump pointer area and resets statistics. |
| 2105 void MergeCompactionSpace(CompactionSpace* other); | 2168 void MergeCompactionSpace(CompactionSpace* other); |
| 2106 | 2169 |
| 2107 // Refills the free list from the corresponding free list filled by the | 2170 // Refills the free list from the corresponding free list filled by the |
| 2108 // sweeper. | 2171 // sweeper. |
| 2109 virtual void RefillFreeList(); | 2172 virtual void RefillFreeList(); |
| 2110 | 2173 |
| 2174 FreeList* free_list() { return &free_list_; } |
| 2175 |
| 2176 base::Mutex* mutex() { return &space_mutex_; } |
| 2177 |
| 2111 protected: | 2178 protected: |
| 2112 void AddMemory(Address start, intptr_t size); | |
| 2113 | |
| 2114 void MoveOverFreeMemory(PagedSpace* other); | |
| 2115 | |
| 2116 // PagedSpaces that should be included in snapshots have different, i.e., | 2179 // PagedSpaces that should be included in snapshots have different, i.e., |
| 2117 // smaller, initial pages. | 2180 // smaller, initial pages. |
| 2118 virtual bool snapshotable() { return true; } | 2181 virtual bool snapshotable() { return true; } |
| 2119 | 2182 |
| 2120 FreeList* free_list() { return &free_list_; } | |
| 2121 | |
| 2122 bool HasPages() { return anchor_.next_page() != &anchor_; } | 2183 bool HasPages() { return anchor_.next_page() != &anchor_; } |
| 2123 | 2184 |
| 2124 // Cleans up the space, frees all pages in this space except those belonging | 2185 // Cleans up the space, frees all pages in this space except those belonging |
| 2125 // to the initial chunk, uncommits addresses in the initial chunk. | 2186 // to the initial chunk, uncommits addresses in the initial chunk. |
| 2126 void TearDown(); | 2187 void TearDown(); |
| 2127 | 2188 |
| 2128 // Expands the space by allocating a fixed number of pages. Returns false if | 2189 // Expands the space by allocating a fixed number of pages. Returns false if |
| 2129 // it cannot allocate requested number of pages from OS, or if the hard heap | 2190 // it cannot allocate requested number of pages from OS, or if the hard heap |
| 2130 // size limit has been hit. | 2191 // size limit has been hit. |
| 2131 bool Expand(); | 2192 bool Expand(); |
| (...skipping 676 matching lines...) |
| 2808 // ----------------------------------------------------------------------------- | 2869 // ----------------------------------------------------------------------------- |
| 2809 // Compaction space that is used temporarily during compaction. | 2870 // Compaction space that is used temporarily during compaction. |
| 2810 | 2871 |
| 2811 class CompactionSpace : public PagedSpace { | 2872 class CompactionSpace : public PagedSpace { |
| 2812 public: | 2873 public: |
| 2813 CompactionSpace(Heap* heap, AllocationSpace id, Executability executable) | 2874 CompactionSpace(Heap* heap, AllocationSpace id, Executability executable) |
| 2814 : PagedSpace(heap, id, executable) {} | 2875 : PagedSpace(heap, id, executable) {} |
| 2815 | 2876 |
| 2816 bool is_local() override { return true; } | 2877 bool is_local() override { return true; } |
| 2817 | 2878 |
| 2818 void RefillFreeList() override; | |
| 2819 | |
| 2820 protected: | 2879 protected: |
| 2821 // The space is temporary and not included in any snapshots. | 2880 // The space is temporary and not included in any snapshots. |
| 2822 bool snapshotable() override { return false; } | 2881 bool snapshotable() override { return false; } |
| 2823 | 2882 |
| 2824 MUST_USE_RESULT HeapObject* SweepAndRetryAllocation( | 2883 MUST_USE_RESULT HeapObject* SweepAndRetryAllocation( |
| 2825 int size_in_bytes) override; | 2884 int size_in_bytes) override; |
| 2826 }; | 2885 }; |
| 2827 | 2886 |
| 2828 | 2887 |
| 2829 // A collection of |CompactionSpace|s used by a single compaction task. | 2888 // A collection of |CompactionSpace|s used by a single compaction task. |
| (...skipping 228 matching lines...) |
| 3058 count = 0; | 3117 count = 0; |
| 3059 } | 3118 } |
| 3060 // Must be small, since an iteration is used for lookup. | 3119 // Must be small, since an iteration is used for lookup. |
| 3061 static const int kMaxComments = 64; | 3120 static const int kMaxComments = 64; |
| 3062 }; | 3121 }; |
| 3063 #endif | 3122 #endif |
| 3064 } // namespace internal | 3123 } // namespace internal |
| 3065 } // namespace v8 | 3124 } // namespace v8 |
| 3066 | 3125 |
| 3067 #endif // V8_HEAP_SPACES_H_ | 3126 #endif // V8_HEAP_SPACES_H_ |