Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(106)

Side by Side Diff: src/heap/spaces.h

Issue 1772733002: [heap] Move to two-level free-list (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Speed up very slow FreeList::IsVeryLong which is used in a regular DCHECK Created 4 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/heap/mark-compact.cc ('k') | src/heap/spaces.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef V8_HEAP_SPACES_H_ 5 #ifndef V8_HEAP_SPACES_H_
6 #define V8_HEAP_SPACES_H_ 6 #define V8_HEAP_SPACES_H_
7 7
8 #include "src/allocation.h" 8 #include "src/allocation.h"
9 #include "src/atomic-utils.h" 9 #include "src/atomic-utils.h"
10 #include "src/base/atomicops.h" 10 #include "src/base/atomicops.h"
11 #include "src/base/bits.h" 11 #include "src/base/bits.h"
12 #include "src/base/platform/mutex.h" 12 #include "src/base/platform/mutex.h"
13 #include "src/flags.h" 13 #include "src/flags.h"
14 #include "src/hashmap.h" 14 #include "src/hashmap.h"
15 #include "src/list.h" 15 #include "src/list.h"
16 #include "src/objects.h" 16 #include "src/objects.h"
17 #include "src/utils.h" 17 #include "src/utils.h"
18 18
19 namespace v8 { 19 namespace v8 {
20 namespace internal { 20 namespace internal {
21 21
22 class AllocationInfo; 22 class AllocationInfo;
23 class AllocationObserver; 23 class AllocationObserver;
24 class CompactionSpace; 24 class CompactionSpace;
25 class CompactionSpaceCollection; 25 class CompactionSpaceCollection;
26 class FreeList; 26 class FreeList;
27 class Isolate; 27 class Isolate;
28 class MemoryAllocator; 28 class MemoryAllocator;
29 class MemoryChunk; 29 class MemoryChunk;
30 class Page;
30 class PagedSpace; 31 class PagedSpace;
31 class SemiSpace; 32 class SemiSpace;
32 class SkipList; 33 class SkipList;
33 class SlotsBuffer; 34 class SlotsBuffer;
34 class SlotSet; 35 class SlotSet;
35 class TypedSlotSet; 36 class TypedSlotSet;
36 class Space; 37 class Space;
37 38
38 // ----------------------------------------------------------------------------- 39 // -----------------------------------------------------------------------------
39 // Heap structures: 40 // Heap structures:
(...skipping 238 matching lines...) Expand 10 before | Expand all | Expand 10 after
278 // Clear all cells till the cell containing the last index. 279 // Clear all cells till the cell containing the last index.
279 for (uint32_t i = start_cell_index; i < end_cell_index; i++) { 280 for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
280 cells()[i] = 0; 281 cells()[i] = 0;
281 } 282 }
282 // Clear all bits in the last cell till the last bit before index. 283 // Clear all bits in the last cell till the last bit before index.
283 uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1); 284 uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1);
284 cells()[end_cell_index] &= clear_mask; 285 cells()[end_cell_index] &= clear_mask;
285 } 286 }
286 }; 287 };
287 288
289 enum FreeListCategoryType {
290 kTiniest,
291 kTiny,
292 kSmall,
293 kMedium,
294 kLarge,
295 kHuge,
296
297 kFirstCategory = kTiniest,
298 kLastCategory = kHuge,
299 kNumberOfCategories = kLastCategory + 1,
300 kInvalidCategory
301 };
302
303 enum FreeMode { kLinkCategory, kDoNotLinkCategory };
304
305 // A free list category maintains a linked list of free memory blocks.
306 class FreeListCategory {
307 public:
308 static const int kSize = kIntSize + // FreeListCategoryType type_
309 kIntSize + // int available_
310 kPointerSize + // FreeSpace* top_
311 kPointerSize + // FreeListCategory* prev_
312 kPointerSize; // FreeListCategory* next_
313
314 FreeListCategory()
315 : type_(kInvalidCategory),
316 available_(0),
317 top_(nullptr),
318 prev_(nullptr),
319 next_(nullptr) {}
320
321 void Initialize(FreeListCategoryType type) {
322 type_ = type;
323 available_ = 0;
324 top_ = nullptr;
325 prev_ = nullptr;
326 next_ = nullptr;
327 }
328
329 void Invalidate();
330
331 void Reset();
332
333 void ResetStats() { Reset(); }
334
335 void RepairFreeList(Heap* heap);
336
337 // Relinks the category into the currently owning free list. Requires that the
338 // category is currently unlinked.
339 void Relink();
340
341 bool Free(FreeSpace* node, int size_in_bytes, FreeMode mode);
Hannes Payer (out of office) 2016/03/11 09:56:24 Why not templatize on mode?
Michael Lippautz 2016/03/11 10:19:12 Templatizing FreeListCategory::Free would then req
342
343 // Picks a node from the list and stores its size in |node_size|. Returns
344 // nullptr if the category is empty.
345 FreeSpace* PickNodeFromList(int* node_size);
346
347 // Performs a single try to pick a node of at least |minimum_size| from the
348 // category. Stores the actual size in |node_size|. Returns nullptr if no
349 // node is found.
350 FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size);
351
352 // Picks a node of at least |minimum_size| from the category. Stores the
353 // actual size in |node_size|. Returns nullptr if no node is found.
354 FreeSpace* SearchForNodeInList(int minimum_size, int* node_size);
355
356 inline FreeList* owner();
357 inline bool is_linked();
358 bool is_empty() { return top() == nullptr; }
359 int available() const { return available_; }
360
361 #ifdef DEBUG
362 intptr_t SumFreeList();
363 int FreeListLength();
364 #endif
365
366 private:
367 // For debug builds we accurately compute free lists lengths up until
368 // {kVeryLongFreeList} by manually walking the list.
369 static const int kVeryLongFreeList = 500;
370
371 inline Page* page();
372
373 FreeSpace* top() { return top_; }
374 void set_top(FreeSpace* top) { top_ = top; }
375 FreeListCategory* prev() { return prev_; }
376 void set_prev(FreeListCategory* prev) { prev_ = prev; }
377 FreeListCategory* next() { return next_; }
378 void set_next(FreeListCategory* next) { next_ = next; }
379
380 // |type_|: The type of this free list category.
381 FreeListCategoryType type_;
382
383 // |available_|: Total available bytes in all blocks of this free list
384 // category.
Hannes Payer (out of office) 2016/03/11 09:56:24 remvove space
Michael Lippautz 2016/03/11 10:19:12 Done.
385 int available_;
386
387 // |top_|: Points to the top FreeSpace* in the free list category.
388 FreeSpace* top_;
389
390 FreeListCategory* prev_;
391 FreeListCategory* next_;
392
393 friend class FreeList;
394 friend class PagedSpace;
395 };
288 396
289 // MemoryChunk represents a memory region owned by a specific space. 397 // MemoryChunk represents a memory region owned by a specific space.
290 // It is divided into the header and the body. Chunk start is always 398 // It is divided into the header and the body. Chunk start is always
291 // 1MB aligned. Start of the body is aligned so it can accommodate 399 // 1MB aligned. Start of the body is aligned so it can accommodate
292 // any heap object. 400 // any heap object.
293 class MemoryChunk { 401 class MemoryChunk {
294 public: 402 public:
295 enum MemoryChunkFlags { 403 enum MemoryChunkFlags {
296 IS_EXECUTABLE, 404 IS_EXECUTABLE,
297 POINTERS_TO_HERE_ARE_INTERESTING, 405 POINTERS_TO_HERE_ARE_INTERESTING,
(...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after
400 508
401 static const size_t kMinHeaderSize = 509 static const size_t kMinHeaderSize =
402 kWriteBarrierCounterOffset + 510 kWriteBarrierCounterOffset +
403 kIntptrSize // intptr_t write_barrier_counter_ 511 kIntptrSize // intptr_t write_barrier_counter_
404 + kPointerSize // AtomicValue high_water_mark_ 512 + kPointerSize // AtomicValue high_water_mark_
405 + kPointerSize // base::Mutex* mutex_ 513 + kPointerSize // base::Mutex* mutex_
406 + kPointerSize // base::AtomicWord parallel_sweeping_ 514 + kPointerSize // base::AtomicWord parallel_sweeping_
407 + kPointerSize // AtomicValue parallel_compaction_ 515 + kPointerSize // AtomicValue parallel_compaction_
408 + 2 * kPointerSize // AtomicNumber free-list statistics 516 + 2 * kPointerSize // AtomicNumber free-list statistics
409 + kPointerSize // AtomicValue next_chunk_ 517 + kPointerSize // AtomicValue next_chunk_
410 + kPointerSize; // AtomicValue prev_chunk_ 518 + kPointerSize // AtomicValue prev_chunk_
519 // FreeListCategory categories_[kNumberOfCategories]
520 + FreeListCategory::kSize * kNumberOfCategories;
411 521
 412 // We add some more space to the computed header size to account for missing 522
413 // alignment requirements in our computation. 523 // alignment requirements in our computation.
414 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. 524 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
415 static const size_t kHeaderSize = kMinHeaderSize; 525 static const size_t kHeaderSize = kMinHeaderSize;
416 526
417 static const int kBodyOffset = 527 static const int kBodyOffset =
418 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); 528 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
419 529
420 // The start offset of the object area in a page. Aligned to both maps and 530 // The start offset of the object area in a page. Aligned to both maps and
(...skipping 161 matching lines...) Expand 10 before | Expand all | Expand 10 after
582 692
583 bool IsEvacuationCandidate() { 693 bool IsEvacuationCandidate() {
584 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE))); 694 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
585 return IsFlagSet(EVACUATION_CANDIDATE); 695 return IsFlagSet(EVACUATION_CANDIDATE);
586 } 696 }
587 697
588 bool CanAllocate() { 698 bool CanAllocate() {
589 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); 699 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
590 } 700 }
591 701
592 void MarkEvacuationCandidate() {
593 DCHECK(!IsFlagSet(NEVER_EVACUATE));
594 DCHECK_NULL(old_to_old_slots_);
595 DCHECK_NULL(typed_old_to_old_slots_);
596 SetFlag(EVACUATION_CANDIDATE);
597 }
598
599 void ClearEvacuationCandidate() {
600 DCHECK_NULL(old_to_old_slots_);
601 DCHECK_NULL(typed_old_to_old_slots_);
602 ClearFlag(EVACUATION_CANDIDATE);
603 }
604
605 bool ShouldSkipEvacuationSlotRecording() { 702 bool ShouldSkipEvacuationSlotRecording() {
606 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; 703 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
607 } 704 }
608 705
609 Executability executable() { 706 Executability executable() {
610 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; 707 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
611 } 708 }
612 709
613 bool InNewSpace() { 710 bool InNewSpace() {
614 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; 711 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
(...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after
705 802
706 // PagedSpace free-list statistics. 803 // PagedSpace free-list statistics.
707 AtomicNumber<intptr_t> available_in_free_list_; 804 AtomicNumber<intptr_t> available_in_free_list_;
708 AtomicNumber<intptr_t> wasted_memory_; 805 AtomicNumber<intptr_t> wasted_memory_;
709 806
710 // next_chunk_ holds a pointer of type MemoryChunk 807 // next_chunk_ holds a pointer of type MemoryChunk
711 AtomicValue<MemoryChunk*> next_chunk_; 808 AtomicValue<MemoryChunk*> next_chunk_;
712 // prev_chunk_ holds a pointer of type MemoryChunk 809 // prev_chunk_ holds a pointer of type MemoryChunk
713 AtomicValue<MemoryChunk*> prev_chunk_; 810 AtomicValue<MemoryChunk*> prev_chunk_;
714 811
812 FreeListCategory categories_[kNumberOfCategories];
813
715 private: 814 private:
716 void InitializeReservedMemory() { reservation_.Reset(); } 815 void InitializeReservedMemory() { reservation_.Reset(); }
717 816
718 friend class MemoryAllocator; 817 friend class MemoryAllocator;
719 friend class MemoryChunkValidator; 818 friend class MemoryChunkValidator;
720 }; 819 };
721 820
722 enum FreeListCategoryType {
723 kTiniest,
724 kTiny,
725 kSmall,
726 kMedium,
727 kLarge,
728 kHuge,
729
730 kFirstCategory = kTiniest,
731 kLastCategory = kHuge,
732 kNumberOfCategories = kLastCategory + 1
733 };
734
735 // ----------------------------------------------------------------------------- 821 // -----------------------------------------------------------------------------
736 // A page is a memory chunk of a size 1MB. Large object pages may be larger. 822 // A page is a memory chunk of a size 1MB. Large object pages may be larger.
737 // 823 //
738 // The only way to get a page pointer is by calling factory methods: 824 // The only way to get a page pointer is by calling factory methods:
739 // Page* p = Page::FromAddress(addr); or 825 // Page* p = Page::FromAddress(addr); or
740 // Page* p = Page::FromAllocationTop(top); 826 // Page* p = Page::FromAllocationTop(top);
741 class Page : public MemoryChunk { 827 class Page : public MemoryChunk {
742 public: 828 public:
743 // Returns the page containing a given address. The address ranges 829 // Returns the page containing a given address. The address ranges
744 // from [page_addr .. page_addr + kPageSize[ 830 // from [page_addr .. page_addr + kPageSize[
(...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after
828 return concurrent_sweeping_state().Value() == kSweepingDone; 914 return concurrent_sweeping_state().Value() == kSweepingDone;
829 } 915 }
830 916
831 void ResetFreeListStatistics(); 917 void ResetFreeListStatistics();
832 918
833 int LiveBytesFromFreeList() { 919 int LiveBytesFromFreeList() {
834 return static_cast<int>(area_size() - wasted_memory() - 920 return static_cast<int>(area_size() - wasted_memory() -
835 available_in_free_list()); 921 available_in_free_list());
836 } 922 }
837 923
924 template <typename Callback>
925 inline void ForAllFreeListCategories(Callback callback) {
926 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
927 callback(&categories_[i]);
928 }
929 }
930
931 FreeListCategory* free_list_category(FreeListCategoryType type) {
932 return &categories_[type];
933 }
934
838 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \ 935 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \
839 type name() { return name##_.Value(); } \ 936 type name() { return name##_.Value(); } \
840 void set_##name(type name) { name##_.SetValue(name); } \ 937 void set_##name(type name) { name##_.SetValue(name); } \
841 void add_##name(type name) { name##_.Increment(name); } 938 void add_##name(type name) { name##_.Increment(name); }
842 939
843 FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory) 940 FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory)
844 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list) 941 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list)
845 942
846 #undef FRAGMENTATION_STATS_ACCESSORS 943 #undef FRAGMENTATION_STATS_ACCESSORS
847 944
848 #ifdef DEBUG 945 #ifdef DEBUG
849 void Print(); 946 void Print();
850 #endif // DEBUG 947 #endif // DEBUG
851 948
949 inline void MarkNeverAllocateForTesting();
950 inline void MarkEvacuationCandidate();
951 inline void ClearEvacuationCandidate();
952
953 private:
954 inline void InitializeFreeListCategories();
955
852 friend class MemoryAllocator; 956 friend class MemoryAllocator;
853 }; 957 };
854 958
855 959
856 class LargePage : public MemoryChunk { 960 class LargePage : public MemoryChunk {
857 public: 961 public:
858 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } 962 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
859 963
860 inline LargePage* next_page() { 964 inline LargePage* next_page() {
861 return static_cast<LargePage*>(next_chunk()); 965 return static_cast<LargePage*>(next_chunk());
(...skipping 619 matching lines...) Expand 10 before | Expand all | Expand 10 after
1481 1585
1482 // Zero out all the allocation statistics (i.e., no capacity). 1586 // Zero out all the allocation statistics (i.e., no capacity).
1483 void Clear() { 1587 void Clear() {
1484 capacity_ = 0; 1588 capacity_ = 0;
1485 max_capacity_ = 0; 1589 max_capacity_ = 0;
1486 size_ = 0; 1590 size_ = 0;
1487 } 1591 }
1488 1592
1489 void ClearSize() { size_ = capacity_; } 1593 void ClearSize() { size_ = capacity_; }
1490 1594
1491 // Reset the allocation statistics (i.e., available = capacity with no wasted
1492 // or allocated bytes).
1493 void Reset() {
1494 size_ = 0;
1495 }
1496
1497 // Accessors for the allocation statistics. 1595 // Accessors for the allocation statistics.
1498 intptr_t Capacity() { return capacity_; } 1596 intptr_t Capacity() { return capacity_; }
1499 intptr_t MaxCapacity() { return max_capacity_; } 1597 intptr_t MaxCapacity() { return max_capacity_; }
1500 intptr_t Size() { 1598 intptr_t Size() {
1501 CHECK_GE(size_, 0); 1599 CHECK_GE(size_, 0);
1502 return size_; 1600 return size_;
1503 } 1601 }
1504 1602
1505 // Grow the space by adding available bytes. They are initially marked as 1603 // Grow the space by adding available bytes. They are initially marked as
1506 // being in use (part of the size), but will normally be immediately freed, 1604 // being in use (part of the size), but will normally be immediately freed,
1507 // putting them on the free list and removing them from size_. 1605 // putting them on the free list and removing them from size_.
1508 void ExpandSpace(int size_in_bytes) { 1606 void ExpandSpace(int size_in_bytes) {
1509 capacity_ += size_in_bytes; 1607 capacity_ += size_in_bytes;
1510 size_ += size_in_bytes; 1608 size_ += size_in_bytes;
1511 if (capacity_ > max_capacity_) { 1609 if (capacity_ > max_capacity_) {
1512 max_capacity_ = capacity_; 1610 max_capacity_ = capacity_;
1513 } 1611 }
1514 CHECK(size_ >= 0); 1612 CHECK(size_ >= 0);
1515 } 1613 }
1516 1614
1517 // Shrink the space by removing available bytes. Since shrinking is done 1615 // Shrink the space by removing available bytes. Since shrinking is done
1518 // during sweeping, bytes have been marked as being in use (part of the size) 1616 // during sweeping, bytes have been marked as being in use (part of the size)
1519 // and are hereby freed. 1617 // and are hereby freed.
1520 void ShrinkSpace(int size_in_bytes) { 1618 void ShrinkSpace(int size_in_bytes) {
1521 capacity_ -= size_in_bytes; 1619 capacity_ -= size_in_bytes;
1522 size_ -= size_in_bytes; 1620 size_ -= size_in_bytes;
1523 CHECK(size_ >= 0); 1621 CHECK_GE(size_, 0);
1524 } 1622 }
1525 1623
1526 // Allocate from available bytes (available -> size). 1624 // Allocate from available bytes (available -> size).
1527 void AllocateBytes(intptr_t size_in_bytes) { 1625 void AllocateBytes(intptr_t size_in_bytes) {
1528 size_ += size_in_bytes; 1626 size_ += size_in_bytes;
1529 CHECK(size_ >= 0); 1627 CHECK_GE(size_, 0);
1530 } 1628 }
1531 1629
1532 // Free allocated bytes, making them available (size -> available). 1630 // Free allocated bytes, making them available (size -> available).
1533 void DeallocateBytes(intptr_t size_in_bytes) { 1631 void DeallocateBytes(intptr_t size_in_bytes) {
1534 size_ -= size_in_bytes; 1632 size_ -= size_in_bytes;
1535 CHECK_GE(size_, 0); 1633 CHECK_GE(size_, 0);
1536 } 1634 }
1537 1635
1538 // Merge {other} into {this}. 1636 // Merge {other} into {this}.
1539 void Merge(const AllocationStats& other) { 1637 void Merge(const AllocationStats& other) {
(...skipping 18 matching lines...) Expand all
1558 // bookkeeping structures) currently in the space. 1656 // bookkeeping structures) currently in the space.
1559 intptr_t capacity_; 1657 intptr_t capacity_;
1560 1658
1561 // |max_capacity_|: The maximum capacity ever observed. 1659 // |max_capacity_|: The maximum capacity ever observed.
1562 intptr_t max_capacity_; 1660 intptr_t max_capacity_;
1563 1661
1564 // |size_|: The number of allocated bytes. 1662 // |size_|: The number of allocated bytes.
1565 intptr_t size_; 1663 intptr_t size_;
1566 }; 1664 };
1567 1665
1568
1569 // A free list category maintains a linked list of free memory blocks.
1570 class FreeListCategory {
1571 public:
1572 FreeListCategory() : top_(nullptr), end_(nullptr), available_(0) {}
1573
1574 void Initialize(FreeList* owner, FreeListCategoryType type) {
1575 owner_ = owner;
1576 type_ = type;
1577 }
1578
1579 // Concatenates {category} into {this}.
1580 //
1581 // Note: Thread-safe.
1582 intptr_t Concatenate(FreeListCategory* category);
1583
1584 void Reset();
1585
1586 void Free(FreeSpace* node, int size_in_bytes);
1587
1588 // Pick a node from the list.
1589 FreeSpace* PickNodeFromList(int* node_size);
1590
1591 // Pick a node from the list and compare it against {size_in_bytes}. If the
1592 // node's size is greater or equal return the node and null otherwise.
1593 FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
1594
1595 // Search for a node of size {size_in_bytes}.
1596 FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size);
1597
1598 intptr_t EvictFreeListItemsInList(Page* p);
1599 bool ContainsPageFreeListItemsInList(Page* p);
1600
1601 void RepairFreeList(Heap* heap);
1602
1603 bool IsEmpty() { return top() == nullptr; }
1604
1605 FreeList* owner() { return owner_; }
1606 int available() const { return available_; }
1607
1608 #ifdef DEBUG
1609 intptr_t SumFreeList();
1610 int FreeListLength();
1611 bool IsVeryLong();
1612 #endif
1613
1614 private:
1615 // For debug builds we accurately compute free lists lengths up until
1616 // {kVeryLongFreeList} by manually walking the list.
1617 static const int kVeryLongFreeList = 500;
1618
1619 FreeSpace* top() { return top_.Value(); }
1620 void set_top(FreeSpace* top) { top_.SetValue(top); }
1621
1622 FreeSpace* end() const { return end_; }
1623 void set_end(FreeSpace* end) { end_ = end; }
1624
1625 // |type_|: The type of this free list category.
1626 FreeListCategoryType type_;
1627
1628 // |top_|: Points to the top FreeSpace* in the free list category.
1629 AtomicValue<FreeSpace*> top_;
1630
1631 // |end_|: Points to the end FreeSpace* in the free list category.
1632 FreeSpace* end_;
1633
1634 // |available_|: Total available bytes in all blocks of this free list
1635 // category.
1636 int available_;
1637
1638 // |owner_|: The owning free list of this category.
1639 FreeList* owner_;
1640 };
1641
1642 // A free list maintaining free blocks of memory. The free list is organized in 1666 // A free list maintaining free blocks of memory. The free list is organized in
1643 // a way to encourage objects allocated around the same time to be near each 1667 // a way to encourage objects allocated around the same time to be near each
1644 // other. The normal way to allocate is intended to be by bumping a 'top' 1668 // other. The normal way to allocate is intended to be by bumping a 'top'
1645 // pointer until it hits a 'limit' pointer. When the limit is hit we need to 1669 // pointer until it hits a 'limit' pointer. When the limit is hit we need to
1646 // find a new space to allocate from. This is done with the free list, which is 1670 // find a new space to allocate from. This is done with the free list, which is
1647 // divided up into rough categories to cut down on waste. Having finer 1671 // divided up into rough categories to cut down on waste. Having finer
1648 // categories would scatter allocation more. 1672 // categories would scatter allocation more.
1649 1673
1650 // The free list is organized in categories as follows: 1674 // The free list is organized in categories as follows:
1651 // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for 1675 // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
(...skipping 24 matching lines...) Expand all
1676 } else if (maximum_freed <= kMediumListMax) { 1700 } else if (maximum_freed <= kMediumListMax) {
1677 return kMediumAllocationMax; 1701 return kMediumAllocationMax;
1678 } else if (maximum_freed <= kLargeListMax) { 1702 } else if (maximum_freed <= kLargeListMax) {
1679 return kLargeAllocationMax; 1703 return kLargeAllocationMax;
1680 } 1704 }
1681 return maximum_freed; 1705 return maximum_freed;
1682 } 1706 }
1683 1707
1684 explicit FreeList(PagedSpace* owner); 1708 explicit FreeList(PagedSpace* owner);
1685 1709
1686 // The method concatenates {other} into {this} and returns the added bytes,
1687 // including waste.
1688 //
1689 // Note: Thread-safe.
1690 intptr_t Concatenate(FreeList* other);
1691
1692 // Adds a node on the free list. The block of size {size_in_bytes} starting 1710 // Adds a node on the free list. The block of size {size_in_bytes} starting
1693 // at {start} is placed on the free list. The return value is the number of 1711 // at {start} is placed on the free list. The return value is the number of
 1694 // bytes that were not added to the free list, because the freed memory block 1712
1695 // was too small. Bookkeeping information will be written to the block, i.e., 1713 // was too small. Bookkeeping information will be written to the block, i.e.,
1696 // its contents will be destroyed. The start address should be word aligned, 1714 // its contents will be destroyed. The start address should be word aligned,
1697 // and the size should be a non-zero multiple of the word size. 1715 // and the size should be a non-zero multiple of the word size.
1698 int Free(Address start, int size_in_bytes); 1716 int Free(Address start, int size_in_bytes, FreeMode mode);
1699 1717
1700 // Allocate a block of size {size_in_bytes} from the free list. The block is 1718 // Allocate a block of size {size_in_bytes} from the free list. The block is
 1701 // uninitialized. A failure is returned if no block is available. The size 1719
1702 // should be a non-zero multiple of the word size. 1720 // should be a non-zero multiple of the word size.
1703 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); 1721 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1704 1722
1705 // Clear the free list. 1723 // Clear the free list.
1706 void Reset(); 1724 void Reset();
1707 1725
1708 void ResetStats() { wasted_bytes_ = 0; } 1726 void ResetStats() {
1727 wasted_bytes_.SetValue(0);
1728 ForAllFreeListCategories(
1729 [](FreeListCategory* category) { category->ResetStats(); });
1730 }
1709 1731
1710 // Return the number of bytes available on the free list. 1732 // Return the number of bytes available on the free list.
1711 intptr_t Available() { 1733 intptr_t Available() {
1712 intptr_t available = 0; 1734 intptr_t available = 0;
1713 for (int i = kFirstCategory; i < kNumberOfCategories; i++) { 1735 ForAllFreeListCategories([&available](FreeListCategory* category) {
1714 available += category_[i].available(); 1736 available += category->available();
1715 } 1737 });
1716 return available; 1738 return available;
1717 } 1739 }
1718 1740
1719 // The method tries to find a {FreeSpace} node of at least {size_in_bytes}
1720 // size in the free list category exactly matching the size. If no suitable
1721 // node could be found, the method falls back to retrieving a {FreeSpace}
1722 // from the large or huge free list category.
1723 //
1724 // Can be used concurrently.
1725 MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);
1726
1727 bool IsEmpty() { 1741 bool IsEmpty() {
1728 for (int i = kFirstCategory; i < kNumberOfCategories; i++) { 1742 bool empty = true;
1729 if (!category_[i].IsEmpty()) return false; 1743 ForAllFreeListCategories([&empty](FreeListCategory* category) {
1730 } 1744 if (!category->is_empty()) empty = false;
1731 return true; 1745 });
1746 return empty;
1732 } 1747 }
1733 1748
1734 // Used after booting the VM. 1749 // Used after booting the VM.
1735 void RepairLists(Heap* heap); 1750 void RepairLists(Heap* heap);
1736 1751
1737 intptr_t EvictFreeListItems(Page* p); 1752 intptr_t EvictFreeListItems(Page* page);
1738 bool ContainsPageFreeListItems(Page* p); 1753 bool ContainsPageFreeListItems(Page* page);
1739 1754
1740 PagedSpace* owner() { return owner_; } 1755 PagedSpace* owner() { return owner_; }
1741 intptr_t wasted_bytes() { return wasted_bytes_; } 1756 intptr_t wasted_bytes() { return wasted_bytes_.Value(); }
1742 base::Mutex* mutex() { return &mutex_; } 1757
1758 template <typename Callback>
1759 void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
1760 FreeListCategory* current = categories_[type];
1761 while (current != nullptr) {
1762 FreeListCategory* next = current->next();
1763 callback(current);
1764 current = next;
1765 }
1766 }
1767
1768 template <typename Callback>
1769 void ForAllFreeListCategories(Callback callback) {
1770 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
1771 ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
1772 }
1773 }
1774
1775 bool AddCategory(FreeListCategory* category);
1776 void RemoveCategory(FreeListCategory* category);
1777 void PrintCategories(FreeListCategoryType type);
1743 1778
1744 #ifdef DEBUG 1779 #ifdef DEBUG
1745 void Zap();
1746 intptr_t SumFreeLists(); 1780 intptr_t SumFreeLists();
1747 bool IsVeryLong(); 1781 bool IsVeryLong();
1748 #endif 1782 #endif
1749 1783
1750 private: 1784 private:
1785 class FreeListCategoryIterator {
1786 public:
1787 FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
1788 : current_(free_list->categories_[type]) {}
1789
1790 bool HasNext() { return current_ != nullptr; }
1791
1792 FreeListCategory* Next() {
1793 DCHECK(HasNext());
1794 FreeListCategory* tmp = current_;
1795 current_ = current_->next();
1796 return tmp;
1797 }
1798
1799 private:
1800 FreeListCategory* current_;
1801 };
1802
1751 // The size range of blocks, in bytes. 1803 // The size range of blocks, in bytes.
1752 static const int kMinBlockSize = 3 * kPointerSize; 1804 static const int kMinBlockSize = 3 * kPointerSize;
1753 static const int kMaxBlockSize = Page::kAllocatableMemory; 1805 static const int kMaxBlockSize = Page::kAllocatableMemory;
1754 1806
1755 static const int kTiniestListMax = 0xa * kPointerSize; 1807 static const int kTiniestListMax = 0xa * kPointerSize;
1756 static const int kTinyListMax = 0x1f * kPointerSize; 1808 static const int kTinyListMax = 0x1f * kPointerSize;
1757 static const int kSmallListMax = 0xff * kPointerSize; 1809 static const int kSmallListMax = 0xff * kPointerSize;
1758 static const int kMediumListMax = 0x7ff * kPointerSize; 1810 static const int kMediumListMax = 0x7ff * kPointerSize;
1759 static const int kLargeListMax = 0x3fff * kPointerSize; 1811 static const int kLargeListMax = 0x3fff * kPointerSize;
1760 static const int kTinyAllocationMax = kTiniestListMax; 1812 static const int kTinyAllocationMax = kTiniestListMax;
1761 static const int kSmallAllocationMax = kTinyListMax; 1813 static const int kSmallAllocationMax = kTinyListMax;
1762 static const int kMediumAllocationMax = kSmallListMax; 1814 static const int kMediumAllocationMax = kSmallListMax;
1763 static const int kLargeAllocationMax = kMediumListMax; 1815 static const int kLargeAllocationMax = kMediumListMax;
1764 1816
1765 FreeSpace* FindNodeFor(int size_in_bytes, int* node_size); 1817 FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
1766 FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
1767 1818
1768 FreeListCategory* GetFreeListCategory(FreeListCategoryType category) { 1819 // Walks all available categories for a given |type| and tries to retrieve
1769 return &category_[category]; 1820 // a node. Returns nullptr if the category is empty.
1770 } 1821 FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size);
1822
1823 // Tries to retrieve a node from the first category in a given |type|.
1824 // Returns nullptr if the category is empty.
1825 FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size,
1826 int minimum_size);
1827
1828 // Searches a given |type| for a node of at least |minimum_size|.
1829 FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size,
1830 int minimum_size);
1771 1831
1772 FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) { 1832 FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
1773 if (size_in_bytes <= kTiniestListMax) { 1833 if (size_in_bytes <= kTiniestListMax) {
1774 return kTiniest; 1834 return kTiniest;
1775 } else if (size_in_bytes <= kTinyListMax) { 1835 } else if (size_in_bytes <= kTinyListMax) {
1776 return kTiny; 1836 return kTiny;
1777 } else if (size_in_bytes <= kSmallListMax) { 1837 } else if (size_in_bytes <= kSmallListMax) {
1778 return kSmall; 1838 return kSmall;
1779 } else if (size_in_bytes <= kMediumListMax) { 1839 } else if (size_in_bytes <= kMediumListMax) {
1780 return kMedium; 1840 return kMedium;
1781 } else if (size_in_bytes <= kLargeListMax) { 1841 } else if (size_in_bytes <= kLargeListMax) {
1782 return kLarge; 1842 return kLarge;
1783 } 1843 }
1784 return kHuge; 1844 return kHuge;
1785 } 1845 }
1786 1846
1787 // The tiny categories are not used for fast allocation. 1847 // The tiny categories are not used for fast allocation.
1788 FreeListCategoryType SelectFastAllocationFreeListCategoryType( 1848 FreeListCategoryType SelectFastAllocationFreeListCategoryType(
1789 size_t size_in_bytes) { 1849 size_t size_in_bytes) {
1790 if (size_in_bytes <= kSmallAllocationMax) { 1850 if (size_in_bytes <= kSmallAllocationMax) {
1791 return kSmall; 1851 return kSmall;
1792 } else if (size_in_bytes <= kMediumAllocationMax) { 1852 } else if (size_in_bytes <= kMediumAllocationMax) {
1793 return kMedium; 1853 return kMedium;
1794 } else if (size_in_bytes <= kLargeAllocationMax) { 1854 } else if (size_in_bytes <= kLargeAllocationMax) {
1795 return kLarge; 1855 return kLarge;
1796 } 1856 }
1797 return kHuge; 1857 return kHuge;
1798 } 1858 }
1799 1859
1860 FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
1861
1800 PagedSpace* owner_; 1862 PagedSpace* owner_;
1801 base::Mutex mutex_; 1863 AtomicNumber<intptr_t> wasted_bytes_;
1802 intptr_t wasted_bytes_; 1864 FreeListCategory* categories_[kNumberOfCategories];
1803 FreeListCategory category_[kNumberOfCategories]; 1865
1866 friend class FreeListCategory;
1804 1867
1805 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); 1868 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1806 }; 1869 };
1807 1870
1808 1871
1809 class AllocationResult { 1872 class AllocationResult {
1810 public: 1873 public:
1811 // Implicit constructor from Object*. 1874 // Implicit constructor from Object*.
1812 AllocationResult(Object* object) // NOLINT 1875 AllocationResult(Object* object) // NOLINT
1813 : object_(object) { 1876 : object_(object) {
(...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after
1895 1958
1896 private: 1959 private:
1897 LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info); 1960 LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
1898 1961
1899 void Close(); 1962 void Close();
1900 1963
1901 Heap* heap_; 1964 Heap* heap_;
1902 AllocationInfo allocation_info_; 1965 AllocationInfo allocation_info_;
1903 }; 1966 };
1904 1967
1905
1906 class PagedSpace : public Space { 1968 class PagedSpace : public Space {
1907 public: 1969 public:
1908 static const intptr_t kCompactionMemoryWanted = 500 * KB; 1970 static const intptr_t kCompactionMemoryWanted = 500 * KB;
1909 1971
1910 // Creates a space with an id. 1972 // Creates a space with an id.
1911 PagedSpace(Heap* heap, AllocationSpace id, Executability executable); 1973 PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
1912 1974
1913 ~PagedSpace() override { TearDown(); } 1975 ~PagedSpace() override { TearDown(); }
1914 1976
1915 // Set up the space using the given address range of virtual memory (from 1977 // Set up the space using the given address range of virtual memory (from
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
1952 // The stats are rebuilt during sweeping by adding each page to the 2014 // The stats are rebuilt during sweeping by adding each page to the
1953 // capacity and the size when it is encountered. As free spaces are 2015 // capacity and the size when it is encountered. As free spaces are
1954 // discovered during the sweeping they are subtracted from the size and added 2016 // discovered during the sweeping they are subtracted from the size and added
1955 // to the available and wasted totals. 2017 // to the available and wasted totals.
  // Resets this space's size accounting and the free list's own statistics;
  // the stats are rebuilt later (during sweeping, per the comment in SOURCE).
  void ClearStats() {
    accounting_stats_.ClearSize();
    free_list_.ResetStats();
    ResetFreeListStatistics();
  }
1961 2023
1962 // Increases the number of available bytes of that space.
1963 void AddToAccountingStats(intptr_t bytes) {
1964 accounting_stats_.DeallocateBytes(bytes);
1965 }
1966
1967 // Available bytes without growing. These are the bytes on the free list. 2024 // Available bytes without growing. These are the bytes on the free list.
1968 // The bytes in the linear allocation area are not included in this total 2025 // The bytes in the linear allocation area are not included in this total
1969 // because updating the stats would slow down allocation. New pages are 2026 // because updating the stats would slow down allocation. New pages are
1970 // immediately added to the free list so they show up here. 2027 // immediately added to the free list so they show up here.
1971 intptr_t Available() override { return free_list_.Available(); } 2028 intptr_t Available() override { return free_list_.Available(); }
1972 2029
1973 // Allocated bytes in this space. Garbage bytes that were not found due to 2030 // Allocated bytes in this space. Garbage bytes that were not found due to
1974 // concurrent sweeping are counted as being allocated! The bytes in the 2031 // concurrent sweeping are counted as being allocated! The bytes in the
1975 // current linear allocation area (between top and limit) are also counted 2032 // current linear allocation area (between top and limit) are also counted
1976 // here. 2033 // here.
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
2012 // Allocate the requested number of bytes in the space and consider allocation 2069 // Allocate the requested number of bytes in the space and consider allocation
2013 // alignment if needed. 2070 // alignment if needed.
2014 MUST_USE_RESULT inline AllocationResult AllocateRaw( 2071 MUST_USE_RESULT inline AllocationResult AllocateRaw(
2015 int size_in_bytes, AllocationAlignment alignment); 2072 int size_in_bytes, AllocationAlignment alignment);
2016 2073
2017 // Give a block of memory to the space's free list. It might be added to 2074 // Give a block of memory to the space's free list. It might be added to
2018 // the free list or accounted as waste. 2075 // the free list or accounted as waste.
2019 // If add_to_freelist is false then just accounting stats are updated and 2076 // If add_to_freelist is false then just accounting stats are updated and
2020 // no attempt to add area to free list is made. 2077 // no attempt to add area to free list is made.
  // Returns [start, start+size_in_bytes) to the free list (linking its
  // category) and records the bytes as deallocated in the accounting stats.
  // Returns the number of usable bytes, i.e. size minus what the free list
  // reported as wasted.
  int Free(Address start, int size_in_bytes) {
    int wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
    accounting_stats_.DeallocateBytes(size_in_bytes);
    return size_in_bytes - wasted;
  }
2026 2083
  // Like Free(), but passes kDoNotLinkCategory and skips the accounting-stats
  // update — presumably used when the page's categories are relinked in bulk
  // later (TODO confirm against callers). Returns usable (non-wasted) bytes.
  int UnaccountedFree(Address start, int size_in_bytes) {
    int wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
    return size_in_bytes - wasted;
  }
2088
2027 void ResetFreeList() { free_list_.Reset(); } 2089 void ResetFreeList() { free_list_.Reset(); }
2028 2090
2029 // Set space allocation info. 2091 // Set space allocation info.
  // Installs a new linear allocation area [top, limit), first publishing the
  // old top as the page's high-water mark.
  void SetTopAndLimit(Address top, Address limit) {
    // Either the area is empty (top == limit) or both bounds lie on the
    // same page.
    DCHECK(top == limit ||
           Page::FromAddress(top) == Page::FromAddress(limit - 1));
    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    allocation_info_.Reset(top, limit);
  }
2036 2098
2037 // Empty space allocation info, returning unused area to free list. 2099 // Empty space allocation info, returning unused area to free list.
  // Releases the remaining linear allocation area back to the free list and
  // clears top/limit.
  void EmptyAllocationInfo() {
    // Mark the old linear allocation area with a free space map so it can be
    // skipped when scanning the heap.
    int old_linear_size = static_cast<int>(limit() - top());
    Free(top(), old_linear_size);
    SetTopAndLimit(NULL, NULL);
  }
2045 2107
2046 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); } 2108 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
2047 2109
2048 void IncreaseCapacity(int size); 2110 void IncreaseCapacity(int size);
2049 2111
2050 // Releases an unused page and shrinks the space. 2112 // Releases an unused page and shrinks the space.
2051 void ReleasePage(Page* page, bool evict_free_list_items); 2113 void ReleasePage(Page* page);
2052 2114
2053 // The dummy page that anchors the linked list of pages. 2115 // The dummy page that anchors the linked list of pages.
2054 Page* anchor() { return &anchor_; } 2116 Page* anchor() { return &anchor_; }
2055 2117
2056 #ifdef VERIFY_HEAP 2118 #ifdef VERIFY_HEAP
2057 // Verify integrity of this space. 2119 // Verify integrity of this space.
2058 virtual void Verify(ObjectVisitor* visitor); 2120 virtual void Verify(ObjectVisitor* visitor);
2059 2121
2060 // Overridden by subclasses to verify space-specific object 2122 // Overridden by subclasses to verify space-specific object
2061 // properties (e.g., only maps or free-list nodes are in map space). 2123 // properties (e.g., only maps or free-list nodes are in map space).
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
2097 virtual bool is_local() { return false; } 2159 virtual bool is_local() { return false; }
2098 2160
2099 // Merges {other} into the current space. Note that this modifies {other}, 2161 // Merges {other} into the current space. Note that this modifies {other},
2100 // e.g., removes its bump pointer area and resets statistics. 2162 // e.g., removes its bump pointer area and resets statistics.
2101 void MergeCompactionSpace(CompactionSpace* other); 2163 void MergeCompactionSpace(CompactionSpace* other);
2102 2164
2103 // Refills the free list from the corresponding free list filled by the 2165 // Refills the free list from the corresponding free list filled by the
2104 // sweeper. 2166 // sweeper.
2105 virtual void RefillFreeList(); 2167 virtual void RefillFreeList();
2106 2168
2169 FreeList* free_list() { return &free_list_; }
2170
2171 base::Mutex* mutex() { return &space_mutex_; }
2172
2173 inline void UnlinkFreeListCategories(Page* page);
2174 inline intptr_t RelinkFreeListCategories(Page* page);
2175
2107 protected: 2176 protected:
2108 void AddMemory(Address start, intptr_t size);
2109
2110 void MoveOverFreeMemory(PagedSpace* other);
2111
2112 // PagedSpaces that should be included in snapshots have different, i.e., 2177 // PagedSpaces that should be included in snapshots have different, i.e.,
2113 // smaller, initial pages. 2178 // smaller, initial pages.
2114 virtual bool snapshotable() { return true; } 2179 virtual bool snapshotable() { return true; }
2115 2180
2116 FreeList* free_list() { return &free_list_; }
2117
2118 bool HasPages() { return anchor_.next_page() != &anchor_; } 2181 bool HasPages() { return anchor_.next_page() != &anchor_; }
2119 2182
2120 // Cleans up the space, frees all pages in this space except those belonging 2183 // Cleans up the space, frees all pages in this space except those belonging
2121 // to the initial chunk, uncommits addresses in the initial chunk. 2184 // to the initial chunk, uncommits addresses in the initial chunk.
2122 void TearDown(); 2185 void TearDown();
2123 2186
2124 // Expands the space by allocating a fixed number of pages. Returns false if 2187 // Expands the space by allocating a fixed number of pages. Returns false if
2125 // it cannot allocate requested number of pages from OS, or if the hard heap 2188 // it cannot allocate requested number of pages from OS, or if the hard heap
2126 // size limit has been hit. 2189 // size limit has been hit.
2127 bool Expand(); 2190 bool Expand();
(...skipping 676 matching lines...) Expand 10 before | Expand all | Expand 10 after
2804 // ----------------------------------------------------------------------------- 2867 // -----------------------------------------------------------------------------
2805 // Compaction space that is used temporarily during compaction. 2868 // Compaction space that is used temporarily during compaction.
2806 2869
// Paged space used temporarily by a single compaction task; merged back into
// the owning space when compaction finishes (see MergeCompactionSpace).
// NOTE(review): reconstructed from the patch's new column — the old-side
// RefillFreeList() override is removed by this patch set.
class CompactionSpace : public PagedSpace {
 public:
  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
      : PagedSpace(heap, id, executable) {}

  // Compaction spaces are task-local.
  bool is_local() override { return true; }

 protected:
  // The space is temporary and not included in any snapshots.
  bool snapshotable() override { return false; }

  MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
      int size_in_bytes) override;
};
2823 2884
2824 2885
2825 // A collection of |CompactionSpace|s used by a single compaction task. 2886 // A collection of |CompactionSpace|s used by a single compaction task.
(...skipping 213 matching lines...) Expand 10 before | Expand all | Expand 10 after
3039 count = 0; 3100 count = 0;
3040 } 3101 }
3041 // Must be small, since an iteration is used for lookup. 3102 // Must be small, since an iteration is used for lookup.
3042 static const int kMaxComments = 64; 3103 static const int kMaxComments = 64;
3043 }; 3104 };
3044 #endif 3105 #endif
3045 } // namespace internal 3106 } // namespace internal
3046 } // namespace v8 3107 } // namespace v8
3047 3108
3048 #endif // V8_HEAP_SPACES_H_ 3109 #endif // V8_HEAP_SPACES_H_
OLDNEW
« no previous file with comments | « src/heap/mark-compact.cc ('k') | src/heap/spaces.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698