Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(60)

Side by Side Diff: src/heap/spaces.h

Issue 1772733002: [heap] Move to two-level free-list (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Rebase again Created 4 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef V8_HEAP_SPACES_H_ 5 #ifndef V8_HEAP_SPACES_H_
6 #define V8_HEAP_SPACES_H_ 6 #define V8_HEAP_SPACES_H_
7 7
8 #include "src/allocation.h" 8 #include "src/allocation.h"
9 #include "src/atomic-utils.h" 9 #include "src/atomic-utils.h"
10 #include "src/base/atomicops.h" 10 #include "src/base/atomicops.h"
11 #include "src/base/bits.h" 11 #include "src/base/bits.h"
12 #include "src/base/platform/mutex.h" 12 #include "src/base/platform/mutex.h"
13 #include "src/flags.h" 13 #include "src/flags.h"
14 #include "src/hashmap.h" 14 #include "src/hashmap.h"
15 #include "src/list.h" 15 #include "src/list.h"
16 #include "src/objects.h" 16 #include "src/objects.h"
17 #include "src/utils.h" 17 #include "src/utils.h"
18 18
19 namespace v8 { 19 namespace v8 {
20 namespace internal { 20 namespace internal {
21 21
22 class AllocationInfo; 22 class AllocationInfo;
23 class AllocationObserver; 23 class AllocationObserver;
24 class CompactionSpace; 24 class CompactionSpace;
25 class CompactionSpaceCollection; 25 class CompactionSpaceCollection;
26 class FreeList; 26 class FreeList;
27 class Isolate; 27 class Isolate;
28 class MemoryAllocator; 28 class MemoryAllocator;
29 class MemoryChunk; 29 class MemoryChunk;
30 class Page;
30 class PagedSpace; 31 class PagedSpace;
31 class SemiSpace; 32 class SemiSpace;
32 class SkipList; 33 class SkipList;
33 class SlotsBuffer; 34 class SlotsBuffer;
34 class SlotSet; 35 class SlotSet;
35 class TypedSlotSet; 36 class TypedSlotSet;
36 class Space; 37 class Space;
37 38
38 // ----------------------------------------------------------------------------- 39 // -----------------------------------------------------------------------------
39 // Heap structures: 40 // Heap structures:
(...skipping 242 matching lines...) Expand 10 before | Expand all | Expand 10 after
282 // Clear all cells till the cell containing the last index. 283 // Clear all cells till the cell containing the last index.
283 for (uint32_t i = start_cell_index; i < end_cell_index; i++) { 284 for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
284 cells()[i] = 0; 285 cells()[i] = 0;
285 } 286 }
286 // Clear all bits in the last cell till the last bit before index. 287 // Clear all bits in the last cell till the last bit before index.
287 uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1); 288 uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1);
288 cells()[end_cell_index] &= clear_mask; 289 cells()[end_cell_index] &= clear_mask;
289 } 290 }
290 }; 291 };
291 292
293 enum FreeListCategoryType {
294 kTiniest,
295 kTiny,
296 kSmall,
297 kMedium,
298 kLarge,
299 kHuge,
300
301 kFirstCategory = kTiniest,
302 kLastCategory = kHuge,
303 kNumberOfCategories = kLastCategory + 1,
304 kInvalidCategory
305 };
306
307 // A free list category maintains a linked list of free memory blocks.
308 class FreeListCategory {
309 public:
310 static const int kSize = kIntSize + // FreeListCategoryType type_
311 kIntSize + // int available_
312 kPointerSize + // FreeSpace* top_
313 kPointerSize + // FreeList* owner_
314 kPointerSize + // FreeListCategory* prev_
315 kPointerSize; // FreeListCategory* next_
316
317 FreeListCategory()
318 : type_(kInvalidCategory),
319 available_(0),
320 top_(nullptr),
321 prev_(nullptr),
322 next_(nullptr) {}
323
324 void Initialize(FreeListCategoryType type) {
325 type_ = type;
326 available_ = 0;
327 top_ = nullptr;
328 prev_ = nullptr;
329 next_ = nullptr;
330 }
331
332 void Invalidate();
333
334 void Reset();
335
336 void ResetStats() { Reset(); }
337
338 void RepairFreeList(Heap* heap);
339
340 // Relinks the category into the currently owning free list. Requires that the
341 // category is currently unlinked.
342 void Relink();
343
344 bool Free(FreeSpace* node, int size_in_bytes, bool keep_local = false);
345
346 // Picks a node from the list and stores its size in |node_size|. Returns
347 // nullptr if the category is empty.
348 FreeSpace* PickNodeFromList(int* node_size);
349
350 // Performs a single try to pick a node of at least |minimum_size| from the
351 // category. Stores the actual size in |node_size|. Returns nullptr if no
352 // node is found.
353 FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size);
354
355 // Picks a node of at least |minimum_size| from the category. Stores the
356 // actual size in |node_size|. Returns nullptr if no node is found.
357 FreeSpace* SearchForNodeInList(int minimum_size, int* node_size);
358
359 inline FreeList* owner();
360 inline bool is_linked();
361 bool is_empty() { return top() == nullptr; }
362 int available() const { return available_; }
363
364 #ifdef DEBUG
365 intptr_t SumFreeList();
366 int FreeListLength();
367 bool IsVeryLong();
368 #endif
369
370 private:
371 // For debug builds we accurately compute free lists lengths up until
372 // {kVeryLongFreeList} by manually walking the list.
373 static const int kVeryLongFreeList = 500;
374
375 inline Page* page();
376
377 FreeSpace* top() { return top_; }
378 void set_top(FreeSpace* top) { top_ = top; }
379 FreeListCategory* prev() { return prev_; }
380 void set_prev(FreeListCategory* prev) { prev_ = prev; }
381 FreeListCategory* next() { return next_; }
382 void set_next(FreeListCategory* next) { next_ = next; }
383
384 // |type_|: The type of this free list category.
385 FreeListCategoryType type_;
386
387 // |available_|: Total available bytes in all blocks of this free list
388 // category.
389 int available_;
390
391 // |top_|: Points to the top FreeSpace* in the free list category.
392 FreeSpace* top_;
393
394 FreeListCategory* prev_;
395 FreeListCategory* next_;
396
397 friend class FreeList;
398 friend class PagedSpace;
399 };
292 400
293 // MemoryChunk represents a memory region owned by a specific space. 401 // MemoryChunk represents a memory region owned by a specific space.
294 // It is divided into the header and the body. Chunk start is always 402 // It is divided into the header and the body. Chunk start is always
295 // 1MB aligned. Start of the body is aligned so it can accommodate 403 // 1MB aligned. Start of the body is aligned so it can accommodate
296 // any heap object. 404 // any heap object.
297 class MemoryChunk { 405 class MemoryChunk {
298 public: 406 public:
299 enum MemoryChunkFlags { 407 enum MemoryChunkFlags {
300 IS_EXECUTABLE, 408 IS_EXECUTABLE,
301 POINTERS_TO_HERE_ARE_INTERESTING, 409 POINTERS_TO_HERE_ARE_INTERESTING,
(...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after
404 512
405 static const size_t kMinHeaderSize = 513 static const size_t kMinHeaderSize =
406 kWriteBarrierCounterOffset + 514 kWriteBarrierCounterOffset +
407 kIntptrSize // intptr_t write_barrier_counter_ 515 kIntptrSize // intptr_t write_barrier_counter_
408 + kPointerSize // AtomicValue high_water_mark_ 516 + kPointerSize // AtomicValue high_water_mark_
409 + kPointerSize // base::Mutex* mutex_ 517 + kPointerSize // base::Mutex* mutex_
410 + kPointerSize // base::AtomicWord parallel_sweeping_ 518 + kPointerSize // base::AtomicWord parallel_sweeping_
411 + kPointerSize // AtomicValue parallel_compaction_ 519 + kPointerSize // AtomicValue parallel_compaction_
412 + 2 * kPointerSize // AtomicNumber free-list statistics 520 + 2 * kPointerSize // AtomicNumber free-list statistics
413 + kPointerSize // AtomicValue next_chunk_ 521 + kPointerSize // AtomicValue next_chunk_
414 + kPointerSize; // AtomicValue prev_chunk_ 522 + kPointerSize // AtomicValue prev_chunk_
523 // FreeListCategory categories_[kNumberOfCategories]
524 + FreeListCategory::kSize * kNumberOfCategories;
415 525
416 // We add some more space to the computed header size to account for missing 526 // We add some more space to the computed header size to account for missing
417 // alignment requirements in our computation. 527 // alignment requirements in our computation.
418 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. 528 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
419 static const size_t kHeaderSize = kMinHeaderSize; 529 static const size_t kHeaderSize = kMinHeaderSize;
420 530
421 static const int kBodyOffset = 531 static const int kBodyOffset =
422 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); 532 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
423 533
424 // The start offset of the object area in a page. Aligned to both maps and 534 // The start offset of the object area in a page. Aligned to both maps and
(...skipping 161 matching lines...) Expand 10 before | Expand all | Expand 10 after
586 696
587 bool IsEvacuationCandidate() { 697 bool IsEvacuationCandidate() {
588 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE))); 698 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
589 return IsFlagSet(EVACUATION_CANDIDATE); 699 return IsFlagSet(EVACUATION_CANDIDATE);
590 } 700 }
591 701
592 bool CanAllocate() { 702 bool CanAllocate() {
593 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE); 703 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
594 } 704 }
595 705
596 void MarkEvacuationCandidate() {
597 DCHECK(!IsFlagSet(NEVER_EVACUATE));
598 DCHECK_NULL(old_to_old_slots_);
599 DCHECK_NULL(typed_old_to_old_slots_);
600 SetFlag(EVACUATION_CANDIDATE);
601 }
602
603 void ClearEvacuationCandidate() {
604 DCHECK_NULL(old_to_old_slots_);
605 DCHECK_NULL(typed_old_to_old_slots_);
606 ClearFlag(EVACUATION_CANDIDATE);
607 }
608
609 bool ShouldSkipEvacuationSlotRecording() { 706 bool ShouldSkipEvacuationSlotRecording() {
610 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; 707 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
611 } 708 }
612 709
613 Executability executable() { 710 Executability executable() {
614 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; 711 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
615 } 712 }
616 713
617 bool InNewSpace() { 714 bool InNewSpace() {
618 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; 715 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
(...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after
709 806
710 // PagedSpace free-list statistics. 807 // PagedSpace free-list statistics.
711 AtomicNumber<intptr_t> available_in_free_list_; 808 AtomicNumber<intptr_t> available_in_free_list_;
712 AtomicNumber<intptr_t> wasted_memory_; 809 AtomicNumber<intptr_t> wasted_memory_;
713 810
714 // next_chunk_ holds a pointer of type MemoryChunk 811 // next_chunk_ holds a pointer of type MemoryChunk
715 AtomicValue<MemoryChunk*> next_chunk_; 812 AtomicValue<MemoryChunk*> next_chunk_;
716 // prev_chunk_ holds a pointer of type MemoryChunk 813 // prev_chunk_ holds a pointer of type MemoryChunk
717 AtomicValue<MemoryChunk*> prev_chunk_; 814 AtomicValue<MemoryChunk*> prev_chunk_;
718 815
816 FreeListCategory categories_[kNumberOfCategories];
817
719 private: 818 private:
720 void InitializeReservedMemory() { reservation_.Reset(); } 819 void InitializeReservedMemory() { reservation_.Reset(); }
721 820
722 friend class MemoryAllocator; 821 friend class MemoryAllocator;
723 friend class MemoryChunkValidator; 822 friend class MemoryChunkValidator;
724 }; 823 };
725 824
726 enum FreeListCategoryType {
727 kTiniest,
728 kTiny,
729 kSmall,
730 kMedium,
731 kLarge,
732 kHuge,
733
734 kFirstCategory = kTiniest,
735 kLastCategory = kHuge,
736 kNumberOfCategories = kLastCategory + 1
737 };
738
739 // ----------------------------------------------------------------------------- 825 // -----------------------------------------------------------------------------
740 // A page is a memory chunk of size 1MB. Large object pages may be larger. 826 // A page is a memory chunk of size 1MB. Large object pages may be larger.
741 // 827 //
742 // The only way to get a page pointer is by calling factory methods: 828 // The only way to get a page pointer is by calling factory methods:
743 // Page* p = Page::FromAddress(addr); or 829 // Page* p = Page::FromAddress(addr); or
744 // Page* p = Page::FromAllocationTop(top); 830 // Page* p = Page::FromAllocationTop(top);
745 class Page : public MemoryChunk { 831 class Page : public MemoryChunk {
746 public: 832 public:
747 // Returns the page containing a given address. The address ranges 833 // Returns the page containing a given address. The address ranges
748 // from [page_addr .. page_addr + kPageSize[ 834 // from [page_addr .. page_addr + kPageSize[
(...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after
832 return concurrent_sweeping_state().Value() == kSweepingDone; 918 return concurrent_sweeping_state().Value() == kSweepingDone;
833 } 919 }
834 920
835 void ResetFreeListStatistics(); 921 void ResetFreeListStatistics();
836 922
837 int LiveBytesFromFreeList() { 923 int LiveBytesFromFreeList() {
838 return static_cast<int>(area_size() - wasted_memory() - 924 return static_cast<int>(area_size() - wasted_memory() -
839 available_in_free_list()); 925 available_in_free_list());
840 } 926 }
841 927
928 template <typename Callback>
929 inline void ForAllFreeListCategories(Callback callback) {
930 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
931 callback(&categories_[i]);
932 }
933 }
934
935 FreeListCategory* free_list_category(FreeListCategoryType type) {
936 return &categories_[type];
937 }
938
842 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \ 939 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \
843 type name() { return name##_.Value(); } \ 940 type name() { return name##_.Value(); } \
844 void set_##name(type name) { name##_.SetValue(name); } \ 941 void set_##name(type name) { name##_.SetValue(name); } \
845 void add_##name(type name) { name##_.Increment(name); } 942 void add_##name(type name) { name##_.Increment(name); }
846 943
847 FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory) 944 FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory)
848 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list) 945 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list)
849 946
850 #undef FRAGMENTATION_STATS_ACCESSORS 947 #undef FRAGMENTATION_STATS_ACCESSORS
851 948
852 #ifdef DEBUG 949 #ifdef DEBUG
853 void Print(); 950 void Print();
854 #endif // DEBUG 951 #endif // DEBUG
855 952
953 inline void MarkNeverAllocateForTesting();
954 inline void MarkEvacuationCandidate();
955 inline void ClearEvacuationCandidate();
956
957 private:
958 inline void InitializeFreeListCategories();
959
856 friend class MemoryAllocator; 960 friend class MemoryAllocator;
857 }; 961 };
858 962
859 963
860 class LargePage : public MemoryChunk { 964 class LargePage : public MemoryChunk {
861 public: 965 public:
862 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } 966 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
863 967
864 inline LargePage* next_page() { 968 inline LargePage* next_page() {
865 return static_cast<LargePage*>(next_chunk()); 969 return static_cast<LargePage*>(next_chunk());
(...skipping 619 matching lines...) Expand 10 before | Expand all | Expand 10 after
1485 1589
1486 // Zero out all the allocation statistics (i.e., no capacity). 1590 // Zero out all the allocation statistics (i.e., no capacity).
1487 void Clear() { 1591 void Clear() {
1488 capacity_ = 0; 1592 capacity_ = 0;
1489 max_capacity_ = 0; 1593 max_capacity_ = 0;
1490 size_ = 0; 1594 size_ = 0;
1491 } 1595 }
1492 1596
1493 void ClearSize() { size_ = capacity_; } 1597 void ClearSize() { size_ = capacity_; }
1494 1598
1495 // Reset the allocation statistics (i.e., available = capacity with no wasted
1496 // or allocated bytes).
1497 void Reset() {
1498 size_ = 0;
1499 }
1500
1501 // Accessors for the allocation statistics. 1599 // Accessors for the allocation statistics.
1502 intptr_t Capacity() { return capacity_; } 1600 intptr_t Capacity() { return capacity_; }
1503 intptr_t MaxCapacity() { return max_capacity_; } 1601 intptr_t MaxCapacity() { return max_capacity_; }
1504 intptr_t Size() { 1602 intptr_t Size() {
1505 CHECK_GE(size_, 0); 1603 CHECK_GE(size_, 0);
1506 return size_; 1604 return size_;
1507 } 1605 }
1508 1606
1509 // Grow the space by adding available bytes. They are initially marked as 1607 // Grow the space by adding available bytes. They are initially marked as
1510 // being in use (part of the size), but will normally be immediately freed, 1608 // being in use (part of the size), but will normally be immediately freed,
1511 // putting them on the free list and removing them from size_. 1609 // putting them on the free list and removing them from size_.
1512 void ExpandSpace(int size_in_bytes) { 1610 void ExpandSpace(int size_in_bytes) {
1513 capacity_ += size_in_bytes; 1611 capacity_ += size_in_bytes;
1514 size_ += size_in_bytes; 1612 size_ += size_in_bytes;
1515 if (capacity_ > max_capacity_) { 1613 if (capacity_ > max_capacity_) {
1516 max_capacity_ = capacity_; 1614 max_capacity_ = capacity_;
1517 } 1615 }
1518 CHECK(size_ >= 0); 1616 CHECK(size_ >= 0);
1519 } 1617 }
1520 1618
1521 // Shrink the space by removing available bytes. Since shrinking is done 1619 // Shrink the space by removing available bytes. Since shrinking is done
1522 // during sweeping, bytes have been marked as being in use (part of the size) 1620 // during sweeping, bytes have been marked as being in use (part of the size)
1523 // and are hereby freed. 1621 // and are hereby freed.
1524 void ShrinkSpace(int size_in_bytes) { 1622 void ShrinkSpace(int size_in_bytes) {
1525 capacity_ -= size_in_bytes; 1623 capacity_ -= size_in_bytes;
1526 size_ -= size_in_bytes; 1624 size_ -= size_in_bytes;
1527 CHECK(size_ >= 0); 1625 CHECK_GE(size_, 0);
1528 } 1626 }
1529 1627
1530 // Allocate from available bytes (available -> size). 1628 // Allocate from available bytes (available -> size).
1531 void AllocateBytes(intptr_t size_in_bytes) { 1629 void AllocateBytes(intptr_t size_in_bytes) {
1532 size_ += size_in_bytes; 1630 size_ += size_in_bytes;
1533 CHECK(size_ >= 0); 1631 CHECK_GE(size_, 0);
1534 } 1632 }
1535 1633
1536 // Free allocated bytes, making them available (size -> available). 1634 // Free allocated bytes, making them available (size -> available).
1537 void DeallocateBytes(intptr_t size_in_bytes) { 1635 void DeallocateBytes(intptr_t size_in_bytes) {
1538 size_ -= size_in_bytes; 1636 size_ -= size_in_bytes;
1539 CHECK_GE(size_, 0); 1637 CHECK_GE(size_, 0);
1540 } 1638 }
1541 1639
1542 // Merge {other} into {this}. 1640 // Merge {other} into {this}.
1543 void Merge(const AllocationStats& other) { 1641 void Merge(const AllocationStats& other) {
(...skipping 18 matching lines...) Expand all
1562 // bookkeeping structures) currently in the space. 1660 // bookkeeping structures) currently in the space.
1563 intptr_t capacity_; 1661 intptr_t capacity_;
1564 1662
1565 // |max_capacity_|: The maximum capacity ever observed. 1663 // |max_capacity_|: The maximum capacity ever observed.
1566 intptr_t max_capacity_; 1664 intptr_t max_capacity_;
1567 1665
1568 // |size_|: The number of allocated bytes. 1666 // |size_|: The number of allocated bytes.
1569 intptr_t size_; 1667 intptr_t size_;
1570 }; 1668 };
1571 1669
1572
1573 // A free list category maintains a linked list of free memory blocks.
1574 class FreeListCategory {
1575 public:
1576 FreeListCategory() : top_(nullptr), end_(nullptr), available_(0) {}
1577
1578 void Initialize(FreeList* owner, FreeListCategoryType type) {
1579 owner_ = owner;
1580 type_ = type;
1581 }
1582
1583 // Concatenates {category} into {this}.
1584 //
1585 // Note: Thread-safe.
1586 intptr_t Concatenate(FreeListCategory* category);
1587
1588 void Reset();
1589
1590 void Free(FreeSpace* node, int size_in_bytes);
1591
1592 // Pick a node from the list.
1593 FreeSpace* PickNodeFromList(int* node_size);
1594
1595 // Pick a node from the list and compare it against {size_in_bytes}. If the
1596 // node's size is greater or equal return the node and null otherwise.
1597 FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
1598
1599 // Search for a node of size {size_in_bytes}.
1600 FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size);
1601
1602 intptr_t EvictFreeListItemsInList(Page* p);
1603 bool ContainsPageFreeListItemsInList(Page* p);
1604
1605 void RepairFreeList(Heap* heap);
1606
1607 bool IsEmpty() { return top() == nullptr; }
1608
1609 FreeList* owner() { return owner_; }
1610 int available() const { return available_; }
1611
1612 #ifdef DEBUG
1613 intptr_t SumFreeList();
1614 int FreeListLength();
1615 bool IsVeryLong();
1616 #endif
1617
1618 private:
1619 // For debug builds we accurately compute free lists lengths up until
1620 // {kVeryLongFreeList} by manually walking the list.
1621 static const int kVeryLongFreeList = 500;
1622
1623 FreeSpace* top() { return top_.Value(); }
1624 void set_top(FreeSpace* top) { top_.SetValue(top); }
1625
1626 FreeSpace* end() const { return end_; }
1627 void set_end(FreeSpace* end) { end_ = end; }
1628
1629 // |type_|: The type of this free list category.
1630 FreeListCategoryType type_;
1631
1632 // |top_|: Points to the top FreeSpace* in the free list category.
1633 AtomicValue<FreeSpace*> top_;
1634
1635 // |end_|: Points to the end FreeSpace* in the free list category.
1636 FreeSpace* end_;
1637
1638 // |available_|: Total available bytes in all blocks of this free list
1639 // category.
1640 int available_;
1641
1642 // |owner_|: The owning free list of this category.
1643 FreeList* owner_;
1644 };
1645
1646 // A free list maintaining free blocks of memory. The free list is organized in 1670 // A free list maintaining free blocks of memory. The free list is organized in
1647 // a way to encourage objects allocated around the same time to be near each 1671 // a way to encourage objects allocated around the same time to be near each
1648 // other. The normal way to allocate is intended to be by bumping a 'top' 1672 // other. The normal way to allocate is intended to be by bumping a 'top'
1649 // pointer until it hits a 'limit' pointer. When the limit is hit we need to 1673 // pointer until it hits a 'limit' pointer. When the limit is hit we need to
1650 // find a new space to allocate from. This is done with the free list, which is 1674 // find a new space to allocate from. This is done with the free list, which is
1651 // divided up into rough categories to cut down on waste. Having finer 1675 // divided up into rough categories to cut down on waste. Having finer
1652 // categories would scatter allocation more. 1676 // categories would scatter allocation more.
1653 1677
1654 // The free list is organized in categories as follows: 1678 // The free list is organized in categories as follows:
1655 // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for 1679 // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
(...skipping 24 matching lines...) Expand all
1680 } else if (maximum_freed <= kMediumListMax) { 1704 } else if (maximum_freed <= kMediumListMax) {
1681 return kMediumAllocationMax; 1705 return kMediumAllocationMax;
1682 } else if (maximum_freed <= kLargeListMax) { 1706 } else if (maximum_freed <= kLargeListMax) {
1683 return kLargeAllocationMax; 1707 return kLargeAllocationMax;
1684 } 1708 }
1685 return maximum_freed; 1709 return maximum_freed;
1686 } 1710 }
1687 1711
1688 explicit FreeList(PagedSpace* owner); 1712 explicit FreeList(PagedSpace* owner);
1689 1713
1690 // The method concatenates {other} into {this} and returns the added bytes,
1691 // including waste.
1692 //
1693 // Note: Thread-safe.
1694 intptr_t Concatenate(FreeList* other);
1695
1696 // Adds a node on the free list. The block of size {size_in_bytes} starting 1714 // Adds a node on the free list. The block of size {size_in_bytes} starting
1697 // at {start} is placed on the free list. The return value is the number of 1715 // at {start} is placed on the free list. The return value is the number of
1698 // bytes that were not added to the free list, because the freed memory block 1716 // bytes that were not added to the free list, because the freed memory block
1699 // was too small. Bookkeeping information will be written to the block, i.e., 1717 // was too small. Bookkeeping information will be written to the block, i.e.,
1700 // its contents will be destroyed. The start address should be word aligned, 1718 // its contents will be destroyed. The start address should be word aligned,
1701 // and the size should be a non-zero multiple of the word size. 1719 // and the size should be a non-zero multiple of the word size.
1702 int Free(Address start, int size_in_bytes); 1720 int Free(Address start, int size_in_bytes, bool keep_local = false);
ulan 2016/03/09 15:03:57 Nit: enum would be better than bool
Michael Lippautz 2016/03/10 10:16:59 Done.
1703 1721
1704 // Allocate a block of size {size_in_bytes} from the free list. The block is 1722 // Allocate a block of size {size_in_bytes} from the free list. The block is
1705 // uninitialized. A failure is returned if no block is available. The size 1723 // uninitialized. A failure is returned if no block is available. The size
1706 // should be a non-zero multiple of the word size. 1724 // should be a non-zero multiple of the word size.
1707 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); 1725 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1708 1726
1709 // Clear the free list. 1727 // Clear the free list.
1710 void Reset(); 1728 void Reset();
1711 1729
1712 void ResetStats() { wasted_bytes_ = 0; } 1730 void ResetStats() {
1731 wasted_bytes_.SetValue(0);
1732 ForAllFreeListCategories(
1733 [](FreeListCategory* category) { category->ResetStats(); });
1734 }
1713 1735
1714 // Return the number of bytes available on the free list. 1736 // Return the number of bytes available on the free list.
1715 intptr_t Available() { 1737 intptr_t Available() {
1716 intptr_t available = 0; 1738 intptr_t available = 0;
1717 for (int i = kFirstCategory; i < kNumberOfCategories; i++) { 1739 ForAllFreeListCategories([&available](FreeListCategory* category) {
1718 available += category_[i].available(); 1740 available += category->available();
1719 } 1741 });
1720 return available; 1742 return available;
1721 } 1743 }
1722 1744
1723 // The method tries to find a {FreeSpace} node of at least {size_in_bytes}
1724 // size in the free list category exactly matching the size. If no suitable
1725 // node could be found, the method falls back to retrieving a {FreeSpace}
1726 // from the large or huge free list category.
1727 //
1728 // Can be used concurrently.
1729 MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);
1730
1731 bool IsEmpty() { 1745 bool IsEmpty() {
1732 for (int i = kFirstCategory; i < kNumberOfCategories; i++) { 1746 bool empty = true;
1733 if (!category_[i].IsEmpty()) return false; 1747 ForAllFreeListCategories([&empty](FreeListCategory* category) {
1734 } 1748 if (!category->is_empty()) empty = false;
1735 return true; 1749 });
1750 return empty;
1736 } 1751 }
1737 1752
1738 // Used after booting the VM. 1753 // Used after booting the VM.
1739 void RepairLists(Heap* heap); 1754 void RepairLists(Heap* heap);
1740 1755
1741 intptr_t EvictFreeListItems(Page* p); 1756 intptr_t EvictFreeListItems(Page* page);
1742 bool ContainsPageFreeListItems(Page* p); 1757 bool ContainsPageFreeListItems(Page* page);
1743 1758
1744 PagedSpace* owner() { return owner_; } 1759 PagedSpace* owner() { return owner_; }
1745 intptr_t wasted_bytes() { return wasted_bytes_; } 1760 intptr_t wasted_bytes() { return wasted_bytes_.Value(); }
1746 base::Mutex* mutex() { return &mutex_; } 1761
1762 template <typename Callback>
1763 void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
1764 FreeListCategory* current = categories_[type];
1765 while (current != nullptr) {
1766 FreeListCategory* next = current->next();
1767 callback(current);
1768 current = next;
1769 }
1770 }
1771
1772 template <typename Callback>
1773 void ForAllFreeListCategories(Callback callback) {
1774 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
1775 ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
1776 }
1777 }
1778
1779 bool AddCategory(FreeListCategory* category);
1780 void RemoveCategory(FreeListCategory* category);
1781 void PrintCategories(FreeListCategoryType type);
1747 1782
1748 #ifdef DEBUG 1783 #ifdef DEBUG
1749 void Zap();
1750 intptr_t SumFreeLists(); 1784 intptr_t SumFreeLists();
1751 bool IsVeryLong(); 1785 bool IsVeryLong();
1752 #endif 1786 #endif
1753 1787
1754 private: 1788 private:
1789 class FreeListCategoryIterator {
1790 public:
1791 FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
1792 : current_(free_list->categories_[type]) {}
1793
1794 bool HasNext() { return current_ != nullptr; }
1795
1796 FreeListCategory* Next() {
1797 DCHECK(HasNext());
1798 FreeListCategory* tmp = current_;
1799 current_ = current_->next();
1800 return tmp;
1801 }
1802
1803 private:
1804 FreeListCategory* current_;
1805 };
1806
1755 // The size range of blocks, in bytes. 1807 // The size range of blocks, in bytes.
1756 static const int kMinBlockSize = 3 * kPointerSize; 1808 static const int kMinBlockSize = 3 * kPointerSize;
1757 static const int kMaxBlockSize = Page::kAllocatableMemory; 1809 static const int kMaxBlockSize = Page::kAllocatableMemory;
1758 1810
1759 static const int kTiniestListMax = 0xa * kPointerSize; 1811 static const int kTiniestListMax = 0xa * kPointerSize;
1760 static const int kTinyListMax = 0x1f * kPointerSize; 1812 static const int kTinyListMax = 0x1f * kPointerSize;
1761 static const int kSmallListMax = 0xff * kPointerSize; 1813 static const int kSmallListMax = 0xff * kPointerSize;
1762 static const int kMediumListMax = 0x7ff * kPointerSize; 1814 static const int kMediumListMax = 0x7ff * kPointerSize;
1763 static const int kLargeListMax = 0x3fff * kPointerSize; 1815 static const int kLargeListMax = 0x3fff * kPointerSize;
1764 static const int kTinyAllocationMax = kTiniestListMax; 1816 static const int kTinyAllocationMax = kTiniestListMax;
1765 static const int kSmallAllocationMax = kTinyListMax; 1817 static const int kSmallAllocationMax = kTinyListMax;
1766 static const int kMediumAllocationMax = kSmallListMax; 1818 static const int kMediumAllocationMax = kSmallListMax;
1767 static const int kLargeAllocationMax = kMediumListMax; 1819 static const int kLargeAllocationMax = kMediumListMax;
1768 1820
1769 FreeSpace* FindNodeFor(int size_in_bytes, int* node_size); 1821 FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
1770 FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size); 1822 FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size);
1771 1823 FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size,
1772 FreeListCategory* GetFreeListCategory(FreeListCategoryType category) { 1824 int minimum_size);
1773 return &category_[category]; 1825 FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size,
1774 } 1826 int minimum_size);
1775 1827
1776 FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) { 1828 FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
1777 if (size_in_bytes <= kTiniestListMax) { 1829 if (size_in_bytes <= kTiniestListMax) {
1778 return kTiniest; 1830 return kTiniest;
1779 } else if (size_in_bytes <= kTinyListMax) { 1831 } else if (size_in_bytes <= kTinyListMax) {
1780 return kTiny; 1832 return kTiny;
1781 } else if (size_in_bytes <= kSmallListMax) { 1833 } else if (size_in_bytes <= kSmallListMax) {
1782 return kSmall; 1834 return kSmall;
1783 } else if (size_in_bytes <= kMediumListMax) { 1835 } else if (size_in_bytes <= kMediumListMax) {
1784 return kMedium; 1836 return kMedium;
1785 } else if (size_in_bytes <= kLargeListMax) { 1837 } else if (size_in_bytes <= kLargeListMax) {
1786 return kLarge; 1838 return kLarge;
1787 } 1839 }
1788 return kHuge; 1840 return kHuge;
1789 } 1841 }
1790 1842
1791 // The tiny categories are not used for fast allocation. 1843 // The tiny categories are not used for fast allocation.
1792 FreeListCategoryType SelectFastAllocationFreeListCategoryType( 1844 FreeListCategoryType SelectFastAllocationFreeListCategoryType(
1793 size_t size_in_bytes) { 1845 size_t size_in_bytes) {
1794 if (size_in_bytes <= kSmallAllocationMax) { 1846 if (size_in_bytes <= kSmallAllocationMax) {
1795 return kSmall; 1847 return kSmall;
1796 } else if (size_in_bytes <= kMediumAllocationMax) { 1848 } else if (size_in_bytes <= kMediumAllocationMax) {
1797 return kMedium; 1849 return kMedium;
1798 } else if (size_in_bytes <= kLargeAllocationMax) { 1850 } else if (size_in_bytes <= kLargeAllocationMax) {
1799 return kLarge; 1851 return kLarge;
1800 } 1852 }
1801 return kHuge; 1853 return kHuge;
1802 } 1854 }
1803 1855
1856 FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
1857
1804 PagedSpace* owner_; 1858 PagedSpace* owner_;
1805 base::Mutex mutex_; 1859 AtomicNumber<intptr_t> wasted_bytes_;
1806 intptr_t wasted_bytes_; 1860 FreeListCategory* categories_[kNumberOfCategories];
1807 FreeListCategory category_[kNumberOfCategories]; 1861
1862 friend class FreeListCategory;
1808 1863
1809 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); 1864 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1810 }; 1865 };
1811 1866
1812 1867
1813 class AllocationResult { 1868 class AllocationResult {
1814 public: 1869 public:
1815 // Implicit constructor from Object*. 1870 // Implicit constructor from Object*.
1816 AllocationResult(Object* object) // NOLINT 1871 AllocationResult(Object* object) // NOLINT
1817 : object_(object) { 1872 : object_(object) {
(...skipping 138 matching lines...) Expand 10 before | Expand all | Expand 10 after
1956 // The stats are rebuilt during sweeping by adding each page to the 2011 // The stats are rebuilt during sweeping by adding each page to the
1957 // capacity and the size when it is encountered. As free spaces are 2012 // capacity and the size when it is encountered. As free spaces are
1958 // discovered during the sweeping they are subtracted from the size and added 2013 // discovered during the sweeping they are subtracted from the size and added
1959 // to the available and wasted totals. 2014 // to the available and wasted totals.
1960 void ClearStats() { 2015 void ClearStats() {
1961 accounting_stats_.ClearSize(); 2016 accounting_stats_.ClearSize();
1962 free_list_.ResetStats(); 2017 free_list_.ResetStats();
1963 ResetFreeListStatistics(); 2018 ResetFreeListStatistics();
1964 } 2019 }
1965 2020
1966 // Increases the number of available bytes of that space.
1967 void AddToAccountingStats(intptr_t bytes) {
1968 accounting_stats_.DeallocateBytes(bytes);
1969 }
1970
1971 // Available bytes without growing. These are the bytes on the free list. 2021 // Available bytes without growing. These are the bytes on the free list.
1972 // The bytes in the linear allocation area are not included in this total 2022 // The bytes in the linear allocation area are not included in this total
1973 // because updating the stats would slow down allocation. New pages are 2023 // because updating the stats would slow down allocation. New pages are
1974 // immediately added to the free list so they show up here. 2024 // immediately added to the free list so they show up here.
1975 intptr_t Available() override { return free_list_.Available(); } 2025 intptr_t Available() override { return free_list_.Available(); }
1976 2026
1977 // Allocated bytes in this space. Garbage bytes that were not found due to 2027 // Allocated bytes in this space. Garbage bytes that were not found due to
1978 // concurrent sweeping are counted as being allocated! The bytes in the 2028 // concurrent sweeping are counted as being allocated! The bytes in the
1979 // current linear allocation area (between top and limit) are also counted 2029 // current linear allocation area (between top and limit) are also counted
1980 // here. 2030 // here.
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
2015 2065
2016 // Allocate the requested number of bytes in the space and consider allocation 2066 // Allocate the requested number of bytes in the space and consider allocation
2017 // alignment if needed. 2067 // alignment if needed.
2018 MUST_USE_RESULT inline AllocationResult AllocateRaw( 2068 MUST_USE_RESULT inline AllocationResult AllocateRaw(
2019 int size_in_bytes, AllocationAlignment alignment); 2069 int size_in_bytes, AllocationAlignment alignment);
2020 2070
2021 // Give a block of memory to the space's free list. It might be added to 2071 // Give a block of memory to the space's free list. It might be added to
2022 // the free list or accounted as waste. 2072 // the free list or accounted as waste.
2023 // If add_to_freelist is false then just accounting stats are updated and 2073 // If add_to_freelist is false then just accounting stats are updated and
2024 // no attempt to add area to free list is made. 2074 // no attempt to add area to free list is made.
2025 int Free(Address start, int size_in_bytes) { 2075 int Free(Address start, int size_in_bytes, bool keep_local = false) {
2026 int wasted = free_list_.Free(start, size_in_bytes); 2076 int wasted = free_list_.Free(start, size_in_bytes, keep_local);
ulan 2016/03/09 15:03:57 We can use UnaccountedFree here.
Michael Lippautz 2016/03/10 10:16:59 I redid how UnaccountedFree and Free calls are used.
2027 accounting_stats_.DeallocateBytes(size_in_bytes); 2077 accounting_stats_.DeallocateBytes(size_in_bytes);
2028 return size_in_bytes - wasted; 2078 return size_in_bytes - wasted;
2029 } 2079 }
2030 2080
2081 int UnaccountedFree(Address start, int size_in_bytes,
2082 bool keep_local = false) {
2083 int wasted = free_list_.Free(start, size_in_bytes, keep_local);
2084 return size_in_bytes - wasted;
2085 }
2086
2031 void ResetFreeList() { free_list_.Reset(); } 2087 void ResetFreeList() { free_list_.Reset(); }
2032 2088
2033 // Set space allocation info. 2089 // Set space allocation info.
2034 void SetTopAndLimit(Address top, Address limit) { 2090 void SetTopAndLimit(Address top, Address limit) {
2035 DCHECK(top == limit || 2091 DCHECK(top == limit ||
2036 Page::FromAddress(top) == Page::FromAddress(limit - 1)); 2092 Page::FromAddress(top) == Page::FromAddress(limit - 1));
2037 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); 2093 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2038 allocation_info_.Reset(top, limit); 2094 allocation_info_.Reset(top, limit);
2039 } 2095 }
2040 2096
2041 // Empty space allocation info, returning unused area to free list. 2097 // Empty space allocation info, returning unused area to free list.
2042 void EmptyAllocationInfo() { 2098 void EmptyAllocationInfo() {
2043 // Mark the old linear allocation area with a free space map so it can be 2099 // Mark the old linear allocation area with a free space map so it can be
2044 // skipped when scanning the heap. 2100 // skipped when scanning the heap.
2045 int old_linear_size = static_cast<int>(limit() - top()); 2101 int old_linear_size = static_cast<int>(limit() - top());
2046 Free(top(), old_linear_size); 2102 Free(top(), old_linear_size);
2047 SetTopAndLimit(NULL, NULL); 2103 SetTopAndLimit(NULL, NULL);
2048 } 2104 }
2049 2105
2050 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); } 2106 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
2051 2107
2052 void IncreaseCapacity(int size); 2108 void IncreaseCapacity(int size);
2053 2109
2054 // Releases an unused page and shrinks the space. 2110 // Releases an unused page and shrinks the space.
2055 void ReleasePage(Page* page, bool evict_free_list_items); 2111 void ReleasePage(Page* page);
2056 2112
2057 // The dummy page that anchors the linked list of pages. 2113 // The dummy page that anchors the linked list of pages.
2058 Page* anchor() { return &anchor_; } 2114 Page* anchor() { return &anchor_; }
2059 2115
2060 #ifdef VERIFY_HEAP 2116 #ifdef VERIFY_HEAP
2061 // Verify integrity of this space. 2117 // Verify integrity of this space.
2062 virtual void Verify(ObjectVisitor* visitor); 2118 virtual void Verify(ObjectVisitor* visitor);
2063 2119
2064 // Overridden by subclasses to verify space-specific object 2120 // Overridden by subclasses to verify space-specific object
2065 // properties (e.g., only maps or free-list nodes are in map space). 2121 // properties (e.g., only maps or free-list nodes are in map space).
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
2101 virtual bool is_local() { return false; } 2157 virtual bool is_local() { return false; }
2102 2158
2103 // Merges {other} into the current space. Note that this modifies {other}, 2159 // Merges {other} into the current space. Note that this modifies {other},
2104 // e.g., removes its bump pointer area and resets statistics. 2160 // e.g., removes its bump pointer area and resets statistics.
2105 void MergeCompactionSpace(CompactionSpace* other); 2161 void MergeCompactionSpace(CompactionSpace* other);
2106 2162
2107 // Refills the free list from the corresponding free list filled by the 2163 // Refills the free list from the corresponding free list filled by the
2108 // sweeper. 2164 // sweeper.
2109 virtual void RefillFreeList(); 2165 virtual void RefillFreeList();
2110 2166
2167 FreeList* free_list() { return &free_list_; }
2168
2169 base::Mutex* mutex() { return &space_mutex_; }
2170
2111 protected: 2171 protected:
2112 void AddMemory(Address start, intptr_t size);
2113
2114 void MoveOverFreeMemory(PagedSpace* other);
2115
2116 // PagedSpaces that should be included in snapshots have different, i.e., 2172 // PagedSpaces that should be included in snapshots have different, i.e.,
2117 // smaller, initial pages. 2173 // smaller, initial pages.
2118 virtual bool snapshotable() { return true; } 2174 virtual bool snapshotable() { return true; }
2119 2175
2120 FreeList* free_list() { return &free_list_; }
2121
2122 bool HasPages() { return anchor_.next_page() != &anchor_; } 2176 bool HasPages() { return anchor_.next_page() != &anchor_; }
2123 2177
2124 // Cleans up the space, frees all pages in this space except those belonging 2178 // Cleans up the space, frees all pages in this space except those belonging
2125 // to the initial chunk, uncommits addresses in the initial chunk. 2179 // to the initial chunk, uncommits addresses in the initial chunk.
2126 void TearDown(); 2180 void TearDown();
2127 2181
2128 // Expands the space by allocating a fixed number of pages. Returns false if 2182 // Expands the space by allocating a fixed number of pages. Returns false if
2129 // it cannot allocate requested number of pages from OS, or if the hard heap 2183 // it cannot allocate requested number of pages from OS, or if the hard heap
2130 // size limit has been hit. 2184 // size limit has been hit.
2131 bool Expand(); 2185 bool Expand();
(...skipping 676 matching lines...) Expand 10 before | Expand all | Expand 10 after
2808 // ----------------------------------------------------------------------------- 2862 // -----------------------------------------------------------------------------
2809 // Compaction space that is used temporarily during compaction. 2863 // Compaction space that is used temporarily during compaction.
2810 2864
2811 class CompactionSpace : public PagedSpace { 2865 class CompactionSpace : public PagedSpace {
2812 public: 2866 public:
2813 CompactionSpace(Heap* heap, AllocationSpace id, Executability executable) 2867 CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
2814 : PagedSpace(heap, id, executable) {} 2868 : PagedSpace(heap, id, executable) {}
2815 2869
2816 bool is_local() override { return true; } 2870 bool is_local() override { return true; }
2817 2871
2818 void RefillFreeList() override;
2819
2820 protected: 2872 protected:
2821 // The space is temporary and not included in any snapshots. 2873 // The space is temporary and not included in any snapshots.
2822 bool snapshotable() override { return false; } 2874 bool snapshotable() override { return false; }
2823 2875
2824 MUST_USE_RESULT HeapObject* SweepAndRetryAllocation( 2876 MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
2825 int size_in_bytes) override; 2877 int size_in_bytes) override;
2826 }; 2878 };
2827 2879
2828 2880
2829 // A collection of |CompactionSpace|s used by a single compaction task. 2881 // A collection of |CompactionSpace|s used by a single compaction task.
(...skipping 228 matching lines...) Expand 10 before | Expand all | Expand 10 after
3058 count = 0; 3110 count = 0;
3059 } 3111 }
3060 // Must be small, since an iteration is used for lookup. 3112 // Must be small, since an iteration is used for lookup.
3061 static const int kMaxComments = 64; 3113 static const int kMaxComments = 64;
3062 }; 3114 };
3063 #endif 3115 #endif
3064 } // namespace internal 3116 } // namespace internal
3065 } // namespace v8 3117 } // namespace v8
3066 3118
3067 #endif // V8_HEAP_SPACES_H_ 3119 #endif // V8_HEAP_SPACES_H_
OLDNEW
« src/heap/mark-compact.cc ('K') | « src/heap/mark-compact.cc ('k') | src/heap/spaces.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698