Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(4)

Side by Side Diff: src/spaces.h

Issue 185653004: Experimental parser: merge to r19637 (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/parser
Patch Set: Created 6 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/simulator.h ('k') | src/spaces.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after
96 96
97 // Some assertion macros used in the debugging mode. 97 // Some assertion macros used in the debugging mode.
98 98
99 #define ASSERT_PAGE_ALIGNED(address) \ 99 #define ASSERT_PAGE_ALIGNED(address) \
100 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) 100 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
101 101
102 #define ASSERT_OBJECT_ALIGNED(address) \ 102 #define ASSERT_OBJECT_ALIGNED(address) \
103 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0) 103 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
104 104
105 #define ASSERT_OBJECT_SIZE(size) \ 105 #define ASSERT_OBJECT_SIZE(size) \
106 ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize)) 106 ASSERT((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
107 107
108 #define ASSERT_PAGE_OFFSET(offset) \ 108 #define ASSERT_PAGE_OFFSET(offset) \
109 ASSERT((Page::kObjectStartOffset <= offset) \ 109 ASSERT((Page::kObjectStartOffset <= offset) \
110 && (offset <= Page::kPageSize)) 110 && (offset <= Page::kPageSize))
111 111
112 #define ASSERT_MAP_PAGE_INDEX(index) \ 112 #define ASSERT_MAP_PAGE_INDEX(index) \
113 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) 113 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
114 114
115 115
116 class PagedSpace; 116 class PagedSpace;
(...skipping 189 matching lines...) Expand 10 before | Expand all | Expand 10 after
306 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); 306 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
307 } 307 }
308 308
309 // Only works for addresses in pointer spaces, not data or code spaces. 309 // Only works for addresses in pointer spaces, not data or code spaces.
310 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); 310 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
311 311
312 Address address() { return reinterpret_cast<Address>(this); } 312 Address address() { return reinterpret_cast<Address>(this); }
313 313
314 bool is_valid() { return address() != NULL; } 314 bool is_valid() { return address() != NULL; }
315 315
316 MemoryChunk* next_chunk() const { return next_chunk_; } 316 MemoryChunk* next_chunk() const {
317 MemoryChunk* prev_chunk() const { return prev_chunk_; } 317 return reinterpret_cast<MemoryChunk*>(Acquire_Load(&next_chunk_));
318 }
318 319
319 void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; } 320 MemoryChunk* prev_chunk() const {
320 void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; } 321 return reinterpret_cast<MemoryChunk*>(Acquire_Load(&prev_chunk_));
322 }
323
324 void set_next_chunk(MemoryChunk* next) {
325 Release_Store(&next_chunk_, reinterpret_cast<AtomicWord>(next));
326 }
327
328 void set_prev_chunk(MemoryChunk* prev) {
329 Release_Store(&prev_chunk_, reinterpret_cast<AtomicWord>(prev));
330 }
321 331
322 Space* owner() const { 332 Space* owner() const {
323 if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == 333 if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
324 kFailureTag) { 334 kFailureTag) {
325 return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) - 335 return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
326 kFailureTag); 336 kFailureTag);
327 } else { 337 } else {
328 return NULL; 338 return NULL;
329 } 339 }
330 } 340 }
(...skipping 119 matching lines...) Expand 10 before | Expand all | Expand 10 after
450 // Set or clear multiple flags at a time. The flags in the mask 460 // Set or clear multiple flags at a time. The flags in the mask
451 // are set to the value in "flags", the rest retain the current value 461 // are set to the value in "flags", the rest retain the current value
452 // in flags_. 462 // in flags_.
453 void SetFlags(intptr_t flags, intptr_t mask) { 463 void SetFlags(intptr_t flags, intptr_t mask) {
454 flags_ = (flags_ & ~mask) | (flags & mask); 464 flags_ = (flags_ & ~mask) | (flags & mask);
455 } 465 }
456 466
457 // Return all current flags. 467 // Return all current flags.
458 intptr_t GetFlags() { return flags_; } 468 intptr_t GetFlags() { return flags_; }
459 469
460 intptr_t parallel_sweeping() const { 470
461 return parallel_sweeping_; 471 // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
472 // PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept or was
473 // swept by a sweeper thread.
474 // PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or
475 // sweeping must not be performed on that page.
476 enum ParallelSweepingState {
477 PARALLEL_SWEEPING_DONE,
478 PARALLEL_SWEEPING_IN_PROGRESS,
479 PARALLEL_SWEEPING_PENDING
480 };
481
482 ParallelSweepingState parallel_sweeping() {
483 return static_cast<ParallelSweepingState>(
484 NoBarrier_Load(&parallel_sweeping_));
462 } 485 }
463 486
464 void set_parallel_sweeping(intptr_t state) { 487 void set_parallel_sweeping(ParallelSweepingState state) {
465 parallel_sweeping_ = state; 488 NoBarrier_Store(&parallel_sweeping_, state);
466 } 489 }
467 490
468 bool TryParallelSweeping() { 491 bool TryParallelSweeping() {
469 return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1; 492 return NoBarrier_CompareAndSwap(&parallel_sweeping_,
493 PARALLEL_SWEEPING_PENDING,
494 PARALLEL_SWEEPING_IN_PROGRESS) ==
495 PARALLEL_SWEEPING_PENDING;
470 } 496 }
471 497
472 // Manage live byte count (count of bytes known to be live, 498 // Manage live byte count (count of bytes known to be live,
473 // because they are marked black). 499 // because they are marked black).
474 void ResetLiveBytes() { 500 void ResetLiveBytes() {
475 if (FLAG_gc_verbose) { 501 if (FLAG_gc_verbose) {
476 PrintF("ResetLiveBytes:%p:%x->0\n", 502 PrintF("ResetLiveBytes:%p:%x->0\n",
477 static_cast<void*>(this), live_byte_count_); 503 static_cast<void*>(this), live_byte_count_);
478 } 504 }
479 live_byte_count_ = 0; 505 live_byte_count_ = 0;
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after
529 MemoryChunk::FromAddress(address)->IncrementLiveBytes(by); 555 MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
530 } 556 }
531 557
532 static void IncrementLiveBytesFromMutator(Address address, int by); 558 static void IncrementLiveBytesFromMutator(Address address, int by);
533 559
534 static const intptr_t kAlignment = 560 static const intptr_t kAlignment =
535 (static_cast<uintptr_t>(1) << kPageSizeBits); 561 (static_cast<uintptr_t>(1) << kPageSizeBits);
536 562
537 static const intptr_t kAlignmentMask = kAlignment - 1; 563 static const intptr_t kAlignmentMask = kAlignment - 1;
538 564
539 static const intptr_t kSizeOffset = kPointerSize + kPointerSize; 565 static const intptr_t kSizeOffset = 0;
540 566
541 static const intptr_t kLiveBytesOffset = 567 static const intptr_t kLiveBytesOffset =
542 kSizeOffset + kPointerSize + kPointerSize + kPointerSize + 568 kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
543 kPointerSize + kPointerSize + 569 kPointerSize + kPointerSize +
544 kPointerSize + kPointerSize + kPointerSize + kIntSize; 570 kPointerSize + kPointerSize + kPointerSize + kIntSize;
545 571
546 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; 572 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
547 573
548 static const size_t kWriteBarrierCounterOffset = 574 static const size_t kWriteBarrierCounterOffset =
549 kSlotsBufferOffset + kPointerSize + kPointerSize; 575 kSlotsBufferOffset + kPointerSize + kPointerSize;
550 576
551 static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize + 577 static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
552 kIntSize + kIntSize + kPointerSize + 578 kIntSize + kIntSize + kPointerSize +
553 5 * kPointerSize; 579 5 * kPointerSize +
580 kPointerSize + kPointerSize;
554 581
555 static const int kBodyOffset = 582 static const int kBodyOffset =
556 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); 583 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
557 584
558 // The start offset of the object area in a page. Aligned to both maps and 585 // The start offset of the object area in a page. Aligned to both maps and
559 // code alignment to be suitable for both. Also aligned to 32 words because 586 // code alignment to be suitable for both. Also aligned to 32 words because
560 // the marking bitmap is arranged in 32 bit chunks. 587 // the marking bitmap is arranged in 32 bit chunks.
561 static const int kObjectStartAlignment = 32 * kPointerSize; 588 static const int kObjectStartAlignment = 32 * kPointerSize;
562 static const int kObjectStartOffset = kBodyOffset - 1 + 589 static const int kObjectStartOffset = kBodyOffset - 1 +
563 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); 590 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after
615 642
616 inline Address MarkbitIndexToAddress(uint32_t index) { 643 inline Address MarkbitIndexToAddress(uint32_t index) {
617 return this->address() + (index << kPointerSizeLog2); 644 return this->address() + (index << kPointerSizeLog2);
618 } 645 }
619 646
620 void InsertAfter(MemoryChunk* other); 647 void InsertAfter(MemoryChunk* other);
621 void Unlink(); 648 void Unlink();
622 649
623 inline Heap* heap() { return heap_; } 650 inline Heap* heap() { return heap_; }
624 651
625 static const int kFlagsOffset = kPointerSize * 3; 652 static const int kFlagsOffset = kPointerSize;
626 653
627 bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); } 654 bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
628 655
629 bool ShouldSkipEvacuationSlotRecording() { 656 bool ShouldSkipEvacuationSlotRecording() {
630 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; 657 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
631 } 658 }
632 659
633 inline SkipList* skip_list() { 660 inline SkipList* skip_list() {
634 return skip_list_; 661 return skip_list_;
635 } 662 }
(...skipping 28 matching lines...) Expand all
664 bool CommitArea(size_t requested); 691 bool CommitArea(size_t requested);
665 692
666 // Approximate amount of physical memory committed for this chunk. 693 // Approximate amount of physical memory committed for this chunk.
667 size_t CommittedPhysicalMemory() { 694 size_t CommittedPhysicalMemory() {
668 return high_water_mark_; 695 return high_water_mark_;
669 } 696 }
670 697
671 static inline void UpdateHighWaterMark(Address mark); 698 static inline void UpdateHighWaterMark(Address mark);
672 699
673 protected: 700 protected:
674 MemoryChunk* next_chunk_;
675 MemoryChunk* prev_chunk_;
676 size_t size_; 701 size_t size_;
677 intptr_t flags_; 702 intptr_t flags_;
678 703
679 // Start and end of allocatable memory on this chunk. 704 // Start and end of allocatable memory on this chunk.
680 Address area_start_; 705 Address area_start_;
681 Address area_end_; 706 Address area_end_;
682 707
683 // If the chunk needs to remember its memory reservation, it is stored here. 708 // If the chunk needs to remember its memory reservation, it is stored here.
684 VirtualMemory reservation_; 709 VirtualMemory reservation_;
685 // The identity of the owning space. This is tagged as a failure pointer, but 710 // The identity of the owning space. This is tagged as a failure pointer, but
686 // no failure can be in an object, so this can be distinguished from any entry 711 // no failure can be in an object, so this can be distinguished from any entry
687 // in a fixed array. 712 // in a fixed array.
688 Address owner_; 713 Address owner_;
689 Heap* heap_; 714 Heap* heap_;
690 // Used by the store buffer to keep track of which pages to mark scan-on- 715 // Used by the store buffer to keep track of which pages to mark scan-on-
691 // scavenge. 716 // scavenge.
692 int store_buffer_counter_; 717 int store_buffer_counter_;
693 // Count of bytes marked black on page. 718 // Count of bytes marked black on page.
694 int live_byte_count_; 719 int live_byte_count_;
695 SlotsBuffer* slots_buffer_; 720 SlotsBuffer* slots_buffer_;
696 SkipList* skip_list_; 721 SkipList* skip_list_;
697 intptr_t write_barrier_counter_; 722 intptr_t write_barrier_counter_;
698 // Used by the incremental marker to keep track of the scanning progress in 723 // Used by the incremental marker to keep track of the scanning progress in
699 // large objects that have a progress bar and are scanned in increments. 724 // large objects that have a progress bar and are scanned in increments.
700 int progress_bar_; 725 int progress_bar_;
701 // Assuming the initial allocation on a page is sequential, 726 // Assuming the initial allocation on a page is sequential,
702 // count highest number of bytes ever allocated on the page. 727 // count highest number of bytes ever allocated on the page.
703 int high_water_mark_; 728 int high_water_mark_;
704 729
705 intptr_t parallel_sweeping_; 730 AtomicWord parallel_sweeping_;
706 731
707 // PagedSpace free-list statistics. 732 // PagedSpace free-list statistics.
708 intptr_t available_in_small_free_list_; 733 intptr_t available_in_small_free_list_;
709 intptr_t available_in_medium_free_list_; 734 intptr_t available_in_medium_free_list_;
710 intptr_t available_in_large_free_list_; 735 intptr_t available_in_large_free_list_;
711 intptr_t available_in_huge_free_list_; 736 intptr_t available_in_huge_free_list_;
712 intptr_t non_available_small_blocks_; 737 intptr_t non_available_small_blocks_;
713 738
714 static MemoryChunk* Initialize(Heap* heap, 739 static MemoryChunk* Initialize(Heap* heap,
715 Address base, 740 Address base,
716 size_t size, 741 size_t size,
717 Address area_start, 742 Address area_start,
718 Address area_end, 743 Address area_end,
719 Executability executable, 744 Executability executable,
720 Space* owner); 745 Space* owner);
721 746
747 private:
748 // next_chunk_ holds a pointer of type MemoryChunk
749 AtomicWord next_chunk_;
750 // prev_chunk_ holds a pointer of type MemoryChunk
751 AtomicWord prev_chunk_;
752
722 friend class MemoryAllocator; 753 friend class MemoryAllocator;
723 }; 754 };
724 755
725 756
726 STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); 757 STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
727 758
728 759
729 // ----------------------------------------------------------------------------- 760 // -----------------------------------------------------------------------------
730 // A page is a memory chunk of a size 1MB. Large object pages may be larger. 761 // A page is a memory chunk of a size 1MB. Large object pages may be larger.
731 // 762 //
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after
772 Address OffsetToAddress(int offset) { 803 Address OffsetToAddress(int offset) {
773 ASSERT_PAGE_OFFSET(offset); 804 ASSERT_PAGE_OFFSET(offset);
774 return address() + offset; 805 return address() + offset;
775 } 806 }
776 807
777 // --------------------------------------------------------------------- 808 // ---------------------------------------------------------------------
778 809
779 // Page size in bytes. This must be a multiple of the OS page size. 810 // Page size in bytes. This must be a multiple of the OS page size.
780 static const int kPageSize = 1 << kPageSizeBits; 811 static const int kPageSize = 1 << kPageSizeBits;
781 812
782 // Object area size in bytes.
783 static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
784
785 // Maximum object size that fits in a page. Objects larger than that size 813 // Maximum object size that fits in a page. Objects larger than that size
786 // are allocated in large object space and are never moved in memory. This 814 // are allocated in large object space and are never moved in memory. This
787 // also applies to new space allocation, since objects are never migrated 815 // also applies to new space allocation, since objects are never migrated
788 // from new space to large object space. Takes double alignment into account. 816 // from new space to large object space. Takes double alignment into account.
789 static const int kMaxNonCodeHeapObjectSize = 817 static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
790 kNonCodeObjectAreaSize - kPointerSize;
791 818
792 // Page size mask. 819 // Page size mask.
793 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; 820 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
794 821
795 inline void ClearGCFields(); 822 inline void ClearGCFields();
796 823
797 static inline Page* Initialize(Heap* heap, 824 static inline Page* Initialize(Heap* heap,
798 MemoryChunk* chunk, 825 MemoryChunk* chunk,
799 Executability executable, 826 Executability executable,
800 PagedSpace* owner); 827 PagedSpace* owner);
(...skipping 272 matching lines...) Expand 10 before | Expand all | Expand 10 after
1073 intptr_t AvailableExecutable() { 1100 intptr_t AvailableExecutable() {
1074 if (capacity_executable_ < size_executable_) return 0; 1101 if (capacity_executable_ < size_executable_) return 0;
1075 return capacity_executable_ - size_executable_; 1102 return capacity_executable_ - size_executable_;
1076 } 1103 }
1077 1104
1078 // Returns allocated executable spaces in bytes. 1105 // Returns allocated executable spaces in bytes.
1079 intptr_t SizeExecutable() { return size_executable_; } 1106 intptr_t SizeExecutable() { return size_executable_; }
1080 1107
1081 // Returns maximum available bytes that the old space can have. 1108 // Returns maximum available bytes that the old space can have.
1082 intptr_t MaxAvailable() { 1109 intptr_t MaxAvailable() {
1083 return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize; 1110 return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
1084 } 1111 }
1085 1112
1086 // Returns an indication of whether a pointer is in a space that has 1113 // Returns an indication of whether a pointer is in a space that has
1087 // been allocated by this MemoryAllocator. 1114 // been allocated by this MemoryAllocator.
1088 V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const { 1115 V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
1089 return address < lowest_ever_allocated_ || 1116 return address < lowest_ever_allocated_ ||
1090 address >= highest_ever_allocated_; 1117 address >= highest_ever_allocated_;
1091 } 1118 }
1092 1119
1093 #ifdef DEBUG 1120 #ifdef DEBUG
(...skipping 406 matching lines...) Expand 10 before | Expand all | Expand 10 after
1500 1527
1501 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode); 1528 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1502 }; 1529 };
1503 1530
1504 1531
1505 // The free list category holds a pointer to the top element and a pointer to 1532 // The free list category holds a pointer to the top element and a pointer to
1506 // the end element of the linked list of free memory blocks. 1533 // the end element of the linked list of free memory blocks.
1507 class FreeListCategory { 1534 class FreeListCategory {
1508 public: 1535 public:
1509 FreeListCategory() : 1536 FreeListCategory() :
1510 top_(NULL), 1537 top_(0),
1511 end_(NULL), 1538 end_(NULL),
1512 available_(0) {} 1539 available_(0) {}
1513 1540
1514 intptr_t Concatenate(FreeListCategory* category); 1541 intptr_t Concatenate(FreeListCategory* category);
1515 1542
1516 void Reset(); 1543 void Reset();
1517 1544
1518 void Free(FreeListNode* node, int size_in_bytes); 1545 void Free(FreeListNode* node, int size_in_bytes);
1519 1546
1520 FreeListNode* PickNodeFromList(int *node_size); 1547 FreeListNode* PickNodeFromList(int *node_size);
1521 FreeListNode* PickNodeFromList(int size_in_bytes, int *node_size); 1548 FreeListNode* PickNodeFromList(int size_in_bytes, int *node_size);
1522 1549
1523 intptr_t EvictFreeListItemsInList(Page* p); 1550 intptr_t EvictFreeListItemsInList(Page* p);
1551 bool ContainsPageFreeListItemsInList(Page* p);
1524 1552
1525 void RepairFreeList(Heap* heap); 1553 void RepairFreeList(Heap* heap);
1526 1554
1527 FreeListNode** GetTopAddress() { return &top_; } 1555 FreeListNode* top() const {
1528 FreeListNode* top() const { return top_; } 1556 return reinterpret_cast<FreeListNode*>(NoBarrier_Load(&top_));
1529 void set_top(FreeListNode* top) { top_ = top; } 1557 }
1558
1559 void set_top(FreeListNode* top) {
1560 NoBarrier_Store(&top_, reinterpret_cast<AtomicWord>(top));
1561 }
1530 1562
1531 FreeListNode** GetEndAddress() { return &end_; } 1563 FreeListNode** GetEndAddress() { return &end_; }
1532 FreeListNode* end() const { return end_; } 1564 FreeListNode* end() const { return end_; }
1533 void set_end(FreeListNode* end) { end_ = end; } 1565 void set_end(FreeListNode* end) { end_ = end; }
1534 1566
1535 int* GetAvailableAddress() { return &available_; } 1567 int* GetAvailableAddress() { return &available_; }
1536 int available() const { return available_; } 1568 int available() const { return available_; }
1537 void set_available(int available) { available_ = available; } 1569 void set_available(int available) { available_ = available; }
1538 1570
1539 Mutex* mutex() { return &mutex_; } 1571 Mutex* mutex() { return &mutex_; }
1540 1572
1573 bool IsEmpty() {
1574 return top() == 0;
1575 }
1576
1541 #ifdef DEBUG 1577 #ifdef DEBUG
1542 intptr_t SumFreeList(); 1578 intptr_t SumFreeList();
1543 int FreeListLength(); 1579 int FreeListLength();
1544 #endif 1580 #endif
1545 1581
1546 private: 1582 private:
1547 FreeListNode* top_; 1583 // top_ points to the top FreeListNode* in the free list category.
1584 AtomicWord top_;
1548 FreeListNode* end_; 1585 FreeListNode* end_;
1549 Mutex mutex_; 1586 Mutex mutex_;
1550 1587
1551 // Total available bytes in all blocks of this free list category. 1588 // Total available bytes in all blocks of this free list category.
1552 int available_; 1589 int available_;
1553 }; 1590 };
1554 1591
1555 1592
1556 // The free list for the old space. The free list is organized in such a way 1593 // The free list for the old space. The free list is organized in such a way
1557 // as to encourage objects allocated around the same time to be near each 1594 // as to encourage objects allocated around the same time to be near each
(...skipping 11 matching lines...) Expand all
1569 // limit when the object we need to allocate is 1-31 words in size. These 1606 // limit when the object we need to allocate is 1-31 words in size. These
1570 // spaces are called small. 1607 // spaces are called small.
1571 // 256-2047 words: There is a list of spaces this large. It is used for top and 1608 // 256-2047 words: There is a list of spaces this large. It is used for top and
1572 // limit when the object we need to allocate is 32-255 words in size. These 1609 // limit when the object we need to allocate is 32-255 words in size. These
1573 // spaces are called medium. 1610 // spaces are called medium.
1574 // 1048-16383 words: There is a list of spaces this large. It is used for top 1611 // 1048-16383 words: There is a list of spaces this large. It is used for top
1575 // and limit when the object we need to allocate is 256-2047 words in size. 1612 // and limit when the object we need to allocate is 256-2047 words in size.
 1576 // These spaces are called large. 1613 // These spaces are called large.
1577 // At least 16384 words. This list is for objects of 2048 words or larger. 1614 // At least 16384 words. This list is for objects of 2048 words or larger.
1578 // Empty pages are added to this list. These spaces are called huge. 1615 // Empty pages are added to this list. These spaces are called huge.
1579 class FreeList BASE_EMBEDDED { 1616 class FreeList {
1580 public: 1617 public:
1581 explicit FreeList(PagedSpace* owner); 1618 explicit FreeList(PagedSpace* owner);
1582 1619
1583 intptr_t Concatenate(FreeList* free_list); 1620 intptr_t Concatenate(FreeList* free_list);
1584 1621
1585 // Clear the free list. 1622 // Clear the free list.
1586 void Reset(); 1623 void Reset();
1587 1624
1588 // Return the number of bytes available on the free list. 1625 // Return the number of bytes available on the free list.
1589 intptr_t available() { 1626 intptr_t available() {
1590 return small_list_.available() + medium_list_.available() + 1627 return small_list_.available() + medium_list_.available() +
1591 large_list_.available() + huge_list_.available(); 1628 large_list_.available() + huge_list_.available();
1592 } 1629 }
1593 1630
1594 // Place a node on the free list. The block of size 'size_in_bytes' 1631 // Place a node on the free list. The block of size 'size_in_bytes'
1595 // starting at 'start' is placed on the free list. The return value is the 1632 // starting at 'start' is placed on the free list. The return value is the
1596 // number of bytes that have been lost due to internal fragmentation by 1633 // number of bytes that have been lost due to internal fragmentation by
1597 // freeing the block. Bookkeeping information will be written to the block, 1634 // freeing the block. Bookkeeping information will be written to the block,
1598 // i.e., its contents will be destroyed. The start address should be word 1635 // i.e., its contents will be destroyed. The start address should be word
1599 // aligned, and the size should be a non-zero multiple of the word size. 1636 // aligned, and the size should be a non-zero multiple of the word size.
1600 int Free(Address start, int size_in_bytes); 1637 int Free(Address start, int size_in_bytes);
1601 1638
1602 // Allocate a block of size 'size_in_bytes' from the free list. The block 1639 // Allocate a block of size 'size_in_bytes' from the free list. The block
1603 // is unitialized. A failure is returned if no block is available. The 1640 // is unitialized. A failure is returned if no block is available. The
1604 // number of bytes lost to fragmentation is returned in the output parameter 1641 // number of bytes lost to fragmentation is returned in the output parameter
1605 // 'wasted_bytes'. The size should be a non-zero multiple of the word size. 1642 // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
1606 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); 1643 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1607 1644
1645 bool IsEmpty() {
1646 return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
1647 large_list_.IsEmpty() && huge_list_.IsEmpty();
1648 }
1649
1608 #ifdef DEBUG 1650 #ifdef DEBUG
1609 void Zap(); 1651 void Zap();
1610 intptr_t SumFreeLists(); 1652 intptr_t SumFreeLists();
1611 bool IsVeryLong(); 1653 bool IsVeryLong();
1612 #endif 1654 #endif
1613 1655
1614 // Used after booting the VM. 1656 // Used after booting the VM.
1615 void RepairLists(Heap* heap); 1657 void RepairLists(Heap* heap);
1616 1658
1617 intptr_t EvictFreeListItems(Page* p); 1659 intptr_t EvictFreeListItems(Page* p);
1660 bool ContainsPageFreeListItems(Page* p);
1618 1661
1619 FreeListCategory* small_list() { return &small_list_; } 1662 FreeListCategory* small_list() { return &small_list_; }
1620 FreeListCategory* medium_list() { return &medium_list_; } 1663 FreeListCategory* medium_list() { return &medium_list_; }
1621 FreeListCategory* large_list() { return &large_list_; } 1664 FreeListCategory* large_list() { return &large_list_; }
1622 FreeListCategory* huge_list() { return &huge_list_; } 1665 FreeListCategory* huge_list() { return &huge_list_; }
1623 1666
1624 private: 1667 private:
1625 // The size range of blocks, in bytes. 1668 // The size range of blocks, in bytes.
1626 static const int kMinBlockSize = 3 * kPointerSize; 1669 static const int kMinBlockSize = 3 * kPointerSize;
1627 static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize; 1670 static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
1628 1671
1629 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); 1672 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
1630 1673
1631 PagedSpace* owner_; 1674 PagedSpace* owner_;
1632 Heap* heap_; 1675 Heap* heap_;
1633 1676
1634 static const int kSmallListMin = 0x20 * kPointerSize; 1677 static const int kSmallListMin = 0x20 * kPointerSize;
1635 static const int kSmallListMax = 0xff * kPointerSize; 1678 static const int kSmallListMax = 0xff * kPointerSize;
1636 static const int kMediumListMax = 0x7ff * kPointerSize; 1679 static const int kMediumListMax = 0x7ff * kPointerSize;
1637 static const int kLargeListMax = 0x3fff * kPointerSize; 1680 static const int kLargeListMax = 0x3fff * kPointerSize;
(...skipping 300 matching lines...) Expand 10 before | Expand all | Expand 10 after
1938 bool Expand(); 1981 bool Expand();
1939 1982
1940 // Generic fast case allocation function that tries linear allocation at the 1983 // Generic fast case allocation function that tries linear allocation at the
1941 // address denoted by top in allocation_info_. 1984 // address denoted by top in allocation_info_.
1942 inline HeapObject* AllocateLinearly(int size_in_bytes); 1985 inline HeapObject* AllocateLinearly(int size_in_bytes);
1943 1986
1944 // Slow path of AllocateRaw. This function is space-dependent. 1987 // Slow path of AllocateRaw. This function is space-dependent.
1945 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes); 1988 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
1946 1989
1947 friend class PageIterator; 1990 friend class PageIterator;
1948 friend class SweeperThread; 1991 friend class MarkCompactCollector;
1949 }; 1992 };
1950 1993
1951 1994
1952 class NumberAndSizeInfo BASE_EMBEDDED { 1995 class NumberAndSizeInfo BASE_EMBEDDED {
1953 public: 1996 public:
1954 NumberAndSizeInfo() : number_(0), bytes_(0) {} 1997 NumberAndSizeInfo() : number_(0), bytes_(0) {}
1955 1998
1956 int number() const { return number_; } 1999 int number() const { return number_; }
1957 void increment_number(int num) { number_ += num; } 2000 void increment_number(int num) { number_ += num; }
1958 2001
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
1995 2038
1996 class NewSpacePage : public MemoryChunk { 2039 class NewSpacePage : public MemoryChunk {
1997 public: 2040 public:
1998 // GC related flags copied from from-space to to-space when 2041 // GC related flags copied from from-space to to-space when
1999 // flipping semispaces. 2042 // flipping semispaces.
2000 static const intptr_t kCopyOnFlipFlagsMask = 2043 static const intptr_t kCopyOnFlipFlagsMask =
2001 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | 2044 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
2002 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) | 2045 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
2003 (1 << MemoryChunk::SCAN_ON_SCAVENGE); 2046 (1 << MemoryChunk::SCAN_ON_SCAVENGE);
2004 2047
2005 static const int kAreaSize = Page::kNonCodeObjectAreaSize; 2048 static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
2006 2049
2007 inline NewSpacePage* next_page() const { 2050 inline NewSpacePage* next_page() const {
2008 return static_cast<NewSpacePage*>(next_chunk()); 2051 return static_cast<NewSpacePage*>(next_chunk());
2009 } 2052 }
2010 2053
2011 inline void set_next_page(NewSpacePage* page) { 2054 inline void set_next_page(NewSpacePage* page) {
2012 set_next_chunk(page); 2055 set_next_chunk(page);
2013 } 2056 }
2014 2057
2015 inline NewSpacePage* prev_page() const { 2058 inline NewSpacePage* prev_page() const {
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
2047 NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start); 2090 NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
2048 return page; 2091 return page;
2049 } 2092 }
2050 2093
2051 // Find the page for a limit address. A limit address is either an address 2094 // Find the page for a limit address. A limit address is either an address
2052 // inside a page, or the address right after the last byte of a page. 2095 // inside a page, or the address right after the last byte of a page.
2053 static inline NewSpacePage* FromLimit(Address address_limit) { 2096 static inline NewSpacePage* FromLimit(Address address_limit) {
2054 return NewSpacePage::FromAddress(address_limit - 1); 2097 return NewSpacePage::FromAddress(address_limit - 1);
2055 } 2098 }
2056 2099
2100 // Checks if address1 and address2 are on the same new space page.
2101 static inline bool OnSamePage(Address address1, Address address2) {
2102 return NewSpacePage::FromAddress(address1) ==
2103 NewSpacePage::FromAddress(address2);
2104 }
2105
2057 private: 2106 private:
2058 // Create a NewSpacePage object that is only used as anchor 2107 // Create a NewSpacePage object that is only used as anchor
2059 // for the doubly-linked list of real pages. 2108 // for the doubly-linked list of real pages.
2060 explicit NewSpacePage(SemiSpace* owner) { 2109 explicit NewSpacePage(SemiSpace* owner) {
2061 InitializeAsAnchor(owner); 2110 InitializeAsAnchor(owner);
2062 } 2111 }
2063 2112
2064 static NewSpacePage* Initialize(Heap* heap, 2113 static NewSpacePage* Initialize(Heap* heap,
2065 Address start, 2114 Address start,
2066 SemiSpace* semi_space); 2115 SemiSpace* semi_space);
(...skipping 374 matching lines...) Expand 10 before | Expand all | Expand 10 after
2441 Address top() { 2490 Address top() {
2442 ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top())); 2491 ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
2443 return allocation_info_.top(); 2492 return allocation_info_.top();
2444 } 2493 }
2445 2494
2446 void set_top(Address top) { 2495 void set_top(Address top) {
2447 ASSERT(to_space_.current_page()->ContainsLimit(top)); 2496 ASSERT(to_space_.current_page()->ContainsLimit(top));
2448 allocation_info_.set_top(top); 2497 allocation_info_.set_top(top);
2449 } 2498 }
2450 2499
2500 // Return the address of the allocation pointer limit in the active semispace.
2501 Address limit() {
2502 ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
2503 return allocation_info_.limit();
2504 }
2505
2451 // Return the address of the first object in the active semispace. 2506 // Return the address of the first object in the active semispace.
2452 Address bottom() { return to_space_.space_start(); } 2507 Address bottom() { return to_space_.space_start(); }
2453 2508
2454 // Get the age mark of the inactive semispace. 2509 // Get the age mark of the inactive semispace.
2455 Address age_mark() { return from_space_.age_mark(); } 2510 Address age_mark() { return from_space_.age_mark(); }
2456 // Set the age mark in the active semispace. 2511 // Set the age mark in the active semispace.
2457 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); } 2512 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2458 2513
2459 // The start address of the space and a bit mask. Anding an address in the 2514 // The start address of the space and a bit mask. Anding an address in the
2460 // new space with the mask will result in the start address. 2515 // new space with the mask will result in the start address.
(...skipping 194 matching lines...) Expand 10 before | Expand all | Expand 10 after
2655 return RoundDown(size, Map::kSize); 2710 return RoundDown(size, Map::kSize);
2656 } else { 2711 } else {
2657 return (size / Map::kSize) * Map::kSize; 2712 return (size / Map::kSize) * Map::kSize;
2658 } 2713 }
2659 } 2714 }
2660 2715
2661 protected: 2716 protected:
2662 virtual void VerifyObject(HeapObject* obj); 2717 virtual void VerifyObject(HeapObject* obj);
2663 2718
2664 private: 2719 private:
2665 static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize; 2720 static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
2666 2721
2667 // Do map space compaction if there is a page gap. 2722 // Do map space compaction if there is a page gap.
2668 int CompactionThreshold() { 2723 int CompactionThreshold() {
2669 return kMapsPerPage * (max_map_space_pages_ - 1); 2724 return kMapsPerPage * (max_map_space_pages_ - 1);
2670 } 2725 }
2671 2726
2672 const int max_map_space_pages_; 2727 const int max_map_space_pages_;
2673 2728
2674 public: 2729 public:
2675 TRACK_MEMORY("MapSpace") 2730 TRACK_MEMORY("MapSpace")
(...skipping 247 matching lines...) Expand 10 before | Expand all | Expand 10 after
2923 } 2978 }
2924 // Must be small, since an iteration is used for lookup. 2979 // Must be small, since an iteration is used for lookup.
2925 static const int kMaxComments = 64; 2980 static const int kMaxComments = 64;
2926 }; 2981 };
2927 #endif 2982 #endif
2928 2983
2929 2984
2930 } } // namespace v8::internal 2985 } } // namespace v8::internal
2931 2986
2932 #endif // V8_SPACES_H_ 2987 #endif // V8_SPACES_H_
OLDNEW
« no previous file with comments | « src/simulator.h ('k') | src/spaces.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698