
Unified Diff: src/spaces.h

Issue 259173003: Kiss goodbye to MaybeObject. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: rebase + addressed comments Created 6 years, 7 months ago
--- src/spaces.h (old)
+++ src/spaces.h (new)
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_SPACES_H_
 #define V8_SPACES_H_

 #include "allocation.h"
 #include "hashmap.h"
 #include "list.h"
(...skipping 1479 matching lines...)
   // functions.
   void set_size(Heap* heap, int size_in_bytes);

   // Accessors for the next field.
   inline FreeListNode* next();
   inline FreeListNode** next_address();
   inline void set_next(FreeListNode* next);

   inline void Zap();

-  static inline FreeListNode* cast(MaybeObject* maybe) {
-    ASSERT(!maybe->IsFailure());
-    return reinterpret_cast<FreeListNode*>(maybe);
+  static inline FreeListNode* cast(Object* object) {
+    return reinterpret_cast<FreeListNode*>(object);
   }

  private:
   static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);

   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
 };


 // The free list category holds a pointer to the top element and a pointer to
(...skipping 150 matching lines...)
   static const int kLargeAllocationMax = kMediumListMax;
   FreeListCategory small_list_;
   FreeListCategory medium_list_;
   FreeListCategory large_list_;
   FreeListCategory huge_list_;

   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
 };


+class AllocationResult {
+ public:
+  // Implicit constructor from Object*.
+  AllocationResult(Object* object) : object_(object),  // NOLINT
+                                     retry_space_(INVALID_SPACE) { }
+
+  AllocationResult() : object_(NULL),
+                       retry_space_(INVALID_SPACE) { }
+
+  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
+    return AllocationResult(space);
+  }
+
+  inline bool IsRetry() { return retry_space_ != INVALID_SPACE; }
+
+  template <typename T>
+  bool To(T** obj) {
+    if (IsRetry()) return false;
+    *obj = T::cast(object_);
+    return true;
+  }
+
+  Object* ToObjectChecked() {
+    CHECK(!IsRetry());
+    return object_;
+  }
+
+  AllocationSpace RetrySpace() {
+    ASSERT(IsRetry());
+    return retry_space_;
+  }
+
+ private:
+  explicit AllocationResult(AllocationSpace space) : object_(NULL),
+                                                     retry_space_(space) { }
+
+  Object* object_;
+  AllocationSpace retry_space_;
+};
+
+
 class PagedSpace : public Space {
  public:
   // Creates a space with a maximum capacity, and an id.
   PagedSpace(Heap* heap,
              intptr_t max_capacity,
              AllocationSpace id,
              Executability executable);

   virtual ~PagedSpace() {}

(...skipping 100 matching lines...)
     return allocation_info_.top_address();
   }

   // The allocation limit address.
   Address* allocation_limit_address() {
     return allocation_info_.limit_address();
   }

   // Allocate the requested number of bytes in the space if possible, return a
   // failure object if not.
-  MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
+  MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);

   // Give a block of memory to the space's free list. It might be added to
   // the free list or accounted as waste.
   // If add_to_freelist is false then just accounting stats are updated and
   // no attempt to add area to free list is made.
   int Free(Address start, int size_in_bytes) {
     int wasted = free_list_.Free(start, size_in_bytes);
     accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
     return size_in_bytes - wasted;
   }
(...skipping 705 matching lines...)
   // The allocation top and limit address.
   Address* allocation_top_address() {
     return allocation_info_.top_address();
   }

   // The allocation limit address.
   Address* allocation_limit_address() {
     return allocation_info_.limit_address();
   }

-  MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
+  MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));

   // Reset the allocation pointer to the beginning of the active semispace.
   void ResetAllocationInfo();

   void UpdateInlineAllocationLimit(int size_in_bytes);
   void LowerInlineAllocationLimit(intptr_t step) {
     inline_allocation_limit_step_ = step;
     UpdateInlineAllocationLimit(0);
     top_on_previous_step_ = allocation_info_.top();
   }
(...skipping 96 matching lines...)
   // to be lower than actual limit and then will gradually increase it
   // in steps to guarantee that we do incremental marking steps even
   // when all allocation is performed from inlined generated code.
   intptr_t inline_allocation_limit_step_;

   Address top_on_previous_step_;

   HistogramInfo* allocated_histogram_;
   HistogramInfo* promoted_histogram_;

-  MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
+  MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes);

   friend class SemiSpaceIterator;

  public:
   TRACK_MEMORY("NewSpace")
 };


 // -----------------------------------------------------------------------------
 // Old object space (excluding map objects)
(...skipping 134 matching lines...)
   // Releases internal resources, frees objects in this space.
   void TearDown();

   static intptr_t ObjectSizeFor(intptr_t chunk_size) {
     if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
     return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
   }

   // Shared implementation of AllocateRaw, AllocateRawCode and
   // AllocateRawFixedArray.
-  MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
-                                           Executability executable);
+  MUST_USE_RESULT AllocationResult AllocateRaw(int object_size,
+                                               Executability executable);

   // Available bytes for objects in this space.
   inline intptr_t Available();

   virtual intptr_t Size() {
     return size_;
   }

   virtual intptr_t SizeOfObjects() {
     return objects_size_;
(...skipping 153 matching lines...)
   }
   // Must be small, since an iteration is used for lookup.
   static const int kMaxComments = 64;
 };
 #endif


 } }  // namespace v8::internal

 #endif  // V8_SPACES_H_
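
Note for reviewers skimming the new API: AllocationResult replaces the MaybeObject*/Failure protocol at raw-allocation sites such as PagedSpace::AllocateRaw. Below is a minimal caller-side sketch, not part of this patch; the function name, the space pointer, and the size parameter are illustrative placeholders.

// Hypothetical caller, for illustration only (not part of this patch).
static AllocationResult AllocateAndInitialize(PagedSpace* space,
                                              int size_in_bytes) {
  AllocationResult allocation = space->AllocateRaw(size_in_bytes);
  HeapObject* obj;
  // To() extracts the object on success; on failure it returns false
  // instead of handing back a Failure object the way MaybeObject did.
  if (!allocation.To(&obj)) return allocation;  // propagate the retry
  // ... initialize the map and fields of obj here ...
  return obj;  // implicit AllocationResult(Object*) conversion on success
}

Allocation functions signal failure with AllocationResult::Retry(), which defaults to NEW_SPACE; callers can query RetrySpace() to decide which space to collect before retrying, while ToObjectChecked() serves call sites that must not fail (it CHECKs that the result is not a retry).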
