Chromium Code Reviews

Unified Diff: src/heap/spaces.h

Issue 1809863003: [heap] Remove separate constant for newspace page allocatable size (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 9 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_HEAP_SPACES_H_
 #define V8_HEAP_SPACES_H_

 #include "src/allocation.h"
 #include "src/atomic-utils.h"
 #include "src/base/atomicops.h"
(...skipping 399 matching lines...)
 // The start offset of the object area in a page. Aligned to both maps and
 // code alignment to be suitable for both. Also aligned to 32 words because
 // the marking bitmap is arranged in 32 bit chunks.
 static const int kObjectStartAlignment = 32 * kPointerSize;
 static const int kObjectStartOffset =
     kBodyOffset - 1 +
     (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);

 static const int kFlagsOffset = kPointerSize;

+// Page size in bytes. This must be a multiple of the OS page size.
+static const int kPageSize = 1 << kPageSizeBits;
+static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
+
+static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
+
 static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
 static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);

 // Only works if the pointer is in the first kPageSize of the MemoryChunk.
 static MemoryChunk* FromAddress(Address a) {
   return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
 }

 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

(...skipping 341 matching lines...)
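
For orientation, the constants added above are plain arithmetic: kObjectStartOffset rounds the chunk header (kBodyOffset) up to the next 32-word boundary so that object starts line up with the 32-bit chunks of the marking bitmap, and kAllocatableMemory is whatever remains of the page after that header area. Below is a minimal standalone sketch of that arithmetic; the values of kPointerSize, kPageSizeBits, and kBodyOffset are made up for illustration and are not V8's actual configuration.

    #include <stdio.h>

    // Made-up stand-ins for the V8 constants; the real values depend on the
    // build configuration (pointer width, kPageSizeBits, chunk header layout).
    constexpr int kPointerSize = 8;
    constexpr int kPageSizeBits = 20;                         // 1 MB pages, illustrative
    constexpr int kBodyOffset = 232;                          // pretend chunk header size
    constexpr int kObjectStartAlignment = 32 * kPointerSize;  // 256 bytes

    // Same rounding formula as in the hunk above: round kBodyOffset up to the
    // next multiple of kObjectStartAlignment.
    constexpr int kObjectStartOffset =
        kBodyOffset - 1 +
        (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);

    constexpr int kPageSize = 1 << kPageSizeBits;
    constexpr int kAllocatableMemory = kPageSize - kObjectStartOffset;

    int main() {
      // With these illustrative numbers, 232 rounds up to 256 and the
      // allocatable area is 1048576 - 256 = 1048320 bytes per page.
      printf("object start offset:  %d\n", kObjectStartOffset);
      printf("allocatable per page: %d\n", kAllocatableMemory);
      return 0;
    }
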
 }

 // Returns the address for a given offset to the this page.
 Address OffsetToAddress(int offset) {
   DCHECK_PAGE_OFFSET(offset);
   return address() + offset;
 }

 // ---------------------------------------------------------------------

-// Page size in bytes. This must be a multiple of the OS page size.
-static const int kPageSize = 1 << kPageSizeBits;
-
 // Maximum object size that gets allocated into regular pages. Objects larger
 // than that size are allocated in large object space and are never moved in
 // memory. This also applies to new space allocation, since objects are never
 // migrated from new space to large object space. Takes double alignment into
 // account.
 // TODO(hpayer): This limit should be way smaller but we currently have
 // short living objects >256K.
 static const int kMaxRegularHeapObjectSize = 600 * KB;

-static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
-
-// Page size mask.
-static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
-
 inline void ClearGCFields();

 static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                Executability executable, PagedSpace* owner);

 void InitializeAsAnchor(PagedSpace* owner);

 // WaitUntilSweepingCompleted only works when concurrent sweeping is in
 // progress. In particular, when we know that right before this call a
 // sweeper thread was sweeping this page.
(...skipping 1383 matching lines...)
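
The deletions above are the other half of the change: Page stops carrying its own copies of kPageSize, kAllocatableMemory, and kPageAlignmentMask and simply inherits the definitions the earlier hunk moved onto MemoryChunk. A toy sketch of that pattern follows; the class bodies are reduced to just the constants and the numeric values are illustrative, not V8's real layout.

    #include <stdint.h>

    constexpr int kPageSizeBits = 19;  // illustrative only

    class MemoryChunk {
     public:
      // Single definition shared by every chunk-derived page type.
      static const int kPageSize = 1 << kPageSizeBits;
      static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
      static const int kObjectStartOffset = 256;  // pretend header size
      static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
    };

    class Page : public MemoryChunk {};          // old-space pages
    class NewSpacePage : public MemoryChunk {};  // new-space pages

    // Both page types now report the same allocatable size, so there is no
    // separate NewSpacePage::kAreaSize constant left to keep in sync.
    static_assert(Page::kAllocatableMemory == NewSpacePage::kAllocatableMemory,
                  "one source of truth for the allocatable page size");
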
 private:
 const char* name_;
 };


 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };


 class NewSpacePage : public MemoryChunk {
 public:
-// GC related flags copied from from-space to to-space when
-// flipping semispaces.
-static const intptr_t kCopyOnFlipFlagsMask =
-    (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
-    (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
-static const int kAreaSize = Page::kAllocatableMemory;
    Michael Lippautz, 2016/03/17 10:13:43: Now available on MemoryChunk since the allocatable …
-
-inline NewSpacePage* next_page() {
-  return static_cast<NewSpacePage*>(next_chunk());
-}
-
-inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
-
-inline NewSpacePage* prev_page() {
-  return static_cast<NewSpacePage*>(prev_chunk());
-}
-
-inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
-
-SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
-
-bool is_anchor() { return !this->InNewSpace(); }
-
 static bool IsAtStart(Address addr) {
   return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
          kObjectStartOffset;
 }

 static bool IsAtEnd(Address addr) {
   return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
 }

-Address address() { return reinterpret_cast<Address>(this); }
    Michael Lippautz, 2016/03/17 10:13:43: dead code
-
 // Finds the NewSpacePage containing the given address.
 static inline NewSpacePage* FromAddress(Address address_in_page) {
   Address page_start =
       reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
                                 ~Page::kPageAlignmentMask);
   NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
   return page;
 }

 // Find the page for a limit address. A limit address is either an address
 // inside a page, or the address right after the last byte of a page.
 static inline NewSpacePage* FromLimit(Address address_limit) {
   return NewSpacePage::FromAddress(address_limit - 1);
 }

 // Checks if address1 and address2 are on the same new space page.
 static inline bool OnSamePage(Address address1, Address address2) {
   return NewSpacePage::FromAddress(address1) ==
          NewSpacePage::FromAddress(address2);
 }

+inline NewSpacePage* next_page() {
+  return static_cast<NewSpacePage*>(next_chunk());
+}
+
+inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
+
+inline NewSpacePage* prev_page() {
+  return static_cast<NewSpacePage*>(prev_chunk());
+}
+
+inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
+
+SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
+
+bool is_anchor() { return !this->InNewSpace(); }
+
 private:
+// GC related flags copied from from-space to to-space when
+// flipping semispaces.
+static const intptr_t kCopyOnFlipFlagsMask =
+    (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+    (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+
 // Create a NewSpacePage object that is only used as anchor
 // for the doubly-linked list of real pages.
 explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }

 static NewSpacePage* Initialize(Heap* heap, Address start,
                                 SemiSpace* semi_space);

 // Intialize a fake NewSpacePage used as sentinel at the ends
 // of a doubly-linked list of real NewSpacePages.
 // Only uses the prev/next links, and sets flags to not be in new-space.
(...skipping 256 matching lines...)
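
The address helpers kept in this hunk all rely on new-space pages being kPageSize-aligned: masking off the low kPageSizeBits bits of any interior address recovers the page start, and FromLimit first steps back one byte so that a one-past-the-end limit address still resolves to the page it belongs to rather than the next one. A small sketch of that masking follows; the addresses, the helper names, and the value of kPageSizeBits are invented for illustration.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    constexpr unsigned kPageSizeBits = 19;  // illustrative only
    constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << kPageSizeBits) - 1;

    // Recover the page start from any address inside the page.
    uintptr_t PageFromAddress(uintptr_t addr) { return addr & ~kPageAlignmentMask; }

    // A limit may point one past the last byte of a page; step back one byte so
    // it maps to the page it limits instead of the following page.
    uintptr_t PageFromLimit(uintptr_t limit) { return PageFromAddress(limit - 1); }

    int main() {
      const uintptr_t page = 0x40000000;  // made-up, page-aligned address
      const uintptr_t inside = page + 0x1234;
      const uintptr_t limit = page + (uintptr_t{1} << kPageSizeBits);  // one past the end

      printf("inside -> %#" PRIxPTR "\n", PageFromAddress(inside));  // this page
      printf("limit  -> %#" PRIxPTR "\n", PageFromLimit(limit));     // still this page
      return 0;
    }
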

 // Grow the capacity of the semispaces. Assumes that they are not at
 // their maximum capacity.
 void Grow();

 // Shrink the capacity of the semispaces.
 void Shrink();

 // Return the allocated bytes in the active semispace.
 intptr_t Size() override {
-  return pages_used_ * NewSpacePage::kAreaSize +
+  return pages_used_ * NewSpacePage::kAllocatableMemory +
          static_cast<int>(top() - to_space_.page_low());
 }

 // The same, but returning an int. We have to have the one that returns
 // intptr_t because it is inherited, but if we know we are dealing with the
 // new space, which can't get as big as the other spaces then this is useful:
 int SizeAsInt() { return static_cast<int>(Size()); }

 // Return the allocatable capacity of a semispace.
 intptr_t Capacity() {
   SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
   return (to_space_.current_capacity() / Page::kPageSize) *
-         NewSpacePage::kAreaSize;
+         NewSpacePage::kAllocatableMemory;
 }

 // Return the current size of a semispace, allocatable and non-allocatable
 // memory.
 intptr_t TotalCapacity() {
   DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
   return to_space_.current_capacity();
 }

 // Committed memory for NewSpace is the committed memory of both semi-spaces
(...skipping 469 matching lines...)
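
The two call sites rewritten above are straightforward arithmetic over the now-shared constant: Size() counts the fully used pages plus the bytes allocated so far on the current page (top minus page_low), and Capacity() converts a semispace's byte capacity into whole pages and multiplies by the allocatable portion of each page. A minimal sketch of both calculations follows; the function names and all numbers are invented for illustration and do not reflect the real NewSpace bookkeeping.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    // Illustrative constants only.
    constexpr intptr_t kPageSize = intptr_t{1} << 19;
    constexpr intptr_t kObjectStartOffset = 256;
    constexpr intptr_t kAllocatableMemory = kPageSize - kObjectStartOffset;

    // Mirrors the shape of NewSpace::Size(): whole pages already used, plus the
    // bytes allocated so far on the current page (top - page_low).
    intptr_t AllocatedBytes(intptr_t pages_used, intptr_t top, intptr_t page_low) {
      return pages_used * kAllocatableMemory + (top - page_low);
    }

    // Mirrors the shape of NewSpace::Capacity(): the semispace's byte capacity in
    // whole pages, times the allocatable portion of each page.
    intptr_t AllocatableCapacity(intptr_t current_capacity_bytes) {
      return (current_capacity_bytes / kPageSize) * kAllocatableMemory;
    }

    int main() {
      printf("size:     %" PRIdPTR "\n", AllocatedBytes(3, 0x2000, 0x100));
      printf("capacity: %" PRIdPTR "\n", AllocatableCapacity(8 * kPageSize));
      return 0;
    }
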
   count = 0;
 }
 // Must be small, since an iteration is used for lookup.
 static const int kMaxComments = 64;
 };
 #endif
 }  // namespace internal
 }  // namespace v8

 #endif  // V8_HEAP_SPACES_H_