Chromium Code Reviews

Unified Diff: src/heap/spaces.h

Issue 2286613002: [heap] MemoryChunk cleanup (Closed)
Patch Set: rebase (created 4 years, 3 months ago)
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef V8_HEAP_SPACES_H_
 #define V8_HEAP_SPACES_H_
 
 #include <list>
 #include <memory>
 #include <unordered_set>
(...skipping 209 matching lines...)
   friend class FreeList;
   friend class PagedSpace;
 };
 
 // MemoryChunk represents a memory region owned by a specific space.
 // It is divided into the header and the body. Chunk start is always
 // 1MB aligned. Start of the body is aligned so it can accommodate
 // any heap object.
 class MemoryChunk {
  public:
-  enum MemoryChunkFlags {
-    IS_EXECUTABLE,
-    POINTERS_TO_HERE_ARE_INTERESTING,
-    POINTERS_FROM_HERE_ARE_INTERESTING,
-    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
-    IN_TO_SPACE,    // All pages in new space has one of these two set.
-    NEW_SPACE_BELOW_AGE_MARK,
-    EVACUATION_CANDIDATE,
-    NEVER_EVACUATE,  // May contain immortal immutables.
+  enum Flag {
+    NO_FLAGS = 0u,
+    IS_EXECUTABLE = 1u << 0,
+    POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
+    POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
+    // A page in new space has one of the next two flags set.
+    IN_FROM_SPACE = 1u << 3,
+    IN_TO_SPACE = 1u << 4,
+    NEW_SPACE_BELOW_AGE_MARK = 1u << 5,
+    EVACUATION_CANDIDATE = 1u << 6,
+    NEVER_EVACUATE = 1u << 7,
 
     // Large objects can have a progress bar in their page header. These objects
     // are scanned in increments and will be kept black while being scanned.
     // Even if the mutator writes to them they will be kept black and a white
     // to grey transition is performed in the value.
-    HAS_PROGRESS_BAR,
+    HAS_PROGRESS_BAR = 1u << 8,
 
     // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
     // from new to old space during evacuation.
-    PAGE_NEW_OLD_PROMOTION,
+    PAGE_NEW_OLD_PROMOTION = 1u << 9,
 
     // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
     // within the new space during evacuation.
-    PAGE_NEW_NEW_PROMOTION,
+    PAGE_NEW_NEW_PROMOTION = 1u << 10,
 
     // This flag is intended to be used for testing. Works only when both
     // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
     // are set. It forces the page to become an evacuation candidate at the
     // next candidate selection cycle.
-    FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
+    FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
 
     // This flag is intended to be used for testing.
-    NEVER_ALLOCATE_ON_PAGE,
+    NEVER_ALLOCATE_ON_PAGE = 1u << 12,
 
     // The memory chunk is already logically freed; however, the actual
     // freeing still has to be performed.
-    PRE_FREED,
+    PRE_FREED = 1u << 13,
 
     // |POOLED|: When actually freeing this chunk, only uncommit and do not
     // give up the reservation as we still reuse the chunk at some point.
-    POOLED,
+    POOLED = 1u << 14,
 
     // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
     // has been aborted and needs special handling by the sweeper.
-    COMPACTION_WAS_ABORTED,
+    COMPACTION_WAS_ABORTED = 1u << 15,
 
     // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing, evacuation
     // of pages is sometimes aborted. The flag is used to avoid repeatedly
     // triggering on the same page.
-    COMPACTION_WAS_ABORTED_FOR_TESTING,
+    COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
 
     // |ANCHOR|: Flag is set if page is an anchor.
-    ANCHOR,
-
-    // Last flag, keep at bottom.
-    NUM_MEMORY_CHUNK_FLAGS
-  };
+    ANCHOR = 1u << 17,
+  };
+  typedef base::Flags<Flag, uintptr_t> Flags;
+
+  static const int kPointersToHereAreInterestingMask =
+      POINTERS_TO_HERE_ARE_INTERESTING;
+
+  static const int kPointersFromHereAreInterestingMask =
+      POINTERS_FROM_HERE_ARE_INTERESTING;
+
+  static const int kEvacuationCandidateMask = EVACUATION_CANDIDATE;
+
+  static const int kIsInNewSpaceMask = IN_FROM_SPACE | IN_TO_SPACE;
+
+  static const int kSkipEvacuationSlotsRecordingMask =
+      kEvacuationCandidateMask | kIsInNewSpaceMask;
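
The switch from positional enumerators to explicit 1u << n values is what lets the masks above be written as plain bitwise ORs of enumerators instead of the old 1 << FLAG arithmetic. A minimal, self-contained sketch of the pattern (illustrative names, not V8 code):

    #include <cstdint>

    enum Flag : uint32_t {
      kNoFlags = 0u,
      kInFromSpace = 1u << 3,  // each enumerator carries its bit value
      kInToSpace = 1u << 4,
    };

    // Composite masks become plain ORs of enumerators.
    static const uint32_t kIsInNewSpaceMask = kInFromSpace | kInToSpace;

    static_assert((kIsInNewSpaceMask & kInFromSpace) != 0, "mask covers FROM");
    static_assert((kIsInNewSpaceMask & kInToSpace) != 0, "mask covers TO");

    int main() {
      uint32_t flags = kInToSpace;  // a page sitting in to-space
      return (flags & kIsInNewSpaceMask) != 0 ? 0 : 1;  // 0: it is in new space
    }
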
 
   // |kSweepingDone|: The page state when sweeping is complete or sweeping must
   // not be performed on that page. Sweeper threads that are done with their
   // work will set this value and not touch the page anymore.
   // |kSweepingPending|: This page is ready for parallel sweeping.
   // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
   enum ConcurrentSweepingState {
     kSweepingDone,
     kSweepingPending,
     kSweepingInProgress,
   };
 
   // Every n write barrier invocations we go to the runtime even though
   // we could have handled it in generated code. This lets us check
   // whether we have hit the limit and should do some more marking.
   static const int kWriteBarrierCounterGranularity = 500;
 
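
The granularity constant drives a countdown in the write barrier. A rough, illustrative sketch of the pattern the comment describes (plain C++, not V8's generated code; the function names are invented):

    const int kWriteBarrierCounterGranularity = 500;
    static int write_barrier_counter = kWriteBarrierCounterGranularity;

    static void RuntimeCheckMarkingProgress() {
      // In the real system this would re-check incremental-marking limits.
    }

    void WriteBarrierCountdown() {
      if (--write_barrier_counter == 0) {
        write_barrier_counter = kWriteBarrierCounterGranularity;
        RuntimeCheckMarkingProgress();  // slow path, once every 500 calls
      }
      // Fast path: the barrier work itself stays in generated code.
    }

    int main() {
      for (int i = 0; i < 1500; i++) WriteBarrierCountdown();  // 3 slow calls
      return 0;
    }
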
-  static const int kPointersToHereAreInterestingMask =
-      1 << POINTERS_TO_HERE_ARE_INTERESTING;
-
-  static const int kPointersFromHereAreInterestingMask =
-      1 << POINTERS_FROM_HERE_ARE_INTERESTING;
-
-  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
-
-  static const int kSkipEvacuationSlotsRecordingMask =
-      (1 << EVACUATION_CANDIDATE) | (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
-
   static const intptr_t kAlignment =
       (static_cast<uintptr_t>(1) << kPageSizeBits);
 
   static const intptr_t kAlignmentMask = kAlignment - 1;
 
   static const intptr_t kSizeOffset = 0;
 
   static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;
 
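
kAlignment and kAlignmentMask are what make the FromAddress-style factory methods cheap: since every chunk starts on a kAlignment boundary, masking off the low bits of any interior address yields the owning chunk. A small sketch, assuming kPageSizeBits is 20 (1 MB chunks, per the class comment above):

    #include <cstdint>

    const uintptr_t kPageSizeBits = 20;  // assumption: 1 MB chunks
    const uintptr_t kAlignment = static_cast<uintptr_t>(1) << kPageSizeBits;
    const uintptr_t kAlignmentMask = kAlignment - 1;

    // Round any interior address down to its 1 MB chunk base.
    uintptr_t ChunkBaseFromAddress(uintptr_t addr) {
      return addr & ~kAlignmentMask;
    }

    int main() {
      uintptr_t base = 0x40000000;         // hypothetical chunk start
      uintptr_t interior = base + 0x1234;  // an object inside the chunk
      return ChunkBaseFromAddress(interior) == base ? 0 : 1;  // returns 0
    }
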
-  static const intptr_t kLiveBytesOffset =
-      kSizeOffset + kPointerSize  // size_t size
-      + kIntptrSize               // intptr_t flags_
-      + kPointerSize              // Address area_start_
-      + kPointerSize              // Address area_end_
-      + 2 * kPointerSize          // base::VirtualMemory reservation_
-      + kPointerSize              // Address owner_
-      + kPointerSize              // Heap* heap_
-      + kIntSize;                 // int progress_bar_
-
-  static const size_t kOldToNewSlotsOffset =
-      kLiveBytesOffset + kIntSize;  // int live_byte_count_
-
-  static const size_t kWriteBarrierCounterOffset =
-      kOldToNewSlotsOffset + kPointerSize  // SlotSet* old_to_new_slots_;
-      + kPointerSize                       // SlotSet* old_to_old_slots_;
-      + kPointerSize   // TypedSlotSet* typed_old_to_new_slots_;
-      + kPointerSize   // TypedSlotSet* typed_old_to_old_slots_;
-      + kPointerSize;  // SkipList* skip_list_;
+  static const size_t kWriteBarrierCounterOffset =
+      kSizeOffset + kPointerSize  // size_t size
+      + kIntptrSize               // Flags flags_
+      + kPointerSize              // Address area_start_
+      + kPointerSize              // Address area_end_
+      + 2 * kPointerSize          // base::VirtualMemory reservation_
+      + kPointerSize              // Address owner_
+      + kPointerSize              // Heap* heap_
+      + kIntSize                  // int progress_bar_
+      + kIntSize                  // int live_bytes_count_
+      + kPointerSize              // SlotSet* old_to_new_slots_;
+      + kPointerSize              // SlotSet* old_to_old_slots_;
+      + kPointerSize              // TypedSlotSet* typed_old_to_new_slots_;
+      + kPointerSize              // TypedSlotSet* typed_old_to_old_slots_;
+      + kPointerSize;             // SkipList* skip_list_;
 
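
The hunk above folds the intermediate kLiveBytesOffset and kOldToNewSlotsOffset constants into a single hand-summed kWriteBarrierCounterOffset. This bookkeeping style relies on the MemoryChunkValidator asserts further down to keep the manual sums honest. A minimal sketch of the pattern with hypothetical fields (not MemoryChunk's real layout):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical header mirroring the offset-summing style above.
    struct ChunkHeader {
      size_t size;
      uintptr_t flags;
      int progress_bar;
      int live_bytes_count;
    };

    const size_t kSizeOffset = 0;
    const size_t kFlagsOffset = kSizeOffset + sizeof(size_t);
    const size_t kProgressBarOffset = kFlagsOffset + sizeof(uintptr_t);
    const size_t kLiveBytesCountOffset = kProgressBarOffset + sizeof(int);

    // Pin the hand-computed sums to the compiler's actual layout, exactly
    // as MemoryChunkValidator does with STATIC_ASSERT and offsetof.
    static_assert(offsetof(ChunkHeader, flags) == kFlagsOffset,
                  "flags offset matches");
    static_assert(offsetof(ChunkHeader, live_bytes_count) ==
                      kLiveBytesCountOffset,
                  "live_bytes_count offset matches");

    int main() { return 0; }
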
   static const size_t kMinHeaderSize =
       kWriteBarrierCounterOffset +
       kIntptrSize         // intptr_t write_barrier_counter_
       + kPointerSize      // AtomicValue high_water_mark_
       + kPointerSize      // base::Mutex* mutex_
       + kPointerSize      // base::AtomicWord concurrent_sweeping_
       + 2 * kPointerSize  // AtomicNumber free-list statistics
       + kPointerSize      // AtomicValue next_chunk_
       + kPointerSize      // AtomicValue prev_chunk_
       // FreeListCategory categories_[kNumberOfCategories]
       + FreeListCategory::kSize * kNumberOfCategories +
-      kPointerSize  // LocalArrayBufferTracker* local_tracker_;
+      kPointerSize  // LocalArrayBufferTracker* local_tracker_
       // std::unordered_set<Address>* black_area_end_marker_map_
       + kPointerSize;
 
   // We add some more space to the computed header size to account for missing
   // alignment requirements in our computation.
   // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
   static const size_t kHeaderSize = kMinHeaderSize;
 
   static const int kBodyOffset =
       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
(...skipping 146 matching lines...)
   }
 
   inline Address MarkbitIndexToAddress(uint32_t index) {
     return this->address() + (index << kPointerSizeLog2);
   }
 
   void ClearLiveness();
 
   void PrintMarkbits() { markbits()->Print(); }
 
-  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
-
-  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
-
-  bool IsFlagSet(int flag) {
-    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
-  }
+  void SetFlag(Flag flag) { flags_ |= flag; }
+  void ClearFlag(Flag flag) { flags_ &= ~Flags(flag); }
+  bool IsFlagSet(Flag flag) { return flags_ & flag; }
 
   // Set or clear multiple flags at a time. The flags in the mask are set to
   // the value in "flags", the rest retain the current value in |flags_|.
-  void SetFlags(intptr_t flags, intptr_t mask) {
-    flags_ = (flags_ & ~mask) | (flags & mask);
-  }
+  void SetFlags(uintptr_t flags, uintptr_t mask) {
+    flags_ = (flags_ & ~Flags(mask)) | (Flags(flags) & Flags(mask));
+  }
 
   // Return all current flags.
-  intptr_t GetFlags() { return flags_; }
+  uintptr_t GetFlags() { return flags_; }
 
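
The masked update in SetFlags is worth spelling out: bits inside the mask are taken from the flags argument, and every other bit keeps its current value. A self-contained sketch of just that semantics (a simplified stand-in, not the real MemoryChunk):

    #include <cassert>
    #include <cstdint>

    struct ChunkFlags {
      uintptr_t bits = 0;
      // Bits inside |mask| are set to the value in |flags|; the rest are kept.
      void SetFlags(uintptr_t flags, uintptr_t mask) {
        bits = (bits & ~mask) | (flags & mask);
      }
    };

    int main() {
      const uintptr_t kInFromSpace = 1u << 3;
      const uintptr_t kInToSpace = 1u << 4;
      const uintptr_t kNeverEvacuate = 1u << 7;

      ChunkFlags c;
      c.bits = kInFromSpace | kNeverEvacuate;
      // Rewrite only the new-space bits; NEVER_EVACUATE must survive.
      c.SetFlags(kInToSpace, kInFromSpace | kInToSpace);
      assert(c.bits == (kInToSpace | kNeverEvacuate));
      return 0;
    }

This is the same trick Page::kCopyOnFlipFlagsMask (below) relies on when flags are copied from from-space pages to to-space pages.
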
   bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
 
   void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
 
   bool IsEvacuationCandidate() {
     DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
     return IsFlagSet(EVACUATION_CANDIDATE);
   }
 
   bool CanAllocate() {
     return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
   }
 
   bool ShouldSkipEvacuationSlotRecording() {
     return ((flags_ & kSkipEvacuationSlotsRecordingMask) != 0) &&
            !IsFlagSet(COMPACTION_WAS_ABORTED);
   }
 
   Executability executable() {
     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
   }
 
-  bool InNewSpace() {
-    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
-  }
+  bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; }
 
   bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
 
   bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
 
   MemoryChunk* next_chunk() { return next_chunk_.Value(); }
 
   MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
 
   void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
(...skipping 54 matching lines...)
                                  Address area_start, Address area_end,
                                  Executability executable, Space* owner,
                                  base::VirtualMemory* reservation);
 
   // Should be called when the memory chunk is about to be freed.
   void ReleaseAllocatedMemory();
 
   base::VirtualMemory* reserved_memory() { return &reservation_; }
 
   size_t size_;
-  intptr_t flags_;
+  Flags flags_;
 
   // Start and end of allocatable memory on this chunk.
   Address area_start_;
   Address area_end_;
 
   // If the chunk needs to remember its memory reservation, it is stored here.
   base::VirtualMemory reservation_;
 
   // The identity of the owning space. This is tagged as a failure pointer, but
   // no failure can be in an object, so this can be distinguished from any entry
(...skipping 45 matching lines...)
   // Stores the end addresses of black areas.
   std::unordered_set<Address>* black_area_end_marker_map_;
 
  private:
   void InitializeReservedMemory() { reservation_.Reset(); }
 
   friend class MemoryAllocator;
   friend class MemoryChunkValidator;
 };
 
+DEFINE_OPERATORS_FOR_FLAGS(MemoryChunk::Flags)
+
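
DEFINE_OPERATORS_FOR_FLAGS exists because the bitwise operators for a flags wrapper must live at namespace scope, outside the class, so that Flag | Flag yields the type-safe Flags wrapper rather than a plain int. A much-simplified sketch of the shape such a wrapper and the macro expansion take (V8's real base::Flags template lives in src/base/flags.h and is more complete; everything here is illustrative):

    #include <cstdint>

    template <typename EnumT>
    class FlagsWrapper {
     public:
      FlagsWrapper() : bits_(0) {}
      FlagsWrapper(EnumT flag) : bits_(static_cast<uintptr_t>(flag)) {}
      explicit FlagsWrapper(uintptr_t bits) : bits_(bits) {}
      FlagsWrapper operator|(FlagsWrapper other) const {
        return FlagsWrapper(bits_ | other.bits_);
      }
      FlagsWrapper operator&(FlagsWrapper other) const {
        return FlagsWrapper(bits_ & other.bits_);
      }
      FlagsWrapper operator~() const { return FlagsWrapper(~bits_); }
      explicit operator bool() const { return bits_ != 0; }

     private:
      uintptr_t bits_;
    };

    enum Flag : unsigned { kA = 1u << 0, kB = 1u << 1 };

    // The macro would expand to namespace-scope operators like this one:
    inline FlagsWrapper<Flag> operator|(Flag lhs, Flag rhs) {
      return FlagsWrapper<Flag>(lhs) | FlagsWrapper<Flag>(rhs);
    }

    int main() {
      FlagsWrapper<Flag> f = kA | kB;  // enum | enum yields the wrapper
      return (f & kA) ? 0 : 1;         // 0: kA is set
    }
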
 // -----------------------------------------------------------------------------
 // A page is a memory chunk of size 1 MB. Large object pages may be larger.
 //
 // The only way to get a page pointer is by calling factory methods:
 //   Page* p = Page::FromAddress(addr); or
 //   Page* p = Page::FromTopOrLimit(top);
 class Page : public MemoryChunk {
  public:
   static const intptr_t kCopyAllFlags = ~0;
 
   // Page flags copied from from-space to to-space when flipping semispaces.
   static const intptr_t kCopyOnFlipFlagsMask =
-      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
-      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+      static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+      static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
 
   // Maximum object size that gets allocated into regular pages. Objects larger
   // than that size are allocated in large object space and are never moved in
   // memory. This also applies to new space allocation, since objects are never
   // migrated from new space to large object space. Takes double alignment into
   // account.
   // TODO(hpayer): This limit should be way smaller but we currently have
   // short-lived objects >256K.
   static const int kMaxRegularHeapObjectSize = 600 * KB;
 
(...skipping 247 matching lines...)
   intptr_t committed_;
   intptr_t max_committed_;
 
   DISALLOW_COPY_AND_ASSIGN(Space);
 };
 
 
 class MemoryChunkValidator {
   // Computed offsets should match the compiler-generated ones.
   STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
-  STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
-                offsetof(MemoryChunk, live_byte_count_));
-  STATIC_ASSERT(MemoryChunk::kOldToNewSlotsOffset ==
-                offsetof(MemoryChunk, old_to_new_slots_));
   STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
                 offsetof(MemoryChunk, write_barrier_counter_));
 
   // Validate our estimates on the header size.
   STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
   STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
   STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
 };
 
 
(...skipping 2043 matching lines...)
       count = 0;
     }
     // Must be small, since an iteration is used for lookup.
     static const int kMaxComments = 64;
   };
 #endif
 }  // namespace internal
 }  // namespace v8
 
 #endif  // V8_HEAP_SPACES_H_
