Chromium Code Reviews

Diff: src/heap/spaces.h

Issue 2236543002: [heap] Register end of black areas to support faster filtering of invalid slots. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 4 months ago
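
With black allocation, objects allocated during incremental marking land in contiguous black areas of a page. When the collector later filters recorded slots that may have become invalid (for example after arrays are trimmed), it needs to know where such an area ends. This CL registers each black area's end address on its page, so that check becomes a single hash lookup instead of a walk over mark bits. Below is a minimal sketch of the mechanism, using assumed names and a standard-library set in place of V8's base::HashMap; it illustrates the idea only, not the actual V8 code:

    #include <cstdint>
    #include <unordered_set>

    using Address = uintptr_t;

    // Simplified stand-in for the per-page bookkeeping this CL adds.
    struct BlackAreas {
      std::unordered_set<Address> ends;  // end address of each black area

      // Called when an area [start, end) is allocated black during marking.
      void RegisterEnd(Address end) { ends.insert(end); }

      // O(1) query used while filtering possibly-invalid slots: does a
      // black area end exactly at `address`?
      bool EndsAt(Address address) const { return ends.count(address) != 0; }
    };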
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef V8_HEAP_SPACES_H_
 #define V8_HEAP_SPACES_H_
 
 #include <list>
 #include <memory>
 
(...skipping 332 matching lines...)
       kWriteBarrierCounterOffset +
       kIntptrSize         // intptr_t write_barrier_counter_
       + kPointerSize      // AtomicValue high_water_mark_
       + kPointerSize      // base::Mutex* mutex_
       + kPointerSize      // base::AtomicWord concurrent_sweeping_
       + 2 * kPointerSize  // AtomicNumber free-list statistics
       + kPointerSize      // AtomicValue next_chunk_
       + kPointerSize      // AtomicValue prev_chunk_
       // FreeListCategory categories_[kNumberOfCategories]
       + FreeListCategory::kSize * kNumberOfCategories +
-      kPointerSize;  // LocalArrayBufferTracker* local_tracker_;
+      kPointerSize  // LocalArrayBufferTracker* local_tracker_;
+      + kPointerSize;  // base::HashMap* black_area_end_marker_map_;
 
   // We add some more space to the computed header size to account for missing
   // alignment requirements in our computation.
   // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
   static const size_t kHeaderSize = kMinHeaderSize;
 
   static const int kBodyOffset =
       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
 
   // The start offset of the object area in a page. Aligned to both maps and
(...skipping 221 matching lines...)
     owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
     DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
            kPageHeaderTag);
   }
 
   bool HasPageHeader() { return owner() != nullptr; }
 
   void InsertAfter(MemoryChunk* other);
   void Unlink();
 
+  void ReleaseBlackAreaEndMarkerMap() {
+    if (black_area_end_marker_map_) {
+      delete black_area_end_marker_map_;
Michael Lippautz 2016/08/10 16:05:42 nit: we usually also add black_area_end_marker_map_ = nullptr after the delete.
Hannes Payer (out of office) 2016/08/11 10:51:56 Done.
+    }
+  }
+
+  bool IsBlackAreaEndMarker(Address address) {
+    if (black_area_end_marker_map_) {
+      return black_area_end_marker_map_->Lookup(
+          reinterpret_cast<void*>(address), ObjectHash(address));
+    }
+    return false;
+  }
+
+  void AddBlackAreaEndMarker(Address address) {
+    if (!black_area_end_marker_map_) {
+      black_area_end_marker_map_ =
+          new base::HashMap(base::HashMap::PointersMatch, 8);
+    }
+    black_area_end_marker_map_->InsertNew(reinterpret_cast<void*>(address),
+                                          ObjectHash(address));
+  }
+
+  bool HasBlackAreas() { return black_area_end_marker_map_ != nullptr; }
+
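With the reviewer's nit above applied (resetting the pointer after deletion), the release helper would plausibly end up as follows; this is a sketch of the post-review shape, not the verified landed code:

    void ReleaseBlackAreaEndMarkerMap() {
      if (black_area_end_marker_map_) {
        delete black_area_end_marker_map_;
        black_area_end_marker_map_ = nullptr;  // avoid a dangling pointer
      }
    }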
  protected:
   static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                  Address area_start, Address area_end,
                                  Executability executable, Space* owner,
                                  base::VirtualMemory* reservation);
 
   // Should be called when memory chunk is about to be freed.
   void ReleaseAllocatedMemory();
 
   base::VirtualMemory* reserved_memory() { return &reservation_; }
(...skipping 48 matching lines...)
 
   // next_chunk_ holds a pointer of type MemoryChunk
   base::AtomicValue<MemoryChunk*> next_chunk_;
   // prev_chunk_ holds a pointer of type MemoryChunk
   base::AtomicValue<MemoryChunk*> prev_chunk_;
 
   FreeListCategory categories_[kNumberOfCategories];
 
   LocalArrayBufferTracker* local_tracker_;
 
+  // Stores the end addresses of black areas.
+  base::HashMap* black_area_end_marker_map_;
Michael Lippautz 2016/08/10 16:05:42 How about std::unordered_map?
Hannes Payer (out of office) 2016/08/11 10:51:56 Using std::unordered_set which is sufficient.
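A sketch of the direction the review settled on (field and helper shapes are assumptions, not the verified landed code). A set suffices because only membership of an end address is ever queried, there is no value to associate with the key, and holding the set by value removes the manual new/delete and the explicit ObjectHash calls:

    #include <cstdint>
    #include <unordered_set>

    using Address = uintptr_t;

    class MemoryChunkSketch {
     public:
      void AddBlackAreaEndMarker(Address address) {
        black_area_end_marks_.insert(address);
      }
      bool IsBlackAreaEndMarker(Address address) const {
        return black_area_end_marks_.count(address) != 0;
      }
      bool HasBlackAreas() const { return !black_area_end_marks_.empty(); }

     private:
      std::unordered_set<Address> black_area_end_marks_;  // assumed name
    };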
+
  private:
   void InitializeReservedMemory() { reservation_.Reset(); }
 
   friend class MemoryAllocator;
   friend class MemoryChunkValidator;
 };
 
 // -----------------------------------------------------------------------------
 // A page is a memory chunk of size 1MB. Large object pages may be larger.
 //
(...skipping 801 matching lines...)
 
 
 // -----------------------------------------------------------------------------
 // A space has a circular list of pages. The next page can be accessed via
 // Page::next_page() call.
 
 // An abstraction of allocation and relocation pointers in a page-structured
 // space.
 class AllocationInfo {
  public:
-  AllocationInfo() : top_(nullptr), limit_(nullptr) {}
-  AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
+  AllocationInfo() : original_top_(nullptr), top_(nullptr), limit_(nullptr) {}
+  AllocationInfo(Address top, Address limit)
+      : original_top_(top), top_(top), limit_(limit) {}
 
   void Reset(Address top, Address limit) {
+    original_top_ = top;
     set_top(top);
     set_limit(limit);
   }
 
+  Address original_top() { return original_top_; }

Michael Lippautz 2016/08/10 16:05:41 nit: Let's add the SLOW_DCHECK (like below) so that the same alignment check runs here as well.
Hannes Payer (out of office) 2016/08/11 10:51:56 Done.

+
   INLINE(void set_top(Address top)) {
     SLOW_DCHECK(top == NULL ||
                 (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
     top_ = top;
   }
 
   INLINE(Address top()) const {
     SLOW_DCHECK(top_ == NULL ||
                 (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
     return top_;
(...skipping 13 matching lines...)
 
 #ifdef DEBUG
   bool VerifyPagedAllocation() {
     return (Page::FromAllocationAreaAddress(top_) ==
             Page::FromAllocationAreaAddress(limit_)) &&
            (top_ <= limit_);
   }
 #endif
 
  private:
+  // The original top address when the allocation info was initialized.
+  Address original_top_;
   // Current allocation top.
   Address top_;
   // Current allocation limit.
   Address limit_;
 };
 
 
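The new original_top_ remembers where the allocation area started when it was last reset, while top_ keeps moving as objects are bump-allocated. A hedged sketch of the property this exposes (assumed semantics; the names are illustrative, not V8's):

    #include <cstdint>

    using Address = uintptr_t;

    // Simplified model of AllocationInfo with the new field.
    struct AllocationInfoSketch {
      Address original_top = 0;  // top when Reset() was last called
      Address top = 0;           // current bump-allocation pointer

      // Addresses in [original_top, top) were handed out since the last
      // reset, e.g. the black area growing at the top of the page.
      bool AllocatedSinceReset(Address address) const {
        return original_top <= address && address < top;
      }
    };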
 // An abstraction of the accounting statistics of a page-structured space.
 //
 // The stats are only set by functions that ensure they stay balanced. These
(...skipping 1450 matching lines...)
     count = 0;
   }
   // Must be small, since an iteration is used for lookup.
   static const int kMaxComments = 64;
 };
 #endif
 }  // namespace internal
 }  // namespace v8
 
 #endif  // V8_HEAP_SPACES_H_