Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(716)

Side by Side Diff: src/heap/spaces.h

Issue 2236543002: [heap] Register end of black areas to support faster filtering of invalid slots. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: format Created 4 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/heap/mark-compact.cc ('k') | src/heap/spaces.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef V8_HEAP_SPACES_H_ 5 #ifndef V8_HEAP_SPACES_H_
6 #define V8_HEAP_SPACES_H_ 6 #define V8_HEAP_SPACES_H_
7 7
8 #include <list> 8 #include <list>
9 #include <memory> 9 #include <memory>
10 #include <unordered_set>
10 11
11 #include "src/allocation.h" 12 #include "src/allocation.h"
12 #include "src/base/atomic-utils.h" 13 #include "src/base/atomic-utils.h"
13 #include "src/base/atomicops.h" 14 #include "src/base/atomicops.h"
14 #include "src/base/bits.h" 15 #include "src/base/bits.h"
15 #include "src/base/hashmap.h" 16 #include "src/base/hashmap.h"
16 #include "src/base/platform/mutex.h" 17 #include "src/base/platform/mutex.h"
17 #include "src/flags.h" 18 #include "src/flags.h"
18 #include "src/heap/marking.h" 19 #include "src/heap/marking.h"
19 #include "src/list.h" 20 #include "src/list.h"
(...skipping 323 matching lines...) Expand 10 before | Expand all | Expand 10 after
343 kWriteBarrierCounterOffset + 344 kWriteBarrierCounterOffset +
344 kIntptrSize // intptr_t write_barrier_counter_ 345 kIntptrSize // intptr_t write_barrier_counter_
345 + kPointerSize // AtomicValue high_water_mark_ 346 + kPointerSize // AtomicValue high_water_mark_
346 + kPointerSize // base::Mutex* mutex_ 347 + kPointerSize // base::Mutex* mutex_
347 + kPointerSize // base::AtomicWord concurrent_sweeping_ 348 + kPointerSize // base::AtomicWord concurrent_sweeping_
348 + 2 * kPointerSize // AtomicNumber free-list statistics 349 + 2 * kPointerSize // AtomicNumber free-list statistics
349 + kPointerSize // AtomicValue next_chunk_ 350 + kPointerSize // AtomicValue next_chunk_
350 + kPointerSize // AtomicValue prev_chunk_ 351 + kPointerSize // AtomicValue prev_chunk_
351 // FreeListCategory categories_[kNumberOfCategories] 352 // FreeListCategory categories_[kNumberOfCategories]
352 + FreeListCategory::kSize * kNumberOfCategories + 353 + FreeListCategory::kSize * kNumberOfCategories +
353 kPointerSize; // LocalArrayBufferTracker* local_tracker_; 354 kPointerSize // LocalArrayBufferTracker* local_tracker_;
355 // std::unordered_set<Address>* black_area_end_marker_map_
356 + kPointerSize;
354 357
 355 // We add some more space to the computed header size to account for missing 358 // We add some more space to the computed header size to account for missing
356 // alignment requirements in our computation. 359 // alignment requirements in our computation.
357 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. 360 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
358 static const size_t kHeaderSize = kMinHeaderSize; 361 static const size_t kHeaderSize = kMinHeaderSize;
359 362
360 static const int kBodyOffset = 363 static const int kBodyOffset =
361 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); 364 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
362 365
363 // The start offset of the object area in a page. Aligned to both maps and 366 // The start offset of the object area in a page. Aligned to both maps and
(...skipping 221 matching lines...) Expand 10 before | Expand all | Expand 10 after
585 owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag; 588 owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
586 DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == 589 DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
587 kPageHeaderTag); 590 kPageHeaderTag);
588 } 591 }
589 592
590 bool HasPageHeader() { return owner() != nullptr; } 593 bool HasPageHeader() { return owner() != nullptr; }
591 594
592 void InsertAfter(MemoryChunk* other); 595 void InsertAfter(MemoryChunk* other);
593 void Unlink(); 596 void Unlink();
594 597
598 void ReleaseBlackAreaEndMarkerMap() {
599 if (black_area_end_marker_map_) {
600 delete black_area_end_marker_map_;
601 black_area_end_marker_map_ = nullptr;
602 }
603 }
604
605 bool IsBlackAreaEndMarker(Address address) {
606 if (black_area_end_marker_map_) {
607 return black_area_end_marker_map_->find(address) !=
608 black_area_end_marker_map_->end();
609 }
610 return false;
611 }
612
613 void AddBlackAreaEndMarker(Address address) {
614 if (!black_area_end_marker_map_) {
615 black_area_end_marker_map_ = new std::unordered_set<Address>();
616 }
617 black_area_end_marker_map_->insert(address);
Michael Lippautz 2016/08/11 10:58:48 nit: Check that you don't insert duplicates. e.g.
618 }
619
620 bool HasBlackAreas() { return black_area_end_marker_map_ != nullptr; }
621
595 protected: 622 protected:
596 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, 623 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
597 Address area_start, Address area_end, 624 Address area_start, Address area_end,
598 Executability executable, Space* owner, 625 Executability executable, Space* owner,
599 base::VirtualMemory* reservation); 626 base::VirtualMemory* reservation);
600 627
601 // Should be called when memory chunk is about to be freed. 628 // Should be called when memory chunk is about to be freed.
602 void ReleaseAllocatedMemory(); 629 void ReleaseAllocatedMemory();
603 630
604 base::VirtualMemory* reserved_memory() { return &reservation_; } 631 base::VirtualMemory* reserved_memory() { return &reservation_; }
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
653 680
654 // next_chunk_ holds a pointer of type MemoryChunk 681 // next_chunk_ holds a pointer of type MemoryChunk
655 base::AtomicValue<MemoryChunk*> next_chunk_; 682 base::AtomicValue<MemoryChunk*> next_chunk_;
656 // prev_chunk_ holds a pointer of type MemoryChunk 683 // prev_chunk_ holds a pointer of type MemoryChunk
657 base::AtomicValue<MemoryChunk*> prev_chunk_; 684 base::AtomicValue<MemoryChunk*> prev_chunk_;
658 685
659 FreeListCategory categories_[kNumberOfCategories]; 686 FreeListCategory categories_[kNumberOfCategories];
660 687
661 LocalArrayBufferTracker* local_tracker_; 688 LocalArrayBufferTracker* local_tracker_;
662 689
690 // Stores the end addresses of black areas.
691 std::unordered_set<Address>* black_area_end_marker_map_;
692
663 private: 693 private:
664 void InitializeReservedMemory() { reservation_.Reset(); } 694 void InitializeReservedMemory() { reservation_.Reset(); }
665 695
666 friend class MemoryAllocator; 696 friend class MemoryAllocator;
667 friend class MemoryChunkValidator; 697 friend class MemoryChunkValidator;
668 }; 698 };
669 699
670 // ----------------------------------------------------------------------------- 700 // -----------------------------------------------------------------------------
671 // A page is a memory chunk of a size 1MB. Large object pages may be larger. 701 // A page is a memory chunk of a size 1MB. Large object pages may be larger.
672 // 702 //
(...skipping 801 matching lines...) Expand 10 before | Expand all | Expand 10 after
1474 1504
1475 1505
1476 // ----------------------------------------------------------------------------- 1506 // -----------------------------------------------------------------------------
1477 // A space has a circular list of pages. The next page can be accessed via 1507 // A space has a circular list of pages. The next page can be accessed via
1478 // Page::next_page() call. 1508 // Page::next_page() call.
1479 1509
1480 // An abstraction of allocation and relocation pointers in a page-structured 1510 // An abstraction of allocation and relocation pointers in a page-structured
1481 // space. 1511 // space.
1482 class AllocationInfo { 1512 class AllocationInfo {
1483 public: 1513 public:
1484 AllocationInfo() : top_(nullptr), limit_(nullptr) {} 1514 AllocationInfo() : original_top_(nullptr), top_(nullptr), limit_(nullptr) {}
1485 AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {} 1515 AllocationInfo(Address top, Address limit)
1516 : original_top_(top), top_(top), limit_(limit) {}
1486 1517
1487 void Reset(Address top, Address limit) { 1518 void Reset(Address top, Address limit) {
1519 original_top_ = top;
1488 set_top(top); 1520 set_top(top);
1489 set_limit(limit); 1521 set_limit(limit);
1490 } 1522 }
1491 1523
1524 Address original_top() {
1525 SLOW_DCHECK(top_ == NULL ||
1526 (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
1527 return original_top_;
1528 }
1529
1492 INLINE(void set_top(Address top)) { 1530 INLINE(void set_top(Address top)) {
1493 SLOW_DCHECK(top == NULL || 1531 SLOW_DCHECK(top == NULL ||
1494 (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0); 1532 (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
1495 top_ = top; 1533 top_ = top;
1496 } 1534 }
1497 1535
1498 INLINE(Address top()) const { 1536 INLINE(Address top()) const {
1499 SLOW_DCHECK(top_ == NULL || 1537 SLOW_DCHECK(top_ == NULL ||
1500 (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0); 1538 (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
1501 return top_; 1539 return top_;
(...skipping 13 matching lines...) Expand all
1515 1553
1516 #ifdef DEBUG 1554 #ifdef DEBUG
1517 bool VerifyPagedAllocation() { 1555 bool VerifyPagedAllocation() {
1518 return (Page::FromAllocationAreaAddress(top_) == 1556 return (Page::FromAllocationAreaAddress(top_) ==
1519 Page::FromAllocationAreaAddress(limit_)) && 1557 Page::FromAllocationAreaAddress(limit_)) &&
1520 (top_ <= limit_); 1558 (top_ <= limit_);
1521 } 1559 }
1522 #endif 1560 #endif
1523 1561
1524 private: 1562 private:
1563 // The original top address when the allocation info was initialized.
1564 Address original_top_;
1525 // Current allocation top. 1565 // Current allocation top.
1526 Address top_; 1566 Address top_;
1527 // Current allocation limit. 1567 // Current allocation limit.
1528 Address limit_; 1568 Address limit_;
1529 }; 1569 };
1530 1570
1531 1571
1532 // An abstraction of the accounting statistics of a page-structured space. 1572 // An abstraction of the accounting statistics of a page-structured space.
1533 // 1573 //
1534 // The stats are only set by functions that ensure they stay balanced. These 1574 // The stats are only set by functions that ensure they stay balanced. These
(...skipping 1450 matching lines...) Expand 10 before | Expand all | Expand 10 after
2985 count = 0; 3025 count = 0;
2986 } 3026 }
2987 // Must be small, since an iteration is used for lookup. 3027 // Must be small, since an iteration is used for lookup.
2988 static const int kMaxComments = 64; 3028 static const int kMaxComments = 64;
2989 }; 3029 };
2990 #endif 3030 #endif
2991 } // namespace internal 3031 } // namespace internal
2992 } // namespace v8 3032 } // namespace v8
2993 3033
2994 #endif // V8_HEAP_SPACES_H_ 3034 #endif // V8_HEAP_SPACES_H_
OLDNEW
« no previous file with comments | « src/heap/mark-compact.cc ('k') | src/heap/spaces.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698