| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_SPACES_H_ | 5 #ifndef V8_SPACES_H_ |
| 6 #define V8_SPACES_H_ | 6 #define V8_SPACES_H_ |
| 7 | 7 |
| 8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
| 9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
| 10 #include "src/base/platform/mutex.h" |
| 10 #include "src/hashmap.h" | 11 #include "src/hashmap.h" |
| 11 #include "src/list.h" | 12 #include "src/list.h" |
| 12 #include "src/log.h" | 13 #include "src/log.h" |
| 13 #include "src/platform/mutex.h" | |
| 14 #include "src/utils.h" | 14 #include "src/utils.h" |
| 15 | 15 |
| 16 namespace v8 { | 16 namespace v8 { |
| 17 namespace internal { | 17 namespace internal { |
| 18 | 18 |
| 19 class Isolate; | 19 class Isolate; |
| 20 | 20 |
| 21 // ----------------------------------------------------------------------------- | 21 // ----------------------------------------------------------------------------- |
| 22 // Heap structures: | 22 // Heap structures: |
| 23 // | 23 // |
| (...skipping 297 matching lines...) |
| 321 } | 321 } |
| 322 } | 322 } |
| 323 | 323 |
| 324 void set_owner(Space* space) { | 324 void set_owner(Space* space) { |
| 325 ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0); | 325 ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0); |
| 326 owner_ = reinterpret_cast<Address>(space) + kFailureTag; | 326 owner_ = reinterpret_cast<Address>(space) + kFailureTag; |
| 327 ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == | 327 ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == |
| 328 kFailureTag); | 328 kFailureTag); |
| 329 } | 329 } |
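For readers unfamiliar with the tagging trick in `set_owner()` above: a minimal standalone sketch of the idea, with illustrative constants and types (the real `kFailureTag`/`kFailureTagMask` and `Space` come from V8's own headers, not from this sketch), and an untagging helper that is only roughly what the matching `owner()` accessor does.

```cpp
#include <cassert>
#include <cstdint>

typedef uint8_t* Address;

const intptr_t kFailureTag = 3;       // illustrative values, not V8's definitions
const intptr_t kFailureTagMask = 3;

struct Space { void* dummy; };        // stand-in for v8::internal::Space

// Tag the space pointer so it can be told apart from anything that could
// legally appear inside a heap object (mirrors set_owner() above).
Address TagOwner(Space* space) {
  assert((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
  return reinterpret_cast<Address>(space) + kFailureTag;
}

// Strip the tag again -- roughly what the corresponding owner() accessor does.
Space* UntagOwner(Address owner) {
  assert((reinterpret_cast<intptr_t>(owner) & kFailureTagMask) == kFailureTag);
  return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner) - kFailureTag);
}

int main() {
  Space space;  // pointer-sized member keeps the address suitably aligned
  Address tagged = TagOwner(&space);
  assert(UntagOwner(tagged) == &space);
  return 0;
}
```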
| 330 | 330 |
| 331 VirtualMemory* reserved_memory() { | 331 base::VirtualMemory* reserved_memory() { |
| 332 return &reservation_; | 332 return &reservation_; |
| 333 } | 333 } |
| 334 | 334 |
| 335 void InitializeReservedMemory() { | 335 void InitializeReservedMemory() { |
| 336 reservation_.Reset(); | 336 reservation_.Reset(); |
| 337 } | 337 } |
| 338 | 338 |
| 339 void set_reserved_memory(VirtualMemory* reservation) { | 339 void set_reserved_memory(base::VirtualMemory* reservation) { |
| 340 ASSERT_NOT_NULL(reservation); | 340 ASSERT_NOT_NULL(reservation); |
| 341 reservation_.TakeControl(reservation); | 341 reservation_.TakeControl(reservation); |
| 342 } | 342 } |
| 343 | 343 |
| 344 bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); } | 344 bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); } |
| 345 void initialize_scan_on_scavenge(bool scan) { | 345 void initialize_scan_on_scavenge(bool scan) { |
| 346 if (scan) { | 346 if (scan) { |
| 347 SetFlag(SCAN_ON_SCAVENGE); | 347 SetFlag(SCAN_ON_SCAVENGE); |
| 348 } else { | 348 } else { |
| 349 ClearFlag(SCAN_ON_SCAVENGE); | 349 ClearFlag(SCAN_ON_SCAVENGE); |
| (...skipping 333 matching lines...) |
| 683 | 683 |
| 684 protected: | 684 protected: |
| 685 size_t size_; | 685 size_t size_; |
| 686 intptr_t flags_; | 686 intptr_t flags_; |
| 687 | 687 |
| 688 // Start and end of allocatable memory on this chunk. | 688 // Start and end of allocatable memory on this chunk. |
| 689 Address area_start_; | 689 Address area_start_; |
| 690 Address area_end_; | 690 Address area_end_; |
| 691 | 691 |
| 692 // If the chunk needs to remember its memory reservation, it is stored here. | 692 // If the chunk needs to remember its memory reservation, it is stored here. |
| 693 VirtualMemory reservation_; | 693 base::VirtualMemory reservation_; |
| 694 // The identity of the owning space. This is tagged as a failure pointer, but | 694 // The identity of the owning space. This is tagged as a failure pointer, but |
| 695 // no failure can be in an object, so this can be distinguished from any entry | 695 // no failure can be in an object, so this can be distinguished from any entry |
| 696 // in a fixed array. | 696 // in a fixed array. |
| 697 Address owner_; | 697 Address owner_; |
| 698 Heap* heap_; | 698 Heap* heap_; |
| 699 // Used by the store buffer to keep track of which pages to mark scan-on- | 699 // Used by the store buffer to keep track of which pages to mark scan-on- |
| 700 // scavenge. | 700 // scavenge. |
| 701 int store_buffer_counter_; | 701 int store_buffer_counter_; |
| 702 // Count of bytes marked black on page. | 702 // Count of bytes marked black on page. |
| 703 int live_byte_count_; | 703 int live_byte_count_; |
| (...skipping 247 matching lines...) |
| 951 const size_t commit_size, | 951 const size_t commit_size, |
| 952 size_t* allocated); | 952 size_t* allocated); |
| 953 bool CommitRawMemory(Address start, size_t length); | 953 bool CommitRawMemory(Address start, size_t length); |
| 954 bool UncommitRawMemory(Address start, size_t length); | 954 bool UncommitRawMemory(Address start, size_t length); |
| 955 void FreeRawMemory(Address buf, size_t length); | 955 void FreeRawMemory(Address buf, size_t length); |
| 956 | 956 |
| 957 private: | 957 private: |
| 958 Isolate* isolate_; | 958 Isolate* isolate_; |
| 959 | 959 |
| 960 // The reserved range of virtual memory that all code objects are put in. | 960 // The reserved range of virtual memory that all code objects are put in. |
| 961 VirtualMemory* code_range_; | 961 base::VirtualMemory* code_range_; |
| 962 // Plain old data class, just a struct plus a constructor. | 962 // Plain old data class, just a struct plus a constructor. |
| 963 class FreeBlock { | 963 class FreeBlock { |
| 964 public: | 964 public: |
| 965 FreeBlock(Address start_arg, size_t size_arg) | 965 FreeBlock(Address start_arg, size_t size_arg) |
| 966 : start(start_arg), size(size_arg) { | 966 : start(start_arg), size(size_arg) { |
| 967 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); | 967 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); |
| 968 ASSERT(size >= static_cast<size_t>(Page::kPageSize)); | 968 ASSERT(size >= static_cast<size_t>(Page::kPageSize)); |
| 969 } | 969 } |
| 970 FreeBlock(void* start_arg, size_t size_arg) | 970 FreeBlock(void* start_arg, size_t size_arg) |
| 971 : start(static_cast<Address>(start_arg)), size(size_arg) { | 971 : start(static_cast<Address>(start_arg)), size(size_arg) { |
| (...skipping 137 matching lines...) |
| 1109 // Returns a MemoryChunk in which the memory region from commit_area_size to | 1109 // Returns a MemoryChunk in which the memory region from commit_area_size to |
| 1110 // reserve_area_size of the chunk area is reserved but not committed, it | 1110 // reserve_area_size of the chunk area is reserved but not committed, it |
| 1111 // could be committed later by calling MemoryChunk::CommitArea. | 1111 // could be committed later by calling MemoryChunk::CommitArea. |
| 1112 MemoryChunk* AllocateChunk(intptr_t reserve_area_size, | 1112 MemoryChunk* AllocateChunk(intptr_t reserve_area_size, |
| 1113 intptr_t commit_area_size, | 1113 intptr_t commit_area_size, |
| 1114 Executability executable, | 1114 Executability executable, |
| 1115 Space* space); | 1115 Space* space); |
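The reserve-versus-commit split that the `AllocateChunk` comment describes can be illustrated outside V8 with plain POSIX calls. This is only a sketch of the general pattern (reserve a large range, make a prefix usable, grow it later), not the code path `MemoryAllocator` or `base::VirtualMemory` actually takes, and the sizes are made up.

```cpp
#include <sys/mman.h>
#include <cassert>
#include <cstddef>

int main() {
  const size_t reserve_area_size = 8 * 1024 * 1024;  // reserved address space
  const size_t commit_area_size = 1 * 1024 * 1024;   // usable memory for now

  // Reserve: the range is claimed in the address space but not accessible yet.
  void* base = mmap(NULL, reserve_area_size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != MAP_FAILED);

  // Commit the first commit_area_size bytes; the tail [commit_area_size,
  // reserve_area_size) stays reserved and can be committed the same way later.
  int rc = mprotect(base, commit_area_size, PROT_READ | PROT_WRITE);
  assert(rc == 0);

  static_cast<char*>(base)[0] = 1;  // the committed prefix is now usable

  munmap(base, reserve_area_size);
  return 0;
}
```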
| 1116 | 1116 |
| 1117 Address ReserveAlignedMemory(size_t requested, | 1117 Address ReserveAlignedMemory(size_t requested, |
| 1118 size_t alignment, | 1118 size_t alignment, |
| 1119 VirtualMemory* controller); | 1119 base::VirtualMemory* controller); |
| 1120 Address AllocateAlignedMemory(size_t reserve_size, | 1120 Address AllocateAlignedMemory(size_t reserve_size, |
| 1121 size_t commit_size, | 1121 size_t commit_size, |
| 1122 size_t alignment, | 1122 size_t alignment, |
| 1123 Executability executable, | 1123 Executability executable, |
| 1124 VirtualMemory* controller); | 1124 base::VirtualMemory* controller); |
| 1125 | 1125 |
| 1126 bool CommitMemory(Address addr, size_t size, Executability executable); | 1126 bool CommitMemory(Address addr, size_t size, Executability executable); |
| 1127 | 1127 |
| 1128 void FreeMemory(VirtualMemory* reservation, Executability executable); | 1128 void FreeMemory(base::VirtualMemory* reservation, Executability executable); |
| 1129 void FreeMemory(Address addr, size_t size, Executability executable); | 1129 void FreeMemory(Address addr, size_t size, Executability executable); |
| 1130 | 1130 |
| 1131 // Commit a contiguous block of memory from the initial chunk. Assumes that | 1131 // Commit a contiguous block of memory from the initial chunk. Assumes that |
| 1132 // the address is not NULL, the size is greater than zero, and that the | 1132 // the address is not NULL, the size is greater than zero, and that the |
| 1133 // block is contained in the initial chunk. Returns true if it succeeded | 1133 // block is contained in the initial chunk. Returns true if it succeeded |
| 1134 // and false otherwise. | 1134 // and false otherwise. |
| 1135 bool CommitBlock(Address start, size_t size, Executability executable); | 1135 bool CommitBlock(Address start, size_t size, Executability executable); |
| 1136 | 1136 |
| 1137 // Uncommit a contiguous block of memory [start..(start+size)[. | 1137 // Uncommit a contiguous block of memory [start..(start+size)[. |
| 1138 // start is not NULL, the size is greater than zero, and the | 1138 // start is not NULL, the size is greater than zero, and the |
| (...skipping 24 matching lines...) |
| 1163 static int CodePageGuardSize(); | 1163 static int CodePageGuardSize(); |
| 1164 | 1164 |
| 1165 static int CodePageAreaStartOffset(); | 1165 static int CodePageAreaStartOffset(); |
| 1166 | 1166 |
| 1167 static int CodePageAreaEndOffset(); | 1167 static int CodePageAreaEndOffset(); |
| 1168 | 1168 |
| 1169 static int CodePageAreaSize() { | 1169 static int CodePageAreaSize() { |
| 1170 return CodePageAreaEndOffset() - CodePageAreaStartOffset(); | 1170 return CodePageAreaEndOffset() - CodePageAreaStartOffset(); |
| 1171 } | 1171 } |
| 1172 | 1172 |
| 1173 MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm, | 1173 MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm, |
| 1174 Address start, | 1174 Address start, |
| 1175 size_t commit_size, | 1175 size_t commit_size, |
| 1176 size_t reserved_size); | 1176 size_t reserved_size); |
| 1177 | 1177 |
| 1178 private: | 1178 private: |
| 1179 Isolate* isolate_; | 1179 Isolate* isolate_; |
| 1180 | 1180 |
| 1181 // Maximum space size in bytes. | 1181 // Maximum space size in bytes. |
| 1182 size_t capacity_; | 1182 size_t capacity_; |
| 1183 // Maximum subset of capacity_ that can be executable | 1183 // Maximum subset of capacity_ that can be executable |
| (...skipping 360 matching lines...) |
| 1544 } | 1544 } |
| 1545 | 1545 |
| 1546 FreeListNode** GetEndAddress() { return &end_; } | 1546 FreeListNode** GetEndAddress() { return &end_; } |
| 1547 FreeListNode* end() const { return end_; } | 1547 FreeListNode* end() const { return end_; } |
| 1548 void set_end(FreeListNode* end) { end_ = end; } | 1548 void set_end(FreeListNode* end) { end_ = end; } |
| 1549 | 1549 |
| 1550 int* GetAvailableAddress() { return &available_; } | 1550 int* GetAvailableAddress() { return &available_; } |
| 1551 int available() const { return available_; } | 1551 int available() const { return available_; } |
| 1552 void set_available(int available) { available_ = available; } | 1552 void set_available(int available) { available_ = available; } |
| 1553 | 1553 |
| 1554 Mutex* mutex() { return &mutex_; } | 1554 base::Mutex* mutex() { return &mutex_; } |
| 1555 | 1555 |
| 1556 bool IsEmpty() { | 1556 bool IsEmpty() { |
| 1557 return top() == 0; | 1557 return top() == 0; |
| 1558 } | 1558 } |
| 1559 | 1559 |
| 1560 #ifdef DEBUG | 1560 #ifdef DEBUG |
| 1561 intptr_t SumFreeList(); | 1561 intptr_t SumFreeList(); |
| 1562 int FreeListLength(); | 1562 int FreeListLength(); |
| 1563 #endif | 1563 #endif |
| 1564 | 1564 |
| 1565 private: | 1565 private: |
| 1566 // top_ points to the top FreeListNode* in the free list category. | 1566 // top_ points to the top FreeListNode* in the free list category. |
| 1567 base::AtomicWord top_; | 1567 base::AtomicWord top_; |
| 1568 FreeListNode* end_; | 1568 FreeListNode* end_; |
| 1569 Mutex mutex_; | 1569 base::Mutex mutex_; |
| 1570 | 1570 |
| 1571 // Total available bytes in all blocks of this free list category. | 1571 // Total available bytes in all blocks of this free list category. |
| 1572 int available_; | 1572 int available_; |
| 1573 }; | 1573 }; |
| 1574 | 1574 |
| 1575 | 1575 |
| 1576 // The free list for the old space. The free list is organized in such a way | 1576 // The free list for the old space. The free list is organized in such a way |
| 1577 // as to encourage objects allocated around the same time to be near each | 1577 // as to encourage objects allocated around the same time to be near each |
| 1578 // other. The normal way to allocate is intended to be by bumping a 'top' | 1578 // other. The normal way to allocate is intended to be by bumping a 'top' |
| 1579 // pointer until it hits a 'limit' pointer. When the limit is hit we need to | 1579 // pointer until it hits a 'limit' pointer. When the limit is hit we need to |
| (...skipping 1070 matching lines...) |
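The "top"/"limit" bump-pointer allocation that the free-list comment above refers to, as a minimal standalone sketch. The class and field names here are illustrative, not V8's; the fallback when the limit is hit (refilling from the free list, as the comment describes) is only indicated, not implemented.

```cpp
#include <cstddef>
#include <cstdint>

class BumpAllocator {
 public:
  BumpAllocator(uint8_t* start, uint8_t* limit) : top_(start), limit_(limit) {}

  // Fast path: bump top_ forward; fall back (e.g. to a free list or a fresh
  // linear area) only when the limit is hit.
  void* Allocate(size_t size) {
    if (static_cast<size_t>(limit_ - top_) < size) {
      return NULL;  // a real allocator would refill from the free list here
    }
    uint8_t* result = top_;
    top_ += size;
    return result;
  }

 private:
  uint8_t* top_;    // next free byte
  uint8_t* limit_;  // end of the current linear allocation area
};

int main() {
  static uint8_t buffer[1024];
  BumpAllocator allocator(buffer, buffer + sizeof(buffer));
  void* a = allocator.Allocate(64);
  void* b = allocator.Allocate(64);
  return (a != NULL && b != NULL) ? 0 : 1;
}
```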
| 2650 private: | 2650 private: |
| 2651 // Update allocation info to match the current to-space page. | 2651 // Update allocation info to match the current to-space page. |
| 2652 void UpdateAllocationInfo(); | 2652 void UpdateAllocationInfo(); |
| 2653 | 2653 |
| 2654 Address chunk_base_; | 2654 Address chunk_base_; |
| 2655 uintptr_t chunk_size_; | 2655 uintptr_t chunk_size_; |
| 2656 | 2656 |
| 2657 // The semispaces. | 2657 // The semispaces. |
| 2658 SemiSpace to_space_; | 2658 SemiSpace to_space_; |
| 2659 SemiSpace from_space_; | 2659 SemiSpace from_space_; |
| 2660 VirtualMemory reservation_; | 2660 base::VirtualMemory reservation_; |
| 2661 int pages_used_; | 2661 int pages_used_; |
| 2662 | 2662 |
| 2663 // Start address and bit mask for containment testing. | 2663 // Start address and bit mask for containment testing. |
| 2664 Address start_; | 2664 Address start_; |
| 2665 uintptr_t address_mask_; | 2665 uintptr_t address_mask_; |
| 2666 uintptr_t object_mask_; | 2666 uintptr_t object_mask_; |
| 2667 uintptr_t object_expected_; | 2667 uintptr_t object_expected_; |
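A small sketch of how a start address plus bit mask can implement containment testing for a power-of-two sized, suitably aligned region, in the spirit of the `start_`/`address_mask_` fields above. The region size, alignment, and the exact form of the check are assumptions for illustration, not taken from V8.

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kRegionSize = 1u << 20;            // 1 MB, power of two
  const uintptr_t kStart = 16u << 20;                // assumed kRegionSize-aligned
  const uintptr_t kAddressMask = ~(kRegionSize - 1);

  // An address lies inside [kStart, kStart + kRegionSize) exactly when masking
  // off the low bits yields the region's start address.
  assert(((kStart + 123) & kAddressMask) == kStart);          // inside
  assert(((kStart + kRegionSize) & kAddressMask) != kStart);  // just past the end
  return 0;
}
```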
| 2668 | 2668 |
| 2669 // Allocation pointer and limit for normal allocation and allocation during | 2669 // Allocation pointer and limit for normal allocation and allocation during |
| 2670 // mark-compact collection. | 2670 // mark-compact collection. |
| (...skipping 333 matching lines...) |
| 3004 } | 3004 } |
| 3005 // Must be small, since an iteration is used for lookup. | 3005 // Must be small, since an iteration is used for lookup. |
| 3006 static const int kMaxComments = 64; | 3006 static const int kMaxComments = 64; |
| 3007 }; | 3007 }; |
| 3008 #endif | 3008 #endif |
| 3009 | 3009 |
| 3010 | 3010 |
| 3011 } } // namespace v8::internal | 3011 } } // namespace v8::internal |
| 3012 | 3012 |
| 3013 #endif // V8_SPACES_H_ | 3013 #endif // V8_SPACES_H_ |