| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_SPACES_H_ | 5 #ifndef V8_SPACES_H_ |
| 6 #define V8_SPACES_H_ | 6 #define V8_SPACES_H_ |
| 7 | 7 |
| 8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
| 9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
| 10 #include "src/base/platform/mutex.h" | 10 #include "src/base/platform/mutex.h" |
| (...skipping 56 matching lines...) |
| 67 // is to enable linear allocation without having to constantly update the byte | 67 // is to enable linear allocation without having to constantly update the byte |
| 68 // array every time the top field is updated and a new object is created. The | 68 // array every time the top field is updated and a new object is created. The |
| 69 // special garbage section is not in the chain of garbage sections. | 69 // special garbage section is not in the chain of garbage sections. |
| 70 // | 70 // |
| 71 // Since the top and limit fields are in the space, not the page, only one page | 71 // Since the top and limit fields are in the space, not the page, only one page |
| 72 // has a special garbage section, and if the top and limit are equal then there | 72 // has a special garbage section, and if the top and limit are equal then there |
| 73 // is no special garbage section. | 73 // is no special garbage section. |
| 74 | 74 |
| 75 // Some assertion macros used in the debugging mode. | 75 // Some assertion macros used in the debugging mode. |
| 76 | 76 |
| 77 #define ASSERT_PAGE_ALIGNED(address) \ | 77 #define DCHECK_PAGE_ALIGNED(address) \ |
| 78 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) | 78 DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) |
| 79 | 79 |
| 80 #define ASSERT_OBJECT_ALIGNED(address) \ | 80 #define DCHECK_OBJECT_ALIGNED(address) \ |
| 81 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0) | 81 DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0) |
| 82 | 82 |
| 83 #define ASSERT_OBJECT_SIZE(size) \ | 83 #define DCHECK_OBJECT_SIZE(size) \ |
| 84 ASSERT((0 < size) && (size <= Page::kMaxRegularHeapObjectSize)) | 84 DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize)) |
| 85 | 85 |
| 86 #define ASSERT_PAGE_OFFSET(offset) \ | 86 #define DCHECK_PAGE_OFFSET(offset) \ |
| 87 ASSERT((Page::kObjectStartOffset <= offset) \ | 87 DCHECK((Page::kObjectStartOffset <= offset) \ |
| 88 && (offset <= Page::kPageSize)) | 88 && (offset <= Page::kPageSize)) |
| 89 | 89 |
| 90 #define ASSERT_MAP_PAGE_INDEX(index) \ | 90 #define DCHECK_MAP_PAGE_INDEX(index) \ |
| 91 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) | 91 DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) |
| 92 | 92 |
| 93 | 93 |
| 94 class PagedSpace; | 94 class PagedSpace; |
| 95 class MemoryAllocator; | 95 class MemoryAllocator; |
| 96 class AllocationInfo; | 96 class AllocationInfo; |
| 97 class Space; | 97 class Space; |
| 98 class FreeList; | 98 class FreeList; |
| 99 class MemoryChunk; | 99 class MemoryChunk; |
| 100 | 100 |
| 101 class MarkBit { | 101 class MarkBit { |
| (...skipping 213 matching lines...) |
| 315 if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == | 315 if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == |
| 316 kPageHeaderTag) { | 316 kPageHeaderTag) { |
| 317 return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) - | 317 return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) - |
| 318 kPageHeaderTag); | 318 kPageHeaderTag); |
| 319 } else { | 319 } else { |
| 320 return NULL; | 320 return NULL; |
| 321 } | 321 } |
| 322 } | 322 } |
| 323 | 323 |
| 324 void set_owner(Space* space) { | 324 void set_owner(Space* space) { |
| 325 ASSERT((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0); | 325 DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0); |
| 326 owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag; | 326 owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag; |
| 327 ASSERT((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == | 327 DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == |
| 328 kPageHeaderTag); | 328 kPageHeaderTag); |
| 329 } | 329 } |
| 330 | 330 |
| 331 base::VirtualMemory* reserved_memory() { | 331 base::VirtualMemory* reserved_memory() { |
| 332 return &reservation_; | 332 return &reservation_; |
| 333 } | 333 } |
| 334 | 334 |
| 335 void InitializeReservedMemory() { | 335 void InitializeReservedMemory() { |
| 336 reservation_.Reset(); | 336 reservation_.Reset(); |
| 337 } | 337 } |
| 338 | 338 |
| 339 void set_reserved_memory(base::VirtualMemory* reservation) { | 339 void set_reserved_memory(base::VirtualMemory* reservation) { |
| 340 ASSERT_NOT_NULL(reservation); | 340 DCHECK_NOT_NULL(reservation); |
| 341 reservation_.TakeControl(reservation); | 341 reservation_.TakeControl(reservation); |
| 342 } | 342 } |
| 343 | 343 |
| 344 bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); } | 344 bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); } |
| 345 void initialize_scan_on_scavenge(bool scan) { | 345 void initialize_scan_on_scavenge(bool scan) { |
| 346 if (scan) { | 346 if (scan) { |
| 347 SetFlag(SCAN_ON_SCAVENGE); | 347 SetFlag(SCAN_ON_SCAVENGE); |
| 348 } else { | 348 } else { |
| 349 ClearFlag(SCAN_ON_SCAVENGE); | 349 ClearFlag(SCAN_ON_SCAVENGE); |
| 350 } | 350 } |
| (...skipping 139 matching lines...) |
| 490 live_byte_count_ = 0; | 490 live_byte_count_ = 0; |
| 491 } | 491 } |
| 492 void IncrementLiveBytes(int by) { | 492 void IncrementLiveBytes(int by) { |
| 493 if (FLAG_gc_verbose) { | 493 if (FLAG_gc_verbose) { |
| 494 printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", | 494 printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", |
| 495 static_cast<void*>(this), live_byte_count_, | 495 static_cast<void*>(this), live_byte_count_, |
| 496 ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by), | 496 ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by), |
| 497 live_byte_count_ + by); | 497 live_byte_count_ + by); |
| 498 } | 498 } |
| 499 live_byte_count_ += by; | 499 live_byte_count_ += by; |
| 500 ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_); | 500 DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_); |
| 501 } | 501 } |
| 502 int LiveBytes() { | 502 int LiveBytes() { |
| 503 ASSERT(static_cast<unsigned>(live_byte_count_) <= size_); | 503 DCHECK(static_cast<unsigned>(live_byte_count_) <= size_); |
| 504 return live_byte_count_; | 504 return live_byte_count_; |
| 505 } | 505 } |
| 506 | 506 |
| 507 int write_barrier_counter() { | 507 int write_barrier_counter() { |
| 508 return static_cast<int>(write_barrier_counter_); | 508 return static_cast<int>(write_barrier_counter_); |
| 509 } | 509 } |
| 510 | 510 |
| 511 void set_write_barrier_counter(int counter) { | 511 void set_write_barrier_counter(int counter) { |
| 512 write_barrier_counter_ = counter; | 512 write_barrier_counter_ = counter; |
| 513 } | 513 } |
| 514 | 514 |
| 515 int progress_bar() { | 515 int progress_bar() { |
| 516 ASSERT(IsFlagSet(HAS_PROGRESS_BAR)); | 516 DCHECK(IsFlagSet(HAS_PROGRESS_BAR)); |
| 517 return progress_bar_; | 517 return progress_bar_; |
| 518 } | 518 } |
| 519 | 519 |
| 520 void set_progress_bar(int progress_bar) { | 520 void set_progress_bar(int progress_bar) { |
| 521 ASSERT(IsFlagSet(HAS_PROGRESS_BAR)); | 521 DCHECK(IsFlagSet(HAS_PROGRESS_BAR)); |
| 522 progress_bar_ = progress_bar; | 522 progress_bar_ = progress_bar; |
| 523 } | 523 } |
| 524 | 524 |
| 525 void ResetProgressBar() { | 525 void ResetProgressBar() { |
| 526 if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { | 526 if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { |
| 527 set_progress_bar(0); | 527 set_progress_bar(0); |
| 528 ClearFlag(MemoryChunk::HAS_PROGRESS_BAR); | 528 ClearFlag(MemoryChunk::HAS_PROGRESS_BAR); |
| 529 } | 529 } |
| 530 } | 530 } |
| 531 | 531 |
| 532 bool IsLeftOfProgressBar(Object** slot) { | 532 bool IsLeftOfProgressBar(Object** slot) { |
| 533 Address slot_address = reinterpret_cast<Address>(slot); | 533 Address slot_address = reinterpret_cast<Address>(slot); |
| 534 ASSERT(slot_address > this->address()); | 534 DCHECK(slot_address > this->address()); |
| 535 return (slot_address - (this->address() + kObjectStartOffset)) < | 535 return (slot_address - (this->address() + kObjectStartOffset)) < |
| 536 progress_bar(); | 536 progress_bar(); |
| 537 } | 537 } |
| 538 | 538 |
| 539 static void IncrementLiveBytesFromGC(Address address, int by) { | 539 static void IncrementLiveBytesFromGC(Address address, int by) { |
| 540 MemoryChunk::FromAddress(address)->IncrementLiveBytes(by); | 540 MemoryChunk::FromAddress(address)->IncrementLiveBytes(by); |
| 541 } | 541 } |
| 542 | 542 |
| 543 static void IncrementLiveBytesFromMutator(Address address, int by); | 543 static void IncrementLiveBytesFromMutator(Address address, int by); |
| 544 | 544 |
| (...skipping 107 matching lines...) |
| 652 | 652 |
| 653 inline SlotsBuffer* slots_buffer() { | 653 inline SlotsBuffer* slots_buffer() { |
| 654 return slots_buffer_; | 654 return slots_buffer_; |
| 655 } | 655 } |
| 656 | 656 |
| 657 inline SlotsBuffer** slots_buffer_address() { | 657 inline SlotsBuffer** slots_buffer_address() { |
| 658 return &slots_buffer_; | 658 return &slots_buffer_; |
| 659 } | 659 } |
| 660 | 660 |
| 661 void MarkEvacuationCandidate() { | 661 void MarkEvacuationCandidate() { |
| 662 ASSERT(slots_buffer_ == NULL); | 662 DCHECK(slots_buffer_ == NULL); |
| 663 SetFlag(EVACUATION_CANDIDATE); | 663 SetFlag(EVACUATION_CANDIDATE); |
| 664 } | 664 } |
| 665 | 665 |
| 666 void ClearEvacuationCandidate() { | 666 void ClearEvacuationCandidate() { |
| 667 ASSERT(slots_buffer_ == NULL); | 667 DCHECK(slots_buffer_ == NULL); |
| 668 ClearFlag(EVACUATION_CANDIDATE); | 668 ClearFlag(EVACUATION_CANDIDATE); |
| 669 } | 669 } |
| 670 | 670 |
| 671 Address area_start() { return area_start_; } | 671 Address area_start() { return area_start_; } |
| 672 Address area_end() { return area_end_; } | 672 Address area_end() { return area_end_; } |
| 673 int area_size() { | 673 int area_size() { |
| 674 return static_cast<int>(area_end() - area_start()); | 674 return static_cast<int>(area_end() - area_start()); |
| 675 } | 675 } |
| 676 bool CommitArea(size_t requested); | 676 bool CommitArea(size_t requested); |
| 677 | 677 |
| (...skipping 101 matching lines...) |
| 779 } | 779 } |
| 780 | 780 |
| 781 // Returns the offset of a given address to this page. | 781 // Returns the offset of a given address to this page. |
| 782 INLINE(int Offset(Address a)) { | 782 INLINE(int Offset(Address a)) { |
| 783 int offset = static_cast<int>(a - address()); | 783 int offset = static_cast<int>(a - address()); |
| 784 return offset; | 784 return offset; |
| 785 } | 785 } |
| 786 | 786 |
| 787 // Returns the address for a given offset into this page. | 787 // Returns the address for a given offset into this page. |
| 788 Address OffsetToAddress(int offset) { | 788 Address OffsetToAddress(int offset) { |
| 789 ASSERT_PAGE_OFFSET(offset); | 789 DCHECK_PAGE_OFFSET(offset); |
| 790 return address() + offset; | 790 return address() + offset; |
| 791 } | 791 } |
| 792 | 792 |
| 793 // --------------------------------------------------------------------- | 793 // --------------------------------------------------------------------- |
| 794 | 794 |
| 795 // Page size in bytes. This must be a multiple of the OS page size. | 795 // Page size in bytes. This must be a multiple of the OS page size. |
| 796 static const int kPageSize = 1 << kPageSizeBits; | 796 static const int kPageSize = 1 << kPageSizeBits; |
| 797 | 797 |
| 798 // Maximum object size that fits in a page. Objects larger than that size | 798 // Maximum object size that fits in a page. Objects larger than that size |
| 799 // are allocated in large object space and are never moved in memory. This | 799 // are allocated in large object space and are never moved in memory. This |
| (...skipping 129 matching lines...) |
| 929 // Can only be called once, at heap initialization time. | 929 // Can only be called once, at heap initialization time. |
| 930 // Returns false on failure. | 930 // Returns false on failure. |
| 931 bool SetUp(size_t requested_size); | 931 bool SetUp(size_t requested_size); |
| 932 | 932 |
| 933 // Frees the range of virtual memory, and frees the data structures used to | 933 // Frees the range of virtual memory, and frees the data structures used to |
| 934 // manage it. | 934 // manage it. |
| 935 void TearDown(); | 935 void TearDown(); |
| 936 | 936 |
| 937 bool valid() { return code_range_ != NULL; } | 937 bool valid() { return code_range_ != NULL; } |
| 938 Address start() { | 938 Address start() { |
| 939 ASSERT(valid()); | 939 DCHECK(valid()); |
| 940 return static_cast<Address>(code_range_->address()); | 940 return static_cast<Address>(code_range_->address()); |
| 941 } | 941 } |
| 942 bool contains(Address address) { | 942 bool contains(Address address) { |
| 943 if (!valid()) return false; | 943 if (!valid()) return false; |
| 944 Address start = static_cast<Address>(code_range_->address()); | 944 Address start = static_cast<Address>(code_range_->address()); |
| 945 return start <= address && address < start + code_range_->size(); | 945 return start <= address && address < start + code_range_->size(); |
| 946 } | 946 } |
| 947 | 947 |
| 948 // Allocates a chunk of memory from the large-object portion of | 948 // Allocates a chunk of memory from the large-object portion of |
| 949 // the code range. On platforms with no separate code range, should | 949 // the code range. On platforms with no separate code range, should |
| 950 // not be called. | 950 // not be called. |
| 951 MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size, | 951 MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size, |
| 952 const size_t commit_size, | 952 const size_t commit_size, |
| 953 size_t* allocated); | 953 size_t* allocated); |
| 954 bool CommitRawMemory(Address start, size_t length); | 954 bool CommitRawMemory(Address start, size_t length); |
| 955 bool UncommitRawMemory(Address start, size_t length); | 955 bool UncommitRawMemory(Address start, size_t length); |
| 956 void FreeRawMemory(Address buf, size_t length); | 956 void FreeRawMemory(Address buf, size_t length); |
| 957 | 957 |
| 958 private: | 958 private: |
| 959 Isolate* isolate_; | 959 Isolate* isolate_; |
| 960 | 960 |
| 961 // The reserved range of virtual memory that all code objects are put in. | 961 // The reserved range of virtual memory that all code objects are put in. |
| 962 base::VirtualMemory* code_range_; | 962 base::VirtualMemory* code_range_; |
| 963 // Plain old data class, just a struct plus a constructor. | 963 // Plain old data class, just a struct plus a constructor. |
| 964 class FreeBlock { | 964 class FreeBlock { |
| 965 public: | 965 public: |
| 966 FreeBlock(Address start_arg, size_t size_arg) | 966 FreeBlock(Address start_arg, size_t size_arg) |
| 967 : start(start_arg), size(size_arg) { | 967 : start(start_arg), size(size_arg) { |
| 968 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); | 968 DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment)); |
| 969 ASSERT(size >= static_cast<size_t>(Page::kPageSize)); | 969 DCHECK(size >= static_cast<size_t>(Page::kPageSize)); |
| 970 } | 970 } |
| 971 FreeBlock(void* start_arg, size_t size_arg) | 971 FreeBlock(void* start_arg, size_t size_arg) |
| 972 : start(static_cast<Address>(start_arg)), size(size_arg) { | 972 : start(static_cast<Address>(start_arg)), size(size_arg) { |
| 973 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); | 973 DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment)); |
| 974 ASSERT(size >= static_cast<size_t>(Page::kPageSize)); | 974 DCHECK(size >= static_cast<size_t>(Page::kPageSize)); |
| 975 } | 975 } |
| 976 | 976 |
| 977 Address start; | 977 Address start; |
| 978 size_t size; | 978 size_t size; |
| 979 }; | 979 }; |
| 980 | 980 |
| 981 // Freed blocks of memory are added to the free list. When the allocation | 981 // Freed blocks of memory are added to the free list. When the allocation |
| 982 // list is exhausted, the free list is sorted and merged to make the new | 982 // list is exhausted, the free list is sorted and merged to make the new |
| 983 // allocation list. | 983 // allocation list. |
| 984 List<FreeBlock> free_list_; | 984 List<FreeBlock> free_list_; |
| (...skipping 341 matching lines...) |
| 1326 // Page::next_page() call. | 1326 // Page::next_page() call. |
| 1327 | 1327 |
| 1328 // An abstraction of allocation and relocation pointers in a page-structured | 1328 // An abstraction of allocation and relocation pointers in a page-structured |
| 1329 // space. | 1329 // space. |
| 1330 class AllocationInfo { | 1330 class AllocationInfo { |
| 1331 public: | 1331 public: |
| 1332 AllocationInfo() : top_(NULL), limit_(NULL) { | 1332 AllocationInfo() : top_(NULL), limit_(NULL) { |
| 1333 } | 1333 } |
| 1334 | 1334 |
| 1335 INLINE(void set_top(Address top)) { | 1335 INLINE(void set_top(Address top)) { |
| 1336 SLOW_ASSERT(top == NULL || | 1336 SLOW_DCHECK(top == NULL || |
| 1337 (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0); | 1337 (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0); |
| 1338 top_ = top; | 1338 top_ = top; |
| 1339 } | 1339 } |
| 1340 | 1340 |
| 1341 INLINE(Address top()) const { | 1341 INLINE(Address top()) const { |
| 1342 SLOW_ASSERT(top_ == NULL || | 1342 SLOW_DCHECK(top_ == NULL || |
| 1343 (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0); | 1343 (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0); |
| 1344 return top_; | 1344 return top_; |
| 1345 } | 1345 } |
| 1346 | 1346 |
| 1347 Address* top_address() { | 1347 Address* top_address() { |
| 1348 return &top_; | 1348 return &top_; |
| 1349 } | 1349 } |
| 1350 | 1350 |
| 1351 INLINE(void set_limit(Address limit)) { | 1351 INLINE(void set_limit(Address limit)) { |
| 1352 SLOW_ASSERT(limit == NULL || | 1352 SLOW_DCHECK(limit == NULL || |
| 1353 (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0); | 1353 (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0); |
| 1354 limit_ = limit; | 1354 limit_ = limit; |
| 1355 } | 1355 } |
| 1356 | 1356 |
| 1357 INLINE(Address limit()) const { | 1357 INLINE(Address limit()) const { |
| 1358 SLOW_ASSERT(limit_ == NULL || | 1358 SLOW_DCHECK(limit_ == NULL || |
| 1359 (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0); | 1359 (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0); |
| 1360 return limit_; | 1360 return limit_; |
| 1361 } | 1361 } |
| 1362 | 1362 |
| 1363 Address* limit_address() { | 1363 Address* limit_address() { |
| 1364 return &limit_; | 1364 return &limit_; |
| 1365 } | 1365 } |
| 1366 | 1366 |
| 1367 #ifdef DEBUG | 1367 #ifdef DEBUG |
| 1368 bool VerifyPagedAllocation() { | 1368 bool VerifyPagedAllocation() { |
| (...skipping 56 matching lines...) |
| 1425 | 1425 |
| 1426 // Grow the space by adding available bytes. They are initially marked as | 1426 // Grow the space by adding available bytes. They are initially marked as |
| 1427 // being in use (part of the size), but will normally be immediately freed, | 1427 // being in use (part of the size), but will normally be immediately freed, |
| 1428 // putting them on the free list and removing them from size_. | 1428 // putting them on the free list and removing them from size_. |
| 1429 void ExpandSpace(int size_in_bytes) { | 1429 void ExpandSpace(int size_in_bytes) { |
| 1430 capacity_ += size_in_bytes; | 1430 capacity_ += size_in_bytes; |
| 1431 size_ += size_in_bytes; | 1431 size_ += size_in_bytes; |
| 1432 if (capacity_ > max_capacity_) { | 1432 if (capacity_ > max_capacity_) { |
| 1433 max_capacity_ = capacity_; | 1433 max_capacity_ = capacity_; |
| 1434 } | 1434 } |
| 1435 ASSERT(size_ >= 0); | 1435 DCHECK(size_ >= 0); |
| 1436 } | 1436 } |
| 1437 | 1437 |
| 1438 // Shrink the space by removing available bytes. Since shrinking is done | 1438 // Shrink the space by removing available bytes. Since shrinking is done |
| 1439 // during sweeping, bytes have been marked as being in use (part of the size) | 1439 // during sweeping, bytes have been marked as being in use (part of the size) |
| 1440 // and are hereby freed. | 1440 // and are hereby freed. |
| 1441 void ShrinkSpace(int size_in_bytes) { | 1441 void ShrinkSpace(int size_in_bytes) { |
| 1442 capacity_ -= size_in_bytes; | 1442 capacity_ -= size_in_bytes; |
| 1443 size_ -= size_in_bytes; | 1443 size_ -= size_in_bytes; |
| 1444 ASSERT(size_ >= 0); | 1444 DCHECK(size_ >= 0); |
| 1445 } | 1445 } |
| 1446 | 1446 |
| 1447 // Allocate from available bytes (available -> size). | 1447 // Allocate from available bytes (available -> size). |
| 1448 void AllocateBytes(intptr_t size_in_bytes) { | 1448 void AllocateBytes(intptr_t size_in_bytes) { |
| 1449 size_ += size_in_bytes; | 1449 size_ += size_in_bytes; |
| 1450 ASSERT(size_ >= 0); | 1450 DCHECK(size_ >= 0); |
| 1451 } | 1451 } |
| 1452 | 1452 |
| 1453 // Free allocated bytes, making them available (size -> available). | 1453 // Free allocated bytes, making them available (size -> available). |
| 1454 void DeallocateBytes(intptr_t size_in_bytes) { | 1454 void DeallocateBytes(intptr_t size_in_bytes) { |
| 1455 size_ -= size_in_bytes; | 1455 size_ -= size_in_bytes; |
| 1456 ASSERT(size_ >= 0); | 1456 DCHECK(size_ >= 0); |
| 1457 } | 1457 } |
| 1458 | 1458 |
| 1459 // Waste free bytes (available -> waste). | 1459 // Waste free bytes (available -> waste). |
| 1460 void WasteBytes(int size_in_bytes) { | 1460 void WasteBytes(int size_in_bytes) { |
| 1461 ASSERT(size_in_bytes >= 0); | 1461 DCHECK(size_in_bytes >= 0); |
| 1462 waste_ += size_in_bytes; | 1462 waste_ += size_in_bytes; |
| 1463 } | 1463 } |
| 1464 | 1464 |
| 1465 private: | 1465 private: |
| 1466 intptr_t capacity_; | 1466 intptr_t capacity_; |
| 1467 intptr_t max_capacity_; | 1467 intptr_t max_capacity_; |
| 1468 intptr_t size_; | 1468 intptr_t size_; |
| 1469 intptr_t waste_; | 1469 intptr_t waste_; |
| 1470 }; | 1470 }; |
| 1471 | 1471 |
| (...skipping 237 matching lines...) |
| 1709 *obj = T::cast(object_); | 1709 *obj = T::cast(object_); |
| 1710 return true; | 1710 return true; |
| 1711 } | 1711 } |
| 1712 | 1712 |
| 1713 Object* ToObjectChecked() { | 1713 Object* ToObjectChecked() { |
| 1714 CHECK(!IsRetry()); | 1714 CHECK(!IsRetry()); |
| 1715 return object_; | 1715 return object_; |
| 1716 } | 1716 } |
| 1717 | 1717 |
| 1718 AllocationSpace RetrySpace() { | 1718 AllocationSpace RetrySpace() { |
| 1719 ASSERT(IsRetry()); | 1719 DCHECK(IsRetry()); |
| 1720 return retry_space_; | 1720 return retry_space_; |
| 1721 } | 1721 } |
| 1722 | 1722 |
| 1723 private: | 1723 private: |
| 1724 explicit AllocationResult(AllocationSpace space) : object_(NULL), | 1724 explicit AllocationResult(AllocationSpace space) : object_(NULL), |
| 1725 retry_space_(space) { } | 1725 retry_space_(space) { } |
| 1726 | 1726 |
| 1727 Object* object_; | 1727 Object* object_; |
| 1728 AllocationSpace retry_space_; | 1728 AllocationSpace retry_space_; |
| 1729 }; | 1729 }; |
| (...skipping 131 matching lines...) |
| 1861 accounting_stats_.WasteBytes(wasted); | 1861 accounting_stats_.WasteBytes(wasted); |
| 1862 return size_in_bytes - wasted; | 1862 return size_in_bytes - wasted; |
| 1863 } | 1863 } |
| 1864 | 1864 |
| 1865 void ResetFreeList() { | 1865 void ResetFreeList() { |
| 1866 free_list_.Reset(); | 1866 free_list_.Reset(); |
| 1867 } | 1867 } |
| 1868 | 1868 |
| 1869 // Set space allocation info. | 1869 // Set space allocation info. |
| 1870 void SetTopAndLimit(Address top, Address limit) { | 1870 void SetTopAndLimit(Address top, Address limit) { |
| 1871 ASSERT(top == limit || | 1871 DCHECK(top == limit || |
| 1872 Page::FromAddress(top) == Page::FromAddress(limit - 1)); | 1872 Page::FromAddress(top) == Page::FromAddress(limit - 1)); |
| 1873 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | 1873 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
| 1874 allocation_info_.set_top(top); | 1874 allocation_info_.set_top(top); |
| 1875 allocation_info_.set_limit(limit); | 1875 allocation_info_.set_limit(limit); |
| 1876 } | 1876 } |
| 1877 | 1877 |
| 1878 // Empty space allocation info, returning unused area to free list. | 1878 // Empty space allocation info, returning unused area to free list. |
| 1879 void EmptyAllocationInfo() { | 1879 void EmptyAllocationInfo() { |
| 1880 // Mark the old linear allocation area with a free space map so it can be | 1880 // Mark the old linear allocation area with a free space map so it can be |
| 1881 // skipped when scanning the heap. | 1881 // skipped when scanning the heap. |
| (...skipping 45 matching lines...) |
| 1927 return !p->IsEvacuationCandidate() && | 1927 return !p->IsEvacuationCandidate() && |
| 1928 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && | 1928 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && |
| 1929 !p->WasSweptPrecisely(); | 1929 !p->WasSweptPrecisely(); |
| 1930 } | 1930 } |
| 1931 | 1931 |
| 1932 void IncrementUnsweptFreeBytes(intptr_t by) { | 1932 void IncrementUnsweptFreeBytes(intptr_t by) { |
| 1933 unswept_free_bytes_ += by; | 1933 unswept_free_bytes_ += by; |
| 1934 } | 1934 } |
| 1935 | 1935 |
| 1936 void IncreaseUnsweptFreeBytes(Page* p) { | 1936 void IncreaseUnsweptFreeBytes(Page* p) { |
| 1937 ASSERT(ShouldBeSweptBySweeperThreads(p)); | 1937 DCHECK(ShouldBeSweptBySweeperThreads(p)); |
| 1938 unswept_free_bytes_ += (p->area_size() - p->LiveBytes()); | 1938 unswept_free_bytes_ += (p->area_size() - p->LiveBytes()); |
| 1939 } | 1939 } |
| 1940 | 1940 |
| 1941 void DecrementUnsweptFreeBytes(intptr_t by) { | 1941 void DecrementUnsweptFreeBytes(intptr_t by) { |
| 1942 unswept_free_bytes_ -= by; | 1942 unswept_free_bytes_ -= by; |
| 1943 } | 1943 } |
| 1944 | 1944 |
| 1945 void DecreaseUnsweptFreeBytes(Page* p) { | 1945 void DecreaseUnsweptFreeBytes(Page* p) { |
| 1946 ASSERT(ShouldBeSweptBySweeperThreads(p)); | 1946 DCHECK(ShouldBeSweptBySweeperThreads(p)); |
| 1947 unswept_free_bytes_ -= (p->area_size() - p->LiveBytes()); | 1947 unswept_free_bytes_ -= (p->area_size() - p->LiveBytes()); |
| 1948 } | 1948 } |
| 1949 | 1949 |
| 1950 void ResetUnsweptFreeBytes() { | 1950 void ResetUnsweptFreeBytes() { |
| 1951 unswept_free_bytes_ = 0; | 1951 unswept_free_bytes_ = 0; |
| 1952 } | 1952 } |
| 1953 | 1953 |
| 1954 // This function tries to steal size_in_bytes memory from the sweeper threads | 1954 // This function tries to steal size_in_bytes memory from the sweeper threads |
| 1955 // free-lists. If it does not succeed stealing enough memory, it will wait | 1955 // free-lists. If it does not succeed stealing enough memory, it will wait |
| 1956 // for the sweeper threads to finish sweeping. | 1956 // for the sweeper threads to finish sweeping. |
| (...skipping 258 matching lines...) |
| 2215 // the maximum capacity. | 2215 // the maximum capacity. |
| 2216 bool GrowTo(int new_capacity); | 2216 bool GrowTo(int new_capacity); |
| 2217 | 2217 |
| 2218 // Shrinks the semispace to the new capacity. The new capacity | 2218 // Shrinks the semispace to the new capacity. The new capacity |
| 2219 // requested must be more than the amount of used memory in the | 2219 // requested must be more than the amount of used memory in the |
| 2220 // semispace and less than the current capacity. | 2220 // semispace and less than the current capacity. |
| 2221 bool ShrinkTo(int new_capacity); | 2221 bool ShrinkTo(int new_capacity); |
| 2222 | 2222 |
| 2223 // Returns the start address of the first page of the space. | 2223 // Returns the start address of the first page of the space. |
| 2224 Address space_start() { | 2224 Address space_start() { |
| 2225 ASSERT(anchor_.next_page() != &anchor_); | 2225 DCHECK(anchor_.next_page() != &anchor_); |
| 2226 return anchor_.next_page()->area_start(); | 2226 return anchor_.next_page()->area_start(); |
| 2227 } | 2227 } |
| 2228 | 2228 |
| 2229 // Returns the start address of the current page of the space. | 2229 // Returns the start address of the current page of the space. |
| 2230 Address page_low() { | 2230 Address page_low() { |
| 2231 return current_page_->area_start(); | 2231 return current_page_->area_start(); |
| 2232 } | 2232 } |
| 2233 | 2233 |
| 2234 // Returns one past the end address of the space. | 2234 // Returns one past the end address of the space. |
| 2235 Address space_end() { | 2235 Address space_end() { |
| (...skipping 139 matching lines...) |
| 2375 // of allocation. | 2375 // of allocation. |
| 2376 SemiSpaceIterator(NewSpace* space, Address start); | 2376 SemiSpaceIterator(NewSpace* space, Address start); |
| 2377 // Iterate from one address to another in the same semi-space. | 2377 // Iterate from one address to another in the same semi-space. |
| 2378 SemiSpaceIterator(Address from, Address to); | 2378 SemiSpaceIterator(Address from, Address to); |
| 2379 | 2379 |
| 2380 HeapObject* Next() { | 2380 HeapObject* Next() { |
| 2381 if (current_ == limit_) return NULL; | 2381 if (current_ == limit_) return NULL; |
| 2382 if (NewSpacePage::IsAtEnd(current_)) { | 2382 if (NewSpacePage::IsAtEnd(current_)) { |
| 2383 NewSpacePage* page = NewSpacePage::FromLimit(current_); | 2383 NewSpacePage* page = NewSpacePage::FromLimit(current_); |
| 2384 page = page->next_page(); | 2384 page = page->next_page(); |
| 2385 ASSERT(!page->is_anchor()); | 2385 DCHECK(!page->is_anchor()); |
| 2386 current_ = page->area_start(); | 2386 current_ = page->area_start(); |
| 2387 if (current_ == limit_) return NULL; | 2387 if (current_ == limit_) return NULL; |
| 2388 } | 2388 } |
| 2389 | 2389 |
| 2390 HeapObject* object = HeapObject::FromAddress(current_); | 2390 HeapObject* object = HeapObject::FromAddress(current_); |
| 2391 int size = (size_func_ == NULL) ? object->Size() : size_func_(object); | 2391 int size = (size_func_ == NULL) ? object->Size() : size_func_(object); |
| 2392 | 2392 |
| 2393 current_ += size; | 2393 current_ += size; |
| 2394 return object; | 2394 return object; |
| 2395 } | 2395 } |
| (...skipping 99 matching lines...) |
| 2495 static_cast<int>(top() - to_space_.page_low()); | 2495 static_cast<int>(top() - to_space_.page_low()); |
| 2496 } | 2496 } |
| 2497 | 2497 |
| 2498 // The same, but returning an int. We have to have the one that returns | 2498 // The same, but returning an int. We have to have the one that returns |
| 2499 // intptr_t because it is inherited, but if we know we are dealing with the | 2499 // intptr_t because it is inherited, but if we know we are dealing with the |
| 2500 // new space, which can't get as big as the other spaces then this is useful: | 2500 // new space, which can't get as big as the other spaces then this is useful: |
| 2501 int SizeAsInt() { return static_cast<int>(Size()); } | 2501 int SizeAsInt() { return static_cast<int>(Size()); } |
| 2502 | 2502 |
| 2503 // Return the current capacity of a semispace. | 2503 // Return the current capacity of a semispace. |
| 2504 intptr_t EffectiveCapacity() { | 2504 intptr_t EffectiveCapacity() { |
| 2505 SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity()); | 2505 SLOW_DCHECK(to_space_.Capacity() == from_space_.Capacity()); |
| 2506 return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize; | 2506 return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize; |
| 2507 } | 2507 } |
| 2508 | 2508 |
| 2509 // Return the current capacity of a semispace. | 2509 // Return the current capacity of a semispace. |
| 2510 intptr_t Capacity() { | 2510 intptr_t Capacity() { |
| 2511 ASSERT(to_space_.Capacity() == from_space_.Capacity()); | 2511 DCHECK(to_space_.Capacity() == from_space_.Capacity()); |
| 2512 return to_space_.Capacity(); | 2512 return to_space_.Capacity(); |
| 2513 } | 2513 } |
| 2514 | 2514 |
| 2515 // Return the total amount of memory committed for new space. | 2515 // Return the total amount of memory committed for new space. |
| 2516 intptr_t CommittedMemory() { | 2516 intptr_t CommittedMemory() { |
| 2517 if (from_space_.is_committed()) return 2 * Capacity(); | 2517 if (from_space_.is_committed()) return 2 * Capacity(); |
| 2518 return Capacity(); | 2518 return Capacity(); |
| 2519 } | 2519 } |
| 2520 | 2520 |
| 2521 // Return the total amount of memory committed for new space. | 2521 // Return the total amount of memory committed for new space. |
| 2522 intptr_t MaximumCommittedMemory() { | 2522 intptr_t MaximumCommittedMemory() { |
| 2523 return to_space_.MaximumCommittedMemory() + | 2523 return to_space_.MaximumCommittedMemory() + |
| 2524 from_space_.MaximumCommittedMemory(); | 2524 from_space_.MaximumCommittedMemory(); |
| 2525 } | 2525 } |
| 2526 | 2526 |
| 2527 // Approximate amount of physical memory committed for this space. | 2527 // Approximate amount of physical memory committed for this space. |
| 2528 size_t CommittedPhysicalMemory(); | 2528 size_t CommittedPhysicalMemory(); |
| 2529 | 2529 |
| 2530 // Return the available bytes without growing. | 2530 // Return the available bytes without growing. |
| 2531 intptr_t Available() { | 2531 intptr_t Available() { |
| 2532 return Capacity() - Size(); | 2532 return Capacity() - Size(); |
| 2533 } | 2533 } |
| 2534 | 2534 |
| 2535 // Return the maximum capacity of a semispace. | 2535 // Return the maximum capacity of a semispace. |
| 2536 int MaximumCapacity() { | 2536 int MaximumCapacity() { |
| 2537 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity()); | 2537 DCHECK(to_space_.MaximumCapacity() == from_space_.MaximumCapacity()); |
| 2538 return to_space_.MaximumCapacity(); | 2538 return to_space_.MaximumCapacity(); |
| 2539 } | 2539 } |
| 2540 | 2540 |
| 2541 bool IsAtMaximumCapacity() { | 2541 bool IsAtMaximumCapacity() { |
| 2542 return Capacity() == MaximumCapacity(); | 2542 return Capacity() == MaximumCapacity(); |
| 2543 } | 2543 } |
| 2544 | 2544 |
| 2545 // Returns the initial capacity of a semispace. | 2545 // Returns the initial capacity of a semispace. |
| 2546 int InitialCapacity() { | 2546 int InitialCapacity() { |
| 2547 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity()); | 2547 DCHECK(to_space_.InitialCapacity() == from_space_.InitialCapacity()); |
| 2548 return to_space_.InitialCapacity(); | 2548 return to_space_.InitialCapacity(); |
| 2549 } | 2549 } |
| 2550 | 2550 |
| 2551 // Return the address of the allocation pointer in the active semispace. | 2551 // Return the address of the allocation pointer in the active semispace. |
| 2552 Address top() { | 2552 Address top() { |
| 2553 ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top())); | 2553 DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top())); |
| 2554 return allocation_info_.top(); | 2554 return allocation_info_.top(); |
| 2555 } | 2555 } |
| 2556 | 2556 |
| 2557 void set_top(Address top) { | 2557 void set_top(Address top) { |
| 2558 ASSERT(to_space_.current_page()->ContainsLimit(top)); | 2558 DCHECK(to_space_.current_page()->ContainsLimit(top)); |
| 2559 allocation_info_.set_top(top); | 2559 allocation_info_.set_top(top); |
| 2560 } | 2560 } |
| 2561 | 2561 |
| 2562 // Return the address of the allocation pointer limit in the active semispace. | 2562 // Return the address of the allocation pointer limit in the active semispace. |
| 2563 Address limit() { | 2563 Address limit() { |
| 2564 ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.limit())); | 2564 DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit())); |
| 2565 return allocation_info_.limit(); | 2565 return allocation_info_.limit(); |
| 2566 } | 2566 } |
| 2567 | 2567 |
| 2568 // Return the address of the first object in the active semispace. | 2568 // Return the address of the first object in the active semispace. |
| 2569 Address bottom() { return to_space_.space_start(); } | 2569 Address bottom() { return to_space_.space_start(); } |
| 2570 | 2570 |
| 2571 // Get the age mark of the inactive semispace. | 2571 // Get the age mark of the inactive semispace. |
| 2572 Address age_mark() { return from_space_.age_mark(); } | 2572 Address age_mark() { return from_space_.age_mark(); } |
| 2573 // Set the age mark in the active semispace. | 2573 // Set the age mark in the active semispace. |
| 2574 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); } | 2574 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); } |
| 2575 | 2575 |
| 2576 // The start address of the space and a bit mask. Anding an address in the | 2576 // The start address of the space and a bit mask. Anding an address in the |
| 2577 // new space with the mask will result in the start address. | 2577 // new space with the mask will result in the start address. |
| 2578 Address start() { return start_; } | 2578 Address start() { return start_; } |
| 2579 uintptr_t mask() { return address_mask_; } | 2579 uintptr_t mask() { return address_mask_; } |
| 2580 | 2580 |
| 2581 INLINE(uint32_t AddressToMarkbitIndex(Address addr)) { | 2581 INLINE(uint32_t AddressToMarkbitIndex(Address addr)) { |
| 2582 ASSERT(Contains(addr)); | 2582 DCHECK(Contains(addr)); |
| 2583 ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) || | 2583 DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) || |
| 2584 IsAligned(OffsetFrom(addr) - 1, kPointerSize)); | 2584 IsAligned(OffsetFrom(addr) - 1, kPointerSize)); |
| 2585 return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2; | 2585 return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2; |
| 2586 } | 2586 } |
| 2587 | 2587 |
| 2588 INLINE(Address MarkbitIndexToAddress(uint32_t index)) { | 2588 INLINE(Address MarkbitIndexToAddress(uint32_t index)) { |
| 2589 return reinterpret_cast<Address>(index << kPointerSizeLog2); | 2589 return reinterpret_cast<Address>(index << kPointerSizeLog2); |
| 2590 } | 2590 } |
| 2591 | 2591 |
| 2592 // The allocation top and limit address. | 2592 // The allocation top and limit address. |
| 2593 Address* allocation_top_address() { | 2593 Address* allocation_top_address() { |
| (...skipping 145 matching lines...) |
| 2739 : PagedSpace(heap, max_capacity, id, executable) { | 2739 : PagedSpace(heap, max_capacity, id, executable) { |
| 2740 } | 2740 } |
| 2741 | 2741 |
| 2742 public: | 2742 public: |
| 2743 TRACK_MEMORY("OldSpace") | 2743 TRACK_MEMORY("OldSpace") |
| 2744 }; | 2744 }; |
| 2745 | 2745 |
| 2746 | 2746 |
| 2747 // For contiguous spaces, top should be in the space (or at the end) and limit | 2747 // For contiguous spaces, top should be in the space (or at the end) and limit |
| 2748 // should be the end of the space. | 2748 // should be the end of the space. |
| 2749 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ | 2749 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \ |
| 2750 SLOW_ASSERT((space).page_low() <= (info).top() \ | 2750 SLOW_DCHECK((space).page_low() <= (info).top() \ |
| 2751 && (info).top() <= (space).page_high() \ | 2751 && (info).top() <= (space).page_high() \ |
| 2752 && (info).limit() <= (space).page_high()) | 2752 && (info).limit() <= (space).page_high()) |
| 2753 | 2753 |
| 2754 | 2754 |
| 2755 // ----------------------------------------------------------------------------- | 2755 // ----------------------------------------------------------------------------- |
| 2756 // Old space for all map objects | 2756 // Old space for all map objects |
| 2757 | 2757 |
| 2758 class MapSpace : public PagedSpace { | 2758 class MapSpace : public PagedSpace { |
| 2759 public: | 2759 public: |
| 2760 // Creates a map space object with a maximum capacity. | 2760 // Creates a map space object with a maximum capacity. |
| (...skipping 278 matching lines...) |
| 3039 } | 3039 } |
| 3040 // Must be small, since an iteration is used for lookup. | 3040 // Must be small, since an iteration is used for lookup. |
| 3041 static const int kMaxComments = 64; | 3041 static const int kMaxComments = 64; |
| 3042 }; | 3042 }; |
| 3043 #endif | 3043 #endif |
| 3044 | 3044 |
| 3045 | 3045 |
| 3046 } } // namespace v8::internal | 3046 } } // namespace v8::internal |
| 3047 | 3047 |
| 3048 #endif // V8_SPACES_H_ | 3048 #endif // V8_SPACES_H_ |
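
For context on what the patch above switches to, here is a minimal, self-contained sketch of the debug-only check pattern behind the new `DCHECK*` macros, in the same shape as `DCHECK_PAGE_ALIGNED` in the diff. All names in the sketch (`MY_DCHECK`, `MY_DCHECK_PAGE_ALIGNED`, the page-size constants) are hypothetical stand-ins: V8's real `DCHECK` lives in its base library with richer failure reporting, and `OffsetFrom` / `Page::kPageAlignmentMask` are V8 internals not reproduced here.

```cpp
// spaces_dcheck_sketch.cc -- simplified, standalone sketch; not V8's actual code.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Debug-only check: active only in DEBUG builds and compiled out otherwise,
// which is the key difference from an always-on CHECK.
#ifdef DEBUG
#define MY_DCHECK(condition)                                        \
  do {                                                              \
    if (!(condition)) {                                             \
      std::fprintf(stderr, "Debug check failed: %s (%s:%d)\n",      \
                   #condition, __FILE__, __LINE__);                 \
      std::abort();                                                 \
    }                                                               \
  } while (false)
#else
#define MY_DCHECK(condition) ((void)0)
#endif

// Alignment wrapper in the same shape as DCHECK_PAGE_ALIGNED above:
// a page-aligned address has no bits set within the page-offset mask.
constexpr std::uintptr_t kPageSizeBits = 20;  // 1 MB pages, illustrative only
constexpr std::uintptr_t kPageAlignmentMask =
    (std::uintptr_t{1} << kPageSizeBits) - 1;

#define MY_DCHECK_PAGE_ALIGNED(address) \
  MY_DCHECK((reinterpret_cast<std::uintptr_t>(address) & kPageAlignmentMask) == 0)

int main() {
  void* aligned = reinterpret_cast<void*>(std::uintptr_t{1} << kPageSizeBits);
  MY_DCHECK_PAGE_ALIGNED(aligned);    // passes silently
  void* unaligned =
      reinterpret_cast<void*>((std::uintptr_t{1} << kPageSizeBits) + 8);
  MY_DCHECK_PAGE_ALIGNED(unaligned);  // aborts in DEBUG builds, no-op otherwise
  return 0;
}
```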
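
Also for context, a tiny worked example of the new-space address mask described near the end of the diff ("Anding an address in the new space with the mask will result in the start address") and of the pointer-granularity index computed by `AddressToMarkbitIndex`. The reservation size, base address, and pointer size below are illustrative assumptions, not V8's actual layout.

```cpp
// newspace_mask_sketch.cc -- worked example only; numbers are made up.
#include <cstdint>
#include <cstdio>

int main() {
  // Assume the space sits in a power-of-two sized, power-of-two aligned
  // reservation; then masking any address inside it recovers the start.
  const std::uint64_t kReservationSize = std::uint64_t{1} << 24;  // 16 MB, illustrative
  const std::uint64_t kStart = std::uint64_t{1} << 32;            // aligned base "address"
  const std::uint64_t kAddressMask = ~(kReservationSize - 1);

  std::uint64_t addr = kStart + 0x1234 * 8;  // a pointer-aligned slot inside the space

  // Anding with the mask yields the start address, as the comment in the diff states.
  std::printf("recovered start: %#llx (expected %#llx)\n",
              static_cast<unsigned long long>(addr & kAddressMask),
              static_cast<unsigned long long>(kStart));

  // AddressToMarkbitIndex in the diff is just the offset from start in pointer units.
  const unsigned kPointerSizeLog2 = 3;  // 8-byte pointers assumed
  std::uint32_t markbit_index =
      static_cast<std::uint32_t>(addr - kStart) >> kPointerSizeLog2;
  std::printf("markbit index: %u\n", markbit_index);  // prints 4660 (0x1234)
  return 0;
}
```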