OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 484 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
495 static const int kBodyOffset = | 495 static const int kBodyOffset = |
496 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize)); | 496 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize)); |
497 | 497 |
498 // The start offset of the object area in a page. Aligned to both maps and | 498 // The start offset of the object area in a page. Aligned to both maps and |
499 // code alignment to be suitable for both. Also aligned to 32 words because | 499 // code alignment to be suitable for both. Also aligned to 32 words because |
500 // the marking bitmap is arranged in 32 bit chunks. | 500 // the marking bitmap is arranged in 32 bit chunks. |
501 static const int kObjectStartAlignment = 32 * kPointerSize; | 501 static const int kObjectStartAlignment = 32 * kPointerSize; |
502 static const int kObjectStartOffset = kBodyOffset - 1 + | 502 static const int kObjectStartOffset = kBodyOffset - 1 + |
503 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); | 503 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); |
504 | 504 |
505 intptr_t size() const { return size_; } | 505 size_t size() const { return size_; } |
506 | 506 |
507 void set_size(size_t size) { size_ = size; } | 507 void set_size(size_t size) { |
| 508 size_ = size; |
| 509 } |
508 | 510 |
509 Executability executable() { | 511 Executability executable() { |
510 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | 512 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
511 } | 513 } |
512 | 514 |
513 bool ContainsOnlyData() { | 515 bool ContainsOnlyData() { |
514 return IsFlagSet(CONTAINS_ONLY_DATA); | 516 return IsFlagSet(CONTAINS_ONLY_DATA); |
515 } | 517 } |
516 | 518 |
517 bool InNewSpace() { | 519 bool InNewSpace() { |
(...skipping 131 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
649 // Returns the next page in the chain of pages owned by a space. | 651 // Returns the next page in the chain of pages owned by a space. |
650 inline Page* next_page(); | 652 inline Page* next_page(); |
651 inline Page* prev_page(); | 653 inline Page* prev_page(); |
652 inline void set_next_page(Page* page); | 654 inline void set_next_page(Page* page); |
653 inline void set_prev_page(Page* page); | 655 inline void set_prev_page(Page* page); |
654 | 656 |
655 // Returns the start address of the object area in this page. | 657 // Returns the start address of the object area in this page. |
656 Address ObjectAreaStart() { return address() + kObjectStartOffset; } | 658 Address ObjectAreaStart() { return address() + kObjectStartOffset; } |
657 | 659 |
658 // Returns the end address (exclusive) of the object area in this page. | 660 // Returns the end address (exclusive) of the object area in this page. |
659 Address ObjectAreaEnd() { return address() + size(); } | 661 Address ObjectAreaEnd() { return address() + Page::kPageSize; } |
660 | 662 |
661 // Checks whether an address is page aligned. | 663 // Checks whether an address is page aligned. |
662 static bool IsAlignedToPageSize(Address a) { | 664 static bool IsAlignedToPageSize(Address a) { |
663 return 0 == (OffsetFrom(a) & kPageAlignmentMask); | 665 return 0 == (OffsetFrom(a) & kPageAlignmentMask); |
664 } | 666 } |
665 | 667 |
666 // Returns the offset of a given address to this page. | 668 // Returns the offset of a given address to this page. |
667 INLINE(int Offset(Address a)) { | 669 INLINE(int Offset(Address a)) { |
668 int offset = static_cast<int>(a - address()); | 670 int offset = static_cast<int>(a - address()); |
669 return offset; | 671 return offset; |
670 } | 672 } |
671 | 673 |
672 // Returns the address for a given offset to this page. | 674 // Returns the address for a given offset to this page. |
673 Address OffsetToAddress(int offset) { | 675 Address OffsetToAddress(int offset) { |
674 ASSERT_PAGE_OFFSET(offset); | 676 ASSERT_PAGE_OFFSET(offset); |
675 return address() + offset; | 677 return address() + offset; |
676 } | 678 } |
677 | 679 |
678 // Expand the committed area for pages that are small. This | |
679 // happens primarily when the VM is newly booted. | |
680 void CommitMore(intptr_t space_needed); | |
681 | |
682 // --------------------------------------------------------------------- | 680 // --------------------------------------------------------------------- |
683 | 681 |
684 // Page size in bytes. This must be a multiple of the OS page size. | 682 // Page size in bytes. This must be a multiple of the OS page size. |
685 static const int kPageSize = 1 << kPageSizeBits; | 683 static const int kPageSize = 1 << kPageSizeBits; |
686 | 684 |
687 // Page size mask. | 685 // Page size mask. |
688 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; | 686 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; |
689 | 687 |
690 // Object area size in bytes. | 688 // Object area size in bytes. |
691 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; | 689 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; |
(...skipping 149 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
841 Isolate* isolate_; | 839 Isolate* isolate_; |
842 | 840 |
843 // The reserved range of virtual memory that all code objects are put in. | 841 // The reserved range of virtual memory that all code objects are put in. |
844 VirtualMemory* code_range_; | 842 VirtualMemory* code_range_; |
845 // Plain old data class, just a struct plus a constructor. | 843 // Plain old data class, just a struct plus a constructor. |
846 class FreeBlock { | 844 class FreeBlock { |
847 public: | 845 public: |
848 FreeBlock(Address start_arg, size_t size_arg) | 846 FreeBlock(Address start_arg, size_t size_arg) |
849 : start(start_arg), size(size_arg) { | 847 : start(start_arg), size(size_arg) { |
850 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); | 848 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); |
| 849 ASSERT(size >= static_cast<size_t>(Page::kPageSize)); |
851 } | 850 } |
852 FreeBlock(void* start_arg, size_t size_arg) | 851 FreeBlock(void* start_arg, size_t size_arg) |
853 : start(static_cast<Address>(start_arg)), size(size_arg) { | 852 : start(static_cast<Address>(start_arg)), size(size_arg) { |
854 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); | 853 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); |
| 854 ASSERT(size >= static_cast<size_t>(Page::kPageSize)); |
855 } | 855 } |
856 | 856 |
857 Address start; | 857 Address start; |
858 size_t size; | 858 size_t size; |
859 }; | 859 }; |
860 | 860 |
861 // Freed blocks of memory are added to the free list. When the allocation | 861 // Freed blocks of memory are added to the free list. When the allocation |
862 // list is exhausted, the free list is sorted and merged to make the new | 862 // list is exhausted, the free list is sorted and merged to make the new |
863 // allocation list. | 863 // allocation list. |
864 List<FreeBlock> free_list_; | 864 List<FreeBlock> free_list_; |
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
940 class MemoryAllocator { | 940 class MemoryAllocator { |
941 public: | 941 public: |
942 explicit MemoryAllocator(Isolate* isolate); | 942 explicit MemoryAllocator(Isolate* isolate); |
943 | 943 |
944 // Initializes its internal bookkeeping structures. | 944 // Initializes its internal bookkeeping structures. |
945 // Max capacity of the total space and executable memory limit. | 945 // Max capacity of the total space and executable memory limit. |
946 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable); | 946 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable); |
947 | 947 |
948 void TearDown(); | 948 void TearDown(); |
949 | 949 |
950 Page* AllocatePage(intptr_t object_area_size, | 950 Page* AllocatePage(PagedSpace* owner, Executability executable); |
951 PagedSpace* owner, | |
952 Executability executable); | |
953 | 951 |
954 LargePage* AllocateLargePage(intptr_t object_size, | 952 LargePage* AllocateLargePage(intptr_t object_size, |
955 Executability executable, | 953 Executability executable, |
956 Space* owner); | 954 Space* owner); |
957 | 955 |
958 void Free(MemoryChunk* chunk); | 956 void Free(MemoryChunk* chunk); |
959 | 957 |
960 // Returns the maximum available bytes of heaps. | 958 // Returns the maximum available bytes of heaps. |
961 intptr_t Available() { | 959 intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } |
962 return capacity_ < memory_allocator_reserved_ ? | |
963 0 : | |
964 capacity_ - memory_allocator_reserved_; | |
965 } | |
966 | 960 |
967 // Returns allocated spaces in bytes. | 961 // Returns allocated spaces in bytes. |
968 intptr_t Size() { return memory_allocator_reserved_; } | 962 intptr_t Size() { return size_; } |
969 | 963 |
970 // Returns the maximum available executable bytes of heaps. | 964 // Returns the maximum available executable bytes of heaps. |
971 intptr_t AvailableExecutable() { | 965 intptr_t AvailableExecutable() { |
972 if (capacity_executable_ < size_executable_) return 0; | 966 if (capacity_executable_ < size_executable_) return 0; |
973 return capacity_executable_ - size_executable_; | 967 return capacity_executable_ - size_executable_; |
974 } | 968 } |
975 | 969 |
976 // Returns allocated executable spaces in bytes. | 970 // Returns allocated executable spaces in bytes. |
977 intptr_t SizeExecutable() { return size_executable_; } | 971 intptr_t SizeExecutable() { return size_executable_; } |
978 | 972 |
979 // Returns maximum available bytes that the old space can have. | 973 // Returns maximum available bytes that the old space can have. |
980 intptr_t MaxAvailable() { | 974 intptr_t MaxAvailable() { |
981 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; | 975 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; |
982 } | 976 } |
983 | 977 |
984 #ifdef DEBUG | 978 #ifdef DEBUG |
985 // Reports statistic info of the space. | 979 // Reports statistic info of the space. |
986 void ReportStatistics(); | 980 void ReportStatistics(); |
987 #endif | 981 #endif |
988 | 982 |
989 MemoryChunk* AllocateChunk(intptr_t body_size, | 983 MemoryChunk* AllocateChunk(intptr_t body_size, |
990 intptr_t committed_body_size, | |
991 Executability executable, | 984 Executability executable, |
992 Space* space); | 985 Space* space); |
993 | 986 |
994 Address ReserveAlignedMemory(size_t requested, | 987 Address ReserveAlignedMemory(size_t requested, |
995 size_t alignment, | 988 size_t alignment, |
996 VirtualMemory* controller); | 989 VirtualMemory* controller); |
997 Address AllocateAlignedMemory(size_t requested, | 990 Address AllocateAlignedMemory(size_t requested, |
998 size_t committed, | |
999 size_t alignment, | 991 size_t alignment, |
1000 Executability executable, | 992 Executability executable, |
1001 VirtualMemory* controller); | 993 VirtualMemory* controller); |
1002 | 994 |
1003 void FreeMemory(VirtualMemory* reservation, Executability executable); | 995 void FreeMemory(VirtualMemory* reservation, Executability executable); |
1004 void FreeMemory(Address addr, size_t size, Executability executable); | 996 void FreeMemory(Address addr, size_t size, Executability executable); |
1005 | 997 |
1006 // Commit a contiguous block of memory from the initial chunk. Assumes that | 998 // Commit a contiguous block of memory from the initial chunk. Assumes that |
1007 // the address is not NULL, the size is greater than zero, and that the | 999 // the address is not NULL, the size is greater than zero, and that the |
1008 // block is contained in the initial chunk. Returns true if it succeeded | 1000 // block is contained in the initial chunk. Returns true if it succeeded |
1009 // and false otherwise. | 1001 // and false otherwise. |
1010 bool CommitBlock(Address start, size_t size, Executability executable); | 1002 bool CommitBlock(Address start, size_t size, Executability executable); |
1011 | 1003 |
1012 // Uncommit a contiguous block of memory [start..(start+size)[. | 1004 // Uncommit a contiguous block of memory [start..(start+size)[. |
1013 // start is not NULL, the size is greater than zero, and the | 1005 // start is not NULL, the size is greater than zero, and the |
1014 // block is contained in the initial chunk. Returns true if it succeeded | 1006 // block is contained in the initial chunk. Returns true if it succeeded |
1015 // and false otherwise. | 1007 // and false otherwise. |
1016 bool UncommitBlock(Address start, size_t size); | 1008 bool UncommitBlock(Address start, size_t size); |
1017 | 1009 |
1018 void AllocationBookkeeping(Space* owner, | |
1019 Address base, | |
1020 intptr_t reserved_size, | |
1021 intptr_t committed_size, | |
1022 Executability executable); | |
1023 | |
1024 // Zaps a contiguous block of memory [start..(start+size)[ thus | 1010 // Zaps a contiguous block of memory [start..(start+size)[ thus |
1025 // filling it up with a recognizable non-NULL bit pattern. | 1011 // filling it up with a recognizable non-NULL bit pattern. |
1026 void ZapBlock(Address start, size_t size); | 1012 void ZapBlock(Address start, size_t size); |
1027 | 1013 |
1028 void PerformAllocationCallback(ObjectSpace space, | 1014 void PerformAllocationCallback(ObjectSpace space, |
1029 AllocationAction action, | 1015 AllocationAction action, |
1030 size_t size); | 1016 size_t size); |
1031 | 1017 |
1032 void AddMemoryAllocationCallback(MemoryAllocationCallback callback, | 1018 void AddMemoryAllocationCallback(MemoryAllocationCallback callback, |
1033 ObjectSpace space, | 1019 ObjectSpace space, |
1034 AllocationAction action); | 1020 AllocationAction action); |
1035 | 1021 |
1036 void RemoveMemoryAllocationCallback( | 1022 void RemoveMemoryAllocationCallback( |
1037 MemoryAllocationCallback callback); | 1023 MemoryAllocationCallback callback); |
1038 | 1024 |
1039 bool MemoryAllocationCallbackRegistered( | 1025 bool MemoryAllocationCallbackRegistered( |
1040 MemoryAllocationCallback callback); | 1026 MemoryAllocationCallback callback); |
1041 | 1027 |
1042 private: | 1028 private: |
1043 Isolate* isolate_; | 1029 Isolate* isolate_; |
1044 | 1030 |
1045 // Maximum space size in bytes. | 1031 // Maximum space size in bytes. |
1046 size_t capacity_; | 1032 size_t capacity_; |
1047 // Maximum subset of capacity_ that can be executable | 1033 // Maximum subset of capacity_ that can be executable |
1048 size_t capacity_executable_; | 1034 size_t capacity_executable_; |
1049 | 1035 |
1050 // Allocated space size in bytes. | 1036 // Allocated space size in bytes. |
1051 size_t memory_allocator_reserved_; | 1037 size_t size_; |
1052 // Allocated executable space size in bytes. | 1038 // Allocated executable space size in bytes. |
1053 size_t size_executable_; | 1039 size_t size_executable_; |
1054 | 1040 |
1055 struct MemoryAllocationCallbackRegistration { | 1041 struct MemoryAllocationCallbackRegistration { |
1056 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, | 1042 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, |
1057 ObjectSpace space, | 1043 ObjectSpace space, |
1058 AllocationAction action) | 1044 AllocationAction action) |
1059 : callback(callback), space(space), action(action) { | 1045 : callback(callback), space(space), action(action) { |
1060 } | 1046 } |
1061 MemoryAllocationCallback callback; | 1047 MemoryAllocationCallback callback; |
(...skipping 324 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1386 | 1372 |
1387 void CountFreeListItems(Page* p, SizeStats* sizes); | 1373 void CountFreeListItems(Page* p, SizeStats* sizes); |
1388 | 1374 |
1389 intptr_t EvictFreeListItems(Page* p); | 1375 intptr_t EvictFreeListItems(Page* p); |
1390 | 1376 |
1391 private: | 1377 private: |
1392 // The size range of blocks, in bytes. | 1378 // The size range of blocks, in bytes. |
1393 static const int kMinBlockSize = 3 * kPointerSize; | 1379 static const int kMinBlockSize = 3 * kPointerSize; |
1394 static const int kMaxBlockSize = Page::kMaxHeapObjectSize; | 1380 static const int kMaxBlockSize = Page::kMaxHeapObjectSize; |
1395 | 1381 |
1396 FreeListNode* PickNodeFromList(FreeListNode** list, | 1382 FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size); |
1397 int* node_size, | |
1398 int minimum_size); | |
1399 | 1383 |
1400 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size, Address limit); | 1384 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); |
1401 FreeListNode* FindAbuttingNode(int size_in_bytes, | |
1402 int* node_size, | |
1403 Address limit, | |
1404 FreeListNode** list_head); | |
1405 | 1385 |
1406 PagedSpace* owner_; | 1386 PagedSpace* owner_; |
1407 Heap* heap_; | 1387 Heap* heap_; |
1408 | 1388 |
1409 // Total available bytes in all blocks on this free list. | 1389 // Total available bytes in all blocks on this free list. |
1410 int available_; | 1390 int available_; |
1411 | 1391 |
1412 static const int kSmallListMin = 0x20 * kPointerSize; | 1392 static const int kSmallListMin = 0x20 * kPointerSize; |
1413 static const int kSmallListMax = 0xff * kPointerSize; | 1393 static const int kSmallListMax = 0xff * kPointerSize; |
1414 static const int kMediumListMax = 0x7ff * kPointerSize; | 1394 static const int kMediumListMax = 0x7ff * kPointerSize; |
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1494 virtual intptr_t SizeOfObjects() { | 1474 virtual intptr_t SizeOfObjects() { |
1495 ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0)); | 1475 ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0)); |
1496 return Size() - unswept_free_bytes_ - (limit() - top()); | 1476 return Size() - unswept_free_bytes_ - (limit() - top()); |
1497 } | 1477 } |
1498 | 1478 |
1499 // Wasted bytes in this space. These are just the bytes that were thrown away | 1479 // Wasted bytes in this space. These are just the bytes that were thrown away |
1500 // due to being too small to use for allocation. They do not include the | 1480 // due to being too small to use for allocation. They do not include the |
1501 // free bytes that were not found at all due to lazy sweeping. | 1481 // free bytes that were not found at all due to lazy sweeping. |
1502 virtual intptr_t Waste() { return accounting_stats_.Waste(); } | 1482 virtual intptr_t Waste() { return accounting_stats_.Waste(); } |
1503 | 1483 |
1504 virtual int ObjectAlignment() { return kObjectAlignment; } | |
1505 | |
1506 // Returns the allocation pointer in this space. | 1484 // Returns the allocation pointer in this space. |
1507 Address top() { return allocation_info_.top; } | 1485 Address top() { return allocation_info_.top; } |
1508 Address limit() { return allocation_info_.limit; } | 1486 Address limit() { return allocation_info_.limit; } |
1509 | 1487 |
1510 // Allocate the requested number of bytes in the space if possible, return a | 1488 // Allocate the requested number of bytes in the space if possible, return a |
1511 // failure object if not. | 1489 // failure object if not. |
1512 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); | 1490 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); |
1513 | 1491 |
1514 virtual bool ReserveSpace(int bytes); | 1492 virtual bool ReserveSpace(int bytes); |
1515 | 1493 |
1516 // Give a block of memory to the space's free list. It might be added to | 1494 // Give a block of memory to the space's free list. It might be added to |
1517 // the free list or accounted as waste. | 1495 // the free list or accounted as waste. |
1518 // If add_to_freelist is false then just accounting stats are updated and | 1496 // If add_to_freelist is false then just accounting stats are updated and |
1519 // no attempt to add area to free list is made. | 1497 // no attempt to add area to free list is made. |
1520 int AddToFreeLists(Address start, int size_in_bytes) { | 1498 int Free(Address start, int size_in_bytes) { |
1521 int wasted = free_list_.Free(start, size_in_bytes); | 1499 int wasted = free_list_.Free(start, size_in_bytes); |
1522 accounting_stats_.DeallocateBytes(size_in_bytes - wasted); | 1500 accounting_stats_.DeallocateBytes(size_in_bytes - wasted); |
1523 return size_in_bytes - wasted; | 1501 return size_in_bytes - wasted; |
1524 } | 1502 } |
1525 | 1503 |
1526 // Set space allocation info. | 1504 // Set space allocation info. |
1527 void SetTop(Address top, Address limit) { | 1505 void SetTop(Address top, Address limit) { |
1528 ASSERT(top == NULL || top >= Page::FromAddress(top - 1)->ObjectAreaStart()); | |
1529 ASSERT(top == limit || | 1506 ASSERT(top == limit || |
1530 Page::FromAddress(top) == Page::FromAddress(limit - 1)); | 1507 Page::FromAddress(top) == Page::FromAddress(limit - 1)); |
1531 allocation_info_.top = top; | 1508 allocation_info_.top = top; |
1532 allocation_info_.limit = limit; | 1509 allocation_info_.limit = limit; |
1533 } | 1510 } |
1534 | 1511 |
1535 void Allocate(int bytes) { | 1512 void Allocate(int bytes) { |
1536 accounting_stats_.AllocateBytes(bytes); | 1513 accounting_stats_.AllocateBytes(bytes); |
1537 } | 1514 } |
1538 | 1515 |
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1589 void MarkPageForLazySweeping(Page* p) { | 1566 void MarkPageForLazySweeping(Page* p) { |
1590 unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes()); | 1567 unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes()); |
1591 } | 1568 } |
1592 | 1569 |
1593 bool AdvanceSweeper(intptr_t bytes_to_sweep); | 1570 bool AdvanceSweeper(intptr_t bytes_to_sweep); |
1594 | 1571 |
1595 bool IsSweepingComplete() { | 1572 bool IsSweepingComplete() { |
1596 return !first_unswept_page_->is_valid(); | 1573 return !first_unswept_page_->is_valid(); |
1597 } | 1574 } |
1598 | 1575 |
1599 inline bool HasAPage() { return anchor_.next_page() != &anchor_; } | |
1600 Page* FirstPage() { return anchor_.next_page(); } | 1576 Page* FirstPage() { return anchor_.next_page(); } |
1601 Page* LastPage() { return anchor_.prev_page(); } | 1577 Page* LastPage() { return anchor_.prev_page(); } |
1602 | 1578 |
1603 // Returns zero for pages that have so little fragmentation that it is not | 1579 // Returns zero for pages that have so little fragmentation that it is not |
1604 // worth defragmenting them. Otherwise a positive integer that gives an | 1580 // worth defragmenting them. Otherwise a positive integer that gives an |
1605 // estimate of fragmentation on an arbitrary scale. | 1581 // estimate of fragmentation on an arbitrary scale. |
1606 int Fragmentation(Page* p) { | 1582 int Fragmentation(Page* p) { |
1607 FreeList::SizeStats sizes; | 1583 FreeList::SizeStats sizes; |
1608 free_list_.CountFreeListItems(p, &sizes); | 1584 free_list_.CountFreeListItems(p, &sizes); |
1609 | 1585 |
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1662 | 1638 |
1663 // The dummy page that anchors the double linked list of pages. | 1639 // The dummy page that anchors the double linked list of pages. |
1664 Page anchor_; | 1640 Page anchor_; |
1665 | 1641 |
1666 // The space's free list. | 1642 // The space's free list. |
1667 FreeList free_list_; | 1643 FreeList free_list_; |
1668 | 1644 |
1669 // Normal allocation information. | 1645 // Normal allocation information. |
1670 AllocationInfo allocation_info_; | 1646 AllocationInfo allocation_info_; |
1671 | 1647 |
| 1648 // Bytes of each page that cannot be allocated. Possibly non-zero |
| 1649 // for pages in spaces with only fixed-size objects. Always zero |
| 1650 // for pages in spaces with variable sized objects (those pages are |
| 1651 // padded with free-list nodes). |
| 1652 int page_extra_; |
| 1653 |
1672 bool was_swept_conservatively_; | 1654 bool was_swept_conservatively_; |
1673 | 1655 |
1674 // The first page to be swept when the lazy sweeper advances. Is set | 1656 // The first page to be swept when the lazy sweeper advances. Is set |
1675 // to NULL when all pages have been swept. | 1657 // to NULL when all pages have been swept. |
1676 Page* first_unswept_page_; | 1658 Page* first_unswept_page_; |
1677 | 1659 |
1678 // The number of free bytes which could be reclaimed by advancing the | 1660 // The number of free bytes which could be reclaimed by advancing the |
1679 // lazy sweeper. This is only an estimation because lazy sweeping is | 1661 // lazy sweeper. This is only an estimation because lazy sweeping is |
1680 // done conservatively. | 1662 // done conservatively. |
1681 intptr_t unswept_free_bytes_; | 1663 intptr_t unswept_free_bytes_; |
1682 | 1664 |
1683 // Expands the space by allocating a page. Returns false if it cannot | 1665 // Expands the space by allocating a fixed number of pages. Returns false if |
1684 // allocate a page from OS, or if the hard heap size limit has been hit. The | 1666 // it cannot allocate the requested number of pages from OS, or if the hard heap |
1685 // new page will have at least enough committed space to satisfy the object | 1667 // size limit has been hit. |
1686 // size indicated by the allocation_size argument; | 1668 bool Expand(); |
1687 bool Expand(intptr_t allocation_size); | |
1688 | 1669 |
1689 // Generic fast case allocation function that tries linear allocation at the | 1670 // Generic fast case allocation function that tries linear allocation at the |
1690 // address denoted by top in allocation_info_. | 1671 // address denoted by top in allocation_info_. |
1691 inline HeapObject* AllocateLinearly(int size_in_bytes); | 1672 inline HeapObject* AllocateLinearly(int size_in_bytes); |
1692 | 1673 |
1693 // Slow path of AllocateRaw. This function is space-dependent. | 1674 // Slow path of AllocateRaw. This function is space-dependent. |
1694 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes); | 1675 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes); |
1695 | 1676 |
1696 friend class PageIterator; | 1677 friend class PageIterator; |
1697 }; | 1678 }; |
(...skipping 134 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1832 public: | 1813 public: |
1833 // Constructor. | 1814 // Constructor. |
1834 SemiSpace(Heap* heap, SemiSpaceId semispace) | 1815 SemiSpace(Heap* heap, SemiSpaceId semispace) |
1835 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), | 1816 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), |
1836 start_(NULL), | 1817 start_(NULL), |
1837 age_mark_(NULL), | 1818 age_mark_(NULL), |
1838 id_(semispace), | 1819 id_(semispace), |
1839 anchor_(this), | 1820 anchor_(this), |
1840 current_page_(NULL) { } | 1821 current_page_(NULL) { } |
1841 | 1822 |
1842 // Sets up the semispace using the given chunk. After this, call Commit() | 1823 // Sets up the semispace using the given chunk. |
1843 // to make the semispace usable. | 1824 bool SetUp(Address start, int initial_capacity, int maximum_capacity); |
1844 void SetUp(Address start, int initial_capacity, int maximum_capacity); | |
1845 | 1825 |
1846 // Tear down the space. Heap memory was not allocated by the space, so it | 1826 // Tear down the space. Heap memory was not allocated by the space, so it |
1847 // is not deallocated here. | 1827 // is not deallocated here. |
1848 void TearDown(); | 1828 void TearDown(); |
1849 | 1829 |
1850 // True if the space has been set up but not torn down. | 1830 // True if the space has been set up but not torn down. |
1851 bool HasBeenSetUp() { return start_ != NULL; } | 1831 bool HasBeenSetUp() { return start_ != NULL; } |
1852 | 1832 |
1853 // Grow the semispace to the new capacity. The new capacity | 1833 // Grow the semispace to the new capacity. The new capacity |
1854 // requested must be larger than the current capacity and less than | 1834 // requested must be larger than the current capacity and less than |
(...skipping 483 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2338 // Old object space (excluding map objects) | 2318 // Old object space (excluding map objects) |
2339 | 2319 |
2340 class OldSpace : public PagedSpace { | 2320 class OldSpace : public PagedSpace { |
2341 public: | 2321 public: |
2342 // Creates an old space object with a given maximum capacity. | 2322 // Creates an old space object with a given maximum capacity. |
2343 // The constructor does not allocate pages from OS. | 2323 // The constructor does not allocate pages from OS. |
2344 OldSpace(Heap* heap, | 2324 OldSpace(Heap* heap, |
2345 intptr_t max_capacity, | 2325 intptr_t max_capacity, |
2346 AllocationSpace id, | 2326 AllocationSpace id, |
2347 Executability executable) | 2327 Executability executable) |
2348 : PagedSpace(heap, max_capacity, id, executable) { } | 2328 : PagedSpace(heap, max_capacity, id, executable) { |
| 2329 page_extra_ = 0; |
| 2330 } |
| 2331 |
| 2332 // The limit of allocation for a page in this space. |
| 2333 virtual Address PageAllocationLimit(Page* page) { |
| 2334 return page->ObjectAreaEnd(); |
| 2335 } |
2349 | 2336 |
2350 public: | 2337 public: |
2351 TRACK_MEMORY("OldSpace") | 2338 TRACK_MEMORY("OldSpace") |
2352 }; | 2339 }; |
2353 | 2340 |
2354 | 2341 |
2355 // For contiguous spaces, top should be in the space (or at the end) and limit | 2342 // For contiguous spaces, top should be in the space (or at the end) and limit |
2356 // should be the end of the space. | 2343 // should be the end of the space. |
2357 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ | 2344 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ |
2358 SLOW_ASSERT((space).page_low() <= (info).top \ | 2345 SLOW_ASSERT((space).page_low() <= (info).top \ |
2359 && (info).top <= (space).page_high() \ | 2346 && (info).top <= (space).page_high() \ |
2360 && (info).limit <= (space).page_high()) | 2347 && (info).limit <= (space).page_high()) |
2361 | 2348 |
2362 | 2349 |
2363 // ----------------------------------------------------------------------------- | 2350 // ----------------------------------------------------------------------------- |
2364 // Old space for objects of a fixed size | 2351 // Old space for objects of a fixed size |
2365 | 2352 |
2366 class FixedSpace : public PagedSpace { | 2353 class FixedSpace : public PagedSpace { |
2367 public: | 2354 public: |
2368 FixedSpace(Heap* heap, | 2355 FixedSpace(Heap* heap, |
2369 intptr_t max_capacity, | 2356 intptr_t max_capacity, |
2370 AllocationSpace id, | 2357 AllocationSpace id, |
2371 int object_size_in_bytes, | 2358 int object_size_in_bytes, |
2372 const char* name) | 2359 const char* name) |
2373 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), | 2360 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), |
2374 object_size_in_bytes_(object_size_in_bytes), | 2361 object_size_in_bytes_(object_size_in_bytes), |
2375 name_(name) { } | 2362 name_(name) { |
| 2363 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; |
| 2364 } |
| 2365 |
| 2366 // The limit of allocation for a page in this space. |
| 2367 virtual Address PageAllocationLimit(Page* page) { |
| 2368 return page->ObjectAreaEnd() - page_extra_; |
| 2369 } |
2376 | 2370 |
2377 int object_size_in_bytes() { return object_size_in_bytes_; } | 2371 int object_size_in_bytes() { return object_size_in_bytes_; } |
2378 | 2372 |
2379 virtual int ObjectAlignment() { return object_size_in_bytes_; } | |
2380 | |
2381 // Prepares for a mark-compact GC. | 2373 // Prepares for a mark-compact GC. |
2382 virtual void PrepareForMarkCompact(); | 2374 virtual void PrepareForMarkCompact(); |
2383 | 2375 |
2384 protected: | 2376 protected: |
2385 void ResetFreeList() { | 2377 void ResetFreeList() { |
2386 free_list_.Reset(); | 2378 free_list_.Reset(); |
2387 } | 2379 } |
2388 | 2380 |
2389 private: | 2381 private: |
2390 // The size of objects in this space. | 2382 // The size of objects in this space. |
(...skipping 260 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2651 } | 2643 } |
2652 // Must be small, since an iteration is used for lookup. | 2644 // Must be small, since an iteration is used for lookup. |
2653 static const int kMaxComments = 64; | 2645 static const int kMaxComments = 64; |
2654 }; | 2646 }; |
2655 #endif | 2647 #endif |
2656 | 2648 |
2657 | 2649 |
2658 } } // namespace v8::internal | 2650 } } // namespace v8::internal |
2659 | 2651 |
2660 #endif // V8_SPACES_H_ | 2652 #endif // V8_SPACES_H_ |
OLD | NEW |