| OLD | NEW | 
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. | 
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without | 
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are | 
| 4 // met: | 4 // met: | 
| 5 // | 5 // | 
| 6 //     * Redistributions of source code must retain the above copyright | 6 //     * Redistributions of source code must retain the above copyright | 
| 7 //       notice, this list of conditions and the following disclaimer. | 7 //       notice, this list of conditions and the following disclaimer. | 
| 8 //     * Redistributions in binary form must reproduce the above | 8 //     * Redistributions in binary form must reproduce the above | 
| 9 //       copyright notice, this list of conditions and the following | 9 //       copyright notice, this list of conditions and the following | 
| 10 //       disclaimer in the documentation and/or other materials provided | 10 //       disclaimer in the documentation and/or other materials provided | 
| (...skipping 277 matching lines...) | 
| 288     return true; | 288     return true; | 
| 289   } | 289   } | 
| 290 }; | 290 }; | 
| 291 | 291 | 
| 292 | 292 | 
| 293 class SkipList; | 293 class SkipList; | 
| 294 class SlotsBuffer; | 294 class SlotsBuffer; | 
| 295 | 295 | 
| 296 // MemoryChunk represents a memory region owned by a specific space. | 296 // MemoryChunk represents a memory region owned by a specific space. | 
| 297 // It is divided into the header and the body. Chunk start is always | 297 // It is divided into the header and the body. Chunk start is always | 
| 298 // 1MB aligned. Start of the body is aligned so it can accomodate | 298 // 1MB aligned. Start of the body is aligned so it can accommodate | 
| 299 // any heap object. | 299 // any heap object. | 
| 300 class MemoryChunk { | 300 class MemoryChunk { | 
| 301  public: | 301  public: | 
| 302   // Only works if the pointer is in the first kPageSize of the MemoryChunk. | 302   // Only works if the pointer is in the first kPageSize of the MemoryChunk. | 
| 303   static MemoryChunk* FromAddress(Address a) { | 303   static MemoryChunk* FromAddress(Address a) { | 
| 304     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); | 304     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); | 
| 305   } | 305   } | 
| 306 | 306 | 
| 307   // Only works for addresses in pointer spaces, not data or code spaces. | 307   // Only works for addresses in pointer spaces, not data or code spaces. | 
| 308   static inline MemoryChunk* FromAnyPointerAddress(Address addr); | 308   static inline MemoryChunk* FromAnyPointerAddress(Address addr); | 
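
An illustrative sketch (not part of the patch under review): `FromAddress` recovers the owning chunk by masking off the low alignment bits of an interior pointer, which only works because chunk starts are 1MB aligned, as the comment above says. A minimal standalone version of that masking arithmetic, with an assumed alignment constant:

```cpp
#include <cstdint>
#include <cstdio>

// Assumed 1MB chunk alignment, mirroring the comment above; the constant
// names are illustrative, not copied from v8.
const uintptr_t kAlignment = uintptr_t(1) << 20;
const uintptr_t kAlignmentMask = kAlignment - 1;

// Same idea as MemoryChunk::FromAddress: clear the low bits to recover
// the aligned chunk base from any interior address.
uintptr_t ChunkBaseFromAddress(uintptr_t addr) {
  return addr & ~kAlignmentMask;
}

int main() {
  uintptr_t base = 5 * kAlignment;     // some 1MB-aligned chunk start
  uintptr_t interior = base + 0x1234;  // pointer into the chunk body
  std::printf("base recovered: %d\n", ChunkBaseFromAddress(interior) == base);
  return 0;
}
```
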
| (...skipping 186 matching lines...) | 
| 495   static const int kBodyOffset = | 495   static const int kBodyOffset = | 
| 496     CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize)); | 496     CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize)); | 
| 497 | 497 | 
| 498   // The start offset of the object area in a page. Aligned to both maps and | 498   // The start offset of the object area in a page. Aligned to both maps and | 
| 499   // code alignment to be suitable for both.  Also aligned to 32 words because | 499   // code alignment to be suitable for both.  Also aligned to 32 words because | 
| 500   // the marking bitmap is arranged in 32 bit chunks. | 500   // the marking bitmap is arranged in 32 bit chunks. | 
| 501   static const int kObjectStartAlignment = 32 * kPointerSize; | 501   static const int kObjectStartAlignment = 32 * kPointerSize; | 
| 502   static const int kObjectStartOffset = kBodyOffset - 1 + | 502   static const int kObjectStartOffset = kBodyOffset - 1 + | 
| 503       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); | 503       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); | 
| 504 | 504 | 
| 505   size_t size() const { return size_; } | 505   intptr_t size() const { return size_; } | 
| 506 | 506 | 
| 507   void set_size(size_t size) { | 507   void set_size(size_t size) { size_ = size; } | 
| 508     size_ = size; |  | 
| 509   } |  | 
| 510 | 508 | 
| 511   Executability executable() { | 509   Executability executable() { | 
| 512     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | 510     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | 
| 513   } | 511   } | 
| 514 | 512 | 
| 515   bool ContainsOnlyData() { | 513   bool ContainsOnlyData() { | 
| 516     return IsFlagSet(CONTAINS_ONLY_DATA); | 514     return IsFlagSet(CONTAINS_ONLY_DATA); | 
| 517   } | 515   } | 
| 518 | 516 | 
| 519   bool InNewSpace() { | 517   bool InNewSpace() { | 
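
The `kObjectStartOffset` expression in this hunk is the standard round-up-to-alignment idiom: it bumps `kBodyOffset` to the next multiple of `kObjectStartAlignment` and leaves it unchanged if it is already a multiple. A throwaway check of that arithmetic with made-up numbers (not part of the patch):

```cpp
#include <cassert>

// Same shape as the kObjectStartOffset expression: round offset up to a
// multiple of alignment (values below are illustrative).
int RoundUpToAlignment(int offset, int alignment) {
  return offset - 1 + (alignment - (offset - 1) % alignment);
}

int main() {
  assert(RoundUpToAlignment(100, 128) == 128);  // bumped up to the next multiple
  assert(RoundUpToAlignment(128, 128) == 128);  // already aligned: unchanged
  assert(RoundUpToAlignment(129, 128) == 256);
  return 0;
}
```
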
| (...skipping 131 matching lines...) | 
| 651   // Returns the next page in the chain of pages owned by a space. | 649   // Returns the next page in the chain of pages owned by a space. | 
| 652   inline Page* next_page(); | 650   inline Page* next_page(); | 
| 653   inline Page* prev_page(); | 651   inline Page* prev_page(); | 
| 654   inline void set_next_page(Page* page); | 652   inline void set_next_page(Page* page); | 
| 655   inline void set_prev_page(Page* page); | 653   inline void set_prev_page(Page* page); | 
| 656 | 654 | 
| 657   // Returns the start address of the object area in this page. | 655   // Returns the start address of the object area in this page. | 
| 658   Address ObjectAreaStart() { return address() + kObjectStartOffset; } | 656   Address ObjectAreaStart() { return address() + kObjectStartOffset; } | 
| 659 | 657 | 
| 660   // Returns the end address (exclusive) of the object area in this page. | 658   // Returns the end address (exclusive) of the object area in this page. | 
| 661   Address ObjectAreaEnd() { return address() + Page::kPageSize; } | 659   Address ObjectAreaEnd() { return address() + size(); } | 
| 662 | 660 | 
| 663   // Checks whether an address is page aligned. | 661   // Checks whether an address is page aligned. | 
| 664   static bool IsAlignedToPageSize(Address a) { | 662   static bool IsAlignedToPageSize(Address a) { | 
| 665     return 0 == (OffsetFrom(a) & kPageAlignmentMask); | 663     return 0 == (OffsetFrom(a) & kPageAlignmentMask); | 
| 666   } | 664   } | 
| 667 | 665 | 
| 668   // Returns the offset of a given address to this page. | 666   // Returns the offset of a given address to this page. | 
| 669   INLINE(int Offset(Address a)) { | 667   INLINE(int Offset(Address a)) { | 
| 670     int offset = static_cast<int>(a - address()); | 668     int offset = static_cast<int>(a - address()); | 
| 671     return offset; | 669     return offset; | 
| 672   } | 670   } | 
| 673 | 671 | 
| 674   // Returns the address for a given offset to the this page. | 672   // Returns the address for a given offset to the this page. | 
| 675   Address OffsetToAddress(int offset) { | 673   Address OffsetToAddress(int offset) { | 
| 676     ASSERT_PAGE_OFFSET(offset); | 674     ASSERT_PAGE_OFFSET(offset); | 
| 677     return address() + offset; | 675     return address() + offset; | 
| 678   } | 676   } | 
| 679 | 677 | 
|  | 678   // Expand the committed area for pages that are small.  This | 
|  | 679   // happens primarily when the VM is newly booted. | 
|  | 680   void CommitMore(intptr_t space_needed); | 
|  | 681 | 
| 680   // --------------------------------------------------------------------- | 682   // --------------------------------------------------------------------- | 
| 681 | 683 | 
| 682   // Page size in bytes.  This must be a multiple of the OS page size. | 684   // Page size in bytes.  This must be a multiple of the OS page size. | 
| 683   static const int kPageSize = 1 << kPageSizeBits; | 685   static const int kPageSize = 1 << kPageSizeBits; | 
| 684 | 686 | 
| 685   // Page size mask. | 687   // Page size mask. | 
| 686   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; | 688   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; | 
| 687 | 689 | 
| 688   // Object area size in bytes. | 690   // Object area size in bytes. | 
| 689   static const int kObjectAreaSize = kPageSize - kObjectStartOffset; | 691   static const int kObjectAreaSize = kPageSize - kObjectStartOffset; | 
| 690 | 692 | 
|  | 693   // The part of the page that is committed until we need more.  If you | 
|  | 694   // make this too small then deserializing the initial boot snapshot | 
|  | 695   // fails. | 
|  | 696   static const int kInitiallyCommittedPartOfPage = kPageSize >> 4; | 
|  | 697 | 
| 691   // Maximum object size that fits in a page. | 698   // Maximum object size that fits in a page. | 
| 692   static const int kMaxHeapObjectSize = kObjectAreaSize; | 699   static const int kMaxHeapObjectSize = kObjectAreaSize; | 
| 693 | 700 | 
| 694   static const int kFirstUsedCell = | 701   static const int kFirstUsedCell = | 
| 695     (kObjectStartOffset/kPointerSize) >> Bitmap::kBitsPerCellLog2; | 702     (kObjectStartOffset/kPointerSize) >> Bitmap::kBitsPerCellLog2; | 
| 696 | 703 | 
| 697   static const int kLastUsedCell = | 704   static const int kLastUsedCell = | 
| 698     ((kPageSize - kPointerSize)/kPointerSize) >> | 705     ((kPageSize - kPointerSize)/kPointerSize) >> | 
| 699       Bitmap::kBitsPerCellLog2; | 706       Bitmap::kBitsPerCellLog2; | 
| 700 | 707 | 
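
The constants in this hunk are plain arithmetic on the page size: the object area is whatever remains after the aligned header, and the new `kInitiallyCommittedPartOfPage` is one sixteenth of a page (`kPageSize >> 4`). A quick sketch with assumed numbers, not taken from the patch (1MB pages as implied by the chunk comment; the header size is invented):

```cpp
#include <cstdio>

int main() {
  const int kPageSizeBits = 20;           // assumed: 1MB pages
  const int kPageSize = 1 << kPageSizeBits;
  const int kObjectStartOffset = 4096;    // illustrative header size only
  const int kObjectAreaSize = kPageSize - kObjectStartOffset;
  const int kInitiallyCommittedPartOfPage = kPageSize >> 4;  // 64KB here

  std::printf("object area = %d bytes, initially committed = %d bytes\n",
              kObjectAreaSize, kInitiallyCommittedPartOfPage);
  return 0;
}
```
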
| (...skipping 138 matching lines...) | 
| 839   Isolate* isolate_; | 846   Isolate* isolate_; | 
| 840 | 847 | 
| 841   // The reserved range of virtual memory that all code objects are put in. | 848   // The reserved range of virtual memory that all code objects are put in. | 
| 842   VirtualMemory* code_range_; | 849   VirtualMemory* code_range_; | 
| 843   // Plain old data class, just a struct plus a constructor. | 850   // Plain old data class, just a struct plus a constructor. | 
| 844   class FreeBlock { | 851   class FreeBlock { | 
| 845    public: | 852    public: | 
| 846     FreeBlock(Address start_arg, size_t size_arg) | 853     FreeBlock(Address start_arg, size_t size_arg) | 
| 847         : start(start_arg), size(size_arg) { | 854         : start(start_arg), size(size_arg) { | 
| 848       ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); | 855       ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); | 
| 849       ASSERT(size >= static_cast<size_t>(Page::kPageSize)); |  | 
| 850     } | 856     } | 
| 851     FreeBlock(void* start_arg, size_t size_arg) | 857     FreeBlock(void* start_arg, size_t size_arg) | 
| 852         : start(static_cast<Address>(start_arg)), size(size_arg) { | 858         : start(static_cast<Address>(start_arg)), size(size_arg) { | 
| 853       ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); | 859       ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); | 
| 854       ASSERT(size >= static_cast<size_t>(Page::kPageSize)); |  | 
| 855     } | 860     } | 
| 856 | 861 | 
| 857     Address start; | 862     Address start; | 
| 858     size_t size; | 863     size_t size; | 
| 859   }; | 864   }; | 
| 860 | 865 | 
| 861   // Freed blocks of memory are added to the free list.  When the allocation | 866   // Freed blocks of memory are added to the free list.  When the allocation | 
| 862   // list is exhausted, the free list is sorted and merged to make the new | 867   // list is exhausted, the free list is sorted and merged to make the new | 
| 863   // allocation list. | 868   // allocation list. | 
| 864   List<FreeBlock> free_list_; | 869   List<FreeBlock> free_list_; | 
| (...skipping 75 matching lines...) | 
| 940 class MemoryAllocator { | 945 class MemoryAllocator { | 
| 941  public: | 946  public: | 
| 942   explicit MemoryAllocator(Isolate* isolate); | 947   explicit MemoryAllocator(Isolate* isolate); | 
| 943 | 948 | 
| 944   // Initializes its internal bookkeeping structures. | 949   // Initializes its internal bookkeeping structures. | 
| 945   // Max capacity of the total space and executable memory limit. | 950   // Max capacity of the total space and executable memory limit. | 
| 946   bool Setup(intptr_t max_capacity, intptr_t capacity_executable); | 951   bool Setup(intptr_t max_capacity, intptr_t capacity_executable); | 
| 947 | 952 | 
| 948   void TearDown(); | 953   void TearDown(); | 
| 949 | 954 | 
| 950   Page* AllocatePage(PagedSpace* owner, Executability executable); | 955   Page* AllocatePage(intptr_t object_area_size, | 
|  | 956                      PagedSpace* owner, | 
|  | 957                      Executability executable); | 
| 951 | 958 | 
| 952   LargePage* AllocateLargePage(intptr_t object_size, | 959   LargePage* AllocateLargePage(intptr_t object_size, | 
| 953                                       Executability executable, | 960                                       Executability executable, | 
| 954                                       Space* owner); | 961                                       Space* owner); | 
| 955 | 962 | 
| 956   void Free(MemoryChunk* chunk); | 963   void Free(MemoryChunk* chunk); | 
| 957 | 964 | 
| 958   // Returns the maximum available bytes of heaps. | 965   // Returns the maximum available bytes of heaps. | 
| 959   intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } | 966   intptr_t Available() { | 
|  | 967     return capacity_ < memory_allocator_reserved_ ? | 
|  | 968            0 : | 
|  | 969            capacity_ - memory_allocator_reserved_; | 
|  | 970   } | 
| 960 | 971 | 
| 961   // Returns allocated spaces in bytes. | 972   // Returns allocated spaces in bytes. | 
| 962   intptr_t Size() { return size_; } | 973   intptr_t Size() { return memory_allocator_reserved_; } | 
| 963 | 974 | 
| 964   // Returns the maximum available executable bytes of heaps. | 975   // Returns the maximum available executable bytes of heaps. | 
| 965   intptr_t AvailableExecutable() { | 976   intptr_t AvailableExecutable() { | 
| 966     if (capacity_executable_ < size_executable_) return 0; | 977     if (capacity_executable_ < size_executable_) return 0; | 
| 967     return capacity_executable_ - size_executable_; | 978     return capacity_executable_ - size_executable_; | 
| 968   } | 979   } | 
| 969 | 980 | 
| 970   // Returns allocated executable spaces in bytes. | 981   // Returns allocated executable spaces in bytes. | 
| 971   intptr_t SizeExecutable() { return size_executable_; } | 982   intptr_t SizeExecutable() { return size_executable_; } | 
| 972 | 983 | 
| 973   // Returns maximum available bytes that the old space can have. | 984   // Returns maximum available bytes that the old space can have. | 
| 974   intptr_t MaxAvailable() { | 985   intptr_t MaxAvailable() { | 
| 975     return (Available() / Page::kPageSize) * Page::kObjectAreaSize; | 986     return (Available() / Page::kPageSize) * Page::kObjectAreaSize; | 
| 976   } | 987   } | 
| 977 | 988 | 
| 978 #ifdef DEBUG | 989 #ifdef DEBUG | 
| 979   // Reports statistic info of the space. | 990   // Reports statistic info of the space. | 
| 980   void ReportStatistics(); | 991   void ReportStatistics(); | 
| 981 #endif | 992 #endif | 
| 982 | 993 | 
| 983   MemoryChunk* AllocateChunk(intptr_t body_size, | 994   MemoryChunk* AllocateChunk(intptr_t body_size, | 
|  | 995                              intptr_t committed_body_size, | 
| 984                              Executability executable, | 996                              Executability executable, | 
| 985                              Space* space); | 997                              Space* space); | 
| 986 | 998 | 
| 987   Address ReserveAlignedMemory(size_t requested, | 999   Address ReserveAlignedMemory(size_t requested, | 
| 988                                size_t alignment, | 1000                                size_t alignment, | 
| 989                                VirtualMemory* controller); | 1001                                VirtualMemory* controller); | 
| 990   Address AllocateAlignedMemory(size_t requested, | 1002   Address AllocateAlignedMemory(size_t requested, | 
|  | 1003                                 size_t committed, | 
| 991                                 size_t alignment, | 1004                                 size_t alignment, | 
| 992                                 Executability executable, | 1005                                 Executability executable, | 
| 993                                 VirtualMemory* controller); | 1006                                 VirtualMemory* controller); | 
| 994 | 1007 | 
| 995   void FreeMemory(VirtualMemory* reservation, Executability executable); | 1008   void FreeMemory(VirtualMemory* reservation, Executability executable); | 
| 996   void FreeMemory(Address addr, size_t size, Executability executable); | 1009   void FreeMemory(Address addr, size_t size, Executability executable); | 
| 997 | 1010 | 
| 998   // Commit a contiguous block of memory from the initial chunk.  Assumes that | 1011   // Commit a contiguous block of memory from the initial chunk.  Assumes that | 
| 999   // the address is not NULL, the size is greater than zero, and that the | 1012   // the address is not NULL, the size is greater than zero, and that the | 
| 1000   // block is contained in the initial chunk.  Returns true if it succeeded | 1013   // block is contained in the initial chunk.  Returns true if it succeeded | 
| 1001   // and false otherwise. | 1014   // and false otherwise. | 
| 1002   bool CommitBlock(Address start, size_t size, Executability executable); | 1015   bool CommitBlock(Address start, size_t size, Executability executable); | 
| 1003 | 1016 | 
| 1004   // Uncommit a contiguous block of memory [start..(start+size)[. | 1017   // Uncommit a contiguous block of memory [start..(start+size)[. | 
| 1005   // start is not NULL, the size is greater than zero, and the | 1018   // start is not NULL, the size is greater than zero, and the | 
| 1006   // block is contained in the initial chunk.  Returns true if it succeeded | 1019   // block is contained in the initial chunk.  Returns true if it succeeded | 
| 1007   // and false otherwise. | 1020   // and false otherwise. | 
| 1008   bool UncommitBlock(Address start, size_t size); | 1021   bool UncommitBlock(Address start, size_t size); | 
| 1009 | 1022 | 
|  | 1023   void AllocationBookkeeping(Space* owner, | 
|  | 1024                              Address base, | 
|  | 1025                              intptr_t reserved_size, | 
|  | 1026                              intptr_t committed_size, | 
|  | 1027                              Executability executable); | 
|  | 1028 | 
| 1010   // Zaps a contiguous block of memory [start..(start+size)[ thus | 1029   // Zaps a contiguous block of memory [start..(start+size)[ thus | 
| 1011   // filling it up with a recognizable non-NULL bit pattern. | 1030   // filling it up with a recognizable non-NULL bit pattern. | 
| 1012   void ZapBlock(Address start, size_t size); | 1031   void ZapBlock(Address start, size_t size); | 
| 1013 | 1032 | 
| 1014   void PerformAllocationCallback(ObjectSpace space, | 1033   void PerformAllocationCallback(ObjectSpace space, | 
| 1015                                  AllocationAction action, | 1034                                  AllocationAction action, | 
| 1016                                  size_t size); | 1035                                  size_t size); | 
| 1017 | 1036 | 
| 1018   void AddMemoryAllocationCallback(MemoryAllocationCallback callback, | 1037   void AddMemoryAllocationCallback(MemoryAllocationCallback callback, | 
| 1019                                           ObjectSpace space, | 1038                                           ObjectSpace space, | 
| 1020                                           AllocationAction action); | 1039                                           AllocationAction action); | 
| 1021 | 1040 | 
| 1022   void RemoveMemoryAllocationCallback( | 1041   void RemoveMemoryAllocationCallback( | 
| 1023       MemoryAllocationCallback callback); | 1042       MemoryAllocationCallback callback); | 
| 1024 | 1043 | 
| 1025   bool MemoryAllocationCallbackRegistered( | 1044   bool MemoryAllocationCallbackRegistered( | 
| 1026       MemoryAllocationCallback callback); | 1045       MemoryAllocationCallback callback); | 
| 1027 | 1046 | 
| 1028  private: | 1047  private: | 
| 1029   Isolate* isolate_; | 1048   Isolate* isolate_; | 
| 1030 | 1049 | 
| 1031   // Maximum space size in bytes. | 1050   // Maximum space size in bytes. | 
| 1032   size_t capacity_; | 1051   size_t capacity_; | 
| 1033   // Maximum subset of capacity_ that can be executable | 1052   // Maximum subset of capacity_ that can be executable | 
| 1034   size_t capacity_executable_; | 1053   size_t capacity_executable_; | 
| 1035 | 1054 | 
| 1036   // Allocated space size in bytes. | 1055   // Allocated space size in bytes. | 
| 1037   size_t size_; | 1056   size_t memory_allocator_reserved_; | 
| 1038   // Allocated executable space size in bytes. | 1057   // Allocated executable space size in bytes. | 
| 1039   size_t size_executable_; | 1058   size_t size_executable_; | 
| 1040 | 1059 | 
| 1041   struct MemoryAllocationCallbackRegistration { | 1060   struct MemoryAllocationCallbackRegistration { | 
| 1042     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, | 1061     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, | 
| 1043                                          ObjectSpace space, | 1062                                          ObjectSpace space, | 
| 1044                                          AllocationAction action) | 1063                                          AllocationAction action) | 
| 1045         : callback(callback), space(space), action(action) { | 1064         : callback(callback), space(space), action(action) { | 
| 1046     } | 1065     } | 
| 1047     MemoryAllocationCallback callback; | 1066     MemoryAllocationCallback callback; | 
| (...skipping 126 matching lines...) | 
| 1174 #ifdef DEBUG | 1193 #ifdef DEBUG | 
| 1175   bool VerifyPagedAllocation() { | 1194   bool VerifyPagedAllocation() { | 
| 1176     return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit)) | 1195     return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit)) | 
| 1177         && (top <= limit); | 1196         && (top <= limit); | 
| 1178   } | 1197   } | 
| 1179 #endif | 1198 #endif | 
| 1180 }; | 1199 }; | 
| 1181 | 1200 | 
| 1182 | 1201 | 
| 1183 // An abstraction of the accounting statistics of a page-structured space. | 1202 // An abstraction of the accounting statistics of a page-structured space. | 
| 1184 // The 'capacity' of a space is the number of object-area bytes (ie, not | 1203 // The 'capacity' of a space is the number of object-area bytes (i.e., not | 
| 1185 // including page bookkeeping structures) currently in the space. The 'size' | 1204 // including page bookkeeping structures) currently in the space. The 'size' | 
| 1186 // of a space is the number of allocated bytes, the 'waste' in the space is | 1205 // of a space is the number of allocated bytes, the 'waste' in the space is | 
| 1187 // the number of bytes that are not allocated and not available to | 1206 // the number of bytes that are not allocated and not available to | 
| 1188 // allocation without reorganizing the space via a GC (eg, small blocks due | 1207 // allocation without reorganizing the space via a GC (e.g. small blocks due | 
| 1189 // to internal fragmentation, top of page areas in map space), and the bytes | 1208 // to internal fragmentation, top of page areas in map space), and the bytes | 
| 1190 // 'available' is the number of unallocated bytes that are not waste.  The | 1209 // 'available' is the number of unallocated bytes that are not waste.  The | 
| 1191 // capacity is the sum of size, waste, and available. | 1210 // capacity is the sum of size, waste, and available. | 
| 1192 // | 1211 // | 
| 1193 // The stats are only set by functions that ensure they stay balanced. These | 1212 // The stats are only set by functions that ensure they stay balanced. These | 
| 1194 // functions increase or decrease one of the non-capacity stats in | 1213 // functions increase or decrease one of the non-capacity stats in | 
| 1195 // conjunction with capacity, or else they always balance increases and | 1214 // conjunction with capacity, or else they always balance increases and | 
| 1196 // decreases to the non-capacity stats. | 1215 // decreases to the non-capacity stats. | 
| 1197 class AllocationStats BASE_EMBEDDED { | 1216 class AllocationStats BASE_EMBEDDED { | 
| 1198  public: | 1217  public: | 
| 1199   AllocationStats() { Clear(); } | 1218   AllocationStats() { Clear(); } | 
| 1200 | 1219 | 
| 1201   // Zero out all the allocation statistics (ie, no capacity). | 1220   // Zero out all the allocation statistics (i.e., no capacity). | 
| 1202   void Clear() { | 1221   void Clear() { | 
| 1203     capacity_ = 0; | 1222     capacity_ = 0; | 
| 1204     size_ = 0; | 1223     size_ = 0; | 
| 1205     waste_ = 0; | 1224     waste_ = 0; | 
| 1206   } | 1225   } | 
| 1207 | 1226 | 
| 1208   void ClearSizeWaste() { | 1227   void ClearSizeWaste() { | 
| 1209     size_ = capacity_; | 1228     size_ = capacity_; | 
| 1210     waste_ = 0; | 1229     waste_ = 0; | 
| 1211   } | 1230   } | 
| 1212 | 1231 | 
| 1213   // Reset the allocation statistics (ie, available = capacity with no | 1232   // Reset the allocation statistics (i.e., available = capacity with no | 
| 1214   // wasted or allocated bytes). | 1233   // wasted or allocated bytes). | 
| 1215   void Reset() { | 1234   void Reset() { | 
| 1216     size_ = 0; | 1235     size_ = 0; | 
| 1217     waste_ = 0; | 1236     waste_ = 0; | 
| 1218   } | 1237   } | 
| 1219 | 1238 | 
| 1220   // Accessors for the allocation statistics. | 1239   // Accessors for the allocation statistics. | 
| 1221   intptr_t Capacity() { return capacity_; } | 1240   intptr_t Capacity() { return capacity_; } | 
| 1222   intptr_t Size() { return size_; } | 1241   intptr_t Size() { return size_; } | 
| 1223   intptr_t Waste() { return waste_; } | 1242   intptr_t Waste() { return waste_; } | 
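
The comment block above fixes the invariant capacity = size + waste + available, with every mutation keeping the books balanced. A toy stand-in (not the real `AllocationStats` declared in this header) that makes the invariant explicit:

```cpp
#include <cassert>
#include <cstdint>

// Toy bookkeeping in the spirit of AllocationStats: 'available' is not
// stored, it is whatever capacity has not been allocated or wasted.
struct ToyStats {
  int64_t capacity = 0, size = 0, waste = 0;
  int64_t Available() const { return capacity - size - waste; }

  void ExpandSpace(int64_t bytes)     { capacity += bytes; }  // new empty pages
  void AllocateBytes(int64_t bytes)   { size += bytes; }      // available -> size
  void WasteBytes(int64_t bytes)      { waste += bytes; }     // available -> waste
  void DeallocateBytes(int64_t bytes) { size -= bytes; }      // size -> available
};

int main() {
  ToyStats s;
  s.ExpandSpace(1000);
  s.AllocateBytes(600);
  s.WasteBytes(50);
  s.DeallocateBytes(200);
  assert(s.capacity == s.size + s.waste + s.Available());  // always balanced
  assert(s.Available() == 550);  // 550 unallocated, non-waste bytes
  return 0;
}
```
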
| (...skipping 110 matching lines...) | 
| 1334   // Clear the free list. | 1353   // Clear the free list. | 
| 1335   void Reset(); | 1354   void Reset(); | 
| 1336 | 1355 | 
| 1337   // Return the number of bytes available on the free list. | 1356   // Return the number of bytes available on the free list. | 
| 1338   intptr_t available() { return available_; } | 1357   intptr_t available() { return available_; } | 
| 1339 | 1358 | 
| 1340   // Place a node on the free list.  The block of size 'size_in_bytes' | 1359   // Place a node on the free list.  The block of size 'size_in_bytes' | 
| 1341   // starting at 'start' is placed on the free list.  The return value is the | 1360   // starting at 'start' is placed on the free list.  The return value is the | 
| 1342   // number of bytes that have been lost due to internal fragmentation by | 1361   // number of bytes that have been lost due to internal fragmentation by | 
| 1343   // freeing the block.  Bookkeeping information will be written to the block, | 1362   // freeing the block.  Bookkeeping information will be written to the block, | 
| 1344   // ie, its contents will be destroyed.  The start address should be word | 1363   // i.e., its contents will be destroyed.  The start address should be word | 
| 1345   // aligned, and the size should be a non-zero multiple of the word size. | 1364   // aligned, and the size should be a non-zero multiple of the word size. | 
| 1346   int Free(Address start, int size_in_bytes); | 1365   int Free(Address start, int size_in_bytes); | 
| 1347 | 1366 | 
| 1348   // Allocate a block of size 'size_in_bytes' from the free list.  The block | 1367   // Allocate a block of size 'size_in_bytes' from the free list.  The block | 
| 1349   // is unitialized.  A failure is returned if no block is available.  The | 1368   // is unitialized.  A failure is returned if no block is available.  The | 
| 1350   // number of bytes lost to fragmentation is returned in the output parameter | 1369   // number of bytes lost to fragmentation is returned in the output parameter | 
| 1351   // 'wasted_bytes'.  The size should be a non-zero multiple of the word size. | 1370   // 'wasted_bytes'.  The size should be a non-zero multiple of the word size. | 
| 1352   MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); | 1371   MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); | 
| 1353 | 1372 | 
| 1354 #ifdef DEBUG | 1373 #ifdef DEBUG | 
| (...skipping 17 matching lines...) | 
| 1372 | 1391 | 
| 1373   void CountFreeListItems(Page* p, SizeStats* sizes); | 1392   void CountFreeListItems(Page* p, SizeStats* sizes); | 
| 1374 | 1393 | 
| 1375   intptr_t EvictFreeListItems(Page* p); | 1394   intptr_t EvictFreeListItems(Page* p); | 
| 1376 | 1395 | 
| 1377  private: | 1396  private: | 
| 1378   // The size range of blocks, in bytes. | 1397   // The size range of blocks, in bytes. | 
| 1379   static const int kMinBlockSize = 3 * kPointerSize; | 1398   static const int kMinBlockSize = 3 * kPointerSize; | 
| 1380   static const int kMaxBlockSize = Page::kMaxHeapObjectSize; | 1399   static const int kMaxBlockSize = Page::kMaxHeapObjectSize; | 
| 1381 | 1400 | 
| 1382   FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size); | 1401   FreeListNode* PickNodeFromList(FreeListNode** list, | 
|  | 1402                                  int* node_size, | 
|  | 1403                                  int minimum_size); | 
| 1383 | 1404 | 
| 1384   FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); | 1405   FreeListNode* FindNodeFor(int size_in_bytes, int* node_size, Address limit); | 
|  | 1406   FreeListNode* FindAbuttingNode( | 
|  | 1407       int size_in_bytes, int* node_size, Address limit, FreeListNode** list_head | 
|  | 1408       ); | 
| 1385 | 1408 | 
| 1386   PagedSpace* owner_; | 1409   PagedSpace* owner_; | 
| 1387   Heap* heap_; | 1410   Heap* heap_; | 
| 1388 | 1411 | 
| 1389   // Total available bytes in all blocks on this free list. | 1412   // Total available bytes in all blocks on this free list. | 
| 1390   int available_; | 1413   int available_; | 
| 1391 | 1414 | 
| 1392   static const int kSmallListMin = 0x20 * kPointerSize; | 1415   static const int kSmallListMin = 0x20 * kPointerSize; | 
| 1393   static const int kSmallListMax = 0xff * kPointerSize; | 1416   static const int kSmallListMax = 0xff * kPointerSize; | 
| 1394   static const int kMediumListMax = 0x7ff * kPointerSize; | 1417   static const int kMediumListMax = 0x7ff * kPointerSize; | 
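
The `kSmallListMin`/`kSmallListMax`/`kMediumListMax` thresholds above describe a segregated free list: freed blocks are filed by size class so allocation can search a matching list first. A generic sketch of size-class selection reusing those threshold names (the bucketing policy shown is illustrative, not necessarily v8's exact rule):

```cpp
#include <cstdio>

const int kPointerSize   = 8;  // assumed 64-bit build
const int kSmallListMin  = 0x20 * kPointerSize;
const int kSmallListMax  = 0xff * kPointerSize;
const int kMediumListMax = 0x7ff * kPointerSize;

// Pick a size class for a freed block; illustrative policy only.
const char* SizeClass(int size_in_bytes) {
  if (size_in_bytes < kSmallListMin)   return "too small (tracked as waste)";
  if (size_in_bytes <= kSmallListMax)  return "small list";
  if (size_in_bytes <= kMediumListMax) return "medium list";
  return "large/huge list";
}

int main() {
  std::printf("%s / %s / %s\n",
              SizeClass(64), SizeClass(1024), SizeClass(100000));
  return 0;
}
```
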
| (...skipping 76 matching lines...) | 
| 1471 | 1494 | 
| 1472   // As size, but the bytes in the current linear allocation area are not | 1495   // As size, but the bytes in the current linear allocation area are not | 
| 1473   // included. | 1496   // included. | 
| 1474   virtual intptr_t SizeOfObjects() { return Size() - (limit() - top()); } | 1497   virtual intptr_t SizeOfObjects() { return Size() - (limit() - top()); } | 
| 1475 | 1498 | 
| 1476   // Wasted bytes in this space.  These are just the bytes that were thrown away | 1499   // Wasted bytes in this space.  These are just the bytes that were thrown away | 
| 1477   // due to being too small to use for allocation.  They do not include the | 1500   // due to being too small to use for allocation.  They do not include the | 
| 1478   // free bytes that were not found at all due to lazy sweeping. | 1501   // free bytes that were not found at all due to lazy sweeping. | 
| 1479   virtual intptr_t Waste() { return accounting_stats_.Waste(); } | 1502   virtual intptr_t Waste() { return accounting_stats_.Waste(); } | 
| 1480 | 1503 | 
|  | 1504   virtual int ObjectAlignment() { return kPointerSize; } | 
|  | 1505 | 
| 1481   // Returns the allocation pointer in this space. | 1506   // Returns the allocation pointer in this space. | 
| 1482   Address top() { | 1507   Address top() { | 
| 1483     return allocation_info_.top; | 1508     return allocation_info_.top; | 
| 1484   } | 1509   } | 
| 1485   Address limit() { return allocation_info_.limit; } | 1510   Address limit() { return allocation_info_.limit; } | 
| 1486 | 1511 | 
| 1487   // Allocate the requested number of bytes in the space if possible, return a | 1512   // Allocate the requested number of bytes in the space if possible, return a | 
| 1488   // failure object if not. | 1513   // failure object if not. | 
| 1489   MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); | 1514   MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); | 
| 1490 | 1515 | 
| 1491   virtual bool ReserveSpace(int bytes); | 1516   virtual bool ReserveSpace(int bytes); | 
| 1492 | 1517 | 
| 1493   // Give a block of memory to the space's free list.  It might be added to | 1518   // Give a block of memory to the space's free list.  It might be added to | 
| 1494   // the free list or accounted as waste. | 1519   // the free list or accounted as waste. | 
| 1495   // If add_to_freelist is false then just accounting stats are updated and | 1520   // If add_to_freelist is false then just accounting stats are updated and | 
| 1496   // no attempt to add area to free list is made. | 1521   // no attempt to add area to free list is made. | 
| 1497   int Free(Address start, int size_in_bytes) { | 1522   int AddToFreeLists(Address start, int size_in_bytes) { | 
|  | 1523     printf("Add to free list: %p (%d bytes)\n", (void*)start, size_in_bytes); | 
| 1498     int wasted = free_list_.Free(start, size_in_bytes); | 1524     int wasted = free_list_.Free(start, size_in_bytes); | 
| 1499     accounting_stats_.DeallocateBytes(size_in_bytes - wasted); | 1525     accounting_stats_.DeallocateBytes(size_in_bytes - wasted); | 
| 1500     return size_in_bytes - wasted; | 1526     return size_in_bytes - wasted; | 
| 1501   } | 1527   } | 
| 1502 | 1528 | 
| 1503   // Set space allocation info. | 1529   // Set space allocation info. | 
| 1504   void SetTop(Address top, Address limit) { | 1530   void SetTop(Address top, Address limit) { | 
|  | 1531     ASSERT(top == NULL || top >= Page::FromAddress(top - 1)->ObjectAreaStart()); | 
| 1505     ASSERT(top == limit || | 1532     ASSERT(top == limit || | 
| 1506            Page::FromAddress(top) == Page::FromAddress(limit - 1)); | 1533            Page::FromAddress(top) == Page::FromAddress(limit - 1)); | 
| 1507     allocation_info_.top = top; | 1534     allocation_info_.top = top; | 
| 1508     allocation_info_.limit = limit; | 1535     allocation_info_.limit = limit; | 
| 1509   } | 1536   } | 
| 1510 | 1537 | 
| 1511   void Allocate(int bytes) { | 1538   void Allocate(int bytes) { | 
| 1512     accounting_stats_.AllocateBytes(bytes); | 1539     accounting_stats_.AllocateBytes(bytes); | 
| 1513   } | 1540   } | 
| 1514 | 1541 | 
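
`AllocateRaw` above is the usual bump-pointer fast path over the [top, limit) linear area, with `SlowAllocateRaw` (and the free list) consulted only when the request does not fit. A self-contained sketch of just the fast path, outside the patch, with the slow path stubbed out:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Minimal bump-pointer allocation over a fixed buffer, in the spirit of
// the top/limit fast path above.  The real code would fall back to the
// free list via SlowAllocateRaw instead of returning nullptr.
struct LinearArea {
  uint8_t* top;
  uint8_t* limit;

  void* AllocateRaw(size_t size_in_bytes) {
    if (static_cast<size_t>(limit - top) < size_in_bytes) return nullptr;
    void* result = top;
    top += size_in_bytes;  // bump the allocation pointer
    return result;
  }
};

int main() {
  static uint8_t buffer[256];
  LinearArea area = {buffer, buffer + sizeof(buffer)};
  void* a = area.AllocateRaw(100);
  void* b = area.AllocateRaw(100);
  void* c = area.AllocateRaw(100);  // only 56 bytes left: does not fit
  std::printf("%p %p %p\n", a, b, c);
  return 0;
}
```
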
| (...skipping 45 matching lines...) | 
| 1560     if (first == &anchor_) first = NULL; | 1587     if (first == &anchor_) first = NULL; | 
| 1561     first_unswept_page_ = first; | 1588     first_unswept_page_ = first; | 
| 1562   } | 1589   } | 
| 1563 | 1590 | 
| 1564   bool AdvanceSweeper(intptr_t bytes_to_sweep); | 1591   bool AdvanceSweeper(intptr_t bytes_to_sweep); | 
| 1565 | 1592 | 
| 1566   bool IsSweepingComplete() { | 1593   bool IsSweepingComplete() { | 
| 1567     return !first_unswept_page_->is_valid(); | 1594     return !first_unswept_page_->is_valid(); | 
| 1568   } | 1595   } | 
| 1569 | 1596 | 
|  | 1597   inline bool HasAPage() { return anchor_.next_page() != &anchor_; } | 
| 1570   Page* FirstPage() { return anchor_.next_page(); } | 1598   Page* FirstPage() { return anchor_.next_page(); } | 
| 1571   Page* LastPage() { return anchor_.prev_page(); } | 1599   Page* LastPage() { return anchor_.prev_page(); } | 
| 1572 | 1600 | 
| 1573   // Returns zero for pages that have so little fragmentation that it is not | 1601   // Returns zero for pages that have so little fragmentation that it is not | 
| 1574   // worth defragmenting them.  Otherwise a positive integer that gives an | 1602   // worth defragmenting them.  Otherwise a positive integer that gives an | 
| 1575   // estimate of fragmentation on an arbitrary scale. | 1603   // estimate of fragmentation on an arbitrary scale. | 
| 1576   int Fragmentation(Page* p) { | 1604   int Fragmentation(Page* p) { | 
| 1577     FreeList::SizeStats sizes; | 1605     FreeList::SizeStats sizes; | 
| 1578     free_list_.CountFreeListItems(p, &sizes); | 1606     free_list_.CountFreeListItems(p, &sizes); | 
| 1579 | 1607 | 
| (...skipping 52 matching lines...) | 
| 1632 | 1660 | 
| 1633   // The dummy page that anchors the double linked list of pages. | 1661   // The dummy page that anchors the double linked list of pages. | 
| 1634   Page anchor_; | 1662   Page anchor_; | 
| 1635 | 1663 | 
| 1636   // The space's free list. | 1664   // The space's free list. | 
| 1637   FreeList free_list_; | 1665   FreeList free_list_; | 
| 1638 | 1666 | 
| 1639   // Normal allocation information. | 1667   // Normal allocation information. | 
| 1640   AllocationInfo allocation_info_; | 1668   AllocationInfo allocation_info_; | 
| 1641 | 1669 | 
| 1642   // Bytes of each page that cannot be allocated.  Possibly non-zero |  | 
| 1643   // for pages in spaces with only fixed-size objects.  Always zero |  | 
| 1644   // for pages in spaces with variable sized objects (those pages are |  | 
| 1645   // padded with free-list nodes). |  | 
| 1646   int page_extra_; |  | 
| 1647 |  | 
| 1648   bool was_swept_conservatively_; | 1670   bool was_swept_conservatively_; | 
| 1649 | 1671 | 
| 1650   Page* first_unswept_page_; | 1672   Page* first_unswept_page_; | 
| 1651 | 1673 | 
| 1652   // Expands the space by allocating a fixed number of pages. Returns false if | 1674   // Expands the space by allocating a page. Returns false if it cannot | 
| 1653   // it cannot allocate requested number of pages from OS, or if the hard heap | 1675   // allocate a page from OS, or if the hard heap size limit has been hit.  The | 
| 1654   // size limit has been hit. | 1676   // new page will have at least enough committed space to satisfy the object | 
| 1655   bool Expand(); | 1677   // size indicated by the allocation_size argument; | 
|  | 1678   bool Expand(intptr_t allocation_size); | 
| 1656 | 1679 | 
| 1657   // Generic fast case allocation function that tries linear allocation at the | 1680   // Generic fast case allocation function that tries linear allocation at the | 
| 1658   // address denoted by top in allocation_info_. | 1681   // address denoted by top in allocation_info_. | 
| 1659   inline HeapObject* AllocateLinearly(int size_in_bytes); | 1682   inline HeapObject* AllocateLinearly(int size_in_bytes); | 
| 1660 | 1683 | 
| 1661   // Slow path of AllocateRaw.  This function is space-dependent. | 1684   // Slow path of AllocateRaw.  This function is space-dependent. | 
| 1662   MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes); | 1685   MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes); | 
| 1663 | 1686 | 
| 1664   friend class PageIterator; | 1687   friend class PageIterator; | 
| 1665 }; | 1688 }; | 
| (...skipping 639 matching lines...) | 
| 2305 // Old object space (excluding map objects) | 2328 // Old object space (excluding map objects) | 
| 2306 | 2329 | 
| 2307 class OldSpace : public PagedSpace { | 2330 class OldSpace : public PagedSpace { | 
| 2308  public: | 2331  public: | 
| 2309   // Creates an old space object with a given maximum capacity. | 2332   // Creates an old space object with a given maximum capacity. | 
| 2310   // The constructor does not allocate pages from OS. | 2333   // The constructor does not allocate pages from OS. | 
| 2311   OldSpace(Heap* heap, | 2334   OldSpace(Heap* heap, | 
| 2312            intptr_t max_capacity, | 2335            intptr_t max_capacity, | 
| 2313            AllocationSpace id, | 2336            AllocationSpace id, | 
| 2314            Executability executable) | 2337            Executability executable) | 
| 2315       : PagedSpace(heap, max_capacity, id, executable) { | 2338       : PagedSpace(heap, max_capacity, id, executable) { } | 
| 2316     page_extra_ = 0; |  | 
| 2317   } |  | 
| 2318 |  | 
| 2319   // The limit of allocation for a page in this space. |  | 
| 2320   virtual Address PageAllocationLimit(Page* page) { |  | 
| 2321     return page->ObjectAreaEnd(); |  | 
| 2322   } |  | 
| 2323 | 2339 | 
| 2324  public: | 2340  public: | 
| 2325   TRACK_MEMORY("OldSpace") | 2341   TRACK_MEMORY("OldSpace") | 
| 2326 }; | 2342 }; | 
| 2327 | 2343 | 
| 2328 | 2344 | 
| 2329 // For contiguous spaces, top should be in the space (or at the end) and limit | 2345 // For contiguous spaces, top should be in the space (or at the end) and limit | 
| 2330 // should be the end of the space. | 2346 // should be the end of the space. | 
| 2331 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ | 2347 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ | 
| 2332   SLOW_ASSERT((space).page_low() <= (info).top             \ | 2348   SLOW_ASSERT((space).page_low() <= (info).top             \ | 
| 2333               && (info).top <= (space).page_high()         \ | 2349               && (info).top <= (space).page_high()         \ | 
| 2334               && (info).limit <= (space).page_high()) | 2350               && (info).limit <= (space).page_high()) | 
| 2335 | 2351 | 
| 2336 | 2352 | 
| 2337 // ----------------------------------------------------------------------------- | 2353 // ----------------------------------------------------------------------------- | 
| 2338 // Old space for objects of a fixed size | 2354 // Old space for objects of a fixed size | 
| 2339 | 2355 | 
| 2340 class FixedSpace : public PagedSpace { | 2356 class FixedSpace : public PagedSpace { | 
| 2341  public: | 2357  public: | 
| 2342   FixedSpace(Heap* heap, | 2358   FixedSpace(Heap* heap, | 
| 2343              intptr_t max_capacity, | 2359              intptr_t max_capacity, | 
| 2344              AllocationSpace id, | 2360              AllocationSpace id, | 
| 2345              int object_size_in_bytes, | 2361              int object_size_in_bytes, | 
| 2346              const char* name) | 2362              const char* name) | 
| 2347       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), | 2363       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), | 
| 2348         object_size_in_bytes_(object_size_in_bytes), | 2364         object_size_in_bytes_(object_size_in_bytes), | 
| 2349         name_(name) { | 2365         name_(name) { } | 
| 2350     page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; |  | 
| 2351   } |  | 
| 2352 |  | 
| 2353   // The limit of allocation for a page in this space. |  | 
| 2354   virtual Address PageAllocationLimit(Page* page) { |  | 
| 2355     return page->ObjectAreaEnd() - page_extra_; |  | 
| 2356   } |  | 
| 2357 | 2366 | 
| 2358   int object_size_in_bytes() { return object_size_in_bytes_; } | 2367   int object_size_in_bytes() { return object_size_in_bytes_; } | 
| 2359 | 2368 | 
|  | 2369   virtual int ObjectAlignment() { return object_size_in_bytes_; } | 
|  | 2370 | 
| 2360   // Prepares for a mark-compact GC. | 2371   // Prepares for a mark-compact GC. | 
| 2361   virtual void PrepareForMarkCompact(); | 2372   virtual void PrepareForMarkCompact(); | 
| 2362 | 2373 | 
| 2363  protected: | 2374  protected: | 
| 2364   void ResetFreeList() { | 2375   void ResetFreeList() { | 
| 2365     free_list_.Reset(); | 2376     free_list_.Reset(); | 
| 2366   } | 2377   } | 
| 2367 | 2378 | 
| 2368  private: | 2379  private: | 
| 2369   // The size of objects in this space. | 2380   // The size of objects in this space. | 
| (...skipping 260 matching lines...) | 
| 2630   } | 2641   } | 
| 2631   // Must be small, since an iteration is used for lookup. | 2642   // Must be small, since an iteration is used for lookup. | 
| 2632   static const int kMaxComments = 64; | 2643   static const int kMaxComments = 64; | 
| 2633 }; | 2644 }; | 
| 2634 #endif | 2645 #endif | 
| 2635 | 2646 | 
| 2636 | 2647 | 
| 2637 } }  // namespace v8::internal | 2648 } }  // namespace v8::internal | 
| 2638 | 2649 | 
| 2639 #endif  // V8_SPACES_H_ | 2650 #endif  // V8_SPACES_H_ | 