| OLD | NEW | 
|---|---|
| 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without | 
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are | 
| 4 // met: | 4 // met: | 
| 5 // | 5 // | 
| 6 //     * Redistributions of source code must retain the above copyright | 6 //     * Redistributions of source code must retain the above copyright | 
| 7 //       notice, this list of conditions and the following disclaimer. | 7 //       notice, this list of conditions and the following disclaimer. | 
| 8 //     * Redistributions in binary form must reproduce the above | 8 //     * Redistributions in binary form must reproduce the above | 
| 9 //       copyright notice, this list of conditions and the following | 9 //       copyright notice, this list of conditions and the following | 
| 10 //       disclaimer in the documentation and/or other materials provided | 10 //       disclaimer in the documentation and/or other materials provided | 
| (...skipping 149 matching lines...) | 
| 160   Address RSetStart() { return address() + kRSetStartOffset; } | 160   Address RSetStart() { return address() + kRSetStartOffset; } | 
| 161 | 161 | 
| 162   // Returns the end address of the remembered set area (exclusive). | 162   // Returns the end address of the remembered set area (exclusive). | 
| 163   Address RSetEnd() { return address() + kRSetEndOffset; } | 163   Address RSetEnd() { return address() + kRSetEndOffset; } | 
| 164 | 164 | 
| 165   // Checks whether an address is page aligned. | 165   // Checks whether an address is page aligned. | 
| 166   static bool IsAlignedToPageSize(Address a) { | 166   static bool IsAlignedToPageSize(Address a) { | 
| 167     return 0 == (OffsetFrom(a) & kPageAlignmentMask); | 167     return 0 == (OffsetFrom(a) & kPageAlignmentMask); | 
| 168   } | 168   } | 
| 169 | 169 | 
|  | 170   // True if this page was in use before the current compaction started. | 
|  | 171   // The result is valid only for pages owned by paged spaces and | 
|  | 172   // only after PagedSpace::PrepareForMarkCompact has been called. | 
|  | 173   inline bool WasInUseBeforeMC(); | 
|  | 174 | 
|  | 175   inline void SetWasInUseBeforeMC(bool was_in_use); | 
|  | 176 | 
| 170   // True if this page is a large object page. | 177   // True if this page is a large object page. | 
| 171   bool IsLargeObjectPage() { return (is_normal_page & 0x1) == 0; } | 178   inline bool IsLargeObjectPage(); | 
|  | 179 | 
|  | 180   inline void SetIsLargeObjectPage(bool is_large_object_page); | 
| 172 | 181 | 
| 173   // Returns the offset of a given address to this page. | 182   // Returns the offset of a given address to this page. | 
| 174   INLINE(int Offset(Address a)) { | 183   INLINE(int Offset(Address a)) { | 
| 175     int offset = static_cast<int>(a - address()); | 184     int offset = static_cast<int>(a - address()); | 
| 176     ASSERT_PAGE_OFFSET(offset); | 185     ASSERT_PAGE_OFFSET(offset); | 
| 177     return offset; | 186     return offset; | 
| 178   } | 187   } | 
| 179 | 188 | 
| 180   // Returns the address for a given offset into this page. | 189   // Returns the address for a given offset into this page. | 
| 181   Address OffsetToAddress(int offset) { | 190   Address OffsetToAddress(int offset) { | 
| (...skipping 55 matching lines...) | 
| 237   // The start offset of the used part of the remembered set in a page. | 246   // The start offset of the used part of the remembered set in a page. | 
| 238   static const int kRSetStartOffset = kRSetOffset + | 247   static const int kRSetStartOffset = kRSetOffset + | 
| 239       kObjectStartOffset / kBitsPerPointer; | 248       kObjectStartOffset / kBitsPerPointer; | 
| 240 | 249 | 
| 241   // Object area size in bytes. | 250   // Object area size in bytes. | 
| 242   static const int kObjectAreaSize = kPageSize - kObjectStartOffset; | 251   static const int kObjectAreaSize = kPageSize - kObjectStartOffset; | 
| 243 | 252 | 
| 244   // Maximum object size that fits in a page. | 253   // Maximum object size that fits in a page. | 
| 245   static const int kMaxHeapObjectSize = kObjectAreaSize; | 254   static const int kMaxHeapObjectSize = kObjectAreaSize; | 
| 246 | 255 | 
|  | 256   enum PageFlag { | 
|  | 257     IS_NORMAL_PAGE = 1 << 0, | 
|  | 258     WAS_IN_USE_BEFORE_MC = 1 << 1 | 
|  | 259   }; | 
|  | 260 | 
|  | 261   inline bool GetPageFlag(PageFlag flag); | 
|  | 262   inline void SetPageFlag(PageFlag flag, bool value); | 
|  | 263 | 
| 247   //--------------------------------------------------------------------------- | 264   //--------------------------------------------------------------------------- | 
| 248   // Page header description. | 265   // Page header description. | 
| 249   // | 266   // | 
| 250   // If a page is not in the large object space, the first word, | 267   // If a page is not in the large object space, the first word, | 
| 251   // opaque_header, encodes the next page address (aligned to kPageSize 8K) | 268   // opaque_header, encodes the next page address (aligned to kPageSize 8K) | 
| 252   // and the chunk number (0 ~ 8K-1).  Only MemoryAllocator should use | 269   // and the chunk number (0 ~ 8K-1).  Only MemoryAllocator should use | 
| 253   // opaque_header. The value range of the opaque_header is [0..kPageSize[, | 270   // opaque_header. The value range of the opaque_header is [0..kPageSize[, | 
| 254   // or [next_page_start, next_page_end[. It cannot point to a valid address | 271   // or [next_page_start, next_page_end[. It cannot point to a valid address | 
| 255   // in the current page.  If a page is in the large object space, the first | 272   // in the current page.  If a page is in the large object space, the first | 
| 256   // word *may* (if the page start and large object chunk start are the | 273   // word *may* (if the page start and large object chunk start are the | 
| 257   // same) contain the address of the next large object chunk. | 274   // same) contain the address of the next large object chunk. | 
| 258   intptr_t opaque_header; | 275   intptr_t opaque_header; | 
| 259 | 276 | 
| 260   // If the page is not in the large object space, the low-order bit of the | 277   // If the page is not in the large object space, the low-order bit of the | 
| 261   // second word is set. If the page is in the large object space, the | 278   // second word is set. If the page is in the large object space, the | 
| 262   // second word *may* (if the page start and large object chunk start are | 279   // second word *may* (if the page start and large object chunk start are | 
| 263   // the same) contain the large object chunk size.  In either case, the | 280   // the same) contain the large object chunk size.  In either case, the | 
| 264   // low-order bit for large object pages will be cleared. | 281   // low-order bit for large object pages will be cleared. | 
| 265   int is_normal_page; | 282   // For normal pages this word is used to store various page flags. | 
|  | 283   int flags; | 
| 266 | 284 | 
| 267   // The following fields may overlap with remembered set, they can only | 285   // The following fields may overlap with remembered set, they can only | 
| 268   // be used in the mark-compact collector when remembered set is not | 286   // be used in the mark-compact collector when remembered set is not | 
| 269   // used. | 287   // used. | 
| 270 | 288 | 
| 271   // The index of the page in its owner space. | 289   // The index of the page in its owner space. | 
| 272   int mc_page_index; | 290   int mc_page_index; | 
| 273 | 291 | 
| 274   // The allocation pointer after relocating objects to this page. | 292   // The allocation pointer after relocating objects to this page. | 
| 275   Address mc_relocation_top; | 293   Address mc_relocation_top; | 
| (...skipping 124 matching lines...) | 
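
The hunk above replaces the raw `is_normal_page` word with a general `flags` word and a `PageFlag` bit set, but the new accessors are declared `inline`, so their bodies are not part of this hunk. A minimal sketch of how they could be implemented over the `flags` word, assuming plain bit operations (the real definitions live in the corresponding `-inl.h` file and may differ):

```cpp
// Sketch only: assumes the flag accessors are simple bit operations on the
// new `flags` word.
bool Page::GetPageFlag(PageFlag flag) {
  return (flags & flag) != 0;
}

void Page::SetPageFlag(PageFlag flag, bool value) {
  if (value) {
    flags |= flag;    // set the bit
  } else {
    flags &= ~flag;   // clear the bit
  }
}

bool Page::IsLargeObjectPage() {
  // Large object pages are exactly those without the IS_NORMAL_PAGE bit.
  return !GetPageFlag(IS_NORMAL_PAGE);
}

void Page::SetIsLargeObjectPage(bool is_large_object_page) {
  SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
}

bool Page::WasInUseBeforeMC() {
  return GetPageFlag(WAS_IN_USE_BEFORE_MC);
}

void Page::SetWasInUseBeforeMC(bool was_in_use) {
  SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
}
```

Keeping `IS_NORMAL_PAGE` as an explicit bit preserves the old convention described in the header comment: large object pages leave the low-order bit of this word cleared.
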
| 400 // The allocator keeps an initial chunk which is used for the new space.  The | 418 // The allocator keeps an initial chunk which is used for the new space.  The | 
| 401 // leftover regions of the initial chunk are used for the initial chunks of | 419 // leftover regions of the initial chunk are used for the initial chunks of | 
| 402 // old space and map space if they are big enough to hold at least one page. | 420 // old space and map space if they are big enough to hold at least one page. | 
| 403 // The allocator assumes that there is one old space and one map space; each | 421 // The allocator assumes that there is one old space and one map space; each | 
| 404 // expands by allocating kPagesPerChunk pages at a time, except for the last | 422 // expands by allocating kPagesPerChunk pages at a time, except for the last | 
| 405 // expansion (before running out of space).  The first chunk may contain fewer | 423 // expansion (before running out of space).  The first chunk may contain fewer | 
| 406 // than kPagesPerChunk pages as well. | 424 // than kPagesPerChunk pages as well. | 
| 407 // | 425 // | 
| 408 // The memory allocator also allocates chunks for the large object space, but | 426 // The memory allocator also allocates chunks for the large object space, but | 
| 409 // they are managed by the space itself.  The new space does not expand. | 427 // they are managed by the space itself.  The new space does not expand. | 
|  | 428 // | 
|  | 429 // The fact that pages for paged spaces are allocated and deallocated in chunks | 
|  | 430 // induces a constraint on the order of pages in the linked list. We say that | 
|  | 431 // pages are linked in chunk-order if and only if every two consecutive | 
|  | 432 // pages from the same chunk are consecutive in the linked list. | 
|  | 433 // | 
|  | 434 | 
| 410 | 435 | 
| 411 class MemoryAllocator : public AllStatic { | 436 class MemoryAllocator : public AllStatic { | 
| 412  public: | 437  public: | 
| 413   // Initializes its internal bookkeeping structures. | 438   // Initializes its internal bookkeeping structures. | 
| 414   // Max capacity of the total space. | 439   // Max capacity of the total space. | 
| 415   static bool Setup(int max_capacity); | 440   static bool Setup(int max_capacity); | 
| 416 | 441 | 
| 417   // Deletes valid chunks. | 442   // Deletes valid chunks. | 
| 418   static void TearDown(); | 443   static void TearDown(); | 
| 419 | 444 | 
| (...skipping 39 matching lines...) | 
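
The class comment above defines chunk-order for page lists. One necessary consequence is that a walk over the list never re-enters a chunk it has already left; the purely illustrative helper below checks exactly that. It assumes `Page::next_page()` and `MemoryAllocator::GetChunkId(Page*)` accessors, which this header refers to elsewhere but whose exact signatures are not visible in this diff.

```cpp
#include <set>

// Hypothetical debug helper: true if the list starting at first_page never
// re-enters a chunk it has already walked past (a necessary condition for
// being linked in chunk-order).
static bool NeverReentersAChunk(Page* first_page) {
  std::set<int> finished_chunks;   // chunks the walk has already left
  int current_chunk = -1;
  for (Page* p = first_page; p->is_valid(); p = p->next_page()) {
    int chunk_id = MemoryAllocator::GetChunkId(p);
    if (chunk_id == current_chunk) continue;        // still in the same chunk
    if (finished_chunks.count(chunk_id) != 0) {
      return false;                                 // re-entered an old chunk
    }
    if (current_chunk != -1) finished_chunks.insert(current_chunk);
    current_chunk = chunk_id;
  }
  return true;
}
```
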
| 459   // allocate memory for the OS or cannot allocate a single page, this | 484   // allocate memory for the OS or cannot allocate a single page, this | 
| 460   // function returns an invalid page pointer (NULL). The caller must check | 485   // function returns an invalid page pointer (NULL). The caller must check | 
| 461   // whether the returned page is valid (by calling Page::is_valid()).  It is | 486   // whether the returned page is valid (by calling Page::is_valid()).  It is | 
| 462   // guaranteed that allocated pages have contiguous addresses.  The actual | 487   // guaranteed that allocated pages have contiguous addresses.  The actual | 
| 463   // number of allocated pages is returned in the output parameter | 488   // number of allocated pages is returned in the output parameter | 
| 464   // allocated_pages.  If the PagedSpace owner is executable and there is | 489   // allocated_pages.  If the PagedSpace owner is executable and there is | 
| 465   // a code range, the pages are allocated from the code range. | 490   // a code range, the pages are allocated from the code range. | 
| 466   static Page* AllocatePages(int requested_pages, int* allocated_pages, | 491   static Page* AllocatePages(int requested_pages, int* allocated_pages, | 
| 467                              PagedSpace* owner); | 492                              PagedSpace* owner); | 
| 468 | 493 | 
| 469   // Frees pages from a given page and after. If 'p' is the first page | 494   // Frees pages from a given page and after. Requires pages to be | 
| 470   // of a chunk, pages from 'p' are freed and this function returns an | 495   // linked in chunk-order (see the class comment). | 
| 471   // invalid page pointer. Otherwise, the function searches a page | 496   // If 'p' is the first page of a chunk, pages from 'p' are freed | 
| 472   // after 'p' that is the first page of a chunk. Pages after the | 497   // and this function returns an invalid page pointer. | 
| 473   // found page are freed and the function returns 'p'. | 498   // Otherwise, the function searches for a page after 'p' that is | 
|  | 499   // the first page of a chunk. Pages after the found page | 
|  | 500   // are freed and the function returns 'p'. | 
| 474   static Page* FreePages(Page* p); | 501   static Page* FreePages(Page* p); | 
| 475 | 502 | 
|  | 503   // Frees all pages owned by the given space. | 
|  | 504   static void FreeAllPages(PagedSpace* space); | 
|  | 505 | 
| 476   // Allocates and frees raw memory of certain size. | 506   // Allocates and frees raw memory of certain size. | 
| 477   // These are just thin wrappers around OS::Allocate and OS::Free, | 507   // These are just thin wrappers around OS::Allocate and OS::Free, | 
| 478   // but keep track of allocated bytes as part of heap. | 508   // but keep track of allocated bytes as part of heap. | 
| 479   // If the flag is EXECUTABLE and a code range exists, the requested | 509   // If the flag is EXECUTABLE and a code range exists, the requested | 
| 480   // memory is allocated from the code range.  If a code range exists | 510   // memory is allocated from the code range.  If a code range exists | 
| 481   // and the freed memory is in it, the code range manages the freed memory. | 511   // and the freed memory is in it, the code range manages the freed memory. | 
| 482   static void* AllocateRawMemory(const size_t requested, | 512   static void* AllocateRawMemory(const size_t requested, | 
| 483                                  size_t* allocated, | 513                                  size_t* allocated, | 
| 484                                  Executability executable); | 514                                  Executability executable); | 
| 485   static void FreeRawMemory(void* buf, size_t length); | 515   static void FreeRawMemory(void* buf, size_t length); | 
| (...skipping 18 matching lines...) | 
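
`FreePages()` above has a two-case return contract that is easy to misread. A hypothetical caller-side sketch, using only the behaviour stated in the comment (`ReleaseFrom` is an invented name, and `Page::is_valid()` is the validity check the class documentation recommends):

```cpp
// Hypothetical caller of FreePages(); the page list owned by the space must
// already be linked in chunk-order when this runs.
static void ReleaseFrom(Page* p) {
  Page* kept = MemoryAllocator::FreePages(p);
  if (kept->is_valid()) {
    // 'p' was not the first page of its chunk: 'p' survives (kept == p) and
    // pages from the next chunk boundary onward have been freed.
    ASSERT(kept == p);
  } else {
    // 'p' was the first page of its chunk: pages from 'p' onward, including
    // 'p' itself, have been freed.
  }
}
```
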
| 504   // Checks whether a page belongs to a space. | 534   // Checks whether a page belongs to a space. | 
| 505   static inline bool IsPageInSpace(Page* p, PagedSpace* space); | 535   static inline bool IsPageInSpace(Page* p, PagedSpace* space); | 
| 506 | 536 | 
| 507   // Returns the space that owns the given page. | 537   // Returns the space that owns the given page. | 
| 508   static inline PagedSpace* PageOwner(Page* page); | 538   static inline PagedSpace* PageOwner(Page* page); | 
| 509 | 539 | 
| 510   // Finds the first/last page in the same chunk as a given page. | 540   // Finds the first/last page in the same chunk as a given page. | 
| 511   static Page* FindFirstPageInSameChunk(Page* p); | 541   static Page* FindFirstPageInSameChunk(Page* p); | 
| 512   static Page* FindLastPageInSameChunk(Page* p); | 542   static Page* FindLastPageInSameChunk(Page* p); | 
| 513 | 543 | 
|  | 544   // Relinks the list of pages owned by the space to make it chunk-ordered. | 
|  | 545   // Returns the new first and last pages of the space. | 
|  | 546   // Also returns the last page in the relinked list that has the | 
|  | 547   // WasInUseBeforeMC flag set. | 
|  | 548   static void RelinkPageListInChunkOrder(PagedSpace* space, | 
|  | 549                                          Page** first_page, | 
|  | 550                                          Page** last_page, | 
|  | 551                                          Page** last_page_in_use); | 
|  | 552 | 
| 514 #ifdef ENABLE_HEAP_PROTECTION | 553 #ifdef ENABLE_HEAP_PROTECTION | 
| 515   // Protect/unprotect a block of memory by marking it read-only/writable. | 554   // Protect/unprotect a block of memory by marking it read-only/writable. | 
| 516   static inline void Protect(Address start, size_t size); | 555   static inline void Protect(Address start, size_t size); | 
| 517   static inline void Unprotect(Address start, size_t size, | 556   static inline void Unprotect(Address start, size_t size, | 
| 518                                Executability executable); | 557                                Executability executable); | 
| 519 | 558 | 
| 520   // Protect/unprotect a chunk given a page in the chunk. | 559   // Protect/unprotect a chunk given a page in the chunk. | 
| 521   static inline void ProtectChunkFromPage(Page* page); | 560   static inline void ProtectChunkFromPage(Page* page); | 
| 522   static inline void UnprotectChunkFromPage(Page* page); | 561   static inline void UnprotectChunkFromPage(Page* page); | 
| 523 #endif | 562 #endif | 
| (...skipping 68 matching lines...) | 
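
`RelinkPageListInChunkOrder()` reports its results through out-parameters. The sketch below shows how a space-side caller might wire them up; the method name `EnsureChunkOrder` and the exact use of the `PagedSpace` fields declared later in this file (`first_page_`, `last_page_`, `page_list_is_chunk_ordered_`) are assumptions, not part of the patch.

```cpp
// Sketch: bring the page list back into chunk-order before compaction.
void PagedSpace::EnsureChunkOrder() {
  if (page_list_is_chunk_ordered_) return;

  Page* last_in_use = NULL;
  MemoryAllocator::RelinkPageListInChunkOrder(this,
                                              &first_page_,
                                              &last_page_,
                                              &last_in_use);
  page_list_is_chunk_ordered_ = true;

  // last_in_use is now the last page in the relinked list that still has the
  // WasInUseBeforeMC flag set; a real caller would use it to reposition the
  // allocation top (e.g. via SetTop()).
}
```
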
| 592 | 631 | 
| 593   // True if the address lies in the initial chunk. | 632   // True if the address lies in the initial chunk. | 
| 594   static inline bool InInitialChunk(Address address); | 633   static inline bool InInitialChunk(Address address); | 
| 595 | 634 | 
| 596   // Initializes pages in a chunk. Returns the first page address. | 635   // Initializes pages in a chunk. Returns the first page address. | 
| 597   // This function and GetChunkId() are provided for the mark-compact | 636   // This function and GetChunkId() are provided for the mark-compact | 
| 598   // collector to rebuild page headers in the from space, which is | 637   // collector to rebuild page headers in the from space, which is | 
| 599   // used as a marking stack and its page headers are destroyed. | 638   // used as a marking stack and its page headers are destroyed. | 
| 600   static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, | 639   static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, | 
| 601                                       PagedSpace* owner); | 640                                       PagedSpace* owner); | 
|  | 641 | 
|  | 642   static Page* RelinkPagesInChunk(int chunk_id, | 
|  | 643                                   Address chunk_start, | 
|  | 644                                   int chunk_size, | 
|  | 645                                   Page* prev, | 
|  | 646                                   Page** last_page_in_use); | 
| 602 }; | 647 }; | 
| 603 | 648 | 
| 604 | 649 | 
| 605 // ----------------------------------------------------------------------------- | 650 // ----------------------------------------------------------------------------- | 
| 606 // Interface for heap object iterator to be implemented by all object space | 651 // Interface for heap object iterator to be implemented by all object space | 
| 607 // object iterators. | 652 // object iterators. | 
| 608 // | 653 // | 
| 609 // NOTE: The space-specific object iterators also implement their own next() | 654 // NOTE: The space-specific object iterators also implement their own next() | 
| 610 //       method, which is used to avoid virtual functions when | 655 //       method, which is used to avoid virtual functions when | 
| 611 //       iterating a specific space. | 656 //       iterating a specific space. | 
| (...skipping 261 matching lines...) | 
| 873   // linear in the number of objects in the page. It may be slow. | 918   // linear in the number of objects in the page. It may be slow. | 
| 874   Object* FindObject(Address addr); | 919   Object* FindObject(Address addr); | 
| 875 | 920 | 
| 876   // Checks whether page is currently in use by this space. | 921   // Checks whether page is currently in use by this space. | 
| 877   bool IsUsed(Page* page); | 922   bool IsUsed(Page* page); | 
| 878 | 923 | 
| 879   // Clears remembered sets of pages in this space. | 924   // Clears remembered sets of pages in this space. | 
| 880   void ClearRSet(); | 925   void ClearRSet(); | 
| 881 | 926 | 
| 882   // Prepares for a mark-compact GC. | 927   // Prepares for a mark-compact GC. | 
| 883   virtual void PrepareForMarkCompact(bool will_compact) = 0; | 928   virtual void PrepareForMarkCompact(bool will_compact); | 
| 884 | 929 | 
| 885   virtual Address PageAllocationTop(Page* page) = 0; | 930   virtual Address PageAllocationTop(Page* page) = 0; | 
| 886 | 931 | 
| 887   // Current capacity without growing (Size() + Available() + Waste()). | 932   // Current capacity without growing (Size() + Available() + Waste()). | 
| 888   int Capacity() { return accounting_stats_.Capacity(); } | 933   int Capacity() { return accounting_stats_.Capacity(); } | 
| 889 | 934 | 
| 890   // Total amount of memory committed for this space.  For paged | 935   // Total amount of memory committed for this space.  For paged | 
| 891   // spaces this equals the capacity. | 936   // spaces this equals the capacity. | 
| 892   int CommittedMemory() { return Capacity(); } | 937   int CommittedMemory() { return Capacity(); } | 
| 893 | 938 | 
| (...skipping 19 matching lines...) | 
| 913 | 958 | 
| 914   // Allocate the requested number of bytes for relocation during mark-compact | 959   // Allocate the requested number of bytes for relocation during mark-compact | 
| 915   // collection. | 960   // collection. | 
| 916   inline Object* MCAllocateRaw(int size_in_bytes); | 961   inline Object* MCAllocateRaw(int size_in_bytes); | 
| 917 | 962 | 
| 918   virtual bool ReserveSpace(int bytes); | 963   virtual bool ReserveSpace(int bytes); | 
| 919 | 964 | 
| 920   // Used by ReserveSpace. | 965   // Used by ReserveSpace. | 
| 921   virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0; | 966   virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0; | 
| 922 | 967 | 
|  | 968   // Frees all pages in the range from prev (exclusive) to last (inclusive). | 
|  | 969   // Freed pages are moved to the end of the page list. | 
|  | 970   void FreePages(Page* prev, Page* last); | 
|  | 971 | 
|  | 972   // Set space allocation info. | 
|  | 973   void SetTop(Address top, Address limit) { | 
|  | 974     allocation_info_.top = top; | 
|  | 975     allocation_info_.limit = limit; | 
|  | 976   } | 
|  | 977 | 
| 923   // --------------------------------------------------------------------------- | 978   // --------------------------------------------------------------------------- | 
| 924   // Mark-compact collection support functions | 979   // Mark-compact collection support functions | 
| 925 | 980 | 
| 926   // Set the relocation point to the beginning of the space. | 981   // Set the relocation point to the beginning of the space. | 
| 927   void MCResetRelocationInfo(); | 982   void MCResetRelocationInfo(); | 
| 928 | 983 | 
| 929   // Writes relocation info to the top page. | 984   // Writes relocation info to the top page. | 
| 930   void MCWriteRelocationInfoToPage() { | 985   void MCWriteRelocationInfoToPage() { | 
| 931     TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top; | 986     TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top; | 
| 932   } | 987   } | 
| (...skipping 28 matching lines...) | 
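
Together, the new public `FreePages()` and `SetTop()` members let a caller trim the used part of the space and restart allocation in one step. A hypothetical sketch (the helper and its parameters are illustrative, and it assumes `new_top` lies inside `last_used_page`):

```cpp
// Sketch: drop every page after the last page still in use and continue
// allocating from new_top.
static void ReleaseUnusedTail(PagedSpace* space,
                              Page* last_used_page,
                              Page* last_page,
                              Address new_top) {
  // Pages strictly after last_used_page, up to and including last_page, are
  // freed and moved to the end of the page list.
  space->FreePages(last_used_page, last_page);

  // Allocation resumes at new_top, bounded by the object area of the page
  // assumed to contain it.
  space->SetTop(new_top, last_used_page->ObjectAreaEnd());
}
```
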
| 961   // Overridden by subclasses to verify space-specific object | 1016   // Overridden by subclasses to verify space-specific object | 
| 962   // properties (e.g., only maps or free-list nodes are in map space). | 1017   // properties (e.g., only maps or free-list nodes are in map space). | 
| 963   virtual void VerifyObject(HeapObject* obj) {} | 1018   virtual void VerifyObject(HeapObject* obj) {} | 
| 964 | 1019 | 
| 965   // Report code object related statistics | 1020   // Report code object related statistics | 
| 966   void CollectCodeStatistics(); | 1021   void CollectCodeStatistics(); | 
| 967   static void ReportCodeStatistics(); | 1022   static void ReportCodeStatistics(); | 
| 968   static void ResetCodeStatistics(); | 1023   static void ResetCodeStatistics(); | 
| 969 #endif | 1024 #endif | 
| 970 | 1025 | 
|  | 1026   // Returns the page of the allocation pointer. | 
|  | 1027   Page* AllocationTopPage() { return TopPageOf(allocation_info_); } | 
|  | 1028 | 
| 971  protected: | 1029  protected: | 
| 972   // Maximum capacity of this space. | 1030   // Maximum capacity of this space. | 
| 973   int max_capacity_; | 1031   int max_capacity_; | 
| 974 | 1032 | 
| 975   // Accounting information for this space. | 1033   // Accounting information for this space. | 
| 976   AllocationStats accounting_stats_; | 1034   AllocationStats accounting_stats_; | 
| 977 | 1035 | 
| 978   // The first page in this space. | 1036   // The first page in this space. | 
| 979   Page* first_page_; | 1037   Page* first_page_; | 
| 980 | 1038 | 
| 981   // The last page in this space.  Initially set in Setup, updated in | 1039   // The last page in this space.  Initially set in Setup, updated in | 
| 982   // Expand and Shrink. | 1040   // Expand and Shrink. | 
| 983   Page* last_page_; | 1041   Page* last_page_; | 
| 984 | 1042 | 
|  | 1043   // True if pages owned by this space are linked in chunk-order. | 
|  | 1044   // See the MemoryAllocator class comment for the definition of chunk-order. | 
|  | 1045   bool page_list_is_chunk_ordered_; | 
|  | 1046 | 
| 985   // Normal allocation information. | 1047   // Normal allocation information. | 
| 986   AllocationInfo allocation_info_; | 1048   AllocationInfo allocation_info_; | 
| 987 | 1049 | 
| 988   // Relocation information during mark-compact collections. | 1050   // Relocation information during mark-compact collections. | 
| 989   AllocationInfo mc_forwarding_info_; | 1051   AllocationInfo mc_forwarding_info_; | 
| 990 | 1052 | 
| 991   // Bytes of each page that cannot be allocated.  Possibly non-zero | 1053   // Bytes of each page that cannot be allocated.  Possibly non-zero | 
| 992   // for pages in spaces with only fixed-size objects.  Always zero | 1054   // for pages in spaces with only fixed-size objects.  Always zero | 
| 993   // for pages in spaces with variable sized objects (those pages are | 1055   // for pages in spaces with variable sized objects (those pages are | 
| 994   // padded with free-list nodes). | 1056   // padded with free-list nodes). | 
| (...skipping 41 matching lines...) | 
| 1036   // Slow path of MCAllocateRaw. | 1098   // Slow path of MCAllocateRaw. | 
| 1037   HeapObject* SlowMCAllocateRaw(int size_in_bytes); | 1099   HeapObject* SlowMCAllocateRaw(int size_in_bytes); | 
| 1038 | 1100 | 
| 1039 #ifdef DEBUG | 1101 #ifdef DEBUG | 
| 1040   // Returns the number of total pages in this space. | 1102   // Returns the number of total pages in this space. | 
| 1041   int CountTotalPages(); | 1103   int CountTotalPages(); | 
| 1042 | 1104 | 
| 1043   void DoPrintRSet(const char* space_name); | 1105   void DoPrintRSet(const char* space_name); | 
| 1044 #endif | 1106 #endif | 
| 1045  private: | 1107  private: | 
| 1046   // Returns the page of the allocation pointer. |  | 
| 1047   Page* AllocationTopPage() { return TopPageOf(allocation_info_); } |  | 
| 1048 | 1108 | 
| 1049   // Returns a pointer to the page of the relocation pointer. | 1109   // Returns a pointer to the page of the relocation pointer. | 
| 1050   Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); } | 1110   Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); } | 
| 1051 | 1111 | 
| 1052   friend class PageIterator; | 1112   friend class PageIterator; | 
| 1053 }; | 1113 }; | 
| 1054 | 1114 | 
| 1055 | 1115 | 
| 1056 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 1116 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 
| 1057 class NumberAndSizeInfo BASE_EMBEDDED { | 1117 class NumberAndSizeInfo BASE_EMBEDDED { | 
| (...skipping 606 matching lines...) | 
| 1664   // pointer). | 1724   // pointer). | 
| 1665   int AvailableFree() { return free_list_.available(); } | 1725   int AvailableFree() { return free_list_.available(); } | 
| 1666 | 1726 | 
| 1667   // The top of allocation in a page in this space. Undefined if page is unused. | 1727   // The top of allocation in a page in this space. Undefined if page is unused. | 
| 1668   virtual Address PageAllocationTop(Page* page) { | 1728   virtual Address PageAllocationTop(Page* page) { | 
| 1669     return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd(); | 1729     return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd(); | 
| 1670   } | 1730   } | 
| 1671 | 1731 | 
| 1672   // Give a block of memory to the space's free list.  It might be added to | 1732   // Give a block of memory to the space's free list.  It might be added to | 
| 1673   // the free list or accounted as waste. | 1733   // the free list or accounted as waste. | 
| 1674   void Free(Address start, int size_in_bytes) { | 1734   // If add_to_freelist is false, then only the accounting stats are updated | 
| 1675     int wasted_bytes = free_list_.Free(start, size_in_bytes); | 1735   // and no attempt is made to add the area to the free list. | 
|  | 1736   void Free(Address start, int size_in_bytes, bool add_to_freelist) { | 
| 1676     accounting_stats_.DeallocateBytes(size_in_bytes); | 1737     accounting_stats_.DeallocateBytes(size_in_bytes); | 
| 1677     accounting_stats_.WasteBytes(wasted_bytes); | 1738 | 
|  | 1739     if (add_to_freelist) { | 
|  | 1740       int wasted_bytes = free_list_.Free(start, size_in_bytes); | 
|  | 1741       accounting_stats_.WasteBytes(wasted_bytes); | 
|  | 1742     } | 
| 1678   } | 1743   } | 
| 1679 | 1744 | 
| 1680   // Prepare for full garbage collection.  Resets the relocation pointer and | 1745   // Prepare for full garbage collection.  Resets the relocation pointer and | 
| 1681   // clears the free list. | 1746   // clears the free list. | 
| 1682   virtual void PrepareForMarkCompact(bool will_compact); | 1747   virtual void PrepareForMarkCompact(bool will_compact); | 
| 1683 | 1748 | 
| 1684   // Updates the allocation pointer to the relocation top after a mark-compact | 1749   // Updates the allocation pointer to the relocation top after a mark-compact | 
| 1685   // collection. | 1750   // collection. | 
| 1686   virtual void MCCommitRelocationInfo(); | 1751   virtual void MCCommitRelocationInfo(); | 
| 1687 | 1752 | 
| (...skipping 41 matching lines...) | 
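
The extra `add_to_freelist` argument separates accounting from free-list maintenance. The fragment below is illustrative only (the sweep-style caller and its names are assumptions); it contrasts the two intended modes of the new `Free()` signature.

```cpp
// Sketch: return a dead region to the space during sweeping.
static void ReclaimDeadRegion(OldSpace* space,
                              Address start,
                              int size_in_bytes,
                              bool page_is_kept) {
  if (page_is_kept) {
    // The page stays in use: put the region on the free list so it can be
    // reallocated; any unusable remainder is accounted as waste.
    space->Free(start, size_in_bytes, true);
  } else {
    // The whole page is about to be dropped (e.g. via FreePages()): only the
    // accounting is updated, since free-list nodes in a page that is going
    // away would be useless.
    space->Free(start, size_in_bytes, false);
  }
}
```
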
| 1729 | 1794 | 
| 1730   // The top of allocation in a page in this space. Undefined if page is unused. | 1795   // The top of allocation in a page in this space. Undefined if page is unused. | 
| 1731   virtual Address PageAllocationTop(Page* page) { | 1796   virtual Address PageAllocationTop(Page* page) { | 
| 1732     return page == TopPageOf(allocation_info_) ? top() | 1797     return page == TopPageOf(allocation_info_) ? top() | 
| 1733         : page->ObjectAreaEnd() - page_extra_; | 1798         : page->ObjectAreaEnd() - page_extra_; | 
| 1734   } | 1799   } | 
| 1735 | 1800 | 
| 1736   int object_size_in_bytes() { return object_size_in_bytes_; } | 1801   int object_size_in_bytes() { return object_size_in_bytes_; } | 
| 1737 | 1802 | 
| 1738   // Give a fixed sized block of memory to the space's free list. | 1803   // Give a fixed sized block of memory to the space's free list. | 
| 1739   void Free(Address start) { | 1804   // If add_to_freelist is false, then only the accounting stats are updated | 
| 1740     free_list_.Free(start); | 1805   // and no attempt is made to add the area to the free list. | 
|  | 1806   void Free(Address start, bool add_to_freelist) { | 
|  | 1807     if (add_to_freelist) { | 
|  | 1808       free_list_.Free(start); | 
|  | 1809     } | 
| 1741     accounting_stats_.DeallocateBytes(object_size_in_bytes_); | 1810     accounting_stats_.DeallocateBytes(object_size_in_bytes_); | 
| 1742   } | 1811   } | 
| 1743 | 1812 | 
| 1744   // Prepares for a mark-compact GC. | 1813   // Prepares for a mark-compact GC. | 
| 1745   virtual void PrepareForMarkCompact(bool will_compact); | 1814   virtual void PrepareForMarkCompact(bool will_compact); | 
| 1746 | 1815 | 
| 1747   // Updates the allocation pointer to the relocation top after a mark-compact | 1816   // Updates the allocation pointer to the relocation top after a mark-compact | 
| 1748   // collection. | 1817   // collection. | 
| 1749   virtual void MCCommitRelocationInfo(); | 1818   virtual void MCCommitRelocationInfo(); | 
| 1750 | 1819 | 
| (...skipping 331 matching lines...) | 
| 2082 | 2151 | 
| 2083  private: | 2152  private: | 
| 2084   LargeObjectChunk* current_; | 2153   LargeObjectChunk* current_; | 
| 2085   HeapObjectCallback size_func_; | 2154   HeapObjectCallback size_func_; | 
| 2086 }; | 2155 }; | 
| 2087 | 2156 | 
| 2088 | 2157 | 
| 2089 } }  // namespace v8::internal | 2158 } }  // namespace v8::internal | 
| 2090 | 2159 | 
| 2091 #endif  // V8_SPACES_H_ | 2160 #endif  // V8_SPACES_H_ | 