| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 390 matching lines...) |
| 401 | 401 |
| 402 // ---------------------------------------------------------------------------- | 402 // ---------------------------------------------------------------------------- |
| 403 // All heap objects containing executable code (code objects) must be allocated | 403 // All heap objects containing executable code (code objects) must be allocated |
| 404 // from a 2 GB range of memory, so that they can call each other using 32-bit | 404 // from a 2 GB range of memory, so that they can call each other using 32-bit |
| 405 // displacements. This happens automatically on 32-bit platforms, where 32-bit | 405 // displacements. This happens automatically on 32-bit platforms, where 32-bit |
| 406 // displacements cover the entire 4 GB virtual address space. On 64-bit | 406 // displacements cover the entire 4 GB virtual address space. On 64-bit |
| 407 // platforms, we support this using the CodeRange object, which reserves and | 407 // platforms, we support this using the CodeRange object, which reserves and |
| 408 // manages a range of virtual memory. | 408 // manages a range of virtual memory. |
| 409 class CodeRange { | 409 class CodeRange { |
| 410 public: | 410 public: |
| 411 explicit CodeRange(Isolate* isolate); |
| 412 |
| 411 // Reserves a range of virtual memory, but does not commit any of it. | 413 // Reserves a range of virtual memory, but does not commit any of it. |
| 412 // Can only be called once, at heap initialization time. | 414 // Can only be called once, at heap initialization time. |
| 413 // Returns false on failure. | 415 // Returns false on failure. |
| 414 bool Setup(const size_t requested_size); | 416 bool Setup(const size_t requested_size); |
| 415 | 417 |
| 416 // Frees the range of virtual memory, and frees the data structures used to | 418 // Frees the range of virtual memory, and frees the data structures used to |
| 417 // manage it. | 419 // manage it. |
| 418 void TearDown(); | 420 void TearDown(); |
| 419 | 421 |
| 420 bool exists() { return code_range_ != NULL; } | 422 bool exists() { return this != NULL && code_range_ != NULL; } |
| 421 bool contains(Address address) { | 423 bool contains(Address address) { |
| 422 if (code_range_ == NULL) return false; | 424 if (this == NULL || code_range_ == NULL) return false; |
| 423 Address start = static_cast<Address>(code_range_->address()); | 425 Address start = static_cast<Address>(code_range_->address()); |
| 424 return start <= address && address < start + code_range_->size(); | 426 return start <= address && address < start + code_range_->size(); |
| 425 } | 427 } |
| 426 | 428 |
| 427 // Allocates a chunk of memory from the large-object portion of | 429 // Allocates a chunk of memory from the large-object portion of |
| 428 // the code range. On platforms with no separate code range, should | 430 // the code range. On platforms with no separate code range, should |
| 429 // not be called. | 431 // not be called. |
| 430 MUST_USE_RESULT void* AllocateRawMemory(const size_t requested, | 432 MUST_USE_RESULT void* AllocateRawMemory(const size_t requested, |
| 431 size_t* allocated); | 433 size_t* allocated); |
| 432 void FreeRawMemory(void* buf, size_t length); | 434 void FreeRawMemory(void* buf, size_t length); |
| 433 | 435 |
| 434 private: | 436 private: |
| 435 CodeRange(); | 437 Isolate* isolate_; |
| 436 | 438 |
| 437 // The reserved range of virtual memory that all code objects are put in. | 439 // The reserved range of virtual memory that all code objects are put in. |
| 438 VirtualMemory* code_range_; | 440 VirtualMemory* code_range_; |
| 439 // Plain old data class, just a struct plus a constructor. | 441 // Plain old data class, just a struct plus a constructor. |
| 440 class FreeBlock { | 442 class FreeBlock { |
| 441 public: | 443 public: |
| 442 FreeBlock(Address start_arg, size_t size_arg) | 444 FreeBlock(Address start_arg, size_t size_arg) |
| 443 : start(start_arg), size(size_arg) {} | 445 : start(start_arg), size(size_arg) {} |
| 444 FreeBlock(void* start_arg, size_t size_arg) | 446 FreeBlock(void* start_arg, size_t size_arg) |
| 445 : start(static_cast<Address>(start_arg)), size(size_arg) {} | 447 : start(static_cast<Address>(start_arg)), size(size_arg) {} |
| (...skipping 13 matching lines...) |
| 459 | 461 |
| 460 // Finds a block on the allocation list that contains at least the | 462 // Finds a block on the allocation list that contains at least the |
| 461 // requested amount of memory. If none is found, sorts and merges | 463 // requested amount of memory. If none is found, sorts and merges |
| 462 // the existing free memory blocks, and searches again. | 464 // the existing free memory blocks, and searches again. |
| 463 // If none can be found, terminates V8 with FatalProcessOutOfMemory. | 465 // If none can be found, terminates V8 with FatalProcessOutOfMemory. |
| 464 void GetNextAllocationBlock(size_t requested); | 466 void GetNextAllocationBlock(size_t requested); |
| 465 // Compares the start addresses of two free blocks. | 467 // Compares the start addresses of two free blocks. |
| 466 static int CompareFreeBlockAddress(const FreeBlock* left, | 468 static int CompareFreeBlockAddress(const FreeBlock* left, |
| 467 const FreeBlock* right); | 469 const FreeBlock* right); |
| 468 | 470 |
| 469 friend class Isolate; | |
| 470 | |
| 471 Isolate* isolate_; | |
| 472 | |
| 473 DISALLOW_COPY_AND_ASSIGN(CodeRange); | 471 DISALLOW_COPY_AND_ASSIGN(CodeRange); |
| 474 }; | 472 }; |
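
For orientation, here is a minimal sketch of how the CodeRange declared above would be driven by its owner after this change. The caller and the sizes are illustrative, not part of this patch; only the constructor, Setup, AllocateRawMemory, FreeRawMemory, and TearDown calls come from the declaration itself.

    // Hedged usage sketch for the CodeRange API above; sizes are illustrative.
    void UseCodeRange(Isolate* isolate) {
      CodeRange code_range(isolate);        // constructor now takes the Isolate
      if (!code_range.Setup(512 * 1024 * 1024)) return;  // reserve only
      size_t allocated = 0;
      void* chunk = code_range.AllocateRawMemory(64 * 1024, &allocated);
      if (chunk != NULL) {
        // ... place a large code object inside [chunk, chunk + allocated) ...
        code_range.FreeRawMemory(chunk, allocated);
      }
      code_range.TearDown();                // releases the whole reservation
    }
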
| 475 | 473 |
| 476 | 474 |
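
GetNextAllocationBlock's strategy, as documented above, is first-fit with a sort-and-merge fallback. The self-contained sketch below shows that policy over a simplified free list; the Block type and the FindBlock name are stand-ins rather than this class's members, and the real code terminates with FatalProcessOutOfMemory instead of returning false.

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Simplified stand-in for CodeRange::FreeBlock.
    struct Block { char* start; size_t size; };

    static bool BlockAddressLess(const Block& a, const Block& b) {
      return a.start < b.start;  // mirrors CompareFreeBlockAddress
    }

    // First fit; on a miss, sort by address, coalesce adjacent blocks, retry.
    static bool FindBlock(std::vector<Block>* free_list, size_t requested,
                          Block* out) {
      for (size_t pass = 0; pass < 2; pass++) {
        for (size_t i = 0; i < free_list->size(); i++) {
          if ((*free_list)[i].size >= requested) {
            *out = (*free_list)[i];
            free_list->erase(free_list->begin() + i);
            return true;
          }
        }
        if (pass == 1) break;
        // Merge neighbors whose ranges touch, then search once more.
        std::sort(free_list->begin(), free_list->end(), BlockAddressLess);
        std::vector<Block> merged;
        for (size_t i = 0; i < free_list->size(); i++) {
          Block b = (*free_list)[i];
          if (!merged.empty() &&
              merged.back().start + merged.back().size == b.start) {
            merged.back().size += b.size;  // coalesce with predecessor
          } else {
            merged.push_back(b);
          }
        }
        free_list->swap(merged);
      }
      return false;  // the real code calls FatalProcessOutOfMemory here
    }
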
| 477 // ---------------------------------------------------------------------------- | 475 // ---------------------------------------------------------------------------- |
| 478 // A space acquires chunks of memory from the operating system. The memory | 476 // A space acquires chunks of memory from the operating system. The memory |
| 479 // allocator manages chunks for the paged heap spaces (old space and map | 477 // allocator manages chunks for the paged heap spaces (old space and map |
| 480 // space). A paged chunk consists of pages. Pages in a chunk have contiguous | 478 // space). A paged chunk consists of pages. Pages in a chunk have contiguous |
| 481 // addresses and are linked as a list. | 479 // addresses and are linked as a list. |
| 482 // | 480 // |
| (...skipping 10 matching lines...) |
| 493 // | 491 // |
| 494 // The fact that pages for paged spaces are allocated and deallocated in chunks | 492 // The fact that pages for paged spaces are allocated and deallocated in chunks |
| 495 // induces a constraint on the order of pages in a linked list. We say that | 493 // induces a constraint on the order of pages in a linked list. We say that |
| 496 // pages are linked in the chunk-order if and only if every two consecutive | 494 // pages are linked in the chunk-order if and only if every two consecutive |
| 497 // pages from the same chunk are consecutive in the linked list. | 495 // pages from the same chunk are consecutive in the linked list. |
| 498 // | 496 // |
| 499 | 497 |
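
The chunk-order property defined in the comment above can be made concrete with a small checker. The PageNode type below is a simplified stand-in (real Pages carry far more state); it records only which chunk a page came from and the page's position within that chunk.

    #include <cstddef>

    // Simplified stand-in for a page; not V8's Page class.
    struct PageNode {
      PageNode* next;         // next page in the space's linked list
      int chunk_id;           // which chunk this page was carved from
      int index_in_chunk;     // position of the page inside its chunk
      bool is_last_in_chunk;  // true for the chunk's final page
    };

    // Pages are in chunk-order iff every page that has a successor inside
    // its chunk is immediately followed in the list by that successor.
    static bool IsInChunkOrder(PageNode* head) {
      for (PageNode* p = head; p != NULL; p = p->next) {
        if (!p->is_last_in_chunk &&
            (p->next == NULL ||
             p->next->chunk_id != p->chunk_id ||
             p->next->index_in_chunk != p->index_in_chunk + 1)) {
          return false;
        }
      }
      return true;
    }
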
| 500 | 498 |
| 501 class MemoryAllocator { | 499 class MemoryAllocator { |
| 502 public: | 500 public: |
| 501 explicit MemoryAllocator(Isolate* isolate); |
| 502 |
| 503 // Initializes its internal bookkeeping structures. | 503 // Initializes its internal bookkeeping structures. |
| 504 // Max capacity of the total space and executable memory limit. | 504 // Max capacity of the total space and executable memory limit. |
| 505 bool Setup(intptr_t max_capacity, intptr_t capacity_executable); | 505 bool Setup(intptr_t max_capacity, intptr_t capacity_executable); |
| 506 | 506 |
| 507 // Deletes valid chunks. | 507 // Deletes valid chunks. |
| 508 void TearDown(); | 508 void TearDown(); |
| 509 | 509 |
| 510 // Reserves an initial address range of virtual memory to be split between | 510 // Reserves an initial address range of virtual memory to be split between |
| 511 // the two new space semispaces, the old space, and the map space. The | 511 // the two new space semispaces, the old space, and the map space. The |
| 512 // memory is not yet committed or assigned to spaces and split into pages. | 512 // memory is not yet committed or assigned to spaces and split into pages. |
| (...skipping 137 matching lines...) |
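
ReserveInitialChunk's comment above describes a reserve-then-commit protocol: address space for all the spaces is claimed up front, and backing store is committed only as spaces take ownership. A minimal POSIX illustration of that pattern follows; it uses raw mmap/mprotect, whereas the real code goes through V8's VirtualMemory wrapper, and all names here are illustrative.

    #include <stddef.h>
    #include <sys/mman.h>

    // Reserve address space without committing backing store.
    static void* Reserve(size_t size) {
      void* base = mmap(NULL, size, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return base == MAP_FAILED ? NULL : base;
    }

    // Commit (make accessible) a sub-range of a reservation.
    static bool Commit(void* address, size_t size) {
      return mprotect(address, size, PROT_READ | PROT_WRITE) == 0;
    }

    // One contiguous reservation later parceled out to the semispaces,
    // old space, and map space; only what a space needs is committed.
    static void* SetupInitialChunk(size_t total, size_t initial_commit) {
      void* base = Reserve(total);
      if (base == NULL) return NULL;
      if (!Commit(base, initial_commit)) return NULL;
      return base;
    }
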
| 650 static const int kChunkTableLevels = 4; | 650 static const int kChunkTableLevels = 4; |
| 651 static const int kChunkTableBitsPerLevel = 12; | 651 static const int kChunkTableBitsPerLevel = 12; |
| 652 #else | 652 #else |
| 653 static const int kPagesPerChunk = 16; | 653 static const int kPagesPerChunk = 16; |
| 654 // On 32-bit platforms the chunk table consists of 2 levels of 256-entry tables. | 654 // On 32-bit platforms the chunk table consists of 2 levels of 256-entry tables. |
| 655 static const int kChunkTableLevels = 2; | 655 static const int kChunkTableLevels = 2; |
| 656 static const int kChunkTableBitsPerLevel = 8; | 656 static const int kChunkTableBitsPerLevel = 8; |
| 657 #endif | 657 #endif |
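
The constants above determine how many address bits the chunk table can key on: 4 levels x 12 bits = 48 bits on 64-bit targets, and 2 levels x 8 bits = 16 bits on 32-bit targets. The sketch below shows one plausible way such a table would split an address into per-level indices; kChunkShift is an assumption (16 pages of 8 KB gives 128 KB chunks), since Page::kPageSize is not visible in this hunk.

    #include <stdint.h>

    // Hedged sketch of multi-level chunk-table indexing. Constants mirror
    // the 32-bit branch above; kChunkShift is assumed, not from this patch.
    static const int kLevels = 2;        // kChunkTableLevels (32-bit)
    static const int kBitsPerLevel = 8;  // kChunkTableBitsPerLevel (32-bit)
    static const int kChunkShift = 17;   // assumed log2(kChunkSize)

    // Splits the chunk number (address >> kChunkShift) into one index per
    // table level, most-significant bits first: 2 * 8 = 16 key bits here,
    // 4 * 12 = 48 with the 64-bit constants.
    static inline int ChunkTableIndexAt(uintptr_t address, int level) {
      uintptr_t key = address >> kChunkShift;
      int shift = (kLevels - 1 - level) * kBitsPerLevel;
      return static_cast<int>((key >> shift) & ((1u << kBitsPerLevel) - 1));
    }
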
| 658 | 658 |
| 659 private: | 659 private: |
| 660 MemoryAllocator(); | 660 static const int kChunkSize = kPagesPerChunk * Page::kPageSize; |
| 661 | 661 |
| 662 static const int kChunkSize = kPagesPerChunk * Page::kPageSize; | 662 Isolate* isolate_; |
| 663 | 663 |
| 664 // Maximum space size in bytes. | 664 // Maximum space size in bytes. |
| 665 intptr_t capacity_; | 665 intptr_t capacity_; |
| 666 // Maximum subset of capacity_ that can be executable | 666 // Maximum subset of capacity_ that can be executable |
| 667 intptr_t capacity_executable_; | 667 intptr_t capacity_executable_; |
| 668 | 668 |
| 669 // Allocated space size in bytes. | 669 // Allocated space size in bytes. |
| 670 intptr_t size_; | 670 intptr_t size_; |
| 671 | 671 |
| 672 // Allocated executable space size in bytes. | 672 // Allocated executable space size in bytes. |
| (...skipping 73 matching lines...) |
| 746 // used as a marking stack and its page headers are destroyed. | 746 // used as a marking stack and its page headers are destroyed. |
| 747 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, | 747 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, |
| 748 PagedSpace* owner); | 748 PagedSpace* owner); |
| 749 | 749 |
| 750 Page* RelinkPagesInChunk(int chunk_id, | 750 Page* RelinkPagesInChunk(int chunk_id, |
| 751 Address chunk_start, | 751 Address chunk_start, |
| 752 size_t chunk_size, | 752 size_t chunk_size, |
| 753 Page* prev, | 753 Page* prev, |
| 754 Page** last_page_in_use); | 754 Page** last_page_in_use); |
| 755 | 755 |
| 756 friend class Isolate; | |
| 757 | |
| 758 Isolate* isolate_; | |
| 759 | |
| 760 DISALLOW_COPY_AND_ASSIGN(MemoryAllocator); | 756 DISALLOW_COPY_AND_ASSIGN(MemoryAllocator); |
| 761 }; | 757 }; |
| 762 | 758 |
| 763 | 759 |
| 764 // ----------------------------------------------------------------------------- | 760 // ----------------------------------------------------------------------------- |
| 765 // Interface for heap object iterator to be implemented by all object space | 761 // Interface for heap object iterator to be implemented by all object space |
| 766 // object iterators. | 762 // object iterators. |
| 767 // | 763 // |
| 768 // NOTE: The space-specific object iterators also implement their own next() | 764 // NOTE: The space-specific object iterators also implement their own next() |
| 769 // method, which is used to avoid virtual function calls | 765 // method, which is used to avoid virtual function calls |
| (...skipping 1538 matching lines...) |
| 2308 } | 2304 } |
| 2309 // Must be small, since an iteration is used for lookup. | 2305 // Must be small, since an iteration is used for lookup. |
| 2310 static const int kMaxComments = 64; | 2306 static const int kMaxComments = 64; |
| 2311 }; | 2307 }; |
| 2312 #endif | 2308 #endif |
| 2313 | 2309 |
| 2314 | 2310 |
| 2315 } } // namespace v8::internal | 2311 } } // namespace v8::internal |
| 2316 | 2312 |
| 2317 #endif // V8_SPACES_H_ | 2313 #endif // V8_SPACES_H_ |