Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(630)

Side by Side Diff: src/spaces.h

Issue 6880010: Merge (7265, 7271] from bleeding_edge to experimental/gc branch.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: '' Created 9 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 16 matching lines...) Expand all
27 27
28 #ifndef V8_SPACES_H_ 28 #ifndef V8_SPACES_H_
29 #define V8_SPACES_H_ 29 #define V8_SPACES_H_
30 30
31 #include "list-inl.h" 31 #include "list-inl.h"
32 #include "log.h" 32 #include "log.h"
33 33
34 namespace v8 { 34 namespace v8 {
35 namespace internal { 35 namespace internal {
36 36
37 class Isolate;
38
37 // ----------------------------------------------------------------------------- 39 // -----------------------------------------------------------------------------
38 // Heap structures: 40 // Heap structures:
39 // 41 //
40 // A JS heap consists of a young generation, an old generation, and a large 42 // A JS heap consists of a young generation, an old generation, and a large
41 // object space. The young generation is divided into two semispaces. A 43 // object space. The young generation is divided into two semispaces. A
42 // scavenger implements Cheney's copying algorithm. The old generation is 44 // scavenger implements Cheney's copying algorithm. The old generation is
43 // separated into a map space and an old object space. The map space contains 45 // separated into a map space and an old object space. The map space contains
44 // all (and only) map objects, the rest of old objects go into the old space. 46 // all (and only) map objects, the rest of old objects go into the old space.
45 // The old generation is collected by a mark-sweep-compact collector. 47 // The old generation is collected by a mark-sweep-compact collector.
46 // 48 //
(...skipping 323 matching lines...) Expand 10 before | Expand all | Expand 10 after
370 372
371 bool IsFlagSet(int flag) { 373 bool IsFlagSet(int flag) {
372 return (flags_ & (1 << flag)) != 0; 374 return (flags_ & (1 << flag)) != 0;
373 } 375 }
374 376
375 static const intptr_t kAlignment = (1 << kPageSizeBits); 377 static const intptr_t kAlignment = (1 << kPageSizeBits);
376 378
377 static const intptr_t kAlignmentMask = kAlignment - 1; 379 static const intptr_t kAlignmentMask = kAlignment - 1;
378 380
379 static const size_t kHeaderSize = kPointerSize + kPointerSize + kPointerSize + 381 static const size_t kHeaderSize = kPointerSize + kPointerSize + kPointerSize +
380 kPointerSize + kPointerSize + kPointerSize + kPointerSize; 382 kPointerSize + kPointerSize + kPointerSize + kPointerSize + kPointerSize;
381 383
382 static const size_t kMarksBitmapLength = 384 static const size_t kMarksBitmapLength =
383 (1 << kPageSizeBits) >> (kPointerSizeLog2); 385 (1 << kPageSizeBits) >> (kPointerSizeLog2);
384 386
385 static const size_t kMarksBitmapSize = 387 static const size_t kMarksBitmapSize =
386 (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2); 388 (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
387 389
388 static const int kBodyOffset = 390 static const int kBodyOffset =
389 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + kMarksBitmapSize)); 391 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + kMarksBitmapSize));
390 392
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
435 return static_cast<uint32_t>(offset) >> kPointerSizeLog2; 437 return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
436 } 438 }
437 439
438 inline Address MarkbitIndexToAddress(uint32_t index) { 440 inline Address MarkbitIndexToAddress(uint32_t index) {
439 return this->address() + (index << kPointerSizeLog2); 441 return this->address() + (index << kPointerSizeLog2);
440 } 442 }
441 443
442 void InsertAfter(MemoryChunk* other); 444 void InsertAfter(MemoryChunk* other);
443 void Unlink(); 445 void Unlink();
444 446
447 inline Heap* heap() { return heap_; }
448
445 protected: 449 protected:
446 MemoryChunk* next_chunk_; 450 MemoryChunk* next_chunk_;
447 MemoryChunk* prev_chunk_; 451 MemoryChunk* prev_chunk_;
448 size_t size_; 452 size_t size_;
449 intptr_t flags_; 453 intptr_t flags_;
450 // The identity of the owning space. This is tagged as a failure pointer, but 454 // The identity of the owning space. This is tagged as a failure pointer, but
451 // no failure can be in an object, so this can be distinguished from any entry 455 // no failure can be in an object, so this can be distinguished from any entry
452 // in a fixed array. 456 // in a fixed array.
453 Address owner_; 457 Address owner_;
458 Heap* heap_;
454 // This flag indicates that the page is not being tracked by the store buffer. 459 // This flag indicates that the page is not being tracked by the store buffer.
455 // At any point where we have to iterate over pointers to new space, we must 460 // At any point where we have to iterate over pointers to new space, we must
456 // search this page for pointers to new space. 461 // search this page for pointers to new space.
457 bool scan_on_scavenge_; 462 bool scan_on_scavenge_;
458 // Used by the store buffer to keep track of which pages to mark scan-on- 463 // Used by the store buffer to keep track of which pages to mark scan-on-
459 // scavenge. 464 // scavenge.
460 int store_buffer_counter_; 465 int store_buffer_counter_;
461 466
462 static MemoryChunk* Initialize(Address base, 467 static MemoryChunk* Initialize(Heap* heap,
468 Address base,
463 size_t size, 469 size_t size,
464 Executability executable, 470 Executability executable,
465 Space* owner); 471 Space* owner);
466 472
467 friend class MemoryAllocator; 473 friend class MemoryAllocator;
468 }; 474 };
469 475
470 STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); 476 STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
471 477
472 // ----------------------------------------------------------------------------- 478 // -----------------------------------------------------------------------------
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after
541 547
542 static const int kFirstUsedCell = 548 static const int kFirstUsedCell =
543 (kObjectStartOffset/kPointerSize) >> MarkbitsBitmap::kBitsPerCellLog2; 549 (kObjectStartOffset/kPointerSize) >> MarkbitsBitmap::kBitsPerCellLog2;
544 550
545 static const int kLastUsedCell = 551 static const int kLastUsedCell =
546 ((kPageSize - kPointerSize)/kPointerSize) >> 552 ((kPageSize - kPointerSize)/kPointerSize) >>
547 MarkbitsBitmap::kBitsPerCellLog2; 553 MarkbitsBitmap::kBitsPerCellLog2;
548 554
549 inline void ClearGCFields(); 555 inline void ClearGCFields();
550 556
551 static inline Page* Initialize(MemoryChunk* chunk, 557 static inline Page* Initialize(Heap* heap,
558 MemoryChunk* chunk,
552 Executability executable, 559 Executability executable,
553 PagedSpace* owner); 560 PagedSpace* owner);
554 561
555 void InitializeAsAnchor(PagedSpace* owner); 562 void InitializeAsAnchor(PagedSpace* owner);
556 563
557 friend class MemoryAllocator; 564 friend class MemoryAllocator;
558 }; 565 };
559 566
560 567
561 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize); 568 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
562 569
563 570
564 class LargePage : public MemoryChunk { 571 class LargePage : public MemoryChunk {
565 public: 572 public:
566 HeapObject* GetObject() { 573 HeapObject* GetObject() {
567 return HeapObject::FromAddress(body()); 574 return HeapObject::FromAddress(body());
568 } 575 }
569 576
570 inline LargePage* next_page() const { 577 inline LargePage* next_page() const {
571 return static_cast<LargePage*>(next_chunk()); 578 return static_cast<LargePage*>(next_chunk());
572 } 579 }
573 580
574 inline void set_next_page(LargePage* page) { 581 inline void set_next_page(LargePage* page) {
575 set_next_chunk(page); 582 set_next_chunk(page);
576 } 583 }
577 private: 584 private:
578 static LargePage* Initialize(MemoryChunk* chunk) { 585 static LargePage* Initialize(Heap* heap,
586 MemoryChunk* chunk) {
587 // TODO(gc) ISOLATESMERGE initialize chunk to point to heap?
579 return static_cast<LargePage*>(chunk); 588 return static_cast<LargePage*>(chunk);
580 } 589 }
581 590
582 friend class MemoryAllocator; 591 friend class MemoryAllocator;
583 }; 592 };
584 593
585 STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize); 594 STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
586 595
587 // ---------------------------------------------------------------------------- 596 // ----------------------------------------------------------------------------
588 // Space is the abstract superclass for all allocation spaces. 597 // Space is the abstract superclass for all allocation spaces.
589 class Space : public Malloced { 598 class Space : public Malloced {
590 public: 599 public:
591 Space(AllocationSpace id, Executability executable) 600 Space(Heap* heap, AllocationSpace id, Executability executable)
592 : id_(id), executable_(executable) {} 601 : heap_(heap), id_(id), executable_(executable) {}
593 602
594 virtual ~Space() {} 603 virtual ~Space() {}
595 604
605 Heap* heap() const { return heap_; }
606
596 // Does the space need executable memory? 607 // Does the space need executable memory?
597 Executability executable() { return executable_; } 608 Executability executable() { return executable_; }
598 609
599 // Identity used in error reporting. 610 // Identity used in error reporting.
600 AllocationSpace identity() { return id_; } 611 AllocationSpace identity() { return id_; }
601 612
602 // Returns allocated size. 613 // Returns allocated size.
603 virtual intptr_t Size() = 0; 614 virtual intptr_t Size() = 0;
604 615
605 // Returns size of objects. Can differ from the allocated size 616 // Returns size of objects. Can differ from the allocated size
(...skipping 12 matching lines...) Expand all
618 629
619 // After calling this we can allocate a certain number of bytes using only 630 // After calling this we can allocate a certain number of bytes using only
620 // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope) 631 // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
621 // without using freelists or causing a GC. This is used by partial 632 // without using freelists or causing a GC. This is used by partial
622 // snapshots. It returns true if space was reserved or false if a GC is 633 // snapshots. It returns true if space was reserved or false if a GC is
623 // needed. For paged spaces the space requested must include the space wasted 634 // needed. For paged spaces the space requested must include the space wasted
624 // at the end of each page when allocating linearly. 635 // at the end of each page when allocating linearly.
625 virtual bool ReserveSpace(int bytes) = 0; 636 virtual bool ReserveSpace(int bytes) = 0;
626 637
627 private: 638 private:
639 Heap* heap_;
628 AllocationSpace id_; 640 AllocationSpace id_;
629 Executability executable_; 641 Executability executable_;
630 }; 642 };
631 643
632 644
633 // ---------------------------------------------------------------------------- 645 // ----------------------------------------------------------------------------
634 // All heap objects containing executable code (code objects) must be allocated 646 // All heap objects containing executable code (code objects) must be allocated
635 // from a 2 GB range of memory, so that they can call each other using 32-bit 647 // from a 2 GB range of memory, so that they can call each other using 32-bit
636 // displacements. This happens automatically on 32-bit platforms, where 32-bit 648 // displacements. This happens automatically on 32-bit platforms, where 32-bit
637 // displacements cover the entire 4GB virtual address space. On 64-bit 649 // displacements cover the entire 4GB virtual address space. On 64-bit
638 // platforms, we support this using the CodeRange object, which reserves and 650 // platforms, we support this using the CodeRange object, which reserves and
639 // manages a range of virtual memory. 651 // manages a range of virtual memory.
640 class CodeRange : public AllStatic { 652 class CodeRange {
641 public: 653 public:
642 // Reserves a range of virtual memory, but does not commit any of it. 654 // Reserves a range of virtual memory, but does not commit any of it.
643 // Can only be called once, at heap initialization time. 655 // Can only be called once, at heap initialization time.
644 // Returns false on failure. 656 // Returns false on failure.
645 static bool Setup(const size_t requested_size); 657 bool Setup(const size_t requested_size);
646 658
647 // Frees the range of virtual memory, and frees the data structures used to 659 // Frees the range of virtual memory, and frees the data structures used to
648 // manage it. 660 // manage it.
649 static void TearDown(); 661 void TearDown();
650 662
651 static bool exists() { return code_range_ != NULL; } 663 bool exists() { return code_range_ != NULL; }
652 static bool contains(Address address) { 664 bool contains(Address address) {
653 if (code_range_ == NULL) return false; 665 if (code_range_ == NULL) return false;
654 Address start = static_cast<Address>(code_range_->address()); 666 Address start = static_cast<Address>(code_range_->address());
655 return start <= address && address < start + code_range_->size(); 667 return start <= address && address < start + code_range_->size();
656 } 668 }
657 669
658 // Allocates a chunk of memory from the large-object portion of 670 // Allocates a chunk of memory from the large-object portion of
659 // the code range. On platforms with no separate code range, should 671 // the code range. On platforms with no separate code range, should
660 // not be called. 672 // not be called.
661 MUST_USE_RESULT static Address AllocateRawMemory(const size_t requested, 673 MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
662 size_t* allocated); 674 size_t* allocated);
663 static void FreeRawMemory(Address buf, size_t length); 675 void FreeRawMemory(Address buf, size_t length);
664 676
665 private: 677 private:
678 CodeRange();
679
666 // The reserved range of virtual memory that all code objects are put in. 680 // The reserved range of virtual memory that all code objects are put in.
667 static VirtualMemory* code_range_; 681 VirtualMemory* code_range_;
668 // Plain old data class, just a struct plus a constructor. 682 // Plain old data class, just a struct plus a constructor.
669 class FreeBlock { 683 class FreeBlock {
670 public: 684 public:
671 FreeBlock(Address start_arg, size_t size_arg) 685 FreeBlock(Address start_arg, size_t size_arg)
672 : start(start_arg), size(size_arg) { 686 : start(start_arg), size(size_arg) {
673 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); 687 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
674 ASSERT(size >= static_cast<size_t>(Page::kPageSize)); 688 ASSERT(size >= static_cast<size_t>(Page::kPageSize));
675 } 689 }
676 FreeBlock(void* start_arg, size_t size_arg) 690 FreeBlock(void* start_arg, size_t size_arg)
677 : start(static_cast<Address>(start_arg)), size(size_arg) { 691 : start(static_cast<Address>(start_arg)), size(size_arg) {
678 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); 692 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
679 ASSERT(size >= static_cast<size_t>(Page::kPageSize)); 693 ASSERT(size >= static_cast<size_t>(Page::kPageSize));
680 } 694 }
681 695
682 Address start; 696 Address start;
683 size_t size; 697 size_t size;
684 }; 698 };
685 699
686 // Freed blocks of memory are added to the free list. When the allocation 700 // Freed blocks of memory are added to the free list. When the allocation
687 // list is exhausted, the free list is sorted and merged to make the new 701 // list is exhausted, the free list is sorted and merged to make the new
688 // allocation list. 702 // allocation list.
689 static List<FreeBlock> free_list_; 703 List<FreeBlock> free_list_;
690 // Memory is allocated from the free blocks on the allocation list. 704 // Memory is allocated from the free blocks on the allocation list.
691 // The block at current_allocation_block_index_ is the current block. 705 // The block at current_allocation_block_index_ is the current block.
692 static List<FreeBlock> allocation_list_; 706 List<FreeBlock> allocation_list_;
693 static int current_allocation_block_index_; 707 int current_allocation_block_index_;
694 708
695 // Finds a block on the allocation list that contains at least the 709 // Finds a block on the allocation list that contains at least the
696 // requested amount of memory. If none is found, sorts and merges 710 // requested amount of memory. If none is found, sorts and merges
697 // the existing free memory blocks, and searches again. 711 // the existing free memory blocks, and searches again.
698 // If none can be found, terminates V8 with FatalProcessOutOfMemory. 712 // If none can be found, terminates V8 with FatalProcessOutOfMemory.
699 static void GetNextAllocationBlock(size_t requested); 713 void GetNextAllocationBlock(size_t requested);
700 // Compares the start addresses of two free blocks. 714 // Compares the start addresses of two free blocks.
701 static int CompareFreeBlockAddress(const FreeBlock* left, 715 static int CompareFreeBlockAddress(const FreeBlock* left,
702 const FreeBlock* right); 716 const FreeBlock* right);
717
718 friend class Isolate;
719
720 Isolate* isolate_;
721
722 DISALLOW_COPY_AND_ASSIGN(CodeRange);
703 }; 723 };
704 724
705 725
706 // ---------------------------------------------------------------------------- 726 // ----------------------------------------------------------------------------
707 // A space acquires chunks of memory from the operating system. The memory 727 // A space acquires chunks of memory from the operating system. The memory
708 // allocator allocates and deallocates pages for the paged heap spaces and large 728 // allocator allocates and deallocates pages for the paged heap spaces and large
709 // pages for large object space. 729 // pages for large object space.
710 // 730 //
711 // Each space has to manage its own pages. 731 // Each space has to manage its own pages.
712 // 732 //
713 class MemoryAllocator : public AllStatic { 733 class MemoryAllocator {
714 public: 734 public:
715 // Initializes its internal bookkeeping structures. 735 // Initializes its internal bookkeeping structures.
716 // Max capacity of the total space and executable memory limit. 736 // Max capacity of the total space and executable memory limit.
717 static bool Setup(intptr_t max_capacity, intptr_t capacity_executable); 737 bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
718 738
719 static void TearDown(); 739 void TearDown();
720 740
721 static Page* AllocatePage(PagedSpace* owner, Executability executable); 741 Page* AllocatePage(PagedSpace* owner, Executability executable);
722 742
723 static LargePage* AllocateLargePage(intptr_t object_size, 743 LargePage* AllocateLargePage(intptr_t object_size,
724 Executability executable, 744 Executability executable,
725 Space* owner); 745 Space* owner);
726 746
727 static void Free(MemoryChunk* chunk); 747 void Free(MemoryChunk* chunk);
728 748
729 // Returns the maximum available bytes of heaps. 749 // Returns the maximum available bytes of heaps.
730 static intptr_t Available() { 750 intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
731 return capacity_ < size_ ? 0 : capacity_ - size_;
732 }
733 751
734 // Returns allocated spaces in bytes. 752 // Returns allocated spaces in bytes.
735 static intptr_t Size() { return size_; } 753 intptr_t Size() { return size_; }
736 754
737 // Returns the maximum available executable bytes of heaps. 755 // Returns the maximum available executable bytes of heaps.
738 static intptr_t AvailableExecutable() { 756 intptr_t AvailableExecutable() {
739 if (capacity_executable_ < size_executable_) return 0; 757 if (capacity_executable_ < size_executable_) return 0;
740 return capacity_executable_ - size_executable_; 758 return capacity_executable_ - size_executable_;
741 } 759 }
742 760
743 // Returns allocated executable spaces in bytes. 761 // Returns allocated executable spaces in bytes.
744 static intptr_t SizeExecutable() { return size_executable_; } 762 intptr_t SizeExecutable() { return size_executable_; }
745 763
746 // Returns maximum available bytes that the old space can have. 764 // Returns maximum available bytes that the old space can have.
747 static intptr_t MaxAvailable() { 765 intptr_t MaxAvailable() {
748 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; 766 return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
749 } 767 }
750 768
751 #ifdef ENABLE_HEAP_PROTECTION 769 #ifdef ENABLE_HEAP_PROTECTION
752 // Protect/unprotect a block of memory by marking it read-only/writable. 770 // Protect/unprotect a block of memory by marking it read-only/writable.
753 static inline void Protect(Address start, size_t size); 771 inline void Protect(Address start, size_t size);
754 static inline void Unprotect(Address start, size_t size, 772 inline void Unprotect(Address start, size_t size,
755 Executability executable); 773 Executability executable);
756 774
757 // Protect/unprotect a chunk given a page in the chunk. 775 // Protect/unprotect a chunk given a page in the chunk.
758 static inline void ProtectChunkFromPage(Page* page); 776 inline void ProtectChunkFromPage(Page* page);
759 static inline void UnprotectChunkFromPage(Page* page); 777 inline void UnprotectChunkFromPage(Page* page);
760 #endif 778 #endif
761 779
762 #ifdef DEBUG 780 #ifdef DEBUG
763 // Reports statistic info of the space. 781 // Reports statistic info of the space.
764 static void ReportStatistics(); 782 void ReportStatistics();
765 #endif 783 #endif
766 784
767 static MemoryChunk* AllocateChunk(intptr_t body_size, 785 MemoryChunk* AllocateChunk(intptr_t body_size,
768 Executability executable, 786 Executability executable,
769 Space* space); 787 Space* space);
770 788
771 static Address AllocateAlignedMemory(const size_t requested, 789 Address AllocateAlignedMemory(const size_t requested,
772 size_t alignment, 790 size_t alignment,
773 Executability executable, 791 Executability executable,
774 size_t* allocated_size); 792 size_t* allocated_size);
775 793
776 static Address ReserveAlignedMemory(const size_t requested, 794 Address ReserveAlignedMemory(const size_t requested,
777 size_t alignment, 795 size_t alignment,
778 size_t* allocated_size); 796 size_t* allocated_size);
779 797
780 static void FreeMemory(Address addr, size_t size, Executability executable); 798 void FreeMemory(Address addr, size_t size, Executability executable);
781 799
782 // Commit a contiguous block of memory from the initial chunk. Assumes that 800 // Commit a contiguous block of memory from the initial chunk. Assumes that
783 // the address is not NULL, the size is greater than zero, and that the 801 // the address is not NULL, the size is greater than zero, and that the
784 // block is contained in the initial chunk. Returns true if it succeeded 802 // block is contained in the initial chunk. Returns true if it succeeded
785 // and false otherwise. 803 // and false otherwise.
786 static bool CommitBlock(Address start, size_t size, Executability executable); 804 bool CommitBlock(Address start, size_t size, Executability executable);
787 805
788 // Uncommit a contiguous block of memory [start..(start+size)[. 806 // Uncommit a contiguous block of memory [start..(start+size)[.
789 // start is not NULL, the size is greater than zero, and the 807 // start is not NULL, the size is greater than zero, and the
790 // block is contained in the initial chunk. Returns true if it succeeded 808 // block is contained in the initial chunk. Returns true if it succeeded
791 // and false otherwise. 809 // and false otherwise.
792 static bool UncommitBlock(Address start, size_t size); 810 bool UncommitBlock(Address start, size_t size);
793 811
794 // Zaps a contiguous block of memory [start..(start+size)[ thus 812 // Zaps a contiguous block of memory [start..(start+size)[ thus
795 // filling it up with a recognizable non-NULL bit pattern. 813 // filling it up with a recognizable non-NULL bit pattern.
796 static void ZapBlock(Address start, size_t size); 814 void ZapBlock(Address start, size_t size);
797 815
798 static void PerformAllocationCallback(ObjectSpace space, 816 void PerformAllocationCallback(ObjectSpace space,
799 AllocationAction action, 817 AllocationAction action,
800 size_t size); 818 size_t size);
801 819
802 static void AddMemoryAllocationCallback(MemoryAllocationCallback callback, 820 void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
803 ObjectSpace space, 821 ObjectSpace space,
804 AllocationAction action); 822 AllocationAction action);
805 823
806 static void RemoveMemoryAllocationCallback( 824 void RemoveMemoryAllocationCallback(
807 MemoryAllocationCallback callback); 825 MemoryAllocationCallback callback);
808 826
809 static bool MemoryAllocationCallbackRegistered( 827 bool MemoryAllocationCallbackRegistered(
810 MemoryAllocationCallback callback); 828 MemoryAllocationCallback callback);
811 829
812 830
831 // TODO(gc) ISOLATES
832 Isolate* isolate_;
813 833
814 private: 834 private:
835
815 // Maximum space size in bytes. 836 // Maximum space size in bytes.
816 static size_t capacity_; 837 size_t capacity_;
817 // Maximum subset of capacity_ that can be executable 838 // Maximum subset of capacity_ that can be executable
818 static size_t capacity_executable_; 839 size_t capacity_executable_;
819 840
820 // Allocated space size in bytes. 841 // Allocated space size in bytes.
821 static size_t size_; 842 size_t size_;
822 // Allocated executable space size in bytes. 843 // Allocated executable space size in bytes.
823 static size_t size_executable_; 844 size_t size_executable_;
824 845
825 struct MemoryAllocationCallbackRegistration { 846 struct MemoryAllocationCallbackRegistration {
826 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, 847 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
827 ObjectSpace space, 848 ObjectSpace space,
828 AllocationAction action) 849 AllocationAction action)
829 : callback(callback), space(space), action(action) { 850 : callback(callback), space(space), action(action) {
830 } 851 }
831 MemoryAllocationCallback callback; 852 MemoryAllocationCallback callback;
832 ObjectSpace space; 853 ObjectSpace space;
833 AllocationAction action; 854 AllocationAction action;
834 }; 855 };
856
835 // A List of callback that are triggered when memory is allocated or free'd 857 // A List of callback that are triggered when memory is allocated or free'd
836 static List<MemoryAllocationCallbackRegistration> 858 List<MemoryAllocationCallbackRegistration>
837 memory_allocation_callbacks_; 859 memory_allocation_callbacks_;
838 860
839 // Initializes pages in a chunk. Returns the first page address. 861 // Initializes pages in a chunk. Returns the first page address.
840 // This function and GetChunkId() are provided for the mark-compact 862 // This function and GetChunkId() are provided for the mark-compact
841 // collector to rebuild page headers in the from space, which is 863 // collector to rebuild page headers in the from space, which is
842 // used as a marking stack and its page headers are destroyed. 864 // used as a marking stack and its page headers are destroyed.
843 static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, 865 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
844 PagedSpace* owner); 866 PagedSpace* owner);
845 }; 867 };
846 868
847 869
848 // ----------------------------------------------------------------------------- 870 // -----------------------------------------------------------------------------
849 // Interface for heap object iterator to be implemented by all object space 871 // Interface for heap object iterator to be implemented by all object space
850 // object iterators. 872 // object iterators.
851 // 873 //
852 // NOTE: The space specific object iterators also implements the own next() 874 // NOTE: The space specific object iterators also implements the own next()
853 // method which is used to avoid using virtual functions 875 // method which is used to avoid using virtual functions
854 // iterating a specific space. 876 // iterating a specific space.
(...skipping 297 matching lines...) Expand 10 before | Expand all | Expand 10 after
1152 FreeListNode* large_list_; 1174 FreeListNode* large_list_;
1153 FreeListNode* huge_list_; 1175 FreeListNode* huge_list_;
1154 1176
1155 DISALLOW_IMPLICIT_CONSTRUCTORS(OldSpaceFreeList); 1177 DISALLOW_IMPLICIT_CONSTRUCTORS(OldSpaceFreeList);
1156 }; 1178 };
1157 1179
1158 1180
1159 class PagedSpace : public Space { 1181 class PagedSpace : public Space {
1160 public: 1182 public:
1161 // Creates a space with a maximum capacity, and an id. 1183 // Creates a space with a maximum capacity, and an id.
1162 PagedSpace(intptr_t max_capacity, 1184 PagedSpace(Heap* heap,
1185 intptr_t max_capacity,
1163 AllocationSpace id, 1186 AllocationSpace id,
1164 Executability executable); 1187 Executability executable);
1165 1188
1166 virtual ~PagedSpace() {} 1189 virtual ~PagedSpace() {}
1167 1190
1168 // Set up the space using the given address range of virtual memory (from 1191 // Set up the space using the given address range of virtual memory (from
1169 // the memory allocator's initial chunk) if possible. If the block of 1192 // the memory allocator's initial chunk) if possible. If the block of
1170 // addresses is not big enough to contain a single page-aligned page, a 1193 // addresses is not big enough to contain a single page-aligned page, a
1171 // fresh chunk will be allocated. 1194 // fresh chunk will be allocated.
1172 bool Setup(); 1195 bool Setup();
(...skipping 217 matching lines...) Expand 10 before | Expand all | Expand 10 after
1390 // ----------------------------------------------------------------------------- 1413 // -----------------------------------------------------------------------------
1391 // SemiSpace in young generation 1414 // SemiSpace in young generation
1392 // 1415 //
1393 // A semispace is a contiguous chunk of memory. The mark-compact collector 1416 // A semispace is a contiguous chunk of memory. The mark-compact collector
1394 // uses the memory in the from space as a marking stack when tracing live 1417 // uses the memory in the from space as a marking stack when tracing live
1395 // objects. 1418 // objects.
1396 1419
1397 class SemiSpace : public Space { 1420 class SemiSpace : public Space {
1398 public: 1421 public:
1399 // Constructor. 1422 // Constructor.
1400 SemiSpace() :Space(NEW_SPACE, NOT_EXECUTABLE) { 1423 explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
1401 start_ = NULL; 1424 start_ = NULL;
1402 age_mark_ = NULL; 1425 age_mark_ = NULL;
1403 } 1426 }
1404 1427
1405 // Sets up the semispace using the given chunk. 1428 // Sets up the semispace using the given chunk.
1406 bool Setup(Address start, int initial_capacity, int maximum_capacity); 1429 bool Setup(Address start, int initial_capacity, int maximum_capacity);
1407 1430
1408 // Tear down the space. Heap memory was not allocated by the space, so it 1431 // Tear down the space. Heap memory was not allocated by the space, so it
1409 // is not deallocated here. 1432 // is not deallocated here.
1410 void TearDown(); 1433 void TearDown();
(...skipping 123 matching lines...) Expand 10 before | Expand all | Expand 10 after
1534 int size = (size_func_ == NULL) ? object->Size() : size_func_(object); 1557 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
1535 1558
1536 current_ += size; 1559 current_ += size;
1537 return object; 1560 return object;
1538 } 1561 }
1539 1562
1540 // Implementation of the ObjectIterator functions. 1563 // Implementation of the ObjectIterator functions.
1541 virtual HeapObject* next_object() { return next(); } 1564 virtual HeapObject* next_object() { return next(); }
1542 1565
1543 private: 1566 private:
1544 void Initialize(NewSpace* space, Address start, Address end, 1567 void Initialize(NewSpace* space,
1568 Address start,
1569 Address end,
1545 HeapObjectCallback size_func); 1570 HeapObjectCallback size_func);
1546 1571
1547 // The semispace. 1572 // The semispace.
1548 SemiSpace* space_; 1573 SemiSpace* space_;
1549 // The current iteration point. 1574 // The current iteration point.
1550 Address current_; 1575 Address current_;
1551 // The end of iteration. 1576 // The end of iteration.
1552 Address limit_; 1577 Address limit_;
1553 // The callback function. 1578 // The callback function.
1554 HeapObjectCallback size_func_; 1579 HeapObjectCallback size_func_;
1555 }; 1580 };
1556 1581
1557 1582
1558 // ----------------------------------------------------------------------------- 1583 // -----------------------------------------------------------------------------
1559 // The young generation space. 1584 // The young generation space.
1560 // 1585 //
1561 // The new space consists of a contiguous pair of semispaces. It simply 1586 // The new space consists of a contiguous pair of semispaces. It simply
1562 // forwards most functions to the appropriate semispace. 1587 // forwards most functions to the appropriate semispace.
1563 1588
1564 class NewSpace : public Space { 1589 class NewSpace : public Space {
1565 public: 1590 public:
1566 // Constructor. 1591 // Constructor.
1567 NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {} 1592 explicit NewSpace(Heap* heap)
1593 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
1594 to_space_(heap),
1595 from_space_(heap) {}
1568 1596
1569 // Sets up the new space using the given chunk. 1597 // Sets up the new space using the given chunk.
1570 bool Setup(int max_semispace_size); 1598 bool Setup(int max_semispace_size);
1571 1599
1572 // Tears down the space. Heap memory was not allocated by the space, so it 1600 // Tears down the space. Heap memory was not allocated by the space, so it
1573 // is not deallocated here. 1601 // is not deallocated here.
1574 void TearDown(); 1602 void TearDown();
1575 1603
1576 // True if the space has been set up but not torn down. 1604 // True if the space has been set up but not torn down.
1577 bool HasBeenSetup() { 1605 bool HasBeenSetup() {
(...skipping 212 matching lines...) Expand 10 before | Expand all | Expand 10 after
1790 }; 1818 };
1791 1819
1792 1820
1793 // ----------------------------------------------------------------------------- 1821 // -----------------------------------------------------------------------------
1794 // Old object space (excluding map objects) 1822 // Old object space (excluding map objects)
1795 1823
1796 class OldSpace : public PagedSpace { 1824 class OldSpace : public PagedSpace {
1797 public: 1825 public:
1798 // Creates an old space object with a given maximum capacity. 1826 // Creates an old space object with a given maximum capacity.
1799 // The constructor does not allocate pages from OS. 1827 // The constructor does not allocate pages from OS.
1800 explicit OldSpace(intptr_t max_capacity, 1828 explicit OldSpace(Heap* heap,
1829 intptr_t max_capacity,
1801 AllocationSpace id, 1830 AllocationSpace id,
1802 Executability executable) 1831 Executability executable)
1803 : PagedSpace(max_capacity, id, executable) { 1832 : PagedSpace(heap, max_capacity, id, executable) {
1804 page_extra_ = 0; 1833 page_extra_ = 0;
1805 } 1834 }
1806 1835
1807 // The limit of allocation for a page in this space. 1836 // The limit of allocation for a page in this space.
1808 virtual Address PageAllocationLimit(Page* page) { 1837 virtual Address PageAllocationLimit(Page* page) {
1809 return page->ObjectAreaEnd(); 1838 return page->ObjectAreaEnd();
1810 } 1839 }
1811 1840
1812 // Prepare for full garbage collection. Resets the relocation pointer and 1841 // Prepare for full garbage collection. Resets the relocation pointer and
1813 // clears the free list. 1842 // clears the free list.
(...skipping 10 matching lines...) Expand all
1824 ASSERT((space).low() <= (info).top \ 1853 ASSERT((space).low() <= (info).top \
1825 && (info).top <= (space).high() \ 1854 && (info).top <= (space).high() \
1826 && (info).limit <= (space).high()) 1855 && (info).limit <= (space).high())
1827 1856
1828 1857
1829 // ----------------------------------------------------------------------------- 1858 // -----------------------------------------------------------------------------
1830 // Old space for objects of a fixed size 1859 // Old space for objects of a fixed size
1831 1860
1832 class FixedSpace : public PagedSpace { 1861 class FixedSpace : public PagedSpace {
1833 public: 1862 public:
1834 FixedSpace(intptr_t max_capacity, 1863 FixedSpace(Heap* heap,
1864 intptr_t max_capacity,
1835 AllocationSpace id, 1865 AllocationSpace id,
1836 int object_size_in_bytes, 1866 int object_size_in_bytes,
1837 const char* name) 1867 const char* name)
1838 : PagedSpace(max_capacity, id, NOT_EXECUTABLE), 1868 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
1839 object_size_in_bytes_(object_size_in_bytes), 1869 object_size_in_bytes_(object_size_in_bytes),
1840 name_(name) { 1870 name_(name) {
1841 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; 1871 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
1842 } 1872 }
1843 1873
1844 // The limit of allocation for a page in this space. 1874 // The limit of allocation for a page in this space.
1845 virtual Address PageAllocationLimit(Page* page) { 1875 virtual Address PageAllocationLimit(Page* page) {
1846 return page->ObjectAreaEnd() - page_extra_; 1876 return page->ObjectAreaEnd() - page_extra_;
1847 } 1877 }
1848 1878
(...skipping 17 matching lines...) Expand all
1866 const char* name_; 1896 const char* name_;
1867 }; 1897 };
1868 1898
1869 1899
1870 // ----------------------------------------------------------------------------- 1900 // -----------------------------------------------------------------------------
1871 // Old space for all map objects 1901 // Old space for all map objects
1872 1902
1873 class MapSpace : public FixedSpace { 1903 class MapSpace : public FixedSpace {
1874 public: 1904 public:
1875 // Creates a map space object with a maximum capacity. 1905 // Creates a map space object with a maximum capacity.
1876 MapSpace(intptr_t max_capacity, int max_map_space_pages, AllocationSpace id) 1906 MapSpace(Heap* heap,
1877 : FixedSpace(max_capacity, id, Map::kSize, "map"), 1907 intptr_t max_capacity,
1908 int max_map_space_pages,
1909 AllocationSpace id)
1910 : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
1878 max_map_space_pages_(max_map_space_pages) { 1911 max_map_space_pages_(max_map_space_pages) {
1879 } 1912 }
1880 1913
1881 // Prepares for a mark-compact GC. 1914 // Prepares for a mark-compact GC.
1882 virtual void PrepareForMarkCompact(bool will_compact); 1915 virtual void PrepareForMarkCompact(bool will_compact);
1883 1916
1884 // Given an index, returns the page address. 1917 // Given an index, returns the page address.
1885 // TODO(gc): this limit is artifical just to keep code compilable 1918 // TODO(gc): this limit is artifical just to keep code compilable
1886 static const int kMaxMapPageIndex = 1 << 16; 1919 static const int kMaxMapPageIndex = 1 << 16;
1887 1920
(...skipping 27 matching lines...) Expand all
1915 TRACK_MEMORY("MapSpace") 1948 TRACK_MEMORY("MapSpace")
1916 }; 1949 };
1917 1950
1918 1951
1919 // ----------------------------------------------------------------------------- 1952 // -----------------------------------------------------------------------------
1920 // Old space for all global object property cell objects 1953 // Old space for all global object property cell objects
1921 1954
1922 class CellSpace : public FixedSpace { 1955 class CellSpace : public FixedSpace {
1923 public: 1956 public:
1924 // Creates a property cell space object with a maximum capacity. 1957 // Creates a property cell space object with a maximum capacity.
1925 CellSpace(intptr_t max_capacity, AllocationSpace id) 1958 CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
1926 : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {} 1959 : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
1960 {}
1927 1961
1928 protected: 1962 protected:
1929 #ifdef DEBUG 1963 #ifdef DEBUG
1930 virtual void VerifyObject(HeapObject* obj); 1964 virtual void VerifyObject(HeapObject* obj);
1931 #endif 1965 #endif
1932 1966
1933 public: 1967 public:
1934 TRACK_MEMORY("CellSpace") 1968 TRACK_MEMORY("CellSpace")
1935 }; 1969 };
1936 1970
1937 1971
1938 // ----------------------------------------------------------------------------- 1972 // -----------------------------------------------------------------------------
1939 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by 1973 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
1940 // the large object space. A large object is allocated from OS heap with 1974 // the large object space. A large object is allocated from OS heap with
1941 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). 1975 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
1942 // A large object always starts at Page::kObjectStartOffset to a page. 1976 // A large object always starts at Page::kObjectStartOffset to a page.
1943 // Large objects do not move during garbage collections. 1977 // Large objects do not move during garbage collections.
1944 1978
1945 class LargeObjectSpace : public Space { 1979 class LargeObjectSpace : public Space {
1946 public: 1980 public:
1947 explicit LargeObjectSpace(AllocationSpace id); 1981 LargeObjectSpace(Heap* heap, AllocationSpace id);
1948 virtual ~LargeObjectSpace() {} 1982 virtual ~LargeObjectSpace() {}
1949 1983
1950 // Initializes internal data structures. 1984 // Initializes internal data structures.
1951 bool Setup(); 1985 bool Setup();
1952 1986
1953 // Releases internal resources, frees objects in this space. 1987 // Releases internal resources, frees objects in this space.
1954 void TearDown(); 1988 void TearDown();
1955 1989
1956 // Allocates a (non-FixedArray, non-Code) large object. 1990 // Allocates a (non-FixedArray, non-Code) large object.
1957 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes); 1991 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
1958 // Allocates a large Code object. 1992 // Allocates a large Code object.
1959 MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes); 1993 MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
1960 // Allocates a large FixedArray. 1994 // Allocates a large FixedArray.
1961 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes); 1995 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
1962 1996
1963 static intptr_t ObjectSizeFor(intptr_t chunk_size) { 1997 static intptr_t ObjectSizeFor(intptr_t chunk_size) {
1964 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; 1998 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
1965 return chunk_size - Page::kPageSize - Page::kObjectStartOffset; 1999 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
1966 } 2000 }
1967 2001
1968 // Available bytes for objects in this space. 2002 // Available bytes for objects in this space.
1969 intptr_t Available() { 2003 inline intptr_t Available();
1970 return ObjectSizeFor(MemoryAllocator::Available());
1971 }
1972 2004
1973 virtual intptr_t Size() { 2005 virtual intptr_t Size() {
1974 return size_; 2006 return size_;
1975 } 2007 }
1976 2008
1977 virtual intptr_t SizeOfObjects() { 2009 virtual intptr_t SizeOfObjects() {
1978 return objects_size_; 2010 return objects_size_;
1979 } 2011 }
1980 2012
1981 int PageCount() { 2013 int PageCount() {
(...skipping 131 matching lines...) Expand 10 before | Expand all | Expand 10 after
2113 kLargeObjectState, 2145 kLargeObjectState,
2114 kFinishedState 2146 kFinishedState
2115 }; 2147 };
2116 State state_; 2148 State state_;
2117 PageIterator old_pointer_iterator_; 2149 PageIterator old_pointer_iterator_;
2118 PageIterator map_iterator_; 2150 PageIterator map_iterator_;
2119 LargeObjectIterator lo_iterator_; 2151 LargeObjectIterator lo_iterator_;
2120 }; 2152 };
2121 2153
2122 2154
2155 #ifdef DEBUG
2156 struct CommentStatistic {
2157 const char* comment;
2158 int size;
2159 int count;
2160 void Clear() {
2161 comment = NULL;
2162 size = 0;
2163 count = 0;
2164 }
2165 // Must be small, since an iteration is used for lookup.
2166 static const int kMaxComments = 64;
2167 };
2168 #endif
2169
2170
2123 } } // namespace v8::internal 2171 } } // namespace v8::internal
2124 2172
2125 #endif // V8_SPACES_H_ 2173 #endif // V8_SPACES_H_
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698