Chromium Code Reviews

Side by Side Diff: src/spaces.h

Issue 9179012: Reduce boot-up memory use of V8. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 8 years, 11 months ago
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 484 matching lines...)
495 static const int kBodyOffset = 495 static const int kBodyOffset =
496 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize)); 496 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
497 497
498 // The start offset of the object area in a page. Aligned to both maps and 498 // The start offset of the object area in a page. Aligned to both maps and
499 // code alignment to be suitable for both. Also aligned to 32 words because 499 // code alignment to be suitable for both. Also aligned to 32 words because
500 // the marking bitmap is arranged in 32 bit chunks. 500 // the marking bitmap is arranged in 32 bit chunks.
501 static const int kObjectStartAlignment = 32 * kPointerSize; 501 static const int kObjectStartAlignment = 32 * kPointerSize;
502 static const int kObjectStartOffset = kBodyOffset - 1 + 502 static const int kObjectStartOffset = kBodyOffset - 1 +
503 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); 503 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
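The kObjectStartOffset expression above is the usual round-up-to-alignment idiom: it produces the smallest multiple of kObjectStartAlignment that is greater than or equal to kBodyOffset. A minimal standalone sketch of the arithmetic (the constants are illustrative and assume a 32-bit kPointerSize, not V8's actual values):

#include <cassert>
#include <cstdio>

// Mirrors the header's expression: round body_offset up to a multiple of alignment.
static int RoundUpLikeHeader(int body_offset, int alignment) {
  return body_offset - 1 + (alignment - (body_offset - 1) % alignment);
}

int main() {
  const int kPointerSize = 4;                            // assumption: 32-bit build
  const int kObjectStartAlignment = 32 * kPointerSize;   // 128 bytes
  for (int body = 1; body <= 4 * kObjectStartAlignment; body++) {
    int start = RoundUpLikeHeader(body, kObjectStartAlignment);
    assert(start % kObjectStartAlignment == 0);                      // always aligned
    assert(start >= body && start - body < kObjectStartAlignment);   // smallest such value
  }
  printf("kBodyOffset=100 -> kObjectStartOffset=%d\n",
         RoundUpLikeHeader(100, kObjectStartAlignment));  // prints 128
  return 0;
}

The "- 1" terms make a value that is already aligned map to itself rather than to the next boundary.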
504 504
505 size_t size() const { return size_; } 505 intptr_t size() const { return size_; }
506 506
507 void set_size(size_t size) { 507 void set_size(size_t size) { size_ = size; }
508 size_ = size;
509 }
510 508
511 Executability executable() { 509 Executability executable() {
512 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; 510 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
513 } 511 }
514 512
515 bool ContainsOnlyData() { 513 bool ContainsOnlyData() {
516 return IsFlagSet(CONTAINS_ONLY_DATA); 514 return IsFlagSet(CONTAINS_ONLY_DATA);
517 } 515 }
518 516
519 bool InNewSpace() { 517 bool InNewSpace() {
(...skipping 131 matching lines...)
651 // Returns the next page in the chain of pages owned by a space. 649 // Returns the next page in the chain of pages owned by a space.
652 inline Page* next_page(); 650 inline Page* next_page();
653 inline Page* prev_page(); 651 inline Page* prev_page();
654 inline void set_next_page(Page* page); 652 inline void set_next_page(Page* page);
655 inline void set_prev_page(Page* page); 653 inline void set_prev_page(Page* page);
656 654
657 // Returns the start address of the object area in this page. 655 // Returns the start address of the object area in this page.
658 Address ObjectAreaStart() { return address() + kObjectStartOffset; } 656 Address ObjectAreaStart() { return address() + kObjectStartOffset; }
659 657
660 // Returns the end address (exclusive) of the object area in this page. 658 // Returns the end address (exclusive) of the object area in this page.
661 Address ObjectAreaEnd() { return address() + Page::kPageSize; } 659 Address ObjectAreaEnd() { return address() + size(); }
662 660
663 // Checks whether an address is page aligned. 661 // Checks whether an address is page aligned.
664 static bool IsAlignedToPageSize(Address a) { 662 static bool IsAlignedToPageSize(Address a) {
665 return 0 == (OffsetFrom(a) & kPageAlignmentMask); 663 return 0 == (OffsetFrom(a) & kPageAlignmentMask);
666 } 664 }
667 665
668 // Returns the offset of a given address to this page. 666 // Returns the offset of a given address to this page.
669 INLINE(int Offset(Address a)) { 667 INLINE(int Offset(Address a)) {
670 int offset = static_cast<int>(a - address()); 668 int offset = static_cast<int>(a - address());
671 return offset; 669 return offset;
672 } 670 }
673 671
674 // Returns the address for a given offset within this page. 672 // Returns the address for a given offset within this page.
675 Address OffsetToAddress(int offset) { 673 Address OffsetToAddress(int offset) {
676 ASSERT_PAGE_OFFSET(offset); 674 ASSERT_PAGE_OFFSET(offset);
677 return address() + offset; 675 return address() + offset;
678 } 676 }
679 677
678 // Expand the committed area for pages that are small. This
679 // happens primarily when the VM is newly booted.
680 void CommitMore(intptr_t space_needed);
681
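CommitMore is new in this patch; the idea behind it (and behind the issue title) appears to be that a page reserves its full address range but only commits part of it at boot, growing the committed area on demand. A rough self-contained sketch of that reserve-versus-commit bookkeeping, with all names and the 4 KB OS page size being assumptions rather than V8's implementation:

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Hypothetical sketch: a chunk reserves `reserved` bytes of address space but
// starts life with only `committed` bytes actually backed by memory.
class DemandCommittedChunk {
 public:
  DemandCommittedChunk(size_t reserved, size_t committed)
      : reserved_(reserved), committed_(committed) {}

  // Grow the committed area enough for `space_needed` more bytes, rounded up
  // to an assumed 4 KB OS page and capped at the reserved range.  A real
  // implementation would also commit the new range with the OS here.
  void CommitMore(size_t space_needed) {
    const size_t kOSPageSize = 4096;
    size_t wanted = committed_ + space_needed;
    wanted = (wanted + kOSPageSize - 1) & ~(kOSPageSize - 1);
    committed_ = std::min(wanted, reserved_);
  }

  size_t committed() const { return committed_; }

 private:
  size_t reserved_;
  size_t committed_;
};

int main() {
  DemandCommittedChunk chunk(1u << 20, 16u << 10);  // 1 MB reserved, 16 KB committed
  chunk.CommitMore(100);                            // small request just past the boundary
  printf("committed grew to %zu bytes\n", chunk.committed());  // 20480
  return 0;
}

This matches the direction suggested by the committed_body_size and committed parameters added further down in this file, though the real accounting lives in spaces.cc.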
680 // --------------------------------------------------------------------- 682 // ---------------------------------------------------------------------
681 683
682 // Page size in bytes. This must be a multiple of the OS page size. 684 // Page size in bytes. This must be a multiple of the OS page size.
683 static const int kPageSize = 1 << kPageSizeBits; 685 static const int kPageSize = 1 << kPageSizeBits;
684 686
685 // Page size mask. 687 // Page size mask.
686 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; 688 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
687 689
688 // Object area size in bytes. 690 // Object area size in bytes.
689 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; 691 static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
(...skipping 149 matching lines...)
839 Isolate* isolate_; 841 Isolate* isolate_;
840 842
841 // The reserved range of virtual memory that all code objects are put in. 843 // The reserved range of virtual memory that all code objects are put in.
842 VirtualMemory* code_range_; 844 VirtualMemory* code_range_;
843 // Plain old data class, just a struct plus a constructor. 845 // Plain old data class, just a struct plus a constructor.
844 class FreeBlock { 846 class FreeBlock {
845 public: 847 public:
846 FreeBlock(Address start_arg, size_t size_arg) 848 FreeBlock(Address start_arg, size_t size_arg)
847 : start(start_arg), size(size_arg) { 849 : start(start_arg), size(size_arg) {
848 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); 850 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
849 ASSERT(size >= static_cast<size_t>(Page::kPageSize));
850 } 851 }
851 FreeBlock(void* start_arg, size_t size_arg) 852 FreeBlock(void* start_arg, size_t size_arg)
852 : start(static_cast<Address>(start_arg)), size(size_arg) { 853 : start(static_cast<Address>(start_arg)), size(size_arg) {
853 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); 854 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
854 ASSERT(size >= static_cast<size_t>(Page::kPageSize));
855 } 855 }
856 856
857 Address start; 857 Address start;
858 size_t size; 858 size_t size;
859 }; 859 };
860 860
861 // Freed blocks of memory are added to the free list. When the allocation 861 // Freed blocks of memory are added to the free list. When the allocation
862 // list is exhausted, the free list is sorted and merged to make the new 862 // list is exhausted, the free list is sorted and merged to make the new
863 // allocation list. 863 // allocation list.
864 List<FreeBlock> free_list_; 864 List<FreeBlock> free_list_;
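The comment above describes a sort-and-merge rebuild: when the allocation list runs dry, the freed blocks are sorted by address and adjacent ones coalesced to form the new allocation list. A small self-contained sketch of that step (simplified types, not V8's FreeBlock/List):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Block { uintptr_t start; size_t size; };

// Sort freed blocks by address and coalesce blocks that touch, producing the
// new allocation list.  Mirrors the comment above in spirit only.
static std::vector<Block> SortAndMerge(std::vector<Block> free_list) {
  std::sort(free_list.begin(), free_list.end(),
            [](const Block& a, const Block& b) { return a.start < b.start; });
  std::vector<Block> merged;
  for (const Block& b : free_list) {
    if (!merged.empty() &&
        merged.back().start + merged.back().size == b.start) {
      merged.back().size += b.size;  // abuts the previous block: coalesce
    } else {
      merged.push_back(b);
    }
  }
  return merged;
}

int main() {
  // Three freed blocks that form one contiguous run once sorted.
  std::vector<Block> freed = {{0x3000, 0x1000}, {0x1000, 0x1000}, {0x2000, 0x1000}};
  std::vector<Block> alloc_list = SortAndMerge(freed);
  printf("%zu block(s), first covers 0x%zx bytes\n",
         alloc_list.size(), alloc_list[0].size);  // 1 block of 0x3000 bytes
  return 0;
}

Sorting by address first is what makes adjacency detectable in a single linear pass.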
(...skipping 75 matching lines...)
940 class MemoryAllocator { 940 class MemoryAllocator {
941 public: 941 public:
942 explicit MemoryAllocator(Isolate* isolate); 942 explicit MemoryAllocator(Isolate* isolate);
943 943
944 // Initializes its internal bookkeeping structures. 944 // Initializes its internal bookkeeping structures.
945 // Max capacity of the total space and executable memory limit. 945 // Max capacity of the total space and executable memory limit.
946 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable); 946 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
947 947
948 void TearDown(); 948 void TearDown();
949 949
950 Page* AllocatePage(PagedSpace* owner, Executability executable); 950 Page* AllocatePage(intptr_t object_area_size,
951 PagedSpace* owner,
952 Executability executable);
951 953
952 LargePage* AllocateLargePage(intptr_t object_size, 954 LargePage* AllocateLargePage(intptr_t object_size,
953 Executability executable, 955 Executability executable,
954 Space* owner); 956 Space* owner);
955 957
956 void Free(MemoryChunk* chunk); 958 void Free(MemoryChunk* chunk);
957 959
958 // Returns the maximum available bytes of heaps. 960 // Returns the maximum available bytes of heaps.
959 intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } 961 intptr_t Available() {
962 return capacity_ < memory_allocator_reserved_ ?
963 0 :
964 capacity_ - memory_allocator_reserved_;
965 }
960 966
961 // Returns allocated spaces in bytes. 967 // Returns allocated spaces in bytes.
962 intptr_t Size() { return size_; } 968 intptr_t Size() { return memory_allocator_reserved_; }
963 969
964 // Returns the maximum available executable bytes of heaps. 970 // Returns the maximum available executable bytes of heaps.
965 intptr_t AvailableExecutable() { 971 intptr_t AvailableExecutable() {
966 if (capacity_executable_ < size_executable_) return 0; 972 if (capacity_executable_ < size_executable_) return 0;
967 return capacity_executable_ - size_executable_; 973 return capacity_executable_ - size_executable_;
968 } 974 }
969 975
970 // Returns allocated executable spaces in bytes. 976 // Returns allocated executable spaces in bytes.
971 intptr_t SizeExecutable() { return size_executable_; } 977 intptr_t SizeExecutable() { return size_executable_; }
972 978
973 // Returns maximum available bytes that the old space can have. 979 // Returns maximum available bytes that the old space can have.
974 intptr_t MaxAvailable() { 980 intptr_t MaxAvailable() {
975 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; 981 return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
976 } 982 }
977 983
978 #ifdef DEBUG 984 #ifdef DEBUG
979 // Reports statistic info of the space. 985 // Reports statistic info of the space.
980 void ReportStatistics(); 986 void ReportStatistics();
981 #endif 987 #endif
982 988
983 MemoryChunk* AllocateChunk(intptr_t body_size, 989 MemoryChunk* AllocateChunk(intptr_t body_size,
990 intptr_t committed_body_size,
984 Executability executable, 991 Executability executable,
985 Space* space); 992 Space* space);
986 993
987 Address ReserveAlignedMemory(size_t requested, 994 Address ReserveAlignedMemory(size_t requested,
988 size_t alignment, 995 size_t alignment,
989 VirtualMemory* controller); 996 VirtualMemory* controller);
990 Address AllocateAlignedMemory(size_t requested, 997 Address AllocateAlignedMemory(size_t requested,
998 size_t committed,
991 size_t alignment, 999 size_t alignment,
992 Executability executable, 1000 Executability executable,
993 VirtualMemory* controller); 1001 VirtualMemory* controller);
994 1002
995 void FreeMemory(VirtualMemory* reservation, Executability executable); 1003 void FreeMemory(VirtualMemory* reservation, Executability executable);
996 void FreeMemory(Address addr, size_t size, Executability executable); 1004 void FreeMemory(Address addr, size_t size, Executability executable);
997 1005
998 // Commit a contiguous block of memory from the initial chunk. Assumes that 1006 // Commit a contiguous block of memory from the initial chunk. Assumes that
999 // the address is not NULL, the size is greater than zero, and that the 1007 // the address is not NULL, the size is greater than zero, and that the
1000 // block is contained in the initial chunk. Returns true if it succeeded 1008 // block is contained in the initial chunk. Returns true if it succeeded
1001 // and false otherwise. 1009 // and false otherwise.
1002 bool CommitBlock(Address start, size_t size, Executability executable); 1010 bool CommitBlock(Address start, size_t size, Executability executable);
1003 1011
1004 // Uncommit a contiguous block of memory [start..(start+size)[. 1012 // Uncommit a contiguous block of memory [start..(start+size)[.
1005 // start is not NULL, the size is greater than zero, and the 1013 // start is not NULL, the size is greater than zero, and the
1006 // block is contained in the initial chunk. Returns true if it succeeded 1014 // block is contained in the initial chunk. Returns true if it succeeded
1007 // and false otherwise. 1015 // and false otherwise.
1008 bool UncommitBlock(Address start, size_t size); 1016 bool UncommitBlock(Address start, size_t size);
1009 1017
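CommitBlock and UncommitBlock operate on half-open blocks [start..start+size[ inside the already reserved initial chunk. For orientation only, a POSIX sketch of the general reserve/commit/uncommit pattern they wrap; V8 itself goes through its VirtualMemory abstraction, so this is not its code:

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kReserved = 1 << 20;   // reserve 1 MB of address space, no backing yet
  void* base = mmap(nullptr, kReserved, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return 1;

  // "Commit" the half-open block [base .. base + 64 KB[ by making it accessible.
  const size_t kBlock = 64 << 10;
  if (mprotect(base, kBlock, PROT_READ | PROT_WRITE) != 0) return 1;
  static_cast<char*>(base)[0] = 1;    // safe to touch now

  // "Uncommit": drop the pages and make the range inaccessible again.
  madvise(base, kBlock, MADV_DONTNEED);
  mprotect(base, kBlock, PROT_NONE);

  munmap(base, kReserved);
  printf("reserved, committed and uncommitted a 64 KB block\n");
  return 0;
}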
1018 void AllocationBookkeeping(Space* owner,
1019 Address base,
1020 intptr_t reserved_size,
1021 intptr_t committed_size,
1022 Executability executable);
1023
1010 // Zaps a contiguous block of memory [start..(start+size)[ thus 1024 // Zaps a contiguous block of memory [start..(start+size)[ thus
1011 // filling it up with a recognizable non-NULL bit pattern. 1025 // filling it up with a recognizable non-NULL bit pattern.
1012 void ZapBlock(Address start, size_t size); 1026 void ZapBlock(Address start, size_t size);
1013 1027
1014 void PerformAllocationCallback(ObjectSpace space, 1028 void PerformAllocationCallback(ObjectSpace space,
1015 AllocationAction action, 1029 AllocationAction action,
1016 size_t size); 1030 size_t size);
1017 1031
1018 void AddMemoryAllocationCallback(MemoryAllocationCallback callback, 1032 void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
1019 ObjectSpace space, 1033 ObjectSpace space,
1020 AllocationAction action); 1034 AllocationAction action);
1021 1035
1022 void RemoveMemoryAllocationCallback( 1036 void RemoveMemoryAllocationCallback(
1023 MemoryAllocationCallback callback); 1037 MemoryAllocationCallback callback);
1024 1038
1025 bool MemoryAllocationCallbackRegistered( 1039 bool MemoryAllocationCallbackRegistered(
1026 MemoryAllocationCallback callback); 1040 MemoryAllocationCallback callback);
1027 1041
1028 private: 1042 private:
1029 Isolate* isolate_; 1043 Isolate* isolate_;
1030 1044
1031 // Maximum space size in bytes. 1045 // Maximum space size in bytes.
1032 size_t capacity_; 1046 size_t capacity_;
1033 // Maximum subset of capacity_ that can be executable 1047 // Maximum subset of capacity_ that can be executable
1034 size_t capacity_executable_; 1048 size_t capacity_executable_;
1035 1049
1036 // Allocated space size in bytes. 1050 // Allocated space size in bytes.
1037 size_t size_; 1051 size_t memory_allocator_reserved_;
1038 // Allocated executable space size in bytes. 1052 // Allocated executable space size in bytes.
1039 size_t size_executable_; 1053 size_t size_executable_;
1040 1054
1041 struct MemoryAllocationCallbackRegistration { 1055 struct MemoryAllocationCallbackRegistration {
1042 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, 1056 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
1043 ObjectSpace space, 1057 ObjectSpace space,
1044 AllocationAction action) 1058 AllocationAction action)
1045 : callback(callback), space(space), action(action) { 1059 : callback(callback), space(space), action(action) {
1046 } 1060 }
1047 MemoryAllocationCallback callback; 1061 MemoryAllocationCallback callback;
(...skipping 324 matching lines...)
1372 1386
1373 void CountFreeListItems(Page* p, SizeStats* sizes); 1387 void CountFreeListItems(Page* p, SizeStats* sizes);
1374 1388
1375 intptr_t EvictFreeListItems(Page* p); 1389 intptr_t EvictFreeListItems(Page* p);
1376 1390
1377 private: 1391 private:
1378 // The size range of blocks, in bytes. 1392 // The size range of blocks, in bytes.
1379 static const int kMinBlockSize = 3 * kPointerSize; 1393 static const int kMinBlockSize = 3 * kPointerSize;
1380 static const int kMaxBlockSize = Page::kMaxHeapObjectSize; 1394 static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
1381 1395
1382 FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size); 1396 FreeListNode* PickNodeFromList(FreeListNode** list,
1397 int* node_size,
1398 int minimum_size);
1383 1399
1384 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); 1400 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size, Address limit);
1401 FreeListNode* FindAbuttingNode(int size_in_bytes,
1402 int* node_size,
1403 Address limit,
1404 FreeListNode** list_head);
1385 1405
1386 PagedSpace* owner_; 1406 PagedSpace* owner_;
1387 Heap* heap_; 1407 Heap* heap_;
1388 1408
1389 // Total available bytes in all blocks on this free list. 1409 // Total available bytes in all blocks on this free list.
1390 int available_; 1410 int available_;
1391 1411
1392 static const int kSmallListMin = 0x20 * kPointerSize; 1412 static const int kSmallListMin = 0x20 * kPointerSize;
1393 static const int kSmallListMax = 0xff * kPointerSize; 1413 static const int kSmallListMax = 0xff * kPointerSize;
1394 static const int kMediumListMax = 0x7ff * kPointerSize; 1414 static const int kMediumListMax = 0x7ff * kPointerSize;
(...skipping 79 matching lines...)
1474 virtual intptr_t SizeOfObjects() { 1494 virtual intptr_t SizeOfObjects() {
1475 ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0)); 1495 ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0));
1476 return Size() - unswept_free_bytes_ - (limit() - top()); 1496 return Size() - unswept_free_bytes_ - (limit() - top());
1477 } 1497 }
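SizeOfObjects() starts from the space's allocated size and subtracts both the free bytes the lazy sweeper has not yet recovered and the unused tail of the current linear allocation area, limit() - top(). A tiny numeric sketch with made-up values:

#include <cstdint>
#include <cstdio>

int main() {
  intptr_t size = 8 * 1024 * 1024;           // bytes the space has allocated in total
  intptr_t unswept_free_bytes = 300 * 1024;  // estimated free bytes on not-yet-swept pages
  intptr_t top = 0x2000;                     // current linear allocation pointer
  intptr_t limit = 0x4000;                   // end of the linear allocation area
  intptr_t size_of_objects = size - unswept_free_bytes - (limit - top);
  printf("about %ld bytes are actually occupied by objects\n",
         (long)size_of_objects);
  return 0;
}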
1478 1498
1479 // Wasted bytes in this space. These are just the bytes that were thrown away 1499 // Wasted bytes in this space. These are just the bytes that were thrown away
1480 // due to being too small to use for allocation. They do not include the 1500 // due to being too small to use for allocation. They do not include the
1481 // free bytes that were not found at all due to lazy sweeping. 1501 // free bytes that were not found at all due to lazy sweeping.
1482 virtual intptr_t Waste() { return accounting_stats_.Waste(); } 1502 virtual intptr_t Waste() { return accounting_stats_.Waste(); }
1483 1503
1504 virtual int ObjectAlignment() { return kPointerSize; }
Vyacheslav Egorov (Chromium) 2012/01/16 17:02:54 kObjectAlignment instead of kPointerSize?
Erik Corry 2012/01/17 11:37:22 Done.
1505
1484 // Returns the allocation pointer in this space. 1506 // Returns the allocation pointer in this space.
1485 Address top() { return allocation_info_.top; } 1507 Address top() { return allocation_info_.top; }
1486 Address limit() { return allocation_info_.limit; } 1508 Address limit() { return allocation_info_.limit; }
1487 1509
1488 // Allocate the requested number of bytes in the space if possible, return a 1510 // Allocate the requested number of bytes in the space if possible, return a
1489 // failure object if not. 1511 // failure object if not.
1490 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); 1512 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
1491 1513
1492 virtual bool ReserveSpace(int bytes); 1514 virtual bool ReserveSpace(int bytes);
1493 1515
1494 // Give a block of memory to the space's free list. It might be added to 1516 // Give a block of memory to the space's free list. It might be added to
1495 // the free list or accounted as waste. 1517 // the free list or accounted as waste.
1496 // If add_to_freelist is false then just accounting stats are updated and 1518 // If add_to_freelist is false then just accounting stats are updated and
1497 // no attempt to add area to free list is made. 1519 // no attempt to add area to free list is made.
1498 int Free(Address start, int size_in_bytes) { 1520 int AddToFreeLists(Address start, int size_in_bytes) {
1499 int wasted = free_list_.Free(start, size_in_bytes); 1521 int wasted = free_list_.Free(start, size_in_bytes);
1500 accounting_stats_.DeallocateBytes(size_in_bytes - wasted); 1522 accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
1501 return size_in_bytes - wasted; 1523 return size_in_bytes - wasted;
1502 } 1524 }
1503 1525
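AddToFreeLists reports back only the bytes that actually became reusable: the free list returns the bytes it had to discard as waste (regions too small to hold a free-list node), and only the remainder is deallocated in the accounting stats. A hypothetical sketch of where such waste comes from, using the kMinBlockSize declared further up and assuming 32-bit pointers:

#include <cstdio>

static const int kMinBlockSize = 3 * 4;  // 3 * kPointerSize, assuming 32-bit pointers

// Returns the bytes that could NOT be linked into the free list (the "waste").
static int WastedBytes(int size_in_bytes) {
  if (size_in_bytes < kMinBlockSize) return size_in_bytes;  // too small: all wasted
  return 0;                                                 // large enough: fully reusable
}

int main() {
  const int sizes[] = {4, 8, 12, 64};
  for (int size : sizes) {
    int wasted = WastedBytes(size);
    printf("free %2d bytes -> %2d reusable, %2d wasted\n",
           size, size - wasted, wasted);
  }
  return 0;
}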
1504 // Set space allocation info. 1526 // Set space allocation info.
1505 void SetTop(Address top, Address limit) { 1527 void SetTop(Address top, Address limit) {
1528 ASSERT(top == NULL || top >= Page::FromAddress(top - 1)->ObjectAreaStart());
1506 ASSERT(top == limit || 1529 ASSERT(top == limit ||
1507 Page::FromAddress(top) == Page::FromAddress(limit - 1)); 1530 Page::FromAddress(top) == Page::FromAddress(limit - 1));
1508 allocation_info_.top = top; 1531 allocation_info_.top = top;
1509 allocation_info_.limit = limit; 1532 allocation_info_.limit = limit;
1510 } 1533 }
1511 1534
1512 void Allocate(int bytes) { 1535 void Allocate(int bytes) {
1513 accounting_stats_.AllocateBytes(bytes); 1536 accounting_stats_.AllocateBytes(bytes);
1514 } 1537 }
1515 1538
(...skipping 50 matching lines...)
1566 void MarkPageForLazySweeping(Page* p) { 1589 void MarkPageForLazySweeping(Page* p) {
1567 unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes()); 1590 unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes());
1568 } 1591 }
1569 1592
1570 bool AdvanceSweeper(intptr_t bytes_to_sweep); 1593 bool AdvanceSweeper(intptr_t bytes_to_sweep);
1571 1594
1572 bool IsSweepingComplete() { 1595 bool IsSweepingComplete() {
1573 return !first_unswept_page_->is_valid(); 1596 return !first_unswept_page_->is_valid();
1574 } 1597 }
1575 1598
1599 inline bool HasAPage() { return anchor_.next_page() != &anchor_; }
1576 Page* FirstPage() { return anchor_.next_page(); } 1600 Page* FirstPage() { return anchor_.next_page(); }
1577 Page* LastPage() { return anchor_.prev_page(); } 1601 Page* LastPage() { return anchor_.prev_page(); }
1578 1602
1579 // Returns zero for pages that have so little fragmentation that it is not 1603 // Returns zero for pages that have so little fragmentation that it is not
1580 // worth defragmenting them. Otherwise a positive integer that gives an 1604 // worth defragmenting them. Otherwise a positive integer that gives an
1581 // estimate of fragmentation on an arbitrary scale. 1605 // estimate of fragmentation on an arbitrary scale.
1582 int Fragmentation(Page* p) { 1606 int Fragmentation(Page* p) {
1583 FreeList::SizeStats sizes; 1607 FreeList::SizeStats sizes;
1584 free_list_.CountFreeListItems(p, &sizes); 1608 free_list_.CountFreeListItems(p, &sizes);
1585 1609
(...skipping 52 matching lines...)
1638 1662
1639 // The dummy page that anchors the double linked list of pages. 1663 // The dummy page that anchors the double linked list of pages.
1640 Page anchor_; 1664 Page anchor_;
1641 1665
1642 // The space's free list. 1666 // The space's free list.
1643 FreeList free_list_; 1667 FreeList free_list_;
1644 1668
1645 // Normal allocation information. 1669 // Normal allocation information.
1646 AllocationInfo allocation_info_; 1670 AllocationInfo allocation_info_;
1647 1671
1648 // Bytes of each page that cannot be allocated. Possibly non-zero
1649 // for pages in spaces with only fixed-size objects. Always zero
1650 // for pages in spaces with variable sized objects (those pages are
1651 // padded with free-list nodes).
1652 int page_extra_;
1653
1654 bool was_swept_conservatively_; 1672 bool was_swept_conservatively_;
1655 1673
1656 // The first page to be swept when the lazy sweeper advances. Is set 1674 // The first page to be swept when the lazy sweeper advances. Is set
1657 // to NULL when all pages have been swept. 1675 // to NULL when all pages have been swept.
1658 Page* first_unswept_page_; 1676 Page* first_unswept_page_;
1659 1677
1660 // The number of free bytes which could be reclaimed by advancing the 1678 // The number of free bytes which could be reclaimed by advancing the
1661 // lazy sweeper. This is only an estimation because lazy sweeping is 1679 // lazy sweeper. This is only an estimation because lazy sweeping is
1662 // done conservatively. 1680 // done conservatively.
1663 intptr_t unswept_free_bytes_; 1681 intptr_t unswept_free_bytes_;
1664 1682
1665 // Expands the space by allocating a fixed number of pages. Returns false if 1683 // Expands the space by allocating a page. Returns false if it cannot
1666 // it cannot allocate requested number of pages from OS, or if the hard heap 1684 // allocate a page from OS, or if the hard heap size limit has been hit. The
1667 // size limit has been hit. 1685 // new page will have at least enough committed space to satisfy the object
1668 bool Expand(); 1686 // size indicated by the allocation_size argument.
1687 bool Expand(intptr_t allocation_size);
1669 1688
1670 // Generic fast case allocation function that tries linear allocation at the 1689 // Generic fast case allocation function that tries linear allocation at the
1671 // address denoted by top in allocation_info_. 1690 // address denoted by top in allocation_info_.
1672 inline HeapObject* AllocateLinearly(int size_in_bytes); 1691 inline HeapObject* AllocateLinearly(int size_in_bytes);
1673 1692
1674 // Slow path of AllocateRaw. This function is space-dependent. 1693 // Slow path of AllocateRaw. This function is space-dependent.
1675 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes); 1694 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
1676 1695
1677 friend class PageIterator; 1696 friend class PageIterator;
1678 }; 1697 };
(...skipping 639 matching lines...)
2318 // Old object space (excluding map objects) 2337 // Old object space (excluding map objects)
2319 2338
2320 class OldSpace : public PagedSpace { 2339 class OldSpace : public PagedSpace {
2321 public: 2340 public:
2322 // Creates an old space object with a given maximum capacity. 2341 // Creates an old space object with a given maximum capacity.
2323 // The constructor does not allocate pages from OS. 2342 // The constructor does not allocate pages from OS.
2324 OldSpace(Heap* heap, 2343 OldSpace(Heap* heap,
2325 intptr_t max_capacity, 2344 intptr_t max_capacity,
2326 AllocationSpace id, 2345 AllocationSpace id,
2327 Executability executable) 2346 Executability executable)
2328 : PagedSpace(heap, max_capacity, id, executable) { 2347 : PagedSpace(heap, max_capacity, id, executable) { }
2329 page_extra_ = 0;
2330 }
2331
2332 // The limit of allocation for a page in this space.
2333 virtual Address PageAllocationLimit(Page* page) {
2334 return page->ObjectAreaEnd();
2335 }
2336 2348
2337 public: 2349 public:
2338 TRACK_MEMORY("OldSpace") 2350 TRACK_MEMORY("OldSpace")
2339 }; 2351 };
2340 2352
2341 2353
2342 // For contiguous spaces, top should be in the space (or at the end) and limit 2354 // For contiguous spaces, top should be in the space (or at the end) and limit
2343 // should be the end of the space. 2355 // should be the end of the space.
2344 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ 2356 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
2345 SLOW_ASSERT((space).page_low() <= (info).top \ 2357 SLOW_ASSERT((space).page_low() <= (info).top \
2346 && (info).top <= (space).page_high() \ 2358 && (info).top <= (space).page_high() \
2347 && (info).limit <= (space).page_high()) 2359 && (info).limit <= (space).page_high())
2348 2360
2349 2361
2350 // ----------------------------------------------------------------------------- 2362 // -----------------------------------------------------------------------------
2351 // Old space for objects of a fixed size 2363 // Old space for objects of a fixed size
2352 2364
2353 class FixedSpace : public PagedSpace { 2365 class FixedSpace : public PagedSpace {
2354 public: 2366 public:
2355 FixedSpace(Heap* heap, 2367 FixedSpace(Heap* heap,
2356 intptr_t max_capacity, 2368 intptr_t max_capacity,
2357 AllocationSpace id, 2369 AllocationSpace id,
2358 int object_size_in_bytes, 2370 int object_size_in_bytes,
2359 const char* name) 2371 const char* name)
2360 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), 2372 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
2361 object_size_in_bytes_(object_size_in_bytes), 2373 object_size_in_bytes_(object_size_in_bytes),
2362 name_(name) { 2374 name_(name) { }
2363 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
2364 }
2365
2366 // The limit of allocation for a page in this space.
2367 virtual Address PageAllocationLimit(Page* page) {
2368 return page->ObjectAreaEnd() - page_extra_;
2369 }
2370 2375
2371 int object_size_in_bytes() { return object_size_in_bytes_; } 2376 int object_size_in_bytes() { return object_size_in_bytes_; }
2372 2377
2378 virtual int ObjectAlignment() { return object_size_in_bytes_; }
Vyacheslav Egorov (Chromium) 2012/01/16 17:02:54 for this to be object alignment we have to guarantee...
Erik Corry 2012/01/17 11:37:22 No, the alignment is relative to the ObjectAreaStart...
2379
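The truncated exchange above seems to turn on relative versus absolute alignment: in a space holding only objects of one size, each object sits at ObjectAreaStart() + k * object_size_in_bytes_, so its offset from the object area start is a multiple of the object size even when its absolute address is not. A small sketch of that distinction with arbitrary values:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t object_area_start = 0x123C0;  // arbitrary page's object area start
  const uintptr_t object_size = 88;             // some fixed object size (arbitrary)
  for (uintptr_t i = 0; i < 4; i++) {
    uintptr_t addr = object_area_start + i * object_size;
    // Aligned relative to the object area start...
    assert((addr - object_area_start) % object_size == 0);
    // ...but the absolute address is generally not a multiple of object_size.
    printf("object %llu at %#llx, addr %% size = %llu\n",
           (unsigned long long)i, (unsigned long long)addr,
           (unsigned long long)(addr % object_size));
  }
  return 0;
}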
2373 // Prepares for a mark-compact GC. 2380 // Prepares for a mark-compact GC.
2374 virtual void PrepareForMarkCompact(); 2381 virtual void PrepareForMarkCompact();
2375 2382
2376 protected: 2383 protected:
2377 void ResetFreeList() { 2384 void ResetFreeList() {
2378 free_list_.Reset(); 2385 free_list_.Reset();
2379 } 2386 }
2380 2387
2381 private: 2388 private:
2382 // The size of objects in this space. 2389 // The size of objects in this space.
(...skipping 260 matching lines...)
2643 } 2650 }
2644 // Must be small, since an iteration is used for lookup. 2651 // Must be small, since an iteration is used for lookup.
2645 static const int kMaxComments = 64; 2652 static const int kMaxComments = 64;
2646 }; 2653 };
2647 #endif 2654 #endif
2648 2655
2649 2656
2650 } } // namespace v8::internal 2657 } } // namespace v8::internal
2651 2658
2652 #endif // V8_SPACES_H_ 2659 #endif // V8_SPACES_H_