Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1143)

Unified Diff: src/spaces.cc

Issue 151152: Create a separate space for global property cells (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 11 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « src/spaces.h ('k') | no next file » | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: src/spaces.cc
===================================================================
--- src/spaces.cc (revision 2321)
+++ src/spaces.cc (working copy)
@@ -1445,42 +1445,42 @@
#endif
-MapSpaceFreeList::MapSpaceFreeList(AllocationSpace owner) {
- owner_ = owner;
- Reset();
+FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
+ : owner_(owner), object_size_(object_size) {
+ Reset();
}
-void MapSpaceFreeList::Reset() {
+void FixedSizeFreeList::Reset() {
available_ = 0;
head_ = NULL;
}
-void MapSpaceFreeList::Free(Address start) {
+void FixedSizeFreeList::Free(Address start) {
#ifdef DEBUG
- for (int i = 0; i < Map::kSize; i += kPointerSize) {
+ for (int i = 0; i < object_size_; i += kPointerSize) {
Memory::Address_at(start + i) = kZapValue;
}
#endif
ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
FreeListNode* node = FreeListNode::FromAddress(start);
- node->set_size(Map::kSize);
+ node->set_size(object_size_);
node->set_next(head_);
head_ = node->address();
- available_ += Map::kSize;
+ available_ += object_size_;
}
-Object* MapSpaceFreeList::Allocate() {
+Object* FixedSizeFreeList::Allocate() {
if (head_ == NULL) {
- return Failure::RetryAfterGC(Map::kSize, owner_);
+ return Failure::RetryAfterGC(object_size_, owner_);
}
ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
FreeListNode* node = FreeListNode::FromAddress(head_);
head_ = node->next();
- available_ -= Map::kSize;
+ available_ -= object_size_;
return node;
}
@@ -1987,25 +1987,13 @@
#endif
// -----------------------------------------------------------------------------
-// MapSpace implementation
+// PagedSpaceForFixedSizedObjects implementation
-void MapSpace::PrepareForMarkCompact(bool will_compact) {
+void PagedSpaceForFixedSizedObjects::PrepareForMarkCompact(bool will_compact) {
if (will_compact) {
// Reset relocation info.
MCResetRelocationInfo();
- // Initialize map index entry.
- int page_count = 0;
- PageIterator it(this, PageIterator::ALL_PAGES);
- while (it.has_next()) {
- ASSERT_MAP_PAGE_INDEX(page_count);
-
- Page* p = it.next();
- ASSERT(p->mc_page_index == page_count);
-
- page_addresses_[page_count++] = p->address();
- }
-
// During a compacting collection, everything in the space is considered
// 'available' (set by the call to MCResetRelocationInfo) and we will
// rediscover live and wasted bytes during the collection.
@@ -2023,7 +2011,7 @@
}
-void MapSpace::MCCommitRelocationInfo() {
+void PagedSpaceForFixedSizedObjects::MCCommitRelocationInfo() {
// Update fast allocation info.
allocation_info_.top = mc_forwarding_info_.top;
allocation_info_.limit = mc_forwarding_info_.limit;
@@ -2053,7 +2041,8 @@
// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
-HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
+HeapObject* PagedSpaceForFixedSizedObjects::SlowAllocateRaw(int size_in_bytes) {
+ ASSERT_EQ(object_size_, size_in_bytes);
// Linear allocation in this space has failed. If there is another page
// in the space, move to that page and allocate there. This allocation
// should succeed.
@@ -2063,9 +2052,9 @@
}
// There is no next page in this space. Try free list allocation. The
- // map space free list implicitly assumes that all free blocks are map
- // sized.
- if (size_in_bytes == Map::kSize) {
+ // space free list implicitly assumes that all free blocks are of the fixed
+ // size.
+ if (size_in_bytes == object_size_) {
Object* result = free_list_.Allocate();
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
@@ -2094,11 +2083,12 @@
// Move to the next page (there is assumed to be one) and allocate there.
// The top of page block is always wasted, because it is too small to hold an
// object of the fixed size.
-HeapObject* MapSpace::AllocateInNextPage(Page* current_page,
- int size_in_bytes) {
+HeapObject* PagedSpaceForFixedSizedObjects::AllocateInNextPage(
+ Page* current_page, int size_in_bytes) {
ASSERT(current_page->next_page()->is_valid());
- ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == kPageExtra);
- accounting_stats_.WasteBytes(kPageExtra);
+ ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
+ ASSERT_EQ(object_size_, size_in_bytes);
+ accounting_stats_.WasteBytes(page_extra_);
SetAllocationInfo(&allocation_info_, current_page->next_page());
return AllocateLinearly(&allocation_info_, size_in_bytes);
}
@@ -2107,7 +2097,7 @@
#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
-void MapSpace::Verify() {
+void PagedSpaceForFixedSizedObjects::Verify() {
// The allocation pointer should be valid, and it should be in a page in the
// space.
ASSERT(allocation_info_.VerifyPagedAllocation());
@@ -2130,7 +2120,7 @@
// The next page will be above the allocation top.
above_allocation_top = true;
} else {
- ASSERT(top == current_page->ObjectAreaEnd() - kPageExtra);
+ ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
}
// It should be packed with objects from the bottom to the top.
@@ -2144,8 +2134,8 @@
ASSERT(map->IsMap());
ASSERT(Heap::map_space()->Contains(map));
- // The object should be a map or a byte array.
- ASSERT(object->IsMap() || object->IsByteArray());
+ // Verify the object in the space.
+ VerifyObject(object);
// The object itself should look OK.
object->Verify();
@@ -2168,7 +2158,7 @@
}
-void MapSpace::ReportStatistics() {
+void PagedSpaceForFixedSizedObjects::ReportStatistics() {
int pct = Available() * 100 / Capacity();
PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
Capacity(), Waste(), Available(), pct);
@@ -2215,11 +2205,53 @@
}
-void MapSpace::PrintRSet() { DoPrintRSet("map"); }
+void PagedSpaceForFixedSizedObjects::PrintRSet() { DoPrintRSet(name_); }
#endif
// -----------------------------------------------------------------------------
+// MapSpace implementation
+
+void MapSpace::PrepareForMarkCompact(bool will_compact) {
+ // Call prepare of the super class.
+ PagedSpaceForFixedSizedObjects::PrepareForMarkCompact(will_compact);
+
+ if (will_compact) {
+ // Initialize map index entry.
+ int page_count = 0;
+ PageIterator it(this, PageIterator::ALL_PAGES);
+ while (it.has_next()) {
+ ASSERT_MAP_PAGE_INDEX(page_count);
+
+ Page* p = it.next();
+ ASSERT(p->mc_page_index == page_count);
+
+ page_addresses_[page_count++] = p->address();
+ }
+ }
+}
+
+
+#ifdef DEBUG
+void MapSpace::VerifyObject(HeapObject* object) {
+ // The object should be a map or a byte array.
+ ASSERT(object->IsMap() || object->IsByteArray());
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// GlobalPropertyCellSpace implementation
+
+#ifdef DEBUG
+void GlobalPropertyCellSpace::VerifyObject(HeapObject* object) {
+ // The object should be a global object property cell or a byte array.
+ ASSERT(object->IsJSGlobalPropertyCell() || object->IsByteArray());
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
// LargeObjectIterator
LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
« no previous file with comments | « src/spaces.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698