Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 3301008: [Isolates] Add heap pointer to all maps and use map->heap() more. (Closed)
Patch Set: "even more", created 10 years, 3 months ago
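
For readers following the isolates work: every hunk below applies one pattern. Spaces used to reach the heap and its services through the HEAP macro and Isolate::Current(); after this patch each Space is constructed with a Heap* and call sites go through heap() and heap()->isolate() instead. A minimal sketch of that shape follows, using simplified, hypothetical class definitions rather than the actual v8::internal declarations:

// Illustrative sketch only; the enums and member layouts are simplified
// assumptions, not the real V8 code.
enum AllocationSpace { OLD_POINTER_SPACE, CODE_SPACE, MAP_SPACE };  // simplified
enum Executability { NOT_EXECUTABLE, EXECUTABLE };                  // simplified

class Isolate;  // forward declaration; only used through a pointer here

class Heap {
 public:
  explicit Heap(Isolate* isolate) : isolate_(isolate) {}
  // Each heap knows its owning isolate, so spaces never need
  // the thread-local Isolate::Current() lookup.
  Isolate* isolate() const { return isolate_; }
 private:
  Isolate* isolate_;
};

class Space {
 public:
  // The Heap* is threaded through every space constructor.
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap), id_(id), executable_(executable) {}
  Heap* heap() const { return heap_; }
 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;
};

class PagedSpace : public Space {
 public:
  PagedSpace(Heap* heap, int max_capacity,
             AllocationSpace id, Executability executable)
      : Space(heap, id, executable), max_capacity_(max_capacity) {}

  // Old call sites looked like
  //   Isolate::Current()->memory_allocator()->SetNextPage(...);
  //   HEAP->CreateFillerObjectAt(start, size_in_bytes);
  // and after this patch become
  //   heap()->isolate()->memory_allocator()->SetNextPage(...);
  //   heap()->CreateFillerObjectAt(start, size_in_bytes);
 private:
  int max_capacity_;
};

The design point is that a space no longer depends on a process- or thread-global heap, which is what makes the code usable when several isolates, each with its own heap, live in one process.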
Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index 9bd18744fe9c767c9a49e617f5a8d3a183a0f1d5..c583d6f65129b38006a74c5745114c30b1c4a0f4 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -771,10 +771,11 @@ Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
// -----------------------------------------------------------------------------
// PagedSpace implementation
-PagedSpace::PagedSpace(int max_capacity,
+PagedSpace::PagedSpace(Heap* heap,
+ int max_capacity,
AllocationSpace id,
Executability executable)
- : Space(id, executable) {
+ : Space(heap, id, executable) {
max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
* Page::kObjectAreaSize;
accounting_stats_.Clear();
@@ -878,7 +879,7 @@ void PagedSpace::MarkAllPagesClean() {
Object* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called before or after mark-compact GC
// because it accesses map pointers.
- ASSERT(!HEAP->mark_compact_collector()->in_use());
+ ASSERT(!heap()->mark_compact_collector()->in_use());
if (!Contains(addr)) return Failure::Exception();
@@ -994,14 +995,14 @@ bool PagedSpace::Expand(Page* last_page) {
if (available_pages <= 0) return false;
int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
- Page* p = Isolate::Current()->memory_allocator()->AllocatePages(
+ Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
desired_pages, &desired_pages, this);
if (!p->is_valid()) return false;
accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
ASSERT(Capacity() <= max_capacity_);
- Isolate::Current()->memory_allocator()->SetNextPage(last_page, p);
+ heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);
// Sequentially clear region marks of new pages and cache the
// new last page in the space.
@@ -1044,9 +1045,9 @@ void PagedSpace::Shrink() {
}
// Free pages after top_page.
- Page* p = Isolate::Current()->memory_allocator()->
+ Page* p = heap()->isolate()->memory_allocator()->
FreePages(top_page->next_page());
- Isolate::Current()->memory_allocator()->SetNextPage(top_page, p);
+ heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);
// Find out how many pages we failed to free and update last_page_.
// Please note pages can only be freed in whole chunks.
@@ -1068,7 +1069,7 @@ bool PagedSpace::EnsureCapacity(int capacity) {
Page* last_page = AllocationTopPage();
Page* next_page = last_page->next_page();
while (next_page->is_valid()) {
- last_page = Isolate::Current()->memory_allocator()->
+ last_page = heap()->isolate()->memory_allocator()->
FindLastPageInSameChunk(next_page);
next_page = last_page->next_page();
}
@@ -1078,7 +1079,7 @@ bool PagedSpace::EnsureCapacity(int capacity) {
if (!Expand(last_page)) return false;
ASSERT(last_page->next_page()->is_valid());
last_page =
- Isolate::Current()->memory_allocator()->FindLastPageInSameChunk(
+ heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
last_page->next_page());
} while (Capacity() < capacity);
@@ -1099,7 +1100,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// space.
ASSERT(allocation_info_.VerifyPagedAllocation());
Page* top_page = Page::FromAllocationTop(allocation_info_.top);
- ASSERT(Isolate::Current()->memory_allocator()->IsPageInSpace(top_page, this));
+ ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));
// Loop over all the pages.
bool above_allocation_top = false;
@@ -1124,7 +1125,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// be in map space.
Map* map = object->map();
ASSERT(map->IsMap());
- ASSERT(HEAP->map_space()->Contains(map));
+ ASSERT(heap()->map_space()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
@@ -1160,8 +1161,8 @@ bool NewSpace::Setup(Address start, int size) {
// start and size. The provided space is divided into two semi-spaces.
// To support fast containment testing in the new space, the size of
// this chunk must be a power of two and it must be aligned to its size.
- int initial_semispace_capacity = HEAP->InitialSemiSpaceSize();
- int maximum_semispace_capacity = HEAP->MaxSemiSpaceSize();
+ int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
+ int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();
ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
ASSERT(IsPowerOf2(maximum_semispace_capacity));
@@ -1177,7 +1178,7 @@ bool NewSpace::Setup(Address start, int size) {
#undef SET_NAME
#endif
- ASSERT(size == 2 * HEAP->ReservedSemiSpaceSize());
+ ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
ASSERT(IsAddressAligned(start, size, 0));
if (!to_space_.Setup(start,
@@ -1232,16 +1233,16 @@ void NewSpace::TearDown() {
#ifdef ENABLE_HEAP_PROTECTION
void NewSpace::Protect() {
- Isolate::Current()->memory_allocator()->Protect(ToSpaceLow(), Capacity());
- Isolate::Current()->memory_allocator()->Protect(FromSpaceLow(), Capacity());
+ heap()->isolate()->memory_allocator()->Protect(ToSpaceLow(), Capacity());
+ heap()->isolate()->memory_allocator()->Protect(FromSpaceLow(), Capacity());
}
void NewSpace::Unprotect() {
- Isolate::Current()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(),
- to_space_.executable());
- Isolate::Current()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(),
- from_space_.executable());
+ heap()->isolate()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(),
+ to_space_.executable());
+ heap()->isolate()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(),
+ from_space_.executable());
}
#endif
@@ -1335,7 +1336,7 @@ void NewSpace::Verify() {
// be in map space.
Map* map = object->map();
ASSERT(map->IsMap());
- ASSERT(HEAP->map_space()->Contains(map));
+ ASSERT(heap()->map_space()->Contains(map));
// The object should not be code or a map.
ASSERT(!object->IsMap());
@@ -1360,7 +1361,7 @@ void NewSpace::Verify() {
bool SemiSpace::Commit() {
ASSERT(!is_committed());
- if (!Isolate::Current()->memory_allocator()->CommitBlock(
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(
start_, capacity_, executable())) {
return false;
}
@@ -1371,7 +1372,7 @@ bool SemiSpace::Commit() {
bool SemiSpace::Uncommit() {
ASSERT(is_committed());
- if (!Isolate::Current()->memory_allocator()->UncommitBlock(
+ if (!heap()->isolate()->memory_allocator()->UncommitBlock(
start_, capacity_)) {
return false;
}
@@ -1418,7 +1419,7 @@ bool SemiSpace::Grow() {
int maximum_extra = maximum_capacity_ - capacity_;
int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
maximum_extra);
- if (!Isolate::Current()->memory_allocator()->CommitBlock(
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(
high(), extra, executable())) {
return false;
}
@@ -1432,7 +1433,7 @@ bool SemiSpace::GrowTo(int new_capacity) {
ASSERT(new_capacity > capacity_);
size_t delta = new_capacity - capacity_;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- if (!Isolate::Current()->memory_allocator()->CommitBlock(
+ if (!heap()->isolate()->memory_allocator()->CommitBlock(
high(), delta, executable())) {
return false;
}
@@ -1446,7 +1447,7 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
ASSERT(new_capacity < capacity_);
size_t delta = capacity_ - new_capacity;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- if (!Isolate::Current()->memory_allocator()->UncommitBlock(
+ if (!heap()->isolate()->memory_allocator()->UncommitBlock(
high() - delta, delta)) {
return false;
}
@@ -2036,14 +2037,14 @@ void PagedSpace::FreePages(Page* prev, Page* last) {
first_page_ = last->next_page();
} else {
first = prev->next_page();
- Isolate::Current()->memory_allocator()->SetNextPage(
+ heap()->isolate()->memory_allocator()->SetNextPage(
prev, last->next_page());
}
// Attach it after the last page.
- Isolate::Current()->memory_allocator()->SetNextPage(last_page_, first);
+ heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
last_page_ = last;
- Isolate::Current()->memory_allocator()->SetNextPage(last, NULL);
+ heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);
// Clean them up.
do {
@@ -2082,7 +2083,7 @@ void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
if (page_list_is_chunk_ordered_) return;
Page* new_last_in_use = Page::FromAddress(NULL);
- Isolate::Current()->memory_allocator()->RelinkPageListInChunkOrder(
+ heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
this, &first_page_, &last_page_, &new_last_in_use);
ASSERT(new_last_in_use->is_valid());
@@ -2100,7 +2101,7 @@ void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
accounting_stats_.AllocateBytes(size_in_bytes);
DeallocateBlock(start, size_in_bytes, add_to_freelist);
} else {
- HEAP->CreateFillerObjectAt(start, size_in_bytes);
+ heap()->CreateFillerObjectAt(start, size_in_bytes);
}
}
@@ -2127,7 +2128,7 @@ void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
accounting_stats_.AllocateBytes(size_in_bytes);
DeallocateBlock(start, size_in_bytes, add_to_freelist);
} else {
- HEAP->CreateFillerObjectAt(start, size_in_bytes);
+ heap()->CreateFillerObjectAt(start, size_in_bytes);
}
}
}
@@ -2156,7 +2157,7 @@ bool PagedSpace::ReserveSpace(int bytes) {
int bytes_left_to_reserve = bytes;
while (bytes_left_to_reserve > 0) {
if (!reserved_page->next_page()->is_valid()) {
- if (HEAP->OldGenerationAllocationLimitReached()) return false;
+ if (heap()->OldGenerationAllocationLimitReached()) return false;
Expand(reserved_page);
}
bytes_left_to_reserve -= Page::kPageSize;
@@ -2174,7 +2175,7 @@ bool PagedSpace::ReserveSpace(int bytes) {
// You have to call this last, since the implementation from PagedSpace
// doesn't know that memory was 'promised' to large object space.
bool LargeObjectSpace::ReserveSpace(int bytes) {
- return HEAP->OldGenerationSpaceAvailable() >= bytes;
+ return heap()->OldGenerationSpaceAvailable() >= bytes;
}
@@ -2193,7 +2194,7 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
// There is no next page in this space. Try free list allocation unless that
// is currently forbidden.
- if (!HEAP->linear_allocation()) {
+ if (!heap()->linear_allocation()) {
int wasted_bytes;
Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
accounting_stats_.WasteBytes(wasted_bytes);
@@ -2219,7 +2220,8 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection.
- if (!HEAP->always_allocate() && HEAP->OldGenerationAllocationLimitReached()) {
+ if (!heap()->always_allocate() &&
+ heap()->OldGenerationAllocationLimitReached()) {
return NULL;
}
@@ -2381,7 +2383,7 @@ static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = heap()->isolate();
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
if (obj->IsCode()) {
@@ -2494,7 +2496,7 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
// There is no next page in this space. Try free list allocation unless
// that is currently forbidden. The fixed space free list implicitly assumes
// that all free blocks are of the fixed size.
- if (!HEAP->linear_allocation()) {
+ if (!heap()->linear_allocation()) {
Object* result = free_list_.Allocate();
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
@@ -2517,7 +2519,8 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection.
- if (!HEAP->always_allocate() && HEAP->OldGenerationAllocationLimitReached()) {
+ if (!heap()->always_allocate() &&
+ heap()->OldGenerationAllocationLimitReached()) {
return NULL;
}
@@ -2617,7 +2620,7 @@ void MapSpace::VerifyObject(HeapObject* object) {
void CellSpace::VerifyObject(HeapObject* object) {
// The object should be a global object property cell or a free-list node.
ASSERT(object->IsJSGlobalPropertyCell() ||
- object->map() == HEAP->two_pointer_filler_map());
+ object->map() == heap()->two_pointer_filler_map());
}
#endif
@@ -2682,8 +2685,8 @@ int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
// -----------------------------------------------------------------------------
// LargeObjectSpace
-LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
- : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis
+LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
+ : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
first_chunk_(NULL),
size_(0),
page_count_(0) {}
@@ -2708,10 +2711,10 @@ void LargeObjectSpace::TearDown() {
ObjectSpace space = kObjectSpaceLoSpace;
if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
size_t size = chunk->size();
- Isolate::Current()->memory_allocator()->FreeRawMemory(chunk->address(),
- size,
- executable);
- Isolate::Current()->memory_allocator()->PerformAllocationCallback(
+ heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
+ size,
+ executable);
+ heap()->isolate()->memory_allocator()->PerformAllocationCallback(
space, kAllocationActionFree, size);
}
@@ -2725,8 +2728,8 @@ void LargeObjectSpace::TearDown() {
void LargeObjectSpace::Protect() {
LargeObjectChunk* chunk = first_chunk_;
while (chunk != NULL) {
- Isolate::Current()->memory_allocator()->Protect(chunk->address(),
- chunk->size());
+ heap()->isolate()->memory_allocator()->Protect(chunk->address(),
+ chunk->size());
chunk = chunk->next();
}
}
@@ -2736,7 +2739,7 @@ void LargeObjectSpace::Unprotect() {
LargeObjectChunk* chunk = first_chunk_;
while (chunk != NULL) {
bool is_code = chunk->GetObject()->IsCode();
- Isolate::Current()->memory_allocator()->Unprotect(chunk->address(),
+ heap()->isolate()->memory_allocator()->Unprotect(chunk->address(),
chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE);
chunk = chunk->next();
}
@@ -2752,7 +2755,8 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
- if (!HEAP->always_allocate() && HEAP->OldGenerationAllocationLimitReached()) {
+ if (!heap()->always_allocate() &&
+ heap()->OldGenerationAllocationLimitReached()) {
return Failure::RetryAfterGC(requested_size, identity());
}
@@ -2859,22 +2863,22 @@ void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
// Iterate regions of the first normal page covering object.
uint32_t first_region_number = page->GetRegionNumberForAddress(start);
newmarks |=
- HEAP->IterateDirtyRegions(marks >> first_region_number,
- start,
- end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object) << first_region_number;
+ heap()->IterateDirtyRegions(marks >> first_region_number,
+ start,
+ end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object) << first_region_number;
start = end;
end = start + Page::kPageSize;
while (end <= object_end) {
// Iterate next 32 regions.
newmarks |=
- HEAP->IterateDirtyRegions(marks,
- start,
- end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object);
+ heap()->IterateDirtyRegions(marks,
+ start,
+ end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object);
start = end;
end = start + Page::kPageSize;
}
@@ -2883,11 +2887,11 @@ void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
// Iterate the last piece of an object which is less than
// Page::kPageSize.
newmarks |=
- HEAP->IterateDirtyRegions(marks,
- start,
- object_end,
- &Heap::IteratePointersInDirtyRegion,
- copy_object);
+ heap()->IterateDirtyRegions(marks,
+ start,
+ object_end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object);
}
page->SetRegionMarks(newmarks);
@@ -2904,7 +2908,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
HeapObject* object = current->GetObject();
if (object->IsMarked()) {
object->ClearMark();
- HEAP->mark_compact_collector()->tracer()->decrement_marked_count();
+ heap()->mark_compact_collector()->tracer()->decrement_marked_count();
previous = current;
current = current->next();
} else {
@@ -2924,15 +2928,15 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
}
// Free the chunk.
- HEAP->mark_compact_collector()->ReportDeleteIfNeeded(object);
+ heap()->mark_compact_collector()->ReportDeleteIfNeeded(object);
size_ -= static_cast<int>(chunk_size);
page_count_--;
ObjectSpace space = kObjectSpaceLoSpace;
if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
- Isolate::Current()->memory_allocator()->FreeRawMemory(chunk_address,
- chunk_size,
- executable);
- Isolate::Current()->memory_allocator()->PerformAllocationCallback(
+ heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
+ chunk_size,
+ executable);
+ heap()->isolate()->memory_allocator()->PerformAllocationCallback(
space, kAllocationActionFree, size_);
LOG(DeleteEvent("LargeObjectChunk", chunk_address));
}
@@ -2942,7 +2946,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
bool LargeObjectSpace::Contains(HeapObject* object) {
Address address = object->address();
- if (HEAP->new_space()->Contains(address)) {
+ if (heap()->new_space()->Contains(address)) {
return false;
}
Page* page = Page::FromAddress(address);
@@ -2971,7 +2975,7 @@ void LargeObjectSpace::Verify() {
// in map space.
Map* map = object->map();
ASSERT(map->IsMap());
- ASSERT(HEAP->map_space()->Contains(map));
+ ASSERT(heap()->map_space()->Contains(map));
// We have only code, sequential strings, external strings
// (sequential strings that have been morphed into external
@@ -2998,9 +3002,9 @@ void LargeObjectSpace::Verify() {
Object* element = array->get(j);
if (element->IsHeapObject()) {
HeapObject* element_object = HeapObject::cast(element);
- ASSERT(HEAP->Contains(element_object));
+ ASSERT(heap()->Contains(element_object));
ASSERT(element_object->map()->IsMap());
- if (HEAP->InNewSpace(element_object)) {
+ if (heap()->InNewSpace(element_object)) {
Address array_addr = object->address();
Address element_addr = array_addr + FixedArray::kHeaderSize +
j * kPointerSize;
@@ -3038,7 +3042,7 @@ void LargeObjectSpace::ReportStatistics() {
void LargeObjectSpace::CollectCodeStatistics() {
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = heap()->isolate();
LargeObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
if (obj->IsCode()) {
