Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 11566011: Use MemoryChunk-based allocation for deoptimization entry code (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 7 years, 11 months ago
Index: src/spaces.cc
===================================================================
--- src/spaces.cc (revision 13384)
+++ src/spaces.cc (working copy)
@@ -207,9 +207,10 @@
}
-
Address CodeRange::AllocateRawMemory(const size_t requested,
- size_t* allocated) {
+ size_t* allocated,
+ size_t commit_body_size) {
+ ASSERT(commit_body_size <= requested);
ASSERT(current_allocation_block_index_ < allocation_list_.length());
if (requested > allocation_list_[current_allocation_block_index_].size) {
// Find an allocation block large enough. This function call may
@@ -229,7 +230,8 @@
ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
if (!MemoryAllocator::CommitCodePage(code_range_,
current.start,
- *allocated)) {
+ *allocated,
+ commit_body_size)) {
*allocated = 0;
return NULL;
}
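
The new commit_body_size parameter threads from CodeRange::AllocateRawMemory down into MemoryAllocator::CommitCodePage. A condensed view of the call chain as this patch wires it (comments only; signatures taken from the hunks above):

    // CodeRange::AllocateRawMemory(requested, allocated, commit_body_size)
    //   -> MemoryAllocator::CommitCodePage(code_range_, current.start,
    //                                      *allocated, commit_body_size)
    // Only commit_body_size bytes of the code area are committed up front;
    // the rest of the chunk stays reserved until it is committed on demand.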
@@ -242,6 +244,13 @@
}
+bool CodeRange::CommitRawMemory(Address start, size_t length) {
+ // Commit page body (executable).
+ if (!code_range_->Commit(start, length, true)) return false;
danno 2013/01/17 10:49:55 how about just: return code_range_->Commit(start, length, true);
haitao.feng 2013/01/17 14:20:33 Done.
+ return true;
+}
+
+
void CodeRange::FreeRawMemory(Address address, size_t length) {
ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
free_list_.Add(FreeBlock(address, length));
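
Applying danno's suggestion above, CommitRawMemory collapses to a single statement (a sketch of what the "Done" reply implies; the final patch set is not shown on this page):

    bool CodeRange::CommitRawMemory(Address start, size_t length) {
      // Commit the executable page body and propagate the result directly.
      return code_range_->Commit(start, length, true);
    }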
@@ -346,8 +355,7 @@
if (!reservation.IsReserved()) return NULL;
size_ += reservation.size();
- Address base = RoundUp(static_cast<Address>(reservation.address()),
- alignment);
+ Address base = static_cast<Address>(reservation.address());
controller->TakeControl(&reservation);
return base;
}
@@ -355,18 +363,21 @@
Address MemoryAllocator::AllocateAlignedMemory(size_t size,
danno 2013/01/17 10:49:55 nit: "requested_size" is a better name
haitao.feng 2013/01/17 14:20:33 Done.
size_t alignment,
+ size_t commit_body_size,
danno 2013/01/17 10:49:55 nit: "commit_size" is a better name nit: could you
haitao.feng 2013/01/17 14:20:33 Done.
haitao.feng 2013/01/17 14:20:33 Done.
Executability executable,
VirtualMemory* controller) {
+ ASSERT(commit_body_size <= size);
VirtualMemory reservation;
Address base = ReserveAlignedMemory(size, alignment, &reservation);
if (base == NULL) return NULL;
if (executable == EXECUTABLE) {
- if (!CommitCodePage(&reservation, base, size)) {
+ if (!CommitCodePage(&reservation, base, size, commit_body_size)) {
base = NULL;
}
} else {
- if (!reservation.Commit(base, size, false)) {
+ size_t commit_size = MemoryChunk::kObjectStartOffset + commit_body_size;
+ if (!reservation.Commit(base, commit_size, false)) {
base = NULL;
}
}
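
To make the new NOT_EXECUTABLE path concrete, here is a hypothetical call with illustrative numbers (the 1 MB / 4 KB figures are not from this CL):

    // Reserve a 1 MB aligned region but commit only the header plus 4 KB.
    size_t requested_size = 1024 * KB;   // full reservation
    size_t commit_body_size = 4 * KB;    // body bytes committed up front
    // Inside AllocateAlignedMemory's non-executable branch:
    size_t commit_size = MemoryChunk::kObjectStartOffset + commit_body_size;
    // reservation.Commit(base, commit_size, false) touches only commit_size
    // bytes; the remaining requested_size - commit_size bytes stay reserved.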
@@ -470,6 +481,31 @@
}
+bool MemoryChunk::CommitBody(size_t body_size, Executability executable) {
+ // Already committed; committed memory is never shrunk here.
+ if (body_size <= (area_end_ - area_start_)) return true;
danno 2013/01/17 10:49:55 The problem with using area_end is that you change
haitao.feng 2013/01/17 14:20:33 How about "ASSERT(body_size <= static_cast<size_t>
+
+ size_t length = body_size - (area_end_ - area_start_);
+ if (reservation_.IsReserved()) {
+ if (!reservation_.Commit(area_end_, length, executable == EXECUTABLE)) {
+ return false;
+ }
+ } else {
+ CodeRange* code_range = heap_->isolate()->code_range();
+ ASSERT(code_range->exists() && (executable == EXECUTABLE));
+ if (!code_range->CommitRawMemory(area_end_, length)) return false;
+ }
+
+ if (Heap::ShouldZapGarbage()) {
+ heap_->isolate()->memory_allocator()->ZapBlock(area_end_, length);
+ }
+
+ area_end_ = area_start_ + body_size;
+
+ return true;
+}
+
+
void MemoryChunk::InsertAfter(MemoryChunk* other) {
next_chunk_ = other->next_chunk_;
prev_chunk_ = other;
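
A hypothetical caller of the new MemoryChunk::CommitBody (the deoptimizer-side caller lives elsewhere in this CL; the names below are illustrative):

    // Grow the committed part of an executable chunk on demand.
    if (!chunk->CommitBody(new_body_size, EXECUTABLE)) {
      return NULL;  // Commit failed, e.g. out of memory.
    }
    // On success, area_end_ now sits new_body_size bytes past area_start_.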
@@ -490,9 +526,12 @@
}
-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_body_size,
+ intptr_t commit_body_size,
Executability executable,
Space* owner) {
+ ASSERT(commit_body_size <= reserve_body_size);
+
size_t chunk_size;
Heap* heap = isolate_->heap();
Address base = NULL;
@@ -501,7 +540,7 @@
Address area_end = NULL;
if (executable == EXECUTABLE) {
- chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
+ chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_body_size,
OS::CommitPageSize()) + CodePageGuardSize();
// Check executable memory limit.
@@ -515,7 +554,9 @@
// Allocate executable memory either from code range or from the
// OS.
if (isolate_->code_range()->exists()) {
- base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
+ base = isolate_->code_range()->AllocateRawMemory(chunk_size,
+ &chunk_size,
+ commit_body_size);
ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
MemoryChunk::kAlignment));
if (base == NULL) return NULL;
@@ -525,6 +566,7 @@
} else {
base = AllocateAlignedMemory(chunk_size,
MemoryChunk::kAlignment,
+ commit_body_size,
executable,
&reservation);
if (base == NULL) return NULL;
@@ -534,28 +576,32 @@
if (Heap::ShouldZapGarbage()) {
ZapBlock(base, CodePageGuardStartOffset());
- ZapBlock(base + CodePageAreaStartOffset(), body_size);
+ ZapBlock(base + CodePageAreaStartOffset(), commit_body_size);
}
area_start = base + CodePageAreaStartOffset();
- area_end = area_start + body_size;
+ area_end = area_start + commit_body_size;
danno 2013/01/17 10:49:55 area_end should always be the end of the reserved area
haitao.feng 2013/01/17 14:20:33 The end could be calculated by address() + size_.
} else {
- chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+ chunk_size = MemoryChunk::kObjectStartOffset + reserve_body_size;
base = AllocateAlignedMemory(chunk_size,
MemoryChunk::kAlignment,
+ commit_body_size,
executable,
&reservation);
if (base == NULL) return NULL;
if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, chunk_size);
+ ZapBlock(base, MemoryChunk::kObjectStartOffset + commit_body_size);
}
area_start = base + Page::kObjectStartOffset;
- area_end = base + chunk_size;
+ area_end = area_start + commit_body_size;
}
+ // Use chunk_size for statistics and callbacks as the reserved
+ // but uncommitted memory regions are only meaningful for this allocated
+ // memory trunk.
danno 2013/01/17 10:49:55 nit: I don't know what you mean by "allocated memory trunk"
haitao.feng 2013/01/17 14:20:33 Done.
isolate_->counters()->memory_allocated()->
Increment(static_cast<int>(chunk_size));
@@ -580,7 +626,7 @@
Page* MemoryAllocator::AllocatePage(intptr_t size,
PagedSpace* owner,
Executability executable) {
- MemoryChunk* chunk = AllocateChunk(size, executable, owner);
+ MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
if (chunk == NULL) return NULL;
@@ -591,7 +637,10 @@
LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
Space* owner,
Executability executable) {
- MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
+ MemoryChunk* chunk = AllocateChunk(object_size,
+ object_size,
+ executable,
+ owner);
if (chunk == NULL) return NULL;
return LargePage::Initialize(isolate_->heap(), chunk);
}
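
Both existing callers pass the same value for the reserve and commit sizes, so their behavior is unchanged; the deoptimization-entry path this CL enables would pass a smaller commit size (hypothetical sketch; the names are illustrative):

    // Reserve room for the largest possible deopt entry table, but commit
    // only what is needed now; later growth goes through CommitBody.
    MemoryChunk* chunk = memory_allocator->AllocateChunk(
        max_deopt_table_size,      // reserve_body_size: reserved upper bound
        initial_deopt_table_size,  // commit_body_size: committed immediately
        EXECUTABLE,
        owner);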
@@ -735,7 +784,8 @@
bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
Address start,
- size_t size) {
+ size_t size,
danno 2013/01/17 10:49:55 I think it's clearer if you swap the order of the
haitao.feng 2013/01/17 14:20:33 Done.
+ size_t body_size) {
// Commit page header (not executable).
if (!vm->Commit(start,
CodePageGuardStartOffset(),
@@ -749,15 +799,14 @@
}
// Commit page body (executable).
- size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
if (!vm->Commit(start + CodePageAreaStartOffset(),
- area_size,
+ body_size,
true)) {
return false;
}
- // Create guard page after the allocatable area.
- if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
+ // Create guard page before the end.
+ if (!vm->Guard(start + size - CodePageGuardSize())) {
return false;
}
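
Putting the CommitCodePage pieces together, the committed layout after this change looks roughly like the following (offsets per the helper functions above; the uncommitted tail is the new part):

    // start                      committed, not executable
    // +------------------------+
    // | header + guard page    |  CodePageAreaStartOffset() bytes
    // +------------------------+
    // | body (executable)      |  body_size bytes committed now
    // | reserved, uncommitted  |  committed later on demand
    // +------------------------+  start + size - CodePageGuardSize()
    // | trailing guard page    |
    // +------------------------+  start + size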
