Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 310083002: VERIFY_PREDICTABLE build mode introduced. Shadow pages support added. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 7 months ago
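A note on the mechanism, as a reader aid before the diff: for every chunk that receives a shadow, the allocator reserves a same-sized non-executable companion chunk and records the constant distance between the two in shadow_data_offset_, so the shadow word for any address inside the original chunk is reached by adding that offset. A minimal sketch of the lookup, assuming a regular page whose shadow chunk has been allocated and assuming shadow_data_offset_ is readable from outside MemoryAllocator (ShadowSlotFor is a hypothetical helper, not part of this patch):

// Sketch only. The patch establishes, in AllocateShadowChunkFor:
//   chunk->shadow_data_offset_ = shadow->address() - chunk->address();
static intptr_t* ShadowSlotFor(Address addr) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
  ASSERT(chunk->shadow_chunk() != NULL);  // regular page with a shadow
  return reinterpret_cast<intptr_t*>(addr + chunk->shadow_data_offset_);
}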
Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index 8e923af5480842d180226d8f8bb08e20fd2ffe97..a6cce2db13767acd524f111a193b233c8f1573bc 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -275,6 +275,10 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
+ if (FLAG_shadow_pages) {
+ // We need up to twice as much non-executable memory for shadow pages.
+ capacity *= 2;
+ }
capacity_ = RoundUp(capacity, Page::kPageSize);
capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
ASSERT_GE(capacity_, capacity_executable_);
@@ -466,6 +470,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->area_end_ = area_end;
chunk->flags_ = 0;
chunk->set_owner(owner);
+ chunk->shadow_chunk_ = NULL;
+ chunk->shadow_data_offset_ = 0;
+ chunk->large_object_shadow_data_ = reinterpret_cast<intptr_t>(kZapValue);
chunk->InitializeReservedMemory();
chunk->slots_buffer_ = NULL;
chunk->skip_list_ = NULL;
@@ -701,6 +708,36 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
}
+bool MemoryAllocator::AllocateShadowChunkFor(MemoryChunk* chunk) {
+ ASSERT(FLAG_shadow_pages);
+ ASSERT(chunk->shadow_chunk() == NULL);
+
+ int size =
+ chunk->area_end() - (chunk->address() + MemoryChunk::kObjectStartOffset);
+ MemoryChunk* shadow = AllocateChunk(size, size, NOT_EXECUTABLE, NULL);
+ if (shadow == NULL) {
+ return false;
+ }
+ ASSERT((chunk->area_end() - chunk->address()) ==
+ (shadow->area_end() - shadow->address()));
+ chunk->set_shadow_chunk(shadow);
+ chunk->shadow_data_offset_ = shadow->address() - chunk->address();
+ ASSERT(IsAligned(chunk->shadow_data_offset_, Page::kPageSize));
+
+ return true;
+}
+
+
+void MemoryAllocator::FreeShadowChunkFor(MemoryChunk* chunk) {
+ ASSERT(FLAG_shadow_pages);
+
+ if (chunk->shadow_chunk() != NULL) {
+ Free(chunk->shadow_chunk());
+ chunk->set_shadow_chunk(NULL);
+ }
+}
+
+
void Page::ResetFreeListStatistics() {
non_available_small_blocks_ = 0;
available_in_small_free_list_ = 0;
@@ -717,7 +754,14 @@ Page* MemoryAllocator::AllocatePage(intptr_t size,
if (chunk == NULL) return NULL;
- return Page::Initialize(isolate_->heap(), chunk, executable, owner);
+ Page* p = Page::Initialize(isolate_->heap(), chunk, executable, owner);
+ if (FLAG_shadow_pages) {
+ if (!AllocateShadowChunkFor(p)) {
+ Free(p);
+ return NULL;
+ }
+ }
+ return p;
}
@@ -729,7 +773,17 @@ LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
executable,
owner);
if (chunk == NULL) return NULL;
- return LargePage::Initialize(isolate_->heap(), chunk);
+
+ LargePage* p = LargePage::Initialize(isolate_->heap(), chunk);
+
+ if (FLAG_shadow_pages) {
+ // Instead of allocating a shadow chunk, we initialize the shadow offset so
+ // that it points directly at p->large_object_shadow_data_.
+ p->shadow_data_offset_ =
+ reinterpret_cast<Address>(&p->large_object_shadow_data_) -
+ p->GetObject()->address();
+ }
+ return p;
}
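For large pages no shadow chunk is allocated. As the comment in the hunk above says, shadow_data_offset_ is set so that the same offset arithmetic, applied to the large object's start address, lands on the single large_object_shadow_data_ word kept in the page header. The invariant the assignment establishes can be written out as follows (a sketch, not code from the patch):

// Implied by the assignment above:
//   p->GetObject()->address() + p->shadow_data_offset_
//       == reinterpret_cast<Address>(&p->large_object_shadow_data_)
// i.e. a large object's single shadow word lives inside its own page header,
// so readers of shadow data need not special-case large objects.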
@@ -747,6 +801,10 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
delete chunk->slots_buffer();
delete chunk->skip_list();
+ if (FLAG_shadow_pages) {
+ FreeShadowChunkFor(chunk);
+ }
+
VirtualMemory* reservation = chunk->reserved_memory();
if (reservation->IsReserved()) {
FreeMemory(reservation, chunk->executable());
@@ -1508,7 +1566,41 @@ void SemiSpace::SetUp(Address start,
}
+bool SemiSpace::AllocateShadowChunksForPages(int start_index, int end_index) {
+ ASSERT(FLAG_shadow_pages);
+
+ MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
+
+ for (int i = start_index; i < end_index; i++) {
+ NewSpacePage* page =
+ NewSpacePage::FromAddress(start_ + i * Page::kPageSize);
+
+ if (!allocator->AllocateShadowChunkFor(page)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+void SemiSpace::FreeShadowChunksForPages(int start_index, int end_index) {
+ ASSERT(FLAG_shadow_pages);
+ MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
+
+ for (int i = start_index; i < end_index; i++) {
+ NewSpacePage* page =
+ NewSpacePage::FromAddress(start_ + i * Page::kPageSize);
+
+ allocator->FreeShadowChunkFor(page);
+ }
+}
+
+
void SemiSpace::TearDown() {
+ if (FLAG_shadow_pages && is_committed()) {
+ int pages = capacity_ / Page::kPageSize;
+ FreeShadowChunksForPages(0, pages);
+ }
start_ = NULL;
capacity_ = 0;
}
@@ -1530,6 +1622,11 @@ bool SemiSpace::Commit() {
new_page->InsertAfter(current);
current = new_page;
}
+ if (FLAG_shadow_pages) {
+ if (!AllocateShadowChunksForPages(0, pages)) {
+ return false;
+ }
+ }
SetCapacity(capacity_);
committed_ = true;
@@ -1540,6 +1637,12 @@ bool SemiSpace::Commit() {
bool SemiSpace::Uncommit() {
ASSERT(is_committed());
+
+ if (FLAG_shadow_pages) {
+ int pages_before = capacity_ / Page::kPageSize;
+ FreeShadowChunksForPages(0, pages_before);
+ }
+
Address start = start_ + maximum_capacity_ - capacity_;
if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
return false;
@@ -1595,6 +1698,13 @@ bool SemiSpace::GrowTo(int new_capacity) {
NewSpacePage::kCopyOnFlipFlagsMask);
last_page = new_page;
}
+
+ if (FLAG_shadow_pages) {
+ if (!AllocateShadowChunksForPages(pages_before, pages_after)) {
+ return false;
+ }
+ }
+
return true;
}
@@ -1608,11 +1718,17 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
+ int pages_after = new_capacity / Page::kPageSize;
+
+ if (FLAG_shadow_pages) {
+ int pages_before = capacity_ / Page::kPageSize;
+ FreeShadowChunksForPages(pages_after, pages_before);
+ }
+
if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
return false;
}
- int pages_after = new_capacity / Page::kPageSize;
NewSpacePage* new_last_page =
NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
new_last_page->set_next_page(anchor());
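Taken together, the SemiSpace changes keep shadow chunks in lockstep with the committed page range by page index: Commit and GrowTo allocate shadows for the newly committed indices, while Uncommit, ShrinkTo and TearDown free them before the pages go away. A small worked example of the ShrinkTo arithmetic (illustrative numbers only; assumes the default 1 MB page size):

//   capacity_ = 8 MB, new_capacity = 5 MB
//   pages_before = capacity_ / Page::kPageSize;     // 8
//   pages_after  = new_capacity / Page::kPageSize;  // 5
//   FreeShadowChunksForPages(pages_after, pages_before);
//       frees the shadows of pages 5, 6 and 7 before UncommitBlock runs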