Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 40083002: Make top and limit fields in AllocationInfo private, assert on non-aligned setting of these fields, … (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 2 months ago
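
For context, a minimal sketch of the encapsulation this patch introduces: the raw top and limit fields of AllocationInfo become private, and the setters assert that any non-NULL value is object-aligned. This is not the actual src/spaces.h code; the Address typedef, the kObjectAlignmentMask constant, and the use of plain assert are stand-ins for the V8 internals, chosen so the sketch compiles on its own.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    typedef uint8_t* Address;  // stand-in for V8's Address typedef

    // Assumed 8-byte object alignment; V8 derives its mask from the
    // platform pointer size.
    static const intptr_t kObjectAlignmentMask = 8 - 1;

    class AllocationInfo {
     public:
      AllocationInfo() : top_(NULL), limit_(NULL) {}

      void set_top(Address top) {
        // Reject misaligned tops; NULL is allowed to mean "no current page".
        assert(top == NULL ||
               (reinterpret_cast<intptr_t>(top) & kObjectAlignmentMask) == 0);
        top_ = top;
      }
      Address top() const { return top_; }

      void set_limit(Address limit) {
        assert(limit == NULL ||
               (reinterpret_cast<intptr_t>(limit) & kObjectAlignmentMask) == 0);
        limit_ = limit;
      }
      Address limit() const { return limit_; }

     private:
      Address top_;    // Current allocation pointer.
      Address limit_;  // End of the current linear allocation area.
    };

    int main() {
      alignas(8) uint8_t buffer[64];
      AllocationInfo info;
      info.set_top(buffer);         // aligned: assert passes
      info.set_limit(buffer + 64);  // aligned: assert passes
      return (info.limit() - info.top() == 64) ? 0 : 1;
    }

With the fields private, every write in spaces.cc below goes through set_top()/set_limit(), which is what routes all updates through the alignment assertions.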
Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index ee6a890424be06e7da351c44c5f2757cbb214a9a..fe5eeb5e4311fe434b952261239fed62999e43b7 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -960,8 +960,8 @@ PagedSpace::PagedSpace(Heap* heap,
* AreaSize();
accounting_stats_.Clear();
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
anchor_.InitializeAsAnchor(this);
}
@@ -990,7 +990,7 @@ void PagedSpace::TearDown() {
size_t PagedSpace::CommittedPhysicalMemory() {
if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = 0;
PageIterator it(this);
while (it.has_next()) {
@@ -1142,8 +1142,9 @@ void PagedSpace::ReleasePage(Page* page, bool unlink) {
DecreaseUnsweptFreeBytes(page);
}
- if (Page::FromAllocationTop(allocation_info_.top) == page) {
- allocation_info_.top = allocation_info_.limit = NULL;
+ if (Page::FromAllocationTop(allocation_info_.top()) == page) {
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
}
if (unlink) {
@@ -1170,12 +1171,12 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
if (was_swept_conservatively_) return;
bool allocation_pointer_found_in_space =
- (allocation_info_.top == allocation_info_.limit);
+ (allocation_info_.top() == allocation_info_.limit());
PageIterator page_iterator(this);
while (page_iterator.has_next()) {
Page* page = page_iterator.next();
CHECK(page->owner() == this);
- if (page == Page::FromAllocationTop(allocation_info_.top)) {
+ if (page == Page::FromAllocationTop(allocation_info_.top())) {
allocation_pointer_found_in_space = true;
}
CHECK(page->WasSweptPrecisely());
@@ -1286,8 +1287,8 @@ void NewSpace::TearDown() {
}
start_ = NULL;
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
to_space_.TearDown();
from_space_.TearDown();
@@ -1344,22 +1345,22 @@ void NewSpace::Shrink() {
}
}
}
- allocation_info_.limit = to_space_.page_high();
+ allocation_info_.set_limit(to_space_.page_high());
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::UpdateAllocationInfo() {
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
- allocation_info_.top = to_space_.page_low();
- allocation_info_.limit = to_space_.page_high();
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.set_top(to_space_.page_low());
+ allocation_info_.set_limit(to_space_.page_high());
// Lower limit during incremental marking.
if (heap()->incremental_marking()->IsMarking() &&
inline_allocation_limit_step() != 0) {
Address new_limit =
- allocation_info_.top + inline_allocation_limit_step();
- allocation_info_.limit = Min(new_limit, allocation_info_.limit);
+ allocation_info_.top() + inline_allocation_limit_step();
+ allocation_info_.set_limit(Min(new_limit, allocation_info_.limit()));
}
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
@@ -1378,7 +1379,7 @@ void NewSpace::ResetAllocationInfo() {
bool NewSpace::AddFreshPage() {
- Address top = allocation_info_.top;
+ Address top = allocation_info_.top();
if (NewSpacePage::IsAtStart(top)) {
// The current page is already empty. Don't try to make another.
@@ -1410,15 +1411,16 @@ bool NewSpace::AddFreshPage() {
MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
- Address old_top = allocation_info_.top;
+ Address old_top = allocation_info_.top();
Address new_top = old_top + size_in_bytes;
Address high = to_space_.page_high();
- if (allocation_info_.limit < high) {
+ if (allocation_info_.limit() < high) {
// Incremental marking has lowered the limit to get a
// chance to do a step.
- allocation_info_.limit = Min(
- allocation_info_.limit + inline_allocation_limit_step_,
+ Address new_limit = Min(
+ allocation_info_.limit() + inline_allocation_limit_step_,
high);
+ allocation_info_.set_limit(new_limit);
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
heap()->incremental_marking()->Step(
bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
@@ -1973,7 +1975,7 @@ void NewSpace::RecordPromotion(HeapObject* obj) {
size_t NewSpace::CommittedPhysicalMemory() {
if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = to_space_.CommittedPhysicalMemory();
if (from_space_.is_committed()) {
size += from_space_.CommittedPhysicalMemory();
@@ -2499,9 +2501,9 @@ bool NewSpace::ReserveSpace(int bytes) {
Object* object = NULL;
if (!maybe->ToObject(&object)) return false;
HeapObject* allocation = HeapObject::cast(object);
- Address top = allocation_info_.top;
+ Address top = allocation_info_.top();
if ((top - bytes) == allocation->address()) {
- allocation_info_.top = allocation->address();
+ allocation_info_.set_top(allocation->address());
return true;
}
// There may be a borderline case here where the allocation succeeded, but
@@ -2547,9 +2549,9 @@ void PagedSpace::PrepareForMarkCompact() {
bool PagedSpace::ReserveSpace(int size_in_bytes) {
ASSERT(size_in_bytes <= AreaSize());
ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
- Address current_top = allocation_info_.top;
+ Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
- if (new_top <= allocation_info_.limit) return true;
+ if (new_top <= allocation_info_.limit()) return true;
HeapObject* new_area = free_list_.Allocate(size_in_bytes);
if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
@@ -2624,16 +2626,17 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
- if (allocation_info_.top >= allocation_info_.limit) return;
+ if (allocation_info_.top() >= allocation_info_.limit()) return;
- if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
+ if (Page::FromAllocationTop(allocation_info_.top())->
+ IsEvacuationCandidate()) {
// Create filler object to keep page iterable if it was iterable.
int remaining =
- static_cast<int>(allocation_info_.limit - allocation_info_.top);
- heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
+ static_cast<int>(allocation_info_.limit() - allocation_info_.top());
+ heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
}
}
