Index: src/spaces-inl.h
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 7178b5783b94be7a063171fcc15445bd47d3c0f4..0b673e086a837887bfffe70577f39e87a9719f8c 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -264,11 +264,11 @@ void Page::set_prev_page(Page* page) {
 // allocation) so it can be used by all the allocation functions and for all
 // the paged spaces.
 HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
-  Address current_top = allocation_info_.top;
+  Address current_top = allocation_info_.top();
   Address new_top = current_top + size_in_bytes;
-  if (new_top > allocation_info_.limit) return NULL;
+  if (new_top > allocation_info_.limit()) return NULL;
-  allocation_info_.top = new_top;
+  allocation_info_.set_top(new_top);
   return HeapObject::FromAddress(current_top);
 }
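The hunk above swaps direct reads and writes of allocation_info_.top / allocation_info_.limit for accessor calls. A minimal sketch of an AllocationInfo with such accessors, assuming plain getters and setters over two Address fields; the Address typedef, field names top_ and limit_, and class layout are illustrative assumptions, not taken from this patch:

  #include <cstdint>
  typedef uint8_t* Address;  // Stand-in for V8's Address typedef (sketch only).

  class AllocationInfo {
   public:
    Address top() { return top_; }                     // Current allocation pointer.
    void set_top(Address top) { top_ = top; }          // Bump or reset the pointer.
    Address limit() { return limit_; }                 // End of the allocatable area.
    void set_limit(Address limit) { limit_ = limit; }  // Move the end of the area.
   private:
    Address top_;    // Next free address.
    Address limit_;  // First address past the allocatable area.
  };

Wrapping the fields like this lets later verification or bookkeeping be added inside the setters without touching every allocation site.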
@@ -324,29 +324,31 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes,
 MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
-  Address old_top = allocation_info_.top;
+  Address old_top = allocation_info_.top();
 #ifdef DEBUG
   // If we are stressing compaction we waste some memory in new space
   // in order to get more frequent GCs.
   if (FLAG_stress_compaction && !heap()->linear_allocation()) {
-    if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
+    if (allocation_info_.limit() - old_top >= size_in_bytes * 4) {
       int filler_size = size_in_bytes * 4;
       for (int i = 0; i < filler_size; i += kPointerSize) {
         *(reinterpret_cast<Object**>(old_top + i)) =
             heap()->one_pointer_filler_map();
       }
       old_top += filler_size;
-      allocation_info_.top += filler_size;
+      Address new_top = allocation_info_.top() + filler_size;
Michael Starzinger
2013/10/25 08:34:56
nit: Only one white-space after assignment operator.
Hannes Payer (out of office)
2013/11/20 09:58:47
Done.
+      allocation_info_.set_top(new_top);
Michael Starzinger
2013/10/25 08:34:56
nit: Maybe inline calculation instead of local variable.
Hannes Payer (out of office)
2013/11/20 09:58:47
Done.
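A minimal sketch of the inlined form this nit asks for, assuming the surrounding DEBUG block stays otherwise unchanged:

  // Inline the bump instead of going through a new_top local.
  allocation_info_.set_top(allocation_info_.top() + filler_size);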
     }
   }
 #endif
-  if (allocation_info_.limit - old_top < size_in_bytes) {
+  if (allocation_info_.limit() - old_top < size_in_bytes) {
     return SlowAllocateRaw(size_in_bytes);
   }
   HeapObject* obj = HeapObject::FromAddress(old_top);
-  allocation_info_.top += size_in_bytes;
+  Address new_top = allocation_info_.top() + size_in_bytes;
+  allocation_info_.set_top(new_top);
Michael Starzinger
2013/10/25 08:34:56
nit: Maybe inline calculation instead of local variable.
Hannes Payer (out of office)
2013/11/20 09:58:47
Done.
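As with the filler case, a minimal sketch of the inlined form for the allocation bump, assuming the rest of the function is unchanged:

  // Advance the top pointer past the newly allocated object in one call.
  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);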
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
   HeapProfiler* profiler = heap()->isolate()->heap_profiler();