Chromium Code Reviews

Unified Diff: src/heap/spaces-inl.h

Issue 1150593003: Clean up aligned allocation code in preparation for SIMD alignments. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Handle filler after object. Created 5 years, 7 months ago
Index: src/heap/spaces-inl.h
diff --git a/src/heap/spaces-inl.h b/src/heap/spaces-inl.h
index f7367650ebb24974226fe5a45e11dbdcb83be9e7..f82e7190348269b05af5fe1a5548f9b346bda6fc 100644
--- a/src/heap/spaces-inl.h
+++ b/src/heap/spaces-inl.h
@@ -250,28 +250,21 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
}
-HeapObject* PagedSpace::AllocateLinearlyAligned(int size_in_bytes,
+HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
- int alignment_size = 0;
-
- if (alignment == kDoubleAligned &&
- (OffsetFrom(current_top) & kDoubleAlignmentMask) != 0) {
- alignment_size = kPointerSize;
- size_in_bytes += alignment_size;
- } else if (alignment == kDoubleUnaligned &&
- (OffsetFrom(current_top) & kDoubleAlignmentMask) == 0) {
- alignment_size = kPointerSize;
- size_in_bytes += alignment_size;
- }
- Address new_top = current_top + size_in_bytes;
+ int filler_size = Heap::GetMisalignment(current_top, alignment);
+
+ Address new_top = current_top + filler_size + *size_in_bytes;
if (new_top > allocation_info_.limit()) return NULL;
allocation_info_.set_top(new_top);
- if (alignment_size > 0) {
- return heap()->EnsureAligned(HeapObject::FromAddress(current_top),
- size_in_bytes, alignment);
+ if (filler_size > 0) {
+ *size_in_bytes += filler_size;
+ return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
+ filler_size);
}
+
return HeapObject::FromAddress(current_top);
}
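
Note: this hunk replaces the open-coded alignment checks with a single Heap::GetMisalignment helper, which is defined elsewhere in this CL rather than in this file. The following self-contained sketch reconstructs what that helper presumably computes, based on the inline checks the patch deletes; the constants model a 32-bit build (where double alignment actually requires filler) and the kWordAligned enumerator is an assumption for illustration, not copied from the CL.

#include <cstdint>
#include <cstdio>

// Constants model a 32-bit V8 configuration, where double alignment matters:
// pointers are 4 bytes, doubles require 8-byte alignment.
constexpr int kPointerSize = 4;
constexpr uintptr_t kDoubleAlignmentMask = 8 - 1;

enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };

// Hypothetical stand-in for Heap::GetMisalignment: the number of filler bytes
// that must precede an object allocated at `top` so that it satisfies
// `alignment`. Allocation tops are always word-aligned, so the offset within
// a double word is either 0 or kPointerSize.
int GetMisalignment(uintptr_t top, AllocationAlignment alignment) {
  if (alignment == kDoubleAligned && (top & kDoubleAlignmentMask) != 0)
    return kPointerSize;
  if (alignment == kDoubleUnaligned && (top & kDoubleAlignmentMask) == 0)
    return kPointerSize;
  return 0;  // kWordAligned, or `top` already satisfies the request.
}

int main() {
  printf("%d\n", GetMisalignment(0x1000, kDoubleAligned));    // 0: already aligned
  printf("%d\n", GetMisalignment(0x1004, kDoubleAligned));    // 4: one word of filler
  printf("%d\n", GetMisalignment(0x1000, kDoubleUnaligned));  // 4: must break alignment
  return 0;
}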
@@ -303,21 +296,32 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
DCHECK(identity() == OLD_SPACE);
- HeapObject* object = AllocateLinearlyAligned(size_in_bytes, alignment);
- int aligned_size_in_bytes = size_in_bytes + kPointerSize;
+ int allocation_size = size_in_bytes;
+ HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);
if (object == NULL) {
- object = free_list_.Allocate(aligned_size_in_bytes);
+ // We don't know the actual misalignment until the object is allocated, so
+ // add the worst case filler size to the allocation request.
+ allocation_size = size_in_bytes + Heap::GetMaximumMisalignment(alignment);
+ object = free_list_.Allocate(allocation_size);
if (object == NULL) {
- object = SlowAllocateRaw(aligned_size_in_bytes);
+ object = SlowAllocateRaw(allocation_size);
}
- if (object != NULL) {
- object = heap()->EnsureAligned(object, aligned_size_in_bytes, alignment);
+ if (object != NULL && allocation_size > size_in_bytes) {
+ int filler_size = Heap::GetMisalignment(object->address(), alignment);
+ if (filler_size) {
+ object = heap()->PrecedeWithFiller(object, filler_size);
+ } else {
+ // object is aligned, fill the extra space at the end of the allocation.
+ DCHECK(allocation_size > size_in_bytes);
+ heap()->CreateFillerObjectAt(object->address() + size_in_bytes,
+ allocation_size - size_in_bytes);
+ }
}
}
if (object != NULL) {
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
return object;
}
@@ -344,19 +348,8 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
Address old_top = allocation_info_.top();
- int alignment_size = 0;
- int aligned_size_in_bytes = 0;
-
- // If double alignment is required and top pointer is not aligned, we allocate
- // additional memory to take care of the alignment.
- if (alignment == kDoubleAligned &&
- (OffsetFrom(old_top) & kDoubleAlignmentMask) != 0) {
- alignment_size += kPointerSize;
- } else if (alignment == kDoubleUnaligned &&
- (OffsetFrom(old_top) & kDoubleAlignmentMask) == 0) {
- alignment_size += kPointerSize;
- }
- aligned_size_in_bytes = size_in_bytes + alignment_size;
+ int alignment_size = Heap::GetMisalignment(old_top, alignment);
+ int aligned_size_in_bytes = size_in_bytes + alignment_size;
if (allocation_info_.limit() - old_top < aligned_size_in_bytes) {
return SlowAllocateRaw(size_in_bytes, alignment);
@@ -367,15 +360,12 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
if (alignment_size > 0) {
- obj = heap()->PrecedeWithFiller(obj);
+ obj = heap()->PrecedeWithFiller(obj, alignment_size);
}
// The slow path above ultimately goes through AllocateRaw, so this suffices.
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
- DCHECK((kDoubleAligned && (OffsetFrom(obj) & kDoubleAlignmentMask) == 0) ||
- (kDoubleUnaligned && (OffsetFrom(obj) & kDoubleAlignmentMask) != 0));
-
return obj;
}
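
Note: NewSpace keeps its original bump-pointer structure and only delegates the filler computation to the new helper. The toy allocator below mimics that fast path over a flat buffer; BumpAllocator and its arena are illustrative stand-ins under the same 32-bit assumptions as the earlier sketches, not V8 API.

#include <cstdint>
#include <cstdio>

constexpr int kPointerSize = 4;  // 32-bit layout, as in the earlier sketches.
constexpr uintptr_t kDoubleAlignmentMask = 8 - 1;

enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };

int GetMisalignment(uintptr_t top, AllocationAlignment alignment) {
  if (alignment == kDoubleAligned && (top & kDoubleAlignmentMask) != 0)
    return kPointerSize;
  if (alignment == kDoubleUnaligned && (top & kDoubleAlignmentMask) == 0)
    return kPointerSize;
  return 0;
}

// Toy bump-pointer allocator mirroring NewSpace::AllocateRawAligned's fast
// path: compute the filler up front, bail out to the (elided) slow path when
// top + filler + size would cross the limit, otherwise bump top once.
struct BumpAllocator {
  uintptr_t top, limit;
  uintptr_t AllocateRawAligned(int size, AllocationAlignment alignment) {
    int filler = GetMisalignment(top, alignment);
    if (limit - top < (uintptr_t)(size + filler)) return 0;  // slow path
    uintptr_t obj = top + filler;  // a filler object would occupy [top, obj)
    top += filler + size;
    return obj;
  }
};

int main() {
  alignas(8) static char arena[64];
  BumpAllocator a{(uintptr_t)arena, (uintptr_t)arena + sizeof(arena)};
  uintptr_t o1 = a.AllocateRawAligned(12, kWordAligned);
  uintptr_t o2 = a.AllocateRawAligned(8, kDoubleAligned);  // needs 4-byte filler
  printf("o1=%#lx o2=%#lx o2 double-aligned: %d\n",
         (unsigned long)o1, (unsigned long)o2, (int)((o2 & 7) == 0));
  return 0;
}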
