Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 7fc7e954aefa55234fedc844c4ca13a1795a03c7..75d60bc7f499f686bedae3296253818ddaf011cd 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -1989,7 +1989,7 @@
 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
                                           int instance_size) {
   Object* result = nullptr;
-  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
+  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
   if (!allocation.To(&result)) return allocation;
   // Map::cast cannot be used due to uninitialized map field.
@@ -2023,7 +2023,7 @@
                                    int instance_size,
                                    ElementsKind elements_kind) {
   HeapObject* result = nullptr;
-  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
+  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
   if (!allocation.To(&result)) return allocation;
   result->set_map_no_write_barrier(meta_map());
@@ -2064,7 +2064,7 @@
   HeapObject* obj = nullptr;
   {
     AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
-    AllocationResult allocation = AllocateRaw(size, space, align);
+    AllocationResult allocation = AllocateRaw(size, space, space, align);
     if (!allocation.To(&obj)) return allocation;
   }
 #ifdef DEBUG
@@ -2377,7 +2377,8 @@
   HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, space, kDoubleUnaligned);
+    AllocationResult allocation =
+        AllocateRaw(size, space, OLD_SPACE, kDoubleUnaligned);
     if (!allocation.To(&result)) return allocation;
   }
@@ -2398,7 +2399,7 @@
     HeapObject* result = nullptr;                                         \
     {                                                                     \
       AllocationResult allocation =                                       \
-          AllocateRaw(size, space, kSimd128Unaligned);                    \
+          AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);         \
       if (!allocation.To(&result)) return allocation;                     \
     }                                                                     \
                                                                           \
@@ -2419,7 +2420,7 @@
   HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
   result->set_map_no_write_barrier(cell_map());
@@ -2433,7 +2434,7 @@
   STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
   HeapObject* result = nullptr;
-  AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+  AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
   if (!allocation.To(&result)) return allocation;
   result->set_map_no_write_barrier(global_property_cell_map());
@@ -2451,7 +2452,7 @@
   STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
   HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
   result->set_map_no_write_barrier(weak_cell_map());
@@ -2936,7 +2937,7 @@
   AllocationSpace space = SelectSpace(pretenure);
   HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, space);
+    AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
@@ -2960,7 +2961,7 @@
   int size = BytecodeArray::SizeFor(length);
   HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
@@ -3147,7 +3148,7 @@
   AllocationSpace space = SelectSpace(pretenure);
   HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, space);
+    AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
@@ -3192,7 +3193,7 @@
   HeapObject* object = nullptr;
   AllocationResult allocation = AllocateRaw(
-      size, space,
+      size, space, OLD_SPACE,
       array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
   if (!allocation.To(&object)) return allocation;
@@ -3210,7 +3211,8 @@
 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
   DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
-  AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);
+  AllocationResult allocation =
+      AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
   HeapObject* result = nullptr;
   if (!allocation.To(&result)) return allocation;
@@ -3249,7 +3251,7 @@
   HeapObject* result = nullptr;
   // Allocate an object the same size as the code object.
   int obj_size = code->Size();
-  allocation = AllocateRaw(obj_size, CODE_SPACE);
+  allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
   if (!allocation.To(&result)) return allocation;
   // Copy code object.
@@ -3288,7 +3290,8 @@
       static_cast<size_t>(code->instruction_end() - old_addr);
   HeapObject* result = nullptr;
-  AllocationResult allocation = AllocateRaw(new_obj_size, CODE_SPACE);
+  AllocationResult allocation =
+      AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
   if (!allocation.To(&result)) return allocation;
   // Copy code object.
@@ -3334,12 +3337,15 @@
                                 AllocationSite* allocation_site) {
   DCHECK(gc_state_ == NOT_IN_GC);
   DCHECK(map->instance_type() != MAP_TYPE);
+  // If allocation failures are disallowed, we may allocate in a different
+  // space when new space is full and the object is not a large object.
+  AllocationSpace retry_space = (space != NEW_SPACE) ? space : OLD_SPACE;
   int size = map->instance_size();
   if (allocation_site != NULL) {
     size += AllocationMemento::kSize;
   }
   HeapObject* result = nullptr;
-  AllocationResult allocation = AllocateRaw(size, space);
+  AllocationResult allocation = AllocateRaw(size, space, retry_space);
   if (!allocation.To(&result)) return allocation;
   // No need for write barrier since object is white and map is in old space.
   result->set_map_no_write_barrier(map);
@@ -3441,20 +3447,65 @@
   DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
-  int adjusted_object_size =
-      site != NULL ? object_size + AllocationMemento::kSize : object_size;
-  AllocationResult allocation = AllocateRaw(adjusted_object_size, NEW_SPACE);
-  if (!allocation.To(&clone)) return allocation;
-
-  SLOW_DCHECK(InNewSpace(clone));
-  // Since we know the clone is allocated in new space, we can copy
-  // the contents without worrying about updating the write barrier.
-  CopyBlock(clone->address(), source->address(), object_size);
-
-  if (site != NULL) {
-    AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
-        reinterpret_cast<Address>(clone) + object_size);
-    InitializeAllocationMemento(alloc_memento, site);
+  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
+
+  // If we're forced to always allocate, we use the general allocation
+  // functions which may leave us with an object in old space.
+  if (always_allocate()) {
+    {
+      AllocationResult allocation =
+          AllocateRaw(object_size, NEW_SPACE, OLD_SPACE);
+      if (!allocation.To(&clone)) return allocation;
+    }
+    Address clone_address = clone->address();
+    CopyBlock(clone_address, source->address(), object_size);
+
+    // Update write barrier for all tagged fields that lie beyond the header.
+    const int start_offset = JSObject::kHeaderSize;
+    const int end_offset = object_size;
+
+#if V8_DOUBLE_FIELDS_UNBOXING
+    LayoutDescriptorHelper helper(map);
+    bool has_only_tagged_fields = helper.all_fields_tagged();
+
+    if (!has_only_tagged_fields) {
+      for (int offset = start_offset; offset < end_offset;) {
+        int end_of_region_offset;
+        if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
+          RecordWrites(clone_address, offset,
+                       (end_of_region_offset - offset) / kPointerSize);
+        }
+        offset = end_of_region_offset;
+      }
+    } else {
+#endif
+      // Object has only tagged fields.
+      RecordWrites(clone_address, start_offset,
+                   (end_offset - start_offset) / kPointerSize);
+#if V8_DOUBLE_FIELDS_UNBOXING
+    }
+#endif
+
+  } else {
+    wb_mode = SKIP_WRITE_BARRIER;
+
+    {
+      int adjusted_object_size =
+          site != NULL ? object_size + AllocationMemento::kSize : object_size;
+      AllocationResult allocation =
+          AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
+      if (!allocation.To(&clone)) return allocation;
+    }
+    SLOW_DCHECK(InNewSpace(clone));
+    // Since we know the clone is allocated in new space, we can copy
+    // the contents without worrying about updating the write barrier.
+    CopyBlock(clone->address(), source->address(), object_size);
+
+    if (site != NULL) {
+      AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+          reinterpret_cast<Address>(clone) + object_size);
+      InitializeAllocationMemento(alloc_memento, site);
+    }
   }
   SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
@@ -3475,7 +3526,7 @@
       }
       if (!allocation.To(&elem)) return allocation;
     }
-    JSObject::cast(clone)->set_elements(elem, SKIP_WRITE_BARRIER);
+    JSObject::cast(clone)->set_elements(elem, wb_mode);
   }
   // Update properties if necessary.
   if (properties->length() > 0) {
@@ -3484,7 +3535,7 @@
       AllocationResult allocation = CopyFixedArray(properties);
       if (!allocation.To(&prop)) return allocation;
     }
-    JSObject::cast(clone)->set_properties(prop, SKIP_WRITE_BARRIER);
+    JSObject::cast(clone)->set_properties(prop, wb_mode);
   }
   // Return the new clone.
   return clone;
@@ -3558,7 +3609,7 @@
   // Allocate string.
   HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
@@ -3600,7 +3651,7 @@
   HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, space);
+    AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
@@ -3624,7 +3675,7 @@
   HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, space);
+    AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
@@ -3641,7 +3692,7 @@
   int size = FixedArray::SizeFor(0);
   HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
   // Initialize the object.
@@ -3757,7 +3808,7 @@
   int size = FixedArray::SizeFor(length);
   AllocationSpace space = SelectSpace(pretenure);
-  return AllocateRaw(size, space);
+  return AllocateRaw(size, space, OLD_SPACE);
 }
@@ -3828,7 +3879,8 @@
   HeapObject* object = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, space, kDoubleAligned);
+    AllocationResult allocation =
+        AllocateRaw(size, space, OLD_SPACE, kDoubleAligned);
     if (!allocation.To(&object)) return allocation;
   }
@@ -3841,7 +3893,8 @@
   STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
   HeapObject* result = nullptr;
-  AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
+  AllocationResult allocation =
+      AllocateRaw(Symbol::kSize, OLD_SPACE, OLD_SPACE);
   if (!allocation.To(&result)) return allocation;
   result->set_map_no_write_barrier(symbol_map());
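
Note: every AllocateRaw call above now passes an explicit retry space as its third argument; the matching declaration in heap.h / heap-inl.h is not part of this diff. The minimal standalone sketch below only models the assumed retry behavior (try the requested space first, fall back to the retry space if that attempt fails). SimulatedHeap, TrySpace and the byte budgets are invented names for illustration, not V8 APIs.

// Illustrative model only: the enum values mirror the spaces named in the
// patch, but SimulatedHeap and TrySpace are stand-ins, not V8 internals.
#include <cstddef>
#include <cstdio>
#include <new>

enum AllocationSpace { NEW_SPACE, OLD_SPACE, MAP_SPACE, CODE_SPACE };

struct AllocationResult {
  void* object;  // nullptr signals a failed allocation
  bool To(void** out) const {
    *out = object;
    return object != nullptr;
  }
};

struct SimulatedHeap {
  // Deliberately tiny new space so the fallback path is exercised.
  size_t new_space_free = 64;
  size_t old_space_free = 1024;

  // Stand-in for a per-space allocation attempt that can fail.
  AllocationResult TrySpace(int size, AllocationSpace space) {
    size_t* free_bytes =
        (space == NEW_SPACE) ? &new_space_free : &old_space_free;
    if (static_cast<size_t>(size) > *free_bytes) return {nullptr};
    *free_bytes -= static_cast<size_t>(size);
    return {::operator new(static_cast<size_t>(size))};
  }

  // Mirrors the call shape used throughout the patch: allocate in `space`,
  // and if that fails retry once in `retry_space`.
  AllocationResult AllocateRaw(int size, AllocationSpace space,
                               AllocationSpace retry_space) {
    AllocationResult result = TrySpace(size, space);
    if (result.object == nullptr && retry_space != space) {
      result = TrySpace(size, retry_space);
    }
    return result;
  }
};

int main() {
  SimulatedHeap heap;
  void* obj = nullptr;
  // The second call exceeds the simulated new space and falls back to old
  // space, analogous to AllocateRaw(size, NEW_SPACE, OLD_SPACE) in the patch.
  std::printf("first ok: %d\n",
              heap.AllocateRaw(48, NEW_SPACE, OLD_SPACE).To(&obj));
  std::printf("second ok: %d\n",
              heap.AllocateRaw(48, NEW_SPACE, OLD_SPACE).To(&obj));
  return 0;
}

Passing the fallback explicitly at each call site keeps the retry decision visible in the caller: map allocations retry in MAP_SPACE, code allocations in CODE_SPACE, and most other allocations in OLD_SPACE, as the hunks above show.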