| Index: src/heap/heap.cc
|
| diff --git a/src/heap/heap.cc b/src/heap/heap.cc
|
| index 38968ad0b33b026b2a6087a908044a9026d8c009..08eaf20308fbe815966fc8448ad174f00d7ea273 100644
|
| --- a/src/heap/heap.cc
|
| +++ b/src/heap/heap.cc
|
| @@ -2742,6 +2742,7 @@ bool Heap::CreateInitialMaps() {
|
| ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
|
| ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
|
| mutable_heap_number)
|
| + ALLOCATE_MAP(FLOAT32X4_TYPE, Float32x4::kSize, float32x4)
|
| ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
|
| ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
|
|
|
| @@ -2891,6 +2892,32 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
|
| }
|
|
|
|
|
| +// Allocates a new Float32x4 heap object and initializes its four float
| +// lanes from the arguments.  NOTE(review): the argument order is
| +// (w, x, y, z) and maps to lanes 0..3 in that order — callers must
| +// follow this convention.  On allocation failure the retry result from
| +// AllocateRaw is propagated to the caller.
|
| +AllocationResult Heap::AllocateFloat32x4(float w, float x, float y, float z,
|
| + PretenureFlag pretenure) {
|
| + // Statically ensure that it is safe to allocate SIMD values in paged
|
| + // spaces.
|
| + int size = Float32x4::kSize;
|
| + STATIC_ASSERT(Float32x4::kSize <= Page::kMaxRegularHeapObjectSize);
|
| +
|
| + // `pretenure` selects the target space (new vs. old) via SelectSpace.
|
| + AllocationSpace space = SelectSpace(size, pretenure);
|
| +
|
| + HeapObject* result;
|
| + {
|
| + // kSimd128Unaligned asks AllocateRaw for SIMD128-specific placement —
| + // presumably so the float lane payload (which follows the map word)
| + // ends up 16-byte aligned; TODO confirm against AllocateRaw's
| + // alignment handling.  OLD_SPACE is the retry space if `space` fails.
|
| + AllocationResult allocation =
|
| + AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
|
| + if (!allocation.To(&result)) return allocation;
|
| + }
|
| +
|
| + // Install the map without a write barrier — the object is freshly
| + // allocated, so no old-to-new store is being recorded here (same
| + // pattern as the neighboring Allocate* functions in this file).
|
| + result->set_map_no_write_barrier(float32x4_map());
|
| + Float32x4* float32x4 = Float32x4::cast(result);
|
| + float32x4->set_lane(0, w);
|
| + float32x4->set_lane(1, x);
|
| + float32x4->set_lane(2, y);
|
| + float32x4->set_lane(3, z);
|
| + return result;
|
| +}
|
| +
|
| +
|
| AllocationResult Heap::AllocateCell(Object* value) {
|
| int size = Cell::kSize;
|
| STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
|
|
|