Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 4a508fb078f3a68ef965ddc664834964e07898dd..a586cc1cc12a645befb875d7b6b8a01240a8260f 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -2718,6 +2718,7 @@ bool Heap::CreateInitialMaps() {
     ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
     ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
                  mutable_heap_number)
+    ALLOCATE_MAP(FLOAT32X4_TYPE, Float32x4::kSize, float32x4)
     ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
     ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)

@@ -2867,6 +2868,31 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
 }


+AllocationResult Heap::AllocateFloat32x4(double w, double x, double y, double z,
+                                         PretenureFlag pretenure) {
+  // Statically ensure that it is safe to allocate SIMD values in paged
+  // spaces.
+  int size = Float32x4::kSize;
+  STATIC_ASSERT(Float32x4::kSize <= Page::kMaxRegularHeapObjectSize);
+
+  AllocationSpace space = SelectSpace(size, pretenure);
+
+  HeapObject* result;
+  {
+    AllocationResult allocation =
+        AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(float32x4_map());
+  Float32x4::cast(result)->set_lane(0, w);
+  Float32x4::cast(result)->set_lane(1, x);
+  Float32x4::cast(result)->set_lane(2, y);
+  Float32x4::cast(result)->set_lane(3, z);
+  return result;
+}
+
+
 AllocationResult Heap::AllocateCell(Object* value) {
   int size = Cell::kSize;
   STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
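
For orientation, the new hunk follows V8's usual allocate-then-initialize shape: reserve raw space for the object, install its map without a write barrier, then write the four float lanes; the kSimd128Unaligned request is there so the 128-bit lane payload ends up on a 16-byte boundary. The standalone C++ sketch below only mirrors that shape outside of V8; every name in it (Float32x4Box, type_tag, AllocateFloat32x4Box) is illustrative and not part of the patch.

// Standalone sketch, not V8 code: imitates the allocate-then-tag-then-
// initialize pattern from the hunk above. alignas(16) loosely stands in
// for the 16-byte placement of the lane data; the char* tag stands in
// for the float32x4 map pointer.
#include <cstdio>

struct alignas(16) Float32x4Box {
  const char* type_tag;  // placeholder for the map pointer
  float lanes[4];
  void set_lane(int i, double value) { lanes[i] = static_cast<float>(value); }
};

Float32x4Box* AllocateFloat32x4Box(double w, double x, double y, double z) {
  // Allocate raw storage, then tag the object, then write the lanes,
  // mirroring the order used in Heap::AllocateFloat32x4 above.
  Float32x4Box* result = new Float32x4Box;
  result->type_tag = "float32x4";
  result->set_lane(0, w);
  result->set_lane(1, x);
  result->set_lane(2, y);
  result->set_lane(3, z);
  return result;
}

int main() {
  Float32x4Box* v = AllocateFloat32x4Box(1.0, 2.0, 3.0, 4.0);
  std::printf("%s: [%g, %g, %g, %g]\n", v->type_tag, v->lanes[0], v->lanes[1],
              v->lanes[2], v->lanes[3]);
  delete v;
  return 0;
}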