Chromium Code Reviews

Unified Diff: src/heap/heap.cc

Issue 1250733005: SIMD.js Add the other SIMD Phase 1 types. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 5 months ago
Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index b75c6f7bc6083d0990089dd6f3aec50e6b057b99..5ab4eed21ec907d5750698456bc4a3202510d6fb 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -2970,6 +2970,12 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
mutable_heap_number)
ALLOCATE_MAP(FLOAT32X4_TYPE, Float32x4::kSize, float32x4)
+ ALLOCATE_MAP(INT32X4_TYPE, Int32x4::kSize, int32x4)
+ ALLOCATE_MAP(BOOL32X4_TYPE, Bool32x4::kSize, bool32x4)
+ ALLOCATE_MAP(INT16X8_TYPE, Int16x8::kSize, int16x8)
+ ALLOCATE_MAP(BOOL16X8_TYPE, Bool16x8::kSize, bool16x8)
+ ALLOCATE_MAP(INT8X16_TYPE, Int8x16::kSize, int8x16)
+ ALLOCATE_MAP(BOOL8X16_TYPE, Bool8x16::kSize, bool8x16)
ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
@@ -3110,7 +3116,7 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
}
-AllocationResult Heap::AllocateFloat32x4(float w, float x, float y, float z,
+AllocationResult Heap::AllocateFloat32x4(float lanes[4],
rossberg 2015/07/29 14:37:55 Macrofication might help here, too.
bbudge 2015/07/30 13:46:58 Definitely. Done.
PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate SIMD values in paged
// spaces.
@@ -3127,11 +3133,199 @@ AllocationResult Heap::AllocateFloat32x4(float w, float x, float y, float z,
}
result->set_map_no_write_barrier(float32x4_map());
- Float32x4* float32x4 = Float32x4::cast(result);
- float32x4->set_lane(0, w);
- float32x4->set_lane(1, x);
- float32x4->set_lane(2, y);
- float32x4->set_lane(3, z);
+ Float32x4* instance = Float32x4::cast(result);
+ instance->set_lane(0, lanes[0]);
+ instance->set_lane(1, lanes[1]);
+ instance->set_lane(2, lanes[2]);
+ instance->set_lane(3, lanes[3]);
+ return result;
+}
+
+
+AllocationResult Heap::AllocateInt32x4(int32_t lanes[4],
+ PretenureFlag pretenure) {
+ // Statically ensure that it is safe to allocate SIMD values in paged
+ // spaces.
+ int size = Int32x4::kSize;
+ STATIC_ASSERT(Int32x4::kSize <= Page::kMaxRegularHeapObjectSize);
+
+ AllocationSpace space = SelectSpace(size, pretenure);
+
+ HeapObject* result;
+ {
+ AllocationResult allocation =
+ AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
+ if (!allocation.To(&result)) return allocation;
+ }
+
+ result->set_map_no_write_barrier(int32x4_map());
+ Int32x4* instance = Int32x4::cast(result);
+ instance->set_lane(0, lanes[0]);
+ instance->set_lane(1, lanes[1]);
+ instance->set_lane(2, lanes[2]);
+ instance->set_lane(3, lanes[3]);
+ return result;
+}
+
+
+AllocationResult Heap::AllocateBool32x4(bool lanes[4],
+ PretenureFlag pretenure) {
+ // Statically ensure that it is safe to allocate SIMD values in paged
+ // spaces.
+ int size = Bool32x4::kSize;
+ STATIC_ASSERT(Bool32x4::kSize <= Page::kMaxRegularHeapObjectSize);
+
+ AllocationSpace space = SelectSpace(size, pretenure);
+
+ HeapObject* result;
+ {
+ AllocationResult allocation =
+ AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
+ if (!allocation.To(&result)) return allocation;
+ }
+
+ result->set_map_no_write_barrier(bool32x4_map());
+ Bool32x4* instance = Bool32x4::cast(result);
+ instance->set_lane(0, lanes[0]);
+ instance->set_lane(1, lanes[1]);
+ instance->set_lane(2, lanes[2]);
+ instance->set_lane(3, lanes[3]);
+ return result;
+}
+
+
+AllocationResult Heap::AllocateInt16x8(int16_t lanes[8],
+ PretenureFlag pretenure) {
+ // Statically ensure that it is safe to allocate SIMD values in paged
+ // spaces.
+ int size = Int16x8::kSize;
+ STATIC_ASSERT(Int16x8::kSize <= Page::kMaxRegularHeapObjectSize);
+
+ AllocationSpace space = SelectSpace(size, pretenure);
+
+ HeapObject* result;
+ {
+ AllocationResult allocation =
+ AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
+ if (!allocation.To(&result)) return allocation;
+ }
+
+ result->set_map_no_write_barrier(int16x8_map());
+ Int16x8* instance = Int16x8::cast(result);
+ instance->set_lane(0, lanes[0]);
+ instance->set_lane(1, lanes[1]);
+ instance->set_lane(2, lanes[2]);
+ instance->set_lane(3, lanes[3]);
+ instance->set_lane(4, lanes[4]);
+ instance->set_lane(5, lanes[5]);
+ instance->set_lane(6, lanes[6]);
+ instance->set_lane(7, lanes[7]);
+ return result;
+}
+
+
+AllocationResult Heap::AllocateBool16x8(bool lanes[8],
+ PretenureFlag pretenure) {
+ // Statically ensure that it is safe to allocate SIMD values in paged
+ // spaces.
+ int size = Bool16x8::kSize;
+ STATIC_ASSERT(Bool16x8::kSize <= Page::kMaxRegularHeapObjectSize);
+
+ AllocationSpace space = SelectSpace(size, pretenure);
+
+ HeapObject* result;
+ {
+ AllocationResult allocation =
+ AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
+ if (!allocation.To(&result)) return allocation;
+ }
+
+ result->set_map_no_write_barrier(bool16x8_map());
+ Bool16x8* instance = Bool16x8::cast(result);
+ instance->set_lane(0, lanes[0]);
+ instance->set_lane(1, lanes[1]);
+ instance->set_lane(2, lanes[2]);
+ instance->set_lane(3, lanes[3]);
+ instance->set_lane(4, lanes[4]);
+ instance->set_lane(5, lanes[5]);
+ instance->set_lane(6, lanes[6]);
+ instance->set_lane(7, lanes[7]);
+ return result;
+}
+
+
+AllocationResult Heap::AllocateInt8x16(int8_t lanes[16],
+ PretenureFlag pretenure) {
+ // Statically ensure that it is safe to allocate SIMD values in paged
+ // spaces.
+ int size = Int8x16::kSize;
+ STATIC_ASSERT(Int8x16::kSize <= Page::kMaxRegularHeapObjectSize);
+
+ AllocationSpace space = SelectSpace(size, pretenure);
+
+ HeapObject* result;
+ {
+ AllocationResult allocation =
+ AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
+ if (!allocation.To(&result)) return allocation;
+ }
+
+ result->set_map_no_write_barrier(int8x16_map());
+ Int8x16* instance = Int8x16::cast(result);
+ instance->set_lane(0, lanes[0]);
+ instance->set_lane(1, lanes[1]);
+ instance->set_lane(2, lanes[2]);
+ instance->set_lane(3, lanes[3]);
+ instance->set_lane(4, lanes[4]);
+ instance->set_lane(5, lanes[5]);
+ instance->set_lane(6, lanes[6]);
+ instance->set_lane(7, lanes[7]);
+ instance->set_lane(8, lanes[8]);
+ instance->set_lane(9, lanes[9]);
+ instance->set_lane(10, lanes[10]);
+ instance->set_lane(11, lanes[11]);
+ instance->set_lane(12, lanes[12]);
+ instance->set_lane(13, lanes[13]);
+ instance->set_lane(14, lanes[14]);
+ instance->set_lane(15, lanes[15]);
+ return result;
+}
+
+
+AllocationResult Heap::AllocateBool8x16(bool lanes[16],
+ PretenureFlag pretenure) {
+ // Statically ensure that it is safe to allocate SIMD values in paged
+ // spaces.
+ int size = Bool8x16::kSize;
+ STATIC_ASSERT(Bool8x16::kSize <= Page::kMaxRegularHeapObjectSize);
+
+ AllocationSpace space = SelectSpace(size, pretenure);
+
+ HeapObject* result;
+ {
+ AllocationResult allocation =
+ AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
+ if (!allocation.To(&result)) return allocation;
+ }
+
+ result->set_map_no_write_barrier(bool8x16_map());
+ Bool8x16* instance = Bool8x16::cast(result);
+ instance->set_lane(0, lanes[0]);
+ instance->set_lane(1, lanes[1]);
+ instance->set_lane(2, lanes[2]);
+ instance->set_lane(3, lanes[3]);
+ instance->set_lane(4, lanes[4]);
+ instance->set_lane(5, lanes[5]);
+ instance->set_lane(6, lanes[6]);
+ instance->set_lane(7, lanes[7]);
+ instance->set_lane(8, lanes[8]);
+ instance->set_lane(9, lanes[9]);
+ instance->set_lane(10, lanes[10]);
+ instance->set_lane(11, lanes[11]);
+ instance->set_lane(12, lanes[12]);
+ instance->set_lane(13, lanes[13]);
+ instance->set_lane(14, lanes[14]);
+ instance->set_lane(15, lanes[15]);
return result;
}
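
Following up on the macrofication exchange above (rossberg's suggestion, which bbudge addressed in a later patch set): a minimal sketch of what collapsing the six near-identical allocators into one macro could look like. The macro names, the parameter order, and the SIMD128_TYPES(V) list macro assumed here are illustrative, not necessarily what actually landed:

  // Hypothetical X-macro listing (TYPE, Type, type, lane_count, lane_type)
  // for each Phase 1 SIMD type; the real list would belong in objects.h.
  #define SIMD128_TYPES(V)                              \
    V(FLOAT32X4_TYPE, Float32x4, float32x4, 4, float)   \
    V(INT32X4_TYPE, Int32x4, int32x4, 4, int32_t)       \
    V(BOOL32X4_TYPE, Bool32x4, bool32x4, 4, bool)       \
    V(INT16X8_TYPE, Int16x8, int16x8, 8, int16_t)       \
    V(BOOL16X8_TYPE, Bool16x8, bool16x8, 8, bool)       \
    V(INT8X16_TYPE, Int8x16, int8x16, 16, int8_t)       \
    V(BOOL8X16_TYPE, Bool8x16, bool8x16, 16, bool)

  #define SIMD_ALLOCATE_DEFINITION(TYPE, Type, type, lane_count, lane_type) \
    AllocationResult Heap::Allocate##Type(lane_type lanes[lane_count],      \
                                          PretenureFlag pretenure) {        \
      int size = Type::kSize;                                               \
      STATIC_ASSERT(Type::kSize <= Page::kMaxRegularHeapObjectSize);        \
      AllocationSpace space = SelectSpace(size, pretenure);                 \
      HeapObject* result = nullptr;                                         \
      {                                                                     \
        AllocationResult allocation =                                       \
            AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);         \
        if (!allocation.To(&result)) return allocation;                     \
      }                                                                     \
      result->set_map_no_write_barrier(type##_map());                       \
      Type* instance = Type::cast(result);                                  \
      /* A loop replaces the unrolled set_lane calls; the compiler is */    \
      /* free to unroll it for these small, constant lane counts. */        \
      for (int i = 0; i < lane_count; i++) {                                \
        instance->set_lane(i, lanes[i]);                                    \
      }                                                                     \
      return result;                                                        \
    }
  SIMD128_TYPES(SIMD_ALLOCATE_DEFINITION)
  #undef SIMD_ALLOCATE_DEFINITION

This keeps the STATIC_ASSERT and the kSimd128Unaligned allocation request from each hand-written version while eliminating the duplication the review comment pointed at.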
