Chromium Code Reviews

Unified Diff: src/heap/heap.cc

Issue 1250733005: SIMD.js Add the other SIMD Phase 1 types. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 4 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/v8.h"

 #include "src/accessors.h"
 #include "src/api.h"
 #include "src/base/bits.h"
 #include "src/base/once.h"
(...skipping 2952 matching lines...)
     ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)

     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
     DCHECK(fixed_array_map() != fixed_cow_array_map());

     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
     ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
     ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
                  mutable_heap_number)
     ALLOCATE_MAP(FLOAT32X4_TYPE, Float32x4::kSize, float32x4)
+    ALLOCATE_MAP(INT32X4_TYPE, Int32x4::kSize, int32x4)
+    ALLOCATE_MAP(BOOL32X4_TYPE, Bool32x4::kSize, bool32x4)
+    ALLOCATE_MAP(INT16X8_TYPE, Int16x8::kSize, int16x8)
+    ALLOCATE_MAP(BOOL16X8_TYPE, Bool16x8::kSize, bool16x8)
+    ALLOCATE_MAP(INT8X16_TYPE, Int8x16::kSize, int8x16)
+    ALLOCATE_MAP(BOOL8X16_TYPE, Bool8x16::kSize, bool8x16)
     ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
     ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)

     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
(...skipping 120 matching lines...)
     if (!allocation.To(&result)) return allocation;
   }

   Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
   HeapObject::cast(result)->set_map_no_write_barrier(map);
   HeapNumber::cast(result)->set_value(value);
   return result;
 }


-AllocationResult Heap::AllocateFloat32x4(float w, float x, float y, float z,
+AllocationResult Heap::AllocateFloat32x4(float lanes[4],
rossberg 2015/07/29 14:37:55: Macrofication might help here, too.
bbudge 2015/07/30 13:46:58: Definitely. Done.
                                          PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate SIMD values in paged
   // spaces.
   int size = Float32x4::kSize;
   STATIC_ASSERT(Float32x4::kSize <= Page::kMaxRegularHeapObjectSize);

   AllocationSpace space = SelectSpace(size, pretenure);

   HeapObject* result;
   {
     AllocationResult allocation =
         AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
     if (!allocation.To(&result)) return allocation;
   }

   result->set_map_no_write_barrier(float32x4_map());
-  Float32x4* float32x4 = Float32x4::cast(result);
-  float32x4->set_lane(0, w);
-  float32x4->set_lane(1, x);
-  float32x4->set_lane(2, y);
-  float32x4->set_lane(3, z);
+  Float32x4* instance = Float32x4::cast(result);
+  instance->set_lane(0, lanes[0]);
+  instance->set_lane(1, lanes[1]);
+  instance->set_lane(2, lanes[2]);
+  instance->set_lane(3, lanes[3]);
   return result;
 }
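The six allocators added below repeat this body verbatim, varying only the lane type, lane count, and map accessor; that repetition is what rossberg's comment above targets. A minimal sketch of one way to macrofy them, in the style of V8's existing X-macro lists (the list and macro names here are illustrative, not necessarily what the follow-up patch set defines):

// Illustrative sketch only: an X-macro listing each SIMD type as
// (Type, type, lane_count, lane_type). The names below are assumptions,
// not necessarily those used in the later patch set.
#define SIMD128_TYPES(V)              \
  V(Float32x4, float32x4, 4, float)   \
  V(Int32x4, int32x4, 4, int32_t)     \
  V(Bool32x4, bool32x4, 4, bool)      \
  V(Int16x8, int16x8, 8, int16_t)     \
  V(Bool16x8, bool16x8, 8, bool)      \
  V(Int8x16, int8x16, 16, int8_t)     \
  V(Bool8x16, bool8x16, 16, bool)

// Expands to one Heap::Allocate##Type definition per list entry, replacing
// the seven hand-written bodies with a single shared body.
#define SIMD_ALLOCATE_DEFINITION(Type, type, lane_count, lane_type)  \
  AllocationResult Heap::Allocate##Type(lane_type lanes[lane_count], \
                                        PretenureFlag pretenure) {   \
    int size = Type::kSize;                                          \
    STATIC_ASSERT(Type::kSize <= Page::kMaxRegularHeapObjectSize);   \
    AllocationSpace space = SelectSpace(size, pretenure);            \
    HeapObject* result;                                              \
    {                                                                \
      AllocationResult allocation =                                  \
          AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);    \
      if (!allocation.To(&result)) return allocation;                \
    }                                                                \
    result->set_map_no_write_barrier(type##_map());                  \
    Type* instance = Type::cast(result);                             \
    for (int i = 0; i < lane_count; i++) {                           \
      instance->set_lane(i, lanes[i]);                               \
    }                                                                \
    return result;                                                   \
  }

SIMD128_TYPES(SIMD_ALLOCATE_DEFINITION)
#undef SIMD_ALLOCATE_DEFINITION

Expanding the list through the definition yields the functions below with unchanged behavior, and a future SIMD type becomes a one-line list edit.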
 
+
+AllocationResult Heap::AllocateInt32x4(int32_t lanes[4],
+                                       PretenureFlag pretenure) {
+  // Statically ensure that it is safe to allocate SIMD values in paged
+  // spaces.
+  int size = Int32x4::kSize;
+  STATIC_ASSERT(Int32x4::kSize <= Page::kMaxRegularHeapObjectSize);
+
+  AllocationSpace space = SelectSpace(size, pretenure);
+
+  HeapObject* result;
+  {
+    AllocationResult allocation =
+        AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(int32x4_map());
+  Int32x4* instance = Int32x4::cast(result);
+  instance->set_lane(0, lanes[0]);
+  instance->set_lane(1, lanes[1]);
+  instance->set_lane(2, lanes[2]);
+  instance->set_lane(3, lanes[3]);
+  return result;
+}
+
+
+AllocationResult Heap::AllocateBool32x4(bool lanes[4],
+                                        PretenureFlag pretenure) {
+  // Statically ensure that it is safe to allocate SIMD values in paged
+  // spaces.
+  int size = Bool32x4::kSize;
+  STATIC_ASSERT(Bool32x4::kSize <= Page::kMaxRegularHeapObjectSize);
+
+  AllocationSpace space = SelectSpace(size, pretenure);
+
+  HeapObject* result;
+  {
+    AllocationResult allocation =
+        AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(bool32x4_map());
+  Bool32x4* instance = Bool32x4::cast(result);
+  instance->set_lane(0, lanes[0]);
+  instance->set_lane(1, lanes[1]);
+  instance->set_lane(2, lanes[2]);
+  instance->set_lane(3, lanes[3]);
+  return result;
+}
+
+
+AllocationResult Heap::AllocateInt16x8(int16_t lanes[8],
+                                       PretenureFlag pretenure) {
+  // Statically ensure that it is safe to allocate SIMD values in paged
+  // spaces.
+  int size = Int16x8::kSize;
+  STATIC_ASSERT(Int16x8::kSize <= Page::kMaxRegularHeapObjectSize);
+
+  AllocationSpace space = SelectSpace(size, pretenure);
+
+  HeapObject* result;
+  {
+    AllocationResult allocation =
+        AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(int16x8_map());
+  Int16x8* instance = Int16x8::cast(result);
+  instance->set_lane(0, lanes[0]);
+  instance->set_lane(1, lanes[1]);
+  instance->set_lane(2, lanes[2]);
+  instance->set_lane(3, lanes[3]);
+  instance->set_lane(4, lanes[4]);
+  instance->set_lane(5, lanes[5]);
+  instance->set_lane(6, lanes[6]);
+  instance->set_lane(7, lanes[7]);
+  return result;
+}
+
+
+AllocationResult Heap::AllocateBool16x8(bool lanes[8],
+                                        PretenureFlag pretenure) {
+  // Statically ensure that it is safe to allocate SIMD values in paged
+  // spaces.
+  int size = Bool16x8::kSize;
+  STATIC_ASSERT(Bool16x8::kSize <= Page::kMaxRegularHeapObjectSize);
+
+  AllocationSpace space = SelectSpace(size, pretenure);
+
+  HeapObject* result;
+  {
+    AllocationResult allocation =
+        AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(bool16x8_map());
+  Bool16x8* instance = Bool16x8::cast(result);
+  instance->set_lane(0, lanes[0]);
+  instance->set_lane(1, lanes[1]);
+  instance->set_lane(2, lanes[2]);
+  instance->set_lane(3, lanes[3]);
+  instance->set_lane(4, lanes[4]);
+  instance->set_lane(5, lanes[5]);
+  instance->set_lane(6, lanes[6]);
+  instance->set_lane(7, lanes[7]);
+  return result;
+}
+
+
+AllocationResult Heap::AllocateInt8x16(int8_t lanes[16],
+                                       PretenureFlag pretenure) {
+  // Statically ensure that it is safe to allocate SIMD values in paged
+  // spaces.
+  int size = Int8x16::kSize;
+  STATIC_ASSERT(Int8x16::kSize <= Page::kMaxRegularHeapObjectSize);
+
+  AllocationSpace space = SelectSpace(size, pretenure);
+
+  HeapObject* result;
+  {
+    AllocationResult allocation =
+        AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(int8x16_map());
+  Int8x16* instance = Int8x16::cast(result);
+  instance->set_lane(0, lanes[0]);
+  instance->set_lane(1, lanes[1]);
+  instance->set_lane(2, lanes[2]);
+  instance->set_lane(3, lanes[3]);
+  instance->set_lane(4, lanes[4]);
+  instance->set_lane(5, lanes[5]);
+  instance->set_lane(6, lanes[6]);
+  instance->set_lane(7, lanes[7]);
+  instance->set_lane(8, lanes[8]);
+  instance->set_lane(9, lanes[9]);
+  instance->set_lane(10, lanes[10]);
+  instance->set_lane(11, lanes[11]);
+  instance->set_lane(12, lanes[12]);
+  instance->set_lane(13, lanes[13]);
+  instance->set_lane(14, lanes[14]);
+  instance->set_lane(15, lanes[15]);
+  return result;
+}
+
+
+AllocationResult Heap::AllocateBool8x16(bool lanes[16],
+                                        PretenureFlag pretenure) {
+  // Statically ensure that it is safe to allocate SIMD values in paged
+  // spaces.
+  int size = Bool8x16::kSize;
+  STATIC_ASSERT(Bool8x16::kSize <= Page::kMaxRegularHeapObjectSize);
+
+  AllocationSpace space = SelectSpace(size, pretenure);
+
+  HeapObject* result;
+  {
+    AllocationResult allocation =
+        AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(bool8x16_map());
+  Bool8x16* instance = Bool8x16::cast(result);
+  instance->set_lane(0, lanes[0]);
+  instance->set_lane(1, lanes[1]);
+  instance->set_lane(2, lanes[2]);
+  instance->set_lane(3, lanes[3]);
+  instance->set_lane(4, lanes[4]);
+  instance->set_lane(5, lanes[5]);
+  instance->set_lane(6, lanes[6]);
+  instance->set_lane(7, lanes[7]);
+  instance->set_lane(8, lanes[8]);
+  instance->set_lane(9, lanes[9]);
+  instance->set_lane(10, lanes[10]);
+  instance->set_lane(11, lanes[11]);
+  instance->set_lane(12, lanes[12]);
+  instance->set_lane(13, lanes[13]);
+  instance->set_lane(14, lanes[14]);
+  instance->set_lane(15, lanes[15]);
+  return result;
+}
+
 
 AllocationResult Heap::AllocateCell(Object* value) {
   int size = Cell::kSize;
   STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);

   HeapObject* result;
   {
     AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
(...skipping 3775 matching lines...)
     *object_type = "CODE_TYPE";           \
     *object_sub_type = "CODE_AGE/" #name; \
     return true;
     CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
 #undef COMPARE_AND_RETURN_NAME
   }
   return false;
 }
 }  // namespace internal
 }  // namespace v8
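A note on the allocation protocol all of these functions share: Heap::Allocate* returns an AllocationResult that either yields the new object via To() or reports a failed attempt so a GC can run and the allocation be retried, which is why every body checks `if (!allocation.To(&result)) return allocation;`. Callers normally reach these raw allocators through Factory wrappers that encapsulate that retry. A caller-side sketch using V8's CALL_HEAP_FUNCTION pattern from factory.cc (whether this patch adds a Factory::NewFloat32x4 with exactly this signature is an assumption):

// Sketch only: a Factory method wrapping the raw heap allocator.
// CALL_HEAP_FUNCTION retries the allocation after a GC if the first
// attempt fails, then returns the result as a Handle.
Handle<Float32x4> Factory::NewFloat32x4(float lanes[4],
                                        PretenureFlag pretenure) {
  CALL_HEAP_FUNCTION(
      isolate(),
      isolate()->heap()->AllocateFloat32x4(lanes, pretenure),
      Float32x4);
}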