Chromium Code Reviews

Side by Side Diff: src/a64/code-stubs-a64.cc

Issue 148503002: A64: Synchronize with r15545. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 11 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its 12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived 13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission. 14 // from this software without specific prior written permission.
15 // 15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #if defined(V8_TARGET_ARCH_A64) 30 #if V8_TARGET_ARCH_A64
31 31
32 #include "bootstrapper.h" 32 #include "bootstrapper.h"
33 #include "code-stubs.h" 33 #include "code-stubs.h"
34 #include "regexp-macro-assembler.h" 34 #include "regexp-macro-assembler.h"
35 #include "stub-cache.h" 35 #include "stub-cache.h"
36 36
37 namespace v8 { 37 namespace v8 {
38 namespace internal { 38 namespace internal {
39 39
40 40
(...skipping 19 matching lines...)
60 // x1: constant properties 60 // x1: constant properties
61 // x0: object literal flags 61 // x0: object literal flags
62 static Register registers[] = { x3, x2, x1, x0 }; 62 static Register registers[] = { x3, x2, x1, x0 };
63 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); 63 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
64 descriptor->register_params_ = registers; 64 descriptor->register_params_ = registers;
65 descriptor->deoptimization_handler_ = 65 descriptor->deoptimization_handler_ =
66 Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry; 66 Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
67 } 67 }
68 68
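
A note on the pattern repeated in these descriptor initializers: register_param_count_ is computed from the registers array with the sizeof quotient, so the count can never drift out of sync with the list. Below is a minimal standalone sketch of that idiom; the Register struct and names are stand-ins for illustration, not the V8 definitions.

    // Sketch only: the sizeof(arr) / sizeof(arr[0]) element-count idiom.
    // "Register" here is a placeholder type, not V8's Register.
    #include <cstddef>

    struct Register { int code; };

    static const Register registers[] = { {3}, {2}, {1}, {0} };

    // Evaluates to 4 at compile time; adding or removing an entry above
    // updates the count automatically.
    static const size_t kRegisterParamCount =
        sizeof(registers) / sizeof(registers[0]);
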
69 69
70 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
71 Isolate* isolate,
72 CodeStubInterfaceDescriptor* descriptor) {
73 // x2: cache cell
74 static Register registers[] = { x2 };
75 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
76 descriptor->register_params_ = registers;
77 descriptor->deoptimization_handler_ = NULL;
78 }
79
80
70 void KeyedLoadFastElementStub::InitializeInterfaceDescriptor( 81 void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
71 Isolate* isolate, 82 Isolate* isolate,
72 CodeStubInterfaceDescriptor* descriptor) { 83 CodeStubInterfaceDescriptor* descriptor) {
73 // x1: receiver 84 // x1: receiver
74 // x0: key 85 // x0: key
75 static Register registers[] = { x1, x0 }; 86 static Register registers[] = { x1, x0 };
76 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); 87 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
77 descriptor->register_params_ = registers; 88 descriptor->register_params_ = registers;
78 descriptor->deoptimization_handler_ = 89 descriptor->deoptimization_handler_ =
79 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); 90 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
(...skipping 153 matching lines...)
233 // x0: value 244 // x0: value
234 static Register registers[] = { x0 }; 245 static Register registers[] = { x0 };
235 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); 246 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
236 descriptor->register_params_ = registers; 247 descriptor->register_params_ = registers;
237 descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss); 248 descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss);
238 descriptor->SetMissHandler( 249 descriptor->SetMissHandler(
239 ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate)); 250 ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
240 } 251 }
241 252
242 253
254 void UnaryOpStub::InitializeInterfaceDescriptor(
255 Isolate* isolate,
256 CodeStubInterfaceDescriptor* descriptor) {
257 // x0: value
258 static Register registers[] = { x0 };
259 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
260 descriptor->register_params_ = registers;
261 descriptor->deoptimization_handler_ = FUNCTION_ADDR(UnaryOpIC_Miss);
262 }
263
264
265 void StoreGlobalStub::InitializeInterfaceDescriptor(
266 Isolate* isolate,
267 CodeStubInterfaceDescriptor* descriptor) {
268 // x1: receiver
269 // x2: key (unused)
270 // x0: value
271 static Register registers[] = { x1, x2, x0 };
272 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
273 descriptor->register_params_ = registers;
274 descriptor->deoptimization_handler_ =
275 FUNCTION_ADDR(StoreIC_MissFromStubFailure);
276 }
277
278
243 #define __ ACCESS_MASM(masm) 279 #define __ ACCESS_MASM(masm)
244 280
245 281
246 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { 282 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
247 // Update the static counter each time a new code stub is generated. 283 // Update the static counter each time a new code stub is generated.
248 Isolate* isolate = masm->isolate(); 284 Isolate* isolate = masm->isolate();
249 isolate->counters()->code_stubs()->Increment(); 285 isolate->counters()->code_stubs()->Increment();
250 286
251 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); 287 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
252 int param_count = descriptor->register_param_count_; 288 int param_count = descriptor->register_param_count_;
(...skipping 787 matching lines...)
1040 } 1076 }
1041 1077
1042 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) 1078 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1043 // tagged as a small integer. 1079 // tagged as a small integer.
1044 __ InvokeBuiltin(native, JUMP_FUNCTION); 1080 __ InvokeBuiltin(native, JUMP_FUNCTION);
1045 1081
1046 __ Bind(&miss); 1082 __ Bind(&miss);
1047 GenerateMiss(masm); 1083 GenerateMiss(masm);
1048 } 1084 }
1049 1085
1086
1050 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { 1087 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1051 // Preserve caller-saved registers x0-x7 and x10-x15. We don't care if x8, x9, 1088 // Preserve caller-saved registers x0-x7 and x10-x15. We don't care if x8, x9,
1052 // ip0 and ip1 are corrupted by the call into C. 1089 // ip0 and ip1 are corrupted by the call into C.
1053 CPURegList saved_regs = kCallerSaved; 1090 CPURegList saved_regs = kCallerSaved;
1054 saved_regs.Remove(ip0); 1091 saved_regs.Remove(ip0);
1055 saved_regs.Remove(ip1); 1092 saved_regs.Remove(ip1);
1056 saved_regs.Remove(x8); 1093 saved_regs.Remove(x8);
1057 saved_regs.Remove(x9); 1094 saved_regs.Remove(x9);
1058 1095
1059 // We don't allow a GC during a store buffer overflow so there is no need to 1096 // We don't allow a GC during a store buffer overflow so there is no need to
(...skipping 20 matching lines...)
1080 1117
1081 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( 1118 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
1082 Isolate* isolate) { 1119 Isolate* isolate) {
1083 StoreBufferOverflowStub stub1(kDontSaveFPRegs); 1120 StoreBufferOverflowStub stub1(kDontSaveFPRegs);
1084 stub1.GetCode(isolate)->set_is_pregenerated(true); 1121 stub1.GetCode(isolate)->set_is_pregenerated(true);
1085 StoreBufferOverflowStub stub2(kSaveFPRegs); 1122 StoreBufferOverflowStub stub2(kSaveFPRegs);
1086 stub2.GetCode(isolate)->set_is_pregenerated(true); 1123 stub2.GetCode(isolate)->set_is_pregenerated(true);
1087 } 1124 }
1088 1125
1089 1126
1090 void UnaryOpStub::PrintName(StringStream* stream) {
1091 const char* op_name = Token::Name(op_);
1092 const char* overwrite_name = NULL;
1093 switch (mode_) {
1094 case UNARY_NO_OVERWRITE:
1095 overwrite_name = "Alloc";
1096 break;
1097 case UNARY_OVERWRITE:
1098 overwrite_name = "Overwrite";
1099 break;
1100 default:
1101 UNREACHABLE();
1102 }
1103 stream->Add("UnaryOpStub_%s_%s_%s",
1104 op_name,
1105 overwrite_name,
1106 UnaryOpIC::GetName(operand_type_));
1107 }
1108
1109
1110 void UnaryOpStub::Generate(MacroAssembler* masm) {
1111 switch (operand_type_) {
1112 case UnaryOpIC::UNINITIALIZED:
1113 GenerateTypeTransition(masm);
1114 break;
1115 case UnaryOpIC::SMI:
1116 GenerateSmiStub(masm);
1117 break;
1118 case UnaryOpIC::NUMBER:
1119 GenerateNumberStub(masm);
1120 break;
1121 case UnaryOpIC::GENERIC:
1122 GenerateGenericStub(masm);
1123 break;
1124 }
1125 }
1126
1127
1128 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1129 __ Mov(x1, Operand(Smi::FromInt(op_)));
1130 __ Mov(x2, Operand(Smi::FromInt(mode_)));
1131 __ Mov(x3, Operand(Smi::FromInt(operand_type_)));
1132 // x0 contains the operand
1133 __ Push(x0, x1, x2, x3);
1134
1135 __ TailCallExternalReference(
1136 ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
1137 }
1138
1139
1140 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1141 switch (op_) {
1142 case Token::SUB:
1143 GenerateSmiStubSub(masm);
1144 break;
1145 case Token::BIT_NOT:
1146 GenerateSmiStubBitNot(masm);
1147 break;
1148 default:
1149 UNREACHABLE();
1150 }
1151 }
1152
1153
1154 void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
1155 Label non_smi, slow;
1156 GenerateSmiCodeSub(masm, &non_smi, &slow);
1157 __ Bind(&non_smi);
1158 __ Bind(&slow);
1159 GenerateTypeTransition(masm);
1160 }
1161
1162
1163 void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
1164 Label non_smi;
1165 GenerateSmiCodeBitNot(masm, &non_smi);
1166 __ Bind(&non_smi);
1167 GenerateTypeTransition(masm);
1168 }
1169
1170
1171 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
1172 Label* non_smi,
1173 Label* slow) {
1174 __ JumpIfNotSmi(x0, non_smi);
1175
1176 // The result of negating zero or the smallest negative smi is not a smi.
1177 __ Ands(x1, x0, 0x7fffffff00000000UL);
1178 __ B(eq, slow);
1179
1180 __ Neg(x0, x0);
1181 __ Ret();
1182 }
1183
1184
1185 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
1186 Label* non_smi) {
1187 __ JumpIfNotSmi(x0, non_smi);
1188
1189 // Eor the top 32 bits with 0xffffffff to invert.
1190 __ Eor(x0, x0, 0xffffffff00000000UL);
1191 __ Ret();
1192 }
1193
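
The two smi fast paths above rely on the A64 smi encoding used throughout this port: the 32-bit payload lives in the upper word and the low 32 bits are zero (kSmiShift == 32). The sketch below is only an illustration of why the masks 0x7fffffff00000000 and 0xffffffff00000000 do what the comments claim; the helper names are invented here and are not V8 API.

    #include <cstdint>

    const int kSmiShift = 32;
    const uint64_t kSmiPayloadMask    = 0xffffffff00000000ULL;  // all payload bits
    const uint64_t kSmiNonSignPayload = 0x7fffffff00000000ULL;  // payload bits minus the sign bit

    // Tag a 32-bit integer as a smi: payload in the upper word, zero low word.
    int64_t SmiTag(int32_t value) {
      return static_cast<int64_t>(value) << kSmiShift;
    }

    // GenerateSmiCodeSub bails out to the slow path when this is false:
    // payload 0 must become the heap number -0.0, and negating INT32_MIN
    // would overflow the smi range. Those are the only payloads with no bit
    // set under kSmiNonSignPayload.
    bool NegationStaysSmi(int64_t smi) {
      return (static_cast<uint64_t>(smi) & kSmiNonSignPayload) != 0;
    }

    // GenerateSmiCodeBitNot flips only the payload bits, so the result is
    // always a valid smi (the low word stays zero).
    int64_t SmiBitNot(int64_t smi) {
      return static_cast<int64_t>(static_cast<uint64_t>(smi) ^ kSmiPayloadMask);
    }
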
1194
1195 void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
1196 switch (op_) {
1197 case Token::SUB:
1198 GenerateNumberStubSub(masm);
1199 break;
1200 case Token::BIT_NOT:
1201 GenerateNumberStubBitNot(masm);
1202 break;
1203 default:
1204 UNREACHABLE();
1205 }
1206 }
1207
1208
1209 void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
1210 Label non_smi, slow, call_builtin;
1211 GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
1212 __ Bind(&non_smi);
1213 GenerateHeapNumberCodeSub(masm, &slow);
1214 __ Bind(&slow);
1215 GenerateTypeTransition(masm);
1216 __ Bind(&call_builtin);
1217 __ Push(x0);
1218 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
1219 }
1220
1221
1222 void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) {
1223 Label non_smi, slow;
1224 GenerateSmiCodeBitNot(masm, &non_smi);
1225 __ Bind(&non_smi);
1226 GenerateHeapNumberCodeBitNot(masm, &slow);
1227 __ Bind(&slow);
1228 GenerateTypeTransition(masm);
1229 }
1230
1231
1232 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
1233 Label* slow) {
1234 Register heap_num = x0;
1235 Register heap_num_map = x1;
1236
1237 __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
1238 __ JumpIfNotHeapNumber(heap_num, slow, heap_num_map);
1239
1240 if (mode_ == UNARY_OVERWRITE) {
1241 Register exponent = w2;
1242
1243 // Flip the sign bit of the existing heap number.
1244 __ Ldr(exponent, FieldMemOperand(heap_num, HeapNumber::kExponentOffset));
1245 __ Eor(exponent, exponent, HeapNumber::kSignMask);
1246 __ Str(exponent, FieldMemOperand(heap_num, HeapNumber::kExponentOffset));
1247 } else {
1248 Register allocated_num = x0;
1249 Register double_bits = x2;
1250 Register heap_num_orig = x3;
1251
1252 __ Mov(heap_num_orig, heap_num);
1253
1254 // Create a new heap number.
1255 Label slow_allocate_heapnumber, heapnumber_allocated;
1256 __ AllocateHeapNumber(allocated_num, &slow_allocate_heapnumber, x6, x7,
1257 heap_num_map);
1258 __ B(&heapnumber_allocated);
1259
1260 __ Bind(&slow_allocate_heapnumber);
1261 {
1262 FrameScope scope(masm, StackFrame::INTERNAL);
1263 __ Push(heap_num_orig);
1264 __ CallRuntime(Runtime::kNumberAlloc, 0);
1265 __ Pop(heap_num_orig);
1266 // allocated_num is x0, so contains the result of the runtime allocation.
1267 }
1268
1269 __ Bind(&heapnumber_allocated);
1270 // Load the original heap number as a double precision float, and flip the
1271 // sign bit.
1272 STATIC_ASSERT(HeapNumber::kExponentOffset ==
1273 (HeapNumber::kMantissaOffset + 4));
1274 __ Ldr(double_bits, FieldMemOperand(heap_num_orig,
1275 HeapNumber::kMantissaOffset));
1276 __ Eor(double_bits, double_bits, Double::kSignMask);
1277
1278 // Store the negated double to the newly allocated heap number.
1279 __ Str(double_bits, FieldMemOperand(allocated_num,
1280 HeapNumber::kValueOffset));
1281 }
1282 __ Ret();
1283 }
1284
1285
1286 void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
1287 Label* slow) {
1288 Register heap_num = x0;
1289 Register smi_num = x0;
1290
1291 __ JumpIfNotHeapNumber(heap_num, slow);
1292
1293 // Convert the heap number to a smi.
1294 __ HeapNumberECMA262ToInt32(smi_num, heap_num, x6, x7, d0,
1295 MacroAssembler::SMI);
1296
1297 // Eor the top 32 bits with 0xffffffff to invert.
1298 __ Eor(x0, smi_num, 0xffffffff00000000UL);
1299 __ Ret();
1300 }
1301
1302
1303 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
1304 switch (op_) {
1305 case Token::SUB: {
1306 Label non_smi, slow;
1307 GenerateSmiCodeSub(masm, &non_smi, &slow);
1308 __ Bind(&non_smi);
1309 GenerateHeapNumberCodeSub(masm, &slow);
1310 __ Bind(&slow);
1311 __ Push(x0);
1312 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
1313 break;
1314 }
1315 case Token::BIT_NOT: {
1316 Label non_smi, slow;
1317 GenerateSmiCodeBitNot(masm, &non_smi);
1318 __ Bind(&non_smi);
1319 GenerateHeapNumberCodeBitNot(masm, &slow);
1320 __ Bind(&slow);
1321 __ Push(x0);
1322 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
1323 break;
1324 }
1325 default:
1326 UNREACHABLE();
1327 }
1328 }
1329
1330
1331 void BinaryOpStub::Initialize() { 1127 void BinaryOpStub::Initialize() {
1332 // Nothing to do here. 1128 // Nothing to do here.
1333 } 1129 }
1334 1130
1335 1131
1336 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { 1132 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1337 ASM_LOCATION("BinaryOpStub::GenerateTypeTransition"); 1133 ASM_LOCATION("BinaryOpStub::GenerateTypeTransition");
1338 Label get_result; 1134 Label get_result;
1339 1135
1340 __ Mov(x12, Operand(Smi::FromInt(MinorKey()))); 1136 __ Mov(x12, Operand(Smi::FromInt(MinorKey())));
(...skipping 1160 matching lines...)
2501 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { 2297 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
2502 // It is important that the following stubs are generated in this order 2298 // It is important that the following stubs are generated in this order
2503 // because pregenerated stubs can only call other pregenerated stubs. 2299 // because pregenerated stubs can only call other pregenerated stubs.
2504 // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses 2300 // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
2505 // CEntryStub. 2301 // CEntryStub.
2506 CEntryStub::GenerateAheadOfTime(isolate); 2302 CEntryStub::GenerateAheadOfTime(isolate);
2507 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); 2303 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
2508 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); 2304 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
2509 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); 2305 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
2510 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); 2306 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
2307 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
2511 } 2308 }
2512 2309
2513 2310
2514 void CodeStub::GenerateFPStubs(Isolate* isolate) { 2311 void CodeStub::GenerateFPStubs(Isolate* isolate) {
2515 // Floating-point code doesn't get special handling in A64, so there's 2312 // Floating-point code doesn't get special handling in A64, so there's
2516 // nothing to do here. 2313 // nothing to do here.
2517 USE(isolate); 2314 USE(isolate);
2518 } 2315 }
2519 2316
2520 2317
(...skipping 224 matching lines...)
2745 2542
2746 void CEntryStub::Generate(MacroAssembler* masm) { 2543 void CEntryStub::Generate(MacroAssembler* masm) {
2747 // The Abort mechanism relies on CallRuntime, which in turn relies on 2544 // The Abort mechanism relies on CallRuntime, which in turn relies on
2748 // CEntryStub, so until this stub has been generated, we have to use a 2545 // CEntryStub, so until this stub has been generated, we have to use a
2749 // fall-back Abort mechanism. 2546 // fall-back Abort mechanism.
2750 // 2547 //
2751 // Note that this stub must be generated before any use of Abort. 2548 // Note that this stub must be generated before any use of Abort.
2752 masm->set_use_real_aborts(false); 2549 masm->set_use_real_aborts(false);
2753 2550
2754 ASM_LOCATION("CEntryStub::Generate entry"); 2551 ASM_LOCATION("CEntryStub::Generate entry");
2552 ProfileEntryHookStub::MaybeCallEntryHook(masm);
2553
2755 // Register parameters: 2554 // Register parameters:
2756 // x0: argc (including receiver, untagged) 2555 // x0: argc (including receiver, untagged)
2757 // x1: target 2556 // x1: target
2758 // 2557 //
2759 // The stack on entry holds the arguments and the receiver, with the receiver 2558 // The stack on entry holds the arguments and the receiver, with the receiver
2760 // at the highest address: 2559 // at the highest address:
2761 // 2560 //
2762 // jssp[argc-1]: receiver 2561 // jssp[argc-1]: receiver
2763 // jssp[argc-2]: arg[argc-2] 2562 // jssp[argc-2]: arg[argc-2]
2764 // ... ... 2563 // ... ...
(...skipping 1705 matching lines...)
4470 ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()), 4269 ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
4471 masm->isolate()->heap()->the_hole_value()); 4270 masm->isolate()->heap()->the_hole_value());
4472 4271
4473 // Load the cache state. 4272 // Load the cache state.
4474 __ Ldr(x3, FieldMemOperand(x2, Cell::kValueOffset)); 4273 __ Ldr(x3, FieldMemOperand(x2, Cell::kValueOffset));
4475 4274
4476 // A monomorphic cache hit or an already megamorphic state: invoke the 4275 // A monomorphic cache hit or an already megamorphic state: invoke the
4477 // function without changing the state. 4276 // function without changing the state.
4478 __ Cmp(x3, x1); 4277 __ Cmp(x3, x1);
4479 __ B(eq, &done); 4278 __ B(eq, &done);
4480 __ JumpIfRoot(x3, Heap::kUndefinedValueRootIndex, &done);
4481 4279
4482 // Special handling of the Array() function, which caches not only the 4280 // If we came here, we need to see if we are the array function.
4483 // monomorphic Array function but the initial ElementsKind with special 4281 // If we didn't have a matching function, and we didn't find the megamorph
4484 // sentinels 4282 // sentinel, then we have in the cell either some other function or an
4485 Handle<Object> terminal_kind_sentinel = 4283 // AllocationSite. Do a map check on the object in ecx.
4486 TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(), 4284 __ Ldr(x5, FieldMemOperand(x3, AllocationSite::kMapOffset));
4487 LAST_FAST_ELEMENTS_KIND); 4285 __ JumpIfNotRoot(x5, Heap::kAllocationSiteMapRootIndex, &miss);
4488 __ JumpIfNotSmi(x3, &miss); 4286
4489 __ Cmp(x3, Operand(terminal_kind_sentinel));
4490 __ B(gt, &miss);
4491 // Make sure the function is the Array() function 4287 // Make sure the function is the Array() function
4492 __ LoadArrayFunction(x3); 4288 __ LoadArrayFunction(x3);
4493 __ Cmp(x1, x3); 4289 __ Cmp(x1, x3);
4494 __ B(ne, &megamorphic); 4290 __ B(ne, &megamorphic);
4495 __ B(&done); 4291 __ B(&done);
4496 4292
4497 __ Bind(&miss); 4293 __ Bind(&miss);
4498 4294
4499 // A monomorphic miss (i.e., here the cache is not uninitialized) goes 4295 // A monomorphic miss (i.e., here the cache is not uninitialized) goes
4500 // megamorphic. 4296 // megamorphic.
4501 __ JumpIfRoot(x3, Heap::kTheHoleValueRootIndex, &initialize); 4297 __ JumpIfRoot(x3, Heap::kTheHoleValueRootIndex, &initialize);
4502 // MegamorphicSentinel is an immortal immovable object (undefined) so no 4298 // MegamorphicSentinel is an immortal immovable object (undefined) so no
4503 // write-barrier is needed. 4299 // write-barrier is needed.
4504 __ Bind(&megamorphic); 4300 __ Bind(&megamorphic);
4505 __ LoadRoot(x3, Heap::kUndefinedValueRootIndex); 4301 __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
4506 __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset)); 4302 __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
4507 __ B(&done); 4303 __ B(&done);
4508 4304
4509 // An uninitialized cache is patched with the function or sentinel to 4305 // An uninitialized cache is patched with the function or sentinel to
4510 // indicate the ElementsKind if function is the Array constructor. 4306 // indicate the ElementsKind if function is the Array constructor.
4511 __ Bind(&initialize); 4307 __ Bind(&initialize);
4512 // Make sure the function is the Array() function 4308 // Make sure the function is the Array() function
4513 __ LoadArrayFunction(x3); 4309 __ LoadArrayFunction(x3);
4514 __ Cmp(x1, x3); 4310 __ Cmp(x1, x3);
4515 __ B(ne, &not_array_function); 4311 __ B(ne, &not_array_function);
4516 4312
4517 // The target function is the Array constructor, install a sentinel value in 4313 // The target function is the Array constructor,
4518 // the constructor's type info cell that will track the initial ElementsKind 4314 // Create an AllocationSite if we don't already have it, store it in the cell
4519 // that should be used for the array when its constructed. 4315 {
4520 Handle<Object> initial_kind_sentinel = 4316 FrameScope scope(masm, StackFrame::INTERNAL);
4521 TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(), 4317 CreateAllocationSiteStub create_stub;
4522 GetInitialFastElementsKind()); 4318 __ Push(x0, x1, x2);
4523 __ Mov(x3, Operand(initial_kind_sentinel)); 4319 __ CallStub(&create_stub);
4524 __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset)); 4320 __ Pop(x2, x1, x0);
4321 }
4525 __ B(&done); 4322 __ B(&done);
4526 4323
4527 __ Bind(&not_array_function); 4324 __ Bind(&not_array_function);
4528 // An uninitialized cache is patched with the function. 4325 // An uninitialized cache is patched with the function.
4529 __ Str(x1, FieldMemOperand(x2, Cell::kValueOffset)); 4326 __ Str(x1, FieldMemOperand(x2, Cell::kValueOffset));
4530 // No need for a write barrier here - cells are rescanned. 4327 // No need for a write barrier here - cells are rescanned.
4531 4328
4532 __ Bind(&done); 4329 __ Bind(&done);
4533 } 4330 }
4534 4331
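
The chunk above is the part of call-target recording that this patch changes: when the callee is the Array() constructor, the type feedback cell now holds an AllocationSite object rather than a smi ElementsKind sentinel, while the other states (the-hole for uninitialized, the callee for monomorphic, undefined for megamorphic) keep their old meaning. The following is only a schematic sketch of that state machine; the enum and function are invented for illustration and are not V8 types.

    // Schematic model of the cell states handled above.
    //   the hole          -> uninitialized
    //   a JSFunction      -> monomorphic for that function
    //   an AllocationSite -> monomorphic for the Array() constructor (new in this patch)
    //   undefined         -> megamorphic
    enum class FeedbackState {
      kUninitialized, kMonomorphic, kMonomorphicArray, kMegamorphic
    };

    FeedbackState NextState(FeedbackState current,
                            bool target_matches_cell,
                            bool target_is_array_function) {
      if (current == FeedbackState::kMegamorphic || target_matches_cell ||
          (current == FeedbackState::kMonomorphicArray && target_is_array_function)) {
        return current;  // cache hit, or already generic: leave the cell alone
      }
      if (current == FeedbackState::kUninitialized) {
        // First recorded call from this site; Array() gets an AllocationSite.
        return target_is_array_function ? FeedbackState::kMonomorphicArray
                                        : FeedbackState::kMonomorphic;
      }
      return FeedbackState::kMegamorphic;  // any monomorphic miss goes megamorphic
    }
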
(...skipping 2080 matching lines...)
6615 } 6412 }
6616 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); 6413 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
6617 __ Add(__ StackPointer(), __ StackPointer(), 6414 __ Add(__ StackPointer(), __ StackPointer(),
6618 Operand(x1, LSL, kPointerSizeLog2)); 6415 Operand(x1, LSL, kPointerSizeLog2));
6619 // Return to IC Miss stub, continuation still on stack. 6416 // Return to IC Miss stub, continuation still on stack.
6620 __ Ret(); 6417 __ Ret();
6621 } 6418 }
6622 6419
6623 6420
6624 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { 6421 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
6625 if (entry_hook_ != NULL) { 6422 if (masm->isolate()->function_entry_hook() != NULL) {
6626 // TODO(all) this needs a literal pool blocking scope and predictable code 6423 // TODO(all): This needs to be reliably consistent with
6627 // size. 6424 // kReturnAddressDistanceFromFunctionStart in ::Generate.
6425 Assembler::BlockConstPoolScope no_const_pools(masm);
6426 AllowStubCallsScope allow_stub_calls(masm, true);
6628 ProfileEntryHookStub stub; 6427 ProfileEntryHookStub stub;
6629 __ Push(lr); 6428 __ Push(lr);
6630 __ CallStub(&stub); 6429 __ CallStub(&stub);
6631 __ Pop(lr); 6430 __ Pop(lr);
6632 } 6431 }
6633 } 6432 }
6634 6433
6635 6434
6636 void ProfileEntryHookStub::Generate(MacroAssembler* masm) { 6435 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
6637 // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by 6436 // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
6638 // a "Push lr" instruction, followed by a call. 6437 // a "Push lr" instruction, followed by a call.
6639 // TODO(jbramley): Verify that this call is always made with relocation. 6438 // TODO(jbramley): Verify that this call is always made with relocation.
6640 static const int kReturnAddressDistanceFromFunctionStart = 6439 static const int kReturnAddressDistanceFromFunctionStart =
6641 Assembler::kCallSizeWithRelocation + (2 * kInstructionSize); 6440 Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
6642 6441
6643 // Save live volatile registers. 6442 // Save all kCallerSaved registers (including lr), since this can be called
6644 __ Push(lr, x1, x5); 6443 // from anywhere.
6645 static const int kNumSavedRegs = 3; 6444 // TODO(jbramley): What about FP registers?
6445 __ PushCPURegList(kCallerSaved);
6446 ASSERT(kCallerSaved.IncludesAliasOf(lr));
6447 const int kNumSavedRegs = kCallerSaved.Count();
6646 6448
6647 // Compute the function's address as the first argument. 6449 // Compute the function's address as the first argument.
6648 __ Sub(x0, lr, kReturnAddressDistanceFromFunctionStart); 6450 __ Sub(x0, lr, kReturnAddressDistanceFromFunctionStart);
6649 6451
6650 #if defined(V8_HOST_ARCH_A64) 6452 #if V8_HOST_ARCH_A64
6651 __ Mov(x10, Operand(reinterpret_cast<intptr_t>(&entry_hook_))); 6453 uintptr_t entry_hook =
6652 __ Ldr(x10, MemOperand(x10)); 6454 reinterpret_cast<uintptr_t>(masm->isolate()->function_entry_hook());
6455 __ Mov(x10, entry_hook);
6653 #else 6456 #else
6654 // Under the simulator we need to indirect the entry hook through a trampoline 6457 // Under the simulator we need to indirect the entry hook through a trampoline
6655 // function at a known address. 6458 // function at a known address.
6656 Address trampoline_address = reinterpret_cast<Address>( 6459 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
6657 reinterpret_cast<intptr_t>(EntryHookTrampoline));
6658 ApiFunction dispatcher(trampoline_address);
6659 __ Mov(x10, Operand(ExternalReference(&dispatcher, 6460 __ Mov(x10, Operand(ExternalReference(&dispatcher,
6660 ExternalReference::BUILTIN_CALL, 6461 ExternalReference::BUILTIN_CALL,
6661 masm->isolate()))); 6462 masm->isolate())));
6662 #endif 6463 #endif
6663 6464
6664 // The caller's return address is above the saved temporaries. 6465 // The caller's return address is above the saved temporaries.
6665 // Grab its location for the second argument to the hook. 6466 // Grab its location for the second argument to the hook.
6666 __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize); 6467 __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
6667 6468
6668 { 6469 {
6669 // Create a dummy frame, as CallCFunction requires this. 6470 // Create a dummy frame, as CallCFunction requires this.
6670 FrameScope frame(masm, StackFrame::MANUAL); 6471 FrameScope frame(masm, StackFrame::MANUAL);
6671 __ CallCFunction(x10, 2, 0); 6472 __ CallCFunction(x10, 2, 0);
6672 } 6473 }
6673 6474
6674 __ Pop(x5, x1, lr); 6475 __ PopCPURegList(kCallerSaved);
6675 __ Ret(); 6476 __ Ret();
6676 } 6477 }
6677 6478
6678 6479
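
For context on the ProfileEntryHookStub changes above: the hook fetched from the isolate follows V8's public FunctionEntryHook signature, taking the entered function's address and the stack location of the return address, which is exactly what the stub assembles into x0 and x1 before CallCFunction. Below is a small embedder-side sketch; the registration call named in the final comment reflects the public API of this era as best recalled and should be read as an assumption, not part of this patch.

    #include <cstdint>
    #include <cstdio>

    // Mirrors v8::FunctionEntryHook: invoked on entry to generated code with
    // the code object's start address and the slot holding the return address.
    typedef void (*FunctionEntryHook)(uintptr_t function,
                                      uintptr_t return_addr_location);

    static void TraceEntryHook(uintptr_t function, uintptr_t return_addr_location) {
      std::printf("enter %p (return slot %p)\n",
                  reinterpret_cast<void*>(function),
                  reinterpret_cast<void*>(return_addr_location));
    }

    // Assumed registration point for an embedder (V8 of this vintage):
    //   v8::V8::SetFunctionEntryHook(TraceEntryHook);
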
6679 void DirectCEntryStub::Generate(MacroAssembler* masm) { 6480 void DirectCEntryStub::Generate(MacroAssembler* masm) {
6680 // When calling into C++ code the stack pointer must be csp. 6481 // When calling into C++ code the stack pointer must be csp.
6681 // Therefore this code must use csp for peek/poke operations when the 6482 // Therefore this code must use csp for peek/poke operations when the
6682 // stub is generated. When the stub is called 6483 // stub is generated. When the stub is called
6683 // (via DirectCEntryStub::GenerateCall), the caller must setup an ExitFrame 6484 // (via DirectCEntryStub::GenerateCall), the caller must setup an ExitFrame
6684 // and configure the stack pointer *before* doing the call. 6485 // and configure the stack pointer *before* doing the call.
(...skipping 300 matching lines...)
6985 Register type_info_cell = x2; 6786 Register type_info_cell = x2;
6986 Register kind = x3; 6787 Register kind = x3;
6987 6788
6988 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); 6789 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
6989 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); 6790 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
6990 STATIC_ASSERT(FAST_ELEMENTS == 2); 6791 STATIC_ASSERT(FAST_ELEMENTS == 2);
6991 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); 6792 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
6992 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4); 6793 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
6993 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); 6794 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
6994 6795
6995 Handle<Object> undefined_sentinel(
6996 masm->isolate()->heap()->undefined_value(),
6997 masm->isolate());
6998
6999 // Is the low bit set? If so, the array is holey. 6796 // Is the low bit set? If so, the array is holey.
7000 Label normal_sequence; 6797 Label normal_sequence;
7001 __ Tbnz(kind, 0, &normal_sequence); 6798 __ Tbnz(kind, 0, &normal_sequence);
7002 6799
7003 // Look at the last argument. 6800 // Look at the last argument.
7004 // TODO(jbramley): What does a 0 argument represent? 6801 // TODO(jbramley): What does a 0 argument represent?
7005 __ Peek(x10, 0); 6802 __ Peek(x10, 0);
7006 __ Cbz(x10, &normal_sequence); 6803 __ Cbz(x10, &normal_sequence);
7007 6804
7008 // We are going to create a holey array, but our kind is non-holey. 6805 // We are going to create a holey array, but our kind is non-holey.
7009 // Fix kind and retry. 6806 // Fix kind and retry (only if we have an allocation site in the cell).
7010 __ Orr(kind, kind, 1); 6807 __ Orr(kind, kind, 1);
7011 __ Cmp(type_info_cell, Operand(undefined_sentinel)); 6808 __ JumpIfRoot(type_info_cell, Heap::kUndefinedValueRootIndex,
7012 __ B(eq, &normal_sequence); 6809 &normal_sequence);
6810
6811 __ Ldr(x10, FieldMemOperand(type_info_cell, Cell::kValueOffset));
6812 __ Ldr(x10, FieldMemOperand(x10, 0));
6813 __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex, &normal_sequence);
7013 6814
7014 // Save the resulting elements kind in type info. 6815 // Save the resulting elements kind in type info.
7015 // TODO(jbramley): Tag and store at the same time. 6816 // TODO(jbramley): Tag and store at the same time.
7016 __ SmiTag(x10, kind); 6817 __ SmiTag(x10, kind);
7017 __ Str(x10, FieldMemOperand(type_info_cell, kPointerSize)); 6818 __ Ldr(x11, FieldMemOperand(type_info_cell, Cell::kValueOffset));
6819 __ Str(x10, FieldMemOperand(x11, AllocationSite::kPayloadOffset));
7018 6820
7019 __ Bind(&normal_sequence); 6821 __ Bind(&normal_sequence);
7020 int last_index = GetSequenceIndexFromFastElementsKind( 6822 int last_index = GetSequenceIndexFromFastElementsKind(
7021 TERMINAL_FAST_ELEMENTS_KIND); 6823 TERMINAL_FAST_ELEMENTS_KIND);
7022 for (int i = 0; i <= last_index; ++i) { 6824 for (int i = 0; i <= last_index; ++i) {
7023 Label next; 6825 Label next;
7024 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i); 6826 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
7025 // TODO(jbramley): Is this the best way to handle this? Can we make the tail 6827 // TODO(jbramley): Is this the best way to handle this? Can we make the tail
7026 // calls conditional, rather than hopping over each one? 6828 // calls conditional, rather than hopping over each one?
7027 __ CompareAndBranch(kind, candidate_kind, ne, &next); 6829 __ CompareAndBranch(kind, candidate_kind, ne, &next);
7028 ArraySingleArgumentConstructorStub stub(candidate_kind); 6830 ArraySingleArgumentConstructorStub stub(candidate_kind);
7029 __ TailCallStub(&stub); 6831 __ TailCallStub(&stub);
7030 __ Bind(&next); 6832 __ Bind(&next);
7031 } 6833 }
7032 6834
7033 // If we reached this point there is a problem. 6835 // If we reached this point there is a problem.
7034 __ Abort("Unexpected ElementsKind in array constructor"); 6836 __ Abort("Unexpected ElementsKind in array constructor");
7035 } 6837 }
7036 6838
7037 6839
7038 template<class T> 6840 template<class T>
7039 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) { 6841 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
7040 int to_index = GetSequenceIndexFromFastElementsKind( 6842 int to_index = GetSequenceIndexFromFastElementsKind(
7041 TERMINAL_FAST_ELEMENTS_KIND); 6843 TERMINAL_FAST_ELEMENTS_KIND);
7042 for (int i = 0; i <= to_index; ++i) { 6844 for (int i = 0; i <= to_index; ++i) {
7043 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); 6845 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
7044 T stub(kind); 6846 T stub(kind);
7045 stub.GetCode(isolate)->set_is_pregenerated(true); 6847 stub.GetCode(isolate)->set_is_pregenerated(true);
7046 if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { 6848 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
7047 T stub1(kind, true); 6849 T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
7048 stub1.GetCode(isolate)->set_is_pregenerated(true); 6850 stub1.GetCode(isolate)->set_is_pregenerated(true);
7049 } 6851 }
7050 } 6852 }
7051 } 6853 }
7052 6854
7053 6855
7054 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) { 6856 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
7055 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>( 6857 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
7056 isolate); 6858 isolate);
7057 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>( 6859 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
(...skipping 19 matching lines...)
7077 6879
7078 6880
7079 void ArrayConstructorStub::Generate(MacroAssembler* masm) { 6881 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
7080 // ----------- S t a t e ------------- 6882 // ----------- S t a t e -------------
7081 // -- x0 : argc (only if argument_count_ == ANY) 6883 // -- x0 : argc (only if argument_count_ == ANY)
7082 // -- x1 : constructor 6884 // -- x1 : constructor
7083 // -- x2 : type info cell 6885 // -- x2 : type info cell
7084 // -- sp[0] : return address 6886 // -- sp[0] : return address
7085 // -- sp[4] : last argument 6887 // -- sp[4] : last argument
7086 // ----------------------------------- 6888 // -----------------------------------
7087 Handle<Object> undefined_sentinel(
7088 masm->isolate()->heap()->undefined_value(), masm->isolate());
7089
7090 Register argc = x0; 6889 Register argc = x0;
7091 Register constructor = x1; 6890 Register constructor = x1;
7092 Register type_info_cell = x2; 6891 Register type_info_cell = x2;
7093 6892
7094 if (FLAG_debug_code) { 6893 if (FLAG_debug_code) {
7095 // The array construct code is only set for the global and natives 6894 // The array construct code is only set for the global and natives
7096 // builtin Array functions which always have maps. 6895 // builtin Array functions which always have maps.
7097 6896
7098 Label unexpected_map, map_ok; 6897 Label unexpected_map, map_ok;
7099 // Initial map for the builtin Array function should be a map. 6898 // Initial map for the builtin Array function should be a map.
7100 __ Ldr(x10, FieldMemOperand(constructor, 6899 __ Ldr(x10, FieldMemOperand(constructor,
7101 JSFunction::kPrototypeOrInitialMapOffset)); 6900 JSFunction::kPrototypeOrInitialMapOffset));
7102 // Will both indicate a NULL and a Smi. 6901 // Will both indicate a NULL and a Smi.
7103 __ JumpIfSmi(x10, &unexpected_map); 6902 __ JumpIfSmi(x10, &unexpected_map);
7104 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok); 6903 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
7105 __ Bind(&unexpected_map); 6904 __ Bind(&unexpected_map);
7106 __ Abort("Unexpected initial map for Array function"); 6905 __ Abort("Unexpected initial map for Array function");
7107 __ Bind(&map_ok); 6906 __ Bind(&map_ok);
7108 6907
7109 // In type_info_cell, we expect either undefined or a valid Cell. 6908 // In type_info_cell, we expect either undefined or a valid Cell.
7110 Label okay_here; 6909 Label okay_here;
7111 Handle<Map> cell_map(masm->isolate()->heap()->global_property_cell_map()); 6910 Handle<Map> cell_map(masm->isolate()->heap()->global_property_cell_map());
7112 __ CompareAndBranch(type_info_cell, Operand(undefined_sentinel), 6911 __ JumpIfRoot(type_info_cell, Heap::kUndefinedValueRootIndex, &okay_here);
7113 eq, &okay_here);
7114 __ Ldr(x10, FieldMemOperand(type_info_cell, Cell::kMapOffset)); 6912 __ Ldr(x10, FieldMemOperand(type_info_cell, Cell::kMapOffset));
7115 __ Cmp(x10, Operand(cell_map)); 6913 __ Cmp(x10, Operand(cell_map));
7116 __ Assert(eq, "Expected property cell in type_info_cell"); 6914 __ Assert(eq, "Expected property cell in type_info_cell");
7117 __ Bind(&okay_here); 6915 __ Bind(&okay_here);
7118 } 6916 }
7119 6917
7120 Register kind = x3; 6918 Register kind = x3;
7121 Label no_info, switch_ready; 6919 Label no_info, switch_ready;
7122 // Get the elements kind and case on that. 6920 // Get the elements kind and case on that.
7123 __ CompareAndBranch(type_info_cell, Operand(undefined_sentinel), 6921 __ JumpIfRoot(type_info_cell, Heap::kUndefinedValueRootIndex, &no_info);
7124 eq, &no_info);
7125 __ Ldr(kind, FieldMemOperand(type_info_cell, PropertyCell::kValueOffset)); 6922 __ Ldr(kind, FieldMemOperand(type_info_cell, PropertyCell::kValueOffset));
7126 __ JumpIfNotSmi(kind, &no_info); 6923
7127 __ SmiUntag(kind); 6924 // The type cell may have undefined in its value.
6925 __ JumpIfRoot(kind, Heap::kUndefinedValueRootIndex, &no_info);
6926
6927 // We should have an allocation site object
6928 if (FLAG_debug_code) {
6929 __ Ldr(x10, FieldMemOperand(kind, AllocationSite::kMapOffset));
6930 __ CompareRoot(x10, Heap::kAllocationSiteMapRootIndex);
6931 __ Assert(eq, "Expected AllocationSite object.");
6932 }
6933
6934 __ Ldrsw(kind, UntagSmiFieldMemOperand(kind, AllocationSite::kPayloadOffset));
7128 __ B(&switch_ready); 6935 __ B(&switch_ready);
7129 6936
7130 __ Bind(&no_info); 6937 __ Bind(&no_info);
7131 __ Mov(kind, GetInitialFastElementsKind()); 6938 __ Mov(kind, GetInitialFastElementsKind());
7132 __ Bind(&switch_ready); 6939 __ Bind(&switch_ready);
7133 6940
7134 if (argument_count_ == ANY) { 6941 if (argument_count_ == ANY) {
7135 Label zero_case, n_case; 6942 Label zero_case, n_case;
7136 __ Cbz(argc, &zero_case); 6943 __ Cbz(argc, &zero_case);
7137 __ Cmp(argc, 1); 6944 __ Cmp(argc, 1);
(...skipping 114 matching lines...)
7252 __ Bind(&fast_elements_case); 7059 __ Bind(&fast_elements_case);
7253 GenerateCase(masm, FAST_ELEMENTS); 7060 GenerateCase(masm, FAST_ELEMENTS);
7254 } 7061 }
7255 7062
7256 7063
7257 #undef __ 7064 #undef __
7258 7065
7259 } } // namespace v8::internal 7066 } } // namespace v8::internal
7260 7067
7261 #endif // V8_TARGET_ARCH_A64 7068 #endif // V8_TARGET_ARCH_A64