| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #if V8_TARGET_ARCH_ARM64 | 5 #if V8_TARGET_ARCH_ARM64 |
| 6 | 6 |
| 7 #include "src/arm64/frames-arm64.h" | 7 #include "src/arm64/frames-arm64.h" |
| 8 #include "src/codegen.h" | 8 #include "src/codegen.h" |
| 9 #include "src/debug/debug.h" | 9 #include "src/debug/debug.h" |
| 10 #include "src/deoptimizer.h" | 10 #include "src/deoptimizer.h" |
| (...skipping 1179 matching lines...) |
| 1190 // This simulates the initial call to bytecode handlers in interpreter entry | 1190 // This simulates the initial call to bytecode handlers in interpreter entry |
| 1191 // trampoline. The return will never actually be taken, but our stack walker | 1191 // trampoline. The return will never actually be taken, but our stack walker |
| 1192 // uses this address to determine whether a frame is interpreted. | 1192 // uses this address to determine whether a frame is interpreted. |
| 1193 __ LoadObject(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline()); | 1193 __ LoadObject(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline()); |
| 1194 | 1194 |
| 1195 Generate_EnterBytecodeDispatch(masm); | 1195 Generate_EnterBytecodeDispatch(masm); |
| 1196 } | 1196 } |
| 1197 | 1197 |
| 1198 | 1198 |
| 1199 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { | 1199 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { |
| 1200 // ----------- S t a t e ------------- | |
| 1201 // -- x3 : new target (preserved for callee) | |
| 1202 // -- x1 : target function (preserved for callee) | |
| 1203 // ----------------------------------- | |
| 1204 // First lookup code, maybe we don't need to compile! | |
| 1205 Label gotta_call_runtime; | |
| 1206 Label maybe_call_runtime; | |
| 1207 Label try_shared; | |
| 1208 Label loop_top, loop_bottom; | |
| 1209 | |
| 1210 Register closure = x1; | |
| 1211 Register new_target = x3; | |
| 1212 Register map = x0; | |
| 1213 Register index = x2; | |
| 1214 __ Ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); | |
| 1215 __ Ldr(map, | |
| 1216 FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset)); | |
| 1217 __ Ldrsw(index, UntagSmiFieldMemOperand(map, FixedArray::kLengthOffset)); | |
| 1218 __ Cmp(index, Operand(2)); | |
| 1219 __ B(lt, &gotta_call_runtime); | |
| 1220 | |
| 1221 // Find literals. | |
| 1222 // x3 : native context | |
| 1223 // x2 : length / index | |
| 1224 // x0 : optimized code map | |
| 1225 // stack[0] : new target | |
| 1226 // stack[4] : closure | |
| 1227 Register native_context = x4; | |
| 1228 __ Ldr(native_context, NativeContextMemOperand()); | |
| 1229 | |
| 1230 __ Bind(&loop_top); | |
| 1231 Register temp = x5; | |
| 1232 Register array_pointer = x6; | |
| 1233 | |
| 1234 // Does the native context match? | |
| 1235 __ Add(array_pointer, map, Operand(index, LSL, kPointerSizeLog2)); | |
| 1236 __ Ldr(temp, FieldMemOperand(array_pointer, | |
| 1237 SharedFunctionInfo::OffsetToPreviousContext())); | |
| 1238 __ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); | |
| 1239 __ Cmp(temp, native_context); | |
| 1240 __ B(ne, &loop_bottom); | |
| 1241 // OSR id set to none? | |
| 1242 __ Ldr(temp, FieldMemOperand(array_pointer, | |
| 1243 SharedFunctionInfo::OffsetToPreviousOsrAstId())); | |
| 1244 const int bailout_id = BailoutId::None().ToInt(); | |
| 1245 __ Cmp(temp, Operand(Smi::FromInt(bailout_id))); | |
| 1246 __ B(ne, &loop_bottom); | |
| 1247 // Literals available? | |
| 1248 __ Ldr(temp, FieldMemOperand(array_pointer, | |
| 1249 SharedFunctionInfo::OffsetToPreviousLiterals())); | |
| 1250 __ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset)); | |
| 1251 __ JumpIfSmi(temp, &gotta_call_runtime); | |
| 1252 | |
| 1253 // Save the literals in the closure. | |
| 1254 __ Str(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset)); | |
| 1255 __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, x7, | |
| 1256 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | |
| 1257 OMIT_SMI_CHECK); | |
| 1258 | |
| 1259 // Code available? | |
| 1260 Register entry = x7; | |
| 1261 __ Ldr(entry, | |
| 1262 FieldMemOperand(array_pointer, | |
| 1263 SharedFunctionInfo::OffsetToPreviousCachedCode())); | |
| 1264 __ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); | |
| 1265 __ JumpIfSmi(entry, &maybe_call_runtime); | |
| 1266 | |
| 1267 // Found literals and code. Get them into the closure and return. | |
| 1268 __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
| 1269 | |
| 1270 Label install_optimized_code_and_tailcall; | |
| 1271 __ Bind(&install_optimized_code_and_tailcall); | |
| 1272 __ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset)); | |
| 1273 __ RecordWriteCodeEntryField(closure, entry, x5); | |
| 1274 | |
| 1275 // Link the closure into the optimized function list. | |
| 1276 // x7 : code entry | |
| 1277 // x4 : native context | |
| 1278 // x1 : closure | |
| 1279 __ Ldr(x8, | |
| 1280 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); | |
| 1281 __ Str(x8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset)); | |
| 1282 __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, x8, x0, | |
| 1283 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | |
| 1284 OMIT_SMI_CHECK); | |
| 1285 const int function_list_offset = | |
| 1286 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST); | |
| 1287 __ Str(closure, | |
| 1288 ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST)); | |
| 1289 __ Mov(x5, closure); | |
| 1290 __ RecordWriteContextSlot(native_context, function_list_offset, x5, x0, | |
| 1291 kLRHasNotBeenSaved, kDontSaveFPRegs); | |
| 1292 __ Jump(entry); | |
| 1293 | |
| 1294 __ Bind(&loop_bottom); | |
| 1295 __ Sub(index, index, Operand(SharedFunctionInfo::kEntryLength)); | |
| 1296 __ Cmp(index, Operand(1)); | |
| 1297 __ B(gt, &loop_top); | |
| 1298 | |
| 1299 // We found neither literals nor code. | |
| 1300 __ B(&gotta_call_runtime); | |
| 1301 | |
| 1302 __ Bind(&maybe_call_runtime); | |
| 1303 | |
| 1304 // Last possibility. Check the context free optimized code map entry. | |
| 1305 __ Ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize + | |
| 1306 SharedFunctionInfo::kSharedCodeIndex)); | |
| 1307 __ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset)); | |
| 1308 __ JumpIfSmi(entry, &try_shared); | |
| 1309 | |
| 1310 // Store code entry in the closure. | |
| 1311 __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
| 1312 __ B(&install_optimized_code_and_tailcall); | |
| 1313 | |
| 1314 __ Bind(&try_shared); | |
| 1315 // Is the full code valid? | |
| 1316 __ Ldr(entry, | |
| 1317 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); | |
| 1318 __ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset)); | |
| 1319 __ Ldr(x5, FieldMemOperand(entry, Code::kFlagsOffset)); | |
| 1320 __ and_(x5, x5, Operand(Code::KindField::kMask)); | |
| 1321 __ Mov(x5, Operand(x5, LSR, Code::KindField::kShift)); | |
| 1322 __ Cmp(x5, Operand(Code::BUILTIN)); | |
| 1323 __ B(eq, &gotta_call_runtime); | |
| 1324 // Yes, install the full code. | |
| 1325 __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
| 1326 __ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset)); | |
| 1327 __ RecordWriteCodeEntryField(closure, entry, x5); | |
| 1328 __ Jump(entry); | |
| 1329 | |
| 1330 __ Bind(&gotta_call_runtime); | |
| 1331 CallRuntimePassFunction(masm, Runtime::kCompileLazy); | 1200 CallRuntimePassFunction(masm, Runtime::kCompileLazy); |
| 1332 GenerateTailCallToReturnedCode(masm); | 1201 GenerateTailCallToReturnedCode(masm); |
| 1333 } | 1202 } |
| 1334 | 1203 |
| 1335 | 1204 |
| 1336 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { | 1205 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { |
| 1337 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); | 1206 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); |
| 1338 GenerateTailCallToReturnedCode(masm); | 1207 GenerateTailCallToReturnedCode(masm); |
| 1339 } | 1208 } |
| 1340 | 1209 |
| (...skipping 1615 matching lines...) |
| 2956 } | 2825 } |
| 2957 } | 2826 } |
| 2958 | 2827 |
| 2959 | 2828 |
| 2960 #undef __ | 2829 #undef __ |
| 2961 | 2830 |
| 2962 } // namespace internal | 2831 } // namespace internal |
| 2963 } // namespace v8 | 2832 } // namespace v8 |
| 2964 | 2833 |
| 2965 #endif // V8_TARGET_ARCH_ARM64 | 2834 #endif // V8_TARGET_ARCH_ARM64 |
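
Note on the Generate_CompileLazy deletion above: before this patch, the builtin walked the SharedFunctionInfo's optimized code map looking for an entry that matched the current native context with no OSR ast id; on a hit it installed the cached literals and code into the closure and tail-called the code, and only on a miss did it fall back to Runtime::kCompileLazy. The sketch below is a minimal, simplified C++ rendering of that control flow, not V8's actual API: the struct, field, and function names are invented for illustration, and the real builtin additionally links the closure into the native context's optimized-functions list and has one more fallback that installs the shared function's unoptimized code when it is not a builtin.

#include <vector>

// Illustrative stand-ins for the heap objects the removed builtin touched.
struct Code;
struct Literals;

struct CodeMapEntry {        // one kEntryLength-sized group in the code map
  void* native_context;      // weak cell value; nullptr if the cell is cleared
  int osr_ast_id;            // BailoutId::None() means a non-OSR entry
  Literals* literals;        // nullptr if the weak cell is cleared
  Code* cached_code;         // nullptr if the weak cell is cleared
};

struct Closure {             // stands in for the JSFunction being compiled
  Literals* literals;
  Code* code_entry;
};

// Mirrors the loop_top..loop_bottom search in the deleted assembly.
// Returns true if cached code was installed, false if the runtime
// (Runtime::kCompileLazy) still has to be called.
bool TryInstallCachedCode(Closure* closure, void* native_context,
                          const std::vector<CodeMapEntry>& code_map,
                          Code* shared_code_entry, int bailout_id_none) {
  for (const CodeMapEntry& entry : code_map) {
    if (entry.native_context != native_context) continue;  // loop_bottom
    if (entry.osr_ast_id != bailout_id_none) continue;     // loop_bottom
    if (entry.literals == nullptr) return false;           // gotta_call_runtime
    closure->literals = entry.literals;                     // Str + RecordWriteField
    if (entry.cached_code != nullptr) {                     // code available?
      closure->code_entry = entry.cached_code;              // install, link, Jump
      return true;
    }
    // maybe_call_runtime: try the context-independent (shared) map entry.
    if (shared_code_entry != nullptr) {
      closure->code_entry = shared_code_entry;
      return true;
    }
    return false;  // try_shared / gotta_call_runtime in the original
  }
  return false;  // no entry for this native context: gotta_call_runtime
}

With the fast path removed, every miss (and every hit) now goes straight through CallRuntimePassFunction(masm, Runtime::kCompileLazy), matching the lines kept in the NEW column.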