OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_ARM64 | 5 #if V8_TARGET_ARCH_ARM64 |
6 | 6 |
7 #include "src/arm64/frames-arm64.h" | 7 #include "src/arm64/frames-arm64.h" |
8 #include "src/codegen.h" | 8 #include "src/codegen.h" |
9 #include "src/debug/debug.h" | 9 #include "src/debug/debug.h" |
10 #include "src/deoptimizer.h" | 10 #include "src/deoptimizer.h" |
(...skipping 437 matching lines...)
448 { | 448 { |
449 FrameScope scope(masm, StackFrame::INTERNAL); | 449 FrameScope scope(masm, StackFrame::INTERNAL); |
450 __ Push(x2, x1, x3); // first argument, constructor, new target | 450 __ Push(x2, x1, x3); // first argument, constructor, new target |
451 __ CallRuntime(Runtime::kNewObject); | 451 __ CallRuntime(Runtime::kNewObject); |
452 __ Pop(x2); | 452 __ Pop(x2); |
453 } | 453 } |
454 __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset)); | 454 __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset)); |
455 __ Ret(); | 455 __ Ret(); |
456 } | 456 } |
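
This hunk (unchanged by the CL) is the slow path of a value-wrapper construct stub: Runtime::kNewObject allocates the JSValue and returns it in x0, while the primitive in x2 survives the call via the Push/Pop pair and is then stored into the wrapper's value field. A minimal self-contained C++ sketch of the same flow, with hypothetical types standing in for V8's internals:

    #include <cstdint>

    struct JSValue {
      uint64_t map;    // stand-in for the object header
      uint64_t value;  // the slot at JSValue::kValueOffset
    };

    // Hypothetical stand-in for Runtime::kNewObject: allocate the wrapper.
    JSValue* RuntimeNewObject(uint64_t /*constructor*/, uint64_t /*new_target*/) {
      return new JSValue{0, 0};
    }

    // Mirrors the stub tail: the primitive (x2) survives the runtime call
    // via the Push/Pop pair, then lands in the wrapper's value field.
    JSValue* ConstructValueWrapper(uint64_t first_arg,    // x2
                                   uint64_t constructor,  // x1
                                   uint64_t new_target) { // x3
      JSValue* obj = RuntimeNewObject(constructor, new_target);  // x0
      obj->value = first_arg;  // Str(x2, FieldMemOperand(x0, kValueOffset))
      return obj;              // Ret()
    }
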
457 | 457 |
458 | |
459 static void CallRuntimePassFunction(MacroAssembler* masm, | |
460 Runtime::FunctionId function_id) { | |
461 // ----------- S t a t e ------------- | |
462 // -- x1 : target function (preserved for callee) | |
463 // -- x3 : new target (preserved for callee) | |
464 // ----------------------------------- | |
465 | |
466 FrameScope scope(masm, StackFrame::INTERNAL); | |
467 // Push a copy of the target function and the new target. | |
468 // Push another copy as a parameter to the runtime call. | |
469 __ Push(x1, x3, x1); | |
470 | |
471 __ CallRuntime(function_id, 1); | |
472 | |
473 // Restore target function and new target. | |
474 __ Pop(x3, x1); | |
475 } | |
476 | |
477 | |
478 static void GenerateTailCallToSharedCode(MacroAssembler* masm) { | 458 static void GenerateTailCallToSharedCode(MacroAssembler* masm) { |
479 __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); | 459 __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); |
480 __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset)); | 460 __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset)); |
481 __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag); | 461 __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag); |
482 __ Br(x2); | 462 __ Br(x2); |
483 } | 463 } |
484 | 464 |
| 465 static void GenerateTailCallToReturnedCode(MacroAssembler* masm, |
| 466 Runtime::FunctionId function_id) { |
| 467 // ----------- S t a t e ------------- |
| 468 // -- x0 : argument count (preserved for callee) |
| 469 // -- x1 : target function (preserved for callee) |
| 470 // -- x3 : new target (preserved for callee) |
| 471 // ----------------------------------- |
| 472 { |
| 473 FrameScope scope(masm, StackFrame::INTERNAL); |
| 474 // Push a copy of the target function and the new target. |
| 475 // Push another copy as a parameter to the runtime call. |
| 476 __ SmiTag(x0); |
| 477 __ Push(x0, x1, x3, x1); |
485 | 478 |
486 static void GenerateTailCallToReturnedCode(MacroAssembler* masm) { | 479 __ CallRuntime(function_id, 1); |
487 __ Add(x0, x0, Code::kHeaderSize - kHeapObjectTag); | 480 __ Move(x2, x0); |
488 __ Br(x0); | 481 |
| 482 // Restore target function and new target. |
| 483 __ Pop(x3, x1, x0); |
| 484 __ SmiUntag(x0); |
| 485 } |
| 486 |
| 487 __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag); |
| 488 __ Br(x2); |
489 } | 489 } |
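
The CL folds the old CallRuntimePassFunction/GenerateTailCallToReturnedCode pair into a single helper that additionally preserves the argument count. Two details carry the weight: x0 is Smi-tagged before being pushed, so the raw count sits on the stack in a form the GC can safely scan during the runtime call, and the code object the runtime returns in x0 is moved to x2 so x0 can be restored before the tail call. A self-contained C++ sketch of that save/call/restore discipline, with plain variables standing in for registers (not the MacroAssembler API):

    #include <cstdint>

    // Registers modeled as plain variables: argc, target, scratch, new target.
    struct Regs { uint64_t x0, x1, x2, x3; };

    // Sketch of the merged helper's save/call/restore discipline. `runtime`
    // stands in for CallRuntime(function_id, 1): it may clobber x0 and
    // returns the code object to jump to.
    uint64_t TailCallToReturnedCode(Regs& r, uint64_t (*runtime)(uint64_t)) {
      uint64_t saved_x0 = r.x0 << 1;  // SmiTag(x0): shift illustrative; the
                                      // point is the count is tagged, so the
                                      // GC can scan the stack slot safely
      uint64_t saved_x1 = r.x1;       // Push(x0, x1, x3, x1): the last copy
      uint64_t saved_x3 = r.x3;       // is the runtime call's argument

      r.x2 = runtime(r.x1);           // CallRuntime(...) then Move(x2, x0)

      r.x3 = saved_x3;                // Pop(x3, x1, x0)
      r.x1 = saved_x1;
      r.x0 = saved_x0 >> 1;           // SmiUntag(x0)

      return r.x2;  // Add(x2, Code::kHeaderSize - kHeapObjectTag); Br(x2)
    }
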
490 | 490 |
491 | 491 |
492 void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) { | 492 void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) { |
493 // Checking whether the queued function is ready for install is optional, | 493 // Checking whether the queued function is ready for install is optional, |
494 // since we come across interrupts and stack checks elsewhere. However, not | 494 // since we come across interrupts and stack checks elsewhere. However, not |
495 // checking may delay installing ready functions, and always checking would be | 495 // checking may delay installing ready functions, and always checking would be |
496 // quite expensive. A good compromise is to first check against stack limit as | 496 // quite expensive. A good compromise is to first check against stack limit as |
497 // a cue for an interrupt signal. | 497 // a cue for an interrupt signal. |
498 Label ok; | 498 Label ok; |
499 __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex); | 499 __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex); |
500 __ B(hs, &ok); | 500 __ B(hs, &ok); |
501 | 501 |
502 CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode); | 502 GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode); |
503 GenerateTailCallToReturnedCode(masm); | |
504 | 503 |
505 __ Bind(&ok); | 504 __ Bind(&ok); |
506 GenerateTailCallToSharedCode(masm); | 505 GenerateTailCallToSharedCode(masm); |
507 } | 506 } |
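
The comment spells out the trade-off: checking readiness on every pass is too expensive, never checking delays installation, so the stack-limit comparison doubles as a cheap "interrupt pending" cue. The branch structure, as a small self-contained sketch with function pointers as hypothetical stand-ins for the two tail calls:

    #include <cstdint>

    // Sketch of Generate_InOptimizationQueue's control flow.
    void InOptimizationQueue(uintptr_t sp, uintptr_t stack_limit,
                             void (*tail_call_runtime)(),  // kTryInstallOptimizedCode
                             void (*tail_call_shared)()) {
      if (sp >= stack_limit) {   // CompareRoot(...); B(hs, &ok)
        tail_call_shared();      // Bind(&ok); GenerateTailCallToSharedCode
        return;
      }
      // Below the limit, an interrupt is likely pending, so the runtime
      // call that may install optimized code is worth its cost.
      tail_call_runtime();       // GenerateTailCallToReturnedCode(...)
    }
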
508 | 507 |
509 | 508 |
510 static void Generate_JSConstructStubHelper(MacroAssembler* masm, | 509 static void Generate_JSConstructStubHelper(MacroAssembler* masm, |
511 bool is_api_function, | 510 bool is_api_function, |
512 bool create_implicit_receiver, | 511 bool create_implicit_receiver, |
513 bool check_derived_construct) { | 512 bool check_derived_construct) { |
(...skipping 673 matching lines...)
1187 // This simulates the initial call to bytecode handlers in interpreter entry | 1186 // This simulates the initial call to bytecode handlers in interpreter entry |
1188 // trampoline. The return will never actually be taken, but our stack walker | 1187 // trampoline. The return will never actually be taken, but our stack walker |
1189 // uses this address to determine whether a frame is interpreted. | 1188 // uses this address to determine whether a frame is interpreted. |
1190 __ LoadObject(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline()); | 1189 __ LoadObject(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline()); |
1191 | 1190 |
1192 Generate_EnterBytecodeDispatch(masm); | 1191 Generate_EnterBytecodeDispatch(masm); |
1193 } | 1192 } |
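
The lr trick above is worth noting: loading the InterpreterEntryTrampoline address into lr makes any stack walk that inspects this frame's return address classify the frame as interpreted, even though the return is never taken. The walker's side of that contract, as a trivial sketch with hypothetical names:

    #include <cstdint>

    // Hypothetical sketch: a frame whose return address equals the
    // interpreter entry trampoline is treated as interpreted.
    bool IsInterpretedFrame(uintptr_t frame_return_address,
                            uintptr_t interpreter_entry_trampoline) {
      return frame_return_address == interpreter_entry_trampoline;
    }
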
1194 | 1193 |
1195 | 1194 |
1196 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { | 1195 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { |
1197 CallRuntimePassFunction(masm, Runtime::kCompileLazy); | 1196 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); |
1198 GenerateTailCallToReturnedCode(masm); | |
1199 } | 1197 } |
1200 | 1198 |
1201 | 1199 |
1202 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { | 1200 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { |
1203 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); | 1201 GenerateTailCallToReturnedCode(masm, |
1204 GenerateTailCallToReturnedCode(masm); | 1202 Runtime::kCompileOptimized_NotConcurrent); |
1205 } | 1203 } |
1206 | 1204 |
1207 | 1205 |
1208 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) { | 1206 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) { |
1209 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent); | 1207 GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent); |
1210 GenerateTailCallToReturnedCode(masm); | |
1211 } | 1208 } |
1212 | 1209 |
1213 | 1210 |
1214 static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { | 1211 static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { |
1215 // For now, we are relying on the fact that make_code_young doesn't do any | 1212 // For now, we are relying on the fact that make_code_young doesn't do any |
1216 // garbage collection which allows us to save/restore the registers without | 1213 // garbage collection which allows us to save/restore the registers without |
1217 // worrying about which of them contain pointers. We also don't build an | 1214 // worrying about which of them contain pointers. We also don't build an |
1218 // internal frame to make the code fast, since we shouldn't have to do stack | 1215 // internal frame to make the code fast, since we shouldn't have to do stack |
1219 // crawls in MakeCodeYoung. This seems a bit fragile. | 1216 // crawls in MakeCodeYoung. This seems a bit fragile. |
1220 | 1217 |
(...skipping 1598 matching lines...)
2819 } | 2816 } |
2820 } | 2817 } |
2821 | 2818 |
2822 | 2819 |
2823 #undef __ | 2820 #undef __ |
2824 | 2821 |
2825 } // namespace internal | 2822 } // namespace internal |
2826 } // namespace v8 | 2823 } // namespace v8 |
2827 | 2824 |
2828 #endif // V8_TARGET_ARCH_ARM64 | 2825 #endif // V8_TARGET_ARCH_ARM64 |