OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_MIPS | 5 #if V8_TARGET_ARCH_MIPS |
6 | 6 |
7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
8 #include "src/debug/debug.h" | 8 #include "src/debug/debug.h" |
9 #include "src/deoptimizer.h" | 9 #include "src/deoptimizer.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 451 matching lines...) |
462 { | 462 { |
463 FrameScope scope(masm, StackFrame::INTERNAL); | 463 FrameScope scope(masm, StackFrame::INTERNAL); |
464 __ Push(a0, a1, a3); // first argument, constructor, new target | 464 __ Push(a0, a1, a3); // first argument, constructor, new target |
465 __ CallRuntime(Runtime::kNewObject); | 465 __ CallRuntime(Runtime::kNewObject); |
466 __ Pop(a0); | 466 __ Pop(a0); |
467 } | 467 } |
468 __ Ret(USE_DELAY_SLOT); | 468 __ Ret(USE_DELAY_SLOT); |
469 __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot | 469 __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot |
470 } | 470 } |
471 | 471 |
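The `Ret(USE_DELAY_SLOT)` / `sw` pair above is the MIPS branch delay-slot idiom: the instruction emitted immediately after a jump executes before the jump completes, so `Ret(USE_DELAY_SLOT)` lets the value store ride for free in the slot that a plain `Ret()` would pad with a nop. A sketch of the equivalent straight-line form, in the same MacroAssembler terms as the surrounding code (not standalone-runnable):

    // Equivalent but one instruction longer: the assembler must emit a
    // nop into the delay slot behind the `jr ra` that Ret() expands to.
    __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));
    __ Ret();
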
472 | |
473 static void CallRuntimePassFunction( | |
474 MacroAssembler* masm, Runtime::FunctionId function_id) { | |
475 // ----------- S t a t e ------------- | |
476 // -- a1 : target function (preserved for callee) | |
477 // -- a3 : new target (preserved for callee) | |
478 // ----------------------------------- | |
479 | |
480 FrameScope scope(masm, StackFrame::INTERNAL); | |
481 // Push a copy of the target function and the new target. | |
482 // Push function as parameter to the runtime call. | |
483 __ Push(a1, a3, a1); | |
484 | |
485 __ CallRuntime(function_id, 1); | |
486 // Restore target function and new target. | |
487 __ Pop(a1, a3); | |
488 } | |
489 | |
490 | |
491 static void GenerateTailCallToSharedCode(MacroAssembler* masm) { | 472 static void GenerateTailCallToSharedCode(MacroAssembler* masm) { |
492 __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); | 473 __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); |
493 __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset)); | 474 __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset)); |
494 __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag)); | 475 __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag)); |
495 __ Jump(at); | 476 __ Jump(at); |
496 } | 477 } |
497 | 478 |
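GenerateTailCallToSharedCode re-enters the function's unoptimized code: it loads the SharedFunctionInfo out of the JSFunction, loads its Code object, and jumps past the Code header. The single `Addu` folds two steps into one constant, untagging the heap pointer and skipping the header. A standalone sketch of that address arithmetic (kHeapObjectTag really is 1 in V8; the header size below is a placeholder, not the real Code::kHeaderSize):

    #include <cstdint>

    constexpr uintptr_t kHeapObjectTag = 1;    // V8's heap-pointer tag
    constexpr uintptr_t kCodeHeaderSize = 32;  // placeholder value

    // One addition both clears the tag and skips the header, which is
    // exactly what `Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag))`
    // computes before the `Jump(at)`.
    uintptr_t EntryPoint(uintptr_t tagged_code_object) {
      return tagged_code_object + (kCodeHeaderSize - kHeapObjectTag);
    }
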
| 479 static void GenerateTailCallToReturnedCode(MacroAssembler* masm, |
| 480 Runtime::FunctionId function_id) { |
| 481 // ----------- S t a t e ------------- |
| 482 // -- a0 : argument count (preserved for callee) |
| 483 // -- a1 : target function (preserved for callee) |
| 484 // -- a3 : new target (preserved for callee) |
| 485 // ----------------------------------- |
| 486 { |
| 487 FrameScope scope(masm, StackFrame::INTERNAL); |
| 488 // Push a copy of the target function and the new target. |
| 489 // Push function as parameter to the runtime call. |
| 490 __ SmiTag(a0); |
| 491 __ Push(a0, a1, a3, a1); |
498 | 492 |
499 static void GenerateTailCallToReturnedCode(MacroAssembler* masm) { | 493 __ CallRuntime(function_id, 1); |
| 494 |
| 495 // Restore target function and new target. |
| 496 __ Pop(a0, a1, a3); |
| 497 __ SmiUntag(a0); |
| 498 } |
| 499 |
500 __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); | 500 __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); |
501 __ Jump(at); | 501 __ Jump(at); |
502 } | 502 } |
503 | 503 |
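The new combined helper replaces the old CallRuntimePassFunction plus GenerateTailCallToReturnedCode pair and must additionally preserve the raw argument count in a0 across a runtime call that may allocate. A raw integer on the stack could be misread as a heap pointer by the GC, so the count is SmiTagged before the Push and SmiUntagged after the Pop; the returned Code object in v0 is then entered with the same header-skipping jump as above. On 32-bit targets V8 encodes a Smi as the value shifted left one bit with a zero tag bit, which this runnable sketch mirrors (illustrative only, not V8's actual SmiTag implementation):

    #include <cassert>
    #include <cstdint>

    inline uint32_t SmiTag(uint32_t raw) { return raw << 1; }   // tag bit 0
    inline uint32_t SmiUntag(uint32_t smi) { return smi >> 1; }

    int main() {
      uint32_t argc = 3;               // raw count, as held in a0
      uint32_t tagged = SmiTag(argc);  // safe to push across a GC point
      assert((tagged & 1) == 0);       // GC reads this as a Smi, not a pointer
      assert(SmiUntag(tagged) == argc);
      return 0;
    }
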
504 | 504 |
505 void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) { | 505 void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) { |
506 // Checking whether the queued function is ready for install is optional, | 506 // Checking whether the queued function is ready for install is optional, |
507 // since we come across interrupts and stack checks elsewhere. However, | 507 // since we come across interrupts and stack checks elsewhere. However, |
508 // not checking may delay installing ready functions, and always checking | 508 // not checking may delay installing ready functions, and always checking |
509 // would be quite expensive. A good compromise is to first check against | 509 // would be quite expensive. A good compromise is to first check against |
510 // stack limit as a cue for an interrupt signal. | 510 // stack limit as a cue for an interrupt signal. |
511 Label ok; | 511 Label ok; |
512 __ LoadRoot(t0, Heap::kStackLimitRootIndex); | 512 __ LoadRoot(t0, Heap::kStackLimitRootIndex); |
513 __ Branch(&ok, hs, sp, Operand(t0)); | 513 __ Branch(&ok, hs, sp, Operand(t0)); |
514 | 514 |
515 CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode); | 515 GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode); |
516 GenerateTailCallToReturnedCode(masm); | |
517 | 516 |
518 __ bind(&ok); | 517 __ bind(&ok); |
519 GenerateTailCallToSharedCode(masm); | 518 GenerateTailCallToSharedCode(masm); |
520 } | 519 } |
521 | 520 |
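In pseudo-C++ the branch structure above is a single stack-limit test used as a cheap cue that an interrupt (and possibly a finished concurrent compile) is pending; the helper names below are hypothetical, not V8 API:

    // Sketch of Generate_InOptimizationQueue's control flow.
    void InOptimizationQueue() {
      if (sp() < stack_limit()) {
        // Interrupt cue: try to install freshly compiled optimized code
        // and tail-call whatever the runtime returns.
        TailCallToReturnedCode(TryInstallOptimizedCode());
      } else {
        // No cue (the common case): keep running the shared, unoptimized code.
        TailCallToSharedCode();
      }
    }
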
522 | 521 |
523 static void Generate_JSConstructStubHelper(MacroAssembler* masm, | 522 static void Generate_JSConstructStubHelper(MacroAssembler* masm, |
524 bool is_api_function, | 523 bool is_api_function, |
525 bool create_implicit_receiver, | 524 bool create_implicit_receiver, |
526 bool check_derived_construct) { | 525 bool check_derived_construct) { |
(...skipping 700 matching lines...) |
1227 // This simulates the initial call to bytecode handlers in the interpreter entry | 1226 // This simulates the initial call to bytecode handlers in the interpreter entry |
1228 // trampoline. The return will never actually be taken, but our stack walker | 1227 // trampoline. The return will never actually be taken, but our stack walker |
1229 // uses this address to determine whether a frame is interpreted. | 1228 // uses this address to determine whether a frame is interpreted. |
1230 __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline())); | 1229 __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline())); |
1231 | 1230 |
1232 Generate_EnterBytecodeDispatch(masm); | 1231 Generate_EnterBytecodeDispatch(masm); |
1233 } | 1232 } |
1234 | 1233 |
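Loading ra with the InterpreterEntryTrampoline address makes the never-taken return address double as a frame marker: the stack walker treats a frame as interpreted when its return address points into the trampoline. A hedged sketch of such a classification check (the helper's shape is hypothetical; V8's real frame inspection is more involved):

    #include <cstdint>

    bool IsInterpretedFrame(uintptr_t return_address,
                            uintptr_t trampoline_start,
                            uintptr_t trampoline_end) {
      // Interpreted frames carry a return address inside the trampoline.
      return return_address >= trampoline_start &&
             return_address < trampoline_end;
    }
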
1235 | 1234 |
1236 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { | 1235 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { |
1237 CallRuntimePassFunction(masm, Runtime::kCompileLazy); | 1236 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); |
1238 GenerateTailCallToReturnedCode(masm); | |
1239 } | 1237 } |
1240 | 1238 |
1241 | 1239 |
1242 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { | 1240 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { |
1243 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); | 1241 GenerateTailCallToReturnedCode(masm, |
1244 GenerateTailCallToReturnedCode(masm); | 1242 Runtime::kCompileOptimized_NotConcurrent); |
1245 } | 1243 } |
1246 | 1244 |
1247 | 1245 |
1248 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) { | 1246 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) { |
1249 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent); | 1247 GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent); |
1250 GenerateTailCallToReturnedCode(masm); | |
1251 } | 1248 } |
1252 | 1249 |
1253 | 1250 |
1254 static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { | 1251 static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { |
1255 // For now, we are relying on the fact that make_code_young doesn't do any | 1252 // For now, we are relying on the fact that make_code_young doesn't do any |
1256 // garbage collection, which allows us to save/restore the registers without | 1253 // garbage collection, which allows us to save/restore the registers without |
1257 // worrying about which of them contain pointers. We also don't build an | 1254 // worrying about which of them contain pointers. We also don't build an |
1258 // internal frame to make the code faster, since we shouldn't have to do stack | 1255 // internal frame to make the code faster, since we shouldn't have to do stack |
1259 // crawls in MakeCodeYoung. This seems a bit fragile. | 1256 // crawls in MakeCodeYoung. This seems a bit fragile. |
1260 | 1257 |
(...skipping 1498 matching lines...) |
2759 } | 2756 } |
2760 } | 2757 } |
2761 | 2758 |
2762 | 2759 |
2763 #undef __ | 2760 #undef __ |
2764 | 2761 |
2765 } // namespace internal | 2762 } // namespace internal |
2766 } // namespace v8 | 2763 } // namespace v8 |
2767 | 2764 |
2768 #endif // V8_TARGET_ARCH_MIPS | 2765 #endif // V8_TARGET_ARCH_MIPS |