Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(2)

Side by Side Diff: src/mips64/builtins-mips64.cc

Issue 1683593003: Preserve argument count for calls. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Fixed mips64 compile issue. Created 4 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/mips/builtins-mips.cc ('k') | src/x64/builtins-x64.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #if V8_TARGET_ARCH_MIPS64 5 #if V8_TARGET_ARCH_MIPS64
6 6
7 #include "src/codegen.h" 7 #include "src/codegen.h"
8 #include "src/debug/debug.h" 8 #include "src/debug/debug.h"
9 #include "src/deoptimizer.h" 9 #include "src/deoptimizer.h"
10 #include "src/full-codegen/full-codegen.h" 10 #include "src/full-codegen/full-codegen.h"
(...skipping 450 matching lines...) Expand 10 before | Expand all | Expand 10 after
461 { 461 {
462 FrameScope scope(masm, StackFrame::INTERNAL); 462 FrameScope scope(masm, StackFrame::INTERNAL);
463 __ Push(a0, a1, a3); // first argument, constructor, new target 463 __ Push(a0, a1, a3); // first argument, constructor, new target
464 __ CallRuntime(Runtime::kNewObject); 464 __ CallRuntime(Runtime::kNewObject);
465 __ Pop(a0); 465 __ Pop(a0);
466 } 466 }
467 __ Ret(USE_DELAY_SLOT); 467 __ Ret(USE_DELAY_SLOT);
468 __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot. 468 __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot.
469 } 469 }
470 470
471
472 static void CallRuntimePassFunction(
473 MacroAssembler* masm, Runtime::FunctionId function_id) {
474 // ----------- S t a t e -------------
475 // -- a1 : target function (preserved for callee)
476 // -- a3 : new target (preserved for callee)
477 // -----------------------------------
478
479 FrameScope scope(masm, StackFrame::INTERNAL);
480 // Push the target function and the new target so they can be
481 // restored later, plus an extra copy of the function as the argument
482 // to the runtime call.
482 __ Push(a1, a3, a1);
483
484 __ CallRuntime(function_id, 1);
485 // Restore target function and new target.
486 __ Pop(a1, a3);
487 }
488
489
490 static void GenerateTailCallToSharedCode(MacroAssembler* masm) { 471 static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
491 __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); 472 __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
492 __ ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset)); 473 __ ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
493 __ Daddu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag)); 474 __ Daddu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
494 __ Jump(at); 475 __ Jump(at);
495 } 476 }
496 477
478 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
479 Runtime::FunctionId function_id) {
480 // ----------- S t a t e -------------
481 // -- a0 : argument count (preserved for callee)
482 // -- a1 : target function (preserved for callee)
483 // -- a3 : new target (preserved for callee)
484 // -----------------------------------
485 {
486 FrameScope scope(masm, StackFrame::INTERNAL);
487 // Push the (smi-tagged) argument count, the target function and the
488 // new target so they can be restored later, plus an extra copy of
489 // the function as the argument to the runtime call.
489 __ SmiTag(a0);
490 __ Push(a0, a1, a3, a1);
497 491
498 static void GenerateTailCallToReturnedCode(MacroAssembler* masm) { 492 __ CallRuntime(function_id, 1);
493 // Restore target function and new target.
494 __ Pop(a0, a1, a3);
495 __ SmiUntag(a0);
496 }
497
499 __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); 498 __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
500 __ Jump(at); 499 __ Jump(at);
501 } 500 }
502 501
503 502
504 void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) { 503 void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
505 // Checking whether the queued function is ready for install is optional, 504 // Checking whether the queued function is ready for install is optional,
506 // since we come across interrupts and stack checks elsewhere. However, 505 // since we come across interrupts and stack checks elsewhere. However,
507 // not checking may delay installing ready functions, and always checking 506 // not checking may delay installing ready functions, and always checking
508 // would be quite expensive. A good compromise is to first check against 507 // would be quite expensive. A good compromise is to first check against
509 // stack limit as a cue for an interrupt signal. 508 // stack limit as a cue for an interrupt signal.
510 Label ok; 509 Label ok;
511 __ LoadRoot(a4, Heap::kStackLimitRootIndex); 510 __ LoadRoot(a4, Heap::kStackLimitRootIndex);
512 __ Branch(&ok, hs, sp, Operand(a4)); 511 __ Branch(&ok, hs, sp, Operand(a4));
513 512
514 CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode); 513 GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
515 GenerateTailCallToReturnedCode(masm);
516 514
517 __ bind(&ok); 515 __ bind(&ok);
518 GenerateTailCallToSharedCode(masm); 516 GenerateTailCallToSharedCode(masm);
519 } 517 }
520 518
521 519
522 static void Generate_JSConstructStubHelper(MacroAssembler* masm, 520 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
523 bool is_api_function, 521 bool is_api_function,
524 bool create_implicit_receiver, 522 bool create_implicit_receiver,
525 bool check_derived_construct) { 523 bool check_derived_construct) {
(...skipping 696 matching lines...) Expand 10 before | Expand all | Expand 10 after
1222 // This simulates the initial call to bytecode handlers in interpreter entry 1220 // This simulates the initial call to bytecode handlers in interpreter entry
1223 // trampoline. The return will never actually be taken, but our stack walker 1221 // trampoline. The return will never actually be taken, but our stack walker
1224 // uses this address to determine whether a frame is interpreted. 1222 // uses this address to determine whether a frame is interpreted.
1225 __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline())); 1223 __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
1226 1224
1227 Generate_EnterBytecodeDispatch(masm); 1225 Generate_EnterBytecodeDispatch(masm);
1228 } 1226 }
1229 1227
1230 1228
1231 void Builtins::Generate_CompileLazy(MacroAssembler* masm) { 1229 void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
1232 CallRuntimePassFunction(masm, Runtime::kCompileLazy); 1230 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1233 GenerateTailCallToReturnedCode(masm);
1234 } 1231 }
1235 1232
1236 1233
1237 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { 1234 void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
1238 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent); 1235 GenerateTailCallToReturnedCode(masm,
1239 GenerateTailCallToReturnedCode(masm); 1236 Runtime::kCompileOptimized_NotConcurrent);
1240 } 1237 }
1241 1238
1242 1239
1243 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) { 1240 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
1244 CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent); 1241 GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
1245 GenerateTailCallToReturnedCode(masm);
1246 } 1242 }
1247 1243
1248 1244
1249 static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { 1245 static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
1250 // For now, we are relying on the fact that make_code_young doesn't do any 1246 // For now, we are relying on the fact that make_code_young doesn't do any
1251 // garbage collection which allows us to save/restore the registers without 1247 // garbage collection which allows us to save/restore the registers without
1252 // worrying about which of them contain pointers. We also don't build an 1248 // worrying about which of them contain pointers. We also don't build an
1253 // internal frame to make the code faster, since we shouldn't have to do stack 1249 // internal frame to make the code faster, since we shouldn't have to do stack
1254 // crawls in MakeCodeYoung. This seems a bit fragile. 1250 // crawls in MakeCodeYoung. This seems a bit fragile.
1255 1251
(...skipping 1500 matching lines...) Expand 10 before | Expand all | Expand 10 after
2756 } 2752 }
2757 } 2753 }
2758 2754
2759 2755
2760 #undef __ 2756 #undef __
2761 2757
2762 } // namespace internal 2758 } // namespace internal
2763 } // namespace v8 2759 } // namespace v8
2764 2760
2765 #endif // V8_TARGET_ARCH_MIPS64 2761 #endif // V8_TARGET_ARCH_MIPS64
OLDNEW
« no previous file with comments | « src/mips/builtins-mips.cc ('k') | src/x64/builtins-x64.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698