| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/code-generator.h" | 5 #include "src/compiler/code-generator.h" |
| 6 | 6 |
| 7 #include "src/arm64/frames-arm64.h" | 7 #include "src/arm64/frames-arm64.h" |
| 8 #include "src/arm64/macro-assembler-arm64.h" | 8 #include "src/arm64/macro-assembler-arm64.h" |
| 9 #include "src/ast/scopes.h" | 9 #include "src/ast/scopes.h" |
| 10 #include "src/compiler/code-generator-impl.h" | 10 #include "src/compiler/code-generator-impl.h" |
| (...skipping 470 matching lines...) |
| 481 __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 481 __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| 482 } | 482 } |
| 483 frame_access_state()->SetFrameAccessToSP(); | 483 frame_access_state()->SetFrameAccessToSP(); |
| 484 } | 484 } |
| 485 | 485 |
| 486 | 486 |
| 487 // Assembles an instruction after register allocation, producing machine code. | 487 // Assembles an instruction after register allocation, producing machine code. |
| 488 void CodeGenerator::AssembleArchInstruction(Instruction* instr) { | 488 void CodeGenerator::AssembleArchInstruction(Instruction* instr) { |
| 489 Arm64OperandConverter i(this, instr); | 489 Arm64OperandConverter i(this, instr); |
| 490 InstructionCode opcode = instr->opcode(); | 490 InstructionCode opcode = instr->opcode(); |
| 491 switch (ArchOpcodeField::decode(opcode)) { | 491 ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode); |
| 492 switch (arch_opcode) { |
| 492 case kArchCallCodeObject: { | 493 case kArchCallCodeObject: { |
| 493 EnsureSpaceForLazyDeopt(); | 494 EnsureSpaceForLazyDeopt(); |
| 494 if (instr->InputAt(0)->IsImmediate()) { | 495 if (instr->InputAt(0)->IsImmediate()) { |
| 495 __ Call(Handle<Code>::cast(i.InputHeapObject(0)), | 496 __ Call(Handle<Code>::cast(i.InputHeapObject(0)), |
| 496 RelocInfo::CODE_TARGET); | 497 RelocInfo::CODE_TARGET); |
| 497 } else { | 498 } else { |
| 498 Register target = i.InputRegister(0); | 499 Register target = i.InputRegister(0); |
| 499 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag); | 500 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag); |
| 500 __ Call(target); | 501 __ Call(target); |
| 501 } | 502 } |
| 503 // TODO(titzer): this is ugly. JSSP should be a caller-save register |
| 504 // in this case, but it is not possible to express in the register |
| 505 // allocator. |
| 506 CallDescriptor::Flags flags = |
| 507 static_cast<CallDescriptor::Flags>(MiscField::decode(opcode)); |
| 508 if (flags & CallDescriptor::kRestoreJSSP) { |
| 509 __ mov(jssp, csp); |
| 510 } |
| 502 frame_access_state()->ClearSPDelta(); | 511 frame_access_state()->ClearSPDelta(); |
| 503 RecordCallPosition(instr); | 512 RecordCallPosition(instr); |
| 504 break; | 513 break; |
| 505 } | 514 } |
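The kRestoreJSSP handling added above decodes the CallDescriptor::Flags packed into the instruction's MiscField and, when the flag is set, re-derives jssp from csp after the call. A minimal sketch of that bit-test pattern in plain C++; the MiscField bit position and flag value here are made-up stand-ins (the real encodings live in instruction-codes.h and the CallDescriptor declaration):

#include <cstdint>
#include <iostream>

// Hypothetical stand-ins: a flags bitmask and a MiscField occupying
// the top bits of the 32-bit instruction word.
enum CallFlags : uint32_t { kNoFlags = 0, kRestoreJSSP = 1u << 0 };
constexpr int kMiscShift = 22;

constexpr CallFlags DecodeMisc(uint32_t opcode) {
  return static_cast<CallFlags>(opcode >> kMiscShift);
}

int main() {
  // Low bits: the arch opcode; high bits: the call flags.
  uint32_t opcode = (kRestoreJSSP << kMiscShift) | 0x7u;
  if (DecodeMisc(opcode) & kRestoreJSSP) {
    std::cout << "would emit: mov jssp, csp\n";  // re-sync the JS stack pointer
  }
  return 0;
}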
| 506 case kArchTailCallCodeObject: { | 515 case kArchTailCallCodeObject: { |
| 507 int stack_param_delta = i.InputInt32(instr->InputCount() - 1); | 516 int stack_param_delta = i.InputInt32(instr->InputCount() - 1); |
| 508 AssembleDeconstructActivationRecord(stack_param_delta); | 517 AssembleDeconstructActivationRecord(stack_param_delta); |
| 509 if (instr->InputAt(0)->IsImmediate()) { | 518 if (instr->InputAt(0)->IsImmediate()) { |
| 510 __ Jump(Handle<Code>::cast(i.InputHeapObject(0)), | 519 __ Jump(Handle<Code>::cast(i.InputHeapObject(0)), |
| 511 RelocInfo::CODE_TARGET); | 520 RelocInfo::CODE_TARGET); |
| (...skipping 11 matching lines...) |
| 523 if (FLAG_debug_code) { | 532 if (FLAG_debug_code) { |
| 524 // Check the function's context matches the context argument. | 533 // Check the function's context matches the context argument. |
| 525 UseScratchRegisterScope scope(masm()); | 534 UseScratchRegisterScope scope(masm()); |
| 526 Register temp = scope.AcquireX(); | 535 Register temp = scope.AcquireX(); |
| 527 __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset)); | 536 __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset)); |
| 528 __ cmp(cp, temp); | 537 __ cmp(cp, temp); |
| 529 __ Assert(eq, kWrongFunctionContext); | 538 __ Assert(eq, kWrongFunctionContext); |
| 530 } | 539 } |
| 531 __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset)); | 540 __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset)); |
| 532 __ Call(x10); | 541 __ Call(x10); |
| 542 // TODO(titzer): this is ugly. JSSP should be a caller-save register |
| 543 // in this case, but it is not possible to express in the register |
| 544 // allocator. |
| 545 CallDescriptor::Flags flags = |
| 546 static_cast<CallDescriptor::Flags>(MiscField::decode(opcode)); |
| 547 if (flags & CallDescriptor::kRestoreJSSP) { |
| 548 __ mov(jssp, csp); |
| 549 } |
| 533 frame_access_state()->ClearSPDelta(); | 550 frame_access_state()->ClearSPDelta(); |
| 534 RecordCallPosition(instr); | 551 RecordCallPosition(instr); |
| 535 break; | 552 break; |
| 536 } | 553 } |
| 537 case kArchTailCallJSFunction: { | 554 case kArchTailCallJSFunction: { |
| 538 Register func = i.InputRegister(0); | 555 Register func = i.InputRegister(0); |
| 539 if (FLAG_debug_code) { | 556 if (FLAG_debug_code) { |
| 540 // Check the function's context matches the context argument. | 557 // Check the function's context matches the context argument. |
| 541 UseScratchRegisterScope scope(masm()); | 558 UseScratchRegisterScope scope(masm()); |
| 542 Register temp = scope.AcquireX(); | 559 Register temp = scope.AcquireX(); |
| (...skipping 335 matching lines...) |
| 878 __ Bfi(i.OutputRegister(), i.InputRegister(1), i.InputInt6(2), | 895 __ Bfi(i.OutputRegister(), i.InputRegister(1), i.InputInt6(2), |
| 879 i.InputInt6(3)); | 896 i.InputInt6(3)); |
| 880 break; | 897 break; |
| 881 case kArm64TestAndBranch32: | 898 case kArm64TestAndBranch32: |
| 882 case kArm64TestAndBranch: | 899 case kArm64TestAndBranch: |
| 883 // Pseudo instructions turned into tbz/tbnz in AssembleArchBranch. | 900 // Pseudo instructions turned into tbz/tbnz in AssembleArchBranch. |
| 884 break; | 901 break; |
| 885 case kArm64CompareAndBranch32: | 902 case kArm64CompareAndBranch32: |
| 886 // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch. | 903 // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch. |
| 887 break; | 904 break; |
| 888 case kArm64ClaimForCallArguments: { | 905 case kArm64ClaimCSP: { |
| 889 __ Claim(i.InputInt32(0)); | 906 int count = i.InputInt32(0); |
| 890 frame_access_state()->IncreaseSPDelta(i.InputInt32(0)); | 907 Register prev = __ StackPointer(); |
| 908 __ SetStackPointer(csp); |
| 909 __ Claim(count); |
| 910 __ SetStackPointer(prev); |
| 911 frame_access_state()->IncreaseSPDelta(count); |
| 891 break; | 912 break; |
| 892 } | 913 } |
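The new kArm64ClaimCSP case brackets Claim() with a save/switch/restore of the assembler's active stack pointer, because Claim() adjusts whichever register StackPointer() currently returns. A toy sketch of that pattern, with a stand-in Assembler in place of V8's MacroAssembler:

#include <cassert>
#include <string>

// Toy assembler that tracks which register stack ops act on, mirroring
// MacroAssembler::StackPointer()/SetStackPointer().
class Assembler {
 public:
  const std::string& StackPointer() const { return sp_; }
  void SetStackPointer(const std::string& reg) { sp_ = reg; }
  void Claim(int count) {}  // would emit: sub <sp_>, <sp_>, #count * 8

 private:
  std::string sp_ = "jssp";
};

int main() {
  Assembler masm;
  std::string prev = masm.StackPointer();  // remember the active SP
  masm.SetStackPointer("csp");             // claim slots from the C stack
  masm.Claim(4);
  masm.SetStackPointer(prev);              // restore for the code that follows
  assert(masm.StackPointer() == "jssp");
  return 0;
}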
| 893 case kArm64Poke: { | 914 case kArm64ClaimJSSP: { |
| 915 int count = i.InputInt32(0); |
| 916 if (csp.Is(__ StackPointer())) { |
| 917 // No JSSP is set up. Compute it from the CSP. |
| 918 int even = RoundUp(count, 2); |
| 919 __ Sub(jssp, csp, count * kPointerSize); |
| 920 __ Sub(csp, csp, even * kPointerSize); // Must always be aligned. |
| 921 frame_access_state()->IncreaseSPDelta(even); |
| 922 } else { |
| 923 // JSSP is the current stack pointer, just use regular Claim(). |
| 924 __ Claim(count); |
| 925 frame_access_state()->IncreaseSPDelta(count); |
| 926 } |
| 927 break; |
| 928 } |
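The csp branch of kArm64ClaimJSSP needs the rounding because the ARM64 ABI requires csp to stay 16-byte aligned, while jssp counts exact 8-byte slots; note that the recorded SP delta is `even`, not `count`, since it tracks the csp adjustment. A minimal sketch of the arithmetic, assuming a made-up starting address and with RoundUpToEven standing in for V8's RoundUp(count, 2):

#include <cassert>
#include <cstdint>

constexpr int kPointerSize = 8;  // one stack slot on arm64

// Stand-in for RoundUp(count, 2): round up to an even slot count.
constexpr int RoundUpToEven(int count) { return (count + 1) & ~1; }

int main() {
  uint64_t csp = 0x7ffffff0;  // assumption: some 16-byte aligned address
  int count = 3;              // slots requested for JS values

  int even = RoundUpToEven(count);             // 4
  uint64_t jssp = csp - count * kPointerSize;  // jssp covers exactly `count` slots
  csp -= even * kPointerSize;                  // csp moves by an even slot count

  assert(csp % 16 == 0);  // csp stays 16-byte aligned
  assert(jssp - csp == uint64_t((even - count) * kPointerSize));  // at most one pad slot
  return 0;
}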
| 929 case kArm64PokeCSP: // fall through |
| 930 case kArm64PokeJSSP: { |
| 931 Register prev = __ StackPointer(); |
| 932 __ SetStackPointer(arch_opcode == kArm64PokeCSP ? csp : jssp); |
| 894 Operand operand(i.InputInt32(1) * kPointerSize); | 933 Operand operand(i.InputInt32(1) * kPointerSize); |
| 895 if (instr->InputAt(0)->IsDoubleRegister()) { | 934 if (instr->InputAt(0)->IsDoubleRegister()) { |
| 896 __ Poke(i.InputFloat64Register(0), operand); | 935 __ Poke(i.InputFloat64Register(0), operand); |
| 897 } else { | 936 } else { |
| 898 __ Poke(i.InputRegister(0), operand); | 937 __ Poke(i.InputRegister(0), operand); |
| 899 } | 938 } |
| 939 __ SetStackPointer(prev); |
| 900 break; | 940 break; |
| 901 } | 941 } |
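kArm64PokeCSP and kArm64PokeJSSP share one body: Poke() stores at a byte offset from the active stack pointer without moving it, so the opcode only selects which register the store is relative to. A toy model of that semantics, using plain pointers as stand-ins for the two arm64 stack pointers:

#include <cassert>
#include <cstdint>
#include <cstring>

constexpr int kPointerSize = 8;

// Poke: write `value` at sp + slot * kPointerSize; sp itself is unchanged.
void Poke(uint8_t* sp, int slot, uint64_t value) {
  std::memcpy(sp + slot * kPointerSize, &value, sizeof(value));
}

int main() {
  alignas(16) uint8_t stack[64] = {};
  uint8_t* csp = stack;   // stand-ins for csp/jssp; assume they currently
  uint8_t* jssp = stack;  // point at the same address

  Poke(csp, 1, 0xdeadbeef);  // like kArm64PokeCSP with slot input 1

  uint64_t read;
  std::memcpy(&read, jssp + kPointerSize, sizeof(read));
  assert(read == 0xdeadbeef);  // the store is visible through either pointer
  return 0;
}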
| 902 case kArm64PokePair: { | 942 case kArm64PokePair: { |
| 903 int slot = i.InputInt32(2) - 1; | 943 int slot = i.InputInt32(2) - 1; |
| 904 if (instr->InputAt(0)->IsDoubleRegister()) { | 944 if (instr->InputAt(0)->IsDoubleRegister()) { |
| 905 __ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0), | 945 __ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0), |
| 906 slot * kPointerSize); | 946 slot * kPointerSize); |
| 907 } else { | 947 } else { |
| 908 __ PokePair(i.InputRegister(1), i.InputRegister(0), | 948 __ PokePair(i.InputRegister(1), i.InputRegister(0), |
| 909 slot * kPointerSize); | 949 slot * kPointerSize); |
| (...skipping 736 matching lines...) |
| 1646 padding_size -= kInstructionSize; | 1686 padding_size -= kInstructionSize; |
| 1647 } | 1687 } |
| 1648 } | 1688 } |
| 1649 } | 1689 } |
| 1650 | 1690 |
| 1651 #undef __ | 1691 #undef __ |
| 1652 | 1692 |
| 1653 } // namespace compiler | 1693 } // namespace compiler |
| 1654 } // namespace internal | 1694 } // namespace internal |
| 1655 } // namespace v8 | 1695 } // namespace v8 |