| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/code-generator.h" | 5 #include "src/compiler/code-generator.h" |
| 6 | 6 |
| 7 #include "src/compiler/code-generator-impl.h" | 7 #include "src/compiler/code-generator-impl.h" |
| 8 #include "src/compiler/gap-resolver.h" | 8 #include "src/compiler/gap-resolver.h" |
| 9 #include "src/compiler/node-matchers.h" | 9 #include "src/compiler/node-matchers.h" |
| 10 #include "src/ia32/assembler-ia32.h" | 10 #include "src/ia32/assembler-ia32.h" |
| (...skipping 292 matching lines...) |
| 303 __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset)); | 303 __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset)); |
| 304 __ Assert(equal, kWrongFunctionContext); | 304 __ Assert(equal, kWrongFunctionContext); |
| 305 } | 305 } |
| 306 __ call(FieldOperand(func, JSFunction::kCodeEntryOffset)); | 306 __ call(FieldOperand(func, JSFunction::kCodeEntryOffset)); |
| 307 AddSafepointAndDeopt(instr); | 307 AddSafepointAndDeopt(instr); |
| 308 break; | 308 break; |
| 309 } | 309 } |
| 310 case kArchJmp: | 310 case kArchJmp: |
| 311 AssembleArchJump(i.InputRpo(0)); | 311 AssembleArchJump(i.InputRpo(0)); |
| 312 break; | 312 break; |
| 313 case kArchSwitch: | 313 case kArchLookupSwitch: |
| 314 AssembleArchSwitch(instr); | 314 AssembleArchLookupSwitch(instr); |
| 315 break; |
| 316 case kArchTableSwitch: |
| 317 AssembleArchTableSwitch(instr); |
| 315 break; | 318 break; |
| 316 case kArchNop: | 319 case kArchNop: |
| 317 // don't emit code for nops. | 320 // don't emit code for nops. |
| 318 break; | 321 break; |
| 319 case kArchRet: | 322 case kArchRet: |
| 320 AssembleReturn(); | 323 AssembleReturn(); |
| 321 break; | 324 break; |
| 322 case kArchStackPointer: | 325 case kArchStackPointer: |
| 323 __ mov(i.OutputRegister(), esp); | 326 __ mov(i.OutputRegister(), esp); |
| 324 break; | 327 break; |
| (...skipping 429 matching lines...) |
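The hunk above replaces the single `kArchSwitch` opcode with two specialized opcodes: `kArchLookupSwitch` and `kArchTableSwitch`, presumably chosen by the instruction selector based on how dense the case values are (the selection logic is not part of this file). A minimal standalone C++ sketch of the two dispatch strategies the new opcodes correspond to; the function names, case values, and handlers here are illustrative, not part of the patch:

```cpp
#include <cstdio>

// Lookup switch: a linear chain of compares, one per case value.
// This shape suits sparse case values.
void LookupDispatch(int key) {
  if (key == 10) { std::puts("case 10"); return; }
  if (key == 400) { std::puts("case 400"); return; }
  std::puts("default");
}

// Table switch: one bounds check plus an indexed jump through a table.
// This shape suits dense case values starting at 0.
void TableDispatch(int key) {
  using Handler = void (*)();
  static const Handler kTable[] = {
      [] { std::puts("case 0"); },
      [] { std::puts("case 1"); },
      [] { std::puts("case 2"); },
  };
  if (static_cast<unsigned>(key) >= 3) {  // unsigned compare rejects key < 0 too
    std::puts("default");
    return;
  }
  kTable[key]();
}

int main() {
  LookupDispatch(400);  // prints "case 400"
  TableDispatch(-1);    // prints "default"
}
```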
| 754 // Add a jump if not falling through to the next block. | 757 // Add a jump if not falling through to the next block. |
| 755 if (!branch->fallthru) __ jmp(flabel); | 758 if (!branch->fallthru) __ jmp(flabel); |
| 756 } | 759 } |
| 757 | 760 |
| 758 | 761 |
| 759 void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) { | 762 void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) { |
| 760 if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target)); | 763 if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target)); |
| 761 } | 764 } |
| 762 | 765 |
| 763 | 766 |
| 764 void CodeGenerator::AssembleArchSwitch(Instruction* instr) { | |
| 765 IA32OperandConverter i(this, instr); | |
| 766 size_t const label_count = instr->InputCount() - 1; | |
| 767 Label** labels = zone()->NewArray<Label*>(label_count); | |
| 768 for (size_t index = 0; index < label_count; ++index) { | |
| 769 labels[index] = GetLabel(i.InputRpo(index + 1)); | |
| 770 } | |
| 771 Label* const table = AddJumpTable(labels, label_count); | |
| 772 __ jmp(Operand::JumpTable(i.InputRegister(0), times_4, table)); | |
| 773 } | |
| 774 | |
| 775 | |
| 776 // Assembles boolean materializations after an instruction. | 767 // Assembles boolean materializations after an instruction. |
| 777 void CodeGenerator::AssembleArchBoolean(Instruction* instr, | 768 void CodeGenerator::AssembleArchBoolean(Instruction* instr, |
| 778 FlagsCondition condition) { | 769 FlagsCondition condition) { |
| 779 IA32OperandConverter i(this, instr); | 770 IA32OperandConverter i(this, instr); |
| 780 Label done; | 771 Label done; |
| 781 | 772 |
| 782 // Materialize a full 32-bit 1 or 0 value. The result register is always the | 773 // Materialize a full 32-bit 1 or 0 value. The result register is always the |
| 783 // last output of the instruction. | 774 // last output of the instruction. |
| 784 Label check; | 775 Label check; |
| 785 DCHECK_NE(0u, instr->OutputCount()); | 776 DCHECK_NE(0u, instr->OutputCount()); |
| (...skipping 58 matching lines...) |
| 844 __ j(cc, &set, Label::kNear); | 835 __ j(cc, &set, Label::kNear); |
| 845 __ Move(reg, Immediate(0)); | 836 __ Move(reg, Immediate(0)); |
| 846 __ jmp(&done, Label::kNear); | 837 __ jmp(&done, Label::kNear); |
| 847 __ bind(&set); | 838 __ bind(&set); |
| 848 __ mov(reg, Immediate(1)); | 839 __ mov(reg, Immediate(1)); |
| 849 } | 840 } |
| 850 __ bind(&done); | 841 __ bind(&done); |
| 851 } | 842 } |
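The visible tail of `AssembleArchBoolean` above is the branch-based fallback: when the condition can't be materialized with a single `setcc` (presumably because the output register has no byte form on IA32; the preceding check is elided from this diff), it jumps over a zero store. A rough C++ equivalent of that control flow, as a sketch with illustrative names:

```cpp
// Branchy materialization of a flags condition into a full 32-bit value.
// Mirrors: j(cc, &set); Move(reg, 0); jmp(&done); bind(&set); mov(reg, 1).
int MaterializeBoolean(bool condition_holds) {
  int reg;
  if (condition_holds) {  // __ j(cc, &set, Label::kNear)
    reg = 1;              // at &set: __ mov(reg, Immediate(1))
  } else {
    reg = 0;              // fallthrough: __ Move(reg, Immediate(0)); jmp(&done)
  }
  return reg;             // __ bind(&done)
}
```

One asymmetry worth noting: `Move(reg, Immediate(0))` is typically emitted as `xor reg, reg`, which clobbers EFLAGS; that is safe here only because the conditional jump has already consumed the flags.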
| 852 | 843 |
| 853 | 844 |
| 845 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) { |
| 846 IA32OperandConverter i(this, instr); |
| 847 Register input = i.InputRegister(0); |
| 848 for (size_t index = 2; index < instr->InputCount(); index += 2) { |
| 849 __ cmp(input, Immediate(i.InputInt32(index + 0))); |
| 850 __ j(equal, GetLabel(i.InputRpo(index + 1))); |
| 851 } |
| 852 AssembleArchJump(i.InputRpo(1)); |
| 853 } |
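`AssembleArchLookupSwitch` reads a fixed operand layout: input 0 is the key register, input 1 is the default target, and inputs 2, 3, 4, 5, ... are (case value, target) pairs, compared in order. A runnable C++ model of that decoding loop; the container type and names are assumptions for illustration only:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Model of the lookup-switch operand layout:
// inputs[0] = key, inputs[1] = default target, then (value, target) pairs.
int32_t LookupSwitchTarget(const std::vector<int32_t>& inputs) {
  const int32_t key = inputs[0];
  for (size_t index = 2; index < inputs.size(); index += 2) {
    if (key == inputs[index]) return inputs[index + 1];  // cmp + j(equal, ...)
  }
  return inputs[1];  // AssembleArchJump(i.InputRpo(1)), the default block
}

int main() {
  // key = 5, default target = 99, cases: 1 -> 10, 5 -> 20.
  std::printf("%d\n", LookupSwitchTarget({5, 99, 1, 10, 5, 20}));  // prints 20
}
```

One detail the model hides: the trailing `AssembleArchJump` emits no `jmp` at all when the default block is next in assembly order (see its definition above), so a lookup switch whose default falls through costs nothing beyond the compare chain.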
| 854 |
| 855 |
| 856 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) { |
| 857 IA32OperandConverter i(this, instr); |
| 858 Register input = i.InputRegister(0); |
| 859 size_t const case_count = instr->InputCount() - 2; |
| 860 Label** cases = zone()->NewArray<Label*>(case_count); |
| 861 for (size_t index = 0; index < case_count; ++index) { |
| 862 cases[index] = GetLabel(i.InputRpo(index + 2)); |
| 863 } |
| 864 Label* const table = AddJumpTable(cases, case_count); |
| 865 __ cmp(input, Immediate(case_count)); |
| 866 __ j(above_equal, GetLabel(i.InputRpo(1))); |
| 867 __ jmp(Operand::JumpTable(input, times_4, table)); |
| 868 } |
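`AssembleArchTableSwitch` uses the same layout except that inputs 2 onward form a jump table indexed directly by the key, one target per consecutive case starting at 0. The single `cmp` + `j(above_equal, ...)` pair is an unsigned bounds check, so negative keys also route to the default block. A runnable sketch of the same logic, again with illustrative types:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Model of the table-switch layout: inputs[0] = key, inputs[1] = default,
// inputs[2..] = one target per consecutive case value starting at 0.
int32_t TableSwitchTarget(const std::vector<int32_t>& inputs) {
  const int32_t key = inputs[0];
  const size_t case_count = inputs.size() - 2;
  // Unsigned compare: a negative key wraps to a huge value and takes the
  // default path, mirroring cmp(input, case_count) + j(above_equal, default).
  if (static_cast<uint32_t>(key) >= case_count) return inputs[1];
  return inputs[2 + key];  // jmp Operand::JumpTable(input, times_4, table)
}

int main() {
  std::printf("%d\n", TableSwitchTarget({2, 99, 10, 11, 12}));   // prints 12
  std::printf("%d\n", TableSwitchTarget({-1, 99, 10, 11, 12}));  // prints 99
}
```

The `times_4` scale in the real `jmp` reflects that each table entry is a 4-byte address on IA32.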
| 869 |
| 870 |
| 854 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) { | 871 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) { |
| 855 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry( | 872 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry( |
| 856 isolate(), deoptimization_id, Deoptimizer::LAZY); | 873 isolate(), deoptimization_id, Deoptimizer::LAZY); |
| 857 __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY); | 874 __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY); |
| 858 } | 875 } |
| 859 | 876 |
| 860 | 877 |
| 861 // The calling convention for JSFunctions on IA32 passes arguments on the | 878 // The calling convention for JSFunctions on IA32 passes arguments on the |
| 862 // stack and the JSFunction and context in EDI and ESI, respectively, thus | 879 // stack and the JSFunction and context in EDI and ESI, respectively, thus |
| 863 // the steps of the call look as follows: | 880 // the steps of the call look as follows: |
| (...skipping 389 matching lines...) |
| 1253 } | 1270 } |
| 1254 } | 1271 } |
| 1255 MarkLazyDeoptSite(); | 1272 MarkLazyDeoptSite(); |
| 1256 } | 1273 } |
| 1257 | 1274 |
| 1258 #undef __ | 1275 #undef __ |
| 1259 | 1276 |
| 1260 } // namespace compiler | 1277 } // namespace compiler |
| 1261 } // namespace internal | 1278 } // namespace internal |
| 1262 } // namespace v8 | 1279 } // namespace v8 |