| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/code-generator.h" | 5 #include "src/compiler/code-generator.h" |
| 6 | 6 |
| 7 #include "src/compiler/code-generator-impl.h" | 7 #include "src/compiler/code-generator-impl.h" |
| 8 #include "src/compiler/gap-resolver.h" | 8 #include "src/compiler/gap-resolver.h" |
| 9 #include "src/compiler/node-matchers.h" | 9 #include "src/compiler/node-matchers.h" |
| 10 #include "src/compiler/node-properties-inl.h" | 10 #include "src/compiler/node-properties-inl.h" |
| (...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 86 case kMode_MR1I: | 86 case kMode_MR1I: |
| 87 case kMode_MR2I: | 87 case kMode_MR2I: |
| 88 case kMode_MR4I: | 88 case kMode_MR4I: |
| 89 case kMode_MR8I: { | 89 case kMode_MR8I: { |
| 90 Register base = InputRegister(NextOffset(offset)); | 90 Register base = InputRegister(NextOffset(offset)); |
| 91 Register index = InputRegister(NextOffset(offset)); | 91 Register index = InputRegister(NextOffset(offset)); |
| 92 ScaleFactor scale = ScaleFor(kMode_MR1I, mode); | 92 ScaleFactor scale = ScaleFor(kMode_MR1I, mode); |
| 93 int32_t disp = InputInt32(NextOffset(offset)); | 93 int32_t disp = InputInt32(NextOffset(offset)); |
| 94 return Operand(base, index, scale, disp); | 94 return Operand(base, index, scale, disp); |
| 95 } | 95 } |
| 96 case kMode_M1: | 96 case kMode_M1: { |
| 97 Register base = InputRegister(NextOffset(offset)); |
| 98 int32_t disp = 0; |
| 99 return Operand(base, disp); |
| 100 } |
| 97 case kMode_M2: | 101 case kMode_M2: |
| 102 UNREACHABLE();  // Should use kMode_MR with more compact encoding instead |
| 103 return Operand(no_reg, 0); |
| 98 case kMode_M4: | 104 case kMode_M4: |
| 99 case kMode_M8: { | 105 case kMode_M8: { |
| 100 Register index = InputRegister(NextOffset(offset)); | 106 Register index = InputRegister(NextOffset(offset)); |
| 101 ScaleFactor scale = ScaleFor(kMode_M1, mode); | 107 ScaleFactor scale = ScaleFor(kMode_M1, mode); |
| 102 int32_t disp = 0; | 108 int32_t disp = 0; |
| 103 return Operand(index, scale, disp); | 109 return Operand(index, scale, disp); |
| 104 } | 110 } |
| 105 case kMode_M1I: | 111 case kMode_M1I: |
| 106 case kMode_M2I: | 112 case kMode_M2I: |
| 107 case kMode_M4I: | 113 case kMode_M4I: |
| (...skipping 596 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 704 if (instr->HasOutput()) { | 710 if (instr->HasOutput()) { |
| 705 __ movsd(i.OutputDoubleRegister(), i.MemoryOperand()); | 711 __ movsd(i.OutputDoubleRegister(), i.MemoryOperand()); |
| 706 } else { | 712 } else { |
| 707 int index = 0; | 713 int index = 0; |
| 708 Operand operand = i.MemoryOperand(&index); | 714 Operand operand = i.MemoryOperand(&index); |
| 709 __ movsd(operand, i.InputDoubleRegister(index)); | 715 __ movsd(operand, i.InputDoubleRegister(index)); |
| 710 } | 716 } |
| 711 break; | 717 break; |
| 712 case kX64Lea32: { | 718 case kX64Lea32: { |
| 713 AddressingMode mode = AddressingModeField::decode(instr->opcode()); | 719 AddressingMode mode = AddressingModeField::decode(instr->opcode()); |
| 714 // Shorten "leal" to "addl" or "subl" if the register allocation just | 720 // Shorten "leal" to "addl", "subl" or "shll" if the register allocation |
| 715 // happens to work out for operations with immediate operands where the | 721 // and addressing mode just happens to work out. The "addl"/"subl" forms |
| 716 // non-constant input register is the same as output register. The | 722 // in these cases are faster based on measurements. |
| 717 // "addl"/"subl" forms in these cases are faster based on empirical | 723 if (i.InputRegister(0).is(i.OutputRegister())) { |
| 718 // measurements. | 724 if (mode == kMode_MRI) { |
| 719 if (mode == kMode_MRI && i.InputRegister(0).is(i.OutputRegister())) { | 725 int32_t constant_summand = i.InputInt32(1); |
| 720 int32_t constant_summand = i.InputInt32(1); | 726 if (constant_summand > 0) { |
| 721 if (constant_summand > 0) { | 727 __ addl(i.OutputRegister(), Immediate(constant_summand)); |
| 722 __ addl(i.OutputRegister(), Immediate(constant_summand)); | 728 } else if (constant_summand < 0) { |
| 723 } else if (constant_summand < 0) { | 729 __ subl(i.OutputRegister(), Immediate(-constant_summand)); |
| 724 __ subl(i.OutputRegister(), Immediate(-constant_summand)); | 730 } |
| 731 } else if (mode == kMode_MR1 || mode == kMode_M2) { |
| 732 // Using "addl %r1, %r1" is generally faster than "shll %r1, 1" |
| 733 __ addl(i.OutputRegister(), i.InputRegister(1)); |
| 734 } else if (mode == kMode_M4) { |
| 735 __ shll(i.OutputRegister(), Immediate(2)); |
| 736 } else if (mode == kMode_M8) { |
| 737 __ shll(i.OutputRegister(), Immediate(3)); |
| 738 } else { |
| 739 __ leal(i.OutputRegister(), i.MemoryOperand()); |
| 725 } | 740 } |
| 726 } else { | 741 } else { |
| 727 __ leal(i.OutputRegister(), i.MemoryOperand()); | 742 __ leal(i.OutputRegister(), i.MemoryOperand()); |
| 728 } | 743 } |
| 729 __ AssertZeroExtended(i.OutputRegister()); | 744 __ AssertZeroExtended(i.OutputRegister()); |
| 730 break; | 745 break; |
| 731 } | 746 } |
| 732 case kX64Lea: | 747 case kX64Lea: |
| 733 __ leaq(i.OutputRegister(), i.MemoryOperand()); | 748 __ leaq(i.OutputRegister(), i.MemoryOperand()); |
| 734 break; | 749 break; |
| (...skipping 471 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1206 } | 1221 } |
| 1207 } | 1222 } |
| 1208 MarkLazyDeoptSite(); | 1223 MarkLazyDeoptSite(); |
| 1209 } | 1224 } |
| 1210 | 1225 |
| 1211 #undef __ | 1226 #undef __ |
| 1212 | 1227 |
| 1213 } // namespace internal | 1228 } // namespace internal |
| 1214 } // namespace compiler | 1229 } // namespace compiler |
| 1215 } // namespace v8 | 1230 } // namespace v8 |
| OLD | NEW |