Index: src/compiler/arm64/code-generator-arm64.cc
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index fa3a04e55f285aee9d658f2dedeeff16457d6d27..7b4640db0a444731b28685aa275678a0eff95c45 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -385,8 +385,7 @@ void CodeGenerator::AssembleDeconstructActivationRecord() {
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   Arm64OperandConverter i(this, instr);
-  InstructionCode opcode = instr->opcode();
-  switch (ArchOpcodeField::decode(opcode)) {
+  switch (instr->arch_opcode()) {
     case kArchCallCodeObject: {
       EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
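For context on the refactoring in this hunk: instr->arch_opcode() (and the instr->flags_mode() calls in the hunks below) replaces the manual *Field::decode(opcode) pattern with accessors on Instruction. A minimal sketch of what those accessors would look like, assuming Instruction stores the packed InstructionCode and ArchOpcodeField/FlagsModeField/FlagsConditionField are the existing BitField helpers; the class shown here is illustrative, not the full V8 definition:

  // Sketch only: decoding moves behind accessors, so call sites no
  // longer need to hold a local `opcode` variable.
  class Instruction {
   public:
    InstructionCode opcode() const { return opcode_; }
    ArchOpcode arch_opcode() const { return ArchOpcodeField::decode(opcode()); }
    FlagsMode flags_mode() const { return FlagsModeField::decode(opcode()); }
    FlagsCondition flags_condition() const {
      return FlagsConditionField::decode(opcode());
    }
    // ... rest of the existing class ...
   private:
    InstructionCode opcode_;
  };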
@@ -506,7 +505,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Add32:
-      if (FlagsModeField::decode(opcode) != kFlags_none) {
+      if (instr->flags_mode() != kFlags_none) {
         __ Adds(i.OutputRegister32(), i.InputRegister32(0),
                 i.InputOperand2_32(1));
       } else {
@@ -642,7 +641,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Sub32:
-      if (FlagsModeField::decode(opcode) != kFlags_none) {
+      if (instr->flags_mode() != kFlags_none) {
         __ Subs(i.OutputRegister32(), i.InputRegister32(0),
                 i.InputOperand2_32(1));
       } else {
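The Add32 and Sub32 hunks share one pattern: emit the flag-setting form (ADDS/SUBS, which updates NZCV) only when the instruction carries a flags mode, i.e. when some later consumer (a branch, a materialized boolean, or, with this patch, a select) reads the condition flags. A hypothetical helper capturing that pattern, purely to illustrate the control flow; the lambdas stand in for the macro-assembler calls and this helper is not a V8 API:

  // Illustrative only: `plain` would wrap __ Add/__ Sub and
  // `flag_setting` would wrap __ Adds/__ Subs.
  template <typename EmitPlain, typename EmitFlagSetting>
  void EmitArithmetic(Instruction* instr, EmitPlain plain,
                      EmitFlagSetting flag_setting) {
    if (instr->flags_mode() != kFlags_none) {
      flag_setting();  // updates NZCV for a later consumer
    } else {
      plain();         // leaves NZCV untouched
    }
  }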
@@ -748,6 +747,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kArm64Tst32:
       __ Tst(i.InputRegister32(0), i.InputOperand32(1));
       break;
+    case kArm64Float32CmpAndFloat32Sel:
[Benedikt Meurer, 2015/09/21 17:31:43] Hm, this instruction looks redundant. Why do you n…
[jbramley, 2015/09/22 07:45:20] Because, although I haven't done so yet, we might…
+      DCHECK(instr->flags_mode() == kFlags_select);
+      // Fall through.
     case kArm64Float32Cmp:
       if (instr->InputAt(1)->IsDoubleRegister()) {
         __ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1));
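The new kArm64Float32CmpAndFloat32Sel case emits only the FCMP half of the fused operation, by falling through to kArm64Float32Cmp; the FCSEL half is emitted separately by AssembleArchSelect (added further down in this patch). A rough sketch of how the two halves would be sequenced by the shared code-generator driver; the dispatch shown is an assumption for illustration, not code from this patch:

  // Assumed driver shape: the arch-specific switch emits the
  // comparison, then the flags mode decides what consumes the flags.
  void CodeGenerator::AssembleInstructionSketch(Instruction* instr) {
    AssembleArchInstruction(instr);  // Fcmp, via the fall-through above
    if (instr->flags_mode() == kFlags_select) {
      // Fcsel, emitted by the new AssembleArchSelect below.
      AssembleArchSelect(instr, instr->flags_condition());
    }
    // kFlags_branch and kFlags_set would be handled analogously.
  }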
@@ -788,6 +790,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kArm64Float32Sqrt:
       __ Fsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
       break;
+    case kArm64Float64CmpAndFloat64Sel:
[Benedikt Meurer, 2015/09/21 17:31:43] Same here?
+      DCHECK(instr->flags_mode() == kFlags_select);
+      // Fall through.
     case kArm64Float64Cmp:
       if (instr->InputAt(1)->IsDoubleRegister()) {
         __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
@@ -1069,6 +1074,35 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
 }
+void CodeGenerator::AssembleArchSelect(Instruction* instr,
+                                       FlagsCondition condition) {
+  Arm64OperandConverter i(this, instr);
+
+  DCHECK_EQ(1u, instr->OutputCount());
+
+  Condition cc = FlagsConditionToCondition(condition);
+  switch (instr->arch_opcode()) {
+    case kArm64Float32CmpAndFloat32Sel: {
[Benedikt Meurer, 2015/09/21 17:31:43] I'm not sure if this approach scales well. Because…
[jbramley, 2015/09/22 07:45:21] Indeed, but (unless I'm mistaken), TF can't track…
+      DoubleRegister result = i.OutputFloat32Register();
+      DoubleRegister a = i.InputFloat32Register(instr->InputCount() - 2);
+      DoubleRegister b = i.InputFloat32Register(instr->InputCount() - 1);
+      __ Fcsel(result, a, b, cc);
+      return;
+    }
+    case kArm64Float64CmpAndFloat64Sel: {
+      DoubleRegister result = i.OutputFloat64Register();
+      DoubleRegister a = i.InputFloat64Register(instr->InputCount() - 2);
+      DoubleRegister b = i.InputFloat64Register(instr->InputCount() - 1);
+      __ Fcsel(result, a, b, cc);
+      return;
+    }
+    default:
+      UNREACHABLE();
+      return;
+  }
+}
+
+
 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
   Arm64OperandConverter i(this, instr);
   Register input = i.InputRegister32(0);
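For reference, FCSEL is a branch-free conditional select: it picks the first value register when the condition holds and the second otherwise. The two value inputs are appended after the comparison operands, which is why AssembleArchSelect reads them at InputCount() - 2 and InputCount() - 1. In plain C++ the semantics amount to the following behavioural sketch (not V8 code):

  // Branch-free select on a previously computed condition: a fused
  // kArm64Float32CmpAndFloat32Sel(lhs, rhs, a, b) computes roughly
  // Fcmp(lhs, rhs); result = condition_holds(NZCV) ? a : b;
  float FcselSketch(bool condition_holds, float a, float b) {
    return condition_holds ? a : b;
  }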