Index: src/compiler/ppc/code-generator-ppc.cc
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
index c61983395c69ee5204f40fb1d0038881569ccc74..44a82c36b621e09b23dc0d4d87391fad495e3dd3 100644
--- a/src/compiler/ppc/code-generator-ppc.cc
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -383,16 +383,32 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
   } while (0)
-#define ASSEMBLE_STORE_FLOAT(asm_instr, asm_instrx)      \
+#define ASSEMBLE_STORE_FLOAT32()                         \
   do {                                                   \
     size_t index = 0;                                    \
     AddressingMode mode = kMode_None;                    \
     MemOperand operand = i.MemoryOperand(&mode, &index); \
     DoubleRegister value = i.InputDoubleRegister(index); \
+    __ frsp(kScratchDoubleReg, value);                   \
     if (mode == kMode_MRI) {                             \
-      __ asm_instr(value, operand);                      \
+      __ stfs(kScratchDoubleReg, operand);               \
     } else {                                             \
-      __ asm_instrx(value, operand);                     \
+      __ stfsx(kScratchDoubleReg, operand);              \
+    }                                                    \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
+  } while (0)
+
+
+#define ASSEMBLE_STORE_DOUBLE()                          \
+  do {                                                   \
+    size_t index = 0;                                    \
+    AddressingMode mode = kMode_None;                    \
+    MemOperand operand = i.MemoryOperand(&mode, &index); \
+    DoubleRegister value = i.InputDoubleRegister(index); \
+    if (mode == kMode_MRI) {                             \
+      __ stfd(value, operand);                           \
+    } else {                                             \
+      __ stfdx(value, operand);                          \
     }                                                    \
     DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
   } while (0)
@@ -468,29 +484,57 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
 // TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
-#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr, asm_instrx)  \
-  do {                                                       \
-    Label done;                                              \
-    size_t index = 0;                                        \
-    AddressingMode mode = kMode_None;                        \
-    MemOperand operand = i.MemoryOperand(&mode, index);      \
-    DCHECK_EQ(kMode_MRR, mode);                              \
-    Register offset = operand.rb();                          \
-    __ extsw(offset, offset);                                \
-    if (HasRegisterInput(instr, 2)) {                        \
-      __ cmplw(offset, i.InputRegister(2));                  \
-    } else {                                                 \
-      __ cmplwi(offset, i.InputImmediate(2));                \
-    }                                                        \
-    __ bge(&done);                                           \
-    DoubleRegister value = i.InputDoubleRegister(3);         \
-    if (mode == kMode_MRI) {                                 \
-      __ asm_instr(value, operand);                          \
-    } else {                                                 \
-      __ asm_instrx(value, operand);                         \
-    }                                                        \
-    __ bind(&done);                                          \
-    DCHECK_EQ(LeaveRC, i.OutputRCBit());                     \
+#define ASSEMBLE_CHECKED_STORE_FLOAT32()                 \
+  do {                                                   \
+    Label done;                                          \
+    size_t index = 0;                                    \
+    AddressingMode mode = kMode_None;                    \
+    MemOperand operand = i.MemoryOperand(&mode, index);  \
+    DCHECK_EQ(kMode_MRR, mode);                          \
+    Register offset = operand.rb();                      \
+    __ extsw(offset, offset);                            \
+    if (HasRegisterInput(instr, 2)) {                    \
+      __ cmplw(offset, i.InputRegister(2));              \
+    } else {                                             \
+      __ cmplwi(offset, i.InputImmediate(2));            \
+    }                                                    \
+    __ bge(&done);                                       \
+    DoubleRegister value = i.InputDoubleRegister(3);     \
+    __ frsp(kScratchDoubleReg, value);                   \
+    if (mode == kMode_MRI) {                             \
+      __ stfs(kScratchDoubleReg, operand);               \
+    } else {                                             \
+      __ stfsx(kScratchDoubleReg, operand);              \
+    }                                                    \
+    __ bind(&done);                                      \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
+  } while (0)
+
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_DOUBLE()                  \
+  do {                                                   \
+    Label done;                                          \
+    size_t index = 0;                                    \
+    AddressingMode mode = kMode_None;                    \
+    MemOperand operand = i.MemoryOperand(&mode, index);  \
+    DCHECK_EQ(kMode_MRR, mode);                          \
+    Register offset = operand.rb();                      \
+    __ extsw(offset, offset);                            \
+    if (HasRegisterInput(instr, 2)) {                    \
+      __ cmplw(offset, i.InputRegister(2));              \
+    } else {                                             \
+      __ cmplwi(offset, i.InputImmediate(2));            \
+    }                                                    \
+    __ bge(&done);                                       \
+    DoubleRegister value = i.InputDoubleRegister(3);     \
+    if (mode == kMode_MRI) {                             \
+      __ stfd(value, operand);                           \
+    } else {                                             \
+      __ stfdx(value, operand);                          \
+    }                                                    \
+    __ bind(&done);                                      \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
   } while (0)
@@ -607,8 +651,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_And32:
-    case kPPC_And64:
+    case kPPC_And:
       if (HasRegisterInput(instr, 1)) {
         __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                 i.OutputRCBit());
@@ -616,13 +659,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
         __ andi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
       }
       break;
-    case kPPC_AndComplement32:
-    case kPPC_AndComplement64:
+    case kPPC_AndComplement:
       __ andc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.OutputRCBit());
       break;
-    case kPPC_Or32:
-    case kPPC_Or64:
+    case kPPC_Or:
       if (HasRegisterInput(instr, 1)) {
         __ orx(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                i.OutputRCBit());
@@ -631,13 +672,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
         DCHECK_EQ(LeaveRC, i.OutputRCBit());
       }
       break;
-    case kPPC_OrComplement32:
-    case kPPC_OrComplement64:
+    case kPPC_OrComplement:
       __ orc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.OutputRCBit());
       break;
-    case kPPC_Xor32:
-    case kPPC_Xor64:
+    case kPPC_Xor:
       if (HasRegisterInput(instr, 1)) {
         __ xor_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                 i.OutputRCBit());
@@ -692,8 +731,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       }
       break;
 #endif
-    case kPPC_Not32:
-    case kPPC_Not64:
+    case kPPC_Not:
       __ notx(i.OutputRegister(), i.InputRegister(0), i.OutputRCBit());
       break;
     case kPPC_RotLeftAndMask32:
@@ -714,8 +752,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
                 63 - i.InputInt32(2), i.OutputRCBit());
       break;
 #endif
-    case kPPC_Add32:
-    case kPPC_Add64:
+    case kPPC_Add:
       if (HasRegisterInput(instr, 1)) {
         __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                LeaveOE, i.OutputRCBit());
@@ -727,11 +764,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kPPC_AddWithOverflow32:
       ASSEMBLE_ADD_WITH_OVERFLOW();
       break;
-    case kPPC_AddFloat64:
+    case kPPC_AddDouble:
       ASSEMBLE_FLOAT_BINOP_RC(fadd);
       break;
-    case kPPC_Sub32:
-    case kPPC_Sub64:
+    case kPPC_Sub:
       if (HasRegisterInput(instr, 1)) {
         __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                LeaveOE, i.OutputRCBit());
@@ -743,7 +779,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kPPC_SubWithOverflow32:
       ASSEMBLE_SUB_WITH_OVERFLOW();
       break;
-    case kPPC_SubFloat64:
+    case kPPC_SubDouble:
       ASSEMBLE_FLOAT_BINOP_RC(fsub);
       break;
     case kPPC_Mul32:
@@ -764,7 +800,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ mulhwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                 i.OutputRCBit());
       break;
-    case kPPC_MulFloat64:
+    case kPPC_MulDouble:
       ASSEMBLE_FLOAT_BINOP_RC(fmul);
       break;
     case kPPC_Div32:
@@ -787,7 +823,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
 #endif
-    case kPPC_DivFloat64:
+    case kPPC_DivDouble:
       ASSEMBLE_FLOAT_BINOP_RC(fdiv);
       break;
     case kPPC_Mod32:
@@ -806,37 +842,36 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       ASSEMBLE_MODULO(divdu, mulld);
       break;
 #endif
-    case kPPC_ModFloat64:
+    case kPPC_ModDouble:
       // TODO(bmeurer): We should really get rid of this special instruction,
       // and generate a CallAddress instruction instead.
       ASSEMBLE_FLOAT_MODULO();
       break;
-    case kPPC_Neg32:
-    case kPPC_Neg64:
+    case kPPC_Neg:
       __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
       break;
-    case kPPC_MaxFloat64:
+    case kPPC_MaxDouble:
       ASSEMBLE_FLOAT_MAX(kScratchDoubleReg);
       break;
-    case kPPC_MinFloat64:
+    case kPPC_MinDouble:
      ASSEMBLE_FLOAT_MIN(kScratchDoubleReg);
       break;
-    case kPPC_SqrtFloat64:
+    case kPPC_SqrtDouble:
       ASSEMBLE_FLOAT_UNOP_RC(fsqrt);
       break;
-    case kPPC_FloorFloat64:
+    case kPPC_FloorDouble:
      ASSEMBLE_FLOAT_UNOP_RC(frim);
       break;
-    case kPPC_CeilFloat64:
+    case kPPC_CeilDouble:
       ASSEMBLE_FLOAT_UNOP_RC(frip);
       break;
-    case kPPC_TruncateFloat64:
+    case kPPC_TruncateDouble:
       ASSEMBLE_FLOAT_UNOP_RC(friz);
       break;
-    case kPPC_RoundFloat64:
+    case kPPC_RoundDouble:
       ASSEMBLE_FLOAT_UNOP_RC(frin);
       break;
-    case kPPC_NegFloat64:
+    case kPPC_NegDouble:
       ASSEMBLE_FLOAT_UNOP_RC(fneg);
       break;
     case kPPC_Cntlz32:
@@ -851,7 +886,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       ASSEMBLE_COMPARE(cmp, cmpl);
       break;
 #endif
-    case kPPC_CmpFloat64:
+    case kPPC_CmpDouble:
       ASSEMBLE_FLOAT_COMPARE(fcmpu);
       break;
     case kPPC_Tst32:
@@ -903,17 +938,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
 #endif
-    case kPPC_Int32ToFloat64:
+    case kPPC_Int32ToDouble:
       __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_Uint32ToFloat64:
+    case kPPC_Uint32ToDouble:
       __ ConvertUnsignedIntToDouble(i.InputRegister(0),
                                     i.OutputDoubleRegister());
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_Float64ToInt32:
-    case kPPC_Float64ToUint32:
+    case kPPC_DoubleToInt32:
+    case kPPC_DoubleToUint32:
       __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
 #if !V8_TARGET_ARCH_PPC64
                               kScratchReg,
@@ -921,31 +956,31 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
                               i.OutputRegister(), kScratchDoubleReg);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_Float64ToFloat32:
+    case kPPC_DoubleToFloat32:
       ASSEMBLE_FLOAT_UNOP_RC(frsp);
       break;
-    case kPPC_Float32ToFloat64:
+    case kPPC_Float32ToDouble:
       // Nothing to do.
       __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_Float64ExtractLowWord32:
+    case kPPC_DoubleExtractLowWord32:
       __ MovDoubleLowToInt(i.OutputRegister(), i.InputDoubleRegister(0));
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_Float64ExtractHighWord32:
+    case kPPC_DoubleExtractHighWord32:
       __ MovDoubleHighToInt(i.OutputRegister(), i.InputDoubleRegister(0));
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_Float64InsertLowWord32:
+    case kPPC_DoubleInsertLowWord32:
       __ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1), r0);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_Float64InsertHighWord32:
+    case kPPC_DoubleInsertHighWord32:
       __ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1), r0);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_Float64Construct:
+    case kPPC_DoubleConstruct:
 #if V8_TARGET_ARCH_PPC64
       __ MovInt64ComponentsToDouble(i.OutputDoubleRegister(),
                                     i.InputRegister(0), i.InputRegister(1), r0);
@@ -979,7 +1014,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kPPC_LoadFloat32:
       ASSEMBLE_LOAD_FLOAT(lfs, lfsx);
       break;
-    case kPPC_LoadFloat64:
+    case kPPC_LoadDouble:
       ASSEMBLE_LOAD_FLOAT(lfd, lfdx);
       break;
     case kPPC_StoreWord8:
@@ -997,10 +1032,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       break;
 #endif
     case kPPC_StoreFloat32:
-      ASSEMBLE_STORE_FLOAT(stfs, stfsx);
+      ASSEMBLE_STORE_FLOAT32();
       break;
-    case kPPC_StoreFloat64:
-      ASSEMBLE_STORE_FLOAT(stfd, stfdx);
+    case kPPC_StoreDouble:
+      ASSEMBLE_STORE_DOUBLE();
       break;
     case kPPC_StoreWriteBarrier:
       ASSEMBLE_STORE_WRITE_BARRIER();
@@ -1037,10 +1072,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       ASSEMBLE_CHECKED_STORE_INTEGER(stw, stwx);
       break;
     case kCheckedStoreFloat32:
-      ASSEMBLE_CHECKED_STORE_FLOAT(stfs, stfsx);
+      ASSEMBLE_CHECKED_STORE_FLOAT32();
      break;
     case kCheckedStoreFloat64:
-      ASSEMBLE_CHECKED_STORE_FLOAT(stfd, stfdx);
+      ASSEMBLE_CHECKED_STORE_DOUBLE();
       break;
     default:
       UNREACHABLE();
@@ -1063,7 +1098,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
          (op == kPPC_AddWithOverflow32 || op == kPPC_SubWithOverflow32));
   Condition cond = FlagsConditionToCondition(condition);
-  if (op == kPPC_CmpFloat64) {
+  if (op == kPPC_CmpDouble) {
     // check for unordered if necessary
     if (cond == le) {
       __ bunordered(flabel, cr);
@@ -1089,7 +1124,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
   PPCOperandConverter i(this, instr);
   Label done;
   ArchOpcode op = instr->arch_opcode();
-  bool check_unordered = (op == kPPC_CmpFloat64);
+  bool check_unordered = (op == kPPC_CmpDouble);
   CRegister cr = cr0;
   // Overflow checked for add/sub only.