Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 //===- subzero/src/IceTargetLoweringX8632.cpp - x86-32 lowering -----------===// | 1 //===- subzero/src/IceTargetLoweringX8632.cpp - x86-32 lowering -----------===// |
| 2 // | 2 // |
| 3 // The Subzero Code Generator | 3 // The Subzero Code Generator |
| 4 // | 4 // |
| 5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
| 6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
| 7 // | 7 // |
| 8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
| 9 // | 9 // |
| 10 // This file implements the TargetLoweringX8632 class, which | 10 // This file implements the TargetLoweringX8632 class, which |
| (...skipping 228 matching lines...) | |
| 239 #undef X | 239 #undef X |
| 240 // Repeat the static asserts with respect to the high-level table | 240 // Repeat the static asserts with respect to the high-level table |
| 241 // entries in case the high-level table has extra entries. | 241 // entries in case the high-level table has extra entries. |
| 242 #define X(tag, size, align, elts, elty, str) \ | 242 #define X(tag, size, align, elts, elty, str) \ |
| 243 static_assert(_table1_##tag == _table2_##tag, \ | 243 static_assert(_table1_##tag == _table2_##tag, \ |
| 244 "Inconsistency between ICETYPEX8632_TABLE and ICETYPE_TABLE"); | 244 "Inconsistency between ICETYPEX8632_TABLE and ICETYPE_TABLE"); |
| 245 ICETYPE_TABLE | 245 ICETYPE_TABLE |
| 246 #undef X | 246 #undef X |
| 247 } // end of namespace dummy3 | 247 } // end of namespace dummy3 |
| 248 | 248 |
| 249 // A helper class to ease the setting of RandomizationPoolingPaused | |
| 250 // to disable constant blinding or pooling for some translation phases. | |
| 251 class BoolFlagSaver { | |
| 252 BoolFlagSaver() = delete; | |
| 253 BoolFlagSaver(const BoolFlagSaver &) = delete; | |
| 254 BoolFlagSaver &operator=(const BoolFlagSaver &) = delete; | |
| 255 | |
| 256 public: | |
| 257 BoolFlagSaver(bool &F, bool NewValue) : Flag(F) { | |
| 258 OldValue = F; | |
| 259 F = NewValue; | |
| 260 } | |
| 261 ~BoolFlagSaver() { Flag = OldValue; } | |
| 262 | |
| 263 private: | |
| 264 bool &Flag; | |
|
Jim Stichnoth
2015/06/19 16:51:03
Can you do this?
bool &const Flag;
const bool
qining
2015/06/19 20:22:25
I can initialize OldValue in the initialization list.
Jim Stichnoth
2015/06/19 23:12:15
I think I was wrong about const and Flag, sorry.
| |
| 265 bool OldValue; | |
| 266 }; | |
| 267 | |
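BoolFlagSaver is a small RAII guard: the constructor records the flag's current value and overwrites it, and the destructor restores the saved value when the guard leaves scope. That is also why the const question in the thread above resolves the way it does: the reference member must stay a plain `bool &` because the destructor writes through it, while the saved value can become `const` once it is set in the initializer list. A minimal standalone sketch of the same pattern (illustrative names, not the CL's exact code):

```cpp
#include <cassert>

// RAII guard that saves a bool, forces a new value, and restores the old
// value on scope exit -- the pattern BoolFlagSaver implements.
class ScopedBoolOverride {
public:
  ScopedBoolOverride(bool &F, bool NewValue) : Flag(F), OldValue(F) {
    Flag = NewValue;
  }
  ~ScopedBoolOverride() { Flag = OldValue; } // restore on scope exit
  ScopedBoolOverride(const ScopedBoolOverride &) = delete;
  ScopedBoolOverride &operator=(const ScopedBoolOverride &) = delete;

private:
  bool &Flag;          // not const: the destructor assigns through it
  const bool OldValue; // const is fine once set in the initializer list
};

int main() {
  bool Paused = false;
  {
    ScopedBoolOverride B(Paused, true); // e.g. pause pooling for one phase
    assert(Paused);
  }
  assert(!Paused); // automatically restored when B goes out of scope
}
```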
| 249 } // end of anonymous namespace | 268 } // end of anonymous namespace |
| 250 | 269 |
| 251 BoolFoldingEntry::BoolFoldingEntry(Inst *I) | 270 BoolFoldingEntry::BoolFoldingEntry(Inst *I) |
| 252 : Instr(I), IsComplex(BoolFolding::hasComplexLowering(I)), IsLiveOut(true), | 271 : Instr(I), IsComplex(BoolFolding::hasComplexLowering(I)), IsLiveOut(true), |
| 253 NumUses(0) {} | 272 NumUses(0) {} |
| 254 | 273 |
| 255 BoolFolding::BoolFoldingProducerKind | 274 BoolFolding::BoolFoldingProducerKind |
| 256 BoolFolding::getProducerKind(const Inst *Instr) { | 275 BoolFolding::getProducerKind(const Inst *Instr) { |
| 257 if (llvm::isa<InstIcmp>(Instr)) { | 276 if (llvm::isa<InstIcmp>(Instr)) { |
| 258 if (Instr->getSrc(0)->getType() != IceType_i64) | 277 if (Instr->getSrc(0)->getType() != IceType_i64) |
| (...skipping 133 matching lines...) | |
| 392 void TargetX8632::initNodeForLowering(CfgNode *Node) { | 411 void TargetX8632::initNodeForLowering(CfgNode *Node) { |
| 393 FoldingInfo.init(Node); | 412 FoldingInfo.init(Node); |
| 394 FoldingInfo.dump(Func); | 413 FoldingInfo.dump(Func); |
| 395 } | 414 } |
| 396 | 415 |
| 397 TargetX8632::TargetX8632(Cfg *Func) | 416 TargetX8632::TargetX8632(Cfg *Func) |
| 398 : TargetLowering(Func), | 417 : TargetLowering(Func), |
| 399 InstructionSet(static_cast<X86InstructionSet>( | 418 InstructionSet(static_cast<X86InstructionSet>( |
| 400 Func->getContext()->getFlags().getTargetInstructionSet() - | 419 Func->getContext()->getFlags().getTargetInstructionSet() - |
| 401 TargetInstructionSet::X86InstructionSet_Begin)), | 420 TargetInstructionSet::X86InstructionSet_Begin)), |
| 402 IsEbpBasedFrame(false), NeedsStackAlignment(false), | 421 IsEbpBasedFrame(false), NeedsStackAlignment(false), SpillAreaSizeBytes(0), |
| 403 SpillAreaSizeBytes(0) { | 422 RandomizationPoolingPaused(false) { |
| 404 static_assert((X86InstructionSet::End - X86InstructionSet::Begin) == | 423 static_assert((X86InstructionSet::End - X86InstructionSet::Begin) == |
| 405 (TargetInstructionSet::X86InstructionSet_End - | 424 (TargetInstructionSet::X86InstructionSet_End - |
| 406 TargetInstructionSet::X86InstructionSet_Begin), | 425 TargetInstructionSet::X86InstructionSet_Begin), |
| 407 "X86InstructionSet range different from TargetInstructionSet"); | 426 "X86InstructionSet range different from TargetInstructionSet"); |
| 408 // TODO: Don't initialize IntegerRegisters and friends every time. | 427 // TODO: Don't initialize IntegerRegisters and friends every time. |
| 409 // Instead, initialize in some sort of static initializer for the | 428 // Instead, initialize in some sort of static initializer for the |
| 410 // class. | 429 // class. |
| 411 llvm::SmallBitVector IntegerRegisters(RegX8632::Reg_NUM); | 430 llvm::SmallBitVector IntegerRegisters(RegX8632::Reg_NUM); |
| 412 llvm::SmallBitVector IntegerRegistersI8(RegX8632::Reg_NUM); | 431 llvm::SmallBitVector IntegerRegistersI8(RegX8632::Reg_NUM); |
| 413 llvm::SmallBitVector FloatRegisters(RegX8632::Reg_NUM); | 432 llvm::SmallBitVector FloatRegisters(RegX8632::Reg_NUM); |
| (...skipping 61 matching lines...) | |
| 475 return; | 494 return; |
| 476 | 495 |
| 477 // TODO: It should be sufficient to use the fastest liveness | 496 // TODO: It should be sufficient to use the fastest liveness |
| 478 // calculation, i.e. livenessLightweight(). However, for some | 497 // calculation, i.e. livenessLightweight(). However, for some |
| 479 // reason that slows down the rest of the translation. Investigate. | 498 // reason that slows down the rest of the translation. Investigate. |
| 480 Func->liveness(Liveness_Basic); | 499 Func->liveness(Liveness_Basic); |
| 481 if (Func->hasError()) | 500 if (Func->hasError()) |
| 482 return; | 501 return; |
| 483 Func->dump("After x86 address mode opt"); | 502 Func->dump("After x86 address mode opt"); |
| 484 | 503 |
| 485 doLoadOpt(); | 504 // qining: disable constant blinding or pooling for load optimization |
| 505 { | |
| 506 BoolFlagSaver B(RandomizationPoolingPaused, true); | |
| 507 doLoadOpt(); | |
| 508 } | |
| 486 Func->genCode(); | 509 Func->genCode(); |
| 487 if (Func->hasError()) | 510 if (Func->hasError()) |
| 488 return; | 511 return; |
| 489 Func->dump("After x86 codegen"); | 512 Func->dump("After x86 codegen"); |
| 490 | 513 |
| 491 // Register allocation. This requires instruction renumbering and | 514 // Register allocation. This requires instruction renumbering and |
| 492 // full liveness analysis. | 515 // full liveness analysis. |
| 493 Func->renumberInstructions(); | 516 Func->renumberInstructions(); |
| 494 if (Func->hasError()) | 517 if (Func->hasError()) |
| 495 return; | 518 return; |
| 496 Func->liveness(Liveness_Intervals); | 519 Func->liveness(Liveness_Intervals); |
| 497 if (Func->hasError()) | 520 if (Func->hasError()) |
| 498 return; | 521 return; |
| 499 // Validate the live range computations. The expensive validation | 522 // Validate the live range computations. The expensive validation |
| 500 // call is deliberately only made when assertions are enabled. | 523 // call is deliberately only made when assertions are enabled. |
| 501 assert(Func->validateLiveness()); | 524 assert(Func->validateLiveness()); |
| 502 // The post-codegen dump is done here, after liveness analysis and | 525 // The post-codegen dump is done here, after liveness analysis and |
| 503 // associated cleanup, to make the dump cleaner and more useful. | 526 // associated cleanup, to make the dump cleaner and more useful. |
| 504 Func->dump("After initial x8632 codegen"); | 527 Func->dump("After initial x8632 codegen"); |
| 505 Func->getVMetadata()->init(VMK_All); | 528 Func->getVMetadata()->init(VMK_All); |
| 506 regAlloc(RAK_Global); | 529 regAlloc(RAK_Global); |
| 507 if (Func->hasError()) | 530 if (Func->hasError()) |
| 508 return; | 531 return; |
| 509 Func->dump("After linear scan regalloc"); | 532 Func->dump("After linear scan regalloc"); |
| 510 | 533 |
| 511 if (Ctx->getFlags().getPhiEdgeSplit()) { | 534 if (Ctx->getFlags().getPhiEdgeSplit()) { |
| 512 Func->advancedPhiLowering(); | 535 // qining: In general we need to pause constant blinding or pooling |
|
Jim Stichnoth
2015/06/19 16:51:03
Don't tag comments with your name. Use TODO(qining).
qining
2015/06/19 20:22:26
Done.
| |
| 536 // during advanced phi lowering, unless the lowering assignment has a | |
| 537 // physical register for the Dest Variable | |
| 538 { | |
| 539 BoolFlagSaver B(RandomizationPoolingPaused, true); | |
| 540 Func->advancedPhiLowering(); | |
| 541 } | |
| 513 Func->dump("After advanced Phi lowering"); | 542 Func->dump("After advanced Phi lowering"); |
| 514 } | 543 } |
| 515 | 544 |
| 516 // Stack frame mapping. | 545 // Stack frame mapping. |
| 517 Func->genFrame(); | 546 Func->genFrame(); |
| 518 if (Func->hasError()) | 547 if (Func->hasError()) |
| 519 return; | 548 return; |
| 520 Func->dump("After stack frame mapping"); | 549 Func->dump("After stack frame mapping"); |
| 521 | 550 |
| 522 Func->contractEmptyNodes(); | 551 Func->contractEmptyNodes(); |
| (...skipping 232 matching lines...) | |
| 755 return RegNames[RegNum]; | 784 return RegNames[RegNum]; |
| 756 } | 785 } |
| 757 } | 786 } |
| 758 | 787 |
| 759 void TargetX8632::emitVariable(const Variable *Var) const { | 788 void TargetX8632::emitVariable(const Variable *Var) const { |
| 760 Ostream &Str = Ctx->getStrEmit(); | 789 Ostream &Str = Ctx->getStrEmit(); |
| 761 if (Var->hasReg()) { | 790 if (Var->hasReg()) { |
| 762 Str << "%" << getRegName(Var->getRegNum(), Var->getType()); | 791 Str << "%" << getRegName(Var->getRegNum(), Var->getType()); |
| 763 return; | 792 return; |
| 764 } | 793 } |
| 765 if (Var->getWeight().isInf()) | 794 if (Var->getWeight().isInf()) { |
| 766 llvm_unreachable("Infinite-weight Variable has no register assigned"); | 795 llvm_unreachable("Infinite-weight Variable has no register assigned"); |
| 796 } | |
| 767 int32_t Offset = Var->getStackOffset(); | 797 int32_t Offset = Var->getStackOffset(); |
| 768 if (!hasFramePointer()) | 798 if (!hasFramePointer()) |
| 769 Offset += getStackAdjustment(); | 799 Offset += getStackAdjustment(); |
| 770 if (Offset) | 800 if (Offset) |
| 771 Str << Offset; | 801 Str << Offset; |
| 772 const Type FrameSPTy = IceType_i32; | 802 const Type FrameSPTy = IceType_i32; |
| 773 Str << "(%" << getRegName(getFrameOrStackReg(), FrameSPTy) << ")"; | 803 Str << "(%" << getRegName(getFrameOrStackReg(), FrameSPTy) << ")"; |
| 774 } | 804 } |
| 775 | 805 |
| 776 X8632::Address TargetX8632::stackVarToAsmOperand(const Variable *Var) const { | 806 X8632::Address TargetX8632::stackVarToAsmOperand(const Variable *Var) const { |
| 777 if (Var->hasReg()) | 807 if (Var->hasReg()) |
| 778 llvm_unreachable("Stack Variable has a register assigned"); | 808 llvm_unreachable("Stack Variable has a register assigned"); |
| 779 if (Var->getWeight().isInf()) | 809 if (Var->getWeight().isInf()) { |
| 780 llvm_unreachable("Infinite-weight Variable has no register assigned"); | 810 llvm_unreachable("Infinite-weight Variable has no register assigned"); |
| 811 } | |
| 781 int32_t Offset = Var->getStackOffset(); | 812 int32_t Offset = Var->getStackOffset(); |
| 782 if (!hasFramePointer()) | 813 if (!hasFramePointer()) |
| 783 Offset += getStackAdjustment(); | 814 Offset += getStackAdjustment(); |
| 784 return X8632::Address(RegX8632::getEncodedGPR(getFrameOrStackReg()), Offset); | 815 return X8632::Address(RegX8632::getEncodedGPR(getFrameOrStackReg()), Offset); |
| 785 } | 816 } |
| 786 | 817 |
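Taken together, emitVariable() and stackVarToAsmOperand() turn a spilled variable into a base+displacement operand: the displacement is the variable's stack offset (plus the in-flight stack adjustment when no frame pointer is used), and the base register is the frame or stack register. A hedged sketch of the AT&T-style operand text this produces, using made-up offsets rather than values from the CL:

```cpp
#include <cstdio>

// Sketch of the "<offset>(%<reg>)" text emitVariable() builds; the offset is
// omitted when it is zero. Offsets and the adjustment below are examples.
int main() {
  int Offset = -8;
  bool HasFramePointer = true;
  int StackAdjustment = 12; // applied only when addressing off %esp
  if (!HasFramePointer)
    Offset += StackAdjustment;
  const char *Reg = HasFramePointer ? "ebp" : "esp";
  if (Offset)
    std::printf("%d", Offset);
  std::printf("(%%%s)\n", Reg); // e.g. "-8(%ebp)" or "4(%esp)"
}
```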
| 787 void TargetX8632::lowerArguments() { | 818 void TargetX8632::lowerArguments() { |
| 788 VarList &Args = Func->getArgs(); | 819 VarList &Args = Func->getArgs(); |
| 789 // The first four arguments of vector type, regardless of their | 820 // The first four arguments of vector type, regardless of their |
| 790 // position relative to the other arguments in the argument list, are | 821 // position relative to the other arguments in the argument list, are |
| (...skipping 370 matching lines...) | |
| 1161 Operand *TargetX8632::loOperand(Operand *Operand) { | 1192 Operand *TargetX8632::loOperand(Operand *Operand) { |
| 1162 assert(Operand->getType() == IceType_i64 || | 1193 assert(Operand->getType() == IceType_i64 || |
| 1163 Operand->getType() == IceType_f64); | 1194 Operand->getType() == IceType_f64); |
| 1164 if (Operand->getType() != IceType_i64 && Operand->getType() != IceType_f64) | 1195 if (Operand->getType() != IceType_i64 && Operand->getType() != IceType_f64) |
| 1165 return Operand; | 1196 return Operand; |
| 1166 if (Variable *Var = llvm::dyn_cast<Variable>(Operand)) { | 1197 if (Variable *Var = llvm::dyn_cast<Variable>(Operand)) { |
| 1167 split64(Var); | 1198 split64(Var); |
| 1168 return Var->getLo(); | 1199 return Var->getLo(); |
| 1169 } | 1200 } |
| 1170 if (ConstantInteger64 *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) { | 1201 if (ConstantInteger64 *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) { |
| 1171 return Ctx->getConstantInt32(static_cast<uint32_t>(Const->getValue())); | 1202 ConstantInteger32 *ConstInt = llvm::dyn_cast<ConstantInteger32>( |
| 1203 Ctx->getConstantInt32(static_cast<int32_t>(Const->getValue()))); | |
| 1204 return legalize(ConstInt); | |
| 1172 } | 1205 } |
| 1173 if (OperandX8632Mem *Mem = llvm::dyn_cast<OperandX8632Mem>(Operand)) { | 1206 if (OperandX8632Mem *Mem = llvm::cast<OperandX8632Mem>(Operand)) { |
|
Jim Stichnoth
2015/06/19 16:51:03
llvm::dyn_cast
qining
2015/06/19 20:22:25
Done.
| |
| 1174 return OperandX8632Mem::create(Func, IceType_i32, Mem->getBase(), | 1207 OperandX8632Mem *MemOperand = OperandX8632Mem::create( |
| 1175 Mem->getOffset(), Mem->getIndex(), | 1208 Func, IceType_i32, Mem->getBase(), Mem->getOffset(), Mem->getIndex(), |
| 1176 Mem->getShift(), Mem->getSegmentRegister()); | 1209 Mem->getShift(), Mem->getSegmentRegister()); |
| 1210 // Test if we should randomize or pool the offset. If so, create the mem | |
| 1211 // operand with the blinded/pooled constant; otherwise, return the mem | |
| 1212 // operand as an ordinary mem operand. | |
| 1213 return legalize(MemOperand); | |
| 1177 } | 1214 } |
| 1178 llvm_unreachable("Unsupported operand type"); | 1215 llvm_unreachable("Unsupported operand type"); |
| 1179 return nullptr; | 1216 return nullptr; |
| 1180 } | 1217 } |
| 1181 | 1218 |
| 1182 Operand *TargetX8632::hiOperand(Operand *Operand) { | 1219 Operand *TargetX8632::hiOperand(Operand *Operand) { |
| 1183 assert(Operand->getType() == IceType_i64 || | 1220 assert(Operand->getType() == IceType_i64 || |
| 1184 Operand->getType() == IceType_f64); | 1221 Operand->getType() == IceType_f64); |
| 1185 if (Operand->getType() != IceType_i64 && Operand->getType() != IceType_f64) | 1222 if (Operand->getType() != IceType_i64 && Operand->getType() != IceType_f64) |
| 1186 return Operand; | 1223 return Operand; |
| 1187 if (Variable *Var = llvm::dyn_cast<Variable>(Operand)) { | 1224 if (Variable *Var = llvm::dyn_cast<Variable>(Operand)) { |
| 1188 split64(Var); | 1225 split64(Var); |
| 1189 return Var->getHi(); | 1226 return Var->getHi(); |
| 1190 } | 1227 } |
| 1191 if (ConstantInteger64 *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) { | 1228 if (ConstantInteger64 *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) { |
| 1192 return Ctx->getConstantInt32( | 1229 ConstantInteger32 *ConstInt = llvm::dyn_cast<ConstantInteger32>( |
| 1193 static_cast<uint32_t>(Const->getValue() >> 32)); | 1230 Ctx->getConstantInt32(static_cast<int32_t>(Const->getValue() >> 32))); |
| 1231 // check if we need to blind/pool the constant | |
| 1232 return legalize(ConstInt); | |
| 1194 } | 1233 } |
| 1195 if (OperandX8632Mem *Mem = llvm::dyn_cast<OperandX8632Mem>(Operand)) { | 1234 if (OperandX8632Mem *Mem = llvm::dyn_cast<OperandX8632Mem>(Operand)) { |
| 1196 Constant *Offset = Mem->getOffset(); | 1235 Constant *Offset = Mem->getOffset(); |
| 1197 if (Offset == nullptr) { | 1236 if (Offset == nullptr) { |
| 1198 Offset = Ctx->getConstantInt32(4); | 1237 Offset = Ctx->getConstantInt32(4); |
| 1199 } else if (ConstantInteger32 *IntOffset = | 1238 } else if (ConstantInteger32 *IntOffset = |
| 1200 llvm::dyn_cast<ConstantInteger32>(Offset)) { | 1239 llvm::dyn_cast<ConstantInteger32>(Offset)) { |
| 1201 Offset = Ctx->getConstantInt32(4 + IntOffset->getValue()); | 1240 Offset = Ctx->getConstantInt32(4 + IntOffset->getValue()); |
| 1202 } else if (ConstantRelocatable *SymOffset = | 1241 } else if (ConstantRelocatable *SymOffset = |
| 1203 llvm::dyn_cast<ConstantRelocatable>(Offset)) { | 1242 llvm::dyn_cast<ConstantRelocatable>(Offset)) { |
| 1204 assert(!Utils::WouldOverflowAdd(SymOffset->getOffset(), 4)); | 1243 assert(!Utils::WouldOverflowAdd(SymOffset->getOffset(), 4)); |
| 1205 Offset = | 1244 Offset = |
| 1206 Ctx->getConstantSym(4 + SymOffset->getOffset(), SymOffset->getName(), | 1245 Ctx->getConstantSym(4 + SymOffset->getOffset(), SymOffset->getName(), |
| 1207 SymOffset->getSuppressMangling()); | 1246 SymOffset->getSuppressMangling()); |
| 1208 } | 1247 } |
| 1209 return OperandX8632Mem::create(Func, IceType_i32, Mem->getBase(), Offset, | 1248 OperandX8632Mem *MemOperand = OperandX8632Mem::create( |
| 1210 Mem->getIndex(), Mem->getShift(), | 1249 Func, IceType_i32, Mem->getBase(), Offset, Mem->getIndex(), |
| 1211 Mem->getSegmentRegister()); | 1250 Mem->getShift(), Mem->getSegmentRegister()); |
| 1251 // Test if the Offset is an eligible i32 constant for randomization and | |
| 1252 // pooling. Blind/pool it if it is. Otherwise return as an ordinary mem | |
| 1253 // operand. | |
| 1254 return legalize(MemOperand); | |
| 1212 } | 1255 } |
| 1213 llvm_unreachable("Unsupported operand type"); | 1256 llvm_unreachable("Unsupported operand type"); |
| 1214 return nullptr; | 1257 return nullptr; |
| 1215 } | 1258 } |
| 1216 | 1259 |
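For the i64 paths above, loOperand() and hiOperand() split one 64-bit operand into two 32-bit ones: a ConstantInteger64 is split arithmetically, a memory operand gets a second access at offset+4 for the high word, and with this patch each resulting constant or memory operand is additionally passed through legalize() so the new immediate or offset can be blinded or pooled. A standalone illustration of the constant split (example value, not taken from the CL):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // How a 64-bit immediate decomposes into the halves that loOperand() and
  // hiOperand() return; the value is just an example.
  uint64_t Value = 0x1122334455667788ULL;
  uint32_t Lo = static_cast<uint32_t>(Value);       // 0x55667788 (low word)
  uint32_t Hi = static_cast<uint32_t>(Value >> 32); // 0x11223344 (high word)
  // A memory operand splits analogously: the high half is accessed at the
  // original address plus 4 bytes (i386 is little-endian).
  std::printf("lo=0x%08x hi=0x%08x\n", Lo, Hi);
}
```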
| 1217 llvm::SmallBitVector TargetX8632::getRegisterSet(RegSetMask Include, | 1260 llvm::SmallBitVector TargetX8632::getRegisterSet(RegSetMask Include, |
| 1218 RegSetMask Exclude) const { | 1261 RegSetMask Exclude) const { |
| 1219 llvm::SmallBitVector Registers(RegX8632::Reg_NUM); | 1262 llvm::SmallBitVector Registers(RegX8632::Reg_NUM); |
| 1220 | 1263 |
| 1221 #define X(val, encode, name, name16, name8, scratch, preserved, stackptr, \ | 1264 #define X(val, encode, name, name16, name8, scratch, preserved, stackptr, \ |
| (...skipping 58 matching lines...) | |
| 1280 // multiple of the required alignment at runtime. | 1323 // multiple of the required alignment at runtime. |
| 1281 Variable *T = makeReg(IceType_i32); | 1324 Variable *T = makeReg(IceType_i32); |
| 1282 _mov(T, TotalSize); | 1325 _mov(T, TotalSize); |
| 1283 _add(T, Ctx->getConstantInt32(Alignment - 1)); | 1326 _add(T, Ctx->getConstantInt32(Alignment - 1)); |
| 1284 _and(T, Ctx->getConstantInt32(-Alignment)); | 1327 _and(T, Ctx->getConstantInt32(-Alignment)); |
| 1285 _sub(esp, T); | 1328 _sub(esp, T); |
| 1286 } | 1329 } |
| 1287 _mov(Dest, esp); | 1330 _mov(Dest, esp); |
| 1288 } | 1331 } |
| 1289 | 1332 |
| 1333 // Strength-reduce scalar integer multiplication by a constant (for | |
| 1334 // i32 or narrower) for certain constants. The lea instruction can be | |
| 1335 // used to multiply by 3, 5, or 9, and the shl instruction can be used | |
| 1336 // to multiply by powers of 2. These can be combined such that | |
| 1337 // e.g. multiplying by 100 can be done as 2 lea-based multiplies by 5, | |
| 1338 // combined with left-shifting by 2. | |
| 1339 bool TargetX8632::optimizeScalarMul(Variable *Dest, Operand *Src0, | |
| 1340 int32_t Src1) { | |
| 1341 // Disable this optimization for Om1 and O0, just to keep things | |
| 1342 // simple there. | |
| 1343 if (Ctx->getFlags().getOptLevel() < Opt_1) | |
| 1344 return false; | |
| 1345 Type Ty = Dest->getType(); | |
| 1346 Variable *T = nullptr; | |
| 1347 if (Src1 == -1) { | |
| 1348 _mov(T, Src0); | |
| 1349 _neg(T); | |
| 1350 _mov(Dest, T); | |
| 1351 return true; | |
| 1352 } | |
| 1353 if (Src1 == 0) { | |
| 1354 _mov(Dest, Ctx->getConstantZero(Ty)); | |
| 1355 return true; | |
| 1356 } | |
| 1357 if (Src1 == 1) { | |
| 1358 _mov(T, Src0); | |
| 1359 _mov(Dest, T); | |
| 1360 return true; | |
| 1361 } | |
| 1362 // Don't bother with the edge case where Src1 == MININT. | |
| 1363 if (Src1 == -Src1) | |
| 1364 return false; | |
| 1365 const bool Src1IsNegative = Src1 < 0; | |
| 1366 if (Src1IsNegative) | |
| 1367 Src1 = -Src1; | |
| 1368 uint32_t Count9 = 0; | |
| 1369 uint32_t Count5 = 0; | |
| 1370 uint32_t Count3 = 0; | |
| 1371 uint32_t Count2 = 0; | |
| 1372 uint32_t CountOps = 0; | |
| 1373 while (Src1 > 1) { | |
| 1374 if (Src1 % 9 == 0) { | |
| 1375 ++CountOps; | |
| 1376 ++Count9; | |
| 1377 Src1 /= 9; | |
| 1378 } else if (Src1 % 5 == 0) { | |
| 1379 ++CountOps; | |
| 1380 ++Count5; | |
| 1381 Src1 /= 5; | |
| 1382 } else if (Src1 % 3 == 0) { | |
| 1383 ++CountOps; | |
| 1384 ++Count3; | |
| 1385 Src1 /= 3; | |
| 1386 } else if (Src1 % 2 == 0) { | |
| 1387 if (Count2 == 0) | |
| 1388 ++CountOps; | |
| 1389 ++Count2; | |
| 1390 Src1 /= 2; | |
| 1391 } else { | |
| 1392 return false; | |
| 1393 } | |
| 1394 } | |
| 1395 // Lea optimization only works for i16 and i32 types, not i8. | |
| 1396 if (Ty != IceType_i16 && Ty != IceType_i32 && (Count3 || Count5 || Count9)) | |
| 1397 return false; | |
| 1398 // Limit the number of lea/shl operations for a single multiply, to | |
| 1399 // a somewhat arbitrary choice of 3. | |
| 1400 const uint32_t MaxOpsForOptimizedMul = 3; | |
| 1401 if (CountOps > MaxOpsForOptimizedMul) | |
| 1402 return false; | |
| 1403 _mov(T, Src0); | |
| 1404 Constant *Zero = Ctx->getConstantZero(IceType_i32); | |
| 1405 for (uint32_t i = 0; i < Count9; ++i) { | |
| 1406 const uint16_t Shift = 3; // log2(9-1) | |
| 1407 _lea(T, OperandX8632Mem::create(Func, IceType_void, T, Zero, T, Shift)); | |
| 1408 _set_dest_nonkillable(); | |
| 1409 } | |
| 1410 for (uint32_t i = 0; i < Count5; ++i) { | |
| 1411 const uint16_t Shift = 2; // log2(5-1) | |
| 1412 _lea(T, OperandX8632Mem::create(Func, IceType_void, T, Zero, T, Shift)); | |
| 1413 _set_dest_nonkillable(); | |
| 1414 } | |
| 1415 for (uint32_t i = 0; i < Count3; ++i) { | |
| 1416 const uint16_t Shift = 1; // log2(3-1) | |
| 1417 _lea(T, OperandX8632Mem::create(Func, IceType_void, T, Zero, T, Shift)); | |
| 1418 _set_dest_nonkillable(); | |
| 1419 } | |
| 1420 if (Count2) { | |
| 1421 _shl(T, Ctx->getConstantInt(Ty, Count2)); | |
| 1422 } | |
| 1423 if (Src1IsNegative) | |
| 1424 _neg(T); | |
| 1425 _mov(Dest, T); | |
| 1426 return true; | |
| 1427 } | |
| 1428 | |
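To make the factoring concrete: 100 = 5 × 5 × 4, so optimizeScalarMul() would emit two lea-by-5 steps (each computing T = T + T*4) followed by shl by 2 — three counted operations, within the MaxOpsForOptimizedMul limit of 3. The tiny standalone program below mirrors the same factor-counting loop; it is an illustration, not the CL's code:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  int32_t Src1 = 100; // example multiplier
  uint32_t Count9 = 0, Count5 = 0, Count3 = 0, Count2 = 0;
  while (Src1 > 1) {
    if (Src1 % 9 == 0) { ++Count9; Src1 /= 9; }
    else if (Src1 % 5 == 0) { ++Count5; Src1 /= 5; }
    else if (Src1 % 3 == 0) { ++Count3; Src1 /= 3; }
    else if (Src1 % 2 == 0) { ++Count2; Src1 /= 2; }
    else { std::puts("not reducible to lea/shl"); return 0; }
  }
  // For 100: Count5 == 2 and Count2 == 2, i.e. two "lea T, [T + T*4]"
  // steps followed by "shl T, 2".
  std::printf("x9=%u x5=%u x3=%u shl=%u\n", Count9, Count5, Count3, Count2);
}
```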
| 1290 void TargetX8632::lowerArithmetic(const InstArithmetic *Inst) { | 1429 void TargetX8632::lowerArithmetic(const InstArithmetic *Inst) { |
| 1291 Variable *Dest = Inst->getDest(); | 1430 Variable *Dest = Inst->getDest(); |
| 1292 Operand *Src0 = legalize(Inst->getSrc(0)); | 1431 Operand *Src0 = legalize(Inst->getSrc(0)); |
| 1293 Operand *Src1 = legalize(Inst->getSrc(1)); | 1432 Operand *Src1 = legalize(Inst->getSrc(1)); |
| 1294 if (Inst->isCommutative()) { | 1433 if (Inst->isCommutative()) { |
| 1295 if (!llvm::isa<Variable>(Src0) && llvm::isa<Variable>(Src1)) | 1434 if (!llvm::isa<Variable>(Src0) && llvm::isa<Variable>(Src1)) |
| 1296 std::swap(Src0, Src1); | 1435 std::swap(Src0, Src1); |
| 1436 if (llvm::isa<Constant>(Src0) && !llvm::isa<Constant>(Src1)) | |
| 1437 std::swap(Src0, Src1); | |
| 1297 } | 1438 } |
| 1298 if (Dest->getType() == IceType_i64) { | 1439 if (Dest->getType() == IceType_i64) { |
| 1440 switch (Inst->getOp()) { | |
|
Jim Stichnoth
2015/06/19 16:51:03
Add a comment explaining why these instructions ar
qining
2015/06/19 20:22:26
Done.
| |
| 1441 case InstArithmetic::Udiv: { | |
| 1442 const SizeT MaxSrcs = 2; | |
| 1443 InstCall *Call = makeHelperCall(H_udiv_i64, Dest, MaxSrcs); | |
| 1444 Call->addArg(Inst->getSrc(0)); | |
| 1445 Call->addArg(Inst->getSrc(1)); | |
| 1446 lowerCall(Call); | |
| 1447 return; | |
| 1448 } | |
| 1449 case InstArithmetic::Sdiv: { | |
| 1450 const SizeT MaxSrcs = 2; | |
| 1451 InstCall *Call = makeHelperCall(H_sdiv_i64, Dest, MaxSrcs); | |
| 1452 Call->addArg(Inst->getSrc(0)); | |
| 1453 Call->addArg(Inst->getSrc(1)); | |
| 1454 lowerCall(Call); | |
| 1455 return; | |
| 1456 } | |
| 1457 case InstArithmetic::Urem: { | |
| 1458 const SizeT MaxSrcs = 2; | |
| 1459 InstCall *Call = makeHelperCall(H_urem_i64, Dest, MaxSrcs); | |
| 1460 Call->addArg(Inst->getSrc(0)); | |
| 1461 Call->addArg(Inst->getSrc(1)); | |
| 1462 lowerCall(Call); | |
| 1463 return; | |
| 1464 } | |
| 1465 case InstArithmetic::Srem: { | |
| 1466 const SizeT MaxSrcs = 2; | |
| 1467 InstCall *Call = makeHelperCall(H_srem_i64, Dest, MaxSrcs); | |
| 1468 Call->addArg(Inst->getSrc(0)); | |
| 1469 Call->addArg(Inst->getSrc(1)); | |
| 1470 lowerCall(Call); | |
| 1471 return; | |
| 1472 } | |
| 1473 default: | |
| 1474 break; | |
| 1475 } | |
| 1476 | |
| 1299 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest)); | 1477 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest)); |
| 1300 Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest)); | 1478 Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest)); |
| 1301 Operand *Src0Lo = loOperand(Src0); | 1479 Operand *Src0Lo = loOperand(Src0); |
| 1302 Operand *Src0Hi = hiOperand(Src0); | 1480 Operand *Src0Hi = hiOperand(Src0); |
| 1303 Operand *Src1Lo = loOperand(Src1); | 1481 Operand *Src1Lo = loOperand(Src1); |
| 1304 Operand *Src1Hi = hiOperand(Src1); | 1482 Operand *Src1Hi = hiOperand(Src1); |
| 1305 Variable *T_Lo = nullptr, *T_Hi = nullptr; | 1483 Variable *T_Lo = nullptr, *T_Hi = nullptr; |
| 1306 switch (Inst->getOp()) { | 1484 switch (Inst->getOp()) { |
| 1307 case InstArithmetic::_num: | 1485 case InstArithmetic::_num: |
| 1308 llvm_unreachable("Unknown arithmetic operator"); | 1486 llvm_unreachable("Unknown arithmetic operator"); |
| (...skipping 169 matching lines...) | |
| 1478 // T_2 and T_3 are being assigned again because of the | 1656 // T_2 and T_3 are being assigned again because of the |
| 1479 // intra-block control flow, so T_2 needs the _mov_nonkillable | 1657 // intra-block control flow, so T_2 needs the _mov_nonkillable |
| 1480 // variant to avoid liveness problems. T_3 doesn't need special | 1658 // variant to avoid liveness problems. T_3 doesn't need special |
| 1481 // treatment because it is reassigned via _sar instead of _mov. | 1659 // treatment because it is reassigned via _sar instead of _mov. |
| 1482 _mov_nonkillable(T_2, T_3); | 1660 _mov_nonkillable(T_2, T_3); |
| 1483 _sar(T_3, SignExtend); | 1661 _sar(T_3, SignExtend); |
| 1484 Context.insert(Label); | 1662 Context.insert(Label); |
| 1485 _mov(DestLo, T_2); | 1663 _mov(DestLo, T_2); |
| 1486 _mov(DestHi, T_3); | 1664 _mov(DestHi, T_3); |
| 1487 } break; | 1665 } break; |
| 1488 case InstArithmetic::Udiv: { | |
| 1489 const SizeT MaxSrcs = 2; | |
| 1490 InstCall *Call = makeHelperCall(H_udiv_i64, Dest, MaxSrcs); | |
| 1491 Call->addArg(Inst->getSrc(0)); | |
| 1492 Call->addArg(Inst->getSrc(1)); | |
| 1493 lowerCall(Call); | |
| 1494 } break; | |
| 1495 case InstArithmetic::Sdiv: { | |
| 1496 const SizeT MaxSrcs = 2; | |
| 1497 InstCall *Call = makeHelperCall(H_sdiv_i64, Dest, MaxSrcs); | |
| 1498 Call->addArg(Inst->getSrc(0)); | |
| 1499 Call->addArg(Inst->getSrc(1)); | |
| 1500 lowerCall(Call); | |
| 1501 } break; | |
| 1502 case InstArithmetic::Urem: { | |
| 1503 const SizeT MaxSrcs = 2; | |
| 1504 InstCall *Call = makeHelperCall(H_urem_i64, Dest, MaxSrcs); | |
| 1505 Call->addArg(Inst->getSrc(0)); | |
| 1506 Call->addArg(Inst->getSrc(1)); | |
| 1507 lowerCall(Call); | |
| 1508 } break; | |
| 1509 case InstArithmetic::Srem: { | |
| 1510 const SizeT MaxSrcs = 2; | |
| 1511 InstCall *Call = makeHelperCall(H_srem_i64, Dest, MaxSrcs); | |
| 1512 Call->addArg(Inst->getSrc(0)); | |
| 1513 Call->addArg(Inst->getSrc(1)); | |
| 1514 lowerCall(Call); | |
| 1515 } break; | |
| 1516 case InstArithmetic::Fadd: | 1666 case InstArithmetic::Fadd: |
| 1517 case InstArithmetic::Fsub: | 1667 case InstArithmetic::Fsub: |
| 1518 case InstArithmetic::Fmul: | 1668 case InstArithmetic::Fmul: |
| 1519 case InstArithmetic::Fdiv: | 1669 case InstArithmetic::Fdiv: |
| 1520 case InstArithmetic::Frem: | 1670 case InstArithmetic::Frem: |
| 1521 llvm_unreachable("FP instruction with i64 type"); | 1671 llvm_unreachable("FP instruction with i64 type"); |
| 1522 break; | 1672 break; |
| 1673 default: | |
|
Jim Stichnoth
2015/06/19 16:51:03
Don't use default, instead just explicitly list Ud
qining
2015/06/19 20:22:25
Done.
| |
| 1674 llvm_unreachable("Unknown instruction with i64 type"); | |
| 1675 break; | |
| 1523 } | 1676 } |
| 1524 } else if (isVectorType(Dest->getType())) { | 1677 return; |
| 1678 } | |
| 1679 if (isVectorType(Dest->getType())) { | |
| 1525 // TODO: Trap on integer divide and integer modulo by zero. | 1680 // TODO: Trap on integer divide and integer modulo by zero. |
| 1526 // See: https://code.google.com/p/nativeclient/issues/detail?id=3899 | 1681 // See: https://code.google.com/p/nativeclient/issues/detail?id=3899 |
| 1527 if (llvm::isa<OperandX8632Mem>(Src1)) | 1682 if (llvm::isa<OperandX8632Mem>(Src1)) |
| 1528 Src1 = legalizeToVar(Src1); | 1683 Src1 = legalizeToVar(Src1); |
| 1529 switch (Inst->getOp()) { | 1684 switch (Inst->getOp()) { |
| 1530 case InstArithmetic::_num: | 1685 case InstArithmetic::_num: |
| 1531 llvm_unreachable("Unknown arithmetic operator"); | 1686 llvm_unreachable("Unknown arithmetic operator"); |
| 1532 break; | 1687 break; |
| 1533 case InstArithmetic::Add: { | 1688 case InstArithmetic::Add: { |
| 1534 Variable *T = makeReg(Dest->getType()); | 1689 Variable *T = makeReg(Dest->getType()); |
| (...skipping 108 matching lines...) | |
| 1643 case InstArithmetic::Fdiv: { | 1798 case InstArithmetic::Fdiv: { |
| 1644 Variable *T = makeReg(Dest->getType()); | 1799 Variable *T = makeReg(Dest->getType()); |
| 1645 _movp(T, Src0); | 1800 _movp(T, Src0); |
| 1646 _divps(T, Src1); | 1801 _divps(T, Src1); |
| 1647 _movp(Dest, T); | 1802 _movp(Dest, T); |
| 1648 } break; | 1803 } break; |
| 1649 case InstArithmetic::Frem: | 1804 case InstArithmetic::Frem: |
| 1650 scalarizeArithmetic(Inst->getOp(), Dest, Src0, Src1); | 1805 scalarizeArithmetic(Inst->getOp(), Dest, Src0, Src1); |
| 1651 break; | 1806 break; |
| 1652 } | 1807 } |
| 1653 } else { // Dest->getType() is non-i64 scalar | 1808 return; |
| 1654 Variable *T_edx = nullptr; | 1809 } |
| 1655 Variable *T = nullptr; | 1810 Variable *T_edx = nullptr; |
| 1656 switch (Inst->getOp()) { | 1811 Variable *T = nullptr; |
| 1657 case InstArithmetic::_num: | 1812 switch (Inst->getOp()) { |
| 1658 llvm_unreachable("Unknown arithmetic operator"); | 1813 case InstArithmetic::_num: |
| 1659 break; | 1814 llvm_unreachable("Unknown arithmetic operator"); |
| 1660 case InstArithmetic::Add: | 1815 break; |
| 1816 case InstArithmetic::Add: | |
| 1817 _mov(T, Src0); | |
| 1818 _add(T, Src1); | |
| 1819 _mov(Dest, T); | |
| 1820 break; | |
| 1821 case InstArithmetic::And: | |
| 1822 _mov(T, Src0); | |
| 1823 _and(T, Src1); | |
| 1824 _mov(Dest, T); | |
| 1825 break; | |
| 1826 case InstArithmetic::Or: | |
| 1827 _mov(T, Src0); | |
| 1828 _or(T, Src1); | |
| 1829 _mov(Dest, T); | |
| 1830 break; | |
| 1831 case InstArithmetic::Xor: | |
| 1832 _mov(T, Src0); | |
| 1833 _xor(T, Src1); | |
| 1834 _mov(Dest, T); | |
| 1835 break; | |
| 1836 case InstArithmetic::Sub: | |
| 1837 _mov(T, Src0); | |
| 1838 _sub(T, Src1); | |
| 1839 _mov(Dest, T); | |
| 1840 break; | |
| 1841 case InstArithmetic::Mul: | |
| 1842 if (auto *C = llvm::dyn_cast<ConstantInteger32>(Src1)) { | |
| 1843 if (optimizeScalarMul(Dest, Src0, C->getValue())) | |
| 1844 return; | |
| 1845 } | |
| 1846 // The 8-bit version of imul only allows the form "imul r/m8" | |
| 1847 // where T must be in eax. | |
| 1848 if (isByteSizedArithType(Dest->getType())) { | |
| 1849 _mov(T, Src0, RegX8632::Reg_eax); | |
| 1850 Src1 = legalize(Src1, Legal_Reg | Legal_Mem); | |
| 1851 } else { | |
| 1661 _mov(T, Src0); | 1852 _mov(T, Src0); |
| 1662 _add(T, Src1); | 1853 } |
| 1663 _mov(Dest, T); | 1854 _imul(T, Src1); |
| 1664 break; | 1855 _mov(Dest, T); |
| 1665 case InstArithmetic::And: | 1856 break; |
| 1666 _mov(T, Src0); | 1857 case InstArithmetic::Shl: |
| 1667 _and(T, Src1); | 1858 _mov(T, Src0); |
| 1668 _mov(Dest, T); | 1859 if (!llvm::isa<Constant>(Src1)) |
| 1669 break; | 1860 Src1 = legalizeToVar(Src1, RegX8632::Reg_ecx); |
| 1670 case InstArithmetic::Or: | 1861 _shl(T, Src1); |
| 1671 _mov(T, Src0); | 1862 _mov(Dest, T); |
| 1672 _or(T, Src1); | 1863 break; |
| 1673 _mov(Dest, T); | 1864 case InstArithmetic::Lshr: |
| 1674 break; | 1865 _mov(T, Src0); |
| 1675 case InstArithmetic::Xor: | 1866 if (!llvm::isa<Constant>(Src1)) |
| 1676 _mov(T, Src0); | 1867 Src1 = legalizeToVar(Src1, RegX8632::Reg_ecx); |
| 1677 _xor(T, Src1); | 1868 _shr(T, Src1); |
| 1678 _mov(Dest, T); | 1869 _mov(Dest, T); |
| 1679 break; | 1870 break; |
| 1680 case InstArithmetic::Sub: | 1871 case InstArithmetic::Ashr: |
| 1681 _mov(T, Src0); | 1872 _mov(T, Src0); |
| 1682 _sub(T, Src1); | 1873 if (!llvm::isa<Constant>(Src1)) |
| 1683 _mov(Dest, T); | 1874 Src1 = legalizeToVar(Src1, RegX8632::Reg_ecx); |
| 1684 break; | 1875 _sar(T, Src1); |
| 1685 case InstArithmetic::Mul: | 1876 _mov(Dest, T); |
| 1686 // TODO: Optimize for llvm::isa<Constant>(Src1) | 1877 break; |
| 1687 // TODO: Strength-reduce multiplications by a constant, | 1878 case InstArithmetic::Udiv: |
| 1688 // particularly -1 and powers of 2. Advanced: use lea to | 1879 // div and idiv are the few arithmetic operators that do not allow |
| 1689 // multiply by 3, 5, 9. | 1880 // immediates as the operand. |
| 1690 // | 1881 Src1 = legalize(Src1, Legal_Reg | Legal_Mem); |
| 1691 // The 8-bit version of imul only allows the form "imul r/m8" | 1882 if (isByteSizedArithType(Dest->getType())) { |
| 1692 // where T must be in eax. | 1883 Variable *T_ah = nullptr; |
| 1693 if (isByteSizedArithType(Dest->getType())) { | 1884 Constant *Zero = Ctx->getConstantZero(IceType_i8); |
| 1694 _mov(T, Src0, RegX8632::Reg_eax); | 1885 _mov(T, Src0, RegX8632::Reg_eax); |
| 1695 Src1 = legalize(Src1, Legal_Reg | Legal_Mem); | 1886 _mov(T_ah, Zero, RegX8632::Reg_ah); |
| 1696 } else { | 1887 _div(T, Src1, T_ah); |
| 1697 _mov(T, Src0); | 1888 _mov(Dest, T); |
| 1889 } else { | |
| 1890 Constant *Zero = Ctx->getConstantZero(IceType_i32); | |
| 1891 _mov(T, Src0, RegX8632::Reg_eax); | |
| 1892 _mov(T_edx, Zero, RegX8632::Reg_edx); | |
| 1893 _div(T, Src1, T_edx); | |
| 1894 _mov(Dest, T); | |
| 1895 } | |
| 1896 break; | |
| 1897 case InstArithmetic::Sdiv: | |
| 1898 // TODO(stichnot): Enable this after doing better performance | |
| 1899 // and cross testing. | |
| 1900 if (false && Ctx->getFlags().getOptLevel() >= Opt_1) { | |
| 1901 // Optimize division by constant power of 2, but not for Om1 | |
| 1902 // or O0, just to keep things simple there. | |
| 1903 if (auto *C = llvm::dyn_cast<ConstantInteger32>(Src1)) { | |
| 1904 int32_t Divisor = C->getValue(); | |
| 1905 uint32_t UDivisor = static_cast<uint32_t>(Divisor); | |
| 1906 if (Divisor > 0 && llvm::isPowerOf2_32(UDivisor)) { | |
| 1907 uint32_t LogDiv = llvm::Log2_32(UDivisor); | |
| 1908 Type Ty = Dest->getType(); | |
| 1909 // LLVM does the following for dest=src/(1<<log): | |
| 1910 // t=src | |
| 1911 // sar t,typewidth-1 // -1 if src is negative, 0 if not | |
| 1912 // shr t,typewidth-log | |
| 1913 // add t,src | |
| 1914 // sar t,log | |
| 1915 // dest=t | |
| 1916 uint32_t TypeWidth = X86_CHAR_BIT * typeWidthInBytes(Ty); | |
| 1917 _mov(T, Src0); | |
| 1918 // If for some reason we are dividing by 1, just treat it | |
| 1919 // like an assignment. | |
| 1920 if (LogDiv > 0) { | |
| 1921 // The initial sar is unnecessary when dividing by 2. | |
| 1922 if (LogDiv > 1) | |
| 1923 _sar(T, Ctx->getConstantInt(Ty, TypeWidth - 1)); | |
| 1924 _shr(T, Ctx->getConstantInt(Ty, TypeWidth - LogDiv)); | |
| 1925 _add(T, Src0); | |
| 1926 _sar(T, Ctx->getConstantInt(Ty, LogDiv)); | |
| 1927 } | |
| 1928 _mov(Dest, T); | |
| 1929 return; | |
| 1930 } | |
| 1698 } | 1931 } |
| 1699 _imul(T, Src1); | 1932 } |
| 1700 _mov(Dest, T); | 1933 Src1 = legalize(Src1, Legal_Reg | Legal_Mem); |
| 1701 break; | 1934 if (isByteSizedArithType(Dest->getType())) { |
| 1702 case InstArithmetic::Shl: | 1935 _mov(T, Src0, RegX8632::Reg_eax); |
| 1703 _mov(T, Src0); | 1936 _cbwdq(T, T); |
| 1704 if (!llvm::isa<Constant>(Src1)) | 1937 _idiv(T, Src1, T); |
| 1705 Src1 = legalizeToVar(Src1, RegX8632::Reg_ecx); | 1938 _mov(Dest, T); |
| 1706 _shl(T, Src1); | 1939 } else { |
| 1707 _mov(Dest, T); | 1940 T_edx = makeReg(IceType_i32, RegX8632::Reg_edx); |
| 1708 break; | 1941 _mov(T, Src0, RegX8632::Reg_eax); |
| 1709 case InstArithmetic::Lshr: | 1942 _cbwdq(T_edx, T); |
| 1710 _mov(T, Src0); | 1943 _idiv(T, Src1, T_edx); |
| 1711 if (!llvm::isa<Constant>(Src1)) | 1944 _mov(Dest, T); |
| 1712 Src1 = legalizeToVar(Src1, RegX8632::Reg_ecx); | 1945 } |
| 1713 _shr(T, Src1); | 1946 break; |
| 1714 _mov(Dest, T); | 1947 case InstArithmetic::Urem: |
| 1715 break; | 1948 Src1 = legalize(Src1, Legal_Reg | Legal_Mem); |
| 1716 case InstArithmetic::Ashr: | 1949 if (isByteSizedArithType(Dest->getType())) { |
| 1717 _mov(T, Src0); | 1950 Variable *T_ah = nullptr; |
| 1718 if (!llvm::isa<Constant>(Src1)) | 1951 Constant *Zero = Ctx->getConstantZero(IceType_i8); |
| 1719 Src1 = legalizeToVar(Src1, RegX8632::Reg_ecx); | 1952 _mov(T, Src0, RegX8632::Reg_eax); |
| 1720 _sar(T, Src1); | 1953 _mov(T_ah, Zero, RegX8632::Reg_ah); |
| 1721 _mov(Dest, T); | 1954 _div(T_ah, Src1, T); |
| 1722 break; | 1955 _mov(Dest, T_ah); |
| 1723 case InstArithmetic::Udiv: | 1956 } else { |
| 1724 // div and idiv are the few arithmetic operators that do not allow | 1957 Constant *Zero = Ctx->getConstantZero(IceType_i32); |
| 1725 // immediates as the operand. | 1958 _mov(T_edx, Zero, RegX8632::Reg_edx); |
| 1726 Src1 = legalize(Src1, Legal_Reg | Legal_Mem); | 1959 _mov(T, Src0, RegX8632::Reg_eax); |
| 1727 if (isByteSizedArithType(Dest->getType())) { | 1960 _div(T_edx, Src1, T); |
| 1728 Variable *T_ah = nullptr; | 1961 _mov(Dest, T_edx); |
| 1729 Constant *Zero = Ctx->getConstantZero(IceType_i8); | 1962 } |
| 1730 _mov(T, Src0, RegX8632::Reg_eax); | 1963 break; |
| 1731 _mov(T_ah, Zero, RegX8632::Reg_ah); | 1964 case InstArithmetic::Srem: |
| 1732 _div(T, Src1, T_ah); | 1965 // TODO(stichnot): Enable this after doing better performance |
| 1733 _mov(Dest, T); | 1966 // and cross testing. |
| 1734 } else { | 1967 if (false && Ctx->getFlags().getOptLevel() >= Opt_1) { |
| 1735 Constant *Zero = Ctx->getConstantZero(IceType_i32); | 1968 // Optimize mod by constant power of 2, but not for Om1 or O0, |
| 1736 _mov(T, Src0, RegX8632::Reg_eax); | 1969 // just to keep things simple there. |
| 1737 _mov(T_edx, Zero, RegX8632::Reg_edx); | 1970 if (auto *C = llvm::dyn_cast<ConstantInteger32>(Src1)) { |
| 1738 _div(T, Src1, T_edx); | 1971 int32_t Divisor = C->getValue(); |
| 1739 _mov(Dest, T); | 1972 uint32_t UDivisor = static_cast<uint32_t>(Divisor); |
| 1973 if (Divisor > 0 && llvm::isPowerOf2_32(UDivisor)) { | |
| 1974 uint32_t LogDiv = llvm::Log2_32(UDivisor); | |
| 1975 Type Ty = Dest->getType(); | |
| 1976 // LLVM does the following for dest=src%(1<<log): | |
| 1977 // t=src | |
| 1978 // sar t,typewidth-1 // -1 if src is negative, 0 if not | |
| 1979 // shr t,typewidth-log | |
| 1980 // add t,src | |
| 1981 // and t, -(1<<log) | |
| 1982 // sub t,src | |
| 1983 // neg t | |
| 1984 // dest=t | |
| 1985 uint32_t TypeWidth = X86_CHAR_BIT * typeWidthInBytes(Ty); | |
| 1986 // If for some reason we are dividing by 1, just assign 0. | |
| 1987 if (LogDiv == 0) { | |
| 1988 _mov(Dest, Ctx->getConstantZero(Ty)); | |
| 1989 return; | |
| 1990 } | |
| 1991 _mov(T, Src0); | |
| 1992 // The initial sar is unnecessary when dividing by 2. | |
| 1993 if (LogDiv > 1) | |
| 1994 _sar(T, Ctx->getConstantInt(Ty, TypeWidth - 1)); | |
| 1995 _shr(T, Ctx->getConstantInt(Ty, TypeWidth - LogDiv)); | |
| 1996 _add(T, Src0); | |
| 1997 _and(T, Ctx->getConstantInt(Ty, -(1 << LogDiv))); | |
| 1998 _sub(T, Src0); | |
| 1999 _neg(T); | |
| 2000 _mov(Dest, T); | |
| 2001 return; | |
| 2002 } | |
| 1740 } | 2003 } |
| 1741 break; | 2004 } |
| 1742 case InstArithmetic::Sdiv: | 2005 Src1 = legalize(Src1, Legal_Reg | Legal_Mem); |
| 1743 Src1 = legalize(Src1, Legal_Reg | Legal_Mem); | 2006 if (isByteSizedArithType(Dest->getType())) { |
| 1744 if (isByteSizedArithType(Dest->getType())) { | 2007 Variable *T_ah = makeReg(IceType_i8, RegX8632::Reg_ah); |
| 1745 _mov(T, Src0, RegX8632::Reg_eax); | 2008 _mov(T, Src0, RegX8632::Reg_eax); |
| 1746 _cbwdq(T, T); | 2009 _cbwdq(T, T); |
| 1747 _idiv(T, Src1, T); | 2010 Context.insert(InstFakeDef::create(Func, T_ah)); |
| 1748 _mov(Dest, T); | 2011 _idiv(T_ah, Src1, T); |
| 1749 } else { | 2012 _mov(Dest, T_ah); |
| 1750 T_edx = makeReg(IceType_i32, RegX8632::Reg_edx); | 2013 } else { |
| 1751 _mov(T, Src0, RegX8632::Reg_eax); | 2014 T_edx = makeReg(IceType_i32, RegX8632::Reg_edx); |
| 1752 _cbwdq(T_edx, T); | 2015 _mov(T, Src0, RegX8632::Reg_eax); |
| 1753 _idiv(T, Src1, T_edx); | 2016 _cbwdq(T_edx, T); |
| 1754 _mov(Dest, T); | 2017 _idiv(T_edx, Src1, T); |
| 1755 } | 2018 _mov(Dest, T_edx); |
| 1756 break; | 2019 } |
| 1757 case InstArithmetic::Urem: | 2020 break; |
| 1758 Src1 = legalize(Src1, Legal_Reg | Legal_Mem); | 2021 case InstArithmetic::Fadd: |
| 1759 if (isByteSizedArithType(Dest->getType())) { | 2022 _mov(T, Src0); |
| 1760 Variable *T_ah = nullptr; | 2023 _addss(T, Src1); |
| 1761 Constant *Zero = Ctx->getConstantZero(IceType_i8); | 2024 _mov(Dest, T); |
| 1762 _mov(T, Src0, RegX8632::Reg_eax); | 2025 break; |
| 1763 _mov(T_ah, Zero, RegX8632::Reg_ah); | 2026 case InstArithmetic::Fsub: |
| 1764 _div(T_ah, Src1, T); | 2027 _mov(T, Src0); |
| 1765 _mov(Dest, T_ah); | 2028 _subss(T, Src1); |
| 1766 } else { | 2029 _mov(Dest, T); |
| 1767 Constant *Zero = Ctx->getConstantZero(IceType_i32); | 2030 break; |
| 1768 _mov(T_edx, Zero, RegX8632::Reg_edx); | 2031 case InstArithmetic::Fmul: |
| 1769 _mov(T, Src0, RegX8632::Reg_eax); | 2032 _mov(T, Src0); |
| 1770 _div(T_edx, Src1, T); | 2033 _mulss(T, Src1); |
| 1771 _mov(Dest, T_edx); | 2034 _mov(Dest, T); |
| 1772 } | 2035 break; |
| 1773 break; | 2036 case InstArithmetic::Fdiv: |
| 1774 case InstArithmetic::Srem: | 2037 _mov(T, Src0); |
| 1775 Src1 = legalize(Src1, Legal_Reg | Legal_Mem); | 2038 _divss(T, Src1); |
| 1776 if (isByteSizedArithType(Dest->getType())) { | 2039 _mov(Dest, T); |
| 1777 Variable *T_ah = makeReg(IceType_i8, RegX8632::Reg_ah); | 2040 break; |
| 1778 _mov(T, Src0, RegX8632::Reg_eax); | 2041 case InstArithmetic::Frem: { |
| 1779 _cbwdq(T, T); | 2042 const SizeT MaxSrcs = 2; |
| 1780 Context.insert(InstFakeDef::create(Func, T_ah)); | 2043 Type Ty = Dest->getType(); |
| 1781 _idiv(T_ah, Src1, T); | 2044 InstCall *Call = makeHelperCall( |
| 1782 _mov(Dest, T_ah); | 2045 isFloat32Asserting32Or64(Ty) ? H_frem_f32 : H_frem_f64, Dest, MaxSrcs); |
| 1783 } else { | 2046 Call->addArg(Src0); |
| 1784 T_edx = makeReg(IceType_i32, RegX8632::Reg_edx); | 2047 Call->addArg(Src1); |
| 1785 _mov(T, Src0, RegX8632::Reg_eax); | 2048 return lowerCall(Call); |
| 1786 _cbwdq(T_edx, T); | 2049 } |
| 1787 _idiv(T_edx, Src1, T); | |
| 1788 _mov(Dest, T_edx); | |
| 1789 } | |
| 1790 break; | |
| 1791 case InstArithmetic::Fadd: | |
| 1792 _mov(T, Src0); | |
| 1793 _addss(T, Src1); | |
| 1794 _mov(Dest, T); | |
| 1795 break; | |
| 1796 case InstArithmetic::Fsub: | |
| 1797 _mov(T, Src0); | |
| 1798 _subss(T, Src1); | |
| 1799 _mov(Dest, T); | |
| 1800 break; | |
| 1801 case InstArithmetic::Fmul: | |
| 1802 _mov(T, Src0); | |
| 1803 _mulss(T, Src1); | |
| 1804 _mov(Dest, T); | |
| 1805 break; | |
| 1806 case InstArithmetic::Fdiv: | |
| 1807 _mov(T, Src0); | |
| 1808 _divss(T, Src1); | |
| 1809 _mov(Dest, T); | |
| 1810 break; | |
| 1811 case InstArithmetic::Frem: { | |
| 1812 const SizeT MaxSrcs = 2; | |
| 1813 Type Ty = Dest->getType(); | |
| 1814 InstCall *Call = | |
| 1815 makeHelperCall(isFloat32Asserting32Or64(Ty) ? H_frem_f32 : H_frem_f64, | |
| 1816 Dest, MaxSrcs); | |
| 1817 Call->addArg(Src0); | |
| 1818 Call->addArg(Src1); | |
| 1819 return lowerCall(Call); | |
| 1820 } break; | |
| 1821 } | |
| 1822 } | 2050 } |
| 1823 } | 2051 } |
| 1824 | 2052 |
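The (currently disabled) Sdiv and Srem power-of-two paths above use the usual round-toward-zero correction: add divisor − 1 to negative dividends before shifting. Worked through for dest = src / 4 with src = −7: the sar by 31 yields −1, the shr by 30 extracts 3 (divisor − 1), adding gives −4, and the final sar by 2 gives −1, matching C's truncating division; for non-negative src the correction adds 0. A hedged standalone check of the same bit trick, specialized to the 32-bit divide-by-4 case (it assumes the common arithmetic-shift behavior for signed right shifts, as the emitted sar would guarantee):

```cpp
#include <cassert>
#include <cstdint>

// Branch-free signed divide by 4 (LogDiv = 2), following the sequence in
// the disabled Sdiv path: sar, shr, add, sar.
static int32_t DivByFourRoundTowardZero(int32_t Src) {
  const uint32_t TypeWidth = 32, LogDiv = 2;
  int32_t T = Src;
  T >>= (TypeWidth - 1); // sar: -1 if Src is negative, 0 otherwise
  T = static_cast<int32_t>(
      static_cast<uint32_t>(T) >> (TypeWidth - LogDiv)); // shr: 3 or 0
  T += Src;      // add the rounding correction
  T >>= LogDiv;  // sar: the actual divide
  return T;
}

int main() {
  assert(DivByFourRoundTowardZero(-7) == -1); // matches C's -7 / 4
  assert(DivByFourRoundTowardZero(7) == 1);
  assert(DivByFourRoundTowardZero(-8) == -2);
}
```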
| 1825 void TargetX8632::lowerAssign(const InstAssign *Inst) { | 2053 void TargetX8632::lowerAssign(const InstAssign *Inst) { |
| 1826 Variable *Dest = Inst->getDest(); | 2054 Variable *Dest = Inst->getDest(); |
| 1827 Operand *Src0 = Inst->getSrc(0); | 2055 Operand *Src0 = Inst->getSrc(0); |
| 1828 assert(Dest->getType() == Src0->getType()); | 2056 assert(Dest->getType() == Src0->getType()); |
| 1829 if (Dest->getType() == IceType_i64) { | 2057 if (Dest->getType() == IceType_i64) { |
| 1830 Src0 = legalize(Src0); | 2058 Src0 = legalize(Src0); |
| 1831 Operand *Src0Lo = loOperand(Src0); | 2059 Operand *Src0Lo = loOperand(Src0); |
| 1832 Operand *Src0Hi = hiOperand(Src0); | 2060 Operand *Src0Hi = hiOperand(Src0); |
| 1833 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest)); | 2061 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest)); |
| 1834 Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest)); | 2062 Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest)); |
| 1835 Variable *T_Lo = nullptr, *T_Hi = nullptr; | 2063 Variable *T_Lo = nullptr, *T_Hi = nullptr; |
| 1836 _mov(T_Lo, Src0Lo); | 2064 _mov(T_Lo, Src0Lo); |
| 1837 _mov(DestLo, T_Lo); | 2065 _mov(DestLo, T_Lo); |
| 1838 _mov(T_Hi, Src0Hi); | 2066 _mov(T_Hi, Src0Hi); |
| 1839 _mov(DestHi, T_Hi); | 2067 _mov(DestHi, T_Hi); |
| 1840 } else { | 2068 } else { |
| 1841 Operand *RI; | 2069 Operand *RI; |
| 1842 if (Dest->hasReg()) | 2070 if (Dest->hasReg()) { |
| 1843 // If Dest already has a physical register, then legalize the | 2071 // If Dest already has a physical register, then legalize the |
| 1844 // Src operand into a Variable with the same register | 2072 // Src operand into a Variable with the same register |
| 1845 // assignment. This is mostly a workaround for advanced phi | 2073 // assignment. This is mostly a workaround for advanced phi |
| 1846 // lowering's ad-hoc register allocation which assumes no | 2074 // lowering's ad-hoc register allocation which assumes no |
| 1847 // register allocation is needed when at least one of the | 2075 // register allocation is needed when at least one of the |
| 1848 // operands is non-memory. | 2076 // operands is non-memory. |
| 1849 RI = legalize(Src0, Legal_Reg, Dest->getRegNum()); | 2077 |
| 1850 else | 2078 // qining: if we have a physical register for the dest variable, |
| 2079 // we can enable our constant blinding or pooling again. Note | |
| 2080 // this is only for advancedPhiLowering(); the flag flip should | |
| 2081 // have no other side effects. | |
| 2082 { | |
| 2083 BoolFlagSaver B(RandomizationPoolingPaused, false); | |
| 2084 RI = legalize(Src0, Legal_Reg, Dest->getRegNum()); | |
| 2085 } | |
| 2086 } else { | |
| 1851 // If Dest could be a stack operand, then RI must be a physical | 2087 // If Dest could be a stack operand, then RI must be a physical |
| 1852 // register or a scalar integer immediate. | 2088 // register or a scalar integer immediate. |
| 1853 RI = legalize(Src0, Legal_Reg | Legal_Imm); | 2089 RI = legalize(Src0, Legal_Reg | Legal_Imm); |
| 2090 } | |
| 1854 if (isVectorType(Dest->getType())) | 2091 if (isVectorType(Dest->getType())) |
| 1855 _movp(Dest, RI); | 2092 _movp(Dest, RI); |
| 1856 else | 2093 else |
| 1857 _mov(Dest, RI); | 2094 _mov(Dest, RI); |
| 1858 } | 2095 } |
| 1859 } | 2096 } |
| 1860 | 2097 |
| 1861 void TargetX8632::lowerBr(const InstBr *Inst) { | 2098 void TargetX8632::lowerBr(const InstBr *Inst) { |
| 1862 if (Inst->isUnconditional()) { | 2099 if (Inst->isUnconditional()) { |
| 1863 _br(Inst->getTargetUnconditional()); | 2100 _br(Inst->getTargetUnconditional()); |
| (...skipping 1248 matching lines...) | |
| 3112 Context.insert( | 3349 Context.insert( |
| 3113 InstFakeUse::create(Func, Context.getLastInserted()->getDest())); | 3350 InstFakeUse::create(Func, Context.getLastInserted()->getDest())); |
| 3114 return; | 3351 return; |
| 3115 } | 3352 } |
| 3116 case Intrinsics::AtomicRMW: | 3353 case Intrinsics::AtomicRMW: |
| 3117 if (!Intrinsics::isMemoryOrderValid( | 3354 if (!Intrinsics::isMemoryOrderValid( |
| 3118 ID, getConstantMemoryOrder(Instr->getArg(3)))) { | 3355 ID, getConstantMemoryOrder(Instr->getArg(3)))) { |
| 3119 Func->setError("Unexpected memory ordering for AtomicRMW"); | 3356 Func->setError("Unexpected memory ordering for AtomicRMW"); |
| 3120 return; | 3357 return; |
| 3121 } | 3358 } |
| 3122 lowerAtomicRMW(Instr->getDest(), | 3359 lowerAtomicRMW( |
| 3123 static_cast<uint32_t>(llvm::cast<ConstantInteger32>( | 3360 Instr->getDest(), |
| 3124 Instr->getArg(0))->getValue()), | 3361 static_cast<uint32_t>( |
| 3125 Instr->getArg(1), Instr->getArg(2)); | 3362 llvm::cast<ConstantInteger32>(Instr->getArg(0))->getValue()), |
| 3363 Instr->getArg(1), Instr->getArg(2)); | |
| 3126 return; | 3364 return; |
| 3127 case Intrinsics::AtomicStore: { | 3365 case Intrinsics::AtomicStore: { |
| 3128 if (!Intrinsics::isMemoryOrderValid( | 3366 if (!Intrinsics::isMemoryOrderValid( |
| 3129 ID, getConstantMemoryOrder(Instr->getArg(2)))) { | 3367 ID, getConstantMemoryOrder(Instr->getArg(2)))) { |
| 3130 Func->setError("Unexpected memory ordering for AtomicStore"); | 3368 Func->setError("Unexpected memory ordering for AtomicStore"); |
| 3131 return; | 3369 return; |
| 3132 } | 3370 } |
| 3133 // We require the memory address to be naturally aligned. | 3371 // We require the memory address to be naturally aligned. |
| 3134 // Given that is the case, then normal stores are atomic. | 3372 // Given that is the case, then normal stores are atomic. |
| 3135 // Add a fence after the store to make it visible. | 3373 // Add a fence after the store to make it visible. |
| (...skipping 1182 matching lines...) | |
| 4318 } | 4556 } |
| 4319 } | 4557 } |
| 4320 | 4558 |
| 4321 void TargetX8632::lowerUnreachable(const InstUnreachable * /*Inst*/) { _ud2(); } | 4559 void TargetX8632::lowerUnreachable(const InstUnreachable * /*Inst*/) { _ud2(); } |
| 4322 | 4560 |
| 4323 // Turn an i64 Phi instruction into a pair of i32 Phi instructions, to | 4561 // Turn an i64 Phi instruction into a pair of i32 Phi instructions, to |
| 4324 // preserve integrity of liveness analysis. Undef values are also | 4562 // preserve integrity of liveness analysis. Undef values are also |
| 4325 // turned into zeroes, since loOperand() and hiOperand() don't expect | 4563 // turned into zeroes, since loOperand() and hiOperand() don't expect |
| 4326 // Undef input. | 4564 // Undef input. |
| 4327 void TargetX8632::prelowerPhis() { | 4565 void TargetX8632::prelowerPhis() { |
| 4566 // Pause constant blinding or pooling; it will be done later during the | |
| 4567 // phi lowering assignments. | |
| 4568 BoolFlagSaver B(RandomizationPoolingPaused, true); | |
| 4569 | |
| 4328 CfgNode *Node = Context.getNode(); | 4570 CfgNode *Node = Context.getNode(); |
| 4329 for (Inst &I : Node->getPhis()) { | 4571 for (Inst &I : Node->getPhis()) { |
| 4330 auto Phi = llvm::dyn_cast<InstPhi>(&I); | 4572 auto Phi = llvm::dyn_cast<InstPhi>(&I); |
| 4331 if (Phi->isDeleted()) | 4573 if (Phi->isDeleted()) |
| 4332 continue; | 4574 continue; |
| 4333 Variable *Dest = Phi->getDest(); | 4575 Variable *Dest = Phi->getDest(); |
| 4334 if (Dest->getType() == IceType_i64) { | 4576 if (Dest->getType() == IceType_i64) { |
| 4335 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest)); | 4577 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest)); |
| 4336 Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest)); | 4578 Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest)); |
| 4337 InstPhi *PhiLo = InstPhi::create(Func, Phi->getSrcSize(), DestLo); | 4579 InstPhi *PhiLo = InstPhi::create(Func, Phi->getSrcSize(), DestLo); |
| (...skipping 79 matching lines...) | |
| 4417 // assignment, add Dest to the set of available registers, and | 4659 // assignment, add Dest to the set of available registers, and |
| 4418 // remove Src from the set of available registers. Iteration is | 4660 // remove Src from the set of available registers. Iteration is |
| 4419 // done backwards to enable incremental updates of the available | 4661 // done backwards to enable incremental updates of the available |
| 4420 // register set, and the lowered instruction numbers may be out of | 4662 // register set, and the lowered instruction numbers may be out of |
| 4421 // order, but that can be worked around by renumbering the block | 4663 // order, but that can be worked around by renumbering the block |
| 4422 // afterwards if necessary. | 4664 // afterwards if necessary. |
| 4423 for (const Inst &I : reverse_range(Assignments)) { | 4665 for (const Inst &I : reverse_range(Assignments)) { |
| 4424 Context.rewind(); | 4666 Context.rewind(); |
| 4425 auto Assign = llvm::dyn_cast<InstAssign>(&I); | 4667 auto Assign = llvm::dyn_cast<InstAssign>(&I); |
| 4426 Variable *Dest = Assign->getDest(); | 4668 Variable *Dest = Assign->getDest(); |
| 4669 | |
| 4670 // qining: Here is an ugly hack for phi.ll test. | |
| 4671 // In function test_split_undef_int_vec, the advanced phi | |
| 4672 // lowering process will find an assignment of undefined | |
| 4673 // vector. This vector, as the Src here, will crash if it | |
| 4674 // goes through legalize(). legalize() will create a new variable | |
| 4675 // with makeVectorOfZeros(), but this new variable will be | |
| 4676 // assigned a stack slot. This will fail the assertion in | |
| 4677 // IceInstX8632.cpp:789, as XmmEmitterRegOp() complains that | |
| 4678 // Var->hasReg() fails. Note this failure is irrelevant to | |
| 4679 // randomization or pooling of constants. | |
| 4680 // So, we do not call legalize() to add a pool label for the | |
| 4681 // src operands of phi assignment instructions. | |
| 4682 // Instead, we manually add a pool label for constant float and | |
| 4683 // constant double values here. | |
| 4684 // Note going through legalize() does not affect the testing | |
| 4685 // results of SPEC2K and xtests. | |
| 4427 Operand *Src = Assign->getSrc(0); | 4686 Operand *Src = Assign->getSrc(0); |
| 4687 if (!llvm::isa<ConstantUndef>(Assign->getSrc(0))) { | |
| 4688 Src = legalize(Src); | |
| 4689 } | |
| 4690 | |
| 4428 Variable *SrcVar = llvm::dyn_cast<Variable>(Src); | 4691 Variable *SrcVar = llvm::dyn_cast<Variable>(Src); |
| 4429 // Use normal assignment lowering, except lower mem=mem specially | 4692 // Use normal assignment lowering, except lower mem=mem specially |
| 4430 // so we can register-allocate at the same time. | 4693 // so we can register-allocate at the same time. |
| 4431 if (!isMemoryOperand(Dest) || !isMemoryOperand(Src)) { | 4694 if (!isMemoryOperand(Dest) || !isMemoryOperand(Src)) { |
| 4432 lowerAssign(Assign); | 4695 lowerAssign(Assign); |
| 4433 } else { | 4696 } else { |
| 4434 assert(Dest->getType() == Src->getType()); | 4697 assert(Dest->getType() == Src->getType()); |
| 4435 const llvm::SmallBitVector &RegsForType = | 4698 const llvm::SmallBitVector &RegsForType = |
| 4436 getRegisterSetForType(Dest->getType()); | 4699 getRegisterSetForType(Dest->getType()); |
| 4437 llvm::SmallBitVector AvailRegsForType = RegsForType & Available; | 4700 llvm::SmallBitVector AvailRegsForType = RegsForType & Available; |
| (...skipping 155 matching lines...) | |
| 4593 // Assert that a physical register is allowed. To date, all calls | 4856 // Assert that a physical register is allowed. To date, all calls |
| 4594 // to legalize() allow a physical register. If a physical register | 4857 // to legalize() allow a physical register. If a physical register |
| 4595 // needs to be explicitly disallowed, then new code will need to be | 4858 // needs to be explicitly disallowed, then new code will need to be |
| 4596 // written to force a spill. | 4859 // written to force a spill. |
| 4597 assert(Allowed & Legal_Reg); | 4860 assert(Allowed & Legal_Reg); |
| 4598 // If we're asking for a specific physical register, make sure we're | 4861 // If we're asking for a specific physical register, make sure we're |
| 4599 // not allowing any other operand kinds. (This could be future | 4862 // not allowing any other operand kinds. (This could be future |
| 4600 // work, e.g. allow the shl shift amount to be either an immediate | 4863 // work, e.g. allow the shl shift amount to be either an immediate |
| 4601 // or in ecx.) | 4864 // or in ecx.) |
| 4602 assert(RegNum == Variable::NoRegister || Allowed == Legal_Reg); | 4865 assert(RegNum == Variable::NoRegister || Allowed == Legal_Reg); |
| 4866 | |
| 4603 if (auto Mem = llvm::dyn_cast<OperandX8632Mem>(From)) { | 4867 if (auto Mem = llvm::dyn_cast<OperandX8632Mem>(From)) { |
| 4604 // Before doing anything with a Mem operand, we need to ensure | 4868 // Before doing anything with a Mem operand, we need to ensure |
| 4605 // that the Base and Index components are in physical registers. | 4869 // that the Base and Index components are in physical registers. |
| 4606 Variable *Base = Mem->getBase(); | 4870 Variable *Base = Mem->getBase(); |
| 4607 Variable *Index = Mem->getIndex(); | 4871 Variable *Index = Mem->getIndex(); |
| 4608 Variable *RegBase = nullptr; | 4872 Variable *RegBase = nullptr; |
| 4609 Variable *RegIndex = nullptr; | 4873 Variable *RegIndex = nullptr; |
| 4610 if (Base) { | 4874 if (Base) { |
| 4611 RegBase = legalizeToVar(Base); | 4875 RegBase = legalizeToVar(Base); |
| 4612 } | 4876 } |
| 4613 if (Index) { | 4877 if (Index) { |
| 4614 RegIndex = legalizeToVar(Index); | 4878 RegIndex = legalizeToVar(Index); |
| 4615 } | 4879 } |
| 4616 if (Base != RegBase || Index != RegIndex) { | 4880 if (Base != RegBase || Index != RegIndex) { |
| 4617 From = | 4881 Mem = |
| 4618 OperandX8632Mem::create(Func, Ty, RegBase, Mem->getOffset(), RegIndex, | 4882 OperandX8632Mem::create(Func, Ty, RegBase, Mem->getOffset(), RegIndex, |
| 4619 Mem->getShift(), Mem->getSegmentRegister()); | 4883 Mem->getShift(), Mem->getSegmentRegister()); |
| 4620 } | 4884 } |
| 4621 | 4885 |
| 4886 // qining: For all memory operands, randomization/pooling is done here. | |
| 4887 From = randomizeOrPoolImmediate(Mem); | |
| 4888 | |
| 4622 if (!(Allowed & Legal_Mem)) { | 4889 if (!(Allowed & Legal_Mem)) { |
| 4623 From = copyToReg(From, RegNum); | 4890 From = copyToReg(From, RegNum); |
| 4624 } | 4891 } |
| 4625 return From; | 4892 return From; |
| 4626 } | 4893 } |
| 4627 if (llvm::isa<Constant>(From)) { | 4894 if (llvm::isa<Constant>(From)) { |
|
Jim Stichnoth
2015/06/19 16:51:03
Change to something like:
if (auto *Const = llvm
qining
2015/06/19 20:22:25
Done.
| |
| 4628 if (llvm::isa<ConstantUndef>(From)) { | 4895 if (llvm::isa<ConstantUndef>(From)) { |
| 4629 // Lower undefs to zero. Another option is to lower undefs to an | 4896 // Lower undefs to zero. Another option is to lower undefs to an |
| 4630 // uninitialized register; however, using an uninitialized register | 4897 // uninitialized register; however, using an uninitialized register |
| 4631 // results in less predictable code. | 4898 // results in less predictable code. |
| 4632 // | 4899 // |
| 4633 // If in the future the implementation is changed to lower undef | 4900 // If in the future the implementation is changed to lower undef |
| 4634 // values to uninitialized registers, a FakeDef will be needed: | 4901 // values to uninitialized registers, a FakeDef will be needed: |
| 4635 // Context.insert(InstFakeDef::create(Func, Reg)); | 4902 // Context.insert(InstFakeDef::create(Func, Reg)); |
| 4636 // This is in order to ensure that the live range of Reg is not | 4903 // This is in order to ensure that the live range of Reg is not |
| 4637 // overestimated. If the constant being lowered is a 64 bit value, | 4904 // overestimated. If the constant being lowered is a 64 bit value, |
| 4638 // then the result should be split and the lo and hi components will | 4905 // then the result should be split and the lo and hi components will |
| 4639 // need to go in uninitialized registers. | 4906 // need to go in uninitialized registers. |
| 4640 if (isVectorType(Ty)) | 4907 if (isVectorType(Ty)) |
| 4641 return makeVectorOfZeros(Ty, RegNum); | 4908 return makeVectorOfZeros(Ty, RegNum); |
| 4642 From = Ctx->getConstantZero(Ty); | 4909 From = Ctx->getConstantZero(Ty); |
| 4643 } | 4910 } |
| 4644 // There should be no constants of vector type (other than undef). | 4911 // There should be no constants of vector type (other than undef). |
| 4645 assert(!isVectorType(Ty)); | 4912 assert(!isVectorType(Ty)); |
| 4913 | |
| 4914 // If the operand is a 32-bit constant integer, check whether it | |
| 4915 // needs to be randomized or pooled. | |
| 4916 if (ConstantInteger32 *C = llvm::dyn_cast<ConstantInteger32>(From)) { | |
| 4917 Operand *NewFrom = randomizeOrPoolImmediate(C, RegNum); | |
| 4918 if (NewFrom != From) { | |
| 4919 return NewFrom; | |
| 4920 } | |
| 4921 } | |
| 4922 | |
| 4646 // Convert a scalar floating point constant into an explicit | 4923 // Convert a scalar floating point constant into an explicit |
| 4647 // memory operand. | 4924 // memory operand. |
| 4648 if (isScalarFloatingType(Ty)) { | 4925 if (isScalarFloatingType(Ty)) { |
| 4649 Variable *Base = nullptr; | 4926 Variable *Base = nullptr; |
| 4650 std::string Buffer; | 4927 std::string Buffer; |
| 4651 llvm::raw_string_ostream StrBuf(Buffer); | 4928 llvm::raw_string_ostream StrBuf(Buffer); |
| 4652 llvm::cast<Constant>(From)->emitPoolLabel(StrBuf); | 4929 llvm::cast<Constant>(From)->emitPoolLabel(StrBuf); |
| 4930 llvm::cast<Constant>(From)->shouldBePooled = true; | |
| 4653 Constant *Offset = Ctx->getConstantSym(0, StrBuf.str(), true); | 4931 Constant *Offset = Ctx->getConstantSym(0, StrBuf.str(), true); |
| 4654 From = OperandX8632Mem::create(Func, Ty, Base, Offset); | 4932 From = OperandX8632Mem::create(Func, Ty, Base, Offset); |
| 4655 } | 4933 } |
| 4656 bool NeedsReg = false; | 4934 bool NeedsReg = false; |
| 4657 if (!(Allowed & Legal_Imm) && !isScalarFloatingType(Ty)) | 4935 if (!(Allowed & Legal_Imm) && !isScalarFloatingType(Ty)) |
| 4658 // Immediate specifically not allowed | 4936 // Immediate specifically not allowed |
| 4659 NeedsReg = true; | 4937 NeedsReg = true; |
| 4660 if (!(Allowed & Legal_Mem) && isScalarFloatingType(Ty)) | 4938 if (!(Allowed & Legal_Mem) && isScalarFloatingType(Ty)) |
| 4661 // On x86, FP constants are lowered to mem operands. | 4939 // On x86, FP constants are lowered to mem operands. |
| 4662 NeedsReg = true; | 4940 NeedsReg = true; |
| (...skipping 36 matching lines...) | |
| 4699 bool IsSrc1ImmOrReg = false; | 4977 bool IsSrc1ImmOrReg = false; |
| 4700 if (llvm::isa<Constant>(Src1)) { | 4978 if (llvm::isa<Constant>(Src1)) { |
| 4701 IsSrc1ImmOrReg = true; | 4979 IsSrc1ImmOrReg = true; |
| 4702 } else if (Variable *Var = llvm::dyn_cast<Variable>(Src1)) { | 4980 } else if (Variable *Var = llvm::dyn_cast<Variable>(Src1)) { |
| 4703 if (Var->hasReg()) | 4981 if (Var->hasReg()) |
| 4704 IsSrc1ImmOrReg = true; | 4982 IsSrc1ImmOrReg = true; |
| 4705 } | 4983 } |
| 4706 return legalize(Src0, IsSrc1ImmOrReg ? (Legal_Reg | Legal_Mem) : Legal_Reg); | 4984 return legalize(Src0, IsSrc1ImmOrReg ? (Legal_Reg | Legal_Mem) : Legal_Reg); |
| 4707 } | 4985 } |
| 4708 | 4986 |
| 4709 OperandX8632Mem *TargetX8632::formMemoryOperand(Operand *Operand, Type Ty, | 4987 OperandX8632Mem *TargetX8632::formMemoryOperand(Operand *Opnd, Type Ty, |
| 4710 bool DoLegalize) { | 4988 bool DoLegalize) { |
| 4711 OperandX8632Mem *Mem = llvm::dyn_cast<OperandX8632Mem>(Operand); | 4989 OperandX8632Mem *Mem = llvm::dyn_cast<OperandX8632Mem>(Opnd); |
| 4712 // It may be the case that address mode optimization already creates | 4990 // It may be the case that address mode optimization already creates |
| 4713 // an OperandX8632Mem, so in that case it wouldn't need another level | 4991 // an OperandX8632Mem, so in that case it wouldn't need another level |
| 4714 // of transformation. | 4992 // of transformation. |
| 4715 if (!Mem) { | 4993 if (!Mem) { |
| 4716 Variable *Base = llvm::dyn_cast<Variable>(Operand); | 4994 Variable *Base = llvm::dyn_cast<Variable>(Opnd); |
| 4717 Constant *Offset = llvm::dyn_cast<Constant>(Operand); | 4995 Constant *Offset = llvm::dyn_cast<Constant>(Opnd); |
| 4718 assert(Base || Offset); | 4996 assert(Base || Offset); |
| 4719 if (Offset) { | 4997 if (Offset) { |
| 4720 // Make sure Offset is not undef. | 4998 // qining: While building the memory operand, we do not |
| 4721 Offset = llvm::cast<Constant>(legalize(Offset)); | 4999 // blind or pool the constant offset; the whole memory |
| 5000 // operand is handled as a single entity later, which | |
| 5001 // saves one instruction. With blinding and pooling | |
| 5002 // turned off, legalize(Offset) is guaranteed to return | |
| 5003 // a Constant *. | |
| 5004 { | |
| 5005 BoolFlagSaver B(RandomizationPoolingPaused, true); | |
| 5006 | |
| 5007 Offset = llvm::cast<Constant>(legalize(Offset)); | |
| 5008 } | |
| 5009 | |
| 4722 assert(llvm::isa<ConstantInteger32>(Offset) || | 5010 assert(llvm::isa<ConstantInteger32>(Offset) || |
| 4723 llvm::isa<ConstantRelocatable>(Offset)); | 5011 llvm::isa<ConstantRelocatable>(Offset)); |
| 4724 } | 5012 } |
| 4725 Mem = OperandX8632Mem::create(Func, Ty, Base, Offset); | 5013 Mem = OperandX8632Mem::create(Func, Ty, Base, Offset); |
| 4726 } | 5014 } |
| 4727 return llvm::cast<OperandX8632Mem>(DoLegalize ? legalize(Mem) : Mem); | 5015 // qining: Either do full legalization (which includes |
| 5016 // randomization/pooling) or only randomization/pooling. | |
| 5017 return llvm::cast<OperandX8632Mem>( | |
| 5018 DoLegalize ? legalize(Mem) : randomizeOrPoolImmediate(Mem)); | |
| 4728 } | 5019 } |
| 4729 | 5020 |
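The scoped pause around legalize(Offset) above matters because the subsequent llvm::cast<Constant> assumes the offset is still a constant; if pooling or blinding were active, legalize() could replace it with something that is no longer a Constant (a blinded or pooled value held in a temporary register), and the cast would assert. Below is a toy standalone model of that hazard, in plain C++; the class names only mirror the roles and are not Subzero classes.

    #include <cassert>
    #include <memory>

    struct Operand { virtual ~Operand() = default; };
    struct Constant : Operand { int Value; explicit Constant(int V) : Value(V) {} };
    struct TempReg : Operand {}; // stands in for a register holding the value

    static bool PoolingPaused = false;

    // Toy "legalize": with pooling paused the constant passes through;
    // with pooling active it would come back as something that is not a
    // Constant.
    static std::unique_ptr<Operand> legalizeModel(int Value) {
      if (PoolingPaused)
        return std::make_unique<Constant>(Value);
      return std::make_unique<TempReg>();
    }

    int main() {
      PoolingPaused = true; // what the BoolFlagSaver scope achieves above
      auto Op = legalizeModel(42);
      assert(dynamic_cast<Constant *>(Op.get()) != nullptr); // cast is safe
      return 0;
    }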
| 4730 Variable *TargetX8632::makeReg(Type Type, int32_t RegNum) { | 5021 Variable *TargetX8632::makeReg(Type Type, int32_t RegNum) { |
| 4731 // There aren't any 64-bit integer registers for x86-32. | 5022 // There aren't any 64-bit integer registers for x86-32. |
| 4732 assert(Type != IceType_i64); | 5023 assert(Type != IceType_i64); |
| 4733 Variable *Reg = Func->makeVariable(Type); | 5024 Variable *Reg = Func->makeVariable(Type); |
| 4734 if (RegNum == Variable::NoRegister) | 5025 if (RegNum == Variable::NoRegister) |
| 4735 Reg->setWeightInfinite(); | 5026 Reg->setWeightInfinite(); |
| 4736 else | 5027 else |
| 4737 Reg->setRegNum(RegNum); | 5028 Reg->setRegNum(RegNum); |
| (...skipping 220 matching lines...) | |
| 4958 typedef ConstantDouble IceType; | 5249 typedef ConstantDouble IceType; |
| 4959 static const Type Ty = IceType_f64; | 5250 static const Type Ty = IceType_f64; |
| 4960 static const char *TypeName; | 5251 static const char *TypeName; |
| 4961 static const char *AsmTag; | 5252 static const char *AsmTag; |
| 4962 static const char *PrintfString; | 5253 static const char *PrintfString; |
| 4963 }; | 5254 }; |
| 4964 const char *PoolTypeConverter<double>::TypeName = "double"; | 5255 const char *PoolTypeConverter<double>::TypeName = "double"; |
| 4965 const char *PoolTypeConverter<double>::AsmTag = ".quad"; | 5256 const char *PoolTypeConverter<double>::AsmTag = ".quad"; |
| 4966 const char *PoolTypeConverter<double>::PrintfString = "0x%llx"; | 5257 const char *PoolTypeConverter<double>::PrintfString = "0x%llx"; |
| 4967 | 5258 |
| 5259 // Add converter for i32 constant pooling | |
| 5260 template <> struct PoolTypeConverter<int> { | |
|
Jim Stichnoth
2015/06/19 16:51:03
Please use uint32_t/uint16_t/uint8_t instead of in
qining
2015/06/19 20:22:26
Done. I think both signed and unsigned should be f
| |
| 5261 typedef uint32_t PrimitiveIntType; | |
| 5262 typedef ConstantInteger32 IceType; | |
| 5263 static const Type Ty = IceType_i32; | |
| 5264 static const char *TypeName; | |
| 5265 static const char *AsmTag; | |
| 5266 static const char *PrintfString; | |
| 5267 }; | |
| 5268 const char *PoolTypeConverter<int>::TypeName = "i32"; | |
| 5269 const char *PoolTypeConverter<int>::AsmTag = ".long"; | |
| 5270 const char *PoolTypeConverter<int>::PrintfString = "0x%x"; | |
| 5271 | |
| 5272 // Add converter for i16 constant pooling | |
| 5273 template <> struct PoolTypeConverter<short> { | |
| 5274 typedef uint32_t PrimitiveIntType; | |
| 5275 typedef ConstantInteger32 IceType; | |
| 5276 static const Type Ty = IceType_i16; | |
| 5277 static const char *TypeName; | |
| 5278 static const char *AsmTag; | |
| 5279 static const char *PrintfString; | |
| 5280 }; | |
| 5281 const char *PoolTypeConverter<short>::TypeName = "i16"; | |
| 5282 const char *PoolTypeConverter<short>::AsmTag = ".short"; | |
| 5283 const char *PoolTypeConverter<short>::PrintfString = "0x%x"; | |
| 5284 | |
| 5285 // Add converter for i8 constant pooling | |
| 5286 template <> struct PoolTypeConverter<char> { | |
| 5287 typedef uint32_t PrimitiveIntType; | |
| 5288 typedef ConstantInteger32 IceType; | |
| 5289 static const Type Ty = IceType_i8; | |
| 5290 static const char *TypeName; | |
| 5291 static const char *AsmTag; | |
| 5292 static const char *PrintfString; | |
| 5293 }; | |
| 5294 const char *PoolTypeConverter<char>::TypeName = "i8"; | |
| 5295 const char *PoolTypeConverter<char>::AsmTag = ".byte"; | |
| 5296 const char *PoolTypeConverter<char>::PrintfString = "0x%x"; | |
| 5297 | |
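The three new specializations follow the same traits pattern as the float/double converters: emitConstantPool<T>() below stays type-agnostic while each converter supplies the Ice type, the assembler directive, and the printf format. A minimal standalone sketch of that pattern for the integer pools (plain C++; every name here is invented for illustration and is not a Subzero API):

    #include <cstdio>
    #include <cstdint>

    // Hypothetical traits: map a key type to the directive and name used
    // when emitting its constant pool.
    template <typename T> struct EmitTraits;
    template <> struct EmitTraits<uint32_t> {
      static const char *AsmTag() { return ".long"; }
      static const char *TypeName() { return "i32"; }
    };
    template <> struct EmitTraits<uint16_t> {
      static const char *AsmTag() { return ".short"; }
      static const char *TypeName() { return "i16"; }
    };
    template <> struct EmitTraits<uint8_t> {
      static const char *AsmTag() { return ".byte"; }
      static const char *TypeName() { return "i8"; }
    };

    // Generic emitter: the loop body never names a concrete type; the
    // traits supply everything that differs between pools.
    template <typename T> void emitPoolEntry(const char *Label, T Value) {
      std::printf("%s:\n\t%s\t0x%x\t# %s\n", Label, EmitTraits<T>::AsmTag(),
                  static_cast<unsigned>(Value), EmitTraits<T>::TypeName());
    }

    int main() {
      emitPoolEntry<uint32_t>("L$i32$0", 0xdeadbeefu);
      emitPoolEntry<uint8_t>("L$i8$0", 0x7f);
    }

Adding a pool for a new type then only requires a new specialization, not another copy of the emission loop, which is what the i32/i16/i8 specializations above do.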
| 4968 template <typename T> | 5298 template <typename T> |
| 4969 void TargetDataX8632::emitConstantPool(GlobalContext *Ctx) { | 5299 void TargetDataX8632::emitConstantPool(GlobalContext *Ctx) { |
| 4970 if (!ALLOW_DUMP) | 5300 if (!ALLOW_DUMP) |
| 4971 return; | 5301 return; |
| 4972 Ostream &Str = Ctx->getStrEmit(); | 5302 Ostream &Str = Ctx->getStrEmit(); |
| 4973 Type Ty = T::Ty; | 5303 Type Ty = T::Ty; |
| 4974 SizeT Align = typeAlignInBytes(Ty); | 5304 SizeT Align = typeAlignInBytes(Ty); |
| 4975 ConstantList Pool = Ctx->getConstantPool(Ty); | 5305 ConstantList Pool = Ctx->getConstantPool(Ty); |
| 4976 | 5306 |
| 4977 Str << "\t.section\t.rodata.cst" << Align << ",\"aM\",@progbits," << Align | 5307 Str << "\t.section\t.rodata.cst" << Align << ",\"aM\",@progbits," << Align |
| 4978 << "\n"; | 5308 << "\n"; |
| 4979 Str << "\t.align\t" << Align << "\n"; | 5309 Str << "\t.align\t" << Align << "\n"; |
| 4980 for (Constant *C : Pool) { | 5310 for (Constant *C : Pool) { |
| 5311 if (!C->shouldBePooled) | |
| 5312 continue; | |
| 4981 typename T::IceType *Const = llvm::cast<typename T::IceType>(C); | 5313 typename T::IceType *Const = llvm::cast<typename T::IceType>(C); |
| 4982 typename T::IceType::PrimType Value = Const->getValue(); | 5314 typename T::IceType::PrimType Value = Const->getValue(); |
| 4983 // Use memcpy() to copy bits from Value into RawValue in a way | 5315 // Use memcpy() to copy bits from Value into RawValue in a way |
| 4984 // that avoids breaking strict-aliasing rules. | 5316 // that avoids breaking strict-aliasing rules. |
| 4985 typename T::PrimitiveIntType RawValue; | 5317 typename T::PrimitiveIntType RawValue; |
| 4986 memcpy(&RawValue, &Value, sizeof(Value)); | 5318 memcpy(&RawValue, &Value, sizeof(Value)); |
| 4987 char buf[30]; | 5319 char buf[30]; |
| 4988 int CharsPrinted = | 5320 int CharsPrinted = |
| 4989 snprintf(buf, llvm::array_lengthof(buf), T::PrintfString, RawValue); | 5321 snprintf(buf, llvm::array_lengthof(buf), T::PrintfString, RawValue); |
| 4990 assert(CharsPrinted >= 0 && | 5322 assert(CharsPrinted >= 0 && |
| 4991 (size_t)CharsPrinted < llvm::array_lengthof(buf)); | 5323 (size_t)CharsPrinted < llvm::array_lengthof(buf)); |
| 4992 (void)CharsPrinted; // avoid warnings if asserts are disabled | 5324 (void)CharsPrinted; // avoid warnings if asserts are disabled |
| 4993 Const->emitPoolLabel(Str); | 5325 Const->emitPoolLabel(Str); |
| 4994 Str << ":\n\t" << T::AsmTag << "\t" << buf << "\t# " << T::TypeName << " " | 5326 Str << ":\n\t" << T::AsmTag << "\t" << buf << "\t# " << T::TypeName << " " |
| 4995 << Value << "\n"; | 5327 << Value << "\n"; |
| 4996 } | 5328 } |
| 4997 } | 5329 } |
| 4998 | 5330 |
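The memcpy() in the loop above is the portable way to reinterpret a float or double's bit pattern as an integer: a pointer cast would violate strict-aliasing rules. A standalone sketch of just that step, assuming nothing beyond the C and C++ standard libraries:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      // Copy the bits of a double into a same-sized integer, then print
      // them as hex -- the same trick emitConstantPool() uses before
      // snprintf().
      double Value = -0.0;
      uint64_t RawValue = 0;
      static_assert(sizeof(RawValue) == sizeof(Value), "size mismatch");
      std::memcpy(&RawValue, &Value, sizeof(Value));

      char buf[30];
      int CharsPrinted = std::snprintf(buf, sizeof(buf), "0x%llx",
                                       static_cast<unsigned long long>(RawValue));
      if (CharsPrinted > 0)
        std::printf("\t.quad\t%s\t# double %g\n", buf, Value);
      return 0;
    }

With these inputs it prints the raw encoding of negative zero, whose sign bit would be easy to lose in a decimal round trip.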
| 4999 void TargetDataX8632::lowerConstants() const { | 5331 void TargetDataX8632::lowerConstants() const { |
| 5000 if (Ctx->getFlags().getDisableTranslation()) | 5332 if (Ctx->getFlags().getDisableTranslation()) |
| 5001 return; | 5333 return; |
| 5002 // No need to emit constants from the int pool since (for x86) they | 5334 // Integer pool constants exist only when immediates are pooled; emit |
| 5003 // are embedded as immediates in the instructions, just emit float/double. | 5335 // them first, then emit float/double. |
| 5004 switch (Ctx->getFlags().getOutFileType()) { | 5336 switch (Ctx->getFlags().getOutFileType()) { |
| 5005 case FT_Elf: { | 5337 case FT_Elf: { |
| 5006 ELFObjectWriter *Writer = Ctx->getObjectWriter(); | 5338 ELFObjectWriter *Writer = Ctx->getObjectWriter(); |
| 5339 | |
| 5340 Writer->writeConstantPool<ConstantInteger32>(IceType_i8); | |
| 5341 Writer->writeConstantPool<ConstantInteger32>(IceType_i16); | |
| 5342 Writer->writeConstantPool<ConstantInteger32>(IceType_i32); | |
| 5343 | |
| 5007 Writer->writeConstantPool<ConstantFloat>(IceType_f32); | 5344 Writer->writeConstantPool<ConstantFloat>(IceType_f32); |
| 5008 Writer->writeConstantPool<ConstantDouble>(IceType_f64); | 5345 Writer->writeConstantPool<ConstantDouble>(IceType_f64); |
| 5009 } break; | 5346 } break; |
| 5010 case FT_Asm: | 5347 case FT_Asm: |
| 5011 case FT_Iasm: { | 5348 case FT_Iasm: { |
| 5012 OstreamLocker L(Ctx); | 5349 OstreamLocker L(Ctx); |
| 5350 | |
| 5351 emitConstantPool<PoolTypeConverter<char>>(Ctx); | |
| 5352 emitConstantPool<PoolTypeConverter<short>>(Ctx); | |
| 5353 emitConstantPool<PoolTypeConverter<int>>(Ctx); | |
| 5354 | |
| 5013 emitConstantPool<PoolTypeConverter<float>>(Ctx); | 5355 emitConstantPool<PoolTypeConverter<float>>(Ctx); |
| 5014 emitConstantPool<PoolTypeConverter<double>>(Ctx); | 5356 emitConstantPool<PoolTypeConverter<double>>(Ctx); |
| 5015 } break; | 5357 } break; |
| 5016 } | 5358 } |
| 5017 } | 5359 } |
| 5018 | 5360 |
| 5019 TargetHeaderX8632::TargetHeaderX8632(GlobalContext *Ctx) | 5361 TargetHeaderX8632::TargetHeaderX8632(GlobalContext *Ctx) |
| 5020 : TargetHeaderLowering(Ctx) {} | 5362 : TargetHeaderLowering(Ctx) {} |
| 5021 | 5363 |
| 5364 // Blind/pool an Immediate | |
| 5365 Operand *TargetX8632::randomizeOrPoolImmediate(Constant *Immediate, | |
| 5366 int32_t RegNum) { | |
| 5367 assert(llvm::isa<ConstantInteger32>(Immediate) || | |
| 5368 llvm::isa<ConstantRelocatable>(Immediate)); | |
| 5369 if (Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == RPI_None || | |
| 5370 RandomizationPoolingPaused == true) { | |
| 5371 // Immediate randomization/pooling is turned off | |
| 5372 return Immediate; | |
| 5373 } | |
| 5374 if (Constant *C = llvm::dyn_cast_or_null<Constant>(Immediate)) { | |
| 5375 if (C->shouldBeRandomizedOrPooled(Ctx)) { | |
| 5376 Ctx->statsUpdateRPImms(); | |
| 5377 if (Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == | |
| 5378 RPI_Randomize) { | |
| 5379 // blind the constant | |
| 5380 // FROM: | |
| 5381 // imm | |
| 5382 // TO: | |
| 5383 // insert: mov imm+cookie, Reg | |
| 5384 // insert: lea -cookie[Reg], Reg | |
|
Jim Stichnoth
2015/06/19 16:51:03
Explain in a comment that lea is used (as opposed
qining
2015/06/19 20:22:26
Done.
| |
| 5385 // => Reg | |
| 5386 // If we have already been assigned a physical register, we must have | |
| 5387 // come from advancedPhiLowering()=>lowerAssign(). In this case we should | |
| 5388 // reuse the assigned register, as this assignment is the start of its | |
| 5389 // use-def chain. Hence the RegNum argument. | |
| 5390 Variable *Reg = makeReg(IceType_i32, RegNum); | |
| 5391 ConstantInteger32 *Integer = llvm::cast<ConstantInteger32>(Immediate); | |
| 5392 uint32_t Value = Integer->getValue(); | |
| 5393 uint32_t Cookie = Ctx->getRandomizationCookie(); | |
| 5394 _mov(Reg, Ctx->getConstantInt(IceType_i32, Cookie + Value)); | |
| 5395 Constant *Offset = Ctx->getConstantInt(IceType_i32, 0 - Cookie); | |
| 5396 _lea(Reg, | |
| 5397 OperandX8632Mem::create(Func, IceType_i32, Reg, Offset, NULL, 0)); | |
|
Jim Stichnoth
2015/06/19 16:51:03
nullptr
qining
2015/06/19 20:22:26
Done.
| |
| 5398 // Make sure that liveness analysis won't kill this | |
| 5399 // variable; otherwise a liveness assertion will be | |
| 5400 // triggered. | |
| 5401 _set_dest_nonkillable(); | |
| 5402 if (Immediate->getType() != IceType_i32) { | |
| 5403 Variable *TruncReg = makeReg(Immediate->getType(), RegNum); | |
| 5404 _mov(TruncReg, Reg); | |
| 5405 return TruncReg; | |
| 5406 } | |
| 5407 return Reg; | |
| 5408 } | |
| 5409 if (Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == RPI_Pool) { | |
| 5410 // pool the constant | |
| 5411 // FROM: | |
| 5412 // imm | |
| 5413 // TO: | |
| 5414 // insert: mov $label, Reg | |
| 5415 // => Reg | |
| 5416 assert(Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == | |
| 5417 RPI_Pool); | |
| 5418 Immediate->shouldBePooled = true; | |
| 5419 // If we have already been assigned a physical register, we must have | |
| 5420 // come from advancedPhiLowering()=>lowerAssign(). In this case we should | |
| 5421 // reuse the assigned register, as this assignment is the start of its | |
| 5422 // use-def chain. Hence the RegNum argument. | |
| 5423 Variable *Reg = makeReg(Immediate->getType(), RegNum); | |
| 5424 IceString Label; | |
| 5425 llvm::raw_string_ostream Label_stream(Label); | |
| 5426 Immediate->emitPoolLabel(Label_stream); | |
| 5427 const RelocOffsetT Offset = 0; | |
| 5428 const bool SuppressMangling = true; | |
| 5429 Constant *Symbol = | |
| 5430 Ctx->getConstantSym(Offset, Label_stream.str(), SuppressMangling); | |
| 5431 OperandX8632Mem *MemOperand = | |
| 5432 OperandX8632Mem::create(Func, Immediate->getType(), NULL, Symbol); | |
| 5433 _mov(Reg, MemOperand); | |
| 5434 return Reg; | |
| 5435 } | |
| 5436 assert("Unsupported -randomize-pool-immediates option" && false); | |
| 5437 } | |
| 5438 } | |
| 5439 // The constant Immediate is not eligible for blinding/pooling. | |
| 5440 return Immediate; | |
| 5441 } | |
| 5442 | |
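The mov/lea pair in the randomization path relies on plain 32-bit modular arithmetic: the first instruction materializes Value + Cookie and the second adds the displacement 0 - Cookie back, so the register ends up holding the original immediate while neither instruction encodes it directly. A minimal standalone check of that identity (plain C++; the Cookie and Value here are made-up example numbers):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t Cookie = 0x9e3779b9u; // stand-in for the per-run cookie
      const uint32_t Value = 0x12345678u;  // the immediate being blinded

      // mov Reg, (Value + Cookie)  -- unsigned wraparound is well defined
      uint32_t Reg = Value + Cookie;
      // lea Reg, [Reg + (0 - Cookie)]
      Reg = Reg + (0u - Cookie);

      assert(Reg == Value); // the original immediate is recovered
      return 0;
    }

Because the displacement left behind is always 0 - Cookie, the memory-operand variant below can use that value to recognize operands it has already rewritten.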
| 5443 OperandX8632Mem * | |
| 5444 TargetX8632::randomizeOrPoolImmediate(OperandX8632Mem *MemOperand, | |
| 5445 int32_t RegNum) { | |
| 5446 assert(MemOperand); | |
| 5447 if (Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == RPI_None || | |
| 5448 RandomizationPoolingPaused == true) { | |
| 5449 // Immediate randomization/pooling is turned off | |
| 5450 return MemOperand; | |
| 5451 } | |
| 5452 | |
| 5453 if (Constant *C = llvm::dyn_cast_or_null<Constant>(MemOperand->getOffset())) { | |
| 5454 if (C->shouldBeRandomizedOrPooled(Ctx)) { | |
| 5455 // The offset of this mem operand should be blinded or pooled | |
| 5456 Ctx->statsUpdateRPImms(); | |
| 5457 if (Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == | |
| 5458 RPI_Randomize) { | |
| 5459 // blind the constant offset | |
| 5460 // FROM: | |
| 5461 // offset[base, index, shift] | |
| 5462 // TO: | |
| 5463 // insert: lea offset+cookie[base], RegTemp | |
| 5464 // => -cookie[RegTemp, index, shift] | |
| 5465 uint32_t Value = | |
| 5466 llvm::dyn_cast<ConstantInteger32>(MemOperand->getOffset()) | |
| 5467 ->getValue(); | |
| 5468 uint32_t Cookie = Ctx->getRandomizationCookie(); | |
| 5469 Constant *Mask1 = Ctx->getConstantInt( | |
| 5470 MemOperand->getOffset()->getType(), Cookie + Value); | |
| 5471 Constant *Mask2 = | |
| 5472 Ctx->getConstantInt(MemOperand->getOffset()->getType(), 0 - Cookie); | |
| 5473 | |
| 5474 // qining: If the offset value is -Cookie, this memory operand should | |
| 5475 // already have been randomized, so just return it. | |
| 5476 if(Value == -Cookie) return MemOperand; | |
|
Jim Stichnoth
2015/06/19 16:51:03
make format
Also, there's an interesting one-in-f
qining
2015/06/19 20:22:25
I think Cookie==MIN_INT should still be fine.
Ass
| |
| 5477 | |
| 5478 // qining: We need to make sure that MemOperand->getBase(), if it is a | |
| 5479 // variable, gets a physical register. | |
| 5480 if (MemOperand->getBase() != NULL) | |
| 5481 MemOperand->getBase()->setWeightInfinite(); | |
| 5482 OperandX8632Mem *TempMemOperand = OperandX8632Mem::create( | |
| 5483 Func, MemOperand->getType(), MemOperand->getBase(), Mask1); | |
| 5484 // If we have already been assigned a physical register, we must have | |
| 5485 // come from advancedPhiLowering()=>lowerAssign(). In this case we should | |
| 5486 // reuse the assigned register, as this assignment is the start of its | |
| 5487 // use-def chain. Hence the RegNum argument. | |
| 5488 Variable *RegTemp = makeReg(MemOperand->getOffset()->getType(), RegNum); | |
| 5489 _lea(RegTemp, TempMemOperand); | |
| 5490 // Since the source operand does not use the dest register, we normally | |
| 5491 // don't need to add _set_dest_nonkillable(). | |
| 5492 // qining: But if we reuse the same dest register, i.e. RegNum is | |
| 5493 // assigned, we should add _set_dest_nonkillable(). | |
| 5494 if (RegNum != Variable::NoRegister) | |
| 5495 _set_dest_nonkillable(); | |
| 5496 | |
| 5497 OperandX8632Mem *NewMemOperand = OperandX8632Mem::create( | |
| 5498 Func, MemOperand->getType(), RegTemp, Mask2, MemOperand->getIndex(), | |
| 5499 MemOperand->getShift(), MemOperand->getSegmentRegister()); | |
| 5500 | |
| 5501 return NewMemOperand; | |
| 5502 } | |
| 5503 if (Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == RPI_Pool) { | |
| 5504 // pool the constant offset | |
| 5505 // FROM: | |
| 5506 // offset[base, index, shift] | |
| 5507 // TO: | |
| 5508 // insert: mov $label, RegTemp | |
| 5509 // insert: lea [base, RegTemp], RegTemp | |
| 5510 // =>[RegTemp, index, shift] | |
| 5511 assert(Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == | |
| 5512 RPI_Pool); | |
| 5513 // qining: A mem operand should never appear as the source operand | |
| 5514 // of a phi lowering assignment, so there is no need to reuse any | |
| 5515 // register here. | |
| 5516 // Moreover, phi lowering should not ask for new physical registers | |
| 5517 // in general. | |
| 5518 // So if we do encounter a MemOperand during phi lowering, we do not | |
| 5519 // blind or pool its immediates for now. | |
| 5520 if (RegNum != Variable::NoRegister) | |
| 5521 return MemOperand; | |
| 5522 Variable *RegTemp = makeReg(IceType_i32); | |
| 5523 IceString Label; | |
| 5524 llvm::raw_string_ostream Label_stream(Label); | |
| 5525 MemOperand->getOffset()->emitPoolLabel(Label_stream); | |
| 5526 MemOperand->getOffset()->shouldBePooled = true; | |
| 5527 const RelocOffsetT SymOffset = 0; | |
| 5528 bool SuppressMangling = true; | |
| 5529 Constant *Symbol = Ctx->getConstantSym(SymOffset, Label_stream.str(), | |
| 5530 SuppressMangling); | |
| 5531 OperandX8632Mem *SymbolOperand = OperandX8632Mem::create( | |
| 5532 Func, MemOperand->getOffset()->getType(), NULL, Symbol); | |
| 5533 _mov(RegTemp, SymbolOperand); | |
| 5534 // qining: We need to make sure that MemOperand->getBase() gets a | |
| 5535 // physical register. If there is no base register here, no extra | |
| 5536 // lea instruction is needed. | |
| 5537 if (MemOperand->getBase()) { | |
| 5538 OperandX8632Mem *CalculateOperand = OperandX8632Mem::create( | |
| 5539 Func, MemOperand->getType(), MemOperand->getBase(), NULL, RegTemp, | |
| 5540 0, MemOperand->getSegmentRegister()); | |
| 5541 _lea(RegTemp, CalculateOperand); | |
| 5542 _set_dest_nonkillable(); | |
| 5543 } | |
| 5544 OperandX8632Mem *NewMemOperand = OperandX8632Mem::create( | |
| 5545 Func, MemOperand->getType(), RegTemp, NULL, MemOperand->getIndex(), | |
| 5546 MemOperand->getShift(), MemOperand->getSegmentRegister()); | |
| 5547 return NewMemOperand; | |
| 5548 } | |
| 5549 assert("Unsupported -randomize-pool-immediates option" && false); | |
| 5550 } | |
| 5551 } | |
| 5552 // The offset is not eligible for blinding or pooling; return the | |
| 5553 // original mem operand. | |
| 5554 return MemOperand; | |
| 5555 } | |
| 5556 | |
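Both memory-operand rewrites above preserve the effective address: blinding moves Offset + Cookie into a temporary base via lea and leaves 0 - Cookie as the visible displacement, while pooling loads the offset from its pool slot and folds it into the base. A standalone sketch of the blinding case's address arithmetic, with registers modeled as uint32_t and all concrete values invented for the example:

    #include <cassert>
    #include <cstdint>

    // Effective address of offset[base, index, shift] on x86-32.
    static uint32_t effectiveAddr(uint32_t Offset, uint32_t Base,
                                  uint32_t Index, uint32_t Shift) {
      return Offset + Base + (Index << Shift);
    }

    int main() {
      const uint32_t Cookie = 0x243f6a88u;
      const uint32_t Offset = 0x40u, Base = 0x1000u, Index = 3u, Shift = 2u;

      // Original operand: Offset[Base, Index, Shift]
      uint32_t Original = effectiveAddr(Offset, Base, Index, Shift);

      // Rewritten form:
      //   lea RegTemp, (Offset + Cookie)[Base]
      uint32_t RegTemp = Base + (Offset + Cookie);
      //   ... and then use (0 - Cookie)[RegTemp, Index, Shift]
      uint32_t Rewritten = effectiveAddr(0u - Cookie, RegTemp, Index, Shift);

      assert(Rewritten == Original); // same address, offset no longer visible
      return 0;
    }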
| 5022 } // end of namespace Ice | 5557 } // end of namespace Ice |
| OLD | NEW |