Chromium Code Reviews

Side by Side Diff: src/IceTargetLoweringX86BaseImpl.h

Issue 1216933015: X8632 Templatization completed. (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Eliminates all references to X8632 from Impl files. Created 5 years, 5 months ago
1 //===- subzero/src/IceTargetLoweringX86BaseImpl.h - x86 lowering -*- C++ -*-==// 1 //===- subzero/src/IceTargetLoweringX86BaseImpl.h - x86 lowering -*- C++ -*-==//
2 // 2 //
3 // The Subzero Code Generator 3 // The Subzero Code Generator
4 // 4 //
5 // This file is distributed under the University of Illinois Open Source 5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details. 6 // License. See LICENSE.TXT for details.
7 // 7 //
8 //===----------------------------------------------------------------------===// 8 //===----------------------------------------------------------------------===//
9 // 9 //
10 // This file implements the TargetLoweringX86Base class, which 10 // This file implements the TargetLoweringX86Base class, which
11 // consists almost entirely of the lowering sequence for each 11 // consists almost entirely of the lowering sequence for each
12 // high-level instruction. 12 // high-level instruction.
13 // 13 //
14 //===----------------------------------------------------------------------===// 14 //===----------------------------------------------------------------------===//
15 15
16 #ifndef SUBZERO_SRC_ICETARGETLOWERINGX86BASEIMPL_H 16 #ifndef SUBZERO_SRC_ICETARGETLOWERINGX86BASEIMPL_H
17 #define SUBZERO_SRC_ICETARGETLOWERINGX86BASEIMPL_H 17 #define SUBZERO_SRC_ICETARGETLOWERINGX86BASEIMPL_H
18 18
19 #include "IceCfg.h" 19 #include "IceCfg.h"
20 #include "IceCfgNode.h" 20 #include "IceCfgNode.h"
21 #include "IceClFlags.h" 21 #include "IceClFlags.h"
22 #include "IceDefs.h" 22 #include "IceDefs.h"
23 #include "IceELFObjectWriter.h" 23 #include "IceELFObjectWriter.h"
24 #include "IceGlobalInits.h" 24 #include "IceGlobalInits.h"
25 #include "IceInstX8632.h"
26 #include "IceLiveness.h" 25 #include "IceLiveness.h"
27 #include "IceOperand.h" 26 #include "IceOperand.h"
28 #include "IceRegistersX8632.h"
29 #include "IceTargetLoweringX8632.def"
30 #include "IceTargetLoweringX8632.h"
31 #include "IceUtils.h" 27 #include "IceUtils.h"
32 #include "llvm/Support/MathExtras.h" 28 #include "llvm/Support/MathExtras.h"
33 29
34 namespace Ice { 30 namespace Ice {
35 namespace X86Internal { 31 namespace X86Internal {
36 32
37 // A helper class to ease the settings of RandomizationPoolingPause 33 // A helper class to ease the settings of RandomizationPoolingPause
38 // to disable constant blinding or pooling for some translation phases. 34 // to disable constant blinding or pooling for some translation phases.
39 class BoolFlagSaver { 35 class BoolFlagSaver {
40 BoolFlagSaver() = delete; 36 BoolFlagSaver() = delete;
(...skipping 243 matching lines...)
284 } 280 }
285 // TODO: Don't initialize IntegerRegisters and friends every time. 281 // TODO: Don't initialize IntegerRegisters and friends every time.
286 // Instead, initialize in some sort of static initializer for the 282 // Instead, initialize in some sort of static initializer for the
287 // class. 283 // class.
288 llvm::SmallBitVector IntegerRegisters(Traits::RegisterSet::Reg_NUM); 284 llvm::SmallBitVector IntegerRegisters(Traits::RegisterSet::Reg_NUM);
289 llvm::SmallBitVector IntegerRegistersI8(Traits::RegisterSet::Reg_NUM); 285 llvm::SmallBitVector IntegerRegistersI8(Traits::RegisterSet::Reg_NUM);
290 llvm::SmallBitVector FloatRegisters(Traits::RegisterSet::Reg_NUM); 286 llvm::SmallBitVector FloatRegisters(Traits::RegisterSet::Reg_NUM);
291 llvm::SmallBitVector VectorRegisters(Traits::RegisterSet::Reg_NUM); 287 llvm::SmallBitVector VectorRegisters(Traits::RegisterSet::Reg_NUM);
292 llvm::SmallBitVector InvalidRegisters(Traits::RegisterSet::Reg_NUM); 288 llvm::SmallBitVector InvalidRegisters(Traits::RegisterSet::Reg_NUM);
293 ScratchRegs.resize(Traits::RegisterSet::Reg_NUM); 289 ScratchRegs.resize(Traits::RegisterSet::Reg_NUM);
294 #define X(val, encode, name, name16, name8, scratch, preserved, stackptr, \ 290
295 frameptr, isI8, isInt, isFP) \ 291 Traits::InitRegisterSet(&IntegerRegisters, &IntegerRegistersI8,
296 IntegerRegisters[Traits::RegisterSet::val] = isInt; \ 292 &FloatRegisters, &VectorRegisters, &ScratchRegs);
297 IntegerRegistersI8[Traits::RegisterSet::val] = isI8; \ 293
298 FloatRegisters[Traits::RegisterSet::val] = isFP; \
299 VectorRegisters[Traits::RegisterSet::val] = isFP; \
300 ScratchRegs[Traits::RegisterSet::val] = scratch;
301 REGX8632_TABLE;
302 #undef X
303 TypeToRegisterSet[IceType_void] = InvalidRegisters; 294 TypeToRegisterSet[IceType_void] = InvalidRegisters;
304 TypeToRegisterSet[IceType_i1] = IntegerRegistersI8; 295 TypeToRegisterSet[IceType_i1] = IntegerRegistersI8;
305 TypeToRegisterSet[IceType_i8] = IntegerRegistersI8; 296 TypeToRegisterSet[IceType_i8] = IntegerRegistersI8;
306 TypeToRegisterSet[IceType_i16] = IntegerRegisters; 297 TypeToRegisterSet[IceType_i16] = IntegerRegisters;
307 TypeToRegisterSet[IceType_i32] = IntegerRegisters; 298 TypeToRegisterSet[IceType_i32] = IntegerRegisters;
308 TypeToRegisterSet[IceType_i64] = IntegerRegisters; 299 TypeToRegisterSet[IceType_i64] = IntegerRegisters;
309 TypeToRegisterSet[IceType_f32] = FloatRegisters; 300 TypeToRegisterSet[IceType_f32] = FloatRegisters;
310 TypeToRegisterSet[IceType_f64] = FloatRegisters; 301 TypeToRegisterSet[IceType_f64] = FloatRegisters;
311 TypeToRegisterSet[IceType_v4i1] = VectorRegisters; 302 TypeToRegisterSet[IceType_v4i1] = VectorRegisters;
312 TypeToRegisterSet[IceType_v8i1] = VectorRegisters; 303 TypeToRegisterSet[IceType_v8i1] = VectorRegisters;
(...skipping 175 matching lines...)
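Note: the REGX8632_TABLE X-macro expansion removed in the chunk above is exactly what the new Traits::InitRegisterSet(&IntegerRegisters, &IntegerRegistersI8, &FloatRegisters, &VectorRegisters, &ScratchRegs) call must now provide. A minimal sketch of such a traits hook, reconstructed from the deleted macro body (the real definition belongs to the X8632-specific traits file and is not part of this diff):

static void InitRegisterSet(llvm::SmallBitVector *IntegerRegisters,
                            llvm::SmallBitVector *IntegerRegistersI8,
                            llvm::SmallBitVector *FloatRegisters,
                            llvm::SmallBitVector *VectorRegisters,
                            llvm::SmallBitVector *ScratchRegs) {
#define X(val, encode, name, name16, name8, scratch, preserved, stackptr,     \
          frameptr, isI8, isInt, isFP)                                        \
  (*IntegerRegisters)[RegisterSet::val] = isInt;                              \
  (*IntegerRegistersI8)[RegisterSet::val] = isI8;                             \
  (*FloatRegisters)[RegisterSet::val] = isFP;                                 \
  (*VectorRegisters)[RegisterSet::val] = isFP;                                \
  (*ScratchRegs)[RegisterSet::val] = scratch;
  REGX8632_TABLE;
#undef X
}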
488 case InstArithmetic::Xor: 479 case InstArithmetic::Xor:
489 return true; 480 return true;
490 case InstArithmetic::Shl: 481 case InstArithmetic::Shl:
491 case InstArithmetic::Lshr: 482 case InstArithmetic::Lshr:
492 case InstArithmetic::Ashr: 483 case InstArithmetic::Ashr:
493 return false; // TODO(stichnot): implement 484 return false; // TODO(stichnot): implement
494 return !isI64; 485 return !isI64;
495 } 486 }
496 } 487 }
497 488
489 template <class Machine>
498 bool isSameMemAddressOperand(const Operand *A, const Operand *B) { 490 bool isSameMemAddressOperand(const Operand *A, const Operand *B) {
499 if (A == B) 491 if (A == B)
500 return true; 492 return true;
501 if (auto *MemA = llvm::dyn_cast<OperandX8632Mem>(A)) { 493 if (auto *MemA = llvm::dyn_cast<
502 if (auto *MemB = llvm::dyn_cast<OperandX8632Mem>(B)) { 494 typename TargetX86Base<Machine>::Traits::X86OperandMem>(A)) {
495 if (auto *MemB = llvm::dyn_cast<
496 typename TargetX86Base<Machine>::Traits::X86OperandMem>(B)) {
503 return MemA->getBase() == MemB->getBase() && 497 return MemA->getBase() == MemB->getBase() &&
504 MemA->getOffset() == MemB->getOffset() && 498 MemA->getOffset() == MemB->getOffset() &&
505 MemA->getIndex() == MemB->getIndex() && 499 MemA->getIndex() == MemB->getIndex() &&
506 MemA->getShift() == MemB->getShift() && 500 MemA->getShift() == MemB->getShift() &&
507 MemA->getSegmentRegister() == MemB->getSegmentRegister(); 501 MemA->getSegmentRegister() == MemB->getSegmentRegister();
508 } 502 }
509 } 503 }
510 return false; 504 return false;
511 } 505 }
512 506
(...skipping 44 matching lines...)
557 // instruction will be retained and later lowered. On the other 551 // instruction will be retained and later lowered. On the other
558 // hand, if the RMW instruction does not end x's live range, then 552 // hand, if the RMW instruction does not end x's live range, then
559 // the Store instruction must still be present, and therefore the 553 // the Store instruction must still be present, and therefore the
560 // RMW instruction is ignored during lowering because it is 554 // RMW instruction is ignored during lowering because it is
561 // redundant with the Store instruction. 555 // redundant with the Store instruction.
562 // 556 //
563 // Note that if "a" has further uses, the RMW transformation may 557 // Note that if "a" has further uses, the RMW transformation may
564 // still trigger, resulting in two loads and one store, which is 558 // still trigger, resulting in two loads and one store, which is
565 // worse than the original one load and one store. However, this is 559 // worse than the original one load and one store. However, this is
566 // probably rare, and caching probably keeps it just as fast. 560 // probably rare, and caching probably keeps it just as fast.
567 if (!isSameMemAddressOperand(Load->getSourceAddress(), 561 if (!isSameMemAddressOperand<Machine>(Load->getSourceAddress(),
568 Store->getAddr())) 562 Store->getAddr()))
569 continue; 563 continue;
570 Operand *ArithSrcFromLoad = Arith->getSrc(0); 564 Operand *ArithSrcFromLoad = Arith->getSrc(0);
571 Operand *ArithSrcOther = Arith->getSrc(1); 565 Operand *ArithSrcOther = Arith->getSrc(1);
572 if (ArithSrcFromLoad != Load->getDest()) { 566 if (ArithSrcFromLoad != Load->getDest()) {
573 if (!Arith->isCommutative() || ArithSrcOther != Load->getDest()) 567 if (!Arith->isCommutative() || ArithSrcOther != Load->getDest())
574 continue; 568 continue;
575 std::swap(ArithSrcFromLoad, ArithSrcOther); 569 std::swap(ArithSrcFromLoad, ArithSrcOther);
576 } 570 }
577 if (Arith->getDest() != Store->getData()) 571 if (Arith->getDest() != Store->getData())
578 continue; 572 continue;
579 if (!canRMW(Arith)) 573 if (!canRMW(Arith))
580 continue; 574 continue;
581 if (Func->isVerbose(IceV_RMW)) { 575 if (Func->isVerbose(IceV_RMW)) {
582 Str << "Found RMW in " << Func->getFunctionName() << ":\n "; 576 Str << "Found RMW in " << Func->getFunctionName() << ":\n ";
583 Load->dump(Func); 577 Load->dump(Func);
584 Str << "\n "; 578 Str << "\n ";
585 Arith->dump(Func); 579 Arith->dump(Func);
586 Str << "\n "; 580 Str << "\n ";
587 Store->dump(Func); 581 Store->dump(Func);
588 Str << "\n"; 582 Str << "\n";
589 } 583 }
590 Variable *Beacon = Func->template makeVariable(IceType_i32); 584 Variable *Beacon = Func->template makeVariable(IceType_i32);
591 Beacon->setWeight(0); 585 Beacon->setWeight(0);
592 Store->setRmwBeacon(Beacon); 586 Store->setRmwBeacon(Beacon);
593 InstFakeDef *BeaconDef = InstFakeDef::create(Func, Beacon); 587 InstFakeDef *BeaconDef = InstFakeDef::create(Func, Beacon);
594 Node->getInsts().insert(I3, BeaconDef); 588 Node->getInsts().insert(I3, BeaconDef);
595 InstX8632FakeRMW *RMW = InstX8632FakeRMW::create( 589 typename Traits::Insts::FakeRMW *RMW =
596 Func, ArithSrcOther, Store->getAddr(), Beacon, Arith->getOp()); 590 Traits::Insts::FakeRMW::create(Func, ArithSrcOther,
591 Store->getAddr(), Beacon,
592 Arith->getOp());
597 Node->getInsts().insert(I3, RMW); 593 Node->getInsts().insert(I3, RMW);
598 } 594 }
599 } 595 }
600 } 596 }
601 } 597 }
602 } 598 }
603 } 599 }
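Note: as a self-contained illustration of the pattern this RMW pass matches (my example, not part of the patch):

// At the C level, findRMW() is looking for a load, an arithmetic op on the
// loaded value, and a store back to the same address:
void incrementInPlace(int *p) {
  *p = *p + 1; // load *p, add 1, store back to *p
}
// When the loaded temporary's live range ends at the add, lowering can emit a
// single read-modify-write instruction such as `addl $1, (%eax)` instead of a
// separate load and store. The FakeRMW plus zero-weight beacon inserted above
// defer that decision to lowering, where the fold is either kept or dropped as
// redundant with the original Store.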
604 600
605 // Converts a ConstantInteger32 operand into its constant value, or 601 // Converts a ConstantInteger32 operand into its constant value, or
606 // MemoryOrderInvalid if the operand is not a ConstantInteger32. 602 // MemoryOrderInvalid if the operand is not a ConstantInteger32.
(...skipping 106 matching lines...)
713 } 709 }
714 Context.advanceCur(); 710 Context.advanceCur();
715 Context.advanceNext(); 711 Context.advanceNext();
716 } 712 }
717 } 713 }
718 Func->dump("After load optimization"); 714 Func->dump("After load optimization");
719 } 715 }
720 716
721 template <class Machine> 717 template <class Machine>
722 bool TargetX86Base<Machine>::doBranchOpt(Inst *I, const CfgNode *NextNode) { 718 bool TargetX86Base<Machine>::doBranchOpt(Inst *I, const CfgNode *NextNode) {
723 if (InstX8632Br *Br = llvm::dyn_cast<InstX8632Br>(I)) { 719 if (auto *Br = llvm::dyn_cast<typename Traits::Insts::Br>(I)) {
724 return Br->optimizeBranch(NextNode); 720 return Br->optimizeBranch(NextNode);
725 } 721 }
726 return false; 722 return false;
727 } 723 }
728 724
729 template <class Machine> 725 template <class Machine>
730 IceString TargetX86Base<Machine>::RegNames[] = {
731 #define X(val, encode, name, name16, name8, scratch, preserved, stackptr, \
732 frameptr, isI8, isInt, isFP) \
733 name,
734 REGX8632_TABLE
735 #undef X
736 };
737
738 template <class Machine>
739 Variable *TargetX86Base<Machine>::getPhysicalRegister(SizeT RegNum, Type Ty) { 726 Variable *TargetX86Base<Machine>::getPhysicalRegister(SizeT RegNum, Type Ty) {
740 if (Ty == IceType_void) 727 if (Ty == IceType_void)
741 Ty = IceType_i32; 728 Ty = IceType_i32;
742 if (PhysicalRegisters[Ty].empty()) 729 if (PhysicalRegisters[Ty].empty())
743 PhysicalRegisters[Ty].resize(Traits::RegisterSet::Reg_NUM); 730 PhysicalRegisters[Ty].resize(Traits::RegisterSet::Reg_NUM);
744 assert(RegNum < PhysicalRegisters[Ty].size()); 731 assert(RegNum < PhysicalRegisters[Ty].size());
745 Variable *Reg = PhysicalRegisters[Ty][RegNum]; 732 Variable *Reg = PhysicalRegisters[Ty][RegNum];
746 if (Reg == nullptr) { 733 if (Reg == nullptr) {
747 Reg = Func->template makeVariable(Ty); 734 Reg = Func->template makeVariable(Ty);
748 Reg->setRegNum(RegNum); 735 Reg->setRegNum(RegNum);
749 PhysicalRegisters[Ty][RegNum] = Reg; 736 PhysicalRegisters[Ty][RegNum] = Reg;
750 // Specially mark esp as an "argument" so that it is considered 737 // Specially mark esp as an "argument" so that it is considered
751 // live upon function entry. 738 // live upon function entry.
752 if (RegNum == Traits::RegisterSet::Reg_esp) { 739 if (RegNum == Traits::RegisterSet::Reg_esp) {
753 Func->addImplicitArg(Reg); 740 Func->addImplicitArg(Reg);
754 Reg->setIgnoreLiveness(); 741 Reg->setIgnoreLiveness();
755 } 742 }
756 } 743 }
757 return Reg; 744 return Reg;
758 } 745 }
759 746
760 template <class Machine> 747 template <class Machine>
761 IceString TargetX86Base<Machine>::getRegName(SizeT RegNum, Type Ty) const { 748 IceString TargetX86Base<Machine>::getRegName(SizeT RegNum, Type Ty) const {
762 assert(RegNum < Traits::RegisterSet::Reg_NUM); 749 return Traits::getRegName(RegNum, Ty);
763 static IceString RegNames8[] = {
764 #define X(val, encode, name, name16, name8, scratch, preserved, stackptr, \
765 frameptr, isI8, isInt, isFP) \
766 name8,
767 REGX8632_TABLE
768 #undef X
769 };
770 static IceString RegNames16[] = {
771 #define X(val, encode, name, name16, name8, scratch, preserved, stackptr, \
772 frameptr, isI8, isInt, isFP) \
773 name16,
774 REGX8632_TABLE
775 #undef X
776 };
777 switch (Ty) {
778 case IceType_i1:
779 case IceType_i8:
780 return RegNames8[RegNum];
781 case IceType_i16:
782 return RegNames16[RegNum];
783 default:
784 return RegNames[RegNum];
785 }
786 } 750 }
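Note: the switch deleted above effectively specifies what Traits::getRegName must return: the name8 column of REGX8632_TABLE for i1/i8, the name16 column for i16, and the full register name otherwise. A hedged sketch of that traits routine (the real one lives with the X8632 traits, outside this diff), where RegNames, RegNames16 and RegNames8 stand for the X-macro-generated tables shown in the deleted lines:

static IceString getRegName(SizeT RegNum, Type Ty) {
  assert(RegNum < RegisterSet::Reg_NUM);
  switch (Ty) {
  case IceType_i1:
  case IceType_i8:
    return RegNames8[RegNum];  // e.g. "al"
  case IceType_i16:
    return RegNames16[RegNum]; // e.g. "ax"
  default:
    return RegNames[RegNum];   // e.g. "eax"
  }
}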
787 751
788 template <class Machine> 752 template <class Machine>
789 void TargetX86Base<Machine>::emitVariable(const Variable *Var) const { 753 void TargetX86Base<Machine>::emitVariable(const Variable *Var) const {
790 Ostream &Str = Ctx->getStrEmit(); 754 Ostream &Str = Ctx->getStrEmit();
791 if (Var->hasReg()) { 755 if (Var->hasReg()) {
792 Str << "%" << getRegName(Var->getRegNum(), Var->getType()); 756 Str << "%" << getRegName(Var->getRegNum(), Var->getType());
793 return; 757 return;
794 } 758 }
795 if (Var->getWeight().isInf()) { 759 if (Var->getWeight().isInf()) {
(...skipping 80 matching lines...)
876 finishArgumentLowering(Hi, FramePtr, BasicFrameOffset, InArgsSizeBytes); 840 finishArgumentLowering(Hi, FramePtr, BasicFrameOffset, InArgsSizeBytes);
877 return; 841 return;
878 } 842 }
879 if (isVectorType(Ty)) { 843 if (isVectorType(Ty)) {
880 InArgsSizeBytes = Traits::applyStackAlignment(InArgsSizeBytes); 844 InArgsSizeBytes = Traits::applyStackAlignment(InArgsSizeBytes);
881 } 845 }
882 Arg->setStackOffset(BasicFrameOffset + InArgsSizeBytes); 846 Arg->setStackOffset(BasicFrameOffset + InArgsSizeBytes);
883 InArgsSizeBytes += typeWidthInBytesOnStack(Ty); 847 InArgsSizeBytes += typeWidthInBytesOnStack(Ty);
884 if (Arg->hasReg()) { 848 if (Arg->hasReg()) {
885 assert(Ty != IceType_i64); 849 assert(Ty != IceType_i64);
886 OperandX8632Mem *Mem = OperandX8632Mem::create( 850 typename Traits::X86OperandMem *Mem = Traits::X86OperandMem::create(
887 Func, Ty, FramePtr, Ctx->getConstantInt32(Arg->getStackOffset())); 851 Func, Ty, FramePtr, Ctx->getConstantInt32(Arg->getStackOffset()));
888 if (isVectorType(Arg->getType())) { 852 if (isVectorType(Arg->getType())) {
889 _movp(Arg, Mem); 853 _movp(Arg, Mem);
890 } else { 854 } else {
891 _mov(Arg, Mem); 855 _mov(Arg, Mem);
892 } 856 }
893 // This argument-copying instruction uses an explicit 857 // This argument-copying instruction uses an explicit
894 // OperandX8632Mem operand instead of a Variable, so its 858 // typename Traits::X86OperandMem operand instead of a Variable, so its
895 // fill-from-stack operation has to be tracked separately for 859 // fill-from-stack operation has to be tracked separately for
896 // statistics. 860 // statistics.
897 Ctx->statsUpdateFills(); 861 Ctx->statsUpdateFills();
898 } 862 }
899 } 863 }
900 864
901 template <class Machine> Type TargetX86Base<Machine>::stackSlotType() { 865 template <class Machine> Type TargetX86Base<Machine>::stackSlotType() {
902 return IceType_i32; 866 return IceType_i32;
903 } 867 }
904 868
(...skipping 52 matching lines...)
957 // If there is a separate locals area, this specifies the alignment 921 // If there is a separate locals area, this specifies the alignment
958 // for it. 922 // for it.
959 uint32_t LocalsSlotsAlignmentBytes = 0; 923 uint32_t LocalsSlotsAlignmentBytes = 0;
960 // The entire spill locations area gets aligned to largest natural 924 // The entire spill locations area gets aligned to largest natural
961 // alignment of the variables that have a spill slot. 925 // alignment of the variables that have a spill slot.
962 uint32_t SpillAreaAlignmentBytes = 0; 926 uint32_t SpillAreaAlignmentBytes = 0;
963 // A spill slot linked to a variable with a stack slot should reuse 927 // A spill slot linked to a variable with a stack slot should reuse
964 // that stack slot. 928 // that stack slot.
965 std::function<bool(Variable *)> TargetVarHook = 929 std::function<bool(Variable *)> TargetVarHook =
966 [&VariablesLinkedToSpillSlots](Variable *Var) { 930 [&VariablesLinkedToSpillSlots](Variable *Var) {
967 if (SpillVariable *SpillVar = llvm::dyn_cast<SpillVariable>(Var)) { 931 if (auto *SpillVar =
932 llvm::dyn_cast<typename Traits::SpillVariable>(Var)) {
968 assert(Var->getWeight().isZero()); 933 assert(Var->getWeight().isZero());
969 if (SpillVar->getLinkedTo() && !SpillVar->getLinkedTo()->hasReg()) { 934 if (SpillVar->getLinkedTo() && !SpillVar->getLinkedTo()->hasReg()) {
970 VariablesLinkedToSpillSlots.push_back(Var); 935 VariablesLinkedToSpillSlots.push_back(Var);
971 return true; 936 return true;
972 } 937 }
973 } 938 }
974 return false; 939 return false;
975 }; 940 };
976 941
977 // Compute the list of spilled variables and bounds for GlobalsSize, etc. 942 // Compute the list of spilled variables and bounds for GlobalsSize, etc.
(...skipping 83 matching lines...)
1061 finishArgumentLowering(Arg, FramePtr, BasicFrameOffset, InArgsSizeBytes); 1026 finishArgumentLowering(Arg, FramePtr, BasicFrameOffset, InArgsSizeBytes);
1062 } 1027 }
1063 1028
1064 // Fill in stack offsets for locals. 1029 // Fill in stack offsets for locals.
1065 assignVarStackSlots(SortedSpilledVariables, SpillAreaPaddingBytes, 1030 assignVarStackSlots(SortedSpilledVariables, SpillAreaPaddingBytes,
1066 SpillAreaSizeBytes, GlobalsAndSubsequentPaddingSize, 1031 SpillAreaSizeBytes, GlobalsAndSubsequentPaddingSize,
1067 IsEbpBasedFrame); 1032 IsEbpBasedFrame);
1068 // Assign stack offsets to variables that have been linked to spilled 1033 // Assign stack offsets to variables that have been linked to spilled
1069 // variables. 1034 // variables.
1070 for (Variable *Var : VariablesLinkedToSpillSlots) { 1035 for (Variable *Var : VariablesLinkedToSpillSlots) {
1071 Variable *Linked = (llvm::cast<SpillVariable>(Var))->getLinkedTo(); 1036 Variable *Linked =
1037 (llvm::cast<typename Traits::SpillVariable>(Var))->getLinkedTo();
1072 Var->setStackOffset(Linked->getStackOffset()); 1038 Var->setStackOffset(Linked->getStackOffset());
1073 } 1039 }
1074 this->HasComputedFrame = true; 1040 this->HasComputedFrame = true;
1075 1041
1076 if (BuildDefs::dump() && Func->isVerbose(IceV_Frame)) { 1042 if (BuildDefs::dump() && Func->isVerbose(IceV_Frame)) {
1077 OstreamLocker L(Func->getContext()); 1043 OstreamLocker L(Func->getContext());
1078 Ostream &Str = Func->getContext()->getStrDump(); 1044 Ostream &Str = Func->getContext()->getStrDump();
1079 1045
1080 Str << "Stack layout:\n"; 1046 Str << "Stack layout:\n";
1081 uint32_t EspAdjustmentPaddingSize = 1047 uint32_t EspAdjustmentPaddingSize =
(...skipping 16 matching lines...)
1098 << " locals spill area alignment = " << LocalsSlotsAlignmentBytes 1064 << " locals spill area alignment = " << LocalsSlotsAlignmentBytes
1099 << " bytes\n" 1065 << " bytes\n"
1100 << " is ebp based = " << IsEbpBasedFrame << "\n"; 1066 << " is ebp based = " << IsEbpBasedFrame << "\n";
1101 } 1067 }
1102 } 1068 }
1103 1069
1104 template <class Machine> void TargetX86Base<Machine>::addEpilog(CfgNode *Node) { 1070 template <class Machine> void TargetX86Base<Machine>::addEpilog(CfgNode *Node) {
1105 InstList &Insts = Node->getInsts(); 1071 InstList &Insts = Node->getInsts();
1106 InstList::reverse_iterator RI, E; 1072 InstList::reverse_iterator RI, E;
1107 for (RI = Insts.rbegin(), E = Insts.rend(); RI != E; ++RI) { 1073 for (RI = Insts.rbegin(), E = Insts.rend(); RI != E; ++RI) {
1108 if (llvm::isa<InstX8632Ret>(*RI)) 1074 if (llvm::isa<typename Traits::Insts::Ret>(*RI))
1109 break; 1075 break;
1110 } 1076 }
1111 if (RI == E) 1077 if (RI == E)
1112 return; 1078 return;
1113 1079
1114 // Convert the reverse_iterator position into its corresponding 1080 // Convert the reverse_iterator position into its corresponding
1115 // (forward) iterator position. 1081 // (forward) iterator position.
1116 InstList::iterator InsertPoint = RI.base(); 1082 InstList::iterator InsertPoint = RI.base();
1117 --InsertPoint; 1083 --InsertPoint;
1118 Context.init(Node); 1084 Context.init(Node);
(...skipping 89 matching lines...)
1208 return Operand; 1174 return Operand;
1209 if (Variable *Var = llvm::dyn_cast<Variable>(Operand)) { 1175 if (Variable *Var = llvm::dyn_cast<Variable>(Operand)) {
1210 split64(Var); 1176 split64(Var);
1211 return Var->getLo(); 1177 return Var->getLo();
1212 } 1178 }
1213 if (ConstantInteger64 *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) { 1179 if (ConstantInteger64 *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
1214 ConstantInteger32 *ConstInt = llvm::dyn_cast<ConstantInteger32>( 1180 ConstantInteger32 *ConstInt = llvm::dyn_cast<ConstantInteger32>(
1215 Ctx->getConstantInt32(static_cast<int32_t>(Const->getValue()))); 1181 Ctx->getConstantInt32(static_cast<int32_t>(Const->getValue())));
1216 return legalize(ConstInt); 1182 return legalize(ConstInt);
1217 } 1183 }
1218 if (OperandX8632Mem *Mem = llvm::dyn_cast<OperandX8632Mem>(Operand)) { 1184 if (typename Traits::X86OperandMem *Mem =
1219 OperandX8632Mem *MemOperand = OperandX8632Mem::create( 1185 llvm::dyn_cast<typename Traits::X86OperandMem>(Operand)) {
1186 typename Traits::X86OperandMem *MemOperand = Traits::X86OperandMem::create(
1220 Func, IceType_i32, Mem->getBase(), Mem->getOffset(), Mem->getIndex(), 1187 Func, IceType_i32, Mem->getBase(), Mem->getOffset(), Mem->getIndex(),
1221 Mem->getShift(), Mem->getSegmentRegister()); 1188 Mem->getShift(), Mem->getSegmentRegister());
1222 // Test if we should randomize or pool the offset, if so randomize it or 1189 // Test if we should randomize or pool the offset, if so randomize it or
1223 // pool it then create mem operand with the blinded/pooled constant. 1190 // pool it then create mem operand with the blinded/pooled constant.
1224 // Otherwise, return the mem operand as ordinary mem operand. 1191 // Otherwise, return the mem operand as ordinary mem operand.
1225 return legalize(MemOperand); 1192 return legalize(MemOperand);
1226 } 1193 }
1227 llvm_unreachable("Unsupported operand type"); 1194 llvm_unreachable("Unsupported operand type");
1228 return nullptr; 1195 return nullptr;
1229 } 1196 }
1230 1197
1231 template <class Machine> 1198 template <class Machine>
1232 Operand *TargetX86Base<Machine>::hiOperand(Operand *Operand) { 1199 Operand *TargetX86Base<Machine>::hiOperand(Operand *Operand) {
1233 assert(Operand->getType() == IceType_i64 || 1200 assert(Operand->getType() == IceType_i64 ||
1234 Operand->getType() == IceType_f64); 1201 Operand->getType() == IceType_f64);
1235 if (Operand->getType() != IceType_i64 && Operand->getType() != IceType_f64) 1202 if (Operand->getType() != IceType_i64 && Operand->getType() != IceType_f64)
1236 return Operand; 1203 return Operand;
1237 if (Variable *Var = llvm::dyn_cast<Variable>(Operand)) { 1204 if (Variable *Var = llvm::dyn_cast<Variable>(Operand)) {
1238 split64(Var); 1205 split64(Var);
1239 return Var->getHi(); 1206 return Var->getHi();
1240 } 1207 }
1241 if (ConstantInteger64 *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) { 1208 if (ConstantInteger64 *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
1242 ConstantInteger32 *ConstInt = llvm::dyn_cast<ConstantInteger32>( 1209 ConstantInteger32 *ConstInt = llvm::dyn_cast<ConstantInteger32>(
1243 Ctx->getConstantInt32(static_cast<int32_t>(Const->getValue() >> 32))); 1210 Ctx->getConstantInt32(static_cast<int32_t>(Const->getValue() >> 32)));
1244 // check if we need to blind/pool the constant 1211 // check if we need to blind/pool the constant
1245 return legalize(ConstInt); 1212 return legalize(ConstInt);
1246 } 1213 }
1247 if (OperandX8632Mem *Mem = llvm::dyn_cast<OperandX8632Mem>(Operand)) { 1214 if (typename Traits::X86OperandMem *Mem =
1215 llvm::dyn_cast<typename Traits::X86OperandMem>(Operand)) {
1248 Constant *Offset = Mem->getOffset(); 1216 Constant *Offset = Mem->getOffset();
1249 if (Offset == nullptr) { 1217 if (Offset == nullptr) {
1250 Offset = Ctx->getConstantInt32(4); 1218 Offset = Ctx->getConstantInt32(4);
1251 } else if (ConstantInteger32 *IntOffset = 1219 } else if (ConstantInteger32 *IntOffset =
1252 llvm::dyn_cast<ConstantInteger32>(Offset)) { 1220 llvm::dyn_cast<ConstantInteger32>(Offset)) {
1253 Offset = Ctx->getConstantInt32(4 + IntOffset->getValue()); 1221 Offset = Ctx->getConstantInt32(4 + IntOffset->getValue());
1254 } else if (ConstantRelocatable *SymOffset = 1222 } else if (ConstantRelocatable *SymOffset =
1255 llvm::dyn_cast<ConstantRelocatable>(Offset)) { 1223 llvm::dyn_cast<ConstantRelocatable>(Offset)) {
1256 assert(!Utils::WouldOverflowAdd(SymOffset->getOffset(), 4)); 1224 assert(!Utils::WouldOverflowAdd(SymOffset->getOffset(), 4));
1257 Offset = 1225 Offset =
1258 Ctx->getConstantSym(4 + SymOffset->getOffset(), SymOffset->getName(), 1226 Ctx->getConstantSym(4 + SymOffset->getOffset(), SymOffset->getName(),
1259 SymOffset->getSuppressMangling()); 1227 SymOffset->getSuppressMangling());
1260 } 1228 }
1261 OperandX8632Mem *MemOperand = OperandX8632Mem::create( 1229 typename Traits::X86OperandMem *MemOperand = Traits::X86OperandMem::create(
1262 Func, IceType_i32, Mem->getBase(), Offset, Mem->getIndex(), 1230 Func, IceType_i32, Mem->getBase(), Offset, Mem->getIndex(),
1263 Mem->getShift(), Mem->getSegmentRegister()); 1231 Mem->getShift(), Mem->getSegmentRegister());
1264 // Test if the Offset is an eligible i32 constant for randomization and 1232 // Test if the Offset is an eligible i32 constant for randomization and
1265 // pooling. Blind/pool it if it is. Otherwise return as ordinary mem 1233 // pooling. Blind/pool it if it is. Otherwise return as ordinary mem
1266 // operand. 1234 // operand.
1267 return legalize(MemOperand); 1235 return legalize(MemOperand);
1268 } 1236 }
1269 llvm_unreachable("Unsupported operand type"); 1237 llvm_unreachable("Unsupported operand type");
1270 return nullptr; 1238 return nullptr;
1271 } 1239 }
1272 1240
1273 template <class Machine> 1241 template <class Machine>
1274 llvm::SmallBitVector 1242 llvm::SmallBitVector
1275 TargetX86Base<Machine>::getRegisterSet(RegSetMask Include, 1243 TargetX86Base<Machine>::getRegisterSet(RegSetMask Include,
1276 RegSetMask Exclude) const { 1244 RegSetMask Exclude) const {
1277 llvm::SmallBitVector Registers(Traits::RegisterSet::Reg_NUM); 1245 return Traits::getRegisterSet(Include, Exclude);
1278
1279 #define X(val, encode, name, name16, name8, scratch, preserved, stackptr, \
1280 frameptr, isI8, isInt, isFP) \
1281 if (scratch && (Include & RegSet_CallerSave)) \
1282 Registers[Traits::RegisterSet::val] = true; \
1283 if (preserved && (Include & RegSet_CalleeSave)) \
1284 Registers[Traits::RegisterSet::val] = true; \
1285 if (stackptr && (Include & RegSet_StackPointer)) \
1286 Registers[Traits::RegisterSet::val] = true; \
1287 if (frameptr && (Include & RegSet_FramePointer)) \
1288 Registers[Traits::RegisterSet::val] = true; \
1289 if (scratch && (Exclude & RegSet_CallerSave)) \
1290 Registers[Traits::RegisterSet::val] = false; \
1291 if (preserved && (Exclude & RegSet_CalleeSave)) \
1292 Registers[Traits::RegisterSet::val] = false; \
1293 if (stackptr && (Exclude & RegSet_StackPointer)) \
1294 Registers[Traits::RegisterSet::val] = false; \
1295 if (frameptr && (Exclude & RegSet_FramePointer)) \
1296 Registers[Traits::RegisterSet::val] = false;
1297
1298 REGX8632_TABLE
1299
1300 #undef X
1301
1302 return Registers;
1303 } 1246 }
1304 1247
1305 template <class Machine> 1248 template <class Machine>
1306 void TargetX86Base<Machine>::lowerAlloca(const InstAlloca *Inst) { 1249 void TargetX86Base<Machine>::lowerAlloca(const InstAlloca *Inst) {
1307 IsEbpBasedFrame = true; 1250 IsEbpBasedFrame = true;
1308 // Conservatively require the stack to be aligned. Some stack 1251 // Conservatively require the stack to be aligned. Some stack
1309 // adjustment operations implemented below assume that the stack is 1252 // adjustment operations implemented below assume that the stack is
1310 // aligned before the alloca. All the alloca code ensures that the 1253 // aligned before the alloca. All the alloca code ensures that the
1311 // stack alignment is preserved after the alloca. The stack alignment 1254 // stack alignment is preserved after the alloca. The stack alignment
1312 // restriction can be relaxed in some cases. 1255 // restriction can be relaxed in some cases.
(...skipping 102 matching lines...)
1415 return false; 1358 return false;
1416 // Limit the number of lea/shl operations for a single multiply, to 1359 // Limit the number of lea/shl operations for a single multiply, to
1417 // a somewhat arbitrary choice of 3. 1360 // a somewhat arbitrary choice of 3.
1418 const uint32_t MaxOpsForOptimizedMul = 3; 1361 const uint32_t MaxOpsForOptimizedMul = 3;
1419 if (CountOps > MaxOpsForOptimizedMul) 1362 if (CountOps > MaxOpsForOptimizedMul)
1420 return false; 1363 return false;
1421 _mov(T, Src0); 1364 _mov(T, Src0);
1422 Constant *Zero = Ctx->getConstantZero(IceType_i32); 1365 Constant *Zero = Ctx->getConstantZero(IceType_i32);
1423 for (uint32_t i = 0; i < Count9; ++i) { 1366 for (uint32_t i = 0; i < Count9; ++i) {
1424 const uint16_t Shift = 3; // log2(9-1) 1367 const uint16_t Shift = 3; // log2(9-1)
1425 _lea(T, OperandX8632Mem::create(Func, IceType_void, T, Zero, T, Shift)); 1368 _lea(T,
1369 Traits::X86OperandMem::create(Func, IceType_void, T, Zero, T, Shift));
1426 _set_dest_nonkillable(); 1370 _set_dest_nonkillable();
1427 } 1371 }
1428 for (uint32_t i = 0; i < Count5; ++i) { 1372 for (uint32_t i = 0; i < Count5; ++i) {
1429 const uint16_t Shift = 2; // log2(5-1) 1373 const uint16_t Shift = 2; // log2(5-1)
1430 _lea(T, OperandX8632Mem::create(Func, IceType_void, T, Zero, T, Shift)); 1374 _lea(T,
1375 Traits::X86OperandMem::create(Func, IceType_void, T, Zero, T, Shift));
1431 _set_dest_nonkillable(); 1376 _set_dest_nonkillable();
1432 } 1377 }
1433 for (uint32_t i = 0; i < Count3; ++i) { 1378 for (uint32_t i = 0; i < Count3; ++i) {
1434 const uint16_t Shift = 1; // log2(3-1) 1379 const uint16_t Shift = 1; // log2(3-1)
1435 _lea(T, OperandX8632Mem::create(Func, IceType_void, T, Zero, T, Shift)); 1380 _lea(T,
1381 Traits::X86OperandMem::create(Func, IceType_void, T, Zero, T, Shift));
1436 _set_dest_nonkillable(); 1382 _set_dest_nonkillable();
1437 } 1383 }
1438 if (Count2) { 1384 if (Count2) {
1439 _shl(T, Ctx->getConstantInt(Ty, Count2)); 1385 _shl(T, Ctx->getConstantInt(Ty, Count2));
1440 } 1386 }
1441 if (Src1IsNegative) 1387 if (Src1IsNegative)
1442 _neg(T); 1388 _neg(T);
1443 _mov(Dest, T); 1389 _mov(Dest, T);
1444 return true; 1390 return true;
1445 } 1391 }
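Note: a worked example of the lea-based decomposition above (my numbers, not from the patch). For a constant multiplier of 45 = 9 * 5, Count9 = 1, Count5 = 1, Count3 = Count2 = 0, so the lowering emits

  lea T, [T + T*8]   ; T *= 9, Shift = 3 = log2(9-1)
  lea T, [T + T*4]   ; T *= 5, Shift = 2 = log2(5-1)

which is two operations, inside the MaxOpsForOptimizedMul = 3 budget. A prime multiplier such as 47 has no factorization into 2, 3, 5 and 9, so this path returns false and the ordinary multiply lowering is used instead.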
(...skipping 147 matching lines...)
1593 // je L1 1539 // je L1
1594 // use(t3) 1540 // use(t3)
1595 // t3 = t2 1541 // t3 = t2
1596 // t2 = 0 1542 // t2 = 0
1597 // L1: 1543 // L1:
1598 // a.lo = t2 1544 // a.lo = t2
1599 // a.hi = t3 1545 // a.hi = t3
1600 Variable *T_1 = nullptr, *T_2 = nullptr, *T_3 = nullptr; 1546 Variable *T_1 = nullptr, *T_2 = nullptr, *T_3 = nullptr;
1601 Constant *BitTest = Ctx->getConstantInt32(0x20); 1547 Constant *BitTest = Ctx->getConstantInt32(0x20);
1602 Constant *Zero = Ctx->getConstantZero(IceType_i32); 1548 Constant *Zero = Ctx->getConstantZero(IceType_i32);
1603 InstX8632Label *Label = InstX8632Label::create(Func, this); 1549 typename Traits::Insts::Label *Label =
1550 Traits::Insts::Label::create(Func, this);
1604 _mov(T_1, Src1Lo, Traits::RegisterSet::Reg_ecx); 1551 _mov(T_1, Src1Lo, Traits::RegisterSet::Reg_ecx);
1605 _mov(T_2, Src0Lo); 1552 _mov(T_2, Src0Lo);
1606 _mov(T_3, Src0Hi); 1553 _mov(T_3, Src0Hi);
1607 _shld(T_3, T_2, T_1); 1554 _shld(T_3, T_2, T_1);
1608 _shl(T_2, T_1); 1555 _shl(T_2, T_1);
1609 _test(T_1, BitTest); 1556 _test(T_1, BitTest);
1610 _br(Traits::Cond::Br_e, Label); 1557 _br(Traits::Cond::Br_e, Label);
1611 // T_2 and T_3 are being assigned again because of the 1558 // T_2 and T_3 are being assigned again because of the
1612 // intra-block control flow, so we need the _mov_nonkillable 1559 // intra-block control flow, so we need the _mov_nonkillable
1613 // variant to avoid liveness problems. 1560 // variant to avoid liveness problems.
(...skipping 14 matching lines...)
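Note: the test against 0x20 above is needed because x86 shl/shld/shrd use only the low five bits of %cl, so 64-bit shifts by 32..63 require the explicit word swap shown in the pseudo-code comments. Worked example (mine): for a left shift by 40, %cl = 40 and the shifts act on 40 & 31 = 8, giving t2 = lo << 8 and t3 = (hi << 8) | (lo >> 24); since bit 5 of the count is set, the fixup runs, so the high word becomes t3 = t2 = lo << 8 and the low word becomes t2 = 0, which is exactly the result of a true 64-bit shift by 40.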
1628 // je L1 1575 // je L1
1629 // use(t2) 1576 // use(t2)
1630 // t2 = t3 1577 // t2 = t3
1631 // t3 = 0 1578 // t3 = 0
1632 // L1: 1579 // L1:
1633 // a.lo = t2 1580 // a.lo = t2
1634 // a.hi = t3 1581 // a.hi = t3
1635 Variable *T_1 = nullptr, *T_2 = nullptr, *T_3 = nullptr; 1582 Variable *T_1 = nullptr, *T_2 = nullptr, *T_3 = nullptr;
1636 Constant *BitTest = Ctx->getConstantInt32(0x20); 1583 Constant *BitTest = Ctx->getConstantInt32(0x20);
1637 Constant *Zero = Ctx->getConstantZero(IceType_i32); 1584 Constant *Zero = Ctx->getConstantZero(IceType_i32);
1638 InstX8632Label *Label = InstX8632Label::create(Func, this); 1585 typename Traits::Insts::Label *Label =
1586 Traits::Insts::Label::create(Func, this);
1639 _mov(T_1, Src1Lo, Traits::RegisterSet::Reg_ecx); 1587 _mov(T_1, Src1Lo, Traits::RegisterSet::Reg_ecx);
1640 _mov(T_2, Src0Lo); 1588 _mov(T_2, Src0Lo);
1641 _mov(T_3, Src0Hi); 1589 _mov(T_3, Src0Hi);
1642 _shrd(T_2, T_3, T_1); 1590 _shrd(T_2, T_3, T_1);
1643 _shr(T_3, T_1); 1591 _shr(T_3, T_1);
1644 _test(T_1, BitTest); 1592 _test(T_1, BitTest);
1645 _br(Traits::Cond::Br_e, Label); 1593 _br(Traits::Cond::Br_e, Label);
1646 // T_2 and T_3 are being assigned again because of the 1594 // T_2 and T_3 are being assigned again because of the
1647 // intra-block control flow, so we need the _mov_nonkillable 1595 // intra-block control flow, so we need the _mov_nonkillable
1648 // variant to avoid liveness problems. 1596 // variant to avoid liveness problems.
(...skipping 14 matching lines...)
1663 // je L1 1611 // je L1
1664 // use(t2) 1612 // use(t2)
1665 // t2 = t3 1613 // t2 = t3
1666 // t3 = sar t3, 0x1f 1614 // t3 = sar t3, 0x1f
1667 // L1: 1615 // L1:
1668 // a.lo = t2 1616 // a.lo = t2
1669 // a.hi = t3 1617 // a.hi = t3
1670 Variable *T_1 = nullptr, *T_2 = nullptr, *T_3 = nullptr; 1618 Variable *T_1 = nullptr, *T_2 = nullptr, *T_3 = nullptr;
1671 Constant *BitTest = Ctx->getConstantInt32(0x20); 1619 Constant *BitTest = Ctx->getConstantInt32(0x20);
1672 Constant *SignExtend = Ctx->getConstantInt32(0x1f); 1620 Constant *SignExtend = Ctx->getConstantInt32(0x1f);
1673 InstX8632Label *Label = InstX8632Label::create(Func, this); 1621 typename Traits::Insts::Label *Label =
1622 Traits::Insts::Label::create(Func, this);
1674 _mov(T_1, Src1Lo, Traits::RegisterSet::Reg_ecx); 1623 _mov(T_1, Src1Lo, Traits::RegisterSet::Reg_ecx);
1675 _mov(T_2, Src0Lo); 1624 _mov(T_2, Src0Lo);
1676 _mov(T_3, Src0Hi); 1625 _mov(T_3, Src0Hi);
1677 _shrd(T_2, T_3, T_1); 1626 _shrd(T_2, T_3, T_1);
1678 _sar(T_3, T_1); 1627 _sar(T_3, T_1);
1679 _test(T_1, BitTest); 1628 _test(T_1, BitTest);
1680 _br(Traits::Cond::Br_e, Label); 1629 _br(Traits::Cond::Br_e, Label);
1681 // T_2 and T_3 are being assigned again because of the 1630 // T_2 and T_3 are being assigned again because of the
1682 // intra-block control flow, so T_2 needs the _mov_nonkillable 1631 // intra-block control flow, so T_2 needs the _mov_nonkillable
1683 // variant to avoid liveness problems. T_3 doesn't need special 1632 // variant to avoid liveness problems. T_3 doesn't need special
(...skipping 17 matching lines...)
1701 case InstArithmetic::Srem: 1650 case InstArithmetic::Srem:
1702 llvm_unreachable("Call-helper-involved instruction for i64 type \ 1651 llvm_unreachable("Call-helper-involved instruction for i64 type \
1703 should have already been handled before"); 1652 should have already been handled before");
1704 break; 1653 break;
1705 } 1654 }
1706 return; 1655 return;
1707 } 1656 }
1708 if (isVectorType(Dest->getType())) { 1657 if (isVectorType(Dest->getType())) {
1709 // TODO: Trap on integer divide and integer modulo by zero. 1658 // TODO: Trap on integer divide and integer modulo by zero.
1710 // See: https://code.google.com/p/nativeclient/issues/detail?id=3899 1659 // See: https://code.google.com/p/nativeclient/issues/detail?id=3899
1711 if (llvm::isa<OperandX8632Mem>(Src1)) 1660 if (llvm::isa<typename Traits::X86OperandMem>(Src1))
1712 Src1 = legalizeToVar(Src1); 1661 Src1 = legalizeToVar(Src1);
1713 switch (Inst->getOp()) { 1662 switch (Inst->getOp()) {
1714 case InstArithmetic::_num: 1663 case InstArithmetic::_num:
1715 llvm_unreachable("Unknown arithmetic operator"); 1664 llvm_unreachable("Unknown arithmetic operator");
1716 break; 1665 break;
1717 case InstArithmetic::Add: { 1666 case InstArithmetic::Add: {
1718 Variable *T = makeReg(Dest->getType()); 1667 Variable *T = makeReg(Dest->getType());
1719 _movp(T, Src0); 1668 _movp(T, Src0);
1720 _padd(T, Src1); 1669 _padd(T, Src1);
1721 _movp(Dest, T); 1670 _movp(Dest, T);
(...skipping 478 matching lines...)
2200 XmmArgs.push_back(Arg); 2149 XmmArgs.push_back(Arg);
2201 } else { 2150 } else {
2202 StackArgs.push_back(Arg); 2151 StackArgs.push_back(Arg);
2203 if (isVectorType(Arg->getType())) { 2152 if (isVectorType(Arg->getType())) {
2204 ParameterAreaSizeBytes = 2153 ParameterAreaSizeBytes =
2205 Traits::applyStackAlignment(ParameterAreaSizeBytes); 2154 Traits::applyStackAlignment(ParameterAreaSizeBytes);
2206 } 2155 }
2207 Variable *esp = 2156 Variable *esp =
2208 Func->getTarget()->getPhysicalRegister(Traits::RegisterSet::Reg_esp); 2157 Func->getTarget()->getPhysicalRegister(Traits::RegisterSet::Reg_esp);
2209 Constant *Loc = Ctx->getConstantInt32(ParameterAreaSizeBytes); 2158 Constant *Loc = Ctx->getConstantInt32(ParameterAreaSizeBytes);
2210 StackArgLocations.push_back(OperandX8632Mem::create(Func, Ty, esp, Loc)); 2159 StackArgLocations.push_back(
2160 Traits::X86OperandMem::create(Func, Ty, esp, Loc));
2211 ParameterAreaSizeBytes += typeWidthInBytesOnStack(Arg->getType()); 2161 ParameterAreaSizeBytes += typeWidthInBytesOnStack(Arg->getType());
2212 } 2162 }
2213 } 2163 }
2214 2164
2215 // Adjust the parameter area so that the stack is aligned. It is 2165 // Adjust the parameter area so that the stack is aligned. It is
2216 // assumed that the stack is already aligned at the start of the 2166 // assumed that the stack is already aligned at the start of the
2217 // calling sequence. 2167 // calling sequence.
2218 ParameterAreaSizeBytes = Traits::applyStackAlignment(ParameterAreaSizeBytes); 2168 ParameterAreaSizeBytes = Traits::applyStackAlignment(ParameterAreaSizeBytes);
2219 2169
2220 // Subtract the appropriate amount for the argument area. This also 2170 // Subtract the appropriate amount for the argument area. This also
(...skipping 76 matching lines...)
2297 } else { 2247 } else {
2298 Variable *CallTargetVar = nullptr; 2248 Variable *CallTargetVar = nullptr;
2299 _mov(CallTargetVar, CallTarget); 2249 _mov(CallTargetVar, CallTarget);
2300 _bundle_lock(InstBundleLock::Opt_AlignToEnd); 2250 _bundle_lock(InstBundleLock::Opt_AlignToEnd);
2301 const SizeT BundleSize = 2251 const SizeT BundleSize =
2302 1 << Func->template getAssembler<>()->getBundleAlignLog2Bytes(); 2252 1 << Func->template getAssembler<>()->getBundleAlignLog2Bytes();
2303 _and(CallTargetVar, Ctx->getConstantInt32(~(BundleSize - 1))); 2253 _and(CallTargetVar, Ctx->getConstantInt32(~(BundleSize - 1)));
2304 CallTarget = CallTargetVar; 2254 CallTarget = CallTargetVar;
2305 } 2255 }
2306 } 2256 }
2307 Inst *NewCall = InstX8632Call::create(Func, ReturnReg, CallTarget); 2257 Inst *NewCall = Traits::Insts::Call::create(Func, ReturnReg, CallTarget);
2308 Context.insert(NewCall); 2258 Context.insert(NewCall);
2309 if (NeedSandboxing) 2259 if (NeedSandboxing)
2310 _bundle_unlock(); 2260 _bundle_unlock();
2311 if (ReturnRegHi) 2261 if (ReturnRegHi)
2312 Context.insert(InstFakeDef::create(Func, ReturnRegHi)); 2262 Context.insert(InstFakeDef::create(Func, ReturnRegHi));
2313 2263
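Note: on the sandboxing path above, the indirect call target is masked down to a bundle boundary before the call. Assuming the usual 32-byte NaCl bundles (the real size comes from the assembler's getBundleAlignLog2Bytes()), BundleSize = 1 << 5 = 32 and ~(BundleSize - 1) = 0xffffffe0, so the _and clears the low five bits of CallTargetVar; the surrounding _bundle_lock(Opt_AlignToEnd) / _bundle_unlock pair then places the masked call so it ends exactly at a bundle boundary.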
2314 // Add the appropriate offset to esp. The call instruction takes care 2264 // Add the appropriate offset to esp. The call instruction takes care
2315 // of resetting the stack offset during emission. 2265 // of resetting the stack offset during emission.
2316 if (ParameterAreaSizeBytes) { 2266 if (ParameterAreaSizeBytes) {
2317 Variable *esp = 2267 Variable *esp =
(...skipping 206 matching lines...)
2524 _and(T, Ctx->getConstantInt1(1)); 2474 _and(T, Ctx->getConstantInt1(1));
2525 _mov(Dest, T); 2475 _mov(Dest, T);
2526 } 2476 }
2527 break; 2477 break;
2528 } 2478 }
2529 case InstCast::Fptrunc: 2479 case InstCast::Fptrunc:
2530 case InstCast::Fpext: { 2480 case InstCast::Fpext: {
2531 Operand *Src0RM = legalize(Inst->getSrc(0), Legal_Reg | Legal_Mem); 2481 Operand *Src0RM = legalize(Inst->getSrc(0), Legal_Reg | Legal_Mem);
2532 // t1 = cvt Src0RM; Dest = t1 2482 // t1 = cvt Src0RM; Dest = t1
2533 Variable *T = makeReg(Dest->getType()); 2483 Variable *T = makeReg(Dest->getType());
2534 _cvt(T, Src0RM, InstX8632Cvt::Float2float); 2484 _cvt(T, Src0RM, Traits::Insts::Cvt::Float2float);
2535 _mov(Dest, T); 2485 _mov(Dest, T);
2536 break; 2486 break;
2537 } 2487 }
2538 case InstCast::Fptosi: 2488 case InstCast::Fptosi:
2539 if (isVectorType(Dest->getType())) { 2489 if (isVectorType(Dest->getType())) {
2540 assert(Dest->getType() == IceType_v4i32 && 2490 assert(Dest->getType() == IceType_v4i32 &&
2541 Inst->getSrc(0)->getType() == IceType_v4f32); 2491 Inst->getSrc(0)->getType() == IceType_v4f32);
2542 Operand *Src0RM = legalize(Inst->getSrc(0), Legal_Reg | Legal_Mem); 2492 Operand *Src0RM = legalize(Inst->getSrc(0), Legal_Reg | Legal_Mem);
2543 if (llvm::isa<OperandX8632Mem>(Src0RM)) 2493 if (llvm::isa<typename Traits::X86OperandMem>(Src0RM))
2544 Src0RM = legalizeToVar(Src0RM); 2494 Src0RM = legalizeToVar(Src0RM);
2545 Variable *T = makeReg(Dest->getType()); 2495 Variable *T = makeReg(Dest->getType());
2546 _cvt(T, Src0RM, InstX8632Cvt::Tps2dq); 2496 _cvt(T, Src0RM, Traits::Insts::Cvt::Tps2dq);
2547 _movp(Dest, T); 2497 _movp(Dest, T);
2548 } else if (Dest->getType() == IceType_i64) { 2498 } else if (Dest->getType() == IceType_i64) {
2549 // Use a helper for converting floating-point values to 64-bit 2499 // Use a helper for converting floating-point values to 64-bit
2550 // integers. SSE2 appears to have no way to convert from xmm 2500 // integers. SSE2 appears to have no way to convert from xmm
2551 // registers to something like the edx:eax register pair, and 2501 // registers to something like the edx:eax register pair, and
2552 // gcc and clang both want to use x87 instructions complete with 2502 // gcc and clang both want to use x87 instructions complete with
2553 // temporary manipulation of the status word. This helper is 2503 // temporary manipulation of the status word. This helper is
2554 // not needed for x86-64. 2504 // not needed for x86-64.
2555 split64(Dest); 2505 split64(Dest);
2556 const SizeT MaxSrcs = 1; 2506 const SizeT MaxSrcs = 1;
2557 Type SrcType = Inst->getSrc(0)->getType(); 2507 Type SrcType = Inst->getSrc(0)->getType();
2558 InstCall *Call = 2508 InstCall *Call =
2559 makeHelperCall(isFloat32Asserting32Or64(SrcType) ? H_fptosi_f32_i64 2509 makeHelperCall(isFloat32Asserting32Or64(SrcType) ? H_fptosi_f32_i64
2560 : H_fptosi_f64_i64, 2510 : H_fptosi_f64_i64,
2561 Dest, MaxSrcs); 2511 Dest, MaxSrcs);
2562 Call->addArg(Inst->getSrc(0)); 2512 Call->addArg(Inst->getSrc(0));
2563 lowerCall(Call); 2513 lowerCall(Call);
2564 } else { 2514 } else {
2565 Operand *Src0RM = legalize(Inst->getSrc(0), Legal_Reg | Legal_Mem); 2515 Operand *Src0RM = legalize(Inst->getSrc(0), Legal_Reg | Legal_Mem);
2566 // t1.i32 = cvt Src0RM; t2.dest_type = t1; Dest = t2.dest_type 2516 // t1.i32 = cvt Src0RM; t2.dest_type = t1; Dest = t2.dest_type
2567 Variable *T_1 = makeReg(IceType_i32); 2517 Variable *T_1 = makeReg(IceType_i32);
2568 Variable *T_2 = makeReg(Dest->getType()); 2518 Variable *T_2 = makeReg(Dest->getType());
2569 _cvt(T_1, Src0RM, InstX8632Cvt::Tss2si); 2519 _cvt(T_1, Src0RM, Traits::Insts::Cvt::Tss2si);
2570 _mov(T_2, T_1); // T_1 and T_2 may have different integer types 2520 _mov(T_2, T_1); // T_1 and T_2 may have different integer types
2571 if (Dest->getType() == IceType_i1) 2521 if (Dest->getType() == IceType_i1)
2572 _and(T_2, Ctx->getConstantInt1(1)); 2522 _and(T_2, Ctx->getConstantInt1(1));
2573 _mov(Dest, T_2); 2523 _mov(Dest, T_2);
2574 } 2524 }
2575 break; 2525 break;
2576 case InstCast::Fptoui: 2526 case InstCast::Fptoui:
2577 if (isVectorType(Dest->getType())) { 2527 if (isVectorType(Dest->getType())) {
2578 assert(Dest->getType() == IceType_v4i32 && 2528 assert(Dest->getType() == IceType_v4i32 &&
2579 Inst->getSrc(0)->getType() == IceType_v4f32); 2529 Inst->getSrc(0)->getType() == IceType_v4f32);
(...skipping 18 matching lines...)
2598 } 2548 }
2599 InstCall *Call = makeHelperCall(TargetString, Dest, MaxSrcs); 2549 InstCall *Call = makeHelperCall(TargetString, Dest, MaxSrcs);
2600 Call->addArg(Inst->getSrc(0)); 2550 Call->addArg(Inst->getSrc(0));
2601 lowerCall(Call); 2551 lowerCall(Call);
2602 return; 2552 return;
2603 } else { 2553 } else {
2604 Operand *Src0RM = legalize(Inst->getSrc(0), Legal_Reg | Legal_Mem); 2554 Operand *Src0RM = legalize(Inst->getSrc(0), Legal_Reg | Legal_Mem);
2605 // t1.i32 = cvt Src0RM; t2.dest_type = t1; Dest = t2.dest_type 2555 // t1.i32 = cvt Src0RM; t2.dest_type = t1; Dest = t2.dest_type
2606 Variable *T_1 = makeReg(IceType_i32); 2556 Variable *T_1 = makeReg(IceType_i32);
2607 Variable *T_2 = makeReg(Dest->getType()); 2557 Variable *T_2 = makeReg(Dest->getType());
2608 _cvt(T_1, Src0RM, InstX8632Cvt::Tss2si); 2558 _cvt(T_1, Src0RM, Traits::Insts::Cvt::Tss2si);
2609 _mov(T_2, T_1); // T_1 and T_2 may have different integer types 2559 _mov(T_2, T_1); // T_1 and T_2 may have different integer types
2610 if (Dest->getType() == IceType_i1) 2560 if (Dest->getType() == IceType_i1)
2611 _and(T_2, Ctx->getConstantInt1(1)); 2561 _and(T_2, Ctx->getConstantInt1(1));
2612 _mov(Dest, T_2); 2562 _mov(Dest, T_2);
2613 } 2563 }
2614 break; 2564 break;
2615 case InstCast::Sitofp: 2565 case InstCast::Sitofp:
2616 if (isVectorType(Dest->getType())) { 2566 if (isVectorType(Dest->getType())) {
2617 assert(Dest->getType() == IceType_v4f32 && 2567 assert(Dest->getType() == IceType_v4f32 &&
2618 Inst->getSrc(0)->getType() == IceType_v4i32); 2568 Inst->getSrc(0)->getType() == IceType_v4i32);
2619 Operand *Src0RM = legalize(Inst->getSrc(0), Legal_Reg | Legal_Mem); 2569 Operand *Src0RM = legalize(Inst->getSrc(0), Legal_Reg | Legal_Mem);
2620 if (llvm::isa<OperandX8632Mem>(Src0RM)) 2570 if (llvm::isa<typename Traits::X86OperandMem>(Src0RM))
2621 Src0RM = legalizeToVar(Src0RM); 2571 Src0RM = legalizeToVar(Src0RM);
2622 Variable *T = makeReg(Dest->getType()); 2572 Variable *T = makeReg(Dest->getType());
2623 _cvt(T, Src0RM, InstX8632Cvt::Dq2ps); 2573 _cvt(T, Src0RM, Traits::Insts::Cvt::Dq2ps);
2624 _movp(Dest, T); 2574 _movp(Dest, T);
2625 } else if (Inst->getSrc(0)->getType() == IceType_i64) { 2575 } else if (Inst->getSrc(0)->getType() == IceType_i64) {
2626 // Use a helper for x86-32. 2576 // Use a helper for x86-32.
2627 const SizeT MaxSrcs = 1; 2577 const SizeT MaxSrcs = 1;
2628 Type DestType = Dest->getType(); 2578 Type DestType = Dest->getType();
2629 InstCall *Call = 2579 InstCall *Call =
2630 makeHelperCall(isFloat32Asserting32Or64(DestType) ? H_sitofp_i64_f32 2580 makeHelperCall(isFloat32Asserting32Or64(DestType) ? H_sitofp_i64_f32
2631 : H_sitofp_i64_f64, 2581 : H_sitofp_i64_f64,
2632 Dest, MaxSrcs); 2582 Dest, MaxSrcs);
2633 // TODO: Call the correct compiler-rt helper function. 2583 // TODO: Call the correct compiler-rt helper function.
2634 Call->addArg(Inst->getSrc(0)); 2584 Call->addArg(Inst->getSrc(0));
2635 lowerCall(Call); 2585 lowerCall(Call);
2636 return; 2586 return;
2637 } else { 2587 } else {
2638 Operand *Src0RM = legalize(Inst->getSrc(0), Legal_Reg | Legal_Mem); 2588 Operand *Src0RM = legalize(Inst->getSrc(0), Legal_Reg | Legal_Mem);
2639 // Sign-extend the operand. 2589 // Sign-extend the operand.
2640 // t1.i32 = movsx Src0RM; t2 = Cvt t1.i32; Dest = t2 2590 // t1.i32 = movsx Src0RM; t2 = Cvt t1.i32; Dest = t2
2641 Variable *T_1 = makeReg(IceType_i32); 2591 Variable *T_1 = makeReg(IceType_i32);
2642 Variable *T_2 = makeReg(Dest->getType()); 2592 Variable *T_2 = makeReg(Dest->getType());
2643 if (Src0RM->getType() == IceType_i32) 2593 if (Src0RM->getType() == IceType_i32)
2644 _mov(T_1, Src0RM); 2594 _mov(T_1, Src0RM);
2645 else 2595 else
2646 _movsx(T_1, Src0RM); 2596 _movsx(T_1, Src0RM);
2647 _cvt(T_2, T_1, InstX8632Cvt::Si2ss); 2597 _cvt(T_2, T_1, Traits::Insts::Cvt::Si2ss);
2648 _mov(Dest, T_2); 2598 _mov(Dest, T_2);
2649 } 2599 }
2650 break; 2600 break;
2651 case InstCast::Uitofp: { 2601 case InstCast::Uitofp: {
2652 Operand *Src0 = Inst->getSrc(0); 2602 Operand *Src0 = Inst->getSrc(0);
2653 if (isVectorType(Src0->getType())) { 2603 if (isVectorType(Src0->getType())) {
2654 assert(Dest->getType() == IceType_v4f32 && 2604 assert(Dest->getType() == IceType_v4f32 &&
2655 Src0->getType() == IceType_v4i32); 2605 Src0->getType() == IceType_v4i32);
2656 const SizeT MaxSrcs = 1; 2606 const SizeT MaxSrcs = 1;
2657 InstCall *Call = makeHelperCall(H_uitofp_4xi32_4xf32, Dest, MaxSrcs); 2607 InstCall *Call = makeHelperCall(H_uitofp_4xi32_4xf32, Dest, MaxSrcs);
(...skipping 20 matching lines...)
2678 } else { 2628 } else {
2679 Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem); 2629 Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
2680 // Zero-extend the operand. 2630 // Zero-extend the operand.
2681 // t1.i32 = movzx Src0RM; t2 = Cvt t1.i32; Dest = t2 2631 // t1.i32 = movzx Src0RM; t2 = Cvt t1.i32; Dest = t2
2682 Variable *T_1 = makeReg(IceType_i32); 2632 Variable *T_1 = makeReg(IceType_i32);
2683 Variable *T_2 = makeReg(Dest->getType()); 2633 Variable *T_2 = makeReg(Dest->getType());
2684 if (Src0RM->getType() == IceType_i32) 2634 if (Src0RM->getType() == IceType_i32)
2685 _mov(T_1, Src0RM); 2635 _mov(T_1, Src0RM);
2686 else 2636 else
2687 _movzx(T_1, Src0RM); 2637 _movzx(T_1, Src0RM);
2688 _cvt(T_2, T_1, InstX8632Cvt::Si2ss); 2638 _cvt(T_2, T_1, Traits::Insts::Cvt::Si2ss);
2689 _mov(Dest, T_2); 2639 _mov(Dest, T_2);
2690 } 2640 }
2691 break; 2641 break;
2692 } 2642 }
2693 case InstCast::Bitcast: { 2643 case InstCast::Bitcast: {
2694 Operand *Src0 = Inst->getSrc(0); 2644 Operand *Src0 = Inst->getSrc(0);
2695 if (Dest->getType() == Src0->getType()) { 2645 if (Dest->getType() == Src0->getType()) {
2696 InstAssign *Assign = InstAssign::create(Func, Dest, Src0); 2646 InstAssign *Assign = InstAssign::create(Func, Dest, Src0);
2697 lowerAssign(Assign); 2647 lowerAssign(Assign);
2698 return; 2648 return;
(...skipping 21 matching lines...)
2720 (void)DestType; 2670 (void)DestType;
2721 assert((DestType == IceType_i32 && SrcType == IceType_f32) || 2671 assert((DestType == IceType_i32 && SrcType == IceType_f32) ||
2722 (DestType == IceType_f32 && SrcType == IceType_i32)); 2672 (DestType == IceType_f32 && SrcType == IceType_i32));
2723 // a.i32 = bitcast b.f32 ==> 2673 // a.i32 = bitcast b.f32 ==>
2724 // t.f32 = b.f32 2674 // t.f32 = b.f32
2725 // s.f32 = spill t.f32 2675 // s.f32 = spill t.f32
2726 // a.i32 = s.f32 2676 // a.i32 = s.f32
2727 Variable *T = nullptr; 2677 Variable *T = nullptr;
2728 // TODO: Should be able to force a spill setup by calling legalize() with 2678 // TODO: Should be able to force a spill setup by calling legalize() with
2729 // Legal_Mem and not Legal_Reg or Legal_Imm. 2679 // Legal_Mem and not Legal_Reg or Legal_Imm.
2730 SpillVariable *SpillVar = 2680 typename Traits::SpillVariable *SpillVar =
2731 Func->template makeVariable<SpillVariable>(SrcType); 2681 Func->template makeVariable<typename Traits::SpillVariable>(SrcType);
2732 SpillVar->setLinkedTo(Dest); 2682 SpillVar->setLinkedTo(Dest);
2733 Variable *Spill = SpillVar; 2683 Variable *Spill = SpillVar;
2734 Spill->setWeight(RegWeight::Zero); 2684 Spill->setWeight(RegWeight::Zero);
2735 _mov(T, Src0RM); 2685 _mov(T, Src0RM);
2736 _mov(Spill, T); 2686 _mov(Spill, T);
2737 _mov(Dest, Spill); 2687 _mov(Dest, Spill);
2738 } break; 2688 } break;
2739 case IceType_i64: { 2689 case IceType_i64: {
2740 Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem); 2690 Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
2741 assert(Src0RM->getType() == IceType_f64); 2691 assert(Src0RM->getType() == IceType_f64);
2742 // a.i64 = bitcast b.f64 ==> 2692 // a.i64 = bitcast b.f64 ==>
2743 // s.f64 = spill b.f64 2693 // s.f64 = spill b.f64
2744 // t_lo.i32 = lo(s.f64) 2694 // t_lo.i32 = lo(s.f64)
2745 // a_lo.i32 = t_lo.i32 2695 // a_lo.i32 = t_lo.i32
2746 // t_hi.i32 = hi(s.f64) 2696 // t_hi.i32 = hi(s.f64)
2747 // a_hi.i32 = t_hi.i32 2697 // a_hi.i32 = t_hi.i32
2748 Operand *SpillLo, *SpillHi; 2698 Operand *SpillLo, *SpillHi;
2749 if (auto *Src0Var = llvm::dyn_cast<Variable>(Src0RM)) { 2699 if (auto *Src0Var = llvm::dyn_cast<Variable>(Src0RM)) {
2750 SpillVariable *SpillVar = 2700 typename Traits::SpillVariable *SpillVar =
2751 Func->template makeVariable<SpillVariable>(IceType_f64); 2701 Func->template makeVariable<typename Traits::SpillVariable>(
2702 IceType_f64);
2752 SpillVar->setLinkedTo(Src0Var); 2703 SpillVar->setLinkedTo(Src0Var);
2753 Variable *Spill = SpillVar; 2704 Variable *Spill = SpillVar;
2754 Spill->setWeight(RegWeight::Zero); 2705 Spill->setWeight(RegWeight::Zero);
2755 _movq(Spill, Src0RM); 2706 _movq(Spill, Src0RM);
2756 SpillLo = VariableSplit::create(Func, Spill, VariableSplit::Low); 2707 SpillLo = Traits::VariableSplit::create(Func, Spill,
2757 SpillHi = VariableSplit::create(Func, Spill, VariableSplit::High); 2708 Traits::VariableSplit::Low);
2709 SpillHi = Traits::VariableSplit::create(Func, Spill,
2710 Traits::VariableSplit::High);
2758 } else { 2711 } else {
2759 SpillLo = loOperand(Src0RM); 2712 SpillLo = loOperand(Src0RM);
2760 SpillHi = hiOperand(Src0RM); 2713 SpillHi = hiOperand(Src0RM);
2761 } 2714 }
2762 2715
2763 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest)); 2716 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest));
2764 Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest)); 2717 Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest));
2765 Variable *T_Lo = makeReg(IceType_i32); 2718 Variable *T_Lo = makeReg(IceType_i32);
2766 Variable *T_Hi = makeReg(IceType_i32); 2719 Variable *T_Hi = makeReg(IceType_i32);
2767 2720
2768 _mov(T_Lo, SpillLo); 2721 _mov(T_Lo, SpillLo);
2769 _mov(DestLo, T_Lo); 2722 _mov(DestLo, T_Lo);
2770 _mov(T_Hi, SpillHi); 2723 _mov(T_Hi, SpillHi);
2771 _mov(DestHi, T_Hi); 2724 _mov(DestHi, T_Hi);
2772 } break; 2725 } break;
2773 case IceType_f64: { 2726 case IceType_f64: {
2774 Src0 = legalize(Src0); 2727 Src0 = legalize(Src0);
2775 assert(Src0->getType() == IceType_i64); 2728 assert(Src0->getType() == IceType_i64);
2776 if (llvm::isa<OperandX8632Mem>(Src0)) { 2729 if (llvm::isa<typename Traits::X86OperandMem>(Src0)) {
2777 Variable *T = Func->template makeVariable(Dest->getType()); 2730 Variable *T = Func->template makeVariable(Dest->getType());
2778 _movq(T, Src0); 2731 _movq(T, Src0);
2779 _movq(Dest, T); 2732 _movq(Dest, T);
2780 break; 2733 break;
2781 } 2734 }
2782 // a.f64 = bitcast b.i64 ==> 2735 // a.f64 = bitcast b.i64 ==>
2783 // t_lo.i32 = b_lo.i32 2736 // t_lo.i32 = b_lo.i32
2784 // FakeDef(s.f64) 2737 // FakeDef(s.f64)
2785 // lo(s.f64) = t_lo.i32 2738 // lo(s.f64) = t_lo.i32
2786 // t_hi.i32 = b_hi.i32 2739 // t_hi.i32 = b_hi.i32
2787 // hi(s.f64) = t_hi.i32 2740 // hi(s.f64) = t_hi.i32
2788 // a.f64 = s.f64 2741 // a.f64 = s.f64
2789 SpillVariable *SpillVar = 2742 typename Traits::SpillVariable *SpillVar =
2790 Func->template makeVariable<SpillVariable>(IceType_f64); 2743 Func->template makeVariable<typename Traits::SpillVariable>(
2744 IceType_f64);
2791 SpillVar->setLinkedTo(Dest); 2745 SpillVar->setLinkedTo(Dest);
2792 Variable *Spill = SpillVar; 2746 Variable *Spill = SpillVar;
2793 Spill->setWeight(RegWeight::Zero); 2747 Spill->setWeight(RegWeight::Zero);
2794 2748
2795 Variable *T_Lo = nullptr, *T_Hi = nullptr; 2749 Variable *T_Lo = nullptr, *T_Hi = nullptr;
2796 VariableSplit *SpillLo = 2750 typename Traits::VariableSplit *SpillLo = Traits::VariableSplit::create(
2797 VariableSplit::create(Func, Spill, VariableSplit::Low); 2751 Func, Spill, Traits::VariableSplit::Low);
2798 VariableSplit *SpillHi = 2752 typename Traits::VariableSplit *SpillHi = Traits::VariableSplit::create(
2799 VariableSplit::create(Func, Spill, VariableSplit::High); 2753 Func, Spill, Traits::VariableSplit::High);
2800 _mov(T_Lo, loOperand(Src0)); 2754 _mov(T_Lo, loOperand(Src0));
2801 // Technically, the Spill is defined after the _store happens, but 2755 // Technically, the Spill is defined after the _store happens, but
2802 // SpillLo is considered a "use" of Spill so define Spill before it 2756 // SpillLo is considered a "use" of Spill so define Spill before it
2803 // is used. 2757 // is used.
2804 Context.insert(InstFakeDef::create(Func, Spill)); 2758 Context.insert(InstFakeDef::create(Func, Spill));
2805 _store(T_Lo, SpillLo); 2759 _store(T_Lo, SpillLo);
2806 _mov(T_Hi, hiOperand(Src0)); 2760 _mov(T_Hi, hiOperand(Src0));
2807 _store(T_Hi, SpillHi); 2761 _store(T_Hi, SpillHi);
2808 _movq(Dest, Spill); 2762 _movq(Dest, Spill);
2809 } break; 2763 } break;
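
A minimal illustrative sketch (not part of the patch), assuming only standard C++: the bitcast lowerings above round-trip the value through a stack slot, which is the same effect as a memcpy between differently typed locals.

    // Illustrative sketch only, not from the patch: scalar analogue of the
    // spill-through-memory bitcast used above.
    #include <cstdint>
    #include <cstring>

    static uint64_t bitcastF64ToI64(double D) {
      uint64_t Bits;
      static_assert(sizeof(Bits) == sizeof(D), "f64 and i64 must match in size");
      std::memcpy(&Bits, &D, sizeof(Bits)); // spill D, then reload the two i32 halves
      return Bits;
    }
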
(...skipping 79 matching lines...)
2889 // Spill the value to a stack slot and do the extraction in memory. 2843 // Spill the value to a stack slot and do the extraction in memory.
2890 // 2844 //
2891 // TODO(wala): use legalize(SourceVectNotLegalized, Legal_Mem) when 2845 // TODO(wala): use legalize(SourceVectNotLegalized, Legal_Mem) when
2892 // support for legalizing to mem is implemented. 2846 // support for legalizing to mem is implemented.
2893 Variable *Slot = Func->template makeVariable(Ty); 2847 Variable *Slot = Func->template makeVariable(Ty);
2894 Slot->setWeight(RegWeight::Zero); 2848 Slot->setWeight(RegWeight::Zero);
2895 _movp(Slot, legalizeToVar(SourceVectNotLegalized)); 2849 _movp(Slot, legalizeToVar(SourceVectNotLegalized));
2896 2850
2897 // Compute the location of the element in memory. 2851 // Compute the location of the element in memory.
2898 unsigned Offset = Index * typeWidthInBytes(InVectorElementTy); 2852 unsigned Offset = Index * typeWidthInBytes(InVectorElementTy);
2899 OperandX8632Mem *Loc = 2853 typename Traits::X86OperandMem *Loc =
2900 getMemoryOperandForStackSlot(InVectorElementTy, Slot, Offset); 2854 getMemoryOperandForStackSlot(InVectorElementTy, Slot, Offset);
2901 _mov(ExtractedElementR, Loc); 2855 _mov(ExtractedElementR, Loc);
2902 } 2856 }
2903 2857
2904 if (ElementTy == IceType_i1) { 2858 if (ElementTy == IceType_i1) {
2905 // Truncate extracted integers to i1s if necessary. 2859 // Truncate extracted integers to i1s if necessary.
2906 Variable *T = makeReg(IceType_i1); 2860 Variable *T = makeReg(IceType_i1);
2907 InstCast *Cast = 2861 InstCast *Cast =
2908 InstCast::create(Func, InstCast::Trunc, T, ExtractedElementR); 2862 InstCast::create(Func, InstCast::Trunc, T, ExtractedElementR);
2909 lowerCast(Cast); 2863 lowerCast(Cast);
(...skipping 25 matching lines...)
2935 Variable *T = nullptr; 2889 Variable *T = nullptr;
2936 2890
2937 if (Condition == InstFcmp::True) { 2891 if (Condition == InstFcmp::True) {
2938 // makeVectorOfOnes() requires an integer vector type. 2892 // makeVectorOfOnes() requires an integer vector type.
2939 T = makeVectorOfMinusOnes(IceType_v4i32); 2893 T = makeVectorOfMinusOnes(IceType_v4i32);
2940 } else if (Condition == InstFcmp::False) { 2894 } else if (Condition == InstFcmp::False) {
2941 T = makeVectorOfZeros(Dest->getType()); 2895 T = makeVectorOfZeros(Dest->getType());
2942 } else { 2896 } else {
2943 Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem); 2897 Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
2944 Operand *Src1RM = legalize(Src1, Legal_Reg | Legal_Mem); 2898 Operand *Src1RM = legalize(Src1, Legal_Reg | Legal_Mem);
2945 if (llvm::isa<OperandX8632Mem>(Src1RM)) 2899 if (llvm::isa<typename Traits::X86OperandMem>(Src1RM))
2946 Src1RM = legalizeToVar(Src1RM); 2900 Src1RM = legalizeToVar(Src1RM);
2947 2901
2948 switch (Condition) { 2902 switch (Condition) {
2949 default: { 2903 default: {
2950 typename Traits::Cond::CmppsCond Predicate = 2904 typename Traits::Cond::CmppsCond Predicate =
2951 Traits::TableFcmp[Index].Predicate; 2905 Traits::TableFcmp[Index].Predicate;
2952 assert(Predicate != Traits::Cond::Cmpps_Invalid); 2906 assert(Predicate != Traits::Cond::Cmpps_Invalid);
2953 T = makeReg(Src0RM->getType()); 2907 T = makeReg(Src0RM->getType());
2954 _movp(T, Src0RM); 2908 _movp(T, Src0RM);
2955 _cmpps(T, Src1RM, Predicate); 2909 _cmpps(T, Src1RM, Predicate);
(...skipping 54 matching lines...)
3010 _ucomiss(T, Src1RM); 2964 _ucomiss(T, Src1RM);
3011 if (!HasC2) { 2965 if (!HasC2) {
3012 assert(Traits::TableFcmp[Index].Default); 2966 assert(Traits::TableFcmp[Index].Default);
3013 _setcc(Dest, Traits::TableFcmp[Index].C1); 2967 _setcc(Dest, Traits::TableFcmp[Index].C1);
3014 return; 2968 return;
3015 } 2969 }
3016 } 2970 }
3017 Constant *Default = Ctx->getConstantInt32(Traits::TableFcmp[Index].Default); 2971 Constant *Default = Ctx->getConstantInt32(Traits::TableFcmp[Index].Default);
3018 _mov(Dest, Default); 2972 _mov(Dest, Default);
3019 if (HasC1) { 2973 if (HasC1) {
3020 InstX8632Label *Label = InstX8632Label::create(Func, this); 2974 typename Traits::Insts::Label *Label =
2975 Traits::Insts::Label::create(Func, this);
3021 _br(Traits::TableFcmp[Index].C1, Label); 2976 _br(Traits::TableFcmp[Index].C1, Label);
3022 if (HasC2) { 2977 if (HasC2) {
3023 _br(Traits::TableFcmp[Index].C2, Label); 2978 _br(Traits::TableFcmp[Index].C2, Label);
3024 } 2979 }
3025 Constant *NonDefault = 2980 Constant *NonDefault =
3026 Ctx->getConstantInt32(!Traits::TableFcmp[Index].Default); 2981 Ctx->getConstantInt32(!Traits::TableFcmp[Index].Default);
3027 _mov_nonkillable(Dest, NonDefault); 2982 _mov_nonkillable(Dest, NonDefault);
3028 Context.insert(Label); 2983 Context.insert(Label);
3029 } 2984 }
3030 } 2985 }
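
A sketch of the control-flow shape used above (hypothetical names, not from the patch): the scalar fcmp result is materialized as "assume the TableFcmp row's Default, then overwrite with !Default unless one of the row's branch conditions holds".

    // Illustrative sketch only, not from the patch.
    static bool fcmpScalarShape(bool RowConditionHolds, bool Default) {
      bool Dest = Default;    // _mov(Dest, Default)
      if (!RowConditionHolds) // _br(C1, Label) [and _br(C2, Label)] skip the overwrite
        Dest = !Default;      // _mov_nonkillable(Dest, NonDefault)
      return Dest;            // Context.insert(Label)
    }
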
(...skipping 52 matching lines...)
3083 Src0RM = T0; 3038 Src0RM = T0;
3084 Src1RM = T1; 3039 Src1RM = T1;
3085 } 3040 }
3086 3041
3087 Variable *T = makeReg(Ty); 3042 Variable *T = makeReg(Ty);
3088 switch (Condition) { 3043 switch (Condition) {
3089 default: 3044 default:
3090 llvm_unreachable("unexpected condition"); 3045 llvm_unreachable("unexpected condition");
3091 break; 3046 break;
3092 case InstIcmp::Eq: { 3047 case InstIcmp::Eq: {
3093 if (llvm::isa<OperandX8632Mem>(Src1RM)) 3048 if (llvm::isa<typename Traits::X86OperandMem>(Src1RM))
3094 Src1RM = legalizeToVar(Src1RM); 3049 Src1RM = legalizeToVar(Src1RM);
3095 _movp(T, Src0RM); 3050 _movp(T, Src0RM);
3096 _pcmpeq(T, Src1RM); 3051 _pcmpeq(T, Src1RM);
3097 } break; 3052 } break;
3098 case InstIcmp::Ne: { 3053 case InstIcmp::Ne: {
3099 if (llvm::isa<OperandX8632Mem>(Src1RM)) 3054 if (llvm::isa<typename Traits::X86OperandMem>(Src1RM))
3100 Src1RM = legalizeToVar(Src1RM); 3055 Src1RM = legalizeToVar(Src1RM);
3101 _movp(T, Src0RM); 3056 _movp(T, Src0RM);
3102 _pcmpeq(T, Src1RM); 3057 _pcmpeq(T, Src1RM);
3103 Variable *MinusOne = makeVectorOfMinusOnes(Ty); 3058 Variable *MinusOne = makeVectorOfMinusOnes(Ty);
3104 _pxor(T, MinusOne); 3059 _pxor(T, MinusOne);
3105 } break; 3060 } break;
3106 case InstIcmp::Ugt: 3061 case InstIcmp::Ugt:
3107 case InstIcmp::Sgt: { 3062 case InstIcmp::Sgt: {
3108 if (llvm::isa<OperandX8632Mem>(Src1RM)) 3063 if (llvm::isa<typename Traits::X86OperandMem>(Src1RM))
3109 Src1RM = legalizeToVar(Src1RM); 3064 Src1RM = legalizeToVar(Src1RM);
3110 _movp(T, Src0RM); 3065 _movp(T, Src0RM);
3111 _pcmpgt(T, Src1RM); 3066 _pcmpgt(T, Src1RM);
3112 } break; 3067 } break;
3113 case InstIcmp::Uge: 3068 case InstIcmp::Uge:
3114 case InstIcmp::Sge: { 3069 case InstIcmp::Sge: {
3115 // !(Src1RM > Src0RM) 3070 // !(Src1RM > Src0RM)
3116 if (llvm::isa<OperandX8632Mem>(Src0RM)) 3071 if (llvm::isa<typename Traits::X86OperandMem>(Src0RM))
3117 Src0RM = legalizeToVar(Src0RM); 3072 Src0RM = legalizeToVar(Src0RM);
3118 _movp(T, Src1RM); 3073 _movp(T, Src1RM);
3119 _pcmpgt(T, Src0RM); 3074 _pcmpgt(T, Src0RM);
3120 Variable *MinusOne = makeVectorOfMinusOnes(Ty); 3075 Variable *MinusOne = makeVectorOfMinusOnes(Ty);
3121 _pxor(T, MinusOne); 3076 _pxor(T, MinusOne);
3122 } break; 3077 } break;
3123 case InstIcmp::Ult: 3078 case InstIcmp::Ult:
3124 case InstIcmp::Slt: { 3079 case InstIcmp::Slt: {
3125 if (llvm::isa<OperandX8632Mem>(Src0RM)) 3080 if (llvm::isa<typename Traits::X86OperandMem>(Src0RM))
3126 Src0RM = legalizeToVar(Src0RM); 3081 Src0RM = legalizeToVar(Src0RM);
3127 _movp(T, Src1RM); 3082 _movp(T, Src1RM);
3128 _pcmpgt(T, Src0RM); 3083 _pcmpgt(T, Src0RM);
3129 } break; 3084 } break;
3130 case InstIcmp::Ule: 3085 case InstIcmp::Ule:
3131 case InstIcmp::Sle: { 3086 case InstIcmp::Sle: {
3132 // !(Src0RM > Src1RM) 3087 // !(Src0RM > Src1RM)
3133 if (llvm::isa<OperandX8632Mem>(Src1RM)) 3088 if (llvm::isa<typename Traits::X86OperandMem>(Src1RM))
3134 Src1RM = legalizeToVar(Src1RM); 3089 Src1RM = legalizeToVar(Src1RM);
3135 _movp(T, Src0RM); 3090 _movp(T, Src0RM);
3136 _pcmpgt(T, Src1RM); 3091 _pcmpgt(T, Src1RM);
3137 Variable *MinusOne = makeVectorOfMinusOnes(Ty); 3092 Variable *MinusOne = makeVectorOfMinusOnes(Ty);
3138 _pxor(T, MinusOne); 3093 _pxor(T, MinusOne);
3139 } break; 3094 } break;
3140 } 3095 }
3141 3096
3142 _movp(Dest, T); 3097 _movp(Dest, T);
3143 eliminateNextVectorSextInstruction(Dest); 3098 eliminateNextVectorSextInstruction(Dest);
3144 return; 3099 return;
3145 } 3100 }
3146 3101
3147 // a=icmp cond, b, c ==> cmp b,c; a=1; br cond,L1; FakeUse(a); a=0; L1: 3102 // a=icmp cond, b, c ==> cmp b,c; a=1; br cond,L1; FakeUse(a); a=0; L1:
3148 if (Src0->getType() == IceType_i64) { 3103 if (Src0->getType() == IceType_i64) {
3149 InstIcmp::ICond Condition = Inst->getCondition(); 3104 InstIcmp::ICond Condition = Inst->getCondition();
3150 size_t Index = static_cast<size_t>(Condition); 3105 size_t Index = static_cast<size_t>(Condition);
3151 assert(Index < Traits::TableIcmp64Size); 3106 assert(Index < Traits::TableIcmp64Size);
3152 Operand *Src0LoRM = legalize(loOperand(Src0), Legal_Reg | Legal_Mem); 3107 Operand *Src0LoRM = legalize(loOperand(Src0), Legal_Reg | Legal_Mem);
3153 Operand *Src0HiRM = legalize(hiOperand(Src0), Legal_Reg | Legal_Mem); 3108 Operand *Src0HiRM = legalize(hiOperand(Src0), Legal_Reg | Legal_Mem);
3154 Operand *Src1LoRI = legalize(loOperand(Src1), Legal_Reg | Legal_Imm); 3109 Operand *Src1LoRI = legalize(loOperand(Src1), Legal_Reg | Legal_Imm);
3155 Operand *Src1HiRI = legalize(hiOperand(Src1), Legal_Reg | Legal_Imm); 3110 Operand *Src1HiRI = legalize(hiOperand(Src1), Legal_Reg | Legal_Imm);
3156 Constant *Zero = Ctx->getConstantZero(IceType_i32); 3111 Constant *Zero = Ctx->getConstantZero(IceType_i32);
3157 Constant *One = Ctx->getConstantInt32(1); 3112 Constant *One = Ctx->getConstantInt32(1);
3158 InstX8632Label *LabelFalse = InstX8632Label::create(Func, this); 3113 typename Traits::Insts::Label *LabelFalse =
3159 InstX8632Label *LabelTrue = InstX8632Label::create(Func, this); 3114 Traits::Insts::Label::create(Func, this);
3115 typename Traits::Insts::Label *LabelTrue =
3116 Traits::Insts::Label::create(Func, this);
3160 _mov(Dest, One); 3117 _mov(Dest, One);
3161 _cmp(Src0HiRM, Src1HiRI); 3118 _cmp(Src0HiRM, Src1HiRI);
3162 if (Traits::TableIcmp64[Index].C1 != Traits::Cond::Br_None) 3119 if (Traits::TableIcmp64[Index].C1 != Traits::Cond::Br_None)
3163 _br(Traits::TableIcmp64[Index].C1, LabelTrue); 3120 _br(Traits::TableIcmp64[Index].C1, LabelTrue);
3164 if (Traits::TableIcmp64[Index].C2 != Traits::Cond::Br_None) 3121 if (Traits::TableIcmp64[Index].C2 != Traits::Cond::Br_None)
3165 _br(Traits::TableIcmp64[Index].C2, LabelFalse); 3122 _br(Traits::TableIcmp64[Index].C2, LabelFalse);
3166 _cmp(Src0LoRM, Src1LoRI); 3123 _cmp(Src0LoRM, Src1LoRI);
3167 _br(Traits::TableIcmp64[Index].C3, LabelTrue); 3124 _br(Traits::TableIcmp64[Index].C3, LabelTrue);
3168 Context.insert(LabelFalse); 3125 Context.insert(LabelFalse);
3169 _mov_nonkillable(Dest, Zero); 3126 _mov_nonkillable(Dest, Zero);
(...skipping 115 matching lines...)
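
A sketch of the i64 icmp shape above (not from the patch): the lowering decides on the high words first and only consults the low words when the high words are equal, e.g. for an unsigned less-than.

    // Illustrative sketch only, not from the patch.
    #include <cstdint>

    static bool icmp64UltShape(uint32_t ALo, uint32_t AHi, uint32_t BLo,
                               uint32_t BHi) {
      if (AHi != BHi)    // _cmp(Src0HiRM, Src1HiRI) plus the C1/C2 branches
        return AHi < BHi;
      return ALo < BLo;  // _cmp(Src0LoRM, Src1LoRI) plus the C3 branch
    }
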
3285 // memory. 3242 // memory.
3286 // 3243 //
3287 // TODO(wala): use legalize(SourceVectNotLegalized, Legal_Mem) when 3244 // TODO(wala): use legalize(SourceVectNotLegalized, Legal_Mem) when
3288 // support for legalizing to mem is implemented. 3245 // support for legalizing to mem is implemented.
3289 Variable *Slot = Func->template makeVariable(Ty); 3246 Variable *Slot = Func->template makeVariable(Ty);
3290 Slot->setWeight(RegWeight::Zero); 3247 Slot->setWeight(RegWeight::Zero);
3291 _movp(Slot, legalizeToVar(SourceVectNotLegalized)); 3248 _movp(Slot, legalizeToVar(SourceVectNotLegalized));
3292 3249
3293 // Compute the location of the position to insert in memory. 3250 // Compute the location of the position to insert in memory.
3294 unsigned Offset = Index * typeWidthInBytes(InVectorElementTy); 3251 unsigned Offset = Index * typeWidthInBytes(InVectorElementTy);
3295 OperandX8632Mem *Loc = 3252 typename Traits::X86OperandMem *Loc =
3296 getMemoryOperandForStackSlot(InVectorElementTy, Slot, Offset); 3253 getMemoryOperandForStackSlot(InVectorElementTy, Slot, Offset);
3297 _store(legalizeToVar(ElementToInsertNotLegalized), Loc); 3254 _store(legalizeToVar(ElementToInsertNotLegalized), Loc);
3298 3255
3299 Variable *T = makeReg(Ty); 3256 Variable *T = makeReg(Ty);
3300 _movp(T, Slot); 3257 _movp(T, Slot);
3301 _movp(Inst->getDest(), T); 3258 _movp(Inst->getDest(), T);
3302 } 3259 }
3303 } 3260 }
3304 3261
3305 template <class Machine> 3262 template <class Machine>
(...skipping 69 matching lines...)
3375 return; 3332 return;
3376 } 3333 }
3377 Variable *Dest = Instr->getDest(); 3334 Variable *Dest = Instr->getDest();
3378 if (Dest->getType() == IceType_i64) { 3335 if (Dest->getType() == IceType_i64) {
3379 // Follow what GCC does and use a movq instead of what lowerLoad() 3336 // Follow what GCC does and use a movq instead of what lowerLoad()
3380 // normally does (split the load into two). 3337 // normally does (split the load into two).
3381 // Thus, this skips load/arithmetic op folding. Load/arithmetic folding 3338 // Thus, this skips load/arithmetic op folding. Load/arithmetic folding
3382 // can't happen anyway, since this is x86-32 and integer arithmetic only 3339 // can't happen anyway, since this is x86-32 and integer arithmetic only
3383 // happens on 32-bit quantities. 3340 // happens on 32-bit quantities.
3384 Variable *T = makeReg(IceType_f64); 3341 Variable *T = makeReg(IceType_f64);
3385 OperandX8632Mem *Addr = formMemoryOperand(Instr->getArg(0), IceType_f64); 3342 typename Traits::X86OperandMem *Addr =
3343 formMemoryOperand(Instr->getArg(0), IceType_f64);
3386 _movq(T, Addr); 3344 _movq(T, Addr);
3387 // Then cast the bits back out of the XMM register to the i64 Dest. 3345 // Then cast the bits back out of the XMM register to the i64 Dest.
3388 InstCast *Cast = InstCast::create(Func, InstCast::Bitcast, Dest, T); 3346 InstCast *Cast = InstCast::create(Func, InstCast::Bitcast, Dest, T);
3389 lowerCast(Cast); 3347 lowerCast(Cast);
3390 // Make sure that the atomic load isn't elided when unused. 3348 // Make sure that the atomic load isn't elided when unused.
3391 Context.insert(InstFakeUse::create(Func, Dest->getLo())); 3349 Context.insert(InstFakeUse::create(Func, Dest->getLo()));
3392 Context.insert(InstFakeUse::create(Func, Dest->getHi())); 3350 Context.insert(InstFakeUse::create(Func, Dest->getHi()));
3393 return; 3351 return;
3394 } 3352 }
3395 InstLoad *Load = InstLoad::create(Func, Dest, Instr->getArg(0)); 3353 InstLoad *Load = InstLoad::create(Func, Dest, Instr->getArg(0));
(...skipping 29 matching lines...)
3425 Operand *Value = Instr->getArg(0); 3383 Operand *Value = Instr->getArg(0);
3426 Operand *Ptr = Instr->getArg(1); 3384 Operand *Ptr = Instr->getArg(1);
3427 if (Value->getType() == IceType_i64) { 3385 if (Value->getType() == IceType_i64) {
3428 // Use a movq instead of what lowerStore() normally does 3386 // Use a movq instead of what lowerStore() normally does
3429 // (split the store into two), following what GCC does. 3387 // (split the store into two), following what GCC does.
3430 // Cast the bits from int -> to an xmm register first. 3388 // Cast the bits from int -> to an xmm register first.
3431 Variable *T = makeReg(IceType_f64); 3389 Variable *T = makeReg(IceType_f64);
3432 InstCast *Cast = InstCast::create(Func, InstCast::Bitcast, T, Value); 3390 InstCast *Cast = InstCast::create(Func, InstCast::Bitcast, T, Value);
3433 lowerCast(Cast); 3391 lowerCast(Cast);
3434 // Then store XMM w/ a movq. 3392 // Then store XMM w/ a movq.
3435 OperandX8632Mem *Addr = formMemoryOperand(Ptr, IceType_f64); 3393 typename Traits::X86OperandMem *Addr =
3394 formMemoryOperand(Ptr, IceType_f64);
3436 _storeq(T, Addr); 3395 _storeq(T, Addr);
3437 _mfence(); 3396 _mfence();
3438 return; 3397 return;
3439 } 3398 }
3440 InstStore *Store = InstStore::create(Func, Value, Ptr); 3399 InstStore *Store = InstStore::create(Func, Value, Ptr);
3441 lowerStore(Store); 3400 lowerStore(Store);
3442 _mfence(); 3401 _mfence();
3443 return; 3402 return;
3444 } 3403 }
3445 case Intrinsics::Bswap: { 3404 case Intrinsics::Bswap: {
(...skipping 81 matching lines...)
3527 return; 3486 return;
3528 } 3487 }
3529 case Intrinsics::Fabs: { 3488 case Intrinsics::Fabs: {
3530 Operand *Src = legalize(Instr->getArg(0)); 3489 Operand *Src = legalize(Instr->getArg(0));
3531 Type Ty = Src->getType(); 3490 Type Ty = Src->getType();
3532 Variable *Dest = Instr->getDest(); 3491 Variable *Dest = Instr->getDest();
3533 Variable *T = makeVectorOfFabsMask(Ty); 3492 Variable *T = makeVectorOfFabsMask(Ty);
3534 // The pand instruction operates on an m128 memory operand, so if 3493 // The pand instruction operates on an m128 memory operand, so if
3535 // Src is an f32 or f64, we need to make sure it's in a register. 3494 // Src is an f32 or f64, we need to make sure it's in a register.
3536 if (isVectorType(Ty)) { 3495 if (isVectorType(Ty)) {
3537 if (llvm::isa<OperandX8632Mem>(Src)) 3496 if (llvm::isa<typename Traits::X86OperandMem>(Src))
3538 Src = legalizeToVar(Src); 3497 Src = legalizeToVar(Src);
3539 } else { 3498 } else {
3540 Src = legalizeToVar(Src); 3499 Src = legalizeToVar(Src);
3541 } 3500 }
3542 _pand(T, Src); 3501 _pand(T, Src);
3543 if (isVectorType(Ty)) 3502 if (isVectorType(Ty))
3544 _movp(Dest, T); 3503 _movp(Dest, T);
3545 else 3504 else
3546 _mov(Dest, T); 3505 _mov(Dest, T);
3547 return; 3506 return;
(...skipping 34 matching lines...)
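
A sketch of the Fabs lowering above (not from the patch): makeVectorOfFabsMask produces an all-ones value shifted right by one, so the pand simply clears each element's sign bit. The scalar equivalent:

    // Illustrative sketch only, not from the patch.
    #include <cstdint>
    #include <cstring>

    static float fabsViaMask(float X) {
      uint32_t Bits;
      std::memcpy(&Bits, &X, sizeof(Bits));
      Bits &= 0x7fffffffu; // the (all-ones >> 1) mask: everything but the sign bit
      std::memcpy(&X, &Bits, sizeof(Bits));
      return X;
    }
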
3582 InstCall *Call = makeHelperCall(H_call_memset, nullptr, 3); 3541 InstCall *Call = makeHelperCall(H_call_memset, nullptr, 3);
3583 Call->addArg(Instr->getArg(0)); 3542 Call->addArg(Instr->getArg(0));
3584 Call->addArg(ValExt); 3543 Call->addArg(ValExt);
3585 Call->addArg(Instr->getArg(2)); 3544 Call->addArg(Instr->getArg(2));
3586 lowerCall(Call); 3545 lowerCall(Call);
3587 return; 3546 return;
3588 } 3547 }
3589 case Intrinsics::NaClReadTP: { 3548 case Intrinsics::NaClReadTP: {
3590 if (Ctx->getFlags().getUseSandboxing()) { 3549 if (Ctx->getFlags().getUseSandboxing()) {
3591 Constant *Zero = Ctx->getConstantZero(IceType_i32); 3550 Constant *Zero = Ctx->getConstantZero(IceType_i32);
3592 Operand *Src = 3551 Operand *Src = Traits::X86OperandMem::create(
3593 OperandX8632Mem::create(Func, IceType_i32, nullptr, Zero, nullptr, 0, 3552 Func, IceType_i32, nullptr, Zero, nullptr, 0,
3594 OperandX8632Mem::SegReg_GS); 3553 Traits::X86OperandMem::SegReg_GS);
3595 Variable *Dest = Instr->getDest(); 3554 Variable *Dest = Instr->getDest();
3596 Variable *T = nullptr; 3555 Variable *T = nullptr;
3597 _mov(T, Src); 3556 _mov(T, Src);
3598 _mov(Dest, T); 3557 _mov(Dest, T);
3599 } else { 3558 } else {
3600 InstCall *Call = makeHelperCall(H_call_read_tp, Instr->getDest(), 0); 3559 InstCall *Call = makeHelperCall(H_call_read_tp, Instr->getDest(), 0);
3601 lowerCall(Call); 3560 lowerCall(Call);
3602 } 3561 }
3603 return; 3562 return;
3604 } 3563 }
(...skipping 42 matching lines...)
3647 // Reserve the pre-colored registers first, before adding any more 3606 // Reserve the pre-colored registers first, before adding any more
3648 // infinite-weight variables from formMemoryOperand's legalization. 3607 // infinite-weight variables from formMemoryOperand's legalization.
3649 Variable *T_edx = makeReg(IceType_i32, Traits::RegisterSet::Reg_edx); 3608 Variable *T_edx = makeReg(IceType_i32, Traits::RegisterSet::Reg_edx);
3650 Variable *T_eax = makeReg(IceType_i32, Traits::RegisterSet::Reg_eax); 3609 Variable *T_eax = makeReg(IceType_i32, Traits::RegisterSet::Reg_eax);
3651 Variable *T_ecx = makeReg(IceType_i32, Traits::RegisterSet::Reg_ecx); 3610 Variable *T_ecx = makeReg(IceType_i32, Traits::RegisterSet::Reg_ecx);
3652 Variable *T_ebx = makeReg(IceType_i32, Traits::RegisterSet::Reg_ebx); 3611 Variable *T_ebx = makeReg(IceType_i32, Traits::RegisterSet::Reg_ebx);
3653 _mov(T_eax, loOperand(Expected)); 3612 _mov(T_eax, loOperand(Expected));
3654 _mov(T_edx, hiOperand(Expected)); 3613 _mov(T_edx, hiOperand(Expected));
3655 _mov(T_ebx, loOperand(Desired)); 3614 _mov(T_ebx, loOperand(Desired));
3656 _mov(T_ecx, hiOperand(Desired)); 3615 _mov(T_ecx, hiOperand(Desired));
3657 OperandX8632Mem *Addr = formMemoryOperand(Ptr, Expected->getType()); 3616 typename Traits::X86OperandMem *Addr =
3617 formMemoryOperand(Ptr, Expected->getType());
3658 const bool Locked = true; 3618 const bool Locked = true;
3659 _cmpxchg8b(Addr, T_edx, T_eax, T_ecx, T_ebx, Locked); 3619 _cmpxchg8b(Addr, T_edx, T_eax, T_ecx, T_ebx, Locked);
3660 Variable *DestLo = llvm::cast<Variable>(loOperand(DestPrev)); 3620 Variable *DestLo = llvm::cast<Variable>(loOperand(DestPrev));
3661 Variable *DestHi = llvm::cast<Variable>(hiOperand(DestPrev)); 3621 Variable *DestHi = llvm::cast<Variable>(hiOperand(DestPrev));
3662 _mov(DestLo, T_eax); 3622 _mov(DestLo, T_eax);
3663 _mov(DestHi, T_edx); 3623 _mov(DestHi, T_edx);
3664 return; 3624 return;
3665 } 3625 }
3666 Variable *T_eax = makeReg(Expected->getType(), Traits::RegisterSet::Reg_eax); 3626 Variable *T_eax = makeReg(Expected->getType(), Traits::RegisterSet::Reg_eax);
3667 _mov(T_eax, Expected); 3627 _mov(T_eax, Expected);
3668 OperandX8632Mem *Addr = formMemoryOperand(Ptr, Expected->getType()); 3628 typename Traits::X86OperandMem *Addr =
3629 formMemoryOperand(Ptr, Expected->getType());
3669 Variable *DesiredReg = legalizeToVar(Desired); 3630 Variable *DesiredReg = legalizeToVar(Desired);
3670 const bool Locked = true; 3631 const bool Locked = true;
3671 _cmpxchg(Addr, T_eax, DesiredReg, Locked); 3632 _cmpxchg(Addr, T_eax, DesiredReg, Locked);
3672 _mov(DestPrev, T_eax); 3633 _mov(DestPrev, T_eax);
3673 } 3634 }
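
A sketch of the 32-bit cmpxchg path above (not from the patch): it follows the usual lock-cmpxchg contract that C++ exposes as compare_exchange_strong, with eax carrying Expected in and the previous memory value out.

    // Illustrative sketch only, not from the patch.
    #include <atomic>
    #include <cstdint>

    static uint32_t cmpxchg32Shape(std::atomic<uint32_t> &Addr, uint32_t Expected,
                                   uint32_t Desired) {
      uint32_t Prev = Expected;                    // _mov(T_eax, Expected)
      Addr.compare_exchange_strong(Prev, Desired); // lock cmpxchg [Addr], DesiredReg
      return Prev;                                 // _mov(DestPrev, T_eax)
    }
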
3674 3635
3675 template <class Machine> 3636 template <class Machine>
3676 bool TargetX86Base<Machine>::tryOptimizedCmpxchgCmpBr(Variable *Dest, 3637 bool TargetX86Base<Machine>::tryOptimizedCmpxchgCmpBr(Variable *Dest,
3677 Operand *PtrToMem, 3638 Operand *PtrToMem,
3678 Operand *Expected, 3639 Operand *Expected,
(...skipping 81 matching lines...)
3760 return; 3721 return;
3761 case Intrinsics::AtomicAdd: { 3722 case Intrinsics::AtomicAdd: {
3762 if (Dest->getType() == IceType_i64) { 3723 if (Dest->getType() == IceType_i64) {
3763 // All the fall-through paths must set this to true, but use this 3724 // All the fall-through paths must set this to true, but use this
3764 // for asserting. 3725 // for asserting.
3765 NeedsCmpxchg = true; 3726 NeedsCmpxchg = true;
3766 Op_Lo = &TargetX86Base<Machine>::_add; 3727 Op_Lo = &TargetX86Base<Machine>::_add;
3767 Op_Hi = &TargetX86Base<Machine>::_adc; 3728 Op_Hi = &TargetX86Base<Machine>::_adc;
3768 break; 3729 break;
3769 } 3730 }
3770 OperandX8632Mem *Addr = formMemoryOperand(Ptr, Dest->getType()); 3731 typename Traits::X86OperandMem *Addr =
3732 formMemoryOperand(Ptr, Dest->getType());
3771 const bool Locked = true; 3733 const bool Locked = true;
3772 Variable *T = nullptr; 3734 Variable *T = nullptr;
3773 _mov(T, Val); 3735 _mov(T, Val);
3774 _xadd(Addr, T, Locked); 3736 _xadd(Addr, T, Locked);
3775 _mov(Dest, T); 3737 _mov(Dest, T);
3776 return; 3738 return;
3777 } 3739 }
3778 case Intrinsics::AtomicSub: { 3740 case Intrinsics::AtomicSub: {
3779 if (Dest->getType() == IceType_i64) { 3741 if (Dest->getType() == IceType_i64) {
3780 NeedsCmpxchg = true; 3742 NeedsCmpxchg = true;
3781 Op_Lo = &TargetX86Base<Machine>::_sub; 3743 Op_Lo = &TargetX86Base<Machine>::_sub;
3782 Op_Hi = &TargetX86Base<Machine>::_sbb; 3744 Op_Hi = &TargetX86Base<Machine>::_sbb;
3783 break; 3745 break;
3784 } 3746 }
3785 OperandX8632Mem *Addr = formMemoryOperand(Ptr, Dest->getType()); 3747 typename Traits::X86OperandMem *Addr =
3748 formMemoryOperand(Ptr, Dest->getType());
3786 const bool Locked = true; 3749 const bool Locked = true;
3787 Variable *T = nullptr; 3750 Variable *T = nullptr;
3788 _mov(T, Val); 3751 _mov(T, Val);
3789 _neg(T); 3752 _neg(T);
3790 _xadd(Addr, T, Locked); 3753 _xadd(Addr, T, Locked);
3791 _mov(Dest, T); 3754 _mov(Dest, T);
3792 return; 3755 return;
3793 } 3756 }
3794 case Intrinsics::AtomicOr: 3757 case Intrinsics::AtomicOr:
3795 // TODO(jvoung): If Dest is null or dead, then some of these 3758 // TODO(jvoung): If Dest is null or dead, then some of these
(...skipping 17 matching lines...)
3813 break; 3776 break;
3814 case Intrinsics::AtomicExchange: 3777 case Intrinsics::AtomicExchange:
3815 if (Dest->getType() == IceType_i64) { 3778 if (Dest->getType() == IceType_i64) {
3816 NeedsCmpxchg = true; 3779 NeedsCmpxchg = true;
3817 // NeedsCmpxchg, but no real Op_Lo/Op_Hi need to be done. The values 3780 // NeedsCmpxchg, but no real Op_Lo/Op_Hi need to be done. The values
3818 // just need to be moved to the ecx and ebx registers. 3781 // just need to be moved to the ecx and ebx registers.
3819 Op_Lo = nullptr; 3782 Op_Lo = nullptr;
3820 Op_Hi = nullptr; 3783 Op_Hi = nullptr;
3821 break; 3784 break;
3822 } 3785 }
3823 OperandX8632Mem *Addr = formMemoryOperand(Ptr, Dest->getType()); 3786 typename Traits::X86OperandMem *Addr =
3787 formMemoryOperand(Ptr, Dest->getType());
3824 Variable *T = nullptr; 3788 Variable *T = nullptr;
3825 _mov(T, Val); 3789 _mov(T, Val);
3826 _xchg(Addr, T); 3790 _xchg(Addr, T);
3827 _mov(Dest, T); 3791 _mov(Dest, T);
3828 return; 3792 return;
3829 } 3793 }
3830 // Otherwise, we need a cmpxchg loop. 3794 // Otherwise, we need a cmpxchg loop.
3831 (void)NeedsCmpxchg; 3795 (void)NeedsCmpxchg;
3832 assert(NeedsCmpxchg); 3796 assert(NeedsCmpxchg);
3833 expandAtomicRMWAsCmpxchg(Op_Lo, Op_Hi, Dest, Ptr, Val); 3797 expandAtomicRMWAsCmpxchg(Op_Lo, Op_Hi, Dest, Ptr, Val);
(...skipping 27 matching lines...)
3861 // lock cmpxchg [ptr], <reg> 3825 // lock cmpxchg [ptr], <reg>
3862 // jne .LABEL 3826 // jne .LABEL
3863 // mov <dest>, eax 3827 // mov <dest>, eax
3864 // 3828 //
3865 // If Op_{Lo,Hi} are nullptr, then just copy the value. 3829 // If Op_{Lo,Hi} are nullptr, then just copy the value.
3866 Val = legalize(Val); 3830 Val = legalize(Val);
3867 Type Ty = Val->getType(); 3831 Type Ty = Val->getType();
3868 if (Ty == IceType_i64) { 3832 if (Ty == IceType_i64) {
3869 Variable *T_edx = makeReg(IceType_i32, Traits::RegisterSet::Reg_edx); 3833 Variable *T_edx = makeReg(IceType_i32, Traits::RegisterSet::Reg_edx);
3870 Variable *T_eax = makeReg(IceType_i32, Traits::RegisterSet::Reg_eax); 3834 Variable *T_eax = makeReg(IceType_i32, Traits::RegisterSet::Reg_eax);
3871 OperandX8632Mem *Addr = formMemoryOperand(Ptr, Ty); 3835 typename Traits::X86OperandMem *Addr = formMemoryOperand(Ptr, Ty);
3872 _mov(T_eax, loOperand(Addr)); 3836 _mov(T_eax, loOperand(Addr));
3873 _mov(T_edx, hiOperand(Addr)); 3837 _mov(T_edx, hiOperand(Addr));
3874 Variable *T_ecx = makeReg(IceType_i32, Traits::RegisterSet::Reg_ecx); 3838 Variable *T_ecx = makeReg(IceType_i32, Traits::RegisterSet::Reg_ecx);
3875 Variable *T_ebx = makeReg(IceType_i32, Traits::RegisterSet::Reg_ebx); 3839 Variable *T_ebx = makeReg(IceType_i32, Traits::RegisterSet::Reg_ebx);
3876 InstX8632Label *Label = InstX8632Label::create(Func, this); 3840 typename Traits::Insts::Label *Label =
3841 Traits::Insts::Label::create(Func, this);
3877 const bool IsXchg8b = Op_Lo == nullptr && Op_Hi == nullptr; 3842 const bool IsXchg8b = Op_Lo == nullptr && Op_Hi == nullptr;
3878 if (!IsXchg8b) { 3843 if (!IsXchg8b) {
3879 Context.insert(Label); 3844 Context.insert(Label);
3880 _mov(T_ebx, T_eax); 3845 _mov(T_ebx, T_eax);
3881 (this->*Op_Lo)(T_ebx, loOperand(Val)); 3846 (this->*Op_Lo)(T_ebx, loOperand(Val));
3882 _mov(T_ecx, T_edx); 3847 _mov(T_ecx, T_edx);
3883 (this->*Op_Hi)(T_ecx, hiOperand(Val)); 3848 (this->*Op_Hi)(T_ecx, hiOperand(Val));
3884 } else { 3849 } else {
3885 // This is for xchg, which doesn't need an actual Op_Lo/Op_Hi. 3850 // This is for xchg, which doesn't need an actual Op_Lo/Op_Hi.
3886 // It just needs the Val loaded into ebx and ecx. 3851 // It just needs the Val loaded into ebx and ecx.
(...skipping 21 matching lines...)
3908 } 3873 }
3909 // The address base (if any) is also reused in the loop. 3874 // The address base (if any) is also reused in the loop.
3910 if (Variable *Base = Addr->getBase()) 3875 if (Variable *Base = Addr->getBase())
3911 Context.insert(InstFakeUse::create(Func, Base)); 3876 Context.insert(InstFakeUse::create(Func, Base));
3912 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest)); 3877 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest));
3913 Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest)); 3878 Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest));
3914 _mov(DestLo, T_eax); 3879 _mov(DestLo, T_eax);
3915 _mov(DestHi, T_edx); 3880 _mov(DestHi, T_edx);
3916 return; 3881 return;
3917 } 3882 }
3918 OperandX8632Mem *Addr = formMemoryOperand(Ptr, Ty); 3883 typename Traits::X86OperandMem *Addr = formMemoryOperand(Ptr, Ty);
3919 Variable *T_eax = makeReg(Ty, Traits::RegisterSet::Reg_eax); 3884 Variable *T_eax = makeReg(Ty, Traits::RegisterSet::Reg_eax);
3920 _mov(T_eax, Addr); 3885 _mov(T_eax, Addr);
3921 InstX8632Label *Label = InstX8632Label::create(Func, this); 3886 typename Traits::Insts::Label *Label =
3887 Traits::Insts::Label::create(Func, this);
3922 Context.insert(Label); 3888 Context.insert(Label);
3923 // We want to pick a different register for T than Eax, so don't use 3889 // We want to pick a different register for T than Eax, so don't use
3924 // _mov(T == nullptr, T_eax). 3890 // _mov(T == nullptr, T_eax).
3925 Variable *T = makeReg(Ty); 3891 Variable *T = makeReg(Ty);
3926 _mov(T, T_eax); 3892 _mov(T, T_eax);
3927 (this->*Op_Lo)(T, Val); 3893 (this->*Op_Lo)(T, Val);
3928 const bool Locked = true; 3894 const bool Locked = true;
3929 _cmpxchg(Addr, T_eax, T, Locked); 3895 _cmpxchg(Addr, T_eax, T, Locked);
3930 _br(Traits::Cond::Br_ne, Label); 3896 _br(Traits::Cond::Br_ne, Label);
3931 // If Val is a variable, model the extended live range of Val through 3897 // If Val is a variable, model the extended live range of Val through
(...skipping 322 matching lines...)
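
A sketch of the cmpxchg retry loop emitted above (not from the patch): it has the same shape as a C++11 compare-exchange loop, shown here for a 32-bit add.

    // Illustrative sketch only, not from the patch.
    #include <atomic>
    #include <cstdint>

    static uint32_t atomicRmwAddShape(std::atomic<uint32_t> &Addr, uint32_t Val) {
      uint32_t Old = Addr.load();                      // mov eax, [Addr]
      uint32_t New;
      do {
        New = Old + Val;                               // mov T, eax; add T, Val
      } while (!Addr.compare_exchange_weak(Old, New)); // lock cmpxchg; jne Label
      return Old;                                      // mov Dest, eax
    }
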
4254 4220
4255 // TODO: consider overflow issues with respect to Offset. 4221 // TODO: consider overflow issues with respect to Offset.
4256 // TODO: handle symbolic constants. 4222 // TODO: handle symbolic constants.
4257 } 4223 }
4258 } 4224 }
4259 4225
4260 template <class Machine> 4226 template <class Machine>
4261 void TargetX86Base<Machine>::lowerLoad(const InstLoad *Load) { 4227 void TargetX86Base<Machine>::lowerLoad(const InstLoad *Load) {
4262 // A Load instruction can be treated the same as an Assign 4228 // A Load instruction can be treated the same as an Assign
4263 // instruction, after the source operand is transformed into an 4229 // instruction, after the source operand is transformed into an
 4264 // OperandX8632Mem operand. Note that the address mode 4230 // X86OperandMem operand. Note that the address mode
 4265 // optimization already creates an OperandX8632Mem operand, so it 4231 // optimization already creates an X86OperandMem operand, so it
 4266 // doesn't need another level of transformation. 4233 // doesn't need another level of transformation.
4267 Variable *DestLoad = Load->getDest(); 4234 Variable *DestLoad = Load->getDest();
4268 Type Ty = DestLoad->getType(); 4235 Type Ty = DestLoad->getType();
4269 Operand *Src0 = formMemoryOperand(Load->getSourceAddress(), Ty); 4236 Operand *Src0 = formMemoryOperand(Load->getSourceAddress(), Ty);
4270 InstAssign *Assign = InstAssign::create(Func, DestLoad, Src0); 4237 InstAssign *Assign = InstAssign::create(Func, DestLoad, Src0);
4271 lowerAssign(Assign); 4238 lowerAssign(Assign);
4272 } 4239 }
4273 4240
4274 template <class Machine> void TargetX86Base<Machine>::doAddressOptLoad() { 4241 template <class Machine> void TargetX86Base<Machine>::doAddressOptLoad() {
4275 Inst *Inst = Context.getCur(); 4242 Inst *Inst = Context.getCur();
4276 Variable *Dest = Inst->getDest(); 4243 Variable *Dest = Inst->getDest();
4277 Operand *Addr = Inst->getSrc(0); 4244 Operand *Addr = Inst->getSrc(0);
4278 Variable *Index = nullptr; 4245 Variable *Index = nullptr;
4279 uint16_t Shift = 0; 4246 uint16_t Shift = 0;
4280 int32_t Offset = 0; // TODO: make Constant 4247 int32_t Offset = 0; // TODO: make Constant
4281 // Vanilla ICE load instructions should not use the segment registers, 4248 // Vanilla ICE load instructions should not use the segment registers,
4282 // and computeAddressOpt only works at the level of Variables and Constants, 4249 // and computeAddressOpt only works at the level of Variables and Constants,
 4283 // not other OperandX8632Mem, so there should be no mention of segment 4250 // not other X86OperandMem operands, so there should be no mention of segment
 4284 // registers there either. 4252 // registers there either.
4285 const OperandX8632Mem::SegmentRegisters SegmentReg = 4253 const typename Traits::X86OperandMem::SegmentRegisters SegmentReg =
4286 OperandX8632Mem::DefaultSegment; 4254 Traits::X86OperandMem::DefaultSegment;
4287 Variable *Base = llvm::dyn_cast<Variable>(Addr); 4255 Variable *Base = llvm::dyn_cast<Variable>(Addr);
4288 computeAddressOpt(Func, Inst, Base, Index, Shift, Offset); 4256 computeAddressOpt(Func, Inst, Base, Index, Shift, Offset);
4289 if (Base && Addr != Base) { 4257 if (Base && Addr != Base) {
4290 Inst->setDeleted(); 4258 Inst->setDeleted();
4291 Constant *OffsetOp = Ctx->getConstantInt32(Offset); 4259 Constant *OffsetOp = Ctx->getConstantInt32(Offset);
4292 Addr = OperandX8632Mem::create(Func, Dest->getType(), Base, OffsetOp, Index, 4260 Addr = Traits::X86OperandMem::create(Func, Dest->getType(), Base, OffsetOp,
4293 Shift, SegmentReg); 4261 Index, Shift, SegmentReg);
4294 Context.insert(InstLoad::create(Func, Dest, Addr)); 4262 Context.insert(InstLoad::create(Func, Dest, Addr));
4295 } 4263 }
4296 } 4264 }
4297 4265
4298 template <class Machine> 4266 template <class Machine>
4299 void TargetX86Base<Machine>::randomlyInsertNop(float Probability) { 4267 void TargetX86Base<Machine>::randomlyInsertNop(float Probability) {
4300 RandomNumberGeneratorWrapper RNG(Ctx->getRNG()); 4268 RandomNumberGeneratorWrapper RNG(Ctx->getRNG());
4301 if (RNG.getTrueWithProbability(Probability)) { 4269 if (RNG.getTrueWithProbability(Probability)) {
4302 _nop(RNG(Traits::X86_NUM_NOP_VARIANTS)); 4270 _nop(RNG(Traits::X86_NUM_NOP_VARIANTS));
4303 } 4271 }
(...skipping 126 matching lines...)
4430 CmpOpnd1 = Ctx->getConstantZero(IceType_i32); 4398 CmpOpnd1 = Ctx->getConstantZero(IceType_i32);
4431 } 4399 }
4432 assert(CmpOpnd0); 4400 assert(CmpOpnd0);
4433 assert(CmpOpnd1); 4401 assert(CmpOpnd1);
4434 4402
4435 _cmp(CmpOpnd0, CmpOpnd1); 4403 _cmp(CmpOpnd0, CmpOpnd1);
4436 if (typeWidthInBytes(DestTy) == 1 || isFloatingType(DestTy)) { 4404 if (typeWidthInBytes(DestTy) == 1 || isFloatingType(DestTy)) {
4437 // The cmov instruction doesn't allow 8-bit or FP operands, so 4405 // The cmov instruction doesn't allow 8-bit or FP operands, so
4438 // we need explicit control flow. 4406 // we need explicit control flow.
4439 // d=cmp e,f; a=d?b:c ==> cmp e,f; a=b; jne L1; a=c; L1: 4407 // d=cmp e,f; a=d?b:c ==> cmp e,f; a=b; jne L1; a=c; L1:
4440 InstX8632Label *Label = InstX8632Label::create(Func, this); 4408 typename Traits::Insts::Label *Label =
4409 Traits::Insts::Label::create(Func, this);
4441 SrcT = legalize(SrcT, Legal_Reg | Legal_Imm); 4410 SrcT = legalize(SrcT, Legal_Reg | Legal_Imm);
4442 _mov(Dest, SrcT); 4411 _mov(Dest, SrcT);
4443 _br(Cond, Label); 4412 _br(Cond, Label);
4444 SrcF = legalize(SrcF, Legal_Reg | Legal_Imm); 4413 SrcF = legalize(SrcF, Legal_Reg | Legal_Imm);
4445 _mov_nonkillable(Dest, SrcF); 4414 _mov_nonkillable(Dest, SrcF);
4446 Context.insert(Label); 4415 Context.insert(Label);
4447 return; 4416 return;
4448 } 4417 }
4449 // mov t, SrcF; cmov_cond t, SrcT; mov dest, t 4418 // mov t, SrcF; cmov_cond t, SrcT; mov dest, t
4450 // But if SrcT is immediate, we might be able to do better, as 4419 // But if SrcT is immediate, we might be able to do better, as
4451 // the cmov instruction doesn't allow an immediate operand: 4420 // the cmov instruction doesn't allow an immediate operand:
4452 // mov t, SrcT; cmov_!cond t, SrcF; mov dest, t 4421 // mov t, SrcT; cmov_!cond t, SrcF; mov dest, t
4453 if (llvm::isa<Constant>(SrcT) && !llvm::isa<Constant>(SrcF)) { 4422 if (llvm::isa<Constant>(SrcT) && !llvm::isa<Constant>(SrcF)) {
4454 std::swap(SrcT, SrcF); 4423 std::swap(SrcT, SrcF);
4455 Cond = InstX8632::getOppositeCondition(Cond); 4424 Cond = InstX86Base<Machine>::getOppositeCondition(Cond);
4456 } 4425 }
4457 if (DestTy == IceType_i64) { 4426 if (DestTy == IceType_i64) {
4458 // Set the low portion. 4427 // Set the low portion.
4459 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest)); 4428 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest));
4460 Variable *TLo = nullptr; 4429 Variable *TLo = nullptr;
4461 Operand *SrcFLo = legalize(loOperand(SrcF)); 4430 Operand *SrcFLo = legalize(loOperand(SrcF));
4462 _mov(TLo, SrcFLo); 4431 _mov(TLo, SrcFLo);
4463 Operand *SrcTLo = legalize(loOperand(SrcT), Legal_Reg | Legal_Mem); 4432 Operand *SrcTLo = legalize(loOperand(SrcT), Legal_Reg | Legal_Mem);
4464 _cmov(TLo, SrcTLo, Cond); 4433 _cmov(TLo, SrcTLo, Cond);
4465 _mov(DestLo, TLo); 4434 _mov(DestLo, TLo);
(...skipping 14 matching lines...)
4480 _mov(T, SrcF); 4449 _mov(T, SrcF);
4481 SrcT = legalize(SrcT, Legal_Reg | Legal_Mem); 4450 SrcT = legalize(SrcT, Legal_Reg | Legal_Mem);
4482 _cmov(T, SrcT, Cond); 4451 _cmov(T, SrcT, Cond);
4483 _mov(Dest, T); 4452 _mov(Dest, T);
4484 } 4453 }
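
A sketch of the register-sized select path above (not from the patch): the cmov-style lowering loads the false value, conditionally overwrites it with the true value, and then moves it into Dest, with no branch in the emitted code.

    // Illustrative sketch only, not from the patch.
    static int selectCmovShape(bool Cond, int SrcT, int SrcF) {
      int T = SrcF; // _mov(T, SrcF)
      if (Cond)     // _cmov(T, SrcT, Cond)
        T = SrcT;
      return T;     // _mov(Dest, T)
    }
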
4485 4454
4486 template <class Machine> 4455 template <class Machine>
4487 void TargetX86Base<Machine>::lowerStore(const InstStore *Inst) { 4456 void TargetX86Base<Machine>::lowerStore(const InstStore *Inst) {
4488 Operand *Value = Inst->getData(); 4457 Operand *Value = Inst->getData();
4489 Operand *Addr = Inst->getAddr(); 4458 Operand *Addr = Inst->getAddr();
4490 OperandX8632Mem *NewAddr = formMemoryOperand(Addr, Value->getType()); 4459 typename Traits::X86OperandMem *NewAddr =
4460 formMemoryOperand(Addr, Value->getType());
4491 Type Ty = NewAddr->getType(); 4461 Type Ty = NewAddr->getType();
4492 4462
4493 if (Ty == IceType_i64) { 4463 if (Ty == IceType_i64) {
4494 Value = legalize(Value); 4464 Value = legalize(Value);
4495 Operand *ValueHi = legalize(hiOperand(Value), Legal_Reg | Legal_Imm); 4465 Operand *ValueHi = legalize(hiOperand(Value), Legal_Reg | Legal_Imm);
4496 Operand *ValueLo = legalize(loOperand(Value), Legal_Reg | Legal_Imm); 4466 Operand *ValueLo = legalize(loOperand(Value), Legal_Reg | Legal_Imm);
4497 _store(ValueHi, llvm::cast<OperandX8632Mem>(hiOperand(NewAddr))); 4467 _store(ValueHi,
4498 _store(ValueLo, llvm::cast<OperandX8632Mem>(loOperand(NewAddr))); 4468 llvm::cast<typename Traits::X86OperandMem>(hiOperand(NewAddr)));
4469 _store(ValueLo,
4470 llvm::cast<typename Traits::X86OperandMem>(loOperand(NewAddr)));
4499 } else if (isVectorType(Ty)) { 4471 } else if (isVectorType(Ty)) {
4500 _storep(legalizeToVar(Value), NewAddr); 4472 _storep(legalizeToVar(Value), NewAddr);
4501 } else { 4473 } else {
4502 Value = legalize(Value, Legal_Reg | Legal_Imm); 4474 Value = legalize(Value, Legal_Reg | Legal_Imm);
4503 _store(Value, NewAddr); 4475 _store(Value, NewAddr);
4504 } 4476 }
4505 } 4477 }
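
A sketch of the i64 store path above (not from the patch): on x86-32 the store is split into two 32-bit stores of the high and low halves.

    // Illustrative sketch only, not from the patch (little-endian layout assumed).
    #include <cstdint>

    static void storeI64SplitShape(uint32_t *Addr, uint64_t Value) {
      Addr[1] = static_cast<uint32_t>(Value >> 32); // _store(ValueHi, hiOperand(NewAddr))
      Addr[0] = static_cast<uint32_t>(Value);       // _store(ValueLo, loOperand(NewAddr))
    }
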
4506 4478
4507 template <class Machine> void TargetX86Base<Machine>::doAddressOptStore() { 4479 template <class Machine> void TargetX86Base<Machine>::doAddressOptStore() {
4508 InstStore *Inst = llvm::cast<InstStore>(Context.getCur()); 4480 InstStore *Inst = llvm::cast<InstStore>(Context.getCur());
4509 Operand *Data = Inst->getData(); 4481 Operand *Data = Inst->getData();
4510 Operand *Addr = Inst->getAddr(); 4482 Operand *Addr = Inst->getAddr();
4511 Variable *Index = nullptr; 4483 Variable *Index = nullptr;
4512 uint16_t Shift = 0; 4484 uint16_t Shift = 0;
4513 int32_t Offset = 0; // TODO: make Constant 4485 int32_t Offset = 0; // TODO: make Constant
4514 Variable *Base = llvm::dyn_cast<Variable>(Addr); 4486 Variable *Base = llvm::dyn_cast<Variable>(Addr);
4515 // Vanilla ICE store instructions should not use the segment registers, 4487 // Vanilla ICE store instructions should not use the segment registers,
4516 // and computeAddressOpt only works at the level of Variables and Constants, 4488 // and computeAddressOpt only works at the level of Variables and Constants,
 4517 // not other OperandX8632Mem, so there should be no mention of segment 4489 // not other X86OperandMem operands, so there should be no mention of segment
 4518 // registers there either. 4491 // registers there either.
4519 const OperandX8632Mem::SegmentRegisters SegmentReg = 4492 const typename Traits::X86OperandMem::SegmentRegisters SegmentReg =
4520 OperandX8632Mem::DefaultSegment; 4493 Traits::X86OperandMem::DefaultSegment;
4521 computeAddressOpt(Func, Inst, Base, Index, Shift, Offset); 4494 computeAddressOpt(Func, Inst, Base, Index, Shift, Offset);
4522 if (Base && Addr != Base) { 4495 if (Base && Addr != Base) {
4523 Inst->setDeleted(); 4496 Inst->setDeleted();
4524 Constant *OffsetOp = Ctx->getConstantInt32(Offset); 4497 Constant *OffsetOp = Ctx->getConstantInt32(Offset);
4525 Addr = OperandX8632Mem::create(Func, Data->getType(), Base, OffsetOp, Index, 4498 Addr = Traits::X86OperandMem::create(Func, Data->getType(), Base, OffsetOp,
4526 Shift, SegmentReg); 4499 Index, Shift, SegmentReg);
4527 InstStore *NewStore = InstStore::create(Func, Data, Addr); 4500 InstStore *NewStore = InstStore::create(Func, Data, Addr);
4528 if (Inst->getDest()) 4501 if (Inst->getDest())
4529 NewStore->setRmwBeacon(Inst->getRmwBeacon()); 4502 NewStore->setRmwBeacon(Inst->getRmwBeacon());
4530 Context.insert(NewStore); 4503 Context.insert(NewStore);
4531 } 4504 }
4532 } 4505 }
4533 4506
4534 template <class Machine> 4507 template <class Machine>
4535 void TargetX86Base<Machine>::lowerSwitch(const InstSwitch *Inst) { 4508 void TargetX86Base<Machine>::lowerSwitch(const InstSwitch *Inst) {
4536 // This implements the most naive possible lowering. 4509 // This implements the most naive possible lowering.
4537 // cmp a,val[0]; jeq label[0]; cmp a,val[1]; jeq label[1]; ... jmp default 4510 // cmp a,val[0]; jeq label[0]; cmp a,val[1]; jeq label[1]; ... jmp default
4538 Operand *Src0 = Inst->getComparison(); 4511 Operand *Src0 = Inst->getComparison();
4539 SizeT NumCases = Inst->getNumCases(); 4512 SizeT NumCases = Inst->getNumCases();
4540 if (Src0->getType() == IceType_i64) { 4513 if (Src0->getType() == IceType_i64) {
4541 Src0 = legalize(Src0); // get Base/Index into physical registers 4514 Src0 = legalize(Src0); // get Base/Index into physical registers
4542 Operand *Src0Lo = loOperand(Src0); 4515 Operand *Src0Lo = loOperand(Src0);
4543 Operand *Src0Hi = hiOperand(Src0); 4516 Operand *Src0Hi = hiOperand(Src0);
4544 if (NumCases >= 2) { 4517 if (NumCases >= 2) {
4545 Src0Lo = legalizeToVar(Src0Lo); 4518 Src0Lo = legalizeToVar(Src0Lo);
4546 Src0Hi = legalizeToVar(Src0Hi); 4519 Src0Hi = legalizeToVar(Src0Hi);
4547 } else { 4520 } else {
4548 Src0Lo = legalize(Src0Lo, Legal_Reg | Legal_Mem); 4521 Src0Lo = legalize(Src0Lo, Legal_Reg | Legal_Mem);
4549 Src0Hi = legalize(Src0Hi, Legal_Reg | Legal_Mem); 4522 Src0Hi = legalize(Src0Hi, Legal_Reg | Legal_Mem);
4550 } 4523 }
4551 for (SizeT I = 0; I < NumCases; ++I) { 4524 for (SizeT I = 0; I < NumCases; ++I) {
4552 Constant *ValueLo = Ctx->getConstantInt32(Inst->getValue(I)); 4525 Constant *ValueLo = Ctx->getConstantInt32(Inst->getValue(I));
4553 Constant *ValueHi = Ctx->getConstantInt32(Inst->getValue(I) >> 32); 4526 Constant *ValueHi = Ctx->getConstantInt32(Inst->getValue(I) >> 32);
4554 InstX8632Label *Label = InstX8632Label::create(Func, this); 4527 typename Traits::Insts::Label *Label =
4528 Traits::Insts::Label::create(Func, this);
4555 _cmp(Src0Lo, ValueLo); 4529 _cmp(Src0Lo, ValueLo);
4556 _br(Traits::Cond::Br_ne, Label); 4530 _br(Traits::Cond::Br_ne, Label);
4557 _cmp(Src0Hi, ValueHi); 4531 _cmp(Src0Hi, ValueHi);
4558 _br(Traits::Cond::Br_e, Inst->getLabel(I)); 4532 _br(Traits::Cond::Br_e, Inst->getLabel(I));
4559 Context.insert(Label); 4533 Context.insert(Label);
4560 } 4534 }
4561 _br(Inst->getLabelDefault()); 4535 _br(Inst->getLabelDefault());
4562 return; 4536 return;
4563 } 4537 }
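
A sketch of the 64-bit switch-case test above (not from the patch): each case is "skip the case on a low-word mismatch, take it on a high-word match".

    // Illustrative sketch only, not from the patch.
    #include <cstdint>

    static bool switchCase64Matches(uint32_t SrcLo, uint32_t SrcHi,
                                    uint64_t CaseValue) {
      if (SrcLo != static_cast<uint32_t>(CaseValue))          // _cmp(Src0Lo, ValueLo); _br(ne, Label)
        return false;
      return SrcHi == static_cast<uint32_t>(CaseValue >> 32); // _cmp(Src0Hi, ValueHi); _br(e, case label)
    }
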
4564 // OK, we'll be slightly less naive by forcing Src into a physical 4538 // OK, we'll be slightly less naive by forcing Src into a physical
(...skipping 66 matching lines...)
4631 } 4605 }
4632 } 4606 }
4633 4607
4634 template <class Machine> 4608 template <class Machine>
4635 void TargetX86Base<Machine>::lowerUnreachable( 4609 void TargetX86Base<Machine>::lowerUnreachable(
4636 const InstUnreachable * /*Inst*/) { 4610 const InstUnreachable * /*Inst*/) {
4637 _ud2(); 4611 _ud2();
4638 } 4612 }
4639 4613
4640 template <class Machine> 4614 template <class Machine>
4641 void TargetX86Base<Machine>::lowerRMW(const InstX8632FakeRMW *RMW) { 4615 void TargetX86Base<Machine>::lowerRMW(
4616 const typename Traits::Insts::FakeRMW *RMW) {
4642 // If the beacon variable's live range does not end in this 4617 // If the beacon variable's live range does not end in this
4643 // instruction, then it must end in the modified Store instruction 4618 // instruction, then it must end in the modified Store instruction
4644 // that follows. This means that the original Store instruction is 4619 // that follows. This means that the original Store instruction is
4645 // still there, either because the value being stored is used beyond 4620 // still there, either because the value being stored is used beyond
4646 // the Store instruction, or because dead code elimination did not 4621 // the Store instruction, or because dead code elimination did not
4647 // happen. In either case, we cancel RMW lowering (and the caller 4622 // happen. In either case, we cancel RMW lowering (and the caller
4648 // deletes the RMW instruction). 4623 // deletes the RMW instruction).
4649 if (!RMW->isLastUse(RMW->getBeacon())) 4624 if (!RMW->isLastUse(RMW->getBeacon()))
4650 return; 4625 return;
4651 Operand *Src = RMW->getData(); 4626 Operand *Src = RMW->getData();
4652 Type Ty = Src->getType(); 4627 Type Ty = Src->getType();
4653 OperandX8632Mem *Addr = formMemoryOperand(RMW->getAddr(), Ty); 4628 typename Traits::X86OperandMem *Addr = formMemoryOperand(RMW->getAddr(), Ty);
4654 if (Ty == IceType_i64) { 4629 if (Ty == IceType_i64) {
4655 Operand *SrcLo = legalize(loOperand(Src), Legal_Reg | Legal_Imm); 4630 Operand *SrcLo = legalize(loOperand(Src), Legal_Reg | Legal_Imm);
4656 Operand *SrcHi = legalize(hiOperand(Src), Legal_Reg | Legal_Imm); 4631 Operand *SrcHi = legalize(hiOperand(Src), Legal_Reg | Legal_Imm);
4657 OperandX8632Mem *AddrLo = llvm::cast<OperandX8632Mem>(loOperand(Addr)); 4632 typename Traits::X86OperandMem *AddrLo =
4658 OperandX8632Mem *AddrHi = llvm::cast<OperandX8632Mem>(hiOperand(Addr)); 4633 llvm::cast<typename Traits::X86OperandMem>(loOperand(Addr));
4634 typename Traits::X86OperandMem *AddrHi =
4635 llvm::cast<typename Traits::X86OperandMem>(hiOperand(Addr));
4659 switch (RMW->getOp()) { 4636 switch (RMW->getOp()) {
4660 default: 4637 default:
4661 // TODO(stichnot): Implement other arithmetic operators. 4638 // TODO(stichnot): Implement other arithmetic operators.
4662 break; 4639 break;
4663 case InstArithmetic::Add: 4640 case InstArithmetic::Add:
4664 _add_rmw(AddrLo, SrcLo); 4641 _add_rmw(AddrLo, SrcLo);
4665 _adc_rmw(AddrHi, SrcHi); 4642 _adc_rmw(AddrHi, SrcHi);
4666 return; 4643 return;
4667 case InstArithmetic::Sub: 4644 case InstArithmetic::Sub:
4668 _sub_rmw(AddrLo, SrcLo); 4645 _sub_rmw(AddrLo, SrcLo);
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
4707 Src = legalize(Src, Legal_Reg | Legal_Imm); 4684 Src = legalize(Src, Legal_Reg | Legal_Imm);
4708 _xor_rmw(Addr, Src); 4685 _xor_rmw(Addr, Src);
4709 return; 4686 return;
4710 } 4687 }
4711 } 4688 }
4712 llvm::report_fatal_error("Couldn't lower RMW instruction"); 4689 llvm::report_fatal_error("Couldn't lower RMW instruction");
4713 } 4690 }
4714 4691
4715 template <class Machine> 4692 template <class Machine>
4716 void TargetX86Base<Machine>::lowerOther(const Inst *Instr) { 4693 void TargetX86Base<Machine>::lowerOther(const Inst *Instr) {
4717 if (const auto *RMW = llvm::dyn_cast<InstX8632FakeRMW>(Instr)) { 4694 if (const auto *RMW =
4695 llvm::dyn_cast<typename Traits::Insts::FakeRMW>(Instr)) {
4718 lowerRMW(RMW); 4696 lowerRMW(RMW);
4719 } else { 4697 } else {
4720 TargetLowering::lowerOther(Instr); 4698 TargetLowering::lowerOther(Instr);
4721 } 4699 }
4722 } 4700 }
4723 4701
4724 // Turn an i64 Phi instruction into a pair of i32 Phi instructions, to 4702 // Turn an i64 Phi instruction into a pair of i32 Phi instructions, to
4725 // preserve integrity of liveness analysis. Undef values are also 4703 // preserve integrity of liveness analysis. Undef values are also
4726 // turned into zeroes, since loOperand() and hiOperand() don't expect 4704 // turned into zeroes, since loOperand() and hiOperand() don't expect
4727 // Undef input. 4705 // Undef input.
(...skipping 255 matching lines...)
4983 // vector constants in memory. 4961 // vector constants in memory.
4984 template <class Machine> 4962 template <class Machine>
4985 Variable *TargetX86Base<Machine>::makeVectorOfFabsMask(Type Ty, 4963 Variable *TargetX86Base<Machine>::makeVectorOfFabsMask(Type Ty,
4986 int32_t RegNum) { 4964 int32_t RegNum) {
4987 Variable *Reg = makeVectorOfMinusOnes(Ty, RegNum); 4965 Variable *Reg = makeVectorOfMinusOnes(Ty, RegNum);
4988 _psrl(Reg, Ctx->getConstantInt8(1)); 4966 _psrl(Reg, Ctx->getConstantInt8(1));
4989 return Reg; 4967 return Reg;
4990 } 4968 }
4991 4969
4992 template <class Machine> 4970 template <class Machine>
4993 OperandX8632Mem * 4971 typename TargetX86Base<Machine>::Traits::X86OperandMem *
4994 TargetX86Base<Machine>::getMemoryOperandForStackSlot(Type Ty, Variable *Slot, 4972 TargetX86Base<Machine>::getMemoryOperandForStackSlot(Type Ty, Variable *Slot,
4995 uint32_t Offset) { 4973 uint32_t Offset) {
4996 // Ensure that Loc is a stack slot. 4974 // Ensure that Loc is a stack slot.
4997 assert(Slot->getWeight().isZero()); 4975 assert(Slot->getWeight().isZero());
4998 assert(Slot->getRegNum() == Variable::NoRegister); 4976 assert(Slot->getRegNum() == Variable::NoRegister);
4999 // Compute the location of Loc in memory. 4977 // Compute the location of Loc in memory.
5000 // TODO(wala,stichnot): lea should not be required. The address of 4978 // TODO(wala,stichnot): lea should not be required. The address of
5001 // the stack slot is known at compile time (although not until after 4979 // the stack slot is known at compile time (although not until after
5002 // addProlog()). 4980 // addProlog()).
5003 const Type PointerType = IceType_i32; 4981 const Type PointerType = IceType_i32;
5004 Variable *Loc = makeReg(PointerType); 4982 Variable *Loc = makeReg(PointerType);
5005 _lea(Loc, Slot); 4983 _lea(Loc, Slot);
5006 Constant *ConstantOffset = Ctx->getConstantInt32(Offset); 4984 Constant *ConstantOffset = Ctx->getConstantInt32(Offset);
5007 return OperandX8632Mem::create(Func, Ty, Loc, ConstantOffset); 4985 return Traits::X86OperandMem::create(Func, Ty, Loc, ConstantOffset);
5008 } 4986 }
5009 4987
5010 // Helper for legalize() to emit the right code to lower an operand to a 4988 // Helper for legalize() to emit the right code to lower an operand to a
5011 // register of the appropriate type. 4989 // register of the appropriate type.
5012 template <class Machine> 4990 template <class Machine>
5013 Variable *TargetX86Base<Machine>::copyToReg(Operand *Src, int32_t RegNum) { 4991 Variable *TargetX86Base<Machine>::copyToReg(Operand *Src, int32_t RegNum) {
5014 Type Ty = Src->getType(); 4992 Type Ty = Src->getType();
5015 Variable *Reg = makeReg(Ty, RegNum); 4993 Variable *Reg = makeReg(Ty, RegNum);
5016 if (isVectorType(Ty)) { 4994 if (isVectorType(Ty)) {
5017 _movp(Reg, Src); 4995 _movp(Reg, Src);
(...skipping 11 matching lines...)
5029 // to legalize() allow a physical register. If a physical register 5007 // to legalize() allow a physical register. If a physical register
5030 // needs to be explicitly disallowed, then new code will need to be 5008 // needs to be explicitly disallowed, then new code will need to be
5031 // written to force a spill. 5009 // written to force a spill.
5032 assert(Allowed & Legal_Reg); 5010 assert(Allowed & Legal_Reg);
5033 // If we're asking for a specific physical register, make sure we're 5011 // If we're asking for a specific physical register, make sure we're
5034 // not allowing any other operand kinds. (This could be future 5012 // not allowing any other operand kinds. (This could be future
5035 // work, e.g. allow the shl shift amount to be either an immediate 5013 // work, e.g. allow the shl shift amount to be either an immediate
5036 // or in ecx.) 5014 // or in ecx.)
5037 assert(RegNum == Variable::NoRegister || Allowed == Legal_Reg); 5015 assert(RegNum == Variable::NoRegister || Allowed == Legal_Reg);
5038 5016
5039 if (auto Mem = llvm::dyn_cast<OperandX8632Mem>(From)) { 5017 if (auto Mem = llvm::dyn_cast<typename Traits::X86OperandMem>(From)) {
5040 // Before doing anything with a Mem operand, we need to ensure 5018 // Before doing anything with a Mem operand, we need to ensure
5041 // that the Base and Index components are in physical registers. 5019 // that the Base and Index components are in physical registers.
5042 Variable *Base = Mem->getBase(); 5020 Variable *Base = Mem->getBase();
5043 Variable *Index = Mem->getIndex(); 5021 Variable *Index = Mem->getIndex();
5044 Variable *RegBase = nullptr; 5022 Variable *RegBase = nullptr;
5045 Variable *RegIndex = nullptr; 5023 Variable *RegIndex = nullptr;
5046 if (Base) { 5024 if (Base) {
5047 RegBase = legalizeToVar(Base); 5025 RegBase = legalizeToVar(Base);
5048 } 5026 }
5049 if (Index) { 5027 if (Index) {
5050 RegIndex = legalizeToVar(Index); 5028 RegIndex = legalizeToVar(Index);
5051 } 5029 }
5052 if (Base != RegBase || Index != RegIndex) { 5030 if (Base != RegBase || Index != RegIndex) {
5053 Mem = 5031 Mem = Traits::X86OperandMem::create(Func, Ty, RegBase, Mem->getOffset(),
5054 OperandX8632Mem::create(Func, Ty, RegBase, Mem->getOffset(), RegIndex, 5032 RegIndex, Mem->getShift(),
5055 Mem->getShift(), Mem->getSegmentRegister()); 5033 Mem->getSegmentRegister());
5056 } 5034 }
5057 5035
5058 // For all Memory Operands, we do randomization/pooling here 5036 // For all Memory Operands, we do randomization/pooling here
5059 From = randomizeOrPoolImmediate(Mem); 5037 From = randomizeOrPoolImmediate(Mem);
5060 5038
5061 if (!(Allowed & Legal_Mem)) { 5039 if (!(Allowed & Legal_Mem)) {
5062 From = copyToReg(From, RegNum); 5040 From = copyToReg(From, RegNum);
5063 } 5041 }
5064 return From; 5042 return From;
5065 } 5043 }
(...skipping 29 matching lines...) Expand all
5095 5073
5096 // Convert a scalar floating point constant into an explicit 5074 // Convert a scalar floating point constant into an explicit
5097 // memory operand. 5075 // memory operand.
5098 if (isScalarFloatingType(Ty)) { 5076 if (isScalarFloatingType(Ty)) {
5099 Variable *Base = nullptr; 5077 Variable *Base = nullptr;
5100 std::string Buffer; 5078 std::string Buffer;
5101 llvm::raw_string_ostream StrBuf(Buffer); 5079 llvm::raw_string_ostream StrBuf(Buffer);
5102 llvm::cast<Constant>(From)->emitPoolLabel(StrBuf); 5080 llvm::cast<Constant>(From)->emitPoolLabel(StrBuf);
5103 llvm::cast<Constant>(From)->setShouldBePooled(true); 5081 llvm::cast<Constant>(From)->setShouldBePooled(true);
5104 Constant *Offset = Ctx->getConstantSym(0, StrBuf.str(), true); 5082 Constant *Offset = Ctx->getConstantSym(0, StrBuf.str(), true);
5105 From = OperandX8632Mem::create(Func, Ty, Base, Offset); 5083 From = Traits::X86OperandMem::create(Func, Ty, Base, Offset);
5106 } 5084 }
5107 bool NeedsReg = false; 5085 bool NeedsReg = false;
5108 if (!(Allowed & Legal_Imm) && !isScalarFloatingType(Ty)) 5086 if (!(Allowed & Legal_Imm) && !isScalarFloatingType(Ty))
5109 // Immediate specifically not allowed 5087 // Immediate specifically not allowed
5110 NeedsReg = true; 5088 NeedsReg = true;
5111 if (!(Allowed & Legal_Mem) && isScalarFloatingType(Ty)) 5089 if (!(Allowed & Legal_Mem) && isScalarFloatingType(Ty))
5112 // On x86, FP constants are lowered to mem operands. 5090 // On x86, FP constants are lowered to mem operands.
5113 NeedsReg = true; 5091 NeedsReg = true;
5114 if (NeedsReg) { 5092 if (NeedsReg) {
5115 From = copyToReg(From, RegNum); 5093 From = copyToReg(From, RegNum);
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
5154 if (llvm::isa<Constant>(Src1)) { 5132 if (llvm::isa<Constant>(Src1)) {
5155 IsSrc1ImmOrReg = true; 5133 IsSrc1ImmOrReg = true;
5156 } else if (Variable *Var = llvm::dyn_cast<Variable>(Src1)) { 5134 } else if (Variable *Var = llvm::dyn_cast<Variable>(Src1)) {
5157 if (Var->hasReg()) 5135 if (Var->hasReg())
5158 IsSrc1ImmOrReg = true; 5136 IsSrc1ImmOrReg = true;
5159 } 5137 }
5160 return legalize(Src0, IsSrc1ImmOrReg ? (Legal_Reg | Legal_Mem) : Legal_Reg); 5138 return legalize(Src0, IsSrc1ImmOrReg ? (Legal_Reg | Legal_Mem) : Legal_Reg);
5161 } 5139 }
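The constraint encoded by the helper above: an x86 compare takes at most one memory operand, so Src0 may stay in memory only when Src1 is an immediate or already sits in a register; otherwise Src0 must be forced into a register. A hedged sketch of a caller (the helper's real name is in the elided lines above; legalizeSrc0ForCmp, Src0RM, and Src1RI are illustrative names, and _cmp is assumed to follow the same emitter naming convention as _mov and _lea):

    // Hypothetical icmp lowering: never emit a mem-mem compare.
    Operand *Src0RM = legalizeSrc0ForCmp(Src0, Src1); // Legal_Reg | Legal_Mem, or Legal_Reg only
    Operand *Src1RI = legalize(Src1, Legal_Reg | Legal_Imm);
    _cmp(Src0RM, Src1RI);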
5162 5140
5163 template <class Machine> 5141 template <class Machine>
5164 OperandX8632Mem *TargetX86Base<Machine>::formMemoryOperand(Operand *Opnd, 5142 typename TargetX86Base<Machine>::Traits::X86OperandMem *
5165 Type Ty, 5143 TargetX86Base<Machine>::formMemoryOperand(Operand *Opnd, Type Ty,
5166 bool DoLegalize) { 5144 bool DoLegalize) {
5167 OperandX8632Mem *Mem = llvm::dyn_cast<OperandX8632Mem>(Opnd); 5145 typename Traits::X86OperandMem *Mem =
5146 llvm::dyn_cast<typename Traits::X86OperandMem>(Opnd);
5168 // It may be the case that address mode optimization already creates 5147 // It may be the case that address mode optimization already creates
5169 // an OperandX8632Mem, so in that case it wouldn't need another level 5148 // an typename Traits::X86OperandMem, so in that case it wouldn't need another
5149 // level
jvoung (off chromium) 2015/07/06 18:58:46 "level" and "of transformation" can be on the same
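A usage sketch for formMemoryOperand, shown after this chunk's function body ends below: whatever shape the address arrives in (an existing X86OperandMem, a bare base Variable, or a bare Constant), the caller gets back a legalized memory operand ready to feed to a move.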
John 2015/07/06 22:30:09 Done.
5170 // of transformation. 5150 // of transformation.
5171 if (!Mem) { 5151 if (!Mem) {
5172 Variable *Base = llvm::dyn_cast<Variable>(Opnd); 5152 Variable *Base = llvm::dyn_cast<Variable>(Opnd);
5173 Constant *Offset = llvm::dyn_cast<Constant>(Opnd); 5153 Constant *Offset = llvm::dyn_cast<Constant>(Opnd);
5174 assert(Base || Offset); 5154 assert(Base || Offset);
5175 if (Offset) { 5155 if (Offset) {
5176 // During memory operand building, we do not blind or pool 5156 // During memory operand building, we do not blind or pool
5177 // the constant offset, we will work on the whole memory 5157 // the constant offset, we will work on the whole memory
5178 // operand later as one entity later, this save one instruction. 5158 // operand later as one entity later, this save one instruction.
5179 // By turning blinding and pooling off, we guarantee 5159 // By turning blinding and pooling off, we guarantee
5180 // legalize(Offset) will return a constant*. 5160 // legalize(Offset) will return a constant*.
5181 { 5161 {
5182 BoolFlagSaver B(RandomizationPoolingPaused, true); 5162 BoolFlagSaver B(RandomizationPoolingPaused, true);
5183 5163
5184 Offset = llvm::cast<Constant>(legalize(Offset)); 5164 Offset = llvm::cast<Constant>(legalize(Offset));
5185 } 5165 }
5186 5166
5187 assert(llvm::isa<ConstantInteger32>(Offset) || 5167 assert(llvm::isa<ConstantInteger32>(Offset) ||
5188 llvm::isa<ConstantRelocatable>(Offset)); 5168 llvm::isa<ConstantRelocatable>(Offset));
5189 } 5169 }
5190 Mem = OperandX8632Mem::create(Func, Ty, Base, Offset); 5170 Mem = Traits::X86OperandMem::create(Func, Ty, Base, Offset);
5191 } 5171 }
5192 // Do legalization, which contains randomization/pooling 5172 // Do legalization, which contains randomization/pooling
5193 // or do randomization/pooling. 5173 // or do randomization/pooling.
5194 return llvm::cast<OperandX8632Mem>( 5174 return llvm::cast<typename Traits::X86OperandMem>(
5195 DoLegalize ? legalize(Mem) : randomizeOrPoolImmediate(Mem)); 5175 DoLegalize ? legalize(Mem) : randomizeOrPoolImmediate(Mem));
5196 } 5176 }
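The sketch promised above (hypothetical; Addr and DestTy are illustrative names, and the caller is assumed to be a typical load lowering inside TargetX86Base<Machine>):

    // Hypothetical: lower "Dest = load DestTy, Addr".
    typename Traits::X86OperandMem *SrcMem = formMemoryOperand(Addr, DestTy);
    Variable *Dest = makeReg(DestTy);
    if (isVectorType(DestTy))
      _movp(Dest, SrcMem); // vector loads use movp, as in copyToReg() above
    else
      _mov(Dest, SrcMem);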
5197 5177
5198 template <class Machine> 5178 template <class Machine>
5199 Variable *TargetX86Base<Machine>::makeReg(Type Type, int32_t RegNum) { 5179 Variable *TargetX86Base<Machine>::makeReg(Type Type, int32_t RegNum) {
5200 // There aren't any 64-bit integer registers for x86-32. 5180 // There aren't any 64-bit integer registers for x86-32.
5201 assert(Type != IceType_i64); 5181 assert(Type != IceType_i64);
5202 Variable *Reg = Func->template makeVariable(Type); 5182 Variable *Reg = Func->template makeVariable(Type);
5203 if (RegNum == Variable::NoRegister) 5183 if (RegNum == Variable::NoRegister)
5204 Reg->setWeightInfinite(); 5184 Reg->setWeightInfinite();
5205 else 5185 else
5206 Reg->setRegNum(RegNum); 5186 Reg->setRegNum(RegNum);
5207 return Reg; 5187 return Reg;
5208 } 5188 }
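Two hedged examples of makeReg's calling modes: without a RegNum the fresh Variable gets infinite weight, which obliges the register allocator to place it in some physical register; with a RegNum it is pinned to that exact register. The Reg_eax enumerator name is an assumption based on the Traits::RegisterSet::val usage in the register-permutation code below:

    // Allocator chooses any suitable physical register:
    Variable *T = makeReg(IceType_i32);
    // Pinned to a specific register (enumerator name assumed from REGX8632_TABLE):
    Variable *InEax = makeReg(IceType_i32, Traits::RegisterSet::Reg_eax);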
5209 5189
5210 template <class Machine> void TargetX86Base<Machine>::postLower() { 5190 template <class Machine> void TargetX86Base<Machine>::postLower() {
5211 if (Ctx->getFlags().getOptLevel() == Opt_m1) 5191 if (Ctx->getFlags().getOptLevel() == Opt_m1)
5212 return; 5192 return;
5213 inferTwoAddress(); 5193 inferTwoAddress();
5214 } 5194 }
5215 5195
5216 template <class Machine> 5196 template <class Machine>
5217 void TargetX86Base<Machine>::makeRandomRegisterPermutation( 5197 void TargetX86Base<Machine>::makeRandomRegisterPermutation(
5218 llvm::SmallVectorImpl<int32_t> &Permutation, 5198 llvm::SmallVectorImpl<int32_t> &Permutation,
5219 const llvm::SmallBitVector &ExcludeRegisters) const { 5199 const llvm::SmallBitVector &ExcludeRegisters) const {
5220 // TODO(stichnot): Declaring Permutation this way loses type/size 5200 Traits::makeRandomRegisterPermutation(Ctx, Func, Permutation,
5221 // information. Fix this in conjunction with the caller-side TODO. 5201 ExcludeRegisters);
5222 assert(Permutation.size() >= Traits::RegisterSet::Reg_NUM);
5223 // Expected upper bound on the number of registers in a single
5224 // equivalence class. For x86-32, this would comprise the 8 XMM
5225 // registers. This is for performance, not correctness.
5226 static const unsigned MaxEquivalenceClassSize = 8;
5227 typedef llvm::SmallVector<int32_t, MaxEquivalenceClassSize> RegisterList;
5228 typedef std::map<uint32_t, RegisterList> EquivalenceClassMap;
5229 EquivalenceClassMap EquivalenceClasses;
5230 SizeT NumShuffled = 0, NumPreserved = 0;
5231
5232 // Build up the equivalence classes of registers by looking at the
5233 // register properties as well as whether the registers should be
5234 // explicitly excluded from shuffling.
5235 #define X(val, encode, name, name16, name8, scratch, preserved, stackptr, \
5236 frameptr, isI8, isInt, isFP) \
5237 if (ExcludeRegisters[Traits::RegisterSet::val]) { \
5238 /* val stays the same in the resulting permutation. */ \
5239 Permutation[Traits::RegisterSet::val] = Traits::RegisterSet::val; \
5240 ++NumPreserved; \
5241 } else { \
5242 const uint32_t Index = (scratch << 0) | (preserved << 1) | (isI8 << 2) | \
5243 (isInt << 3) | (isFP << 4); \
5244 /* val is assigned to an equivalence class based on its properties. */ \
5245 EquivalenceClasses[Index].push_back(Traits::RegisterSet::val); \
5246 }
5247 REGX8632_TABLE
5248 #undef X
5249
5250 RandomNumberGeneratorWrapper RNG(Ctx->getRNG());
5251
5252 // Shuffle the resulting equivalence classes.
5253 for (auto I : EquivalenceClasses) {
5254 const RegisterList &List = I.second;
5255 RegisterList Shuffled(List);
5256 RandomShuffle(Shuffled.begin(), Shuffled.end(), RNG);
5257 for (size_t SI = 0, SE = Shuffled.size(); SI < SE; ++SI) {
5258 Permutation[List[SI]] = Shuffled[SI];
5259 ++NumShuffled;
5260 }
5261 }
5262
5263 assert(NumShuffled + NumPreserved == Traits::RegisterSet::Reg_NUM);
5264
5265 if (Func->isVerbose(IceV_Random)) {
5266 OstreamLocker L(Func->getContext());
5267 Ostream &Str = Func->getContext()->getStrDump();
5268 Str << "Register equivalence classes:\n";
5269 for (auto I : EquivalenceClasses) {
5270 Str << "{";
5271 const RegisterList &List = I.second;
5272 bool First = true;
5273 for (int32_t Register : List) {
5274 if (!First)
5275 Str << " ";
5276 First = false;
5277 Str << getRegName(Register, IceType_i32);
5278 }
5279 Str << "}\n";
5280 }
5281 }
5282 } 5202 }
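The shuffle that now lives in Traits::makeRandomRegisterPermutation keeps the grouping shown in the removed code on the left: registers are bucketed by a five-bit property key so that only interchangeable registers trade places. A standalone check of that key computation (the per-register properties below are the usual x86-32 ones, stated as assumptions rather than read from REGX8632_TABLE):

    #include <cstdint>

    // Key layout from the removed code: scratch | preserved<<1 | isI8<<2 | isInt<<3 | isFP<<4.
    constexpr uint32_t classKey(bool Scratch, bool Preserved, bool IsI8, bool IsInt, bool IsFP) {
      return (Scratch << 0) | (Preserved << 1) | (IsI8 << 2) | (IsInt << 3) | (IsFP << 4);
    }
    // Assumed properties: eax = scratch, 8-bit capable, integer; esi = preserved integer
    // with no 8-bit alias; xmm0 = scratch floating-point/vector register.
    static_assert(classKey(true, false, true, true, false) == 13, "eax-like bucket");
    static_assert(classKey(false, true, false, true, false) == 10, "esi-like bucket");
    static_assert(classKey(true, false, false, false, true) == 17, "xmm-like bucket");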
5283 5203
5284 template <class Machine> 5204 template <class Machine>
5285 void TargetX86Base<Machine>::emit(const ConstantInteger32 *C) const { 5205 void TargetX86Base<Machine>::emit(const ConstantInteger32 *C) const {
5286 if (!BuildDefs::dump()) 5206 if (!BuildDefs::dump())
5287 return; 5207 return;
5288 Ostream &Str = Ctx->getStrEmit(); 5208 Ostream &Str = Ctx->getStrEmit();
5289 Str << getConstantPrefix() << C->getValue(); 5209 Str << getConstantPrefix() << C->getValue();
5290 } 5210 }
5291 5211
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after
5342 // the assigned register as this assignment is that start of its use-def 5262 // the assigned register as this assignment is that start of its use-def
5343 // chain. So we add RegNum argument here. 5263 // chain. So we add RegNum argument here.
5344 // Note we use 'lea' instruction instead of 'xor' to avoid affecting 5264 // Note we use 'lea' instruction instead of 'xor' to avoid affecting
5345 // the flags. 5265 // the flags.
5346 Variable *Reg = makeReg(IceType_i32, RegNum); 5266 Variable *Reg = makeReg(IceType_i32, RegNum);
5347 ConstantInteger32 *Integer = llvm::cast<ConstantInteger32>(Immediate); 5267 ConstantInteger32 *Integer = llvm::cast<ConstantInteger32>(Immediate);
5348 uint32_t Value = Integer->getValue(); 5268 uint32_t Value = Integer->getValue();
5349 uint32_t Cookie = Ctx->getRandomizationCookie(); 5269 uint32_t Cookie = Ctx->getRandomizationCookie();
5350 _mov(Reg, Ctx->getConstantInt(IceType_i32, Cookie + Value)); 5270 _mov(Reg, Ctx->getConstantInt(IceType_i32, Cookie + Value));
5351 Constant *Offset = Ctx->getConstantInt(IceType_i32, 0 - Cookie); 5271 Constant *Offset = Ctx->getConstantInt(IceType_i32, 0 - Cookie);
5352 _lea(Reg, 5272 _lea(Reg, Traits::X86OperandMem::create(Func, IceType_i32, Reg, Offset,
5353 OperandX8632Mem::create(Func, IceType_i32, Reg, Offset, nullptr, 0)); 5273 nullptr, 0));
5354 // make sure liveness analysis won't kill this variable, otherwise a 5274 // make sure liveness analysis won't kill this variable, otherwise a
5355 // liveness 5275 // liveness
5356 // assertion will be triggered. 5276 // assertion will be triggered.
5357 _set_dest_nonkillable(); 5277 _set_dest_nonkillable();
5358 if (Immediate->getType() != IceType_i32) { 5278 if (Immediate->getType() != IceType_i32) {
5359 Variable *TruncReg = makeReg(Immediate->getType(), RegNum); 5279 Variable *TruncReg = makeReg(Immediate->getType(), RegNum);
5360 _mov(TruncReg, Reg); 5280 _mov(TruncReg, Reg);
5361 return TruncReg; 5281 return TruncReg;
5362 } 5282 }
5363 return Reg; 5283 return Reg;
(...skipping 12 matching lines...) Expand all
5376 // the assigned register as this assignment is that start of its use-def 5296 // the assigned register as this assignment is that start of its use-def
5377 // chain. So we add RegNum argument here. 5297 // chain. So we add RegNum argument here.
5378 Variable *Reg = makeReg(Immediate->getType(), RegNum); 5298 Variable *Reg = makeReg(Immediate->getType(), RegNum);
5379 IceString Label; 5299 IceString Label;
5380 llvm::raw_string_ostream Label_stream(Label); 5300 llvm::raw_string_ostream Label_stream(Label);
5381 Immediate->emitPoolLabel(Label_stream); 5301 Immediate->emitPoolLabel(Label_stream);
5382 const RelocOffsetT Offset = 0; 5302 const RelocOffsetT Offset = 0;
5383 const bool SuppressMangling = true; 5303 const bool SuppressMangling = true;
5384 Constant *Symbol = 5304 Constant *Symbol =
5385 Ctx->getConstantSym(Offset, Label_stream.str(), SuppressMangling); 5305 Ctx->getConstantSym(Offset, Label_stream.str(), SuppressMangling);
5386 OperandX8632Mem *MemOperand = 5306 typename Traits::X86OperandMem *MemOperand =
5387 OperandX8632Mem::create(Func, Immediate->getType(), nullptr, Symbol); 5307 Traits::X86OperandMem::create(Func, Immediate->getType(), nullptr,
5308 Symbol);
5388 _mov(Reg, MemOperand); 5309 _mov(Reg, MemOperand);
5389 return Reg; 5310 return Reg;
5390 } 5311 }
5391 assert("Unsupported -randomize-pool-immediates option" && false); 5312 assert("Unsupported -randomize-pool-immediates option" && false);
5392 } 5313 }
5393 // the constant Immediate is not eligible for blinding/pooling 5314 // the constant Immediate is not eligible for blinding/pooling
5394 return Immediate; 5315 return Immediate;
5395 } 5316 }
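A worked example of the blinding arithmetic in the RPI_Randomize branch above (the cookie and immediate values are made up): the mov loads Cookie + Value, and the following lea with displacement 0 - Cookie folds the original value back without disturbing EFLAGS, so the raw immediate never appears in the instruction stream; the RPI_Pool branch instead replaces the immediate with a load from its constant-pool label. A standalone check of the wrap-around identity:

    #include <cstdint>

    // Illustrative values only.
    constexpr uint32_t Cookie = 0x9E3779B9u;
    constexpr uint32_t Value = 42u;
    // mov reg, (Cookie + Value); lea reg, (0 - Cookie)(reg)  ==>  reg == Value (mod 2^32).
    static_assert(uint32_t(Cookie + Value) + uint32_t(0u - Cookie) == Value,
                  "blinding round-trips modulo 2^32");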
5396 5317
5397 template <class Machine> 5318 template <class Machine>
5398 OperandX8632Mem * 5319 typename TargetX86Base<Machine>::Traits::X86OperandMem *
5399 TargetX86Base<Machine>::randomizeOrPoolImmediate(OperandX8632Mem *MemOperand, 5320 TargetX86Base<Machine>::randomizeOrPoolImmediate(
5400 int32_t RegNum) { 5321 typename Traits::X86OperandMem *MemOperand, int32_t RegNum) {
5401 assert(MemOperand); 5322 assert(MemOperand);
5402 if (Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == RPI_None || 5323 if (Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == RPI_None ||
5403 RandomizationPoolingPaused == true) { 5324 RandomizationPoolingPaused == true) {
5404 // immediates randomization/pooling is turned off 5325 // immediates randomization/pooling is turned off
5405 return MemOperand; 5326 return MemOperand;
5406 } 5327 }
5407 5328
5408 // If this memory operand is already a randommized one, we do 5329 // If this memory operand is already a randommized one, we do
5409 // not randomize it again. 5330 // not randomize it again.
5410 if (MemOperand->getRandomized()) 5331 if (MemOperand->getRandomized())
(...skipping 13 matching lines...) Expand all
5424 // => -cookie[RegTemp, index, shift] 5345 // => -cookie[RegTemp, index, shift]
5425 uint32_t Value = 5346 uint32_t Value =
5426 llvm::dyn_cast<ConstantInteger32>(MemOperand->getOffset()) 5347 llvm::dyn_cast<ConstantInteger32>(MemOperand->getOffset())
5427 ->getValue(); 5348 ->getValue();
5428 uint32_t Cookie = Ctx->getRandomizationCookie(); 5349 uint32_t Cookie = Ctx->getRandomizationCookie();
5429 Constant *Mask1 = Ctx->getConstantInt( 5350 Constant *Mask1 = Ctx->getConstantInt(
5430 MemOperand->getOffset()->getType(), Cookie + Value); 5351 MemOperand->getOffset()->getType(), Cookie + Value);
5431 Constant *Mask2 = 5352 Constant *Mask2 =
5432 Ctx->getConstantInt(MemOperand->getOffset()->getType(), 0 - Cookie); 5353 Ctx->getConstantInt(MemOperand->getOffset()->getType(), 0 - Cookie);
5433 5354
5434 OperandX8632Mem *TempMemOperand = OperandX8632Mem::create( 5355 typename Traits::X86OperandMem *TempMemOperand =
5435 Func, MemOperand->getType(), MemOperand->getBase(), Mask1); 5356 Traits::X86OperandMem::create(Func, MemOperand->getType(),
5357 MemOperand->getBase(), Mask1);
5436 // If we have already assigned a physical register, we must come from 5358 // If we have already assigned a physical register, we must come from
5437 // advancedPhiLowering()=>lowerAssign(). In this case we should reuse 5359 // advancedPhiLowering()=>lowerAssign(). In this case we should reuse
5438 // the assigned register as this assignment is that start of its use-def 5360 // the assigned register as this assignment is that start of its use-def
5439 // chain. So we add RegNum argument here. 5361 // chain. So we add RegNum argument here.
5440 Variable *RegTemp = makeReg(MemOperand->getOffset()->getType(), RegNum); 5362 Variable *RegTemp = makeReg(MemOperand->getOffset()->getType(), RegNum);
5441 _lea(RegTemp, TempMemOperand); 5363 _lea(RegTemp, TempMemOperand);
5442 // As source operand doesn't use the dstreg, we don't need to add 5364 // As source operand doesn't use the dstreg, we don't need to add
5443 // _set_dest_nonkillable(). 5365 // _set_dest_nonkillable().
5444 // But if we use the same Dest Reg, that is, with RegNum 5366 // But if we use the same Dest Reg, that is, with RegNum
5445 // assigned, we should add this _set_dest_nonkillable() 5367 // assigned, we should add this _set_dest_nonkillable()
5446 if (RegNum != Variable::NoRegister) 5368 if (RegNum != Variable::NoRegister)
5447 _set_dest_nonkillable(); 5369 _set_dest_nonkillable();
5448 5370
5449 OperandX8632Mem *NewMemOperand = OperandX8632Mem::create( 5371 typename Traits::X86OperandMem *NewMemOperand =
5450 Func, MemOperand->getType(), RegTemp, Mask2, MemOperand->getIndex(), 5372 Traits::X86OperandMem::create(Func, MemOperand->getType(), RegTemp,
5451 MemOperand->getShift(), MemOperand->getSegmentRegister()); 5373 Mask2, MemOperand->getIndex(),
5374 MemOperand->getShift(),
5375 MemOperand->getSegmentRegister());
5452 5376
5453 // Label this memory operand as randomize, so we won't randomize it 5377 // Label this memory operand as randomize, so we won't randomize it
5454 // again in case we call legalize() mutiple times on this memory 5378 // again in case we call legalize() mutiple times on this memory
5455 // operand. 5379 // operand.
5456 NewMemOperand->setRandomized(true); 5380 NewMemOperand->setRandomized(true);
5457 return NewMemOperand; 5381 return NewMemOperand;
5458 } 5382 }
5459 if (Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == RPI_Pool) { 5383 if (Ctx->getFlags().getRandomizeAndPoolImmediatesOption() == RPI_Pool) {
5460 // pool the constant offset 5384 // pool the constant offset
5461 // FROM: 5385 // FROM:
(...skipping 14 matching lines...) Expand all
5476 return MemOperand; 5400 return MemOperand;
5477 Variable *RegTemp = makeReg(IceType_i32); 5401 Variable *RegTemp = makeReg(IceType_i32);
5478 IceString Label; 5402 IceString Label;
5479 llvm::raw_string_ostream Label_stream(Label); 5403 llvm::raw_string_ostream Label_stream(Label);
5480 MemOperand->getOffset()->emitPoolLabel(Label_stream); 5404 MemOperand->getOffset()->emitPoolLabel(Label_stream);
5481 MemOperand->getOffset()->setShouldBePooled(true); 5405 MemOperand->getOffset()->setShouldBePooled(true);
5482 const RelocOffsetT SymOffset = 0; 5406 const RelocOffsetT SymOffset = 0;
5483 bool SuppressMangling = true; 5407 bool SuppressMangling = true;
5484 Constant *Symbol = Ctx->getConstantSym(SymOffset, Label_stream.str(), 5408 Constant *Symbol = Ctx->getConstantSym(SymOffset, Label_stream.str(),
5485 SuppressMangling); 5409 SuppressMangling);
5486 OperandX8632Mem *SymbolOperand = OperandX8632Mem::create( 5410 typename Traits::X86OperandMem *SymbolOperand =
5487 Func, MemOperand->getOffset()->getType(), nullptr, Symbol); 5411 Traits::X86OperandMem::create(
5412 Func, MemOperand->getOffset()->getType(), nullptr, Symbol);
5488 _mov(RegTemp, SymbolOperand); 5413 _mov(RegTemp, SymbolOperand);
5489 // If we have a base variable here, we should add the lea instruction 5414 // If we have a base variable here, we should add the lea instruction
5490 // to add the value of the base variable to RegTemp. If there is no 5415 // to add the value of the base variable to RegTemp. If there is no
5491 // base variable, we won't need this lea instruction. 5416 // base variable, we won't need this lea instruction.
5492 if (MemOperand->getBase()) { 5417 if (MemOperand->getBase()) {
5493 OperandX8632Mem *CalculateOperand = OperandX8632Mem::create( 5418 typename Traits::X86OperandMem *CalculateOperand =
5494 Func, MemOperand->getType(), MemOperand->getBase(), nullptr, 5419 Traits::X86OperandMem::create(
5495 RegTemp, 0, MemOperand->getSegmentRegister()); 5420 Func, MemOperand->getType(), MemOperand->getBase(), nullptr,
5421 RegTemp, 0, MemOperand->getSegmentRegister());
5496 _lea(RegTemp, CalculateOperand); 5422 _lea(RegTemp, CalculateOperand);
5497 _set_dest_nonkillable(); 5423 _set_dest_nonkillable();
5498 } 5424 }
5499 OperandX8632Mem *NewMemOperand = OperandX8632Mem::create( 5425 typename Traits::X86OperandMem *NewMemOperand =
5500 Func, MemOperand->getType(), RegTemp, nullptr, 5426 Traits::X86OperandMem::create(Func, MemOperand->getType(), RegTemp,
5501 MemOperand->getIndex(), MemOperand->getShift(), 5427 nullptr, MemOperand->getIndex(),
5502 MemOperand->getSegmentRegister()); 5428 MemOperand->getShift(),
5429 MemOperand->getSegmentRegister());
5503 return NewMemOperand; 5430 return NewMemOperand;
5504 } 5431 }
5505 assert("Unsupported -randomize-pool-immediates option" && false); 5432 assert("Unsupported -randomize-pool-immediates option" && false);
5506 } 5433 }
5507 } 5434 }
5508 // the offset is not eligible for blinding or pooling, return the original 5435 // the offset is not eligible for blinding or pooling, return the original
5509 // mem operand 5436 // mem operand
5510 return MemOperand; 5437 return MemOperand;
5511 } 5438 }
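For the memory-operand form above, the same cookie trick is split across two address computations: the first lea materializes RegTemp = Base + (Cookie + Offset), and the final operand uses displacement 0 - Cookie with the original index and shift, so the effective address is unchanged while the plain offset never appears. (The RPI_Pool branch analogously loads the pooled offset into RegTemp and, when a base is present, adds it with one more lea.) A standalone check that the rewritten address matches the original, using made-up operand values:

    #include <cstdint>

    // Illustrative operand pieces only.
    constexpr uint32_t Cookie = 0x12345678u, Offset = 0x100u, Base = 0x8000u,
                       Index = 3u, Shift = 2u;
    // Original: Offset(Base, Index, 1 << Shift).
    constexpr uint32_t Before = Base + Offset + (Index << Shift);
    // Blinded: RegTemp = (Cookie + Offset)(Base); then (0 - Cookie)(RegTemp, Index, 1 << Shift).
    constexpr uint32_t RegTemp = Base + (Cookie + Offset);
    constexpr uint32_t After = RegTemp + (0u - Cookie) + (Index << Shift);
    static_assert(Before == After, "blinding preserves the effective address mod 2^32");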
5512 5439
5513 } // end of namespace X86Internal 5440 } // end of namespace X86Internal
5514 } // end of namespace Ice 5441 } // end of namespace Ice
5515 5442
5516 #endif // SUBZERO_SRC_ICETARGETLOWERINGX86BASEIMPL_H 5443 #endif // SUBZERO_SRC_ICETARGETLOWERINGX86BASEIMPL_H