Chromium Code Reviews

Unified Diff: src/IceTargetLoweringX8632.cpp

Issue 877003003: Subzero: Use a "known" version of clang-format. (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Add a clang-format blacklist. Fix formatting "errors". Created 5 years, 11 months ago
//===- subzero/src/IceTargetLoweringX8632.cpp - x86-32 lowering -----------===//
//
// The Subzero Code Generator
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetLoweringX8632 class, which
(...skipping 43 matching lines...)
const struct TableFcmp_ {
  uint32_t Default;
  bool SwapScalarOperands;
  CondX86::BrCond C1, C2;
  bool SwapVectorOperands;
  CondX86::CmppsCond Predicate;
} TableFcmp[] = {
#define X(val, dflt, swapS, C1, C2, swapV, pred) \
  { dflt, swapS, CondX86::C1, CondX86::C2, swapV, CondX86::pred } \
  ,
    FCMPX8632_TABLE
#undef X
};
const size_t TableFcmpSize = llvm::array_lengthof(TableFcmp);
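[Reviewer note] For readers new to the x-macro idiom used throughout this file, here is a minimal, self-contained sketch (FRUIT_TABLE and all names are hypothetical, not Subzero code) of how one list definition drives both an enum and a parallel attribute table:

// Hypothetical x-macro list; each expansion site redefines X to pick the
// fields it needs, exactly as FCMPX8632_TABLE is consumed above.
#define FRUIT_TABLE \
  X(Apple, 10)      \
  X(Pear, 20)

enum Fruit {
#define X(name, weight) Fruit_##name,
  FRUIT_TABLE
#undef X
  Fruit_NUM
};

const struct FruitAttributes_ {
  int Weight;
} FruitAttributes[] = {
#define X(name, weight) {weight},
  FRUIT_TABLE
#undef X
};

// Because both expansions walk the same list, the sizes must agree.
static_assert(sizeof(FruitAttributes) / sizeof(FruitAttributes[0]) == Fruit_NUM,
              "attribute table out of sync with enum");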

// The following table summarizes the logic for lowering the icmp instruction
// for i32 and narrower types. Each icmp condition has a clear mapping to an
// x86 conditional branch instruction.

const struct TableIcmp32_ {
  CondX86::BrCond Mapping;
} TableIcmp32[] = {
#define X(val, C_32, C1_64, C2_64, C3_64) \
  { CondX86::C_32 } \
  ,
    ICMPX8632_TABLE
#undef X
};
const size_t TableIcmp32Size = llvm::array_lengthof(TableIcmp32);

// The following table summarizes the logic for lowering the icmp instruction
// for the i64 type. For Eq and Ne, two separate 32-bit comparisons and
// conditional branches are needed. For the other conditions, three separate
// conditional branches are needed.
const struct TableIcmp64_ {
  CondX86::BrCond C1, C2, C3;
} TableIcmp64[] = {
#define X(val, C_32, C1_64, C2_64, C3_64) \
  { CondX86::C1_64, CondX86::C2_64, CondX86::C3_64 } \
  ,
    ICMPX8632_TABLE
#undef X
};
const size_t TableIcmp64Size = llvm::array_lengthof(TableIcmp64);
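[Reviewer note] To make the three-branch count concrete, here is a hedged, standalone C++ model (the function name is illustrative, not Subzero code) of how an unsigned 64-bit less-than decomposes into 32-bit comparisons; the C1/C2/C3 comments mirror the table's three branch conditions:

#include <cstdint>

bool icmpUlt64(uint64_t A, uint64_t B) {
  uint32_t AHi = static_cast<uint32_t>(A >> 32);
  uint32_t BHi = static_cast<uint32_t>(B >> 32);
  if (AHi < BHi)
    return true; // C1: branch to the "taken" target
  if (AHi > BHi)
    return false; // C2: branch to the "not taken" target
  // C3: the high halves are equal, so the low halves decide.
  return static_cast<uint32_t>(A) < static_cast<uint32_t>(B);
}

Eq and Ne need only two branches because equality of both halves can be checked without an ordering decision in between.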

CondX86::BrCond getIcmp32Mapping(InstIcmp::ICond Cond) {
  size_t Index = static_cast<size_t>(Cond);
  assert(Index < TableIcmp32Size);
  return TableIcmp32[Index].Mapping;
}

const struct TableTypeX8632Attributes_ {
  Type InVectorElementType;
} TableTypeX8632Attributes[] = {
#define X(tag, elementty, cvt, sdss, pack, width, fld) \
  { elementty } \
  ,
    ICETYPEX8632_TABLE
#undef X
};
const size_t TableTypeX8632AttributesSize =
    llvm::array_lengthof(TableTypeX8632Attributes);

// Return the type which the elements of the vector have in the X86
// representation of the vector.
Type getInVectorElementType(Type Ty) {
  assert(isVectorType(Ty));
  size_t Index = static_cast<size_t>(Ty);
  (void)Index;
  assert(Index < TableTypeX8632AttributesSize);
(...skipping 24 matching lines...)
}

// Value is in bytes. Return Value adjusted to the next highest multiple
// of the stack alignment.
uint32_t applyStackAlignment(uint32_t Value) {
  return applyAlignment(Value, X86_STACK_ALIGNMENT_BYTES);
}
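[Reviewer note] applyAlignment itself is defined in lines skipped above; a sketch of the standard round-up computation it is assumed to perform (valid for power-of-two alignments):

#include <cstdint>

// Round Value up to the next multiple of Alignment, which must be a power
// of two. E.g., applyAlignment(13, 16) == 16 and applyAlignment(16, 16) == 16.
uint32_t applyAlignment(uint32_t Value, uint32_t Alignment) {
  return (Value + Alignment - 1) & ~(Alignment - 1);
}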

// Instruction set options
namespace cl = ::llvm::cl;
-cl::opt<TargetX8632::X86InstructionSet>
-    CLInstructionSet("mattr", cl::desc("X86 target attributes"),
-                     cl::init(TargetX8632::SSE2),
-                     cl::values(clEnumValN(TargetX8632::SSE2, "sse2",
-                                           "Enable SSE2 instructions (default)"),
-                                clEnumValN(TargetX8632::SSE4_1, "sse4.1",
-                                           "Enable SSE 4.1 instructions"),
-                                clEnumValEnd));
+cl::opt<TargetX8632::X86InstructionSet> CLInstructionSet(
+    "mattr", cl::desc("X86 target attributes"), cl::init(TargetX8632::SSE2),
+    cl::values(clEnumValN(TargetX8632::SSE2, "sse2",
+                          "Enable SSE2 instructions (default)"),
+               clEnumValN(TargetX8632::SSE4_1, "sse4.1",
+                          "Enable SSE 4.1 instructions"),
+               clEnumValEnd));
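[Reviewer note] The hunk above is purely a clang-format reflow of an llvm::cl enumerated option; behavior is unchanged. For context, a minimal standalone option in the same LLVM 3.x-era style (hypothetical flag name; clEnumValEnd was removed in later LLVM versions):

#include "llvm/Support/CommandLine.h"

namespace cl = llvm::cl;

enum OptLevel { Opt_Fast, Opt_Small };

// Hypothetical -opt-level flag; cl::ParseCommandLineOptions fills it in
// from argv, defaulting to Opt_Fast when the flag is absent.
static cl::opt<OptLevel> OLevel(
    "opt-level", cl::desc("Optimization level"), cl::init(Opt_Fast),
    cl::values(clEnumValN(Opt_Fast, "fast", "Optimize for speed"),
               clEnumValN(Opt_Small, "small", "Optimize for size"),
               clEnumValEnd));

int main(int argc, char **argv) {
  cl::ParseCommandLineOptions(argc, argv);
  return OLevel == Opt_Small ? 1 : 0;
}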

// In some cases, there are x-macros tables for both high-level and
// low-level instructions/operands that use the same enum key value.
// The tables are kept separate to maintain a proper separation
// between abstraction layers. There is a risk that the tables could
// get out of sync if enum values are reordered or if entries are
// added or deleted. The following dummy namespaces use
// static_asserts to ensure everything is kept in sync.
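[Reviewer note] A hedged sketch of that dummy-namespace pattern (hypothetical two-layer enums; the real checks over FCMPX8632_TABLE begin right below):

// Hypothetical high-level and low-level enums that must stay index-compatible.
// Reordering either one breaks the build rather than silently miscompiling.
namespace dummy_sync_check {
enum HiLevelCond { HL_Eq, HL_Ne };
enum LoLevelCond { LL_Eq, LL_Ne };
static_assert(static_cast<int>(HL_Eq) == static_cast<int>(LL_Eq),
              "HiLevelCond/LoLevelCond out of sync for Eq");
static_assert(static_cast<int>(HL_Ne) == static_cast<int>(LL_Ne),
              "HiLevelCond/LoLevelCond out of sync for Ne");
} // namespace dummy_sync_check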
// Validate the enum values in FCMPX8632_TABLE.
(...skipping 271 matching lines...)
  if (InstX8632Br *Br = llvm::dyn_cast<InstX8632Br>(I)) {
    return Br->optimizeBranch(NextNode);
  }
  return false;
}

IceString TargetX8632::RegNames[] = {
#define X(val, encode, name, name16, name8, scratch, preserved, stackptr, \
          frameptr, isI8, isInt, isFP) \
  name,
    REGX8632_TABLE
#undef X
};
Variable *TargetX8632::getPhysicalRegister(SizeT RegNum, Type Ty) {
  if (Ty == IceType_void)
    Ty = IceType_i32;
  if (PhysicalRegisters[Ty].empty())
    PhysicalRegisters[Ty].resize(RegX8632::Reg_NUM);
  assert(RegNum < PhysicalRegisters[Ty].size());
  Variable *Reg = PhysicalRegisters[Ty][RegNum];
(...skipping 10 matching lines...)
  }
  return Reg;
}
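[Reviewer note] The shape of getPhysicalRegister is a lazily grown per-type memo table: one vector per type, sized on first use, with each slot created once (in the lines skipped above) and reused afterwards. A reduced sketch under hypothetical types:

#include <cassert>
#include <cstddef>
#include <map>
#include <vector>

struct Reg { size_t Num; };

std::map<int, std::vector<Reg *>> Cache; // keyed by a type tag

// Return the canonical Reg for (TypeTag, Num), creating it on first request.
Reg *getCachedReg(int TypeTag, size_t Num, size_t Capacity) {
  std::vector<Reg *> &Bucket = Cache[TypeTag];
  if (Bucket.empty())
    Bucket.resize(Capacity, nullptr);
  assert(Num < Bucket.size());
  if (Bucket[Num] == nullptr)
    Bucket[Num] = new Reg{Num};
  return Bucket[Num];
}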

IceString TargetX8632::getRegName(SizeT RegNum, Type Ty) const {
  assert(RegNum < RegX8632::Reg_NUM);
  static IceString RegNames8[] = {
#define X(val, encode, name, name16, name8, scratch, preserved, stackptr, \
          frameptr, isI8, isInt, isFP) \
  name8,
    REGX8632_TABLE
#undef X
  };
  static IceString RegNames16[] = {
#define X(val, encode, name, name16, name8, scratch, preserved, stackptr, \
          frameptr, isI8, isInt, isFP) \
  name16,
    REGX8632_TABLE
#undef X
  };
  switch (Ty) {
  case IceType_i1:
  case IceType_i8:
    return RegNames8[RegNum];
  case IceType_i16:
    return RegNames16[RegNum];
  default:
    return RegNames[RegNum];
(...skipping 2249 matching lines...)
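[Reviewer note] Taken together, the three name tables implement standard x86 sub-register naming. Assuming the usual Subzero register enumerators (RegX8632::Reg_eax is not shown in this hunk), the expected behavior is:

// Illustrative only:
// getRegName(RegX8632::Reg_eax, IceType_i32) -> "eax"
// getRegName(RegX8632::Reg_eax, IceType_i16) -> "ax"
// getRegName(RegX8632::Reg_eax, IceType_i8)  -> "al"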
  //
  // insertelement into index 2 (result is stored in T):
  //   T := SourceVectRM
  //   ElementR := ElementR[0, 0] T[0, 3]
  //   T := T[0, 1] ElementR[0, 3]
  //
  // insertelement into index 3 (result is stored in T):
  //   T := SourceVectRM
  //   ElementR := ElementR[0, 0] T[0, 2]
  //   T := T[0, 1] ElementR[3, 0]
-  const unsigned char Mask1[3] = { 0, 192, 128 };
-  const unsigned char Mask2[3] = { 227, 196, 52 };
+  const unsigned char Mask1[3] = {0, 192, 128};
+  const unsigned char Mask2[3] = {227, 196, 52};

  Constant *Mask1Constant = Ctx->getConstantInt32(Mask1[Index - 1]);
  Constant *Mask2Constant = Ctx->getConstantInt32(Mask2[Index - 1]);

  if (Index == 1) {
    _shufps(ElementR, SourceVectRM, Mask1Constant);
    _shufps(ElementR, SourceVectRM, Mask2Constant);
    _movp(Inst->getDest(), ElementR);
  } else {
    Variable *T = makeReg(Ty);
(...skipping 22 matching lines...)
    Variable *T = makeReg(Ty);
    _movp(T, Slot);
    _movp(Inst->getDest(), T);
  }
}
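[Reviewer note] On the Mask1/Mask2 constants above: a shufps immediate packs four 2-bit element selectors, with selector i in bits [2i+1:2i]. The decoding below is my reading of the constants, not taken from the original comment; the helper name is hypothetical, and the static_asserts are plain compile-time arithmetic:

#include <cstdint>

// Build a 4-element shuffle immediate from per-slot selectors (each 0..3).
// Slot 0 occupies bits [1:0], slot 1 bits [3:2], and so on.
constexpr uint8_t shuffleMask(unsigned Sel0, unsigned Sel1, unsigned Sel2,
                              unsigned Sel3) {
  return static_cast<uint8_t>(Sel0 | (Sel1 << 2) | (Sel2 << 4) | (Sel3 << 6));
}

// Decoding the table constants:
static_assert(shuffleMask(0, 0, 0, 0) == 0, "Mask1, Index == 1");
static_assert(shuffleMask(0, 0, 0, 3) == 192, "Mask1, Index == 2");
static_assert(shuffleMask(0, 0, 0, 2) == 128, "Mask1, Index == 3");
static_assert(shuffleMask(3, 0, 2, 3) == 227, "Mask2, Index == 1");
static_assert(shuffleMask(0, 1, 0, 3) == 196, "Mask2, Index == 2");
static_assert(shuffleMask(0, 1, 3, 0) == 52, "Mask2, Index == 3");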

void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
  switch (Instr->getIntrinsicInfo().ID) {
  case Intrinsics::AtomicCmpxchg: {
    if (!Intrinsics::VerifyMemoryOrder(
            llvm::cast<ConstantInteger32>(Instr->getArg(3))->getValue())) {
      Func->setError("Unexpected memory ordering (success) for AtomicCmpxchg");
      return;
    }
    if (!Intrinsics::VerifyMemoryOrder(
            llvm::cast<ConstantInteger32>(Instr->getArg(4))->getValue())) {
      Func->setError("Unexpected memory ordering (failure) for AtomicCmpxchg");
      return;
    }
    Variable *DestPrev = Instr->getDest();
    Operand *PtrToMem = Instr->getArg(0);
    Operand *Expected = Instr->getArg(1);
    Operand *Desired = Instr->getArg(2);
    if (tryOptimizedCmpxchgCmpBr(DestPrev, PtrToMem, Expected, Desired))
      return;
    lowerAtomicCmpxchg(DestPrev, PtrToMem, Expected, Desired);
    return;
  }
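[Reviewer note] Note the llvm::cast<ConstantInteger32>(...) calls here versus the llvm::dyn_cast in the branch-optimization code above: cast asserts the operand already has the expected type, while dyn_cast returns null on mismatch. A minimal illustration with hypothetical classes using LLVM's classof protocol:

#include "llvm/Support/Casting.h"

struct Inst {
  enum Kind { K_Branch, K_Other } TheKind;
  explicit Inst(Kind K) : TheKind(K) {}
};
struct Branch : Inst {
  Branch() : Inst(K_Branch) {}
  static bool classof(const Inst *I) { return I->TheKind == K_Branch; }
};

void example(Inst *I) {
  // dyn_cast: checked downcast, yields nullptr when I is not a Branch.
  if (Branch *B = llvm::dyn_cast<Branch>(I))
    (void)B;
  // cast: asserts in debug builds, unchecked in release; use only when the
  // type is already guaranteed, as with the intrinsic's constant arguments.
  Branch *B2 = llvm::cast<Branch>(I);
  (void)B2;
}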
  case Intrinsics::AtomicFence:
    if (!Intrinsics::VerifyMemoryOrder(
            llvm::cast<ConstantInteger32>(Instr->getArg(0))->getValue())) {
      Func->setError("Unexpected memory ordering for AtomicFence");
      return;
    }
    _mfence();
    return;
  case Intrinsics::AtomicFenceAll:
    // NOTE: FenceAll should prevent any load/store from being moved
    // across the fence (both atomic and non-atomic). The InstX8632Mfence
    // instruction is currently marked coarsely as "HasSideEffects".
    _mfence();
(...skipping 25 matching lines...)
      return;
    }
    // The PNaCl ABI requires the byte size to be a compile-time constant.
    Func->setError("AtomicIsLockFree byte size should be compile-time const");
    return;
  }
  case Intrinsics::AtomicLoad: {
    // We require the memory address to be naturally aligned. Given that is
    // the case, normal loads are atomic.
    if (!Intrinsics::VerifyMemoryOrder(
            llvm::cast<ConstantInteger32>(Instr->getArg(1))->getValue())) {
      Func->setError("Unexpected memory ordering for AtomicLoad");
      return;
    }
    Variable *Dest = Instr->getDest();
    if (Dest->getType() == IceType_i64) {
      // Follow what GCC does and use a movq instead of what lowerLoad()
      // normally does (split the load into two).
      // Thus, this skips load/arithmetic op folding. Load/arithmetic folding
      // can't happen anyway, since this is x86-32 and integer arithmetic only
      // happens on 32-bit quantities.
(...skipping 12 matching lines...)
    lowerLoad(Load);
    // Make sure the atomic load isn't elided when unused, by adding a FakeUse.
    // Since lowerLoad may fuse the load w/ an arithmetic instruction,
    // insert the FakeUse on the last-inserted instruction's dest.
    Context.insert(
        InstFakeUse::create(Func, Context.getLastInserted()->getDest()));
    return;
  }
  case Intrinsics::AtomicRMW:
    if (!Intrinsics::VerifyMemoryOrder(
            llvm::cast<ConstantInteger32>(Instr->getArg(3))->getValue())) {
      Func->setError("Unexpected memory ordering for AtomicRMW");
      return;
    }
    lowerAtomicRMW(Instr->getDest(),
                   static_cast<uint32_t>(llvm::cast<ConstantInteger32>(
                       Instr->getArg(0))->getValue()),
                   Instr->getArg(1), Instr->getArg(2));
    return;
  case Intrinsics::AtomicStore: {
    if (!Intrinsics::VerifyMemoryOrder(
            llvm::cast<ConstantInteger32>(Instr->getArg(2))->getValue())) {
      Func->setError("Unexpected memory ordering for AtomicStore");
      return;
    }
    // We require the memory address to be naturally aligned. Given that is
    // the case, normal stores are atomic.
    // Add a fence after the store to make it visible.
    Operand *Value = Instr->getArg(0);
    Operand *Ptr = Instr->getArg(1);
    if (Value->getType() == IceType_i64) {
      // Use a movq instead of what lowerStore() normally does
(...skipping 1640 matching lines...)
    return;

  Ostream &Str = Ctx->getStrEmit();

  const VariableDeclaration::InitializerListType &Initializers =
      Var.getInitializers();

  // If external and not initialized, this must be a cross test.
  // Don't generate a declaration for such cases.
  bool IsExternal = Var.isExternal() || Ctx->getFlags().DisableInternal;
-  if (IsExternal && !Var.hasInitializer()) return;
+  if (IsExternal && !Var.hasInitializer())
+    return;

  bool HasNonzeroInitializer = Var.hasNonzeroInitializer();
  bool IsConstant = Var.getIsConstant();
  uint32_t Align = Var.getAlignment();
  SizeT Size = Var.getNumBytes();
  IceString MangledName = Var.mangleName(Ctx);
  IceString SectionSuffix = "";
  if (Ctx->getFlags().DataSections)
    SectionSuffix = "." + MangledName;
(...skipping 120 matching lines...)
    Writer->writeConstantPool<ConstantFloat>(IceType_f32);
    Writer->writeConstantPool<ConstantDouble>(IceType_f64);
  } else {
    OstreamLocker L(Ctx);
    emitConstantPool<PoolTypeConverter<float>>(Ctx);
    emitConstantPool<PoolTypeConverter<double>>(Ctx);
  }
}

} // end of namespace Ice