OLD | NEW |
1 //===- subzero/src/IceTargetLowering.cpp - Basic lowering implementation --===// | 1 //===- subzero/src/IceTargetLowering.cpp - Basic lowering implementation --===// |
2 // | 2 // |
3 // The Subzero Code Generator | 3 // The Subzero Code Generator |
4 // | 4 // |
5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
7 // | 7 // |
8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
9 /// | 9 /// |
10 /// \file | 10 /// \file |
(...skipping 194 matching lines...)
205 for (SizeT TypeIndex = 0; TypeIndex < TypeToRegisterSetSize; | 205 for (SizeT TypeIndex = 0; TypeIndex < TypeToRegisterSetSize; |
206 ++TypeIndex) { | 206 ++TypeIndex) { |
207 if (RClass.empty() || | 207 if (RClass.empty() || |
208 RClass == getRegClassName(static_cast<RegClass>(TypeIndex))) { | 208 RClass == getRegClassName(static_cast<RegClass>(TypeIndex))) { |
209 RegSet[TypeIndex][RegIndex] = TypeToRegisterSet[TypeIndex][RegIndex]; | 209 RegSet[TypeIndex][RegIndex] = TypeToRegisterSet[TypeIndex][RegIndex]; |
210 } | 210 } |
211 } | 211 } |
212 } | 212 } |
213 }; | 213 }; |
214 | 214 |
215 processRegList(Ctx->getFlags().getUseRestrictedRegisters(), UseSet); | 215 processRegList(getFlags().getUseRestrictedRegisters(), UseSet); |
216 processRegList(Ctx->getFlags().getExcludedRegisters(), ExcludeSet); | 216 processRegList(getFlags().getExcludedRegisters(), ExcludeSet); |
217 | 217 |
218 if (!BadRegNames.empty()) { | 218 if (!BadRegNames.empty()) { |
219 std::string Buffer; | 219 std::string Buffer; |
220 llvm::raw_string_ostream StrBuf(Buffer); | 220 llvm::raw_string_ostream StrBuf(Buffer); |
221 StrBuf << "Unrecognized use/exclude registers:"; | 221 StrBuf << "Unrecognized use/exclude registers:"; |
222 for (const auto &RegName : BadRegNames) | 222 for (const auto &RegName : BadRegNames) |
223 StrBuf << " " << RegName; | 223 StrBuf << " " << RegName; |
224 llvm::report_fatal_error(StrBuf.str()); | 224 llvm::report_fatal_error(StrBuf.str()); |
225 } | 225 } |
226 | 226 |
227 // Apply filters. | 227 // Apply filters. |
228 for (size_t TypeIndex = 0; TypeIndex < TypeToRegisterSetSize; ++TypeIndex) { | 228 for (size_t TypeIndex = 0; TypeIndex < TypeToRegisterSetSize; ++TypeIndex) { |
229 SmallBitVector *TypeBitSet = &TypeToRegisterSet[TypeIndex]; | 229 SmallBitVector *TypeBitSet = &TypeToRegisterSet[TypeIndex]; |
230 SmallBitVector *UseBitSet = &UseSet[TypeIndex]; | 230 SmallBitVector *UseBitSet = &UseSet[TypeIndex]; |
231 SmallBitVector *ExcludeBitSet = &ExcludeSet[TypeIndex]; | 231 SmallBitVector *ExcludeBitSet = &ExcludeSet[TypeIndex]; |
232 if (UseBitSet->any()) | 232 if (UseBitSet->any()) |
233 *TypeBitSet = *UseBitSet; | 233 *TypeBitSet = *UseBitSet; |
234 (*TypeBitSet).reset(*ExcludeBitSet); | 234 (*TypeBitSet).reset(*ExcludeBitSet); |
235 } | 235 } |
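A minimal standalone sketch of the filter semantics applied above, using std::bitset in place of SmallBitVector and hypothetical names: a non-empty use set replaces the default register set for a class, and excluded registers are then cleared, so exclusion always wins.

#include <bitset>

constexpr size_t NumRegsDemo = 8;

std::bitset<NumRegsDemo> applyFilters(std::bitset<NumRegsDemo> Default,
                                      std::bitset<NumRegsDemo> Use,
                                      std::bitset<NumRegsDemo> Exclude) {
  if (Use.any())       // an explicit use list overrides the default set
    Default = Use;
  Default &= ~Exclude; // mirrors TypeBitSet->reset(*ExcludeBitSet)
  return Default;
}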
236 | 236 |
237 // Display filtered register sets, if requested. | 237 // Display filtered register sets, if requested. |
238 if (BuildDefs::dump() && NumRegs && | 238 if (BuildDefs::dump() && NumRegs && |
239 (Ctx->getFlags().getVerbose() & IceV_AvailableRegs)) { | 239 (getFlags().getVerbose() & IceV_AvailableRegs)) { |
240 Ostream &Str = Ctx->getStrDump(); | 240 Ostream &Str = Ctx->getStrDump(); |
241 const std::string Indent = " "; | 241 const std::string Indent = " "; |
242 const std::string IndentTwice = Indent + Indent; | 242 const std::string IndentTwice = Indent + Indent; |
243 Str << "Registers available for register allocation:\n"; | 243 Str << "Registers available for register allocation:\n"; |
244 for (size_t TypeIndex = 0; TypeIndex < TypeToRegisterSetSize; ++TypeIndex) { | 244 for (size_t TypeIndex = 0; TypeIndex < TypeToRegisterSetSize; ++TypeIndex) { |
245 Str << Indent << getRegClassName(static_cast<RegClass>(TypeIndex)) | 245 Str << Indent << getRegClassName(static_cast<RegClass>(TypeIndex)) |
246 << ":\n"; | 246 << ":\n"; |
247 printRegisterSet(Str, TypeToRegisterSet[TypeIndex], getRegName, | 247 printRegisterSet(Str, TypeToRegisterSet[TypeIndex], getRegName, |
248 IndentTwice); | 248 IndentTwice); |
249 } | 249 } |
250 Str << "\n"; | 250 Str << "\n"; |
251 } | 251 } |
252 } | 252 } |
253 | 253 |
254 std::unique_ptr<TargetLowering> | 254 std::unique_ptr<TargetLowering> |
255 TargetLowering::createLowering(TargetArch Target, Cfg *Func) { | 255 TargetLowering::createLowering(TargetArch Target, Cfg *Func) { |
256 switch (Target) { | 256 switch (Target) { |
257 default: | 257 default: |
258 badTargetFatalError(Target); | 258 badTargetFatalError(Target); |
259 #define SUBZERO_TARGET(X) \ | 259 #define SUBZERO_TARGET(X) \ |
260 case TARGET_LOWERING_CLASS_FOR(X): \ | 260 case TARGET_LOWERING_CLASS_FOR(X): \ |
261 return ::X::createTargetLowering(Func); | 261 return ::X::createTargetLowering(Func); |
262 #include "SZTargets.def" | 262 #include "SZTargets.def" |
263 #undef SUBZERO_TARGET | 263 #undef SUBZERO_TARGET |
264 } | 264 } |
265 } | 265 } |
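createLowering relies on the X-macro idiom: each inclusion of SZTargets.def re-expands SUBZERO_TARGET once per registered target. A self-contained demo of the same technique, with hypothetical target names (the real entries live in SZTargets.def):

#define DEMO_TARGETS \
  X(Alpha)           \
  X(Beta)

enum class DemoTarget {
#define X(T) T,
  DEMO_TARGETS
#undef X
};

const char *demoTargetName(DemoTarget T) {
  switch (T) {
#define X(T)                                                                   \
  case DemoTarget::T:                                                          \
    return #T;
    DEMO_TARGETS
#undef X
  }
  return "unknown";
}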
266 | 266 |
267 void TargetLowering::staticInit(GlobalContext *Ctx) { | 267 void TargetLowering::staticInit(GlobalContext *Ctx) { |
268 const TargetArch Target = Ctx->getFlags().getTargetArch(); | 268 const TargetArch Target = getFlags().getTargetArch(); |
269 // Call the specified target's static initializer. | 269 // Call the specified target's static initializer. |
270 switch (Target) { | 270 switch (Target) { |
271 default: | 271 default: |
272 badTargetFatalError(Target); | 272 badTargetFatalError(Target); |
273 #define SUBZERO_TARGET(X) \ | 273 #define SUBZERO_TARGET(X) \ |
274 case TARGET_LOWERING_CLASS_FOR(X): { \ | 274 case TARGET_LOWERING_CLASS_FOR(X): { \ |
275 static bool InitGuard##X = false; \ | 275 static bool InitGuard##X = false; \ |
276 if (InitGuard##X) { \ | 276 if (InitGuard##X) { \ |
277 return; \ | 277 return; \ |
278 } \ | 278 } \ |
279 InitGuard##X = true; \ | 279 InitGuard##X = true; \ |
280 ::X::staticInit(Ctx); \ | 280 ::X::staticInit(Ctx); \ |
281 } break; | 281 } break; |
282 #include "SZTargets.def" | 282 #include "SZTargets.def" |
283 #undef SUBZERO_TARGET | 283 #undef SUBZERO_TARGET |
284 } | 284 } |
285 } | 285 } |
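Token pasting (##) gives every target its own function-local guard, so each target's staticInit body runs at most once per process. One stamped-out case expands approximately as follows (fragment; a target named ARM32 is assumed here purely for illustration):

case TARGET_LOWERING_CLASS_FOR(ARM32): {
  static bool InitGuardARM32 = false; // per-target guard from InitGuard##X
  if (InitGuardARM32) {
    return;
  }
  InitGuardARM32 = true;
  ::ARM32::staticInit(Ctx);
} break;

Note that only the guard's zero-initialization is guaranteed; the check-and-set itself is unsynchronized, so this presumably relies on staticInit being driven from a single thread.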
286 | 286 |
287 bool TargetLowering::shouldBePooled(const Constant *C) { | 287 bool TargetLowering::shouldBePooled(const Constant *C) { |
288 const TargetArch Target = GlobalContext::getFlags().getTargetArch(); | 288 const TargetArch Target = getFlags().getTargetArch(); |
289 switch (Target) { | 289 switch (Target) { |
290 default: | 290 default: |
291 return false; | 291 return false; |
292 #define SUBZERO_TARGET(X) \ | 292 #define SUBZERO_TARGET(X) \ |
293 case TARGET_LOWERING_CLASS_FOR(X): \ | 293 case TARGET_LOWERING_CLASS_FOR(X): \ |
294 return ::X::shouldBePooled(C); | 294 return ::X::shouldBePooled(C); |
295 #include "SZTargets.def" | 295 #include "SZTargets.def" |
296 #undef SUBZERO_TARGET | 296 #undef SUBZERO_TARGET |
297 } | 297 } |
298 } | 298 } |
299 | 299 |
300 TargetLowering::SandboxType | 300 TargetLowering::SandboxType |
301 TargetLowering::determineSandboxTypeFromFlags(const ClFlags &Flags) { | 301 TargetLowering::determineSandboxTypeFromFlags(const ClFlags &Flags) { |
302 assert(!Flags.getUseSandboxing() || !Flags.getUseNonsfi()); | 302 assert(!Flags.getUseSandboxing() || !Flags.getUseNonsfi()); |
303 if (Flags.getUseNonsfi()) { | 303 if (Flags.getUseNonsfi()) { |
304 return TargetLowering::ST_Nonsfi; | 304 return TargetLowering::ST_Nonsfi; |
305 } | 305 } |
306 if (Flags.getUseSandboxing()) { | 306 if (Flags.getUseSandboxing()) { |
307 return TargetLowering::ST_NaCl; | 307 return TargetLowering::ST_NaCl; |
308 } | 308 } |
309 return TargetLowering::ST_None; | 309 return TargetLowering::ST_None; |
310 } | 310 } |
311 | 311 |
312 TargetLowering::TargetLowering(Cfg *Func) | 312 TargetLowering::TargetLowering(Cfg *Func) |
313 : Func(Func), Ctx(Func->getContext()), | 313 : Func(Func), Ctx(Func->getContext()), |
314 SandboxingType(determineSandboxTypeFromFlags(Ctx->getFlags())) {} | 314 SandboxingType(determineSandboxTypeFromFlags(getFlags())) {} |
315 | 315 |
316 TargetLowering::AutoBundle::AutoBundle(TargetLowering *Target, | 316 TargetLowering::AutoBundle::AutoBundle(TargetLowering *Target, |
317 InstBundleLock::Option Option) | 317 InstBundleLock::Option Option) |
318 : Target(Target), | 318 : Target(Target), NeedSandboxing(getFlags().getUseSandboxing()) { |
319 NeedSandboxing(Target->Ctx->getFlags().getUseSandboxing()) { | |
320 assert(!Target->AutoBundling); | 319 assert(!Target->AutoBundling); |
321 Target->AutoBundling = true; | 320 Target->AutoBundling = true; |
322 if (NeedSandboxing) { | 321 if (NeedSandboxing) { |
323 Target->_bundle_lock(Option); | 322 Target->_bundle_lock(Option); |
324 } | 323 } |
325 } | 324 } |
326 | 325 |
327 TargetLowering::AutoBundle::~AutoBundle() { | 326 TargetLowering::AutoBundle::~AutoBundle() { |
328 assert(Target->AutoBundling); | 327 assert(Target->AutoBundling); |
329 Target->AutoBundling = false; | 328 Target->AutoBundling = false; |
(...skipping 22 matching lines...)
352 Context.advanceCur(); | 351 Context.advanceCur(); |
353 Context.advanceNext(); | 352 Context.advanceNext(); |
354 } | 353 } |
355 | 354 |
356 void TargetLowering::doNopInsertion(RandomNumberGenerator &RNG) { | 355 void TargetLowering::doNopInsertion(RandomNumberGenerator &RNG) { |
357 Inst *I = Context.getCur(); | 356 Inst *I = Context.getCur(); |
358 bool ShouldSkip = llvm::isa<InstFakeUse>(I) || llvm::isa<InstFakeDef>(I) || | 357 bool ShouldSkip = llvm::isa<InstFakeUse>(I) || llvm::isa<InstFakeDef>(I) || |
359 llvm::isa<InstFakeKill>(I) || I->isRedundantAssign() || | 358 llvm::isa<InstFakeKill>(I) || I->isRedundantAssign() || |
360 I->isDeleted(); | 359 I->isDeleted(); |
361 if (!ShouldSkip) { | 360 if (!ShouldSkip) { |
362 int Probability = Ctx->getFlags().getNopProbabilityAsPercentage(); | 361 int Probability = getFlags().getNopProbabilityAsPercentage(); |
363 for (int I = 0; I < Ctx->getFlags().getMaxNopsPerInstruction(); ++I) { | 362 for (int I = 0; I < getFlags().getMaxNopsPerInstruction(); ++I) { |
364 randomlyInsertNop(Probability / 100.0, RNG); | 363 randomlyInsertNop(Probability / 100.0, RNG); |
365 } | 364 } |
366 } | 365 } |
367 } | 366 } |
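Assuming randomlyInsertNop inserts at most one nop per call with the given probability, the expected number of nops per eligible instruction is getMaxNopsPerInstruction() × Probability/100; for example, a 10% probability with a maximum of 2 attempts yields 0.2 nops per instruction on average.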
368 | 367 |
369 // Lowers a single instruction according to the information in Context, by | 368 // Lowers a single instruction according to the information in Context, by |
370 // checking the Context.Cur instruction kind and calling the appropriate | 369 // checking the Context.Cur instruction kind and calling the appropriate |
371 // lowering method. The lowering method should insert target instructions at | 370 // lowering method. The lowering method should insert target instructions at |
372 // the Cur.Next insertion point, and should not delete the Context.Cur | 371 // the Cur.Next insertion point, and should not delete the Context.Cur |
373 // instruction or advance Context.Cur. | 372 // instruction or advance Context.Cur. |
(...skipping 111 matching lines...)
485 void TargetLowering::regAlloc(RegAllocKind Kind) { | 484 void TargetLowering::regAlloc(RegAllocKind Kind) { |
486 TimerMarker T(TimerStack::TT_regAlloc, Func); | 485 TimerMarker T(TimerStack::TT_regAlloc, Func); |
487 LinearScan LinearScan(Func); | 486 LinearScan LinearScan(Func); |
488 RegSetMask RegInclude = RegSet_None; | 487 RegSetMask RegInclude = RegSet_None; |
489 RegSetMask RegExclude = RegSet_None; | 488 RegSetMask RegExclude = RegSet_None; |
490 RegInclude |= RegSet_CallerSave; | 489 RegInclude |= RegSet_CallerSave; |
491 RegInclude |= RegSet_CalleeSave; | 490 RegInclude |= RegSet_CalleeSave; |
492 if (hasFramePointer()) | 491 if (hasFramePointer()) |
493 RegExclude |= RegSet_FramePointer; | 492 RegExclude |= RegSet_FramePointer; |
494 SmallBitVector RegMask = getRegisterSet(RegInclude, RegExclude); | 493 SmallBitVector RegMask = getRegisterSet(RegInclude, RegExclude); |
495 bool Repeat = (Kind == RAK_Global && Ctx->getFlags().getRepeatRegAlloc()); | 494 bool Repeat = (Kind == RAK_Global && getFlags().getRepeatRegAlloc()); |
496 do { | 495 do { |
497 LinearScan.init(Kind); | 496 LinearScan.init(Kind); |
498 LinearScan.scan(RegMask, Ctx->getFlags().getRandomizeRegisterAllocation()); | 497 LinearScan.scan(RegMask, getFlags().getRandomizeRegisterAllocation()); |
499 if (!LinearScan.hasEvictions()) | 498 if (!LinearScan.hasEvictions()) |
500 Repeat = false; | 499 Repeat = false; |
501 Kind = RAK_SecondChance; | 500 Kind = RAK_SecondChance; |
502 } while (Repeat); | 501 } while (Repeat); |
503 // TODO(stichnot): Run the register allocator one more time to do stack slot | 502 // TODO(stichnot): Run the register allocator one more time to do stack slot |
504 // coalescing. The idea would be to initialize the Unhandled list with the | 503 // coalescing. The idea would be to initialize the Unhandled list with the |
505 // set of Variables that have no register and a non-empty live range, and | 504 // set of Variables that have no register and a non-empty live range, and |
506 // model an infinite number of registers. Maybe use the register aliasing | 505 // model an infinite number of registers. Maybe use the register aliasing |
507 // mechanism to get better packing of narrower slots. | 506 // mechanism to get better packing of narrower slots. |
508 } | 507 } |
(...skipping 129 matching lines...)
638 *SpillAreaSizeBytes = LocalsSize[NodeIndex]; | 637 *SpillAreaSizeBytes = LocalsSize[NodeIndex]; |
639 if (!*LocalsSlotsAlignmentBytes) | 638 if (!*LocalsSlotsAlignmentBytes) |
640 *LocalsSlotsAlignmentBytes = Increment; | 639 *LocalsSlotsAlignmentBytes = Increment; |
641 } | 640 } |
642 } else { | 641 } else { |
643 *SpillAreaSizeBytes += Increment; | 642 *SpillAreaSizeBytes += Increment; |
644 } | 643 } |
645 } | 644 } |
646 // For testing legalization of large stack offsets on targets with limited | 645 // For testing legalization of large stack offsets on targets with limited |
647 // offset bits in instruction encodings, add some padding. | 646 // offset bits in instruction encodings, add some padding. |
648 *SpillAreaSizeBytes += Ctx->getFlags().getTestStackExtra(); | 647 *SpillAreaSizeBytes += getFlags().getTestStackExtra(); |
649 } | 648 } |
650 | 649 |
651 void TargetLowering::alignStackSpillAreas(uint32_t SpillAreaStartOffset, | 650 void TargetLowering::alignStackSpillAreas(uint32_t SpillAreaStartOffset, |
652 uint32_t SpillAreaAlignmentBytes, | 651 uint32_t SpillAreaAlignmentBytes, |
653 size_t GlobalsSize, | 652 size_t GlobalsSize, |
654 uint32_t LocalsSlotsAlignmentBytes, | 653 uint32_t LocalsSlotsAlignmentBytes, |
655 uint32_t *SpillAreaPaddingBytes, | 654 uint32_t *SpillAreaPaddingBytes, |
656 uint32_t *LocalsSlotsPaddingBytes) { | 655 uint32_t *LocalsSlotsPaddingBytes) { |
657 if (SpillAreaAlignmentBytes) { | 656 if (SpillAreaAlignmentBytes) { |
658 uint32_t PaddingStart = SpillAreaStartOffset; | 657 uint32_t PaddingStart = SpillAreaStartOffset; |
(...skipping 18 matching lines...)
677 size_t GlobalsAndSubsequentPaddingSize, | 676 size_t GlobalsAndSubsequentPaddingSize, |
678 bool UsesFramePointer) { | 677 bool UsesFramePointer) { |
679 const VariablesMetadata *VMetadata = Func->getVMetadata(); | 678 const VariablesMetadata *VMetadata = Func->getVMetadata(); |
680 // For testing legalization of large stack offsets on targets with limited | 679 // For testing legalization of large stack offsets on targets with limited |
681 // offset bits in instruction encodings, add some padding. This assumes that | 680 // offset bits in instruction encodings, add some padding. This assumes that |
682 // SpillAreaSizeBytes has accounted for the extra test padding. When | 681 // SpillAreaSizeBytes has accounted for the extra test padding. When |
683 // UsesFramePointer is true, the offset depends on the padding, not just the | 682 // UsesFramePointer is true, the offset depends on the padding, not just the |
684 // SpillAreaSizeBytes. On the other hand, when UsesFramePointer is false, the | 683 // SpillAreaSizeBytes. On the other hand, when UsesFramePointer is false, the |
685 // offsets depend on the gap between SpillAreaSizeBytes and | 684 // offsets depend on the gap between SpillAreaSizeBytes and |
686 // SpillAreaPaddingBytes, so we don't increment that. | 685 // SpillAreaPaddingBytes, so we don't increment that. |
687 size_t TestPadding = Ctx->getFlags().getTestStackExtra(); | 686 size_t TestPadding = getFlags().getTestStackExtra(); |
688 if (UsesFramePointer) | 687 if (UsesFramePointer) |
689 SpillAreaPaddingBytes += TestPadding; | 688 SpillAreaPaddingBytes += TestPadding; |
690 size_t GlobalsSpaceUsed = SpillAreaPaddingBytes; | 689 size_t GlobalsSpaceUsed = SpillAreaPaddingBytes; |
691 size_t NextStackOffset = SpillAreaPaddingBytes; | 690 size_t NextStackOffset = SpillAreaPaddingBytes; |
692 CfgVector<size_t> LocalsSize(Func->getNumNodes()); | 691 CfgVector<size_t> LocalsSize(Func->getNumNodes()); |
693 const bool SimpleCoalescing = !callsReturnsTwice(); | 692 const bool SimpleCoalescing = !callsReturnsTwice(); |
694 | 693 |
695 for (Variable *Var : SortedSpilledVariables) { | 694 for (Variable *Var : SortedSpilledVariables) { |
696 size_t Increment = typeWidthInBytesOnStack(Var->getType()); | 695 size_t Increment = typeWidthInBytesOnStack(Var->getType()); |
697 if (SimpleCoalescing && VMetadata->isTracked(Var)) { | 696 if (SimpleCoalescing && VMetadata->isTracked(Var)) { |
(...skipping 20 matching lines...)
718 InstCall *TargetLowering::makeHelperCall(RuntimeHelper FuncID, Variable *Dest, | 717 InstCall *TargetLowering::makeHelperCall(RuntimeHelper FuncID, Variable *Dest, |
719 SizeT MaxSrcs) { | 718 SizeT MaxSrcs) { |
720 constexpr bool HasTailCall = false; | 719 constexpr bool HasTailCall = false; |
721 Constant *CallTarget = Ctx->getRuntimeHelperFunc(FuncID); | 720 Constant *CallTarget = Ctx->getRuntimeHelperFunc(FuncID); |
722 InstCall *Call = | 721 InstCall *Call = |
723 InstCall::create(Func, MaxSrcs, Dest, CallTarget, HasTailCall); | 722 InstCall::create(Func, MaxSrcs, Dest, CallTarget, HasTailCall); |
724 return Call; | 723 return Call; |
725 } | 724 } |
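A hypothetical call site inside a target's lowering code (the helper ID, operand names, and argument count below are illustrative, not taken from this file):

// Sketch: routing a memcpy through a runtime helper; DestPtr, SrcPtr, and
// CountOp are assumed to be pre-legalized operands.
InstCall *Call = makeHelperCall(RuntimeHelper::H_call_memcpy,
                                /*Dest=*/nullptr, /*MaxSrcs=*/3);
Call->addArg(DestPtr);
Call->addArg(SrcPtr);
Call->addArg(CountOp);
lowerCall(Call);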
726 | 725 |
727 bool TargetLowering::shouldOptimizeMemIntrins() { | 726 bool TargetLowering::shouldOptimizeMemIntrins() { |
728 return Ctx->getFlags().getOptLevel() >= Opt_1 || | 727 return getFlags().getOptLevel() >= Opt_1 || getFlags().getForceMemIntrinOpt(); |
729 Ctx->getFlags().getForceMemIntrinOpt(); | |
730 } | 728 } |
731 | 729 |
732 void TargetLowering::scalarizeArithmetic(InstArithmetic::OpKind Kind, | 730 void TargetLowering::scalarizeArithmetic(InstArithmetic::OpKind Kind, |
733 Variable *Dest, Operand *Src0, | 731 Variable *Dest, Operand *Src0, |
734 Operand *Src1) { | 732 Operand *Src1) { |
735 scalarizeInstruction( | 733 scalarizeInstruction( |
736 Dest, [this, Kind](Variable *Dest, Operand *Src0, Operand *Src1) { | 734 Dest, [this, Kind](Variable *Dest, Operand *Src0, Operand *Src1) { |
737 return Context.insert<InstArithmetic>(Kind, Dest, Src0, Src1); | 735 return Context.insert<InstArithmetic>(Kind, Dest, Src0, Src1); |
738 }, Src0, Src1); | 736 }, Src0, Src1); |
739 } | 737 } |
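Conceptually, scalarizeInstruction rewrites one vector operation as a per-element sequence; the expected shape of the expansion, sketched schematically (the real helper also handles operand legalization):

// for i in 0 .. NumElements-1:
//   e0   = extractelement Src0, i
//   e1   = extractelement Src1, i
//   r    = <Kind> e0, e1
//   Dest = insertelement Dest, r, i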
(...skipping 14 matching lines...)
754 RelocOffsetT Offset = C->getOffset(); | 752 RelocOffsetT Offset = C->getOffset(); |
755 if (Offset) { | 753 if (Offset) { |
756 if (Offset > 0) | 754 if (Offset > 0) |
757 Str << "+"; | 755 Str << "+"; |
758 Str << Offset; | 756 Str << Offset; |
759 } | 757 } |
760 } | 758 } |
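For example, a relocatable symbol foo with Offset 4 is emitted as foo+4, while a negative offset such as -8 prints as foo-8, since the minus sign comes from the integer value itself.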
761 | 759 |
762 std::unique_ptr<TargetDataLowering> | 760 std::unique_ptr<TargetDataLowering> |
763 TargetDataLowering::createLowering(GlobalContext *Ctx) { | 761 TargetDataLowering::createLowering(GlobalContext *Ctx) { |
764 TargetArch Target = Ctx->getFlags().getTargetArch(); | 762 TargetArch Target = getFlags().getTargetArch(); |
765 switch (Target) { | 763 switch (Target) { |
766 default: | 764 default: |
767 badTargetFatalError(Target); | 765 badTargetFatalError(Target); |
768 #define SUBZERO_TARGET(X) \ | 766 #define SUBZERO_TARGET(X) \ |
769 case TARGET_LOWERING_CLASS_FOR(X): \ | 767 case TARGET_LOWERING_CLASS_FOR(X): \ |
770 return ::X::createTargetDataLowering(Ctx); | 768 return ::X::createTargetDataLowering(Ctx); |
771 #include "SZTargets.def" | 769 #include "SZTargets.def" |
772 #undef SUBZERO_TARGET | 770 #undef SUBZERO_TARGET |
773 } | 771 } |
774 } | 772 } |
(...skipping 23 matching lines...)
798 | 796 |
799 } // end of anonymous namespace | 797 } // end of anonymous namespace |
800 | 798 |
801 void TargetDataLowering::emitGlobal(const VariableDeclaration &Var, | 799 void TargetDataLowering::emitGlobal(const VariableDeclaration &Var, |
802 const std::string &SectionSuffix) { | 800 const std::string &SectionSuffix) { |
803 if (!BuildDefs::dump()) | 801 if (!BuildDefs::dump()) |
804 return; | 802 return; |
805 | 803 |
806 // If external and not initialized, this must be a cross test. Don't generate | 804 // If external and not initialized, this must be a cross test. Don't generate |
807 // a declaration for such cases. | 805 // a declaration for such cases. |
808 const bool IsExternal = | 806 const bool IsExternal = Var.isExternal() || getFlags().getDisableInternal(); |
809 Var.isExternal() || Ctx->getFlags().getDisableInternal(); | |
810 if (IsExternal && !Var.hasInitializer()) | 807 if (IsExternal && !Var.hasInitializer()) |
811 return; | 808 return; |
812 | 809 |
813 Ostream &Str = Ctx->getStrEmit(); | 810 Ostream &Str = Ctx->getStrEmit(); |
814 const bool HasNonzeroInitializer = Var.hasNonzeroInitializer(); | 811 const bool HasNonzeroInitializer = Var.hasNonzeroInitializer(); |
815 const bool IsConstant = Var.getIsConstant(); | 812 const bool IsConstant = Var.getIsConstant(); |
816 const SizeT Size = Var.getNumBytes(); | 813 const SizeT Size = Var.getNumBytes(); |
817 const std::string Name = Var.getName().toString(); | 814 const std::string Name = Var.getName().toString(); |
818 | 815 |
819 Str << "\t.type\t" << Name << ",%object\n"; | 816 Str << "\t.type\t" << Name << ",%object\n"; |
820 | 817 |
821 const bool UseDataSections = Ctx->getFlags().getDataSections(); | 818 const bool UseDataSections = getFlags().getDataSections(); |
822 const bool UseNonsfi = Ctx->getFlags().getUseNonsfi(); | 819 const bool UseNonsfi = getFlags().getUseNonsfi(); |
823 const std::string Suffix = | 820 const std::string Suffix = |
824 dataSectionSuffix(SectionSuffix, Name, UseDataSections); | 821 dataSectionSuffix(SectionSuffix, Name, UseDataSections); |
825 if (IsConstant && UseNonsfi) | 822 if (IsConstant && UseNonsfi) |
826 Str << "\t.section\t.data.rel.ro" << Suffix << ",\"aw\",%progbits\n"; | 823 Str << "\t.section\t.data.rel.ro" << Suffix << ",\"aw\",%progbits\n"; |
827 else if (IsConstant) | 824 else if (IsConstant) |
828 Str << "\t.section\t.rodata" << Suffix << ",\"a\",%progbits\n"; | 825 Str << "\t.section\t.rodata" << Suffix << ",\"a\",%progbits\n"; |
829 else if (HasNonzeroInitializer) | 826 else if (HasNonzeroInitializer) |
830 Str << "\t.section\t.data" << Suffix << ",\"aw\",%progbits\n"; | 827 Str << "\t.section\t.data" << Suffix << ",\"aw\",%progbits\n"; |
831 else | 828 else |
832 Str << "\t.section\t.bss" << Suffix << ",\"aw\",%nobits\n"; | 829 Str << "\t.section\t.bss" << Suffix << ",\"aw\",%nobits\n"; |
(...skipping 52 matching lines...)
885 // but the .s writer still needs this .zero and cannot simply use the .size | 882 // but the .s writer still needs this .zero and cannot simply use the .size |
886 // to advance offsets. | 883 // to advance offsets. |
887 Str << "\t.zero\t" << Size << "\n"; | 884 Str << "\t.zero\t" << Size << "\n"; |
888 } | 885 } |
889 | 886 |
890 Str << "\t.size\t" << Name << ", " << Size << "\n"; | 887 Str << "\t.size\t" << Name << ", " << Size << "\n"; |
891 } | 888 } |
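As a concrete sketch, a hypothetical 4-byte constant global named foo, with data sections and nonsfi both disabled, would produce output of this shape (the alignment, label, and initializer directives come from the lines elided above):

	.type	foo,%object
	.section	.rodata,"a",%progbits
	...
	.size	foo, 4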
892 | 889 |
893 std::unique_ptr<TargetHeaderLowering> | 890 std::unique_ptr<TargetHeaderLowering> |
894 TargetHeaderLowering::createLowering(GlobalContext *Ctx) { | 891 TargetHeaderLowering::createLowering(GlobalContext *Ctx) { |
895 TargetArch Target = Ctx->getFlags().getTargetArch(); | 892 TargetArch Target = getFlags().getTargetArch(); |
896 switch (Target) { | 893 switch (Target) { |
897 default: | 894 default: |
898 badTargetFatalError(Target); | 895 badTargetFatalError(Target); |
899 #define SUBZERO_TARGET(X) \ | 896 #define SUBZERO_TARGET(X) \ |
900 case TARGET_LOWERING_CLASS_FOR(X): \ | 897 case TARGET_LOWERING_CLASS_FOR(X): \ |
901 return ::X::createTargetHeaderLowering(Ctx); | 898 return ::X::createTargetHeaderLowering(Ctx); |
902 #include "SZTargets.def" | 899 #include "SZTargets.def" |
903 #undef SUBZERO_TARGET | 900 #undef SUBZERO_TARGET |
904 } | 901 } |
905 } | 902 } |
906 | 903 |
907 TargetHeaderLowering::~TargetHeaderLowering() = default; | 904 TargetHeaderLowering::~TargetHeaderLowering() = default; |
908 | 905 |
909 } // end of namespace Ice | 906 } // end of namespace Ice |