
Unified diff: src/IceTargetLowering.cpp

Issue 1341423002: Reflow comments to use the full width. (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Fix spelling and rebase (created 5 years, 3 months ago)
 //===- subzero/src/IceTargetLowering.cpp - Basic lowering implementation --===//
 //
 // The Subzero Code Generator
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 ///
 /// \file
-/// This file implements the skeleton of the TargetLowering class,
-/// specifically invoking the appropriate lowering method for a given
-/// instruction kind and driving global register allocation. It also
-/// implements the non-deleted instruction iteration in
-/// LoweringContext.
+/// This file implements the skeleton of the TargetLowering class, specifically
+/// invoking the appropriate lowering method for a given instruction kind and
+/// driving global register allocation. It also implements the non-deleted
+/// instruction iteration in LoweringContext.
 ///
 //===----------------------------------------------------------------------===//

 #include "IceTargetLowering.h"

 #include "IceAssemblerARM32.h"
 #include "IceAssemblerMIPS32.h"
 #include "IceAssemblerX8632.h"
 #include "IceAssemblerX8664.h"
 #include "IceCfg.h" // setError()
(...skipping 84 matching lines...)
                     llvm::isa<InstFakeKill>(I) || I->isRedundantAssign() ||
                     I->isDeleted();
   if (!ShouldSkip) {
     int Probability = Ctx->getFlags().getNopProbabilityAsPercentage();
     for (int I = 0; I < Ctx->getFlags().getMaxNopsPerInstruction(); ++I) {
       randomlyInsertNop(Probability / 100.0, RNG);
     }
   }
 }
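
[Editorial sketch, not part of the patch.] In the loop above, each of up to getMaxNopsPerInstruction() slots independently inserts a nop with probability Probability/100, so the expected number of nops per lowered instruction is MaxNopsPerInstruction * Probability / 100. A minimal standalone model of that sampling scheme, with made-up flag values in place of the real flags object:

#include <iostream>
#include <random>

int main() {
  const int MaxNopsPerInstruction = 2; // stand-in for the command-line flag
  const int Probability = 25;          // percentage, stand-in value
  std::mt19937 RNG(42);
  std::bernoulli_distribution InsertNop(Probability / 100.0);
  int Nops = 0;
  for (int I = 0; I < MaxNopsPerInstruction; ++I) {
    if (InsertNop(RNG))
      ++Nops; // a real target would emit a nop instruction here
  }
  std::cout << "nops inserted: " << Nops << " (expected "
            << MaxNopsPerInstruction * Probability / 100.0
            << " per instruction)\n";
}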

-// Lowers a single instruction according to the information in
-// Context, by checking the Context.Cur instruction kind and calling
-// the appropriate lowering method. The lowering method should insert
-// target instructions at the Cur.Next insertion point, and should not
-// delete the Context.Cur instruction or advance Context.Cur.
+// Lowers a single instruction according to the information in Context, by
+// checking the Context.Cur instruction kind and calling the appropriate
+// lowering method. The lowering method should insert target instructions at
+// the Cur.Next insertion point, and should not delete the Context.Cur
+// instruction or advance Context.Cur.
 //
-// The lowering method may look ahead in the instruction stream as
-// desired, and lower additional instructions in conjunction with the
-// current one, for example fusing a compare and branch. If it does,
-// it should advance Context.Cur to point to the next non-deleted
-// instruction to process, and it should delete any additional
-// instructions it consumes.
+// The lowering method may look ahead in the instruction stream as desired, and
+// lower additional instructions in conjunction with the current one, for
+// example fusing a compare and branch. If it does, it should advance
+// Context.Cur to point to the next non-deleted instruction to process, and it
+// should delete any additional instructions it consumes.
 void TargetLowering::lower() {
   assert(!Context.atEnd());
   Inst *Inst = Context.getCur();
   Inst->deleteIfDead();
   if (!Inst->isDeleted() && !llvm::isa<InstFakeDef>(Inst) &&
       !llvm::isa<InstFakeUse>(Inst)) {
-    // Mark the current instruction as deleted before lowering,
-    // otherwise the Dest variable will likely get marked as non-SSA.
-    // See Variable::setDefinition(). However, just pass-through
-    // FakeDef and FakeUse instructions that might have been inserted
-    // prior to lowering.
+    // Mark the current instruction as deleted before lowering, otherwise the
+    // Dest variable will likely get marked as non-SSA. See
+    // Variable::setDefinition(). However, just pass-through FakeDef and
+    // FakeUse instructions that might have been inserted prior to lowering.
     Inst->setDeleted();
     switch (Inst->getKind()) {
     case Inst::Alloca:
       lowerAlloca(llvm::cast<InstAlloca>(Inst));
       break;
     case Inst::Arithmetic:
       lowerArithmetic(llvm::cast<InstArithmetic>(Inst));
       break;
     case Inst::Assign:
       lowerAssign(llvm::cast<InstAssign>(Inst));
(...skipping 71 matching lines...)
   assert(&*Next == Instr);
   Context.setCur(Next);
   lower();
 }

 void TargetLowering::lowerOther(const Inst *Instr) {
   (void)Instr;
   Func->setError("Can't lower unsupported instruction type");
 }

-// Drives register allocation, allowing all physical registers (except
-// perhaps for the frame pointer) to be allocated. This set of
-// registers could potentially be parameterized if we want to restrict
-// registers e.g. for performance testing.
+// Drives register allocation, allowing all physical registers (except perhaps
+// for the frame pointer) to be allocated. This set of registers could
+// potentially be parameterized if we want to restrict registers e.g. for
+// performance testing.
 void TargetLowering::regAlloc(RegAllocKind Kind) {
   TimerMarker T(TimerStack::TT_regAlloc, Func);
   LinearScan LinearScan(Func);
   RegSetMask RegInclude = RegSet_None;
   RegSetMask RegExclude = RegSet_None;
   RegInclude |= RegSet_CallerSave;
   RegInclude |= RegSet_CalleeSave;
   if (hasFramePointer())
     RegExclude |= RegSet_FramePointer;
   LinearScan.init(Kind);
   llvm::SmallBitVector RegMask = getRegisterSet(RegInclude, RegExclude);
   LinearScan.scan(RegMask, Ctx->getFlags().shouldRandomizeRegAlloc());
 }
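
[Editorial sketch, not part of the patch.] A minimal, self-contained illustration of the include/exclude mask composition used by regAlloc(). The register names, set memberships, and getRegisterSet() body below are invented for illustration; in Subzero the actual mask comes from the target-specific getRegisterSet(). A register is allocatable if it belongs to at least one included set and to no excluded set:

#include <bitset>
#include <cstdint>
#include <iostream>

using RegSetMask = uint32_t;
constexpr RegSetMask RegSet_None = 0;
constexpr RegSetMask RegSet_CallerSave = 1 << 0;
constexpr RegSetMask RegSet_CalleeSave = 1 << 1;
constexpr RegSetMask RegSet_FramePointer = 1 << 2;

// Toy register file: which abstract sets each register belongs to.
struct RegDesc { const char *Name; RegSetMask Sets; };
constexpr RegDesc Regs[] = {
    {"r0", RegSet_CallerSave}, {"r1", RegSet_CallerSave},
    {"r4", RegSet_CalleeSave}, {"r5", RegSet_CalleeSave},
    {"fp", RegSet_CalleeSave | RegSet_FramePointer},
};

std::bitset<5> getRegisterSet(RegSetMask Include, RegSetMask Exclude) {
  std::bitset<5> Mask;
  for (size_t I = 0; I < 5; ++I)
    if ((Regs[I].Sets & Include) && !(Regs[I].Sets & Exclude))
      Mask.set(I);
  return Mask;
}

int main() {
  RegSetMask RegInclude = RegSet_None, RegExclude = RegSet_None;
  RegInclude |= RegSet_CallerSave;
  RegInclude |= RegSet_CalleeSave;
  const bool HasFramePointer = true; // e.g. the frame pointer is reserved
  if (HasFramePointer)
    RegExclude |= RegSet_FramePointer;
  // Prints 01111: r0, r1, r4, r5 are allocatable; fp is excluded.
  std::cout << getRegisterSet(RegInclude, RegExclude) << "\n";
}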

 void TargetLowering::inferTwoAddress() {
-  // Find two-address non-SSA instructions where Dest==Src0, and set
-  // the DestNonKillable flag to keep liveness analysis consistent.
+  // Find two-address non-SSA instructions where Dest==Src0, and set the
+  // DestNonKillable flag to keep liveness analysis consistent.
   for (auto Inst = Context.getCur(), E = Context.getNext(); Inst != E; ++Inst) {
     if (Inst->isDeleted())
       continue;
     if (Variable *Dest = Inst->getDest()) {
-      // TODO(stichnot): We may need to consider all source
-      // operands, not just the first one, if using 3-address
-      // instructions.
+      // TODO(stichnot): We may need to consider all source operands, not just
+      // the first one, if using 3-address instructions.
       if (Inst->getSrcSize() > 0 && Inst->getSrc(0) == Dest)
         Inst->setDestNonKillable();
     }
   }
 }
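
[Editorial sketch, not part of the patch.] To make the Dest==Src0 check concrete: an x86-style two-address instruction such as "a += b" reads the same register it writes, so the old value of Dest must not be treated as killed at the definition. The Variable/Inst types below are simplified stand-ins, not Subzero's:

#include <iostream>
#include <vector>

struct Variable { int Num; };

struct Inst {
  Variable *Dest = nullptr;
  std::vector<Variable *> Srcs;
  bool DestNonKillable = false;
  bool Deleted = false;
};

// Same idea as inferTwoAddress(): mark Dest as non-killable when the
// instruction's first source is also its destination.
void inferTwoAddress(std::vector<Inst> &Insts) {
  for (Inst &I : Insts) {
    if (I.Deleted)
      continue;
    if (I.Dest && !I.Srcs.empty() && I.Srcs[0] == I.Dest)
      I.DestNonKillable = true;
  }
}

int main() {
  Variable A{0}, B{1};
  std::vector<Inst> Insts(2);
  Insts[0] = Inst{&A, {&A, &B}}; // a = a + b  -> two-address, Dest == Src0
  Insts[1] = Inst{&A, {&B}};     // a = b      -> not two-address
  inferTwoAddress(Insts);
  std::cout << Insts[0].DestNonKillable << " " << Insts[1].DestNonKillable
            << "\n"; // prints: 1 0
}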

 void TargetLowering::sortVarsByAlignment(VarList &Dest,
                                          const VarList &Source) const {
   Dest = Source;
-  // Instead of std::sort, we could do a bucket sort with log2(alignment)
-  // as the buckets, if performance is an issue.
+  // Instead of std::sort, we could do a bucket sort with log2(alignment) as
+  // the buckets, if performance is an issue.
   std::sort(Dest.begin(), Dest.end(),
             [this](const Variable *V1, const Variable *V2) {
               return typeWidthInBytesOnStack(V1->getType()) >
                      typeWidthInBytesOnStack(V2->getType());
             });
 }
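
[Editorial sketch, not part of the patch.] A small standalone illustration of the sort order. WidthBytes stands in for typeWidthInBytesOnStack() and the variable names are made up: sorting by descending on-stack width means the most-aligned variables come first, which is what the spill-area code below relies on when it takes the first variable it places in an area as that area's maximum alignment.

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

struct Var {
  std::string Name;
  size_t WidthBytes;
};

int main() {
  std::vector<Var> Source = {
      {"i8tmp", 1}, {"v4i32", 16}, {"i32x", 4}, {"f64y", 8}};
  std::vector<Var> Dest = Source;
  // Descending by width, so larger (and more aligned) variables come first.
  std::sort(Dest.begin(), Dest.end(), [](const Var &V1, const Var &V2) {
    return V1.WidthBytes > V2.WidthBytes;
  });
  for (const Var &V : Dest)
    std::cout << V.Name << " ";
  std::cout << "\n"; // prints: v4i32 f64y i32x i8tmp
}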

 void TargetLowering::getVarStackSlotParams(
     VarList &SortedSpilledVariables, llvm::SmallBitVector &RegsUsed,
     size_t *GlobalsSize, size_t *SpillAreaSizeBytes,
     uint32_t *SpillAreaAlignmentBytes, uint32_t *LocalsSlotsAlignmentBytes,
     std::function<bool(Variable *)> TargetVarHook) {
   const VariablesMetadata *VMetadata = Func->getVMetadata();
   llvm::BitVector IsVarReferenced(Func->getNumVariables());
   for (CfgNode *Node : Func->getNodes()) {
     for (Inst &Inst : Node->getInsts()) {
       if (Inst.isDeleted())
         continue;
       if (const Variable *Var = Inst.getDest())
         IsVarReferenced[Var->getIndex()] = true;
       FOREACH_VAR_IN_INST(Var, Inst) {
         IsVarReferenced[Var->getIndex()] = true;
       }
     }
   }

-  // If SimpleCoalescing is false, each variable without a register
-  // gets its own unique stack slot, which leads to large stack
-  // frames. If SimpleCoalescing is true, then each "global" variable
-  // without a register gets its own slot, but "local" variable slots
-  // are reused across basic blocks. E.g., if A and B are local to
-  // block 1 and C is local to block 2, then C may share a slot with A or B.
+  // If SimpleCoalescing is false, each variable without a register gets its
+  // own unique stack slot, which leads to large stack frames. If
+  // SimpleCoalescing is true, then each "global" variable without a register
+  // gets its own slot, but "local" variable slots are reused across basic
+  // blocks. E.g., if A and B are local to block 1 and C is local to block 2,
+  // then C may share a slot with A or B.
   //
   // We cannot coalesce stack slots if this function calls a "returns twice"
-  // function. In that case, basic blocks may be revisited, and variables
-  // local to those basic blocks are actually live until after the
-  // called function returns a second time.
+  // function. In that case, basic blocks may be revisited, and variables local
+  // to those basic blocks are actually live until after the called function
+  // returns a second time.
   const bool SimpleCoalescing = !callsReturnsTwice();

   std::vector<size_t> LocalsSize(Func->getNumNodes());
   const VarList &Variables = Func->getVariables();
   VarList SpilledVariables;
   for (Variable *Var : Variables) {
     if (Var->hasReg()) {
       RegsUsed[Var->getRegNum()] = true;
       continue;
     }
-    // An argument either does not need a stack slot (if passed in a
-    // register) or already has one (if passed on the stack).
+    // An argument either does not need a stack slot (if passed in a register)
+    // or already has one (if passed on the stack).
     if (Var->getIsArg())
       continue;
     // An unreferenced variable doesn't need a stack slot.
     if (!IsVarReferenced[Var->getIndex()])
       continue;
-    // Check a target-specific variable (it may end up sharing stack slots)
-    // and not need accounting here.
+    // Check a target-specific variable (it may end up sharing stack slots) and
+    // not need accounting here.
     if (TargetVarHook(Var))
       continue;
     SpilledVariables.push_back(Var);
   }

   SortedSpilledVariables.reserve(SpilledVariables.size());
   sortVarsByAlignment(SortedSpilledVariables, SpilledVariables);

   for (Variable *Var : SortedSpilledVariables) {
     size_t Increment = typeWidthInBytesOnStack(Var->getType());
-    // We have sorted by alignment, so the first variable we encounter that
-    // is located in each area determines the max alignment for the area.
+    // We have sorted by alignment, so the first variable we encounter that is
+    // located in each area determines the max alignment for the area.
     if (!*SpillAreaAlignmentBytes)
       *SpillAreaAlignmentBytes = Increment;
     if (SimpleCoalescing && VMetadata->isTracked(Var)) {
       if (VMetadata->isMultiBlock(Var)) {
         *GlobalsSize += Increment;
       } else {
         SizeT NodeIndex = VMetadata->getLocalUseNode(Var)->getIndex();
         LocalsSize[NodeIndex] += Increment;
         if (LocalsSize[NodeIndex] > *SpillAreaSizeBytes)
           *SpillAreaSizeBytes = LocalsSize[NodeIndex];
(...skipping 15 matching lines...)
                                           uint32_t LocalsSlotsAlignmentBytes,
                                           uint32_t *SpillAreaPaddingBytes,
                                           uint32_t *LocalsSlotsPaddingBytes) {
   if (SpillAreaAlignmentBytes) {
     uint32_t PaddingStart = SpillAreaStartOffset;
     uint32_t SpillAreaStart =
         Utils::applyAlignment(PaddingStart, SpillAreaAlignmentBytes);
     *SpillAreaPaddingBytes = SpillAreaStart - PaddingStart;
   }

-  // If there are separate globals and locals areas, make sure the
-  // locals area is aligned by padding the end of the globals area.
+  // If there are separate globals and locals areas, make sure the locals area
+  // is aligned by padding the end of the globals area.
   if (LocalsSlotsAlignmentBytes) {
     uint32_t GlobalsAndSubsequentPaddingSize = GlobalsSize;
     GlobalsAndSubsequentPaddingSize =
         Utils::applyAlignment(GlobalsSize, LocalsSlotsAlignmentBytes);
     *LocalsSlotsPaddingBytes = GlobalsAndSubsequentPaddingSize - GlobalsSize;
   }
 }
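
[Editorial sketch, not part of the patch.] The padding arithmetic above is easier to see with numbers. Assuming Utils::applyAlignment() rounds its first argument up to the next multiple of a power-of-two alignment, a standalone sketch:

#include <cstdint>
#include <iostream>

// Assumed behavior of Utils::applyAlignment: round Value up to the next
// multiple of Alignment (Alignment is a power of two).
uint32_t applyAlignment(uint32_t Value, uint32_t Alignment) {
  return (Value + Alignment - 1) & ~(Alignment - 1);
}

int main() {
  // The spill area starts 12 bytes into the frame but needs 16-byte
  // alignment, so 4 bytes of padding precede it.
  uint32_t SpillAreaStartOffset = 12, SpillAreaAlignmentBytes = 16;
  uint32_t SpillAreaStart =
      applyAlignment(SpillAreaStartOffset, SpillAreaAlignmentBytes);
  std::cout << "spill area padding: " << SpillAreaStart - SpillAreaStartOffset
            << "\n"; // 4

  // Globals occupy 20 bytes but the locals slots need 8-byte alignment, so
  // the globals area is padded out to 24 bytes.
  uint32_t GlobalsSize = 20, LocalsSlotsAlignmentBytes = 8;
  uint32_t Padded = applyAlignment(GlobalsSize, LocalsSlotsAlignmentBytes);
  std::cout << "locals slots padding: " << Padded - GlobalsSize << "\n"; // 4
}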

 void TargetLowering::assignVarStackSlots(VarList &SortedSpilledVariables,
                                          size_t SpillAreaPaddingBytes,
                                          size_t SpillAreaSizeBytes,
                                          size_t GlobalsAndSubsequentPaddingSize,
                                          bool UsesFramePointer) {
   const VariablesMetadata *VMetadata = Func->getVMetadata();
   // For testing legalization of large stack offsets on targets with limited
   // offset bits in instruction encodings, add some padding. This assumes that
-  // SpillAreaSizeBytes has accounted for the extra test padding.
-  // When UseFramePointer is true, the offset depends on the padding,
-  // not just the SpillAreaSizeBytes. On the other hand, when UseFramePointer
-  // is false, the offsets depend on the gap between SpillAreaSizeBytes
-  // and SpillAreaPaddingBytes, so we don't increment that.
+  // SpillAreaSizeBytes has accounted for the extra test padding. When
+  // UseFramePointer is true, the offset depends on the padding, not just the
+  // SpillAreaSizeBytes. On the other hand, when UseFramePointer is false, the
+  // offsets depend on the gap between SpillAreaSizeBytes and
+  // SpillAreaPaddingBytes, so we don't increment that.
   size_t TestPadding = Ctx->getFlags().getTestStackExtra();
   if (UsesFramePointer)
     SpillAreaPaddingBytes += TestPadding;
   size_t GlobalsSpaceUsed = SpillAreaPaddingBytes;
   size_t NextStackOffset = SpillAreaPaddingBytes;
   std::vector<size_t> LocalsSize(Func->getNumNodes());
   const bool SimpleCoalescing = !callsReturnsTwice();

   for (Variable *Var : SortedSpilledVariables) {
     size_t Increment = typeWidthInBytesOnStack(Var->getType());
(...skipping 90 matching lines...)
   return "." + SectionSuffix;
 }

 } // end of anonymous namespace

 void TargetDataLowering::emitGlobal(const VariableDeclaration &Var,
                                     const IceString &SectionSuffix) {
   if (!BuildDefs::dump())
     return;

-  // If external and not initialized, this must be a cross test.
-  // Don't generate a declaration for such cases.
+  // If external and not initialized, this must be a cross test. Don't generate
+  // a declaration for such cases.
   const bool IsExternal =
       Var.isExternal() || Ctx->getFlags().getDisableInternal();
   if (IsExternal && !Var.hasInitializer())
     return;

   Ostream &Str = Ctx->getStrEmit();
   const bool HasNonzeroInitializer = Var.hasNonzeroInitializer();
   const bool IsConstant = Var.getIsConstant();
   const SizeT Size = Var.getNumBytes();
   const IceString MangledName = Var.mangleName(Ctx);
(...skipping 49 matching lines...)
           Str << " + " << Offset;
         else
           Str << " - " << -Offset;
       }
       Str << "\n";
       break;
     }
     }
   }
   } else {
-    // NOTE: for non-constant zero initializers, this is BSS (no bits),
-    // so an ELF writer would not write to the file, and only track
-    // virtual offsets, but the .s writer still needs this .zero and
-    // cannot simply use the .size to advance offsets.
+    // NOTE: for non-constant zero initializers, this is BSS (no bits), so an
+    // ELF writer would not write to the file, and only track virtual offsets,
+    // but the .s writer still needs this .zero and cannot simply use the .size
+    // to advance offsets.
     Str << "\t.zero\t" << Size << "\n";
   }

   Str << "\t.size\t" << MangledName << ", " << Size << "\n";
 }
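
[Editorial sketch, not part of the patch.] For reference, the .s text produced by the zero-initializer branch above looks roughly like the following; the symbol name and size are invented, and only the .zero and .size directives are taken from the code shown:

#include <cstddef>
#include <iostream>
#include <string>

int main() {
  const std::string MangledName = "my_zeroed_global"; // hypothetical symbol
  const std::size_t Size = 64;                        // hypothetical byte size
  std::ostream &Str = std::cout;
  // Emits:  .zero  64   followed by   .size  my_zeroed_global, 64
  Str << "\t.zero\t" << Size << "\n";
  Str << "\t.size\t" << MangledName << ", " << Size << "\n";
}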

 std::unique_ptr<TargetHeaderLowering>
 TargetHeaderLowering::createLowering(GlobalContext *Ctx) {
   TargetArch Target = Ctx->getFlags().getTargetArch();
 #define SUBZERO_TARGET(X)                                                     \
   if (Target == Target_##X)                                                   \
     return TargetHeader##X::create(Ctx);
 #include "llvm/Config/SZTargets.def"

   llvm::report_fatal_error("Unsupported target header lowering");
 }

 TargetHeaderLowering::~TargetHeaderLowering() = default;

 } // end of namespace Ice
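
[Editorial sketch, not part of the patch.] TargetHeaderLowering::createLowering() above uses an X-macro: SZTargets.def expands SUBZERO_TARGET(X) once per configured backend, producing an if-chain over the Target_##X enumerators. A self-contained sketch of the pattern, with a made-up Lowering type and the .def contents inlined:

#include <iostream>
#include <memory>
#include <string>

enum TargetArch { Target_X8632, Target_ARM32 }; // example targets only

struct Lowering {
  std::string Name;
  static std::unique_ptr<Lowering> create(const std::string &N) {
    return std::unique_ptr<Lowering>(new Lowering{N});
  }
};

std::unique_ptr<Lowering> createLowering(TargetArch Target) {
#define SUBZERO_TARGET(X)                                                      \
  if (Target == Target_##X)                                                    \
    return Lowering::create(#X);
  // In the real code this list comes from #include "llvm/Config/SZTargets.def".
  SUBZERO_TARGET(X8632)
  SUBZERO_TARGET(ARM32)
#undef SUBZERO_TARGET
  return nullptr; // the real code calls llvm::report_fatal_error() instead
}

int main() {
  std::cout << createLowering(Target_ARM32)->Name << "\n"; // prints: ARM32
}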
