//===- subzero/src/IceTargetLoweringARM32.cpp - ARM32 lowering ------------===//
//
// The Subzero Code Generator
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetLoweringARM32 class, which
Jim Stichnoth, 2015/04/17 19:16:01: Reformat to 80-col lines? (this is
probably an ar…
jvoung (off chromium), 2015/04/21 17:05:30: Done.
// consists almost entirely of the lowering sequence for each
// high-level instruction.
//
//===----------------------------------------------------------------------===//

#include "llvm/Support/MathExtras.h"

#include "IceCfg.h"
#include "IceCfgNode.h"
#include "IceClFlags.h"
#include "IceDefs.h"
#include "IceELFObjectWriter.h"
#include "IceGlobalInits.h"
#include "IceInstARM32.h"
#include "IceLiveness.h"
#include "IceOperand.h"
#include "IceRegistersARM32.h"
#include "IceTargetLoweringARM32.def"
#include "IceTargetLoweringARM32.h"
#include "IceUtils.h"

namespace Ice {

TargetARM32::TargetARM32(Cfg *Func)
    : TargetLowering(Func), UsesFramePointer(false), NextLabelNumber(0) {
  // TODO: Don't initialize IntegerRegisters and friends every time.
  // Instead, initialize in some sort of static initializer for the
  // class.
  llvm::SmallBitVector IntegerRegisters(RegARM32::Reg_NUM);
  llvm::SmallBitVector FloatRegisters(RegARM32::Reg_NUM);
  llvm::SmallBitVector VectorRegisters(RegARM32::Reg_NUM);
  llvm::SmallBitVector InvalidRegisters(RegARM32::Reg_NUM);
  ScratchRegs.resize(RegARM32::Reg_NUM);
#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt,   \
          isFP)                                                               \
  IntegerRegisters[RegARM32::val] = isInt;                                    \
  FloatRegisters[RegARM32::val] = isFP;                                       \
  VectorRegisters[RegARM32::val] = isFP;                                      \
  ScratchRegs[RegARM32::val] = scratch;
  REGARM32_TABLE;
#undef X
  TypeToRegisterSet[IceType_void] = InvalidRegisters;
  TypeToRegisterSet[IceType_i1] = IntegerRegisters;
  TypeToRegisterSet[IceType_i8] = IntegerRegisters;
  TypeToRegisterSet[IceType_i16] = IntegerRegisters;
  TypeToRegisterSet[IceType_i32] = IntegerRegisters;
  TypeToRegisterSet[IceType_i64] = IntegerRegisters;
  TypeToRegisterSet[IceType_f32] = FloatRegisters;
  TypeToRegisterSet[IceType_f64] = FloatRegisters;
  TypeToRegisterSet[IceType_v4i1] = VectorRegisters;
  TypeToRegisterSet[IceType_v8i1] = VectorRegisters;
  TypeToRegisterSet[IceType_v16i1] = VectorRegisters;
  TypeToRegisterSet[IceType_v16i8] = VectorRegisters;
  TypeToRegisterSet[IceType_v8i16] = VectorRegisters;
  TypeToRegisterSet[IceType_v4i32] = VectorRegisters;
  TypeToRegisterSet[IceType_v4f32] = VectorRegisters;
}
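
The REGARM32_TABLE X-macro above stamps out the four assignments once per
register entry in IceTargetLoweringARM32.def. As a rough sketch of what one
expansion looks like (the Reg_r0 entry and its field values here are
illustrative guesses at the .def contents, not copied from this CL):

  // Hypothetical .def entry:
  //   X(Reg_r0, 0, "r0", 1 /*scratch*/, 0 /*preserved*/, 0 /*stackptr*/,
  //     0 /*frameptr*/, 1 /*isInt*/, 0 /*isFP*/)
  // would expand inside the constructor to:
  //   IntegerRegisters[RegARM32::Reg_r0] = 1;
  //   FloatRegisters[RegARM32::Reg_r0] = 0;
  //   VectorRegisters[RegARM32::Reg_r0] = 0;
  //   ScratchRegs[RegARM32::Reg_r0] = 1;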

void TargetARM32::translateO2() {
  TimerMarker T(TimerStack::TT_O2, Func);

  // TODO: share passes with X86?
Jim Stichnoth, 2015/04/17 19:16:01: Definitely. This would presumably be done
as part…
jvoung (off chromium), 2015/04/21 17:05:30: Done.

  if (!Ctx->getFlags().getPhiEdgeSplit()) {
    // Lower Phi instructions.
    Func->placePhiLoads();
    if (Func->hasError())
      return;
    Func->placePhiStores();
    if (Func->hasError())
      return;
    Func->deletePhis();
    if (Func->hasError())
      return;
    Func->dump("After Phi lowering");
  }

  // Address mode optimization.
  Func->getVMetadata()->init(VMK_SingleDefs);
  Func->doAddressOpt();

  // Argument lowering
  Func->doArgLowering();

  // Target lowering. This requires liveness analysis for some parts
  // of the lowering decisions, such as compare/branch fusing. If
  // non-lightweight liveness analysis is used, the instructions need
  // to be renumbered first. TODO: This renumbering should only be
  // necessary if we're actually calculating live intervals, which we
  // only do for register allocation.
  Func->renumberInstructions();
  if (Func->hasError())
    return;

  // TODO: It should be sufficient to use the fastest liveness
  // calculation, i.e. livenessLightweight(). However, for some
  // reason that slows down the rest of the translation. Investigate.
  Func->liveness(Liveness_Basic);
  if (Func->hasError())
    return;
  Func->dump("After ARM32 address mode opt");

  Func->genCode();
  if (Func->hasError())
    return;
  Func->dump("After ARM32 codegen");

  // Register allocation. This requires instruction renumbering and
  // full liveness analysis.
  Func->renumberInstructions();
  if (Func->hasError())
    return;
  Func->liveness(Liveness_Intervals);
  if (Func->hasError())
    return;
  // Validate the live range computations. The expensive validation
  // call is deliberately only made when assertions are enabled.
  assert(Func->validateLiveness());
  // The post-codegen dump is done here, after liveness analysis and
  // associated cleanup, to make the dump cleaner and more useful.
  Func->dump("After initial ARM32 codegen");
  Func->getVMetadata()->init(VMK_All);
  regAlloc(RAK_Global);
  if (Func->hasError())
    return;
  Func->dump("After linear scan regalloc");

  if (Ctx->getFlags().getPhiEdgeSplit()) {
    Func->advancedPhiLowering();
    Func->dump("After advanced Phi lowering");
  }

  // Stack frame mapping.
  Func->genFrame();
  if (Func->hasError())
    return;
  Func->dump("After stack frame mapping");

  Func->contractEmptyNodes();
  Func->reorderNodes();

  // Branch optimization. This needs to be done just before code
  // emission. In particular, no transformations that insert or
  // reorder CfgNodes should be done after branch optimization. We go
  // ahead and do it before nop insertion to reduce the amount of work
  // needed for searching for opportunities.
  Func->doBranchOpt();
  Func->dump("After branch optimization");

  // Nop insertion
  if (Ctx->getFlags().shouldDoNopInsertion()) {
    Func->doNopInsertion();
  }
}
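
One detail in the pipeline above deserves a note: assert(Func->validateLiveness())
places the expensive check inside the assert macro, so it only runs in builds
where assertions are enabled; under NDEBUG the argument expression is never
evaluated and the cost disappears entirely. A minimal standalone sketch of the
idiom (validateSomething() is a hypothetical stand-in, not a Subzero API):

  #include <cassert>

  // Hypothetical expensive consistency check. It returns bool so it can sit
  // inside assert(); when NDEBUG is defined the argument expression is not
  // evaluated, so the cost vanishes from release builds.
  static bool validateSomething() { /* ...expensive scan... */ return true; }

  void example() {
    assert(validateSomething()); // no-op under -DNDEBUG
  }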

void TargetARM32::translateOm1() {
  TimerMarker T(TimerStack::TT_Om1, Func);

  // TODO: share passes with X86?

  Func->placePhiLoads();
  if (Func->hasError())
    return;
  Func->placePhiStores();
  if (Func->hasError())
    return;
  Func->deletePhis();
  if (Func->hasError())
    return;
  Func->dump("After Phi lowering");

  Func->doArgLowering();

  Func->genCode();
  if (Func->hasError())
    return;
  Func->dump("After initial ARM32 codegen");

  regAlloc(RAK_InfOnly);
  if (Func->hasError())
    return;
  Func->dump("After regalloc of infinite-weight variables");

  Func->genFrame();
  if (Func->hasError())
    return;
  Func->dump("After stack frame mapping");

  // Nop insertion
  if (Ctx->getFlags().shouldDoNopInsertion()) {
    Func->doNopInsertion();
  }
}

bool TargetARM32::doBranchOpt(Inst *I, const CfgNode *NextNode) {
  (void)I;
  (void)NextNode;
  llvm::report_fatal_error("Not yet implemented");
}

IceString TargetARM32::RegNames[] = {
#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt,   \
          isFP)                                                               \
  name,
    REGARM32_TABLE
#undef X
};

IceString TargetARM32::getRegName(SizeT RegNum, Type Ty) const {
  assert(RegNum < RegARM32::Reg_NUM);
  (void)Ty;
  return RegNames[RegNum];
}

Variable *TargetARM32::getPhysicalRegister(SizeT RegNum, Type Ty) {
  if (Ty == IceType_void)
    Ty = IceType_i32;
  if (PhysicalRegisters[Ty].empty())
    PhysicalRegisters[Ty].resize(RegARM32::Reg_NUM);
  assert(RegNum < PhysicalRegisters[Ty].size());
  Variable *Reg = PhysicalRegisters[Ty][RegNum];
  if (Reg == nullptr) {
    Reg = Func->makeVariable(Ty);
    Reg->setRegNum(RegNum);
    PhysicalRegisters[Ty][RegNum] = Reg;
    // Specially mark SP as an "argument" so that it is considered
    // live upon function entry.
    if (RegNum == RegARM32::Reg_sp) {
      Func->addImplicitArg(Reg);
      Reg->setIgnoreLiveness();
    }
  }
  return Reg;
}
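
Note the lazy create-and-cache behavior above: the first request for a given
(type, register) pair makes a Variable and stores it in PhysicalRegisters, and
every later request returns the same node. A hedged usage sketch (an
illustrative call, not code from this CL):

  // First call creates and caches the Variable; the second returns it.
  Variable *SP = getPhysicalRegister(RegARM32::Reg_sp, IceType_i32);
  assert(SP == getPhysicalRegister(RegARM32::Reg_sp, IceType_i32));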

void TargetARM32::emitVariable(const Variable *Var) const {
  Ostream &Str = Ctx->getStrEmit();
  (void)Var;
  (void)Str;
  llvm::report_fatal_error("emitVariable: Not yet implemented");
}

void TargetARM32::lowerArguments() {
  llvm::report_fatal_error("lowerArguments: Not yet implemented");
}

Type TargetARM32::stackSlotType() { return IceType_i32; }

void TargetARM32::addProlog(CfgNode *Node) {
  (void)Node;
  llvm::report_fatal_error("addProlog: Not yet implemented");
}

void TargetARM32::addEpilog(CfgNode *Node) {
  (void)Node;
  llvm::report_fatal_error("addEpilog: Not yet implemented");
}

llvm::SmallBitVector TargetARM32::getRegisterSet(RegSetMask Include,
                                                 RegSetMask Exclude) const {
  llvm::SmallBitVector Registers(RegARM32::Reg_NUM);

#define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt,   \
          isFP)                                                               \
  if (scratch && (Include & RegSet_CallerSave))                               \
    Registers[RegARM32::val] = true;                                          \
  if (preserved && (Include & RegSet_CalleeSave))                             \
    Registers[RegARM32::val] = true;                                          \
  if (stackptr && (Include & RegSet_StackPointer))                            \
    Registers[RegARM32::val] = true;                                          \
  if (frameptr && (Include & RegSet_FramePointer))                            \
    Registers[RegARM32::val] = true;                                          \
  if (scratch && (Exclude & RegSet_CallerSave))                               \
    Registers[RegARM32::val] = false;                                         \
  if (preserved && (Exclude & RegSet_CalleeSave))                             \
    Registers[RegARM32::val] = false;                                         \
  if (stackptr && (Exclude & RegSet_StackPointer))                            \
    Registers[RegARM32::val] = false;                                         \
  if (frameptr && (Exclude & RegSet_FramePointer))                            \
    Registers[RegARM32::val] = false;

  REGARM32_TABLE

#undef X

  return Registers;
}
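
In getRegisterSet, the Include clauses run before the Exclude clauses within
each X expansion, so Exclude wins whenever both masks name the same register
class. An illustrative call (not one made in this CL), asking for the
caller-save set minus the stack and frame pointers:

  llvm::SmallBitVector CallerSave = getRegisterSet(
      RegSet_CallerSave, RegSet_StackPointer | RegSet_FramePointer);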

void TargetARM32::lowerAlloca(const InstAlloca *Inst) {
  UsesFramePointer = true;
  // Conservatively require the stack to be aligned. Some stack
  // adjustment operations implemented below assume that the stack is
  // aligned before the alloca. All the alloca code ensures that the
  // stack alignment is preserved after the alloca. The stack alignment
  // restriction can be relaxed in some cases.
  NeedsStackAlignment = true;
  (void)Inst;
  llvm::report_fatal_error("Not yet implemented");
}

void TargetARM32::lowerArithmetic(const InstArithmetic *Inst) {
  switch (Inst->getOp()) {
  case InstArithmetic::_num:
    llvm_unreachable("Unknown arithmetic operator");
    break;
  case InstArithmetic::Add:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::And:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::Or:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::Xor:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::Sub:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::Mul:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::Shl:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::Lshr:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::Ashr:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::Udiv:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::Sdiv:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::Urem:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::Srem:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::Fadd:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::Fsub:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::Fmul:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::Fdiv:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstArithmetic::Frem:
    llvm::report_fatal_error("Not yet implemented");
    break;
  }
}

void TargetARM32::lowerAssign(const InstAssign *Inst) {
  (void)Inst;
  llvm::report_fatal_error("Not yet implemented");
}

void TargetARM32::lowerBr(const InstBr *Inst) {
  (void)Inst;
  llvm::report_fatal_error("Not yet implemented");
}

void TargetARM32::lowerCall(const InstCall *Inst) {
  (void)Inst;
  llvm::report_fatal_error("Not yet implemented");
}

void TargetARM32::lowerCast(const InstCast *Inst) {
  InstCast::OpKind CastKind = Inst->getCastKind();
  switch (CastKind) {
  default:
    Func->setError("Cast type not supported");
    return;
  case InstCast::Sext: {
    llvm::report_fatal_error("Not yet implemented");
    break;
  }
  case InstCast::Zext: {
    llvm::report_fatal_error("Not yet implemented");
    break;
  }
  case InstCast::Trunc: {
    llvm::report_fatal_error("Not yet implemented");
    break;
  }
  case InstCast::Fptrunc:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstCast::Fpext: {
    llvm::report_fatal_error("Not yet implemented");
    break;
  }
  case InstCast::Fptosi:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstCast::Fptoui:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstCast::Sitofp:
    llvm::report_fatal_error("Not yet implemented");
    break;
  case InstCast::Uitofp: {
    llvm::report_fatal_error("Not yet implemented");
    break;
  }
  case InstCast::Bitcast: {
    llvm::report_fatal_error("Not yet implemented");
    break;
  }
  }
}

void TargetARM32::lowerExtractElement(const InstExtractElement *Inst) {
  (void)Inst;
  llvm::report_fatal_error("Not yet implemented");
}

void TargetARM32::lowerFcmp(const InstFcmp *Inst) {
  (void)Inst;
  llvm::report_fatal_error("Not yet implemented");
}

void TargetARM32::lowerIcmp(const InstIcmp *Inst) {
  (void)Inst;
  llvm::report_fatal_error("Not yet implemented");
}

void TargetARM32::lowerInsertElement(const InstInsertElement *Inst) {
  (void)Inst;
  llvm::report_fatal_error("Not yet implemented");
}

void TargetARM32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
  switch (Intrinsics::IntrinsicID ID = Instr->getIntrinsicInfo().ID) {
  case Intrinsics::AtomicCmpxchg: {
    llvm::report_fatal_error("Not yet implemented");
    return;
  }
  case Intrinsics::AtomicFence:
    llvm::report_fatal_error("Not yet implemented");
    return;
  case Intrinsics::AtomicFenceAll:
    // NOTE: FenceAll should prevent any load/store from being moved
    // across the fence (both atomic and non-atomic). The InstARM32Mfence
    // instruction is currently marked coarsely as "HasSideEffects".
    llvm::report_fatal_error("Not yet implemented");
    return;
  case Intrinsics::AtomicIsLockFree: {
    llvm::report_fatal_error("Not yet implemented");
    return;
  }
  case Intrinsics::AtomicLoad: {
    llvm::report_fatal_error("Not yet implemented");
    return;
  }
  case Intrinsics::AtomicRMW:
    llvm::report_fatal_error("Not yet implemented");
    return;
  case Intrinsics::AtomicStore: {
    llvm::report_fatal_error("Not yet implemented");
    return;
  }
  case Intrinsics::Bswap: {
    llvm::report_fatal_error("Not yet implemented");
    return;
  }
  case Intrinsics::Ctpop: {
    llvm::report_fatal_error("Not yet implemented");
    return;
  }
  case Intrinsics::Ctlz: {
    llvm::report_fatal_error("Not yet implemented");
    return;
  }
  case Intrinsics::Cttz: {
    llvm::report_fatal_error("Not yet implemented");
    return;
  }
  case Intrinsics::Fabs: {
    llvm::report_fatal_error("Not yet implemented");
    return;
  }
  case Intrinsics::Longjmp: {
    InstCall *Call = makeHelperCall(H_call_longjmp, nullptr, 2);
    Call->addArg(Instr->getArg(0));
    Call->addArg(Instr->getArg(1));
    lowerCall(Call);
    return;
  }
  case Intrinsics::Memcpy: {
    // In the future, we could potentially emit an inline memcpy/memset, etc.
    // for intrinsic calls w/ a known length.
    InstCall *Call = makeHelperCall(H_call_memcpy, nullptr, 3);
    Call->addArg(Instr->getArg(0));
    Call->addArg(Instr->getArg(1));
    Call->addArg(Instr->getArg(2));
    lowerCall(Call);
    return;
  }
  case Intrinsics::Memmove: {
    InstCall *Call = makeHelperCall(H_call_memmove, nullptr, 3);
    Call->addArg(Instr->getArg(0));
    Call->addArg(Instr->getArg(1));
    Call->addArg(Instr->getArg(2));
    lowerCall(Call);
    return;
  }
  case Intrinsics::Memset: {
    // The value operand needs to be extended to a stack slot size
    // because the PNaCl ABI requires arguments to be at least 32 bits
    // wide.
    Operand *ValOp = Instr->getArg(1);
    assert(ValOp->getType() == IceType_i8);
    Variable *ValExt = Func->makeVariable(stackSlotType());
    lowerCast(InstCast::create(Func, InstCast::Zext, ValExt, ValOp));
    InstCall *Call = makeHelperCall(H_call_memset, nullptr, 3);
    Call->addArg(Instr->getArg(0));
    Call->addArg(ValExt);
    Call->addArg(Instr->getArg(2));
    lowerCall(Call);
    return;
  }
  case Intrinsics::NaClReadTP: {
    if (Ctx->getFlags().getUseSandboxing()) {
      llvm::report_fatal_error("Not yet implemented");
    } else {
      InstCall *Call = makeHelperCall(H_call_read_tp, Instr->getDest(), 0);
      lowerCall(Call);
    }
    return;
  }
  case Intrinsics::Setjmp: {
    InstCall *Call = makeHelperCall(H_call_setjmp, Instr->getDest(), 1);
    Call->addArg(Instr->getArg(0));
    lowerCall(Call);
    return;
  }
  case Intrinsics::Sqrt: {
    llvm::report_fatal_error("Not yet implemented");
    return;
  }
  case Intrinsics::Stacksave: {
    llvm::report_fatal_error("Not yet implemented");
    return;
  }
  case Intrinsics::Stackrestore: {
    llvm::report_fatal_error("Not yet implemented");
    return;
  }
  case Intrinsics::Trap:
    llvm::report_fatal_error("Not yet implemented");
    return;
  case Intrinsics::UnknownIntrinsic:
    Func->setError("Should not be lowering UnknownIntrinsic");
    return;
  }
  return;
}

void TargetARM32::lowerLoad(const InstLoad *Inst) {
  (void)Inst;
  llvm::report_fatal_error("Not yet implemented");
}

void TargetARM32::doAddressOptLoad() {
  llvm::report_fatal_error("Not yet implemented");
}

void TargetARM32::randomlyInsertNop(float Probability) {
  RandomNumberGeneratorWrapper RNG(Ctx->getRNG());
  if (RNG.getTrueWithProbability(Probability)) {
    llvm::report_fatal_error("Not yet implemented");
  }
}

void TargetARM32::lowerPhi(const InstPhi * /*Inst*/) {
  Func->setError("Phi found in regular instruction list");
}

void TargetARM32::lowerRet(const InstRet *Inst) {
  (void)Inst;
  llvm::report_fatal_error("Not yet implemented");
}

void TargetARM32::lowerSelect(const InstSelect *Inst) {
  (void)Inst;
  llvm::report_fatal_error("Not yet implemented");
}

void TargetARM32::lowerStore(const InstStore *Inst) {
  (void)Inst;
  llvm::report_fatal_error("Not yet implemented");
}

void TargetARM32::doAddressOptStore() {
  llvm::report_fatal_error("Not yet implemented");
}

void TargetARM32::lowerSwitch(const InstSwitch *Inst) {
  (void)Inst;
  llvm::report_fatal_error("Not yet implemented");
}

void TargetARM32::lowerUnreachable(const InstUnreachable * /*Inst*/) {
  llvm_unreachable("Not yet implemented");
}

// Turn an i64 Phi instruction into a pair of i32 Phi instructions, to
// preserve integrity of liveness analysis. Undef values are also
// turned into zeroes, since loOperand() and hiOperand() don't expect
// Undef input.
void TargetARM32::prelowerPhis() {
  llvm::report_fatal_error("Not yet implemented");
}
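
To make the comment above concrete, here is the intended shape of the split on
one example phi (written as C++ comments in LLVM-IR-ish pseudocode; the
lo()/hi() halves correspond to loOperand()/hiOperand(), and the undef incoming
value becomes zero as described):

  // before prelowerPhis:
  //   %x = phi i64 [ %a, %bb1 ], [ undef, %bb2 ]
  // after prelowerPhis:
  //   %x.lo = phi i32 [ lo(%a), %bb1 ], [ 0, %bb2 ]
  //   %x.hi = phi i32 [ hi(%a), %bb1 ], [ 0, %bb2 ]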

// Lower the pre-ordered list of assignments into mov instructions.
// Also has to do some ad-hoc register allocation as necessary.
void TargetARM32::lowerPhiAssignments(CfgNode *Node,
                                      const AssignList &Assignments) {
  (void)Node;
  (void)Assignments;
  llvm::report_fatal_error("Not yet implemented");
}

void TargetARM32::postLower() {
  if (Ctx->getFlags().getOptLevel() == Opt_m1)
    return;
  // Find two-address non-SSA instructions where Dest==Src0, and set
  // the DestNonKillable flag to keep liveness analysis consistent.
  llvm::report_fatal_error("Not yet implemented");
}
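
A hypothetical sketch of the scan that comment describes, mirroring what the
x86 backend does at this stage; the traversal helpers and the
setDestNonKillable() mutator are assumptions about the surrounding Subzero
API, not part of this CL:

  // for (CfgNode *Node : Func->getNodes())
  //   for (Inst &I : Node->getInsts())
  //     if (!I.isDeleted() && I.getDest() != nullptr &&
  //         I.getDest() == I.getSrc(0))
  //       I.setDestNonKillable();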

void TargetARM32::makeRandomRegisterPermutation(
    llvm::SmallVectorImpl<int32_t> &Permutation,
    const llvm::SmallBitVector &ExcludeRegisters) const {
  (void)Permutation;
  (void)ExcludeRegisters;
  llvm::report_fatal_error("Not yet implemented");
}

/* TODO(jvoung): avoid duplicate symbols with multiple targets.
void ConstantUndef::emitWithoutDollar(GlobalContext *) const {
  llvm_unreachable("Not expecting to emitWithoutDollar undef");
}

void ConstantUndef::emit(GlobalContext *) const {
  llvm_unreachable("undef value encountered by emitter.");
}
*/

TargetDataARM32::TargetDataARM32(GlobalContext *Ctx)
    : TargetDataLowering(Ctx) {}

void TargetDataARM32::lowerGlobal(const VariableDeclaration &Var) const {
  (void)Var;
  llvm::report_fatal_error("Not yet implemented");
}

void TargetDataARM32::lowerGlobals(
    std::unique_ptr<VariableDeclarationList> Vars) const {
  switch (Ctx->getFlags().getOutFileType()) {
  case FT_Elf: {
    ELFObjectWriter *Writer = Ctx->getObjectWriter();
    Writer->writeDataSection(*Vars, llvm::ELF::R_ARM_ABS32);
  } break;
  case FT_Asm:
  case FT_Iasm: {
    const IceString &TranslateOnly = Ctx->getFlags().getTranslateOnly();
    OstreamLocker L(Ctx);
    for (const VariableDeclaration *Var : *Vars) {
      if (GlobalContext::matchSymbolName(Var->getName(), TranslateOnly)) {
        lowerGlobal(*Var);
      }
    }
  } break;
  }
}

void TargetDataARM32::lowerConstants() const {
  if (Ctx->getFlags().getDisableTranslation())
    return;
  llvm::report_fatal_error("Not yet implemented");
}

} // end of namespace Ice