Chromium Code Reviews

Side by Side Diff: src/IceTargetLoweringARM32.cpp

Issue 1127963004: Subzero ARM: lowerArguments (GPR), basic legalize(), and lowerRet(i32, i64). (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: clang-format Created 5 years, 7 months ago
1 //===- subzero/src/IceTargetLoweringARM32.cpp - ARM32 lowering ------------===// 1 //===- subzero/src/IceTargetLoweringARM32.cpp - ARM32 lowering ------------===//
2 // 2 //
3 // The Subzero Code Generator 3 // The Subzero Code Generator
4 // 4 //
5 // This file is distributed under the University of Illinois Open Source 5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details. 6 // License. See LICENSE.TXT for details.
7 // 7 //
8 //===----------------------------------------------------------------------===// 8 //===----------------------------------------------------------------------===//
9 // 9 //
10 // This file implements the TargetLoweringARM32 class, which consists almost 10 // This file implements the TargetLoweringARM32 class, which consists almost
(...skipping 21 matching lines...)
32 32
33 namespace { 33 namespace {
34 void UnimplementedError(const ClFlags &Flags) { 34 void UnimplementedError(const ClFlags &Flags) {
35 if (!Flags.getSkipUnimplemented()) { 35 if (!Flags.getSkipUnimplemented()) {
36 // Use llvm_unreachable instead of report_fatal_error, which gives better 36 // Use llvm_unreachable instead of report_fatal_error, which gives better
37 // stack traces. 37 // stack traces.
38 llvm_unreachable("Not yet implemented"); 38 llvm_unreachable("Not yet implemented");
39 abort(); 39 abort();
40 } 40 }
41 } 41 }
42
43 // The maximum number of arguments to pass in GPR registers.
44 const uint32_t ARM32_MAX_GPR_ARG = 4;
45
46 // The maximum number of argument bytes to pass in float/vector registers.
47 // const uint32_t ARM32_MAX_DREG_ARG_BYTES = 64;
42 } // end of anonymous namespace 48 } // end of anonymous namespace
43 49
44 TargetARM32::TargetARM32(Cfg *Func) 50 TargetARM32::TargetARM32(Cfg *Func)
45 : TargetLowering(Func), UsesFramePointer(false) { 51 : TargetLowering(Func), UsesFramePointer(false) {
46 // TODO: Don't initialize IntegerRegisters and friends every time. 52 // TODO: Don't initialize IntegerRegisters and friends every time.
47 // Instead, initialize in some sort of static initializer for the 53 // Instead, initialize in some sort of static initializer for the
48 // class. 54 // class.
49 llvm::SmallBitVector IntegerRegisters(RegARM32::Reg_NUM); 55 llvm::SmallBitVector IntegerRegisters(RegARM32::Reg_NUM);
50 llvm::SmallBitVector FloatRegisters(RegARM32::Reg_NUM); 56 llvm::SmallBitVector FloatRegisters(RegARM32::Reg_NUM);
51 llvm::SmallBitVector VectorRegisters(RegARM32::Reg_NUM); 57 llvm::SmallBitVector VectorRegisters(RegARM32::Reg_NUM);
(...skipping 216 matching lines...)
268 int32_t Offset = Var->getStackOffset(); 274 int32_t Offset = Var->getStackOffset();
269 if (!hasFramePointer()) 275 if (!hasFramePointer())
270 Offset += getStackAdjustment(); 276 Offset += getStackAdjustment();
271 // TODO(jvoung): Handle out of range. Perhaps we need a scratch register 277 // TODO(jvoung): Handle out of range. Perhaps we need a scratch register
272 // to materialize a larger offset. 278 // to materialize a larger offset.
273 const bool SignExt = false; 279 const bool SignExt = false;
274 if (!OperandARM32Mem::canHoldOffset(Var->getType(), SignExt, Offset)) { 280 if (!OperandARM32Mem::canHoldOffset(Var->getType(), SignExt, Offset)) {
275 llvm::report_fatal_error("Illegal stack offset"); 281 llvm::report_fatal_error("Illegal stack offset");
276 } 282 }
277 const Type FrameSPTy = IceType_i32; 283 const Type FrameSPTy = IceType_i32;
278 Str << "[" << getRegName(getFrameOrStackReg(), FrameSPTy) << ", " << Offset 284 Str << "[" << getRegName(getFrameOrStackReg(), FrameSPTy);
279 << "]"; 285 if (Offset != 0) {
286 Str << ", " << getConstantPrefix() << Offset;
287 }
288 Str << "]";
280 } 289 }
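
For context on the canHoldOffset() check above, here is a minimal standalone sketch of the ARM A32 immediate-offset limits it is presumably enforcing (the real OperandARM32Mem::canHoldOffset may differ in detail): plain LDR/STR and LDRB/STRB take a 12-bit offset magnitude, while the halfword and sign-extending forms only take 8 bits.

  #include <cstdint>

  // Sketch only, not the patch's implementation. "TypeBytes" and "SignExt"
  // stand in for the Type/SignExt parameters of the real check.
  static bool canHoldOffsetSketch(uint32_t TypeBytes, bool SignExt,
                                  int32_t Offset) {
    const int32_t Mag = Offset < 0 ? -Offset : Offset;
    // LDRH/STRH/LDRSB/LDRSH (addressing mode 3): 8-bit magnitude, +/-255.
    if (SignExt || TypeBytes == 2)
      return Mag <= 255;
    // LDR/STR/LDRB/STRB (addressing mode 2): 12-bit magnitude, +/-4095.
    return Mag <= 4095;
  }

Under that rule the i32 frame/stack accesses emitted here tolerate offsets up to 4095 bytes; anything larger is what the TODO about materializing a bigger offset in a scratch register refers to.
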
281 290
282 void TargetARM32::lowerArguments() { 291 void TargetARM32::lowerArguments() {
283 UnimplementedError(Func->getContext()->getFlags()); 292 VarList &Args = Func->getArgs();
293 // The first few integer type parameters can use r0-r3, regardless of their
294 // position relative to the floating-point/vector arguments in the argument
295 // list. Floating-point and vector arguments can use q0-q3 (aka d0-d7,
296 // s0-s15).
297 unsigned NumGPRRegsUsed = 0;
298
299 // For each register argument, replace Arg in the argument list with the
300 // home register. Then generate an instruction in the prolog to copy the
301 // home register to the assigned location of Arg.
302 Context.init(Func->getEntryNode());
303 Context.setInsertPoint(Context.getCur());
304
305 for (SizeT I = 0, E = Args.size(); I < E; ++I) {
306 Variable *Arg = Args[I];
307 Type Ty = Arg->getType();
308 // TODO(jvoung): handle float/vector types.
309 if (isVectorType(Ty)) {
310 UnimplementedError(Func->getContext()->getFlags());
311 continue;
312 } else if (isFloatingType(Ty)) {
313 UnimplementedError(Func->getContext()->getFlags());
314 continue;
315 } else if (Ty == IceType_i64) {
316 if (NumGPRRegsUsed >= ARM32_MAX_GPR_ARG)
317 continue;
318 int32_t RegLo = RegARM32::Reg_r0 + NumGPRRegsUsed;
319 int32_t RegHi = 0;
320 ++NumGPRRegsUsed;
321 // Always start i64 registers at an even register, so this may end
322 // up padding away a register.
323 if (RegLo % 2 != 0) {
324 ++RegLo;
325 ++NumGPRRegsUsed;
326 }
327 // If this leaves us without room to consume another register,
328 // leave any previously speculatively consumed registers as consumed.
329 if (NumGPRRegsUsed >= ARM32_MAX_GPR_ARG)
330 continue;
331 RegHi = RegARM32::Reg_r0 + NumGPRRegsUsed;
332 ++NumGPRRegsUsed;
333 Variable *RegisterArg = Func->makeVariable(Ty);
334 Variable *RegisterLo = Func->makeVariable(IceType_i32);
335 Variable *RegisterHi = Func->makeVariable(IceType_i32);
336 if (ALLOW_DUMP) {
337 RegisterArg->setName(Func, "home_reg:" + Arg->getName(Func));
338 RegisterLo->setName(Func, "home_reg_lo:" + Arg->getName(Func));
339 RegisterHi->setName(Func, "home_reg_hi:" + Arg->getName(Func));
340 }
341 RegisterLo->setRegNum(RegLo);
342 RegisterLo->setIsArg();
343 RegisterHi->setRegNum(RegHi);
344 RegisterHi->setIsArg();
345 RegisterArg->setLoHi(RegisterLo, RegisterHi);
346 RegisterArg->setIsArg();
347 Arg->setIsArg(false);
348
349 Args[I] = RegisterArg;
350 Context.insert(InstAssign::create(Func, Arg, RegisterArg));
351 continue;
352 } else {
353 assert(Ty == IceType_i32);
354 if (NumGPRRegsUsed >= ARM32_MAX_GPR_ARG)
355 continue;
356 int32_t RegNum = RegARM32::Reg_r0 + NumGPRRegsUsed;
357 ++NumGPRRegsUsed;
358 Variable *RegisterArg = Func->makeVariable(Ty);
359 if (ALLOW_DUMP) {
360 RegisterArg->setName(Func, "home_reg:" + Arg->getName(Func));
361 }
362 RegisterArg->setRegNum(RegNum);
363 RegisterArg->setIsArg();
364 Arg->setIsArg(false);
365
366 Args[I] = RegisterArg;
367 Context.insert(InstAssign::create(Func, Arg, RegisterArg));
368 }
369 }
284 } 370 }
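
To make the register-assignment rule above concrete, here is a small self-contained sketch (the argument list and names are made up, not from the patch) that mirrors the loop: i32 takes the next free register of r0-r3, i64 takes an even/odd pair and pads a register when needed, and anything left over stays on the stack.

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  int main() {
    const uint32_t MaxGPRArgs = 4;                      // mirrors ARM32_MAX_GPR_ARG
    const std::vector<uint32_t> ArgBits = {32, 64, 32}; // f(i32, i64, i32)
    uint32_t Used = 0;
    for (size_t I = 0; I < ArgBits.size(); ++I) {
      if (ArgBits[I] == 64) {
        if (Used >= MaxGPRArgs) { printf("arg %zu (i64): stack\n", I); continue; }
        uint32_t Lo = Used++;
        if (Lo % 2 != 0) { ++Lo; ++Used; }  // start i64 at an even register
        if (Used >= MaxGPRArgs) { printf("arg %zu (i64): stack\n", I); continue; }
        const uint32_t Hi = Used++;
        printf("arg %zu (i64): r%u:r%u\n", I, Lo, Hi);
      } else {
        if (Used >= MaxGPRArgs) { printf("arg %zu (i32): stack\n", I); continue; }
        printf("arg %zu (i32): r%u\n", I, Used++);
      }
    }
    return 0;
  }

For f(i32, i64, i32) this prints r0, then r2:r3 (r1 is padded away), then stack, matching the even-pair rule in the lowering above.
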
285 371
286 Type TargetARM32::stackSlotType() { return IceType_i32; } 372 Type TargetARM32::stackSlotType() { return IceType_i32; }
287 373
288 void TargetARM32::addProlog(CfgNode *Node) { 374 void TargetARM32::addProlog(CfgNode *Node) {
289 (void)Node; 375 (void)Node;
290 UnimplementedError(Func->getContext()->getFlags()); 376 UnimplementedError(Func->getContext()->getFlags());
291 } 377 }
292 378
293 void TargetARM32::addEpilog(CfgNode *Node) { 379 void TargetARM32::addEpilog(CfgNode *Node) {
294 (void)Node; 380 (void)Node;
295 UnimplementedError(Func->getContext()->getFlags()); 381 UnimplementedError(Func->getContext()->getFlags());
296 } 382 }
297 383
384 void TargetARM32::split64(Variable *Var) {
385 switch (Var->getType()) {
386 default:
387 return;
388 case IceType_i64:
389 // TODO: Only consider F64 if we need to push each half when
390 // passing as an argument to a function call. Note that each half
391 // is still typed as I32.
392 case IceType_f64:
393 break;
394 }
395 Variable *Lo = Var->getLo();
396 Variable *Hi = Var->getHi();
397 if (Lo) {
398 assert(Hi);
399 return;
400 }
401 assert(Hi == nullptr);
402 Lo = Func->makeVariable(IceType_i32);
403 Hi = Func->makeVariable(IceType_i32);
404 if (ALLOW_DUMP) {
405 Lo->setName(Func, Var->getName(Func) + "__lo");
406 Hi->setName(Func, Var->getName(Func) + "__hi");
407 }
408 Var->setLoHi(Lo, Hi);
409 if (Var->getIsArg()) {
410 Lo->setIsArg();
411 Hi->setIsArg();
412 }
413 }
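
A short usage sketch for split64() (illustrative only; "Val64" is a hypothetical variable, and this assumes the surrounding TargetARM32 member context): the halves are created lazily, named from the parent when dumping is enabled, and a second call is a no-op.

  // Inside a TargetARM32 lowering routine (sketch):
  Variable *Val64 = Func->makeVariable(IceType_i64);
  if (ALLOW_DUMP)
    Val64->setName(Func, "x");
  split64(Val64);                // creates "x__lo" and "x__hi", both IceType_i32
  Variable *Lo = Val64->getLo(); // low 32 bits
  Variable *Hi = Val64->getHi(); // high 32 bits
  split64(Val64);                // halves already exist, so this returns early
  (void)Lo;
  (void)Hi;
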
414
415 Operand *TargetARM32::loOperand(Operand *Operand) {
416 assert(Operand->getType() == IceType_i64);
417 if (Operand->getType() != IceType_i64)
418 return Operand;
419 if (Variable *Var = llvm::dyn_cast<Variable>(Operand)) {
420 split64(Var);
421 return Var->getLo();
422 }
423 if (ConstantInteger64 *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
424 return Ctx->getConstantInt32(static_cast<uint32_t>(Const->getValue()));
425 }
426 if (OperandARM32Mem *Mem = llvm::dyn_cast<OperandARM32Mem>(Operand)) {
427 // Conservatively disallow memory operands with side-effects
428 // in case of duplication.
429 assert(Mem->getAddrMode() == OperandARM32Mem::Offset ||
430 Mem->getAddrMode() == OperandARM32Mem::NegOffset);
431 if (Mem->isRegReg()) {
432 return OperandARM32Mem::create(Func, IceType_i32, Mem->getBase(),
433 Mem->getIndex(), Mem->getShiftOp(),
434 Mem->getShiftAmt(), Mem->getAddrMode());
435 } else {
436 return OperandARM32Mem::create(Func, IceType_i32, Mem->getBase(),
437 Mem->getOffset(), Mem->getAddrMode());
438 }
439 }
440 llvm_unreachable("Unsupported operand type");
441 return nullptr;
442 }
443
444 Operand *TargetARM32::hiOperand(Operand *Operand) {
445 assert(Operand->getType() == IceType_i64);
446 if (Operand->getType() != IceType_i64)
447 return Operand;
448 if (Variable *Var = llvm::dyn_cast<Variable>(Operand)) {
449 split64(Var);
450 return Var->getHi();
451 }
452 if (ConstantInteger64 *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
453 return Ctx->getConstantInt32(
454 static_cast<uint32_t>(Const->getValue() >> 32));
455 }
456 if (OperandARM32Mem *Mem = llvm::dyn_cast<OperandARM32Mem>(Operand)) {
457 // Conservatively disallow memory operands with side-effects
458 // in case of duplication.
459 assert(Mem->getAddrMode() == OperandARM32Mem::Offset ||
460 Mem->getAddrMode() == OperandARM32Mem::NegOffset);
461 const Type SplitType = IceType_i32;
462 if (Mem->isRegReg()) {
463 // We have to make a temp variable T, and add 4 to either Base or Index.
464 // The Index may be shifted, so adding 4 can mean something else.
465 // Thus, prefer T := Base + 4, and use T as the new Base.
466 Variable *Base = Mem->getBase();
467 Constant *Four = Ctx->getConstantInt32(4);
468 Variable *NewBase = Func->makeVariable(Base->getType());
469 lowerArithmetic(InstArithmetic::create(Func, InstArithmetic::Add, NewBase,
470 Base, Four));
471 return OperandARM32Mem::create(Func, SplitType, NewBase, Mem->getIndex(),
472 Mem->getShiftOp(), Mem->getShiftAmt(),
473 Mem->getAddrMode());
474 } else {
475 Variable *Base = Mem->getBase();
476 ConstantInteger32 *Offset = Mem->getOffset();
477 assert(!Utils::WouldOverflowAdd(Offset->getValue(), 4));
478 int32_t NextOffsetVal = Offset->getValue() + 4;
479 const bool SignExt = false;
480 if (!OperandARM32Mem::canHoldOffset(SplitType, SignExt, NextOffsetVal)) {
481 // We have to make a temp variable and add 4 to either Base or Offset.
482 // If we add 4 to Offset, this will convert a non-RegReg addressing
483 // mode into a RegReg addressing mode. Since NaCl sandboxing disallows
484 // RegReg addressing modes, prefer adding to base and replacing instead.
485 // Thus we leave the old offset alone.
486 Constant *Four = Ctx->getConstantInt32(4);
487 Variable *NewBase = Func->makeVariable(Base->getType());
488 lowerArithmetic(InstArithmetic::create(Func, InstArithmetic::Add,
489 NewBase, Base, Four));
490 Base = NewBase;
491 } else {
492 Offset =
493 llvm::cast<ConstantInteger32>(Ctx->getConstantInt32(NextOffsetVal));
494 }
495 return OperandARM32Mem::create(Func, SplitType, Base, Offset,
496 Mem->getAddrMode());
497 }
498 }
499 llvm_unreachable("Unsupported operand type");
500 return nullptr;
501 }
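
A concrete illustration of the splitting arithmetic above (standalone sketch; the constant and offsets are made up): a 64-bit immediate splits into its low and high 32-bit words, and an immediate-offset memory operand splits into the same address and address + 4 as long as the bumped offset still fits the addressing mode.

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint64_t Value = 0x1122334455667788ull;
    const uint32_t Lo = static_cast<uint32_t>(Value);       // 0x55667788
    const uint32_t Hi = static_cast<uint32_t>(Value >> 32); // 0x11223344
    printf("lo=0x%08X hi=0x%08X\n", Lo, Hi);
    // For a memory operand like [r4, #8], the high half is [r4, #12]:
    // same base, offset + 4. If offset + 4 no longer encodes, hiOperand()
    // instead forms a temporary base = r4 + 4 and keeps the old offset.
    const int32_t OffsetLo = 8;
    const int32_t OffsetHi = OffsetLo + 4;
    printf("mem lo offset=%d, hi offset=%d\n", OffsetLo, OffsetHi);
    return 0;
  }
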
502
298 llvm::SmallBitVector TargetARM32::getRegisterSet(RegSetMask Include, 503 llvm::SmallBitVector TargetARM32::getRegisterSet(RegSetMask Include,
299 RegSetMask Exclude) const { 504 RegSetMask Exclude) const {
300 llvm::SmallBitVector Registers(RegARM32::Reg_NUM); 505 llvm::SmallBitVector Registers(RegARM32::Reg_NUM);
301 506
302 #define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \ 507 #define X(val, encode, name, scratch, preserved, stackptr, frameptr, isInt, \
303 isFP) \ 508 isFP) \
304 if (scratch && (Include & RegSet_CallerSave)) \ 509 if (scratch && (Include & RegSet_CallerSave)) \
305 Registers[RegARM32::val] = true; \ 510 Registers[RegARM32::val] = true; \
306 if (preserved && (Include & RegSet_CalleeSave)) \ 511 if (preserved && (Include & RegSet_CalleeSave)) \
307 Registers[RegARM32::val] = true; \ 512 Registers[RegARM32::val] = true; \
(...skipping 23 matching lines...)
331 // adjustment operations implemented below assume that the stack is 536 // adjustment operations implemented below assume that the stack is
332 // aligned before the alloca. All the alloca code ensures that the 537 // aligned before the alloca. All the alloca code ensures that the
333 // stack alignment is preserved after the alloca. The stack alignment 538 // stack alignment is preserved after the alloca. The stack alignment
334 // restriction can be relaxed in some cases. 539 // restriction can be relaxed in some cases.
335 NeedsStackAlignment = true; 540 NeedsStackAlignment = true;
336 (void)Inst; 541 (void)Inst;
337 UnimplementedError(Func->getContext()->getFlags()); 542 UnimplementedError(Func->getContext()->getFlags());
338 } 543 }
339 544
340 void TargetARM32::lowerArithmetic(const InstArithmetic *Inst) { 545 void TargetARM32::lowerArithmetic(const InstArithmetic *Inst) {
341 switch (Inst->getOp()) { 546 Variable *Dest = Inst->getDest();
342 case InstArithmetic::_num: 547 // TODO(jvoung): Should be able to flip Src0 and Src1 if it is easier
343 llvm_unreachable("Unknown arithmetic operator"); 548 // to legalize Src0 to flex or Src1 to flex and there is a reversible
344 break; 549 // instruction. E.g., reverse subtract with immediate, register vs
345 case InstArithmetic::Add: 550 // register, immediate.
551 // Or it may be the case that the operands aren't swapped, but the
552 // bits can be flipped and a different operation applied.
553 // E.g., use BIC (bit clear) instead of AND for some masks.
554 Variable *Src0 = legalizeToVar(Inst->getSrc(0));
555 Operand *Src1 = legalize(Inst->getSrc(1), Legal_Reg | Legal_Flex);
556 (void)Src0;
557 (void)Src1;
558 if (Dest->getType() == IceType_i64) {
346 UnimplementedError(Func->getContext()->getFlags()); 559 UnimplementedError(Func->getContext()->getFlags());
347 break; 560 } else if (isVectorType(Dest->getType())) {
348 case InstArithmetic::And:
349 UnimplementedError(Func->getContext()->getFlags()); 561 UnimplementedError(Func->getContext()->getFlags());
350 break; 562 } else { // Dest->getType() is non-i64 scalar
351 case InstArithmetic::Or: 563 switch (Inst->getOp()) {
352 UnimplementedError(Func->getContext()->getFlags()); 564 case InstArithmetic::_num:
353 break; 565 llvm_unreachable("Unknown arithmetic operator");
354 case InstArithmetic::Xor: 566 break;
355 UnimplementedError(Func->getContext()->getFlags()); 567 case InstArithmetic::Add: {
356 break; 568 UnimplementedError(Func->getContext()->getFlags());
357 case InstArithmetic::Sub: 569 // Variable *T = makeReg(Dest->getType());
358 UnimplementedError(Func->getContext()->getFlags()); 570 // _add(T, Src0, Src1);
359 break; 571 // _mov(Dest, T);
jvoung (off chromium) 2015/05/14 22:42:07 re: commented out code -- I've been playing around
360 case InstArithmetic::Mul: 572 } break;
361 UnimplementedError(Func->getContext()->getFlags()); 573 case InstArithmetic::And:
362 break; 574 UnimplementedError(Func->getContext()->getFlags());
363 case InstArithmetic::Shl: 575 break;
364 UnimplementedError(Func->getContext()->getFlags()); 576 case InstArithmetic::Or:
365 break; 577 UnimplementedError(Func->getContext()->getFlags());
366 case InstArithmetic::Lshr: 578 break;
367 UnimplementedError(Func->getContext()->getFlags()); 579 case InstArithmetic::Xor:
368 break; 580 UnimplementedError(Func->getContext()->getFlags());
369 case InstArithmetic::Ashr: 581 break;
370 UnimplementedError(Func->getContext()->getFlags()); 582 case InstArithmetic::Sub:
371 break; 583 UnimplementedError(Func->getContext()->getFlags());
372 case InstArithmetic::Udiv: 584 break;
373 UnimplementedError(Func->getContext()->getFlags()); 585 case InstArithmetic::Mul:
374 break; 586 UnimplementedError(Func->getContext()->getFlags());
375 case InstArithmetic::Sdiv: 587 break;
376 UnimplementedError(Func->getContext()->getFlags()); 588 case InstArithmetic::Shl:
377 break; 589 UnimplementedError(Func->getContext()->getFlags());
378 case InstArithmetic::Urem: 590 break;
379 UnimplementedError(Func->getContext()->getFlags()); 591 case InstArithmetic::Lshr:
380 break; 592 UnimplementedError(Func->getContext()->getFlags());
381 case InstArithmetic::Srem: 593 break;
382 UnimplementedError(Func->getContext()->getFlags()); 594 case InstArithmetic::Ashr:
383 break; 595 UnimplementedError(Func->getContext()->getFlags());
384 case InstArithmetic::Fadd: 596 break;
385 UnimplementedError(Func->getContext()->getFlags()); 597 case InstArithmetic::Udiv:
386 break; 598 UnimplementedError(Func->getContext()->getFlags());
387 case InstArithmetic::Fsub: 599 break;
388 UnimplementedError(Func->getContext()->getFlags()); 600 case InstArithmetic::Sdiv:
389 break; 601 UnimplementedError(Func->getContext()->getFlags());
390 case InstArithmetic::Fmul: 602 break;
391 UnimplementedError(Func->getContext()->getFlags()); 603 case InstArithmetic::Urem:
392 break; 604 UnimplementedError(Func->getContext()->getFlags());
393 case InstArithmetic::Fdiv: 605 break;
394 UnimplementedError(Func->getContext()->getFlags()); 606 case InstArithmetic::Srem:
395 break; 607 UnimplementedError(Func->getContext()->getFlags());
396 case InstArithmetic::Frem: 608 break;
397 UnimplementedError(Func->getContext()->getFlags()); 609 case InstArithmetic::Fadd:
398 break; 610 UnimplementedError(Func->getContext()->getFlags());
611 break;
612 case InstArithmetic::Fsub:
613 UnimplementedError(Func->getContext()->getFlags());
614 break;
615 case InstArithmetic::Fmul:
616 UnimplementedError(Func->getContext()->getFlags());
617 break;
618 case InstArithmetic::Fdiv:
619 UnimplementedError(Func->getContext()->getFlags());
620 break;
621 case InstArithmetic::Frem:
622 UnimplementedError(Func->getContext()->getFlags());
623 break;
624 }
399 } 625 }
400 } 626 }
401 627
402 void TargetARM32::lowerAssign(const InstAssign *Inst) { 628 void TargetARM32::lowerAssign(const InstAssign *Inst) {
403 (void)Inst; 629 Variable *Dest = Inst->getDest();
404 UnimplementedError(Func->getContext()->getFlags()); 630 Operand *Src0 = Inst->getSrc(0);
631 assert(Dest->getType() == Src0->getType());
632 if (Dest->getType() == IceType_i64) {
633 Src0 = legalize(Src0);
634 Operand *Src0Lo = loOperand(Src0);
635 Operand *Src0Hi = hiOperand(Src0);
636 Variable *DestLo = llvm::cast<Variable>(loOperand(Dest));
637 Variable *DestHi = llvm::cast<Variable>(hiOperand(Dest));
638 Variable *T_Lo = nullptr, *T_Hi = nullptr;
639 _mov(T_Lo, Src0Lo);
640 _mov(DestLo, T_Lo);
641 _mov(T_Hi, Src0Hi);
642 _mov(DestHi, T_Hi);
643 } else {
644 Operand *SrcR;
645 if (Dest->hasReg()) {
646 // If Dest already has a physical register, then legalize the
647 // Src operand into a Variable with the same register
648 // assignment. This is mostly a workaround for advanced phi
649 // lowering's ad-hoc register allocation which assumes no
650 // register allocation is needed when at least one of the
651 // operands is non-memory.
652 // TODO(jvoung): check this for ARM.
653 SrcR = legalize(Src0, Legal_Reg, Dest->getRegNum());
654 } else {
655 // Dest could be a stack operand. Since we could potentially need
656 // to do a Store (and store can only have Register operands),
657 // legalize this to a register.
658 SrcR = legalize(Src0, Legal_Reg);
659 }
660 if (isVectorType(Dest->getType())) {
661 UnimplementedError(Func->getContext()->getFlags());
662 } else {
663 _mov(Dest, SrcR);
664 }
665 }
405 } 666 }
406 667
407 void TargetARM32::lowerBr(const InstBr *Inst) { 668 void TargetARM32::lowerBr(const InstBr *Inst) {
408 (void)Inst; 669 (void)Inst;
409 UnimplementedError(Func->getContext()->getFlags()); 670 UnimplementedError(Func->getContext()->getFlags());
410 } 671 }
411 672
412 void TargetARM32::lowerCall(const InstCall *Inst) { 673 void TargetARM32::lowerCall(const InstCall *Inst) {
413 (void)Inst; 674 (void)Inst;
414 UnimplementedError(Func->getContext()->getFlags()); 675 UnimplementedError(Func->getContext()->getFlags());
(...skipping 207 matching lines...)
622 } 883 }
623 } 884 }
624 885
625 void TargetARM32::lowerPhi(const InstPhi * /*Inst*/) { 886 void TargetARM32::lowerPhi(const InstPhi * /*Inst*/) {
626 Func->setError("Phi found in regular instruction list"); 887 Func->setError("Phi found in regular instruction list");
627 } 888 }
628 889
629 void TargetARM32::lowerRet(const InstRet *Inst) { 890 void TargetARM32::lowerRet(const InstRet *Inst) {
630 Variable *Reg = nullptr; 891 Variable *Reg = nullptr;
631 if (Inst->hasRetValue()) { 892 if (Inst->hasRetValue()) {
632 UnimplementedError(Func->getContext()->getFlags()); 893 Operand *Src0 = Inst->getRetValue();
894 if (Src0->getType() == IceType_i64) {
895 Variable *R0 = legalizeToVar(loOperand(Src0), RegARM32::Reg_r0);
896 Variable *R1 = legalizeToVar(hiOperand(Src0), RegARM32::Reg_r1);
897 Reg = R0;
898 Context.insert(InstFakeUse::create(Func, R1));
899 } else if (isScalarFloatingType(Src0->getType())) {
900 UnimplementedError(Func->getContext()->getFlags());
901 } else if (isVectorType(Src0->getType())) {
902 UnimplementedError(Func->getContext()->getFlags());
903 } else {
904 Operand *Src0F = legalize(Src0, Legal_Reg | Legal_Flex);
905 _mov(Reg, Src0F, RegARM32::Reg_r0);
906 }
633 } 907 }
634 // Add a ret instruction even if sandboxing is enabled, because 908 // Add a ret instruction even if sandboxing is enabled, because
635 // addEpilog explicitly looks for a ret instruction as a marker for 909 // addEpilog explicitly looks for a ret instruction as a marker for
636 // where to insert the frame removal instructions. 910 // where to insert the frame removal instructions.
637 // addEpilog is responsible for restoring the "lr" register as needed 911 // addEpilog is responsible for restoring the "lr" register as needed
638 // prior to this ret instruction. 912 // prior to this ret instruction.
639 _ret(getPhysicalRegister(RegARM32::Reg_lr), Reg); 913 _ret(getPhysicalRegister(RegARM32::Reg_lr), Reg);
640 // Add a fake use of sp to make sure sp stays alive for the entire 914 // Add a fake use of sp to make sure sp stays alive for the entire
641 // function. Otherwise post-call sp adjustments get dead-code 915 // function. Otherwise post-call sp adjustments get dead-code
642 // eliminated. TODO: Are there more places where the fake use 916 // eliminated. TODO: Are there more places where the fake use
(...skipping 16 matching lines...)
659 void TargetARM32::doAddressOptStore() { 933 void TargetARM32::doAddressOptStore() {
660 UnimplementedError(Func->getContext()->getFlags()); 934 UnimplementedError(Func->getContext()->getFlags());
661 } 935 }
662 936
663 void TargetARM32::lowerSwitch(const InstSwitch *Inst) { 937 void TargetARM32::lowerSwitch(const InstSwitch *Inst) {
664 (void)Inst; 938 (void)Inst;
665 UnimplementedError(Func->getContext()->getFlags()); 939 UnimplementedError(Func->getContext()->getFlags());
666 } 940 }
667 941
668 void TargetARM32::lowerUnreachable(const InstUnreachable * /*Inst*/) { 942 void TargetARM32::lowerUnreachable(const InstUnreachable * /*Inst*/) {
669 llvm_unreachable("Not yet implemented"); 943 UnimplementedError(Func->getContext()->getFlags());
670 } 944 }
671 945
672 // Turn an i64 Phi instruction into a pair of i32 Phi instructions, to 946 // Turn an i64 Phi instruction into a pair of i32 Phi instructions, to
673 // preserve integrity of liveness analysis. Undef values are also 947 // preserve integrity of liveness analysis. Undef values are also
674 // turned into zeroes, since loOperand() and hiOperand() don't expect 948 // turned into zeroes, since loOperand() and hiOperand() don't expect
675 // Undef input. 949 // Undef input.
676 void TargetARM32::prelowerPhis() { 950 void TargetARM32::prelowerPhis() {
677 UnimplementedError(Func->getContext()->getFlags()); 951 UnimplementedError(Func->getContext()->getFlags());
678 } 952 }
679 953
680 // Lower the pre-ordered list of assignments into mov instructions. 954 // Lower the pre-ordered list of assignments into mov instructions.
681 // Also has to do some ad-hoc register allocation as necessary. 955 // Also has to do some ad-hoc register allocation as necessary.
682 void TargetARM32::lowerPhiAssignments(CfgNode *Node, 956 void TargetARM32::lowerPhiAssignments(CfgNode *Node,
683 const AssignList &Assignments) { 957 const AssignList &Assignments) {
684 (void)Node; 958 (void)Node;
685 (void)Assignments; 959 (void)Assignments;
686 UnimplementedError(Func->getContext()->getFlags()); 960 UnimplementedError(Func->getContext()->getFlags());
687 } 961 }
688 962
963 Variable *TargetARM32::makeVectorOfZeros(Type Ty, int32_t RegNum) {
964 Variable *Reg = makeReg(Ty, RegNum);
965 UnimplementedError(Func->getContext()->getFlags());
966 return Reg;
967 }
968
969 // Helper for legalize() to emit the right code to lower an operand to a
970 // register of the appropriate type.
971 Variable *TargetARM32::copyToReg(Operand *Src, int32_t RegNum) {
972 Type Ty = Src->getType();
973 Variable *Reg = makeReg(Ty, RegNum);
974 if (isVectorType(Ty)) {
975 UnimplementedError(Func->getContext()->getFlags());
976 } else {
977 // Mov's Src operand can really only be the flexible second operand type
978 // or a register. Users should guarantee that.
979 _mov(Reg, Src);
980 }
981 return Reg;
982 }
983
984 Operand *TargetARM32::legalize(Operand *From, LegalMask Allowed,
985 int32_t RegNum) {
986 // Assert that a physical register is allowed. To date, all calls
987 // to legalize() allow a physical register. Legal_Flex converts
988 // registers to the right type OperandARM32FlexReg as needed.
989 assert(Allowed & Legal_Reg);
990 // Go through the various types of operands:
991 // OperandARM32Mem, OperandARM32Flex, Constant, and Variable.
992 // Given the above assertion, if type of operand is not legal
993 // (e.g., OperandARM32Mem and !Legal_Mem), we can always copy
994 // to a register.
995 if (auto *Mem = llvm::dyn_cast<OperandARM32Mem>(From)) {
996 // Before doing anything with a Mem operand, we need to ensure
997 // that the Base and Index components are in physical registers.
998 Variable *Base = Mem->getBase();
999 Variable *Index = Mem->getIndex();
1000 Variable *RegBase = nullptr;
1001 Variable *RegIndex = nullptr;
1002 if (Base) {
1003 RegBase = legalizeToVar(Base);
1004 }
1005 if (Index) {
1006 RegIndex = legalizeToVar(Index);
1007 }
1008 // Create a new operand if there was a change.
1009 if (Base != RegBase || Index != RegIndex) {
1010 // There is only a reg +/- reg or reg + imm form.
1011 // Figure out which to re-create.
1012 if (Mem->isRegReg()) {
1013 Mem = OperandARM32Mem::create(Func, Mem->getType(), RegBase, RegIndex,
1014 Mem->getShiftOp(), Mem->getShiftAmt(),
1015 Mem->getAddrMode());
1016 } else {
1017 Mem = OperandARM32Mem::create(Func, Mem->getType(), RegBase,
1018 Mem->getOffset(), Mem->getAddrMode());
1019 }
1020 }
1021 if (!(Allowed & Legal_Mem)) {
1022 Type Ty = Mem->getType();
1023 Variable *Reg = makeReg(Ty, RegNum);
1024 _ldr(Reg, Mem);
1025 From = Reg;
1026 } else {
1027 From = Mem;
1028 }
1029 return From;
1030 }
1031
1032 if (auto *Flex = llvm::dyn_cast<OperandARM32Flex>(From)) {
1033 if (!(Allowed & Legal_Flex)) {
1034 if (auto *FlexReg = llvm::dyn_cast<OperandARM32FlexReg>(Flex)) {
1035 if (FlexReg->getShiftOp() == OperandARM32::kNoShift) {
1036 From = FlexReg->getReg();
1037 // Fall through and let From be checked as a Variable below,
1038 // where it may or may not need a register.
1039 } else {
1040 return copyToReg(Flex, RegNum);
1041 }
1042 } else {
1043 return copyToReg(Flex, RegNum);
1044 }
1045 } else {
1046 return From;
1047 }
1048 }
1049
1050 if (llvm::isa<Constant>(From)) {
1051 if (llvm::isa<ConstantUndef>(From)) {
1052 // Lower undefs to zero. Another option is to lower undefs to an
1053 // uninitialized register; however, using an uninitialized register
1054 // results in less predictable code.
1055 if (isVectorType(From->getType()))
1056 return makeVectorOfZeros(From->getType(), RegNum);
1057 From = Ctx->getConstantZero(From->getType());
1058 }
1059 // There should be no constants of vector type (other than undef).
1060 assert(!isVectorType(From->getType()));
1061 bool CanBeFlex = Allowed & Legal_Flex;
1062 if (auto C32 = llvm::dyn_cast<ConstantInteger32>(From)) {
1063 uint32_t RotateAmt;
1064 uint32_t Immed_8;
1065 uint32_t Value = static_cast<uint32_t>(C32->getValue());
1066 // Check if the immediate will fit in a Flexible second operand,
1067 // if a Flexible second operand is allowed. We need to know the exact
1068 // value, so that rules out relocatable constants.
1069 // Also try the inverse and use MVN if possible.
1070 if (CanBeFlex &&
1071 OperandARM32FlexImm::canHoldImm(Value, &RotateAmt, &Immed_8)) {
1072 return OperandARM32FlexImm::create(Func, From->getType(), Immed_8,
1073 RotateAmt);
1074 } else if (CanBeFlex && OperandARM32FlexImm::canHoldImm(
1075 ~Value, &RotateAmt, &Immed_8)) {
1076 auto InvertedFlex = OperandARM32FlexImm::create(Func, From->getType(),
1077 Immed_8, RotateAmt);
1078 Type Ty = From->getType();
1079 Variable *Reg = makeReg(Ty, RegNum);
1080 _mvn(Reg, InvertedFlex);
1081 return Reg;
1082 } else {
1083 // Do a movw/movt to a register.
1084 Type Ty = From->getType();
1085 Variable *Reg = makeReg(Ty, RegNum);
1086 _movw(Reg, Ctx->getConstantInt32(Value & 0xFFFF));
1087 uint32_t UpperBits = (Value >> 16) & 0xFFFF;
1088 if (UpperBits != 0) {
1089 _movt(Reg, Ctx->getConstantInt32(UpperBits));
1090 }
1091 return Reg;
1092 }
1093 } else if (auto C = llvm::dyn_cast<ConstantRelocatable>(From)) {
1094 Type Ty = From->getType();
1095 Variable *Reg = makeReg(Ty, RegNum);
1096 _movw(Reg, C);
1097 _movt(Reg, C);
1098 return Reg;
1099 } else {
1100 // Load floats/doubles from literal pool.
1101 UnimplementedError(Func->getContext()->getFlags());
1102 From = copyToReg(From, RegNum);
1103 }
1104 return From;
1105 }
1106
1107 if (auto Var = llvm::dyn_cast<Variable>(From)) {
1108 // Check if the variable is guaranteed a physical register. This
1109 // can happen either when the variable is pre-colored or when it is
1110 // assigned infinite weight.
1111 bool MustHaveRegister = (Var->hasReg() || Var->getWeight().isInf());
1112 // We need a new physical register for the operand if:
1113 // Mem is not allowed and Var isn't guaranteed a physical
1114 // register, or
1115 // RegNum is required and Var->getRegNum() doesn't match.
1116 if ((!(Allowed & Legal_Mem) && !MustHaveRegister) ||
1117 (RegNum != Variable::NoRegister && RegNum != Var->getRegNum())) {
1118 From = copyToReg(From, RegNum);
1119 }
1120 return From;
1121 }
1122 llvm_unreachable("Unhandled operand kind in legalize()");
1123
1124 return From;
1125 }
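
For reference, a standalone sketch of the flexible-second-operand immediate test used in the constant case above: an A32 modified immediate is an 8-bit value rotated right by an even amount. The real OperandARM32FlexImm::canHoldImm may differ in details (e.g., which rotation it prefers); this is only meant to show why legalize() also tries the inverted value for MVN and otherwise falls back to movw/movt.

  #include <cstdint>
  #include <cstdio>

  // Returns true if Value == Immed8 rotated right by 2*RotateAmt.
  static bool canHoldImmSketch(uint32_t Value, uint32_t *RotateAmt,
                               uint32_t *Immed8) {
    for (uint32_t N = 0; N < 16; ++N) {
      const uint32_t Rot = 2 * N;
      // Rotating left by Rot undoes a right rotation by Rot.
      const uint32_t Undone =
          Rot == 0 ? Value : (Value << Rot) | (Value >> (32 - Rot));
      if (Undone <= 0xFF) {
        *RotateAmt = N;
        *Immed8 = Undone;
        return true;
      }
    }
    return false;
  }

  int main() {
    uint32_t Rot, Imm;
    // 0x00AB0000 == 0xAB ror 16: encodable directly as a flex immediate.
    printf("%d\n", canHoldImmSketch(0x00AB0000, &Rot, &Imm));
    // 0xFFFFFF00 is not encodable, but ~0xFFFFFF00 == 0xFF is, so the
    // MVN path in legalize() applies.
    printf("%d\n", canHoldImmSketch(0xFFFFFF00u, &Rot, &Imm));
    // 0x12345678 fails both tests and would be built with movw/movt.
    printf("%d\n", canHoldImmSketch(0x12345678, &Rot, &Imm));
    return 0;
  }
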
1126
1127 // Provide a trivial wrapper to legalize() for this common usage.
1128 Variable *TargetARM32::legalizeToVar(Operand *From, int32_t RegNum) {
1129 return llvm::cast<Variable>(legalize(From, Legal_Reg, RegNum));
1130 }
1131
1132 Variable *TargetARM32::makeReg(Type Type, int32_t RegNum) {
1133 // There aren't any 64-bit integer registers for ARM32.
1134 assert(Type != IceType_i64);
1135 Variable *Reg = Func->makeVariable(Type);
1136 if (RegNum == Variable::NoRegister)
1137 Reg->setWeightInfinite();
1138 else
1139 Reg->setRegNum(RegNum);
1140 return Reg;
1141 }
1142
689 void TargetARM32::postLower() { 1143 void TargetARM32::postLower() {
690 if (Ctx->getFlags().getOptLevel() == Opt_m1) 1144 if (Ctx->getFlags().getOptLevel() == Opt_m1)
691 return; 1145 return;
692 // Find two-address non-SSA instructions where Dest==Src0, and set 1146 inferTwoAddress();
jvoung (off chromium) 2015/05/14 22:42:07 Needed this for "movt" which I modeled as two-addr
693 // the DestNonKillable flag to keep liveness analysis consistent.
694 UnimplementedError(Func->getContext()->getFlags());
695 } 1147 }
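
A brief illustration of why inferTwoAddress() is needed for the movw/movt pair emitted by legalize() (sketch; the constant is made up): movw writes the whole register, but movt only replaces the top 16 bits, so its destination also acts as a source and must not be treated as a fresh definition by liveness analysis.

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint32_t Value = 0x12345678;
    uint32_t RD = Value & 0xFFFF;             // movw rD, #0x5678
    RD = (((Value >> 16) & 0xFFFF) << 16)     // movt rD, #0x1234 reads the
         | (RD & 0xFFFF);                     // existing low half of rD
    printf("0x%08X\n", RD);                   // 0x12345678
    return 0;
  }
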
696 1148
697 void TargetARM32::makeRandomRegisterPermutation( 1149 void TargetARM32::makeRandomRegisterPermutation(
698 llvm::SmallVectorImpl<int32_t> &Permutation, 1150 llvm::SmallVectorImpl<int32_t> &Permutation,
699 const llvm::SmallBitVector &ExcludeRegisters) const { 1151 const llvm::SmallBitVector &ExcludeRegisters) const {
700 (void)Permutation; 1152 (void)Permutation;
701 (void)ExcludeRegisters; 1153 (void)ExcludeRegisters;
702 UnimplementedError(Func->getContext()->getFlags()); 1154 UnimplementedError(Func->getContext()->getFlags());
703 } 1155 }
704 1156
(...skipping 48 matching lines...)
753 } 1205 }
754 } 1206 }
755 1207
756 void TargetDataARM32::lowerConstants() const { 1208 void TargetDataARM32::lowerConstants() const {
757 if (Ctx->getFlags().getDisableTranslation()) 1209 if (Ctx->getFlags().getDisableTranslation())
758 return; 1210 return;
759 UnimplementedError(Ctx->getFlags()); 1211 UnimplementedError(Ctx->getFlags());
760 } 1212 }
761 1213
762 } // end of namespace Ice 1214 } // end of namespace Ice