| Index: src/IceTargetLoweringX8664.cpp
|
| diff --git a/src/IceTargetLoweringX8664.cpp b/src/IceTargetLoweringX8664.cpp
|
| index f5d4eadae59ddd5a81699b24a4682d72e0b824d9..90566489cc5cb23e6ed7846674cc466a66f27a45 100644
|
| --- a/src/IceTargetLoweringX8664.cpp
|
| +++ b/src/IceTargetLoweringX8664.cpp
|
| @@ -243,9 +243,8 @@ void TargetX8664::lowerCall(const InstCall *Instr) {
|
| if (Dest) {
|
| switch (Dest->getType()) {
|
| case IceType_NUM:
|
| - llvm_unreachable("Invalid Call dest type");
|
| - break;
|
| case IceType_void:
|
| + llvm::report_fatal_error("Invalid Call dest type");
|
| break;
|
| case IceType_i1:
|
| case IceType_i8:
|
| @@ -339,34 +338,381 @@ void TargetX8664::lowerCall(const InstCall *Instr) {
|
| }
|
| }
|
|
|
| -void TargetDataX8664::lowerJumpTables() {
|
| - switch (Ctx->getFlags().getOutFileType()) {
|
| - case FT_Elf: {
|
| - ELFObjectWriter *Writer = Ctx->getObjectWriter();
|
| - for (const JumpTableData &JumpTable : Ctx->getJumpTables())
|
| - // TODO(jpp): not 386.
|
| - Writer->writeJumpTable(JumpTable, llvm::ELF::R_386_32);
|
| - } break;
|
| - case FT_Asm:
|
| - // Already emitted from Cfg
|
| - break;
|
| - case FT_Iasm: {
|
| - if (!BuildDefs::dump())
|
| - return;
|
| - Ostream &Str = Ctx->getStrEmit();
|
| - for (const JumpTableData &JT : Ctx->getJumpTables()) {
|
| - Str << "\t.section\t.rodata." << JT.getFunctionName()
|
| - << "$jumptable,\"a\",@progbits\n";
|
| - Str << "\t.align\t" << typeWidthInBytes(getPointerType()) << "\n";
|
| - Str << InstJumpTable::makeName(JT.getFunctionName(), JT.getId()) << ":";
|
| +void TargetX8664::lowerArguments() {
|
| + VarList &Args = Func->getArgs();
|
| + // The first eight vector-typed arguments (as well as floating point
|
| + // arguments) are passed in %xmm0 through %xmm7, regardless of their
|
| + // position in the argument list.
|
| + unsigned NumXmmArgs = 0;
|
| + // The first six integer-typed arguments are passed in %rdi, %rsi, %rdx,
|
| + // %rcx, %r8, and %r9, regardless of their position in the argument list.
|
| + unsigned NumGprArgs = 0;
|
| +
|
| + Context.init(Func->getEntryNode());
|
| + Context.setInsertPoint(Context.getCur());
|
| +
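|
| + // Illustrative example (not from the source): for "int f(int a, double b)",
|
| + // a's home register is %rdi and b's is %xmm0; the loop below substitutes
|
| + // each home register into the argument list and emits a prolog copy.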
|
| + for (SizeT i = 0, End = Args.size();
|
| + i < End && (NumXmmArgs < Traits::X86_MAX_XMM_ARGS ||
|
| + NumGprArgs < Traits::X86_MAX_GPR_ARGS);
|
| + ++i) {
|
| + Variable *Arg = Args[i];
|
| + Type Ty = Arg->getType();
|
| + if ((isVectorType(Ty) || isScalarFloatingType(Ty)) &&
|
| + NumXmmArgs < Traits::X86_MAX_XMM_ARGS) {
|
| + // Replace Arg in the argument list with the home register. Then
|
| + // generate an instruction in the prolog to copy the home register
|
| + // to the assigned location of Arg.
|
| + int32_t RegNum = getRegisterForXmmArgNum(NumXmmArgs);
|
| + ++NumXmmArgs;
|
| + Variable *RegisterArg = Func->makeVariable(Ty);
|
| + if (BuildDefs::dump())
|
| + RegisterArg->setName(Func, "home_reg:" + Arg->getName(Func));
|
| + RegisterArg->setRegNum(RegNum);
|
| + RegisterArg->setIsArg();
|
| + Arg->setIsArg(false);
|
| +
|
| + Args[i] = RegisterArg;
|
| + Context.insert(InstAssign::create(Func, Arg, RegisterArg));
|
| + } else if (isScalarIntegerType(Ty) &&
|
| + NumGprArgs < Traits::X86_MAX_GPR_ARGS) {
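|
| + // As above, replace Arg in the argument list with its home register
|
| + // (here a GPR) and copy the home register into Arg's assigned location.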
|
| + int32_t RegNum = getRegisterForGprArgNum(NumGprArgs);
|
| + ++NumGprArgs;
|
| + Variable *RegisterArg = Func->makeVariable(Ty);
|
| + if (BuildDefs::dump())
|
| + RegisterArg->setName(Func, "home_reg:" + Arg->getName(Func));
|
| + RegisterArg->setRegNum(RegNum);
|
| + RegisterArg->setIsArg();
|
| + Arg->setIsArg(false);
|
| +
|
| + Args[i] = RegisterArg;
|
| + Context.insert(InstAssign::create(Func, Arg, RegisterArg));
|
| + }
|
| + }
|
| +}
|
|
|
| - // On X8664 ILP32 pointers are 32-bit hence the use of .long
|
| - for (intptr_t TargetOffset : JT.getTargetOffsets())
|
| - Str << "\n\t.long\t" << JT.getFunctionName() << "+" << TargetOffset;
|
| - Str << "\n";
|
| +void TargetX8664::lowerRet(const InstRet *Inst) {
|
| + Variable *Reg = nullptr;
|
| + if (Inst->hasRetValue()) {
|
| + Operand *Src0 = legalize(Inst->getRetValue());
|
| + // TODO(jpp): this is not needed.
|
| + if (Src0->getType() == IceType_i64) {
|
| + Variable *eax =
|
| + legalizeToReg(loOperand(Src0), Traits::RegisterSet::Reg_eax);
|
| + Variable *edx =
|
| + legalizeToReg(hiOperand(Src0), Traits::RegisterSet::Reg_edx);
|
| + Reg = eax;
|
| + Context.insert(InstFakeUse::create(Func, edx));
|
| + } else if (isScalarFloatingType(Src0->getType())) {
|
| + _fld(Src0);
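|
| + // Note: _fld returns scalar fp on the x87 stack (st(0)), mirroring the
|
| + // x86-32 convention; the x86-64 SysV ABI returns fp values in %xmm0.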
|
| + } else if (isVectorType(Src0->getType())) {
|
| + Reg = legalizeToReg(Src0, Traits::RegisterSet::Reg_xmm0);
|
| + } else {
|
| + _mov(Reg, Src0, Traits::RegisterSet::Reg_eax);
|
| }
|
| - } break;
|
| }
|
| + // Add a ret instruction even if sandboxing is enabled, because
|
| + // addEpilog explicitly looks for a ret instruction as a marker for
|
| + // where to insert the frame removal instructions.
|
| + _ret(Reg);
|
| + // Add a fake use of esp to make sure esp stays alive for the entire
|
| + // function. Otherwise post-call esp adjustments get dead-code
|
| + // eliminated. TODO: Are there more places where the fake use
|
| + // should be inserted? E.g. "void f(int n){while(1) g(n);}" may not
|
| + // have a ret instruction.
|
| + Variable *esp =
|
| + Func->getTarget()->getPhysicalRegister(Traits::RegisterSet::Reg_esp);
|
| + Context.insert(InstFakeUse::create(Func, esp));
|
| +}
|
| +
|
| +void TargetX8664::addProlog(CfgNode *Node) {
|
| + // Stack frame layout:
|
| + //
|
| + // +------------------------+
|
| + // | 1. return address |
|
| + // +------------------------+
|
| + // | 2. preserved registers |
|
| + // +------------------------+
|
| + // | 3. padding |
|
| + // +------------------------+
|
| + // | 4. global spill area |
|
| + // +------------------------+
|
| + // | 5. padding |
|
| + // +------------------------+
|
| + // | 6. local spill area |
|
| + // +------------------------+
|
| + // | 7. padding |
|
| + // +------------------------+
|
| + // | 8. allocas |
|
| + // +------------------------+
|
| + //
|
| + // The following variables record the size in bytes of the given areas:
|
| + // * X86_RET_IP_SIZE_BYTES: area 1
|
| + // * PreservedRegsSizeBytes: area 2
|
| + // * SpillAreaPaddingBytes: area 3
|
| + // * GlobalsSize: area 4
|
| + // * GlobalsAndSubsequentPaddingSize: areas 4 and 5
|
| + // * LocalsSpillAreaSize: area 6
|
| + // * SpillAreaSizeBytes: areas 3 through 7
|
| +
|
| + // Determine stack frame offsets for each Variable without a
|
| + // register assignment. This can be done as one variable per stack
|
| + // slot. Or, do coalescing by running the register allocator again
|
| + // with an infinite set of registers (as a side effect, this gives
|
| + // variables a second chance at physical register assignment).
|
| + //
|
| + // A middle ground approach is to leverage sparsity and allocate one
|
| + // block of space on the frame for globals (variables with
|
| + // multi-block lifetime), and one block to share for locals
|
| + // (single-block lifetime).
|
| +
|
| + Context.init(Node);
|
| + Context.setInsertPoint(Context.getCur());
|
| +
|
| + llvm::SmallBitVector CalleeSaves =
|
| + getRegisterSet(RegSet_CalleeSave, RegSet_None);
|
| + RegsUsed = llvm::SmallBitVector(CalleeSaves.size());
|
| + VarList SortedSpilledVariables, VariablesLinkedToSpillSlots;
|
| + size_t GlobalsSize = 0;
|
| + // If there is a separate locals area, this represents that area.
|
| + // Otherwise it counts any variable not counted by GlobalsSize.
|
| + SpillAreaSizeBytes = 0;
|
| + // If there is a separate locals area, this specifies the alignment
|
| + // for it.
|
| + uint32_t LocalsSlotsAlignmentBytes = 0;
|
| + // The entire spill locations area gets aligned to largest natural
|
| + // alignment of the variables that have a spill slot.
|
| + uint32_t SpillAreaAlignmentBytes = 0;
|
| + // A spill slot linked to a variable with a stack slot should reuse
|
| + // that stack slot.
|
| + std::function<bool(Variable *)> TargetVarHook =
|
| + [&VariablesLinkedToSpillSlots](Variable *Var) {
|
| + if (auto *SpillVar =
|
| + llvm::dyn_cast<typename Traits::SpillVariable>(Var)) {
|
| + assert(Var->getWeight().isZero());
|
| + if (SpillVar->getLinkedTo() && !SpillVar->getLinkedTo()->hasReg()) {
|
| + VariablesLinkedToSpillSlots.push_back(Var);
|
| + return true;
|
| + }
|
| + }
|
| + return false;
|
| + };
|
| +
|
| + // Compute the list of spilled variables and bounds for GlobalsSize, etc.
|
| + getVarStackSlotParams(SortedSpilledVariables, RegsUsed, &GlobalsSize,
|
| + &SpillAreaSizeBytes, &SpillAreaAlignmentBytes,
|
| + &LocalsSlotsAlignmentBytes, TargetVarHook);
|
| + uint32_t LocalsSpillAreaSize = SpillAreaSizeBytes;
|
| + SpillAreaSizeBytes += GlobalsSize;
|
| +
|
| + // Add push instructions for preserved registers.
|
| + uint32_t NumCallee = 0;
|
| + size_t PreservedRegsSizeBytes = 0;
|
| + for (SizeT i = 0; i < CalleeSaves.size(); ++i) {
|
| + if (CalleeSaves[i] && RegsUsed[i]) {
|
| + ++NumCallee;
|
| + PreservedRegsSizeBytes += typeWidthInBytes(IceType_i64);
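|
| + // Each push of a callee-save register consumes 8 bytes on x86-64.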
|
| + _push(getPhysicalRegister(i));
|
| + }
|
| + }
|
| + Ctx->statsUpdateRegistersSaved(NumCallee);
|
| +
|
| + // Generate "push ebp; mov ebp, esp"
|
| + if (IsEbpBasedFrame) {
|
| + assert((RegsUsed & getRegisterSet(RegSet_FramePointer, RegSet_None))
|
| + .count() == 0);
|
| + PreservedRegsSizeBytes += typeWidthInBytes(IceType_i64);
|
| + Variable *ebp = getPhysicalRegister(Traits::RegisterSet::Reg_ebp);
|
| + Variable *esp = getPhysicalRegister(Traits::RegisterSet::Reg_esp);
|
| + _push(ebp);
|
| + _mov(ebp, esp);
|
| + // Keep ebp live for late-stage liveness analysis
|
| + // (e.g. asm-verbose mode).
|
| + Context.insert(InstFakeUse::create(Func, ebp));
|
| + }
|
| +
|
| + // Align the variables area. SpillAreaPaddingBytes is the size of
|
| + // the region after the preserved registers and before the spill areas.
|
| + // LocalsSlotsPaddingBytes is the amount of padding between the globals
|
| + // and locals area if they are separate.
|
| + assert(SpillAreaAlignmentBytes <= Traits::X86_STACK_ALIGNMENT_BYTES);
|
| + assert(LocalsSlotsAlignmentBytes <= SpillAreaAlignmentBytes);
|
| + uint32_t SpillAreaPaddingBytes = 0;
|
| + uint32_t LocalsSlotsPaddingBytes = 0;
|
| + alignStackSpillAreas(Traits::X86_RET_IP_SIZE_BYTES + PreservedRegsSizeBytes,
|
| + SpillAreaAlignmentBytes, GlobalsSize,
|
| + LocalsSlotsAlignmentBytes, &SpillAreaPaddingBytes,
|
| + &LocalsSlotsPaddingBytes);
|
| + SpillAreaSizeBytes += SpillAreaPaddingBytes + LocalsSlotsPaddingBytes;
|
| + uint32_t GlobalsAndSubsequentPaddingSize =
|
| + GlobalsSize + LocalsSlotsPaddingBytes;
|
| +
|
| + // Align esp if necessary.
|
| + if (NeedsStackAlignment) {
|
| + uint32_t StackOffset =
|
| + Traits::X86_RET_IP_SIZE_BYTES + PreservedRegsSizeBytes;
|
| + uint32_t StackSize =
|
| + Traits::applyStackAlignment(StackOffset + SpillAreaSizeBytes);
|
| + SpillAreaSizeBytes = StackSize - StackOffset;
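|
| + // Worked example (illustrative, assuming 16-byte stack alignment): with
|
| + // an 8-byte return address and two preserved-register pushes,
|
| + // StackOffset = 8 + 16 = 24; if SpillAreaSizeBytes were 20, then
|
| + // StackSize = align(44) = 48, leaving SpillAreaSizeBytes = 48 - 24 = 24.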
|
| + }
|
| +
|
| + // Generate "sub esp, SpillAreaSizeBytes"
|
| + if (SpillAreaSizeBytes)
|
| + _sub(getPhysicalRegister(Traits::RegisterSet::Reg_esp),
|
| + Ctx->getConstantInt32(SpillAreaSizeBytes));
|
| + Ctx->statsUpdateFrameBytes(SpillAreaSizeBytes);
|
| +
|
| + resetStackAdjustment();
|
| +
|
| + // Fill in stack offsets for stack args, and copy args into registers
|
| + // for those that were register-allocated. Args are pushed right to
|
| + // left, so Arg[0] is closest to the stack/frame pointer.
|
| + Variable *FramePtr = getPhysicalRegister(getFrameOrStackReg());
|
| + size_t BasicFrameOffset =
|
| + PreservedRegsSizeBytes + Traits::X86_RET_IP_SIZE_BYTES;
|
| + if (!IsEbpBasedFrame)
|
| + BasicFrameOffset += SpillAreaSizeBytes;
|
| +
|
| + const VarList &Args = Func->getArgs();
|
| + size_t InArgsSizeBytes = 0;
|
| + unsigned NumXmmArgs = 0;
|
| + unsigned NumGprArgs = 0;
|
| + for (Variable *Arg : Args) {
|
| + // Skip arguments passed in registers.
|
| + if (isVectorType(Arg->getType()) && NumXmmArgs < Traits::X86_MAX_XMM_ARGS) {
|
| + ++NumXmmArgs;
|
| + continue;
|
| + }
|
| + if (isScalarFloatingType(Arg->getType()) &&
|
| + NumXmmArgs < Traits::X86_MAX_XMM_ARGS) {
|
| + ++NumXmmArgs;
|
| + continue;
|
| + }
|
| + if (isScalarIntegerType(Arg->getType()) &&
|
| + NumGprArgs < Traits::X86_MAX_GPR_ARGS) {
|
| + ++NumGprArgs;
|
| + continue;
|
| + }
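|
| + // Anything not passed in a register reaches this point and lives in the
|
| + // caller's frame; compute its stack offset.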
|
| + finishArgumentLowering(Arg, FramePtr, BasicFrameOffset, InArgsSizeBytes);
|
| + }
|
| +
|
| + // Fill in stack offsets for locals.
|
| + assignVarStackSlots(SortedSpilledVariables, SpillAreaPaddingBytes,
|
| + SpillAreaSizeBytes, GlobalsAndSubsequentPaddingSize,
|
| + IsEbpBasedFrame);
|
| + // Assign stack offsets to variables that have been linked to spilled
|
| + // variables.
|
| + for (Variable *Var : VariablesLinkedToSpillSlots) {
|
| + Variable *Linked =
|
| + (llvm::cast<typename Traits::SpillVariable>(Var))->getLinkedTo();
|
| + Var->setStackOffset(Linked->getStackOffset());
|
| + }
|
| + this->HasComputedFrame = true;
|
| +
|
| + if (BuildDefs::dump() && Func->isVerbose(IceV_Frame)) {
|
| + OstreamLocker L(Func->getContext());
|
| + Ostream &Str = Func->getContext()->getStrDump();
|
| +
|
| + Str << "Stack layout:\n";
|
| + uint32_t EspAdjustmentPaddingSize =
|
| + SpillAreaSizeBytes - LocalsSpillAreaSize -
|
| + GlobalsAndSubsequentPaddingSize - SpillAreaPaddingBytes;
|
| + Str << " in-args = " << InArgsSizeBytes << " bytes\n"
|
| + << " return address = " << Traits::X86_RET_IP_SIZE_BYTES << " bytes\n"
|
| + << " preserved registers = " << PreservedRegsSizeBytes << " bytes\n"
|
| + << " spill area padding = " << SpillAreaPaddingBytes << " bytes\n"
|
| + << " globals spill area = " << GlobalsSize << " bytes\n"
|
| + << " globals-locals spill areas intermediate padding = "
|
| + << GlobalsAndSubsequentPaddingSize - GlobalsSize << " bytes\n"
|
| + << " locals spill area = " << LocalsSpillAreaSize << " bytes\n"
|
| + << " esp alignment padding = " << EspAdjustmentPaddingSize
|
| + << " bytes\n";
|
| +
|
| + Str << "Stack details:\n"
|
| + << " esp adjustment = " << SpillAreaSizeBytes << " bytes\n"
|
| + << " spill area alignment = " << SpillAreaAlignmentBytes << " bytes\n"
|
| + << " locals spill area alignment = " << LocalsSlotsAlignmentBytes
|
| + << " bytes\n"
|
| + << " is ebp based = " << IsEbpBasedFrame << "\n";
|
| + }
|
| +}
|
| +
|
| +void TargetX8664::addEpilog(CfgNode *Node) {
|
| + InstList &Insts = Node->getInsts();
|
| + InstList::reverse_iterator RI, E;
|
| + for (RI = Insts.rbegin(), E = Insts.rend(); RI != E; ++RI) {
|
| + if (llvm::isa<typename Traits::Insts::Ret>(*RI))
|
| + break;
|
| + }
|
| + if (RI == E)
|
| + return;
|
| +
|
| + // Convert the reverse_iterator position into its corresponding
|
| + // (forward) iterator position.
|
| + InstList::iterator InsertPoint = RI.base();
|
| + --InsertPoint;
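|
| + // (RI.base() designates the element after the one RI refers to, so the
|
| + // decrement leaves InsertPoint on the ret instruction itself.)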
|
| + Context.init(Node);
|
| + Context.setInsertPoint(InsertPoint);
|
| +
|
| + Variable *esp = getPhysicalRegister(Traits::RegisterSet::Reg_esp);
|
| + if (IsEbpBasedFrame) {
|
| + Variable *ebp = getPhysicalRegister(Traits::RegisterSet::Reg_ebp);
|
| + // For late-stage liveness analysis (e.g. asm-verbose mode),
|
| + // adding a fake use of esp before the assignment of esp=ebp keeps
|
| + // previous esp adjustments from being dead-code eliminated.
|
| + Context.insert(InstFakeUse::create(Func, esp));
|
| + _mov(esp, ebp);
|
| + _pop(ebp);
|
| + } else {
|
| + // add esp, SpillAreaSizeBytes
|
| + if (SpillAreaSizeBytes)
|
| + _add(esp, Ctx->getConstantInt32(SpillAreaSizeBytes));
|
| + }
|
| +
|
| + // Add pop instructions for preserved registers.
|
| + llvm::SmallBitVector CalleeSaves =
|
| + getRegisterSet(RegSet_CalleeSave, RegSet_None);
|
| + for (SizeT i = 0; i < CalleeSaves.size(); ++i) {
|
| + SizeT j = CalleeSaves.size() - i - 1;
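|
| + // Walk the bit vector in reverse so registers are popped in the opposite
|
| + // order of the pushes emitted in addProlog.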
|
| + if (j == Traits::RegisterSet::Reg_ebp && IsEbpBasedFrame)
|
| + continue;
|
| + if (CalleeSaves[j] && RegsUsed[j]) {
|
| + _pop(getPhysicalRegister(j));
|
| + }
|
| + }
|
| +
|
| + if (!Ctx->getFlags().getUseSandboxing())
|
| + return;
|
| + // Change the original ret instruction into a sandboxed return sequence.
|
| + // t:ecx = pop
|
| + // bundle_lock
|
| + // and t, ~31
|
| + // jmp *t
|
| + // bundle_unlock
|
| + // FakeUse <original_ret_operand>
|
| + Variable *T_ecx = makeReg(IceType_i32, Traits::RegisterSet::Reg_ecx);
|
| + _pop(T_ecx);
|
| + lowerIndirectJump(T_ecx);
|
| + if (RI->getSrcSize()) {
|
| + Variable *RetValue = llvm::cast<Variable>(RI->getSrc(0));
|
| + Context.insert(InstFakeUse::create(Func, RetValue));
|
| + }
|
| + RI->setDeleted();
|
| +}
|
| +
|
| +void TargetX8664::emitJumpTable(const Cfg *Func,
|
| + const InstJumpTable *JumpTable) const {
|
| + if (!BuildDefs::dump())
|
| + return;
|
| + Ostream &Str = Ctx->getStrEmit();
|
| + IceString MangledName = Ctx->mangleName(Func->getFunctionName());
|
| + Str << "\t.section\t.rodata." << MangledName
|
| + << "$jumptable,\"a\",@progbits\n";
|
| + Str << "\t.align\t" << typeWidthInBytes(getPointerType()) << "\n";
|
| + Str << InstJumpTable::makeName(MangledName, JumpTable->getId()) << ":";
|
| +
|
| + // On X8664 ILP32, pointers are 32 bits wide, hence the use of .long.
|
| + for (SizeT I = 0; I < JumpTable->getNumTargets(); ++I)
|
| + Str << "\n\t.long\t" << JumpTable->getTarget(I)->getAsmName();
|
| + Str << "\n";
|
| }
|
|
|
| namespace {
|
| @@ -507,21 +853,34 @@ void TargetDataX8664::lowerConstants() {
|
| }
|
| }
|
|
|
| -void TargetX8664::emitJumpTable(const Cfg *Func,
|
| - const InstJumpTable *JumpTable) const {
|
| - if (!BuildDefs::dump())
|
| - return;
|
| - Ostream &Str = Ctx->getStrEmit();
|
| - IceString MangledName = Ctx->mangleName(Func->getFunctionName());
|
| - Str << "\t.section\t.rodata." << MangledName
|
| - << "$jumptable,\"a\",@progbits\n";
|
| - Str << "\t.align\t" << typeWidthInBytes(getPointerType()) << "\n";
|
| - Str << InstJumpTable::makeName(MangledName, JumpTable->getId()) << ":";
|
| +void TargetDataX8664::lowerJumpTables() {
|
| + switch (Ctx->getFlags().getOutFileType()) {
|
| + case FT_Elf: {
|
| + ELFObjectWriter *Writer = Ctx->getObjectWriter();
|
| + for (const JumpTableData &JumpTable : Ctx->getJumpTables())
|
| + // TODO(jpp): not 386.
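|
| + // (R_386_32 is the 32-bit x86 absolute relocation; an x86-64 object
|
| + // should eventually use an x86-64 type such as R_X86_64_32.)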
|
| + Writer->writeJumpTable(JumpTable, llvm::ELF::R_386_32);
|
| + } break;
|
| + case FT_Asm:
|
| + // Already emitted from Cfg
|
| + break;
|
| + case FT_Iasm: {
|
| + if (!BuildDefs::dump())
|
| + return;
|
| + Ostream &Str = Ctx->getStrEmit();
|
| + for (const JumpTableData &JT : Ctx->getJumpTables()) {
|
| + Str << "\t.section\t.rodata." << JT.getFunctionName()
|
| + << "$jumptable,\"a\",@progbits\n";
|
| + Str << "\t.align\t" << typeWidthInBytes(getPointerType()) << "\n";
|
| + Str << InstJumpTable::makeName(JT.getFunctionName(), JT.getId()) << ":";
|
|
|
| - // On X8664 ILP32 pointers are 32-bit hence the use of .long
|
| - for (SizeT I = 0; I < JumpTable->getNumTargets(); ++I)
|
| - Str << "\n\t.long\t" << JumpTable->getTarget(I)->getAsmName();
|
| - Str << "\n";
|
| + // On X8664 ILP32, pointers are 32 bits wide, hence the use of .long.
|
| + for (intptr_t TargetOffset : JT.getTargetOffsets())
|
| + Str << "\n\t.long\t" << JT.getFunctionName() << "+" << TargetOffset;
|
| + Str << "\n";
|
| + }
|
| + } break;
|
| + }
|
| }
|
|
|
| void TargetDataX8664::lowerGlobals(const VariableDeclarationList &Vars,
|
|
|