Chromium Code Reviews

Diff: src/IceTargetLowering.cpp

Issue 1159013002: Subzero ARM: addProlog/addEpilogue -- share some code with x86. (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: typo Created 5 years, 6 months ago
//===- subzero/src/IceTargetLowering.cpp - Basic lowering implementation --===//
//
// The Subzero Code Generator
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the skeleton of the TargetLowering class,
(...skipping 226 matching lines...)
    if (Variable *Dest = Inst->getDest()) {
      // TODO(stichnot): We may need to consider all source
      // operands, not just the first one, if using 3-address
      // instructions.
      if (Inst->getSrcSize() > 0 && Inst->getSrc(0) == Dest)
        Inst->setDestNonKillable();
    }
  }
}

void TargetLowering::sortVarsByAlignment(VarList &Dest,
                                         const VarList &Source) const {
  Dest = Source;
  // Instead of std::sort, we could do a bucket sort with log2(alignment)
  // as the buckets, if performance is an issue.
  std::sort(Dest.begin(), Dest.end(),
            [this](const Variable *V1, const Variable *V2) {
              return typeWidthInBytesOnStack(V1->getType()) >
                     typeWidthInBytesOnStack(V2->getType());
            });
}

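The TODO above suggests a bucket sort keyed on log2 of the on-stack width. A minimal sketch of that alternative, assuming every value of typeWidthInBytesOnStack is a power of two no larger than 32 bytes; the method name is hypothetical and not part of this CL:

// Hedged sketch, not part of this CL: bucket sort over log2(width),
// assuming all on-stack widths are powers of two <= 32 bytes.
void TargetLowering::sortVarsByAlignmentBucketed(VarList &Dest,
                                                 const VarList &Source) const {
  constexpr size_t NumBuckets = 6; // widths 1, 2, 4, 8, 16, 32
  VarList Buckets[NumBuckets];
  for (Variable *Var : Source)
    Buckets[llvm::Log2_32(typeWidthInBytesOnStack(Var->getType()))]
        .push_back(Var);
  Dest.clear();
  // Emit the largest widths first, matching the descending comparator above.
  for (size_t I = NumBuckets; I-- > 0;)
    Dest.insert(Dest.end(), Buckets[I].begin(), Buckets[I].end());
}
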
void TargetLowering::getVarStackSlotParams(
    VarList &SortedSpilledVariables, llvm::SmallBitVector &RegsUsed,
    size_t *GlobalsSize, size_t *SpillAreaSizeBytes,
    uint32_t *SpillAreaAlignmentBytes, uint32_t *LocalsSlotsAlignmentBytes,
    std::function<bool(Variable *)> TargetVarHook) {
  const VariablesMetadata *VMetadata = Func->getVMetadata();
  llvm::BitVector IsVarReferenced(Func->getNumVariables());
  for (CfgNode *Node : Func->getNodes()) {
    for (Inst &Inst : Node->getInsts()) {
      if (Inst.isDeleted())
        continue;
      if (const Variable *Var = Inst.getDest())
        IsVarReferenced[Var->getIndex()] = true;
      for (SizeT I = 0; I < Inst.getSrcSize(); ++I) {
        Operand *Src = Inst.getSrc(I);
        SizeT NumVars = Src->getNumVars();
        for (SizeT J = 0; J < NumVars; ++J) {
          const Variable *Var = Src->getVar(J);
          IsVarReferenced[Var->getIndex()] = true;
        }
      }
    }
  }

  // If SimpleCoalescing is false, each variable without a register
  // gets its own unique stack slot, which leads to large stack
  // frames. If SimpleCoalescing is true, then each "global" variable
  // without a register gets its own slot, but "local" variable slots
  // are reused across basic blocks. E.g., if A and B are local to
  // block 1 and C is local to block 2, then C may share a slot with A or B.
  //
  // We cannot coalesce stack slots if this function calls a "returns twice"
  // function. In that case, basic blocks may be revisited, and variables
  // local to those basic blocks are actually live until after the
  // called function returns a second time.
  const bool SimpleCoalescing = !callsReturnsTwice();

  std::vector<size_t> LocalsSize(Func->getNumNodes());
  const VarList &Variables = Func->getVariables();
  VarList SpilledVariables;
  for (Variable *Var : Variables) {
    if (Var->hasReg()) {
      RegsUsed[Var->getRegNum()] = true;
      continue;
    }
    // An argument either does not need a stack slot (if passed in a
    // register) or already has one (if passed on the stack).
    if (Var->getIsArg())
      continue;
    // An unreferenced variable doesn't need a stack slot.
    if (!IsVarReferenced[Var->getIndex()])
      continue;
    // Let the target claim a target-specific variable: it may end up
    // sharing stack slots, and then needs no accounting here.
    if (TargetVarHook(Var))
      continue;
    SpilledVariables.push_back(Var);
  }

  SortedSpilledVariables.reserve(SpilledVariables.size());
  sortVarsByAlignment(SortedSpilledVariables, SpilledVariables);

  for (Variable *Var : SortedSpilledVariables) {
    size_t Increment = typeWidthInBytesOnStack(Var->getType());
    // We have sorted by alignment, so the first variable we encounter that
    // is located in each area determines the max alignment for the area.
    if (!*SpillAreaAlignmentBytes)
      *SpillAreaAlignmentBytes = Increment;
    if (SimpleCoalescing && VMetadata->isTracked(Var)) {
      if (VMetadata->isMultiBlock(Var)) {
        *GlobalsSize += Increment;
      } else {
        SizeT NodeIndex = VMetadata->getLocalUseNode(Var)->getIndex();
        LocalsSize[NodeIndex] += Increment;
        if (LocalsSize[NodeIndex] > *SpillAreaSizeBytes)
          *SpillAreaSizeBytes = LocalsSize[NodeIndex];
        if (!*LocalsSlotsAlignmentBytes)
          *LocalsSlotsAlignmentBytes = Increment;
      }
    } else {
      *SpillAreaSizeBytes += Increment;
    }
  }
}

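This helper is meant to run first in a target's addProlog, followed by alignStackSpillAreas and assignVarStackSlots below; sharing that sequence between x86 and ARM is the point of this CL. A hedged sketch of a caller, where RegCount, SpillAreaStartOffset, and UsesFramePointer stand in for values a real target computes itself:

// Illustrative call sequence only, not code from this CL.
VarList SortedSpilledVariables;
llvm::SmallBitVector RegsUsed(RegCount);
size_t GlobalsSize = 0, SpillAreaSizeBytes = 0;
uint32_t SpillAreaAlignmentBytes = 0, LocalsSlotsAlignmentBytes = 0;
getVarStackSlotParams(SortedSpilledVariables, RegsUsed, &GlobalsSize,
                      &SpillAreaSizeBytes, &SpillAreaAlignmentBytes,
                      &LocalsSlotsAlignmentBytes,
                      [](Variable *) { return false; /* no target slots */ });
uint32_t SpillAreaPaddingBytes = 0, LocalsSlotsPaddingBytes = 0;
alignStackSpillAreas(SpillAreaStartOffset, SpillAreaAlignmentBytes,
                     GlobalsSize, LocalsSlotsAlignmentBytes,
                     &SpillAreaPaddingBytes, &LocalsSlotsPaddingBytes);
// Fold the globals area and both paddings into the total spill size,
// emit the stack adjustment, then place each variable:
SpillAreaSizeBytes +=
    GlobalsSize + SpillAreaPaddingBytes + LocalsSlotsPaddingBytes;
assignVarStackSlots(SortedSpilledVariables, SpillAreaPaddingBytes,
                    SpillAreaSizeBytes,
                    GlobalsSize + LocalsSlotsPaddingBytes, UsesFramePointer);
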
void TargetLowering::alignStackSpillAreas(uint32_t SpillAreaStartOffset,
                                          uint32_t SpillAreaAlignmentBytes,
                                          size_t GlobalsSize,
                                          uint32_t LocalsSlotsAlignmentBytes,
                                          uint32_t *SpillAreaPaddingBytes,
                                          uint32_t *LocalsSlotsPaddingBytes) {
  if (SpillAreaAlignmentBytes) {
    uint32_t PaddingStart = SpillAreaStartOffset;
    uint32_t SpillAreaStart =
        Utils::applyAlignment(PaddingStart, SpillAreaAlignmentBytes);
    *SpillAreaPaddingBytes = SpillAreaStart - PaddingStart;
  }

  // If there are separate globals and locals areas, make sure the
  // locals area is aligned by padding the end of the globals area.
  if (LocalsSlotsAlignmentBytes) {
    uint32_t GlobalsAndSubsequentPaddingSize =
        Utils::applyAlignment(GlobalsSize, LocalsSlotsAlignmentBytes);
    *LocalsSlotsPaddingBytes = GlobalsAndSubsequentPaddingSize - GlobalsSize;
  }
}

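A quick worked example of the padding arithmetic, with made-up numbers:

// Suppose the spill area would start 4 bytes into the frame and must be
// 16-byte aligned:
//   SpillAreaStart         = Utils::applyAlignment(4, 16)  // == 16
//   *SpillAreaPaddingBytes = 16 - 4                        // == 12
// And an 8-byte globals area followed by 16-byte-aligned locals:
//   GlobalsAndSubsequentPaddingSize = Utils::applyAlignment(8, 16)  // == 16
//   *LocalsSlotsPaddingBytes        = 16 - 8                        // == 8
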
void TargetLowering::assignVarStackSlots(VarList &SortedSpilledVariables,
                                         size_t SpillAreaPaddingBytes,
                                         size_t SpillAreaSizeBytes,
                                         size_t GlobalsAndSubsequentPaddingSize,
                                         bool UsesFramePointer) {
  const VariablesMetadata *VMetadata = Func->getVMetadata();
  size_t GlobalsSpaceUsed = SpillAreaPaddingBytes;
  size_t NextStackOffset = SpillAreaPaddingBytes;
  std::vector<size_t> LocalsSize(Func->getNumNodes());
  const bool SimpleCoalescing = !callsReturnsTwice();
  for (Variable *Var : SortedSpilledVariables) {
    size_t Increment = typeWidthInBytesOnStack(Var->getType());
    if (SimpleCoalescing && VMetadata->isTracked(Var)) {
      if (VMetadata->isMultiBlock(Var)) {
        GlobalsSpaceUsed += Increment;
        NextStackOffset = GlobalsSpaceUsed;
      } else {
        SizeT NodeIndex = VMetadata->getLocalUseNode(Var)->getIndex();
        LocalsSize[NodeIndex] += Increment;
        NextStackOffset = SpillAreaPaddingBytes +
                          GlobalsAndSubsequentPaddingSize +
                          LocalsSize[NodeIndex];
      }
    } else {
      NextStackOffset += Increment;
    }
    if (UsesFramePointer)
      Var->setStackOffset(-NextStackOffset);
    else
      Var->setStackOffset(SpillAreaSizeBytes - NextStackOffset);
  }
}

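To make the sign convention at the end concrete, an example with made-up numbers:

// Assume SpillAreaSizeBytes == 32 and a variable whose slot ends at
// NextStackOffset == 8:
//   with a frame pointer:    Var->setStackOffset(-8);     // 8 below the FP
//   without a frame pointer: Var->setStackOffset(32 - 8); // 24 above the SP
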
InstCall *TargetLowering::makeHelperCall(const IceString &Name, Variable *Dest,
                                         SizeT MaxSrcs) {
  const bool HasTailCall = false;
  Constant *CallTarget = Ctx->getConstantExternSym(Name);
  InstCall *Call =
      InstCall::create(Func, MaxSrcs, Dest, CallTarget, HasTailCall);
  return Call;
}

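A hedged sketch of how a lowering might use makeHelperCall; the helper name, destination, and arguments are illustrative, not taken from this CL:

// Illustrative only: route an operation through a runtime helper.
constexpr SizeT MaxSrcs = 2;
Variable *Dest = Func->makeVariable(IceType_i32);
InstCall *Call = makeHelperCall("__hypothetical_helper", Dest, MaxSrcs);
Call->addArg(Arg0); // Arg0/Arg1: operands prepared by the caller
Call->addArg(Arg1);
lowerCall(Call);
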
void TargetLowering::emitWithoutPrefix(const ConstantRelocatable *C) const {
(...skipping 28 matching lines...)
  return std::unique_ptr<TargetDataLowering>(TargetData##X::create(Ctx));
#include "llvm/Config/SZTargets.def"

  llvm_unreachable("Unsupported target data lowering");
  return nullptr;
}

TargetDataLowering::~TargetDataLowering() {}

} // end of namespace Ice