OLD | NEW |
---|---|
1 //===- PNaClABIVerifyFunctions.cpp - Verify PNaCl ABI rules ---------------===// | 1 //===- PNaClABIVerifyFunctions.cpp - Verify PNaCl ABI rules ---------------===// |
2 // | 2 // |
3 // The LLVM Compiler Infrastructure | 3 // The LLVM Compiler Infrastructure |
4 // | 4 // |
5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
7 // | 7 // |
8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
9 // | 9 // |
10 // Verify function-level PNaCl ABI requirements. | 10 // Verify function-level PNaCl ABI requirements. |
(...skipping 38 matching lines...) | |
49 initializePNaClABIVerifyFunctionsPass(*PassRegistry::getPassRegistry()); | 49 initializePNaClABIVerifyFunctionsPass(*PassRegistry::getPassRegistry()); |
50 } | 50 } |
51 ~PNaClABIVerifyFunctions() { | 51 ~PNaClABIVerifyFunctions() { |
52 if (ReporterIsOwned) | 52 if (ReporterIsOwned) |
53 delete Reporter; | 53 delete Reporter; |
54 } | 54 } |
55 virtual bool doInitialization(Module &M) { | 55 virtual bool doInitialization(Module &M) { |
56 AtomicIntrinsics.reset(new NaCl::AtomicIntrinsics(M.getContext())); | 56 AtomicIntrinsics.reset(new NaCl::AtomicIntrinsics(M.getContext())); |
57 return false; | 57 return false; |
58 } | 58 } |
59 virtual void getAnalysisUsage(AnalysisUsage &Info) const { | |
60 Info.addRequired<DataLayout>(); | |
jvoung (off chromium) 2014/06/10 16:28:32
Should probably also setPreservesAll() ?
JF 2014/06/10 18:26:29
Done.
61 } | |
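
For context on the exchange above, here is a minimal sketch of how this override presumably reads once the setPreservesAll() suggestion is folded in; the exact final form is an assumption, not part of the patch as shown here:

```cpp
// Sketch only: assumes the reviewer's suggestion was applied verbatim in a
// later patch set. This would be a member of PNaClABIVerifyFunctions.
virtual void getAnalysisUsage(AnalysisUsage &Info) const {
  Info.addRequired<DataLayout>(); // checkInstruction() needs type sizes.
  Info.setPreservesAll();         // The verifier only inspects IR; it changes nothing.
}
```
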
59 bool runOnFunction(Function &F); | 62 bool runOnFunction(Function &F); |
60 virtual void print(raw_ostream &O, const Module *M) const; | 63 virtual void print(raw_ostream &O, const Module *M) const; |
61 private: | 64 private: |
62 bool IsWhitelistedMetadata(unsigned MDKind); | 65 bool IsWhitelistedMetadata(unsigned MDKind); |
63 const char *checkInstruction(const Instruction *Inst); | 66 const char *checkInstruction(const DataLayout *DL, const Instruction *Inst); |
64 PNaClABIErrorReporter *Reporter; | 67 PNaClABIErrorReporter *Reporter; |
65 bool ReporterIsOwned; | 68 bool ReporterIsOwned; |
66 OwningPtr<NaCl::AtomicIntrinsics> AtomicIntrinsics; | 69 OwningPtr<NaCl::AtomicIntrinsics> AtomicIntrinsics; |
67 }; | 70 }; |
68 | 71 |
69 } // and anonymous namespace | 72 } // and anonymous namespace |
70 | 73 |
71 // There's no built-in way to get the name of an MDNode, so use a | 74 // There's no built-in way to get the name of an MDNode, so use a |
72 // string ostream to print it. | 75 // string ostream to print it. |
73 static std::string getMDNodeString(unsigned Kind, | 76 static std::string getMDNodeString(unsigned Kind, |
74 const SmallVectorImpl<StringRef> &MDNames) { | 77 const SmallVectorImpl<StringRef> &MDNames) { |
75 std::string MDName; | 78 std::string MDName; |
76 raw_string_ostream N(MDName); | 79 raw_string_ostream N(MDName); |
77 if (Kind < MDNames.size()) { | 80 if (Kind < MDNames.size()) { |
78 N << "!" << MDNames[Kind]; | 81 N << "!" << MDNames[Kind]; |
79 } else { | 82 } else { |
80 N << "!<unknown kind #" << Kind << ">"; | 83 N << "!<unknown kind #" << Kind << ">"; |
81 } | 84 } |
82 return N.str(); | 85 return N.str(); |
83 } | 86 } |
84 | 87 |
85 bool PNaClABIVerifyFunctions::IsWhitelistedMetadata(unsigned MDKind) { | 88 bool PNaClABIVerifyFunctions::IsWhitelistedMetadata(unsigned MDKind) { |
86 return MDKind == LLVMContext::MD_dbg && PNaClABIAllowDebugMetadata; | 89 return MDKind == LLVMContext::MD_dbg && PNaClABIAllowDebugMetadata; |
87 } | 90 } |
88 | 91 |
89 // A valid pointer type is either: | 92 // A valid pointer type is either: |
90 // * a pointer to a valid PNaCl scalar type (except i1), or | 93 // * a pointer to a valid PNaCl scalar type (except i1), or |
94 // * a pointer to a valid PNaCl vector type (except i1), or | |
91 // * a function pointer (with valid argument and return types). | 95 // * a function pointer (with valid argument and return types). |
92 // | 96 // |
93 // i1 is disallowed so that all loads and stores are a whole number of | 97 // i1 is disallowed so that all loads and stores are a whole number of |
94 // bytes, and so that we do not need to define whether a store of i1 | 98 // bytes, and so that we do not need to define whether a store of i1 |
95 // zero-extends. | 99 // zero-extends. |
96 // | |
97 // Vector pointer types aren't currently allowed because vector memory | |
98 // accesses go through their scalar elements. | |
99 static bool isValidPointerType(Type *Ty) { | 100 static bool isValidPointerType(Type *Ty) { |
100 if (PointerType *PtrTy = dyn_cast<PointerType>(Ty)) { | 101 if (PointerType *PtrTy = dyn_cast<PointerType>(Ty)) { |
101 if (PtrTy->getAddressSpace() != 0) | 102 if (PtrTy->getAddressSpace() != 0) |
102 return false; | 103 return false; |
103 Type *EltTy = PtrTy->getElementType(); | 104 Type *EltTy = PtrTy->getElementType(); |
104 if (PNaClABITypeChecker::isValidScalarType(EltTy) && !EltTy->isIntegerTy(1)) | 105 if (PNaClABITypeChecker::isValidScalarType(EltTy) && !EltTy->isIntegerTy(1)) |
105 return true; | 106 return true; |
107 if (PNaClABITypeChecker::isValidVectorType(EltTy) && | |
108 !cast<VectorType>(EltTy)->getElementType()->isIntegerTy(1)) | |
109 return true; | |
106 if (FunctionType *FTy = dyn_cast<FunctionType>(EltTy)) | 110 if (FunctionType *FTy = dyn_cast<FunctionType>(EltTy)) |
107 return PNaClABITypeChecker::isValidFunctionType(FTy); | 111 return PNaClABITypeChecker::isValidFunctionType(FTy); |
108 } | 112 } |
109 return false; | 113 return false; |
110 } | 114 } |
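
To illustrate the pointer rule above: the snippet below is not part of the CL; it assumes LLVM 3.x headers and calls the file-local predicate directly to show which pointer types are accepted after this change.

```cpp
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// Illustration of isValidPointerType() under the new rule (assumed LLVM 3.x API).
static void pointerTypeExamples() {
  LLVMContext Ctx;
  Type *I32 = Type::getInt32Ty(Ctx);
  Type *I1 = Type::getInt1Ty(Ctx);
  isValidPointerType(I32->getPointerTo());                     // true: scalar element
  isValidPointerType(VectorType::get(I32, 4)->getPointerTo()); // now true: <4 x i32>*
  isValidPointerType(VectorType::get(I1, 4)->getPointerTo());  // false: i1 elements
  isValidPointerType(PointerType::get(I32, 1));                // false: address space != 0
}
```
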
111 | 115 |
112 static bool isIntrinsicFunc(const Value *Val) { | 116 static bool isIntrinsicFunc(const Value *Val) { |
113 if (const Function *F = dyn_cast<Function>(Val)) | 117 if (const Function *F = dyn_cast<Function>(Val)) |
114 return F->isIntrinsic(); | 118 return F->isIntrinsic(); |
115 return false; | 119 return false; |
(...skipping 43 matching lines...) | |
159 if (isa<Instruction>(Val) || isa<Argument>(Val)) | 163 if (isa<Instruction>(Val) || isa<Argument>(Val)) |
160 return true; | 164 return true; |
161 // Contrary to scalars, constant vector values aren't allowed on | 165 // Contrary to scalars, constant vector values aren't allowed on |
162 // instructions, except undefined. Constant vectors are loaded from | 166 // instructions, except undefined. Constant vectors are loaded from |
163 // constant global memory instead, and can be rematerialized as | 167 // constant global memory instead, and can be rematerialized as |
164 // constants by the backend if need be. | 168 // constants by the backend if need be. |
165 return PNaClABITypeChecker::isValidVectorType(Val->getType()) && | 169 return PNaClABITypeChecker::isValidVectorType(Val->getType()) && |
166 isa<UndefValue>(Val); | 170 isa<UndefValue>(Val); |
167 } | 171 } |
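
The rule described in the comment above can be illustrated as follows; the enclosing predicate's name is elided in this view, so the snippet only shows which <4 x i32> operand values it would accept (assumed LLVM 3.x constant APIs, not part of the CL):

```cpp
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// Illustration only: undef vectors may appear as instruction operands,
// other constant vectors may not.
static void vectorOperandExamples() {
  LLVMContext Ctx;
  Type *I32 = Type::getInt32Ty(Ctx);
  VectorType *V4I32 = VectorType::get(I32, 4);
  Value *Undef = UndefValue::get(V4I32);                      // allowed as an operand
  Value *Splat =
      ConstantVector::getSplat(4, ConstantInt::get(I32, 7));  // rejected: constant
  // vectors must instead be placed in constant global memory and loaded.
  (void)Undef;
  (void)Splat;
}
```
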
168 | 172 |
169 static bool isAllowedAlignment(unsigned Alignment, Type *Ty) { | 173 static bool isAllowedAlignment(const DataLayout *DL, uint64_t Alignment, |
170 // Non-atomic integer operations must always use "align 1", since we | 174 Type *Ty) { |
171 // do not want the backend to generate code with non-portable | 175 // Non-atomic integer operations must always use "align 1", since we do not |
172 // undefined behaviour (such as misaligned access faults) if user | 176 // want the backend to generate code with non-portable undefined behaviour |
173 // code specifies "align 4" but uses a misaligned pointer. As a | 177 // (such as misaligned access faults) if user code specifies "align 4" but |
174 // concession to performance, we allow larger alignment values for | 178 // uses a misaligned pointer. As a concession to performance, we allow larger |
175 // floating point types. | 179 // alignment values for floating point types, and we only allow vectors to be |
180 // aligned by their element's size. | |
176 // | 181 // |
177 // To reduce the set of alignment values that need to be encoded in | 182 // TODO(jfb) Allow vectors to be marked as align == 1. This requires proper |
178 // pexes, we disallow other alignment values. We require alignments | 183 // testing on each supported ISA, and is probably not as common as |
179 // to be explicit by disallowing Alignment == 0. | 184 // align == elemsize. |
180 // | 185 // |
181 // Vector memory accesses go through their scalar elements, there is | 186 // To reduce the set of alignment values that need to be encoded in pexes, we |
182 // therefore no such thing as vector alignment. | 187 // disallow other alignment values. We require alignments to be explicit by |
183 return Alignment == 1 || | 188 // disallowing Alignment == 0. |
184 (Ty->isDoubleTy() && Alignment == 8) || | 189 if (Alignment > std::numeric_limits<uint64_t>::max() / CHAR_BIT) |
185 (Ty->isFloatTy() && Alignment == 4); | 190 return false; // No overflow assumed below. |
191 else if (VectorType *VTy = dyn_cast<VectorType>(Ty)) | |
192 return !VTy->getElementType()->isIntegerTy(1) && | |
193 (Alignment * CHAR_BIT == | |
194 DL->getTypeSizeInBits(VTy->getElementType())); | |
195 else | |
196 return Alignment == 1 || | |
197 (Ty->isDoubleTy() && Alignment == 8) || | |
198 (Ty->isFloatTy() && Alignment == 4); | |
186 } | 199 } |
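
A dependency-free sketch of the new alignment rule may help when reading the rewritten predicate above. The names below are invented for illustration; the real code queries DataLayout for the element size and additionally rejects i1 vector elements.

```cpp
#include <climits>
#include <cstdint>
#include <limits>

// Sketch of the rule only, not the CL's code: vectors must be aligned exactly
// to their element size; scalars keep align 1, with the float/double exceptions.
enum class Kind { Int, Float, Double, Vector };

static bool allowedAlignmentSketch(uint64_t Align, Kind K,
                                   uint64_t VecElemBits = 0) {
  if (Align > std::numeric_limits<uint64_t>::max() / CHAR_BIT)
    return false;                           // no overflow in Align * CHAR_BIT below
  if (K == Kind::Vector)
    return Align * CHAR_BIT == VecElemBits; // e.g. <4 x i32> needs align 4
  return Align == 1 || (K == Kind::Double && Align == 8) ||
         (K == Kind::Float && Align == 4);
}

// allowedAlignmentSketch(4, Kind::Vector, 32)  -> true   (<4 x i32>, align 4)
// allowedAlignmentSketch(16, Kind::Vector, 32) -> false  (whole-vector alignment)
// allowedAlignmentSketch(4, Kind::Int)         -> false  (integers: align 1 only)
```
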
187 | 200 |
188 static bool hasAllowedAtomicRMWOperation( | 201 static bool hasAllowedAtomicRMWOperation( |
189 const NaCl::AtomicIntrinsics::AtomicIntrinsic *I, const CallInst *Call) { | 202 const NaCl::AtomicIntrinsics::AtomicIntrinsic *I, const CallInst *Call) { |
190 for (size_t P = 0; P != I->NumParams; ++P) { | 203 for (size_t P = 0; P != I->NumParams; ++P) { |
191 if (I->ParamType[P] != NaCl::AtomicIntrinsics::RMW) | 204 if (I->ParamType[P] != NaCl::AtomicIntrinsics::RMW) |
192 continue; | 205 continue; |
193 | 206 |
194 const Value *Operation = Call->getOperand(P); | 207 const Value *Operation = Call->getOperand(P); |
195 if (!Operation) | 208 if (!Operation) |
(...skipping 50 matching lines...) | |
246 if (I == 1 || I == 2 || I == 4 || I == 8) | 259 if (I == 1 || I == 2 || I == 4 || I == 8) |
247 return true; | 260 return true; |
248 return false; | 261 return false; |
249 } | 262 } |
250 | 263 |
251 // Check the instruction's opcode and its operands. The operands may | 264 // Check the instruction's opcode and its operands. The operands may |
252 // require opcode-specific checking. | 265 // require opcode-specific checking. |
253 // | 266 // |
254 // This returns an error string if the instruction is rejected, or | 267 // This returns an error string if the instruction is rejected, or |
255 // NULL if the instruction is allowed. | 268 // NULL if the instruction is allowed. |
256 const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) { | 269 const char *PNaClABIVerifyFunctions::checkInstruction(const DataLayout *DL, |
270 const Instruction *Inst) { | |
257 // If the instruction has a single pointer operand, PtrOperandIndex is | 271 // If the instruction has a single pointer operand, PtrOperandIndex is |
258 // set to its operand index. | 272 // set to its operand index. |
259 unsigned PtrOperandIndex = -1; | 273 unsigned PtrOperandIndex = -1; |
260 | 274 |
261 switch (Inst->getOpcode()) { | 275 switch (Inst->getOpcode()) { |
262 // Disallowed instructions. Default is to disallow. | 276 // Disallowed instructions. Default is to disallow. |
263 // We expand GetElementPtr out into arithmetic. | 277 // We expand GetElementPtr out into arithmetic. |
264 case Instruction::GetElementPtr: | 278 case Instruction::GetElementPtr: |
265 // VAArg is expanded out by ExpandVarArgs. | 279 // VAArg is expanded out by ExpandVarArgs. |
266 case Instruction::VAArg: | 280 case Instruction::VAArg: |
(...skipping 90 matching lines...) | |
357 // Memory accesses. | 371 // Memory accesses. |
358 case Instruction::Load: { | 372 case Instruction::Load: { |
359 const LoadInst *Load = cast<LoadInst>(Inst); | 373 const LoadInst *Load = cast<LoadInst>(Inst); |
360 PtrOperandIndex = Load->getPointerOperandIndex(); | 374 PtrOperandIndex = Load->getPointerOperandIndex(); |
361 if (Load->isAtomic()) | 375 if (Load->isAtomic()) |
362 return "atomic load"; | 376 return "atomic load"; |
363 if (Load->isVolatile()) | 377 if (Load->isVolatile()) |
364 return "volatile load"; | 378 return "volatile load"; |
365 if (!isNormalizedPtr(Inst->getOperand(PtrOperandIndex))) | 379 if (!isNormalizedPtr(Inst->getOperand(PtrOperandIndex))) |
366 return "bad pointer"; | 380 return "bad pointer"; |
367 if (!isAllowedAlignment(Load->getAlignment(), | 381 if (!isAllowedAlignment(DL, Load->getAlignment(), Load->getType())) |
368 Load->getType())) | |
369 return "bad alignment"; | 382 return "bad alignment"; |
370 break; | 383 break; |
371 } | 384 } |
372 case Instruction::Store: { | 385 case Instruction::Store: { |
373 const StoreInst *Store = cast<StoreInst>(Inst); | 386 const StoreInst *Store = cast<StoreInst>(Inst); |
374 PtrOperandIndex = Store->getPointerOperandIndex(); | 387 PtrOperandIndex = Store->getPointerOperandIndex(); |
375 if (Store->isAtomic()) | 388 if (Store->isAtomic()) |
376 return "atomic store"; | 389 return "atomic store"; |
377 if (Store->isVolatile()) | 390 if (Store->isVolatile()) |
378 return "volatile store"; | 391 return "volatile store"; |
379 if (!isNormalizedPtr(Inst->getOperand(PtrOperandIndex))) | 392 if (!isNormalizedPtr(Inst->getOperand(PtrOperandIndex))) |
380 return "bad pointer"; | 393 return "bad pointer"; |
381 if (!isAllowedAlignment(Store->getAlignment(), | 394 if (!isAllowedAlignment(DL, Store->getAlignment(), |
382 Store->getValueOperand()->getType())) | 395 Store->getValueOperand()->getType())) |
383 return "bad alignment"; | 396 return "bad alignment"; |
384 break; | 397 break; |
385 } | 398 } |
386 | 399 |
387 // Casts. | 400 // Casts. |
388 case Instruction::BitCast: | 401 case Instruction::BitCast: |
389 if (Inst->getType()->isPointerTy()) { | 402 if (Inst->getType()->isPointerTy()) { |
390 PtrOperandIndex = 0; | 403 PtrOperandIndex = 0; |
391 if (!isInherentPtr(Inst->getOperand(PtrOperandIndex))) | 404 if (!isInherentPtr(Inst->getOperand(PtrOperandIndex))) |
(...skipping 142 matching lines...) | |
534 dyn_cast<PossiblyExactOperator>(Inst)) { | 547 dyn_cast<PossiblyExactOperator>(Inst)) { |
535 if (Op->isExact()) | 548 if (Op->isExact()) |
536 return "has \"exact\" attribute"; | 549 return "has \"exact\" attribute"; |
537 } | 550 } |
538 | 551 |
539 // Allow the instruction. | 552 // Allow the instruction. |
540 return NULL; | 553 return NULL; |
541 } | 554 } |
542 | 555 |
543 bool PNaClABIVerifyFunctions::runOnFunction(Function &F) { | 556 bool PNaClABIVerifyFunctions::runOnFunction(Function &F) { |
557 const DataLayout *DL = &getAnalysis<DataLayout>(); | |
544 SmallVector<StringRef, 8> MDNames; | 558 SmallVector<StringRef, 8> MDNames; |
545 F.getContext().getMDKindNames(MDNames); | 559 F.getContext().getMDKindNames(MDNames); |
546 | 560 |
547 for (Function::const_iterator FI = F.begin(), FE = F.end(); | 561 for (Function::const_iterator FI = F.begin(), FE = F.end(); |
548 FI != FE; ++FI) { | 562 FI != FE; ++FI) { |
549 for (BasicBlock::const_iterator BBI = FI->begin(), BBE = FI->end(); | 563 for (BasicBlock::const_iterator BBI = FI->begin(), BBE = FI->end(); |
550 BBI != BBE; ++BBI) { | 564 BBI != BBE; ++BBI) { |
551 const Instruction *Inst = BBI; | 565 const Instruction *Inst = BBI; |
552 // Check the instruction opcode first. This simplifies testing, | 566 // Check the instruction opcode first. This simplifies testing, |
553 // because some instruction opcodes must be rejected out of hand | 567 // because some instruction opcodes must be rejected out of hand |
554 // (regardless of the instruction's result type) and the tests | 568 // (regardless of the instruction's result type) and the tests |
555 // check the reason for rejection. | 569 // check the reason for rejection. |
556 const char *Error = checkInstruction(BBI); | 570 const char *Error = checkInstruction(DL, BBI); |
557 // Check the instruction's result type. | 571 // Check the instruction's result type. |
558 bool BadResult = false; | 572 bool BadResult = false; |
559 if (!Error && !(PNaClABITypeChecker::isValidScalarType(Inst->getType()) || | 573 if (!Error && !(PNaClABITypeChecker::isValidScalarType(Inst->getType()) || |
560 PNaClABITypeChecker::isValidVectorType(Inst->getType()) || | 574 PNaClABITypeChecker::isValidVectorType(Inst->getType()) || |
561 isNormalizedPtr(Inst) || | 575 isNormalizedPtr(Inst) || |
562 isa<AllocaInst>(Inst))) { | 576 isa<AllocaInst>(Inst))) { |
563 Error = "bad result type"; | 577 Error = "bad result type"; |
564 BadResult = true; | 578 BadResult = true; |
565 } | 579 } |
566 if (Error) { | 580 if (Error) { |
(...skipping 32 matching lines...) | |
599 } | 613 } |
600 | 614 |
601 char PNaClABIVerifyFunctions::ID = 0; | 615 char PNaClABIVerifyFunctions::ID = 0; |
602 INITIALIZE_PASS(PNaClABIVerifyFunctions, "verify-pnaclabi-functions", | 616 INITIALIZE_PASS(PNaClABIVerifyFunctions, "verify-pnaclabi-functions", |
603 "Verify functions for PNaCl", false, true) | 617 "Verify functions for PNaCl", false, true) |
604 | 618 |
605 FunctionPass *llvm::createPNaClABIVerifyFunctionsPass( | 619 FunctionPass *llvm::createPNaClABIVerifyFunctionsPass( |
606 PNaClABIErrorReporter *Reporter) { | 620 PNaClABIErrorReporter *Reporter) { |
607 return new PNaClABIVerifyFunctions(Reporter); | 621 return new PNaClABIVerifyFunctions(Reporter); |
608 } | 622 } |