| OLD | NEW |
| 1 //===- PNaClAllowedIntrinsics.cpp - Set of allowed intrinsics -------------===// | 1 //===- PNaClAllowedIntrinsics.cpp - Set of allowed intrinsics -------------===// |
| 2 // | 2 // |
| 3 // The LLVM Compiler Infrastructure | 3 // The LLVM Compiler Infrastructure |
| 4 // | 4 // |
| 5 // This file is distributed under the University of Illinois Open Source | 5 // This file is distributed under the University of Illinois Open Source |
| 6 // License. See LICENSE.TXT for details. | 6 // License. See LICENSE.TXT for details. |
| 7 // | 7 // |
| 8 //===----------------------------------------------------------------------===// | 8 //===----------------------------------------------------------------------===// |
| 9 // | 9 // |
| 10 // Defines the implementation of the class that holds the set of allowed intrinsics. | 10 // Defines the implementation of the class that holds the set of allowed intrinsics. |
| 11 // | 11 // |
| 12 // Keep 3 categories of intrinsics for now. | 12 // Keep 3 categories of intrinsics for now. |
| 13 // (1) Allowed always, provided the exact name and type match. | 13 // (1) Allowed always, provided the exact name and type match. |
| 14 // (2) Never allowed. | 14 // (2) Never allowed. |
| 15 // (3) Debug info intrinsics. | 15 // (3) Debug info intrinsics. |
| 16 // | 16 // |
| 17 //===----------------------------------------------------------------------===// | 17 //===----------------------------------------------------------------------===// |
| 18 | 18 |
| 19 #include "llvm/Analysis/NaCl/PNaClAllowedIntrinsics.h" | 19 #include "llvm/Analysis/NaCl/PNaClAllowedIntrinsics.h" |
| 20 | 20 |
| 21 #include "llvm/ADT/STLExtras.h" | 21 #include "llvm/ADT/STLExtras.h" |
| 22 #include "llvm/Analysis/NaCl.h" | 22 #include "llvm/Analysis/NaCl.h" |
| 23 #include "llvm/IR/Function.h" | 23 #include "llvm/IR/Function.h" |
| 24 #include "llvm/IR/Type.h" | 24 #include "llvm/IR/Type.h" |
| 25 | 25 |
| 26 using namespace llvm; | 26 using namespace llvm; |
| 27 | 27 |
| 28 /* |
| 29 The constructor sets up the whitelist of allowed intrinsics and their expected |
| 30 types. The comments in that code give details on each allowed intrinsic.
| 31 Additionally, the following intrinsics are disallowed for the stated reasons: |
| 32 |
| 33 * Trampolines depend on a target-specific-sized/aligned buffer. |
| 34 Intrinsic::adjust_trampoline: |
| 35 Intrinsic::init_trampoline: |
| 36 * CXX exception handling is not stable. |
| 37 Intrinsic::eh_dwarf_cfa: |
| 38 Intrinsic::eh_return_i32: |
| 39 Intrinsic::eh_return_i64: |
| 40 Intrinsic::eh_sjlj_callsite: |
| 41 Intrinsic::eh_sjlj_functioncontext: |
| 42 Intrinsic::eh_sjlj_longjmp: |
| 43 Intrinsic::eh_sjlj_lsda: |
| 44 Intrinsic::eh_sjlj_setjmp: |
| 45 Intrinsic::eh_typeid_for: |
| 46 Intrinsic::eh_unwind_init: |
| 47 * We do not want to expose addresses to the user. |
| 48 Intrinsic::frameaddress: |
| 49 Intrinsic::returnaddress: |
| 50 * We do not support stack protectors. |
| 51 Intrinsic::stackprotector: |
| 52 * Var-args handling is done w/out intrinsics. |
| 53 Intrinsic::vacopy: |
| 54 Intrinsic::vaend: |
| 55 Intrinsic::vastart: |
| 56 * Disallow the *_with_overflow intrinsics because they return |
| 57 struct types. All of them can be introduced by passing -ftrapv |
| 58 to Clang, which we do not support for now. umul_with_overflow |
| 59 and uadd_with_overflow are introduced by Clang for C++'s new[], |
| 60 but ExpandArithWithOverflow expands out this use. |
| 61 Intrinsic::sadd_with_overflow: |
| 62 Intrinsic::ssub_with_overflow: |
| 63 Intrinsic::uadd_with_overflow: |
| 64 Intrinsic::usub_with_overflow: |
| 65 Intrinsic::smul_with_overflow: |
| 66 Intrinsic::umul_with_overflow: |
| 67 * Disallow lifetime.start/end because the semantics of what |
| 68 arguments they accept are not very well defined, and because it |
| 69 would be better to do merging of stack slots in the user |
| 70 toolchain than in the PNaCl translator. |
| 71 See https://code.google.com/p/nativeclient/issues/detail?id=3443 |
| 72 Intrinsic::lifetime_end: |
| 73 Intrinsic::lifetime_start: |
| 74 Intrinsic::invariant_end: |
| 75 Intrinsic::invariant_start: |
| 76 * Some transcendental functions not needed yet. |
| 77 Intrinsic::cos: |
| 78 Intrinsic::exp: |
| 79 Intrinsic::exp2: |
| 80 Intrinsic::log: |
| 81 Intrinsic::log2: |
| 82 Intrinsic::log10: |
| 83 Intrinsic::pow: |
| 84 Intrinsic::powi: |
| 85 Intrinsic::sin: |
| 86 * We run -lower-expect to convert Intrinsic::expect into branch weights,
| 87 which are consumed in the middle-end. The backend just ignores llvm.expect.
| 88 Intrinsic::expect: |
| 89 * For FLT_ROUNDS macro from float.h. It works for ARM and X86 |
| 90 (but not MIPS). Also, wait until we add a set_flt_rounds intrinsic |
| 91 before we bless this. |
| 92 Intrinsic::flt_rounds:
| 93 */ |
| 28 PNaClAllowedIntrinsics:: | 94 PNaClAllowedIntrinsics:: |
| 29 PNaClAllowedIntrinsics(LLVMContext *Context) : Context(Context) { | 95 PNaClAllowedIntrinsics(LLVMContext *Context) : Context(Context) { |
| 30 Type *I8Ptr = Type::getInt8PtrTy(*Context); | 96 Type *I8Ptr = Type::getInt8PtrTy(*Context); |
| 31 Type *I8 = Type::getInt8Ty(*Context); | 97 Type *I8 = Type::getInt8Ty(*Context); |
| 32 Type *I16 = Type::getInt16Ty(*Context); | 98 Type *I16 = Type::getInt16Ty(*Context); |
| 33 Type *I32 = Type::getInt32Ty(*Context); | 99 Type *I32 = Type::getInt32Ty(*Context); |
| 34 Type *I64 = Type::getInt64Ty(*Context); | 100 Type *I64 = Type::getInt64Ty(*Context); |
| 35 Type *Float = Type::getFloatTy(*Context); | 101 Type *Float = Type::getFloatTy(*Context); |
| 36 Type *Double = Type::getDoubleTy(*Context); | 102 Type *Double = Type::getDoubleTy(*Context); |
| 37 Type *Vec4Float = VectorType::get(Float, 4); | 103 Type *Vec4Float = VectorType::get(Float, 4); |
| (...skipping 65 matching lines...) |
| 103 report_fatal_error(StrBuf.str()); | 169 report_fatal_error(StrBuf.str()); |
| 104 } | 170 } |
| 105 TypeMap[Name] = FcnType; | 171 TypeMap[Name] = FcnType; |
| 106 } | 172 } |
| 107 | 173 |
| 108 bool PNaClAllowedIntrinsics::isAllowed(const Function *Func) { | 174 bool PNaClAllowedIntrinsics::isAllowed(const Function *Func) { |
| 109 if (isIntrinsicName(Func->getName())) | 175 if (isIntrinsicName(Func->getName())) |
| 110 return Func->getFunctionType() == TypeMap[Func->getName()]; | 176 return Func->getFunctionType() == TypeMap[Func->getName()]; |
| 111 // Check whether this is a debug info intrinsic, which can be allowed if | 177 // Check whether this is a debug info intrinsic, which can be allowed if |
| 112 // the command-line flag is set. | 178 // the command-line flag is set. |
| 113 return isAllowedIntrinsicID(Func->getIntrinsicID()); | 179 return isAllowedDebugInfoIntrinsic(Func->getIntrinsicID()); |
| 114 } | 180 } |
| 115 | 181 |
| 116 bool PNaClAllowedIntrinsics::isAllowedIntrinsicID(unsigned ID) { | 182 bool PNaClAllowedIntrinsics::isAllowedDebugInfoIntrinsic(unsigned IntrinsicID) { |
| 117 // (1) Allowed always, provided the exact name and type match. | 183 /* These intrinsics are allowed when debug info metadata is also allowed, |
| 118 // (2) Never allowed. | 184 and we just assume that they are called correctly by the frontend. */ |
| 119 // (3) Debug info intrinsics. | 185 switch (IntrinsicID) { |
| 120 // | |
| 121 // Please keep these sorted or grouped in a sensible way, within | |
| 122 // each category. | |
| 123 switch (ID) { | |
| 124 // Disallow by default. | |
| 125 default: return false; | 186 default: return false; |
| 126 | |
| 127 /* The following is intentionally commented out, since the default | |
| 128 will return false. | |
| 129 // (2) Known to be never allowed. | |
| 130 case Intrinsic::not_intrinsic: | |
| 131 // Trampolines depend on a target-specific-sized/aligned buffer. | |
| 132 case Intrinsic::adjust_trampoline: | |
| 133 case Intrinsic::init_trampoline: | |
| 134 // CXX exception handling is not stable. | |
| 135 case Intrinsic::eh_dwarf_cfa: | |
| 136 case Intrinsic::eh_return_i32: | |
| 137 case Intrinsic::eh_return_i64: | |
| 138 case Intrinsic::eh_sjlj_callsite: | |
| 139 case Intrinsic::eh_sjlj_functioncontext: | |
| 140 case Intrinsic::eh_sjlj_longjmp: | |
| 141 case Intrinsic::eh_sjlj_lsda: | |
| 142 case Intrinsic::eh_sjlj_setjmp: | |
| 143 case Intrinsic::eh_typeid_for: | |
| 144 case Intrinsic::eh_unwind_init: | |
| 145 // We do not want to expose addresses to the user. | |
| 146 case Intrinsic::frameaddress: | |
| 147 case Intrinsic::returnaddress: | |
| 148 // Not supporting stack protectors. | |
| 149 case Intrinsic::stackprotector: | |
| 150 // Var-args handling is done w/out intrinsics. | |
| 151 case Intrinsic::vacopy: | |
| 152 case Intrinsic::vaend: | |
| 153 case Intrinsic::vastart: | |
| 154 // Disallow the *_with_overflow intrinsics because they return | |
| 155 // struct types. All of them can be introduced by passing -ftrapv | |
| 156 // to Clang, which we do not support for now. umul_with_overflow | |
| 157 // and uadd_with_overflow are introduced by Clang for C++'s new[], | |
| 158 // but ExpandArithWithOverflow expands out this use. | |
| 159 case Intrinsic::sadd_with_overflow: | |
| 160 case Intrinsic::ssub_with_overflow: | |
| 161 case Intrinsic::uadd_with_overflow: | |
| 162 case Intrinsic::usub_with_overflow: | |
| 163 case Intrinsic::smul_with_overflow: | |
| 164 case Intrinsic::umul_with_overflow: | |
| 165 // Disallow lifetime.start/end because the semantics of what | |
| 166 // arguments they accept are not very well defined, and because it | |
| 167 // would be better to do merging of stack slots in the user | |
| 168 // toolchain than in the PNaCl translator. | |
| 169 // See https://code.google.com/p/nativeclient/issues/detail?id=3443 | |
| 170 case Intrinsic::lifetime_end: | |
| 171 case Intrinsic::lifetime_start: | |
| 172 case Intrinsic::invariant_end: | |
| 173 case Intrinsic::invariant_start: | |
| 174 // Some transcendental functions not needed yet. | |
| 175 case Intrinsic::cos: | |
| 176 case Intrinsic::exp: | |
| 177 case Intrinsic::exp2: | |
| 178 case Intrinsic::log: | |
| 179 case Intrinsic::log2: | |
| 180 case Intrinsic::log10: | |
| 181 case Intrinsic::pow: | |
| 182 case Intrinsic::powi: | |
| 183 case Intrinsic::sin: | |
| 184 // We run -lower-expect to convert Intrinsic::expect into branch weights | |
| 185 // and consume in the middle-end. The backend just ignores llvm.expect. | |
| 186 case Intrinsic::expect: | |
| 187 // For FLT_ROUNDS macro from float.h. It works for ARM and X86 | |
| 188 // (but not MIPS). Also, wait until we add a set_flt_rounds intrinsic | |
| 189 // before we bless this. | |
| 190 case Intrinsic::flt_rounds: | |
| 191 return false; | |
| 192 */ | |
| 193 | |
| 194 // (3) Debug info intrinsics. | |
| 195 case Intrinsic::dbg_declare: | 187 case Intrinsic::dbg_declare: |
| 196 case Intrinsic::dbg_value: | 188 case Intrinsic::dbg_value: |
| 197 return PNaClABIAllowDebugMetadata; | 189 return PNaClABIAllowDebugMetadata; |
| 198 } | 190 } |
| 199 } | 191 } |
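
For readers unfamiliar with how this whitelist is consumed, here is a minimal usage sketch in C++. The checker function, its name, and its error message are hypothetical illustrations, not part of this CL; only the PNaClAllowedIntrinsics constructor and isAllowed() shown above are assumed.

#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/NaCl/PNaClAllowedIntrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;

// Hypothetical caller: walk a module's functions and reject any intrinsic
// declaration that the whitelist does not allow.
static void checkModuleIntrinsics(Module &M) {
  PNaClAllowedIntrinsics Intrinsics(&M.getContext());
  for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
    if (F->isIntrinsic() && !Intrinsics.isAllowed(&*F))
      report_fatal_error(Twine("Disallowed intrinsic: ") + F->getName());
  }
}

Debug info intrinsics (llvm.dbg.declare, llvm.dbg.value) pass such a check only when PNaClABIAllowDebugMetadata is set, per isAllowedDebugInfoIntrinsic() above.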
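
To make the category-(1) rule concrete: isAllowed() compares a declaration's full FunctionType against the type recorded in TypeMap for that exact name, so a matching name with a different signature is rejected. The intrinsic name and signature below are hypothetical (the real whitelist entries are registered in the constructor lines elided above), so this sketch would return false; it only illustrates the shape of the check.

#include "llvm/Analysis/NaCl/PNaClAllowedIntrinsics.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static bool exampleExactTypeMatch() {
  LLVMContext C;
  PNaClAllowedIntrinsics Intrinsics(&C);

  // Hypothetical signature: void(i8*, i32). Neither this name nor this type
  // is taken from the CL.
  Type *ArgTypes[] = { Type::getInt8PtrTy(C), Type::getInt32Ty(C) };
  FunctionType *FT =
      FunctionType::get(Type::getVoidTy(C), ArgTypes, /*isVarArg=*/false);

  Module M("example", C);
  Function *F = Function::Create(FT, GlobalValue::ExternalLinkage,
                                 "llvm.nacl.example", &M);

  // Both the name and the full FunctionType must match a registered entry;
  // an unregistered (or differently typed) declaration is rejected.
  return Intrinsics.isAllowed(F);
}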