OLD | NEW |
(Empty) | |
| 1 //===- PNaClAllowedIntrinsics.cpp - Set of allowed intrinsics -------------===// |
| 2 // |
| 3 // The LLVM Compiler Infrastructure |
| 4 // |
| 5 // This file is distributed under the University of Illinois Open Source |
| 6 // License. See LICENSE.TXT for details. |
| 7 // |
| 8 //===----------------------------------------------------------------------===// |
| 9 // |
| 10 // Implements the class that holds the set of allowed intrinsics. |
| 11 // |
| 12 // We keep three categories of intrinsics for now: |
| 13 // (1) Allowed always, provided the exact name and type match. |
| 14 // (2) Never allowed. |
| 15 // (3) Debug info intrinsics. |
| 16 // |
| 17 //===----------------------------------------------------------------------===// |
| 18 |
| 19 #include "llvm/Analysis/NaCl/PNaClAllowedIntrinsics.h" |
| 20 |
| 21 #include "llvm/ADT/STLExtras.h" |
| 22 #include "llvm/Analysis/NaCl.h" |
| 23 #include "llvm/IR/Function.h" |
| 24 #include "llvm/IR/Type.h" |
| 25 |
| 26 using namespace llvm; |
| 27 |
| 28 PNaClAllowedIntrinsics:: |
| 29 PNaClAllowedIntrinsics(LLVMContext *Context) : Context(Context) { |
| 30 Type *I8Ptr = Type::getInt8PtrTy(*Context); |
| 31 Type *I8 = Type::getInt8Ty(*Context); |
| 32 Type *I16 = Type::getInt16Ty(*Context); |
| 33 Type *I32 = Type::getInt32Ty(*Context); |
| 34 Type *I64 = Type::getInt64Ty(*Context); |
| 35 Type *Float = Type::getFloatTy(*Context); |
| 36 Type *Double = Type::getDoubleTy(*Context); |
| 37 Type *Vec4Float = VectorType::get(Float, 4); |
| 38 |
| 39 // We accept bswap for a limited set of types (i16, i32, i64). The |
| 40 // various backends are able to generate instructions to implement |
| 41 // the intrinsic. Also, i16 and i64 are easy to implement as long |
| 42 // as there is a way to do i32. |
| 43 addIntrinsic(Intrinsic::bswap, I16); |
| 44 addIntrinsic(Intrinsic::bswap, I32); |
| 45 addIntrinsic(Intrinsic::bswap, I64); |
| 46 |
| 47 // We accept cttz, ctlz, and ctpop for a limited set of types (i32, i64). |
| 48 addIntrinsic(Intrinsic::ctlz, I32); |
| 49 addIntrinsic(Intrinsic::ctlz, I64); |
| 50 addIntrinsic(Intrinsic::cttz, I32); |
| 51 addIntrinsic(Intrinsic::cttz, I64); |
| 52 addIntrinsic(Intrinsic::ctpop, I32); |
| 53 addIntrinsic(Intrinsic::ctpop, I64); |
| 54 |
| 55 addIntrinsic(Intrinsic::nacl_read_tp); |
| 56 addIntrinsic(Intrinsic::nacl_longjmp); |
| 57 addIntrinsic(Intrinsic::nacl_setjmp); |
| 58 |
| 59 addIntrinsic(Intrinsic::fabs, Float); |
| 60 addIntrinsic(Intrinsic::fabs, Double); |
| 61 addIntrinsic(Intrinsic::fabs, Vec4Float); |
| 62 |
| 63 // For native sqrt instructions; sqrt(x) must be NaN when x < -0.0. |
| 64 addIntrinsic(Intrinsic::sqrt, Float); |
| 65 addIntrinsic(Intrinsic::sqrt, Double); |
| 66 |
| 67 Type *AtomicTypes[] = { I8, I16, I32, I64 }; |
| 68 for (size_t T = 0, E = array_lengthof(AtomicTypes); T != E; ++T) { |
| 69 addIntrinsic(Intrinsic::nacl_atomic_load, AtomicTypes[T]); |
| 70 addIntrinsic(Intrinsic::nacl_atomic_store, AtomicTypes[T]); |
| 71 addIntrinsic(Intrinsic::nacl_atomic_rmw, AtomicTypes[T]); |
| 72 addIntrinsic(Intrinsic::nacl_atomic_cmpxchg, AtomicTypes[T]); |
| 73 } |
| 74 addIntrinsic(Intrinsic::nacl_atomic_fence); |
| 75 addIntrinsic(Intrinsic::nacl_atomic_fence_all); |
| 76 |
| 77 addIntrinsic(Intrinsic::nacl_atomic_is_lock_free); |
| 78 |
| 79 // Stack save and restore are used to support C99 VLAs. |
| 80 addIntrinsic(Intrinsic::stacksave); |
| 81 addIntrinsic(Intrinsic::stackrestore); |
| 82 |
| 83 addIntrinsic(Intrinsic::trap); |
| 84 |
| 85 // We only allow the variants of memcpy/memmove/memset with an i32 |
| 86 // "len" argument, not an i64 argument. |
| 87 Type *MemcpyTypes[] = { I8Ptr, I8Ptr, I32 }; |
| 88 addIntrinsic(Intrinsic::memcpy, MemcpyTypes); |
| 89 addIntrinsic(Intrinsic::memmove, MemcpyTypes); |
| 90 Type *MemsetTypes[] = { I8Ptr, I32 }; |
| 91 addIntrinsic(Intrinsic::memset, MemsetTypes); |
| 92 } |
| 93 |
| 94 void PNaClAllowedIntrinsics::addIntrinsic(Intrinsic::ID ID, |
| 95 ArrayRef<Type *> Tys) { |
| 96 std::string Name = Intrinsic::getName(ID, Tys); |
| 97 FunctionType *FcnType = Intrinsic::getType(*Context, ID, Tys); |
| 98 if (TypeMap.count(Name) >= 1) { |
| 99 std::string Buffer; |
| 100 raw_string_ostream StrBuf(Buffer); |
| 101 StrBuf << "Intrinsic " << Name << " defined with multiple types: " |
| 102 << *TypeMap[Name] << " and " << *FcnType << "\n"; |
| 103 report_fatal_error(StrBuf.str()); |
| 104 } |
| 105 TypeMap[Name] = FcnType; |
| 106 } |
| 107 |
| 108 bool PNaClAllowedIntrinsics::isAllowed(const Function *Func) { |
| 109 if (isIntrinsicName(Func->getName())) |
| 110 return Func->getFunctionType() == TypeMap[Func->getName()]; |
| 111 // Check whether this is a debug-info intrinsic, which can be allowed |
| 112 // if the corresponding command-line flag is set. |
| 113 return isAllowedIntrinsicID(Func->getIntrinsicID()); |
| 114 } |
| 115 |
| 116 bool PNaClAllowedIntrinsics::isAllowedIntrinsicID(unsigned ID) { |
| 117 // (1) Allowed always, provided the exact name and type match. |
| 118 // (2) Never allowed. |
| 119 // (3) Debug info intrinsics. |
| 120 // |
| 121 // Please keep these sorted or grouped in a sensible way, within |
| 122 // each category. |
| 123 switch (ID) { |
| 124 // Disallow by default. |
| 125 default: return false; |
| 126 |
| 127 /* The following is intentionally commented out, since the default |
| 128 will return false. |
| 129 // (2) Known to be never allowed. |
| 130 case Intrinsic::not_intrinsic: |
| 131 // Trampolines depend on a target-specific-sized/aligned buffer. |
| 132 case Intrinsic::adjust_trampoline: |
| 133 case Intrinsic::init_trampoline: |
| 134 // CXX exception handling is not stable. |
| 135 case Intrinsic::eh_dwarf_cfa: |
| 136 case Intrinsic::eh_return_i32: |
| 137 case Intrinsic::eh_return_i64: |
| 138 case Intrinsic::eh_sjlj_callsite: |
| 139 case Intrinsic::eh_sjlj_functioncontext: |
| 140 case Intrinsic::eh_sjlj_longjmp: |
| 141 case Intrinsic::eh_sjlj_lsda: |
| 142 case Intrinsic::eh_sjlj_setjmp: |
| 143 case Intrinsic::eh_typeid_for: |
| 144 case Intrinsic::eh_unwind_init: |
| 145 // We do not want to expose addresses to the user. |
| 146 case Intrinsic::frameaddress: |
| 147 case Intrinsic::returnaddress: |
| 148 // Not supporting stack protectors. |
| 149 case Intrinsic::stackprotector: |
| 150 // Var-args handling is done w/out intrinsics. |
| 151 case Intrinsic::vacopy: |
| 152 case Intrinsic::vaend: |
| 153 case Intrinsic::vastart: |
| 154 // Disallow the *_with_overflow intrinsics because they return |
| 155 // struct types. All of them can be introduced by passing -ftrapv |
| 156 // to Clang, which we do not support for now. umul_with_overflow |
| 157 // and uadd_with_overflow are introduced by Clang for C++'s new[], |
| 158 // but ExpandArithWithOverflow expands out this use. |
| 159 case Intrinsic::sadd_with_overflow: |
| 160 case Intrinsic::ssub_with_overflow: |
| 161 case Intrinsic::uadd_with_overflow: |
| 162 case Intrinsic::usub_with_overflow: |
| 163 case Intrinsic::smul_with_overflow: |
| 164 case Intrinsic::umul_with_overflow: |
| 165 // Disallow lifetime.start/end because the semantics of what |
| 166 // arguments they accept are not very well defined, and because it |
| 167 // would be better to do merging of stack slots in the user |
| 168 // toolchain than in the PNaCl translator. |
| 169 // See https://code.google.com/p/nativeclient/issues/detail?id=3443 |
| 170 case Intrinsic::lifetime_end: |
| 171 case Intrinsic::lifetime_start: |
| 172 case Intrinsic::invariant_end: |
| 173 case Intrinsic::invariant_start: |
| 174 // Some transcendental functions not needed yet. |
| 175 case Intrinsic::cos: |
| 176 case Intrinsic::exp: |
| 177 case Intrinsic::exp2: |
| 178 case Intrinsic::log: |
| 179 case Intrinsic::log2: |
| 180 case Intrinsic::log10: |
| 181 case Intrinsic::pow: |
| 182 case Intrinsic::powi: |
| 183 case Intrinsic::sin: |
| 184 // We run -lower-expect to convert Intrinsic::expect into branch weights |
| 185 // and consume them in the middle-end. The backend just ignores llvm.expect. |
| 186 case Intrinsic::expect: |
| 187 // For FLT_ROUNDS macro from float.h. It works for ARM and X86 |
| 188 // (but not MIPS). Also, wait until we add a set_flt_rounds intrinsic |
| 189 // before we bless this. |
| 190 case Intrinsic::flt_rounds: |
| 191 return false; |
| 192 */ |
| 193 |
| 194 // (3) Debug info intrinsics. |
| 195 case Intrinsic::dbg_declare: |
| 196 case Intrinsic::dbg_value: |
| 197 return PNaClABIAllowDebugMetadata; |
| 198 } |
| 199 } |
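
Editor note: for context on how this set is consumed, below is a minimal sketch of a caller that checks every intrinsic declaration in a module against the allowed set via isAllowed(). The checkIntrinsicDecls helper, its loop structure, and its diagnostics are hypothetical illustrations, not the actual PNaCl ABI verifier code.

// Illustrative sketch only: reject any intrinsic declaration that is not in
// the allowed set. The helper name and diagnostics are hypothetical.
#include "llvm/Analysis/NaCl/PNaClAllowedIntrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static bool checkIntrinsicDecls(Module &M) {
  PNaClAllowedIntrinsics Allowed(&M.getContext());
  bool OK = true;
  for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
    // Only intrinsic declarations are of interest here; isAllowed() returns
    // false for non-intrinsics via the default case of isAllowedIntrinsicID.
    if (!F->isIntrinsic())
      continue;
    if (!Allowed.isAllowed(&*F)) {
      errs() << "Disallowed intrinsic: " << F->getName() << "\n";
      OK = false;
    }
  }
  return OK;
}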
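Editor note: the i32-only restriction on memcpy/memmove/memset falls out of the exact name-and-type match, since only the overload registered in the constructor (llvm.memcpy.p0i8.p0i8.i32) gets an entry in TypeMap. The sketch below, with a hypothetical declareMemcpyVariants helper, contrasts the two overloads; it is illustrative only.

// Illustrative sketch only: declare both memcpy overloads in a module. The
// i32-length variant matches the registered FunctionType; the i64 one does not.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static void declareMemcpyVariants(Module &M) {
  LLVMContext &C = M.getContext();
  Type *I8Ptr = Type::getInt8PtrTy(C);
  Type *I32 = Type::getInt32Ty(C);
  Type *I64 = Type::getInt64Ty(C);

  // Allowed overload: llvm.memcpy.p0i8.p0i8.i32 (i32 "len" argument).
  Type *AllowedTys[] = { I8Ptr, I8Ptr, I32 };
  Intrinsic::getDeclaration(&M, Intrinsic::memcpy, AllowedTys);

  // Disallowed overload: llvm.memcpy.p0i8.p0i8.i64 (i64 "len" argument).
  Type *DisallowedTys[] = { I8Ptr, I8Ptr, I64 };
  Intrinsic::getDeclaration(&M, Intrinsic::memcpy, DisallowedTys);
}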