Chromium Code Reviews
| Index: lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp |
| diff --git a/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp b/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp |
| index 80d7da3f19baf026ada204a8488d8aefee056c41..3c23bd5718794ca180822f499a94d96c8b308d85 100644 |
| --- a/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp |
| +++ b/lib/Analysis/NaCl/PNaClABIVerifyFunctions.cpp |
| @@ -19,6 +19,7 @@ |
| #include "llvm/IR/IntrinsicInst.h" |
| #include "llvm/IR/LLVMContext.h" |
| #include "llvm/IR/Metadata.h" |
| +#include "llvm/IR/NaClIntrinsics.h" |
| #include "llvm/IR/Operator.h" |
| #include "llvm/Pass.h" |
| #include "llvm/Support/raw_ostream.h" |
| @@ -53,7 +54,8 @@ class PNaClABIVerifyFunctions : public FunctionPass { |
| virtual void print(raw_ostream &O, const Module *M) const; |
| private: |
| bool IsWhitelistedMetadata(unsigned MDKind); |
| - const char *checkInstruction(const Instruction *Inst); |
| + const char *checkInstruction(LLVMContext &C, const NaCl::AtomicIntrinsics &AI, |
| + const Instruction *Inst); |
| PNaClABIErrorReporter *Reporter; |
| bool ReporterIsOwned; |
| }; |
| @@ -144,16 +146,7 @@ static bool isValidScalarOperand(const Value *Val) { |
| isa<UndefValue>(Val)); |
| } |
| -static bool isAllowedAlignment(unsigned Alignment, Type *Ty, bool IsAtomic) { |
| - if (IsAtomic) { |
| - // For atomic operations, the alignment must match the size of the type. |
| - if (Ty->isIntegerTy()) { |
| - unsigned Bits = Ty->getIntegerBitWidth(); |
| - return Bits % 8 == 0 && Alignment == Bits / 8; |
| - } |
| - return (Ty->isDoubleTy() && Alignment == 8) || |
| - (Ty->isFloatTy() && Alignment == 4); |
| - } |
| +static bool isAllowedAlignment(unsigned Alignment, Type *Ty) { |
| // Non-atomic integer operations must always use "align 1", since we |
| // do not want the backend to generate code with non-portable |
| // undefined behaviour (such as misaligned access faults) if user |
| @@ -169,12 +162,58 @@ static bool isAllowedAlignment(unsigned Alignment, Type *Ty, bool IsAtomic) { |
| (Ty->isFloatTy() && Alignment == 4); |
| } |
| +static bool hasAllowedAtomicRMWOperation( |
| + NaCl::AtomicIntrinsics::const_iterator AI, const CallInst *Call) { |
| + for (size_t P = 0; P != AI->NumParams; ++P) { |
| + if (AI->ParamType[P] != NaCl::AtomicIntrinsics::RMW) |
| + continue; |
| + |
| + const Value *Operation = Call->getOperand(P); |
| + if (!Operation) |
| + return false; |
| + const Constant *C = dyn_cast<Constant>(Operation); |
| + if (!C) |
| + return false; |
| + const APInt &I = C->getUniqueInteger(); |
| + if (I.ule(NaCl::AtomicInvalid) || I.uge(NaCl::AtomicNum)) |
| + return false; |
| + } |
| + return true; |
| +} |
| + |
| +static bool hasAllowedAtomicMemoryOrder( |
| + NaCl::AtomicIntrinsics::const_iterator AI, const CallInst *Call) { |
| + for (size_t P = 0; P != AI->NumParams; ++P) { |
| + if (AI->ParamType[P] != NaCl::AtomicIntrinsics::Mem) |
| + continue; |
| + |
| + const Value *MemoryOrder = Call->getOperand(P); |
| + if (!MemoryOrder) |
| + return false; |
| + const Constant *C = dyn_cast<Constant>(MemoryOrder); |
| + if (!C) |
| + return false; |
| + const APInt &I = C->getUniqueInteger(); |
| + if (I.ule(NaCl::MemoryOrderInvalid) || I.uge(NaCl::MemoryOrderNum)) |
| + return false; |
| + // TODO For now only sequential consistency is allowed. When more |
| + // are allowed we need to validate that the memory order is |
| + // allowed on the specific atomic operation (e.g. no store |
| + // acquire, and relationship between success/failure memory |
| + // order on compare exchange). |
| + if (I != NaCl::MemoryOrderSequentiallyConsistent) |
| + return false; |
| + } |
| + return true; |
| +} |
| + |
| // Check the instruction's opcode and its operands. The operands may |
| // require opcode-specific checking. |
| // |
| // This returns an error string if the instruction is rejected, or |
| // NULL if the instruction is allowed. |
| -const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) { |
| +const char *PNaClABIVerifyFunctions::checkInstruction(LLVMContext &C, |
|
eliben
2013/07/03 16:06:05
I think you should not carry the Context and AtomicIntrinsics [comment truncated in capture]
JF
2013/07/03 20:58:35
Done.
|
| + const NaCl::AtomicIntrinsics &AI, const Instruction *Inst) { |
| // If the instruction has a single pointer operand, PtrOperandIndex is |
| // set to its operand index. |
| unsigned PtrOperandIndex = -1; |
| @@ -198,6 +237,10 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) { |
| // ExtractValue and InsertValue operate on struct values. |
| case Instruction::ExtractValue: |
| case Instruction::InsertValue: |
| + // Atomics should become NaCl intrinsics. |
| + case Instruction::AtomicCmpXchg: |
| + case Instruction::AtomicRMW: |
| + case Instruction::Fence: |
| return "bad instruction opcode"; |
| default: |
| return "unknown instruction opcode"; |
| @@ -216,8 +259,6 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) { |
| case Instruction::And: |
| case Instruction::Or: |
| case Instruction::Xor: |
| - // Memory instructions |
| - case Instruction::Fence: |
| // Conversion operations |
| case Instruction::Trunc: |
| case Instruction::ZExt: |
| @@ -256,32 +297,32 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) { |
| // Memory accesses. |
| case Instruction::Load: { |
| const LoadInst *Load = cast<LoadInst>(Inst); |
| + PtrOperandIndex = Load->getPointerOperandIndex(); |
| + if (Load->isAtomic()) |
| + return "atomic"; |
|
eliben
2013/07/03 16:06:05
Maybe "atomic load" / "volatile load", etc.? Same below. [comment truncated in capture]
JF
2013/07/03 20:58:35
Done. It's a bit redundant: see my corresponding comment. [comment truncated in capture]
|
| + if (Load->isVolatile()) |
| + return "volatile"; |
| if (!isAllowedAlignment(Load->getAlignment(), |
| - Load->getType(), |
| - Load->isAtomic())) |
| + Load->getType())) |
| return "bad alignment"; |
| - PtrOperandIndex = 0; |
| if (!isNormalizedPtr(Inst->getOperand(PtrOperandIndex))) |
| return "bad pointer"; |
| break; |
| } |
| case Instruction::Store: { |
| const StoreInst *Store = cast<StoreInst>(Inst); |
| + PtrOperandIndex = Store->getPointerOperandIndex(); |
| + if (Store->isAtomic()) |
| + return "atomic"; |
| + if (Store->isVolatile()) |
| + return "volatile"; |
| if (!isAllowedAlignment(Store->getAlignment(), |
| - Store->getValueOperand()->getType(), |
| - Store->isAtomic())) |
| + Store->getValueOperand()->getType())) |
| return "bad alignment"; |
| - PtrOperandIndex = 1; |
| if (!isNormalizedPtr(Inst->getOperand(PtrOperandIndex))) |
| return "bad pointer"; |
| break; |
| } |
| - case Instruction::AtomicCmpXchg: |
| - case Instruction::AtomicRMW: |
| - PtrOperandIndex = 0; |
| - if (!isNormalizedPtr(Inst->getOperand(PtrOperandIndex))) |
| - return "bad pointer"; |
| - break; |
| // Casts. |
| case Instruction::BitCast: |
| @@ -332,6 +373,7 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) { |
| isa<MDNode>(Arg))) |
| return "bad intrinsic operand"; |
| } |
| + |
| // Disallow alignments other than 1 on memcpy() etc., for the |
| // same reason that we disallow them on integer loads and |
| // stores. |
| @@ -344,6 +386,26 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) { |
| return "bad alignment"; |
| } |
| } |
| + |
| + // Disallow NaCl atomic intrinsics which don't have valid |
| + // constant NaCl::AtomicOperation and NaCl::MemoryOrder |
| + // parameters. |
| + switch (Call->getIntrinsicID()) { |
| + default: break; // Non-atomic intrinsic. |
| + case Intrinsic::nacl_atomic_load: |
| + case Intrinsic::nacl_atomic_store: |
| + case Intrinsic::nacl_atomic_rmw: |
| + case Intrinsic::nacl_atomic_cmpxchg: |
| + case Intrinsic::nacl_atomic_fence: { |
| + NaCl::AtomicIntrinsics::const_iterator I = |
| + AI.find(Call->getIntrinsicID(), Type::getInt32Ty(C)); |
| + if (!hasAllowedAtomicMemoryOrder(I, Call)) |
| + return "invalid memory order"; |
| + if (!hasAllowedAtomicRMWOperation(I, Call)) |
| + return "invalid atomicRMW operation"; |
| + } break; |
| + } |
| + |
| // Allow the instruction and skip the later checks. |
| return NULL; |
| } |
| @@ -414,7 +476,10 @@ const char *PNaClABIVerifyFunctions::checkInstruction(const Instruction *Inst) { |
| bool PNaClABIVerifyFunctions::runOnFunction(Function &F) { |
| SmallVector<StringRef, 8> MDNames; |
| - F.getContext().getMDKindNames(MDNames); |
| + LLVMContext &C = F.getContext(); |
| + C.getMDKindNames(MDNames); |
| + |
| + NaCl::AtomicIntrinsics AI(C); |
| for (Function::const_iterator FI = F.begin(), FE = F.end(); |
| FI != FE; ++FI) { |
| @@ -425,7 +490,7 @@ bool PNaClABIVerifyFunctions::runOnFunction(Function &F) { |
| // because some instruction opcodes must be rejected out of hand |
| // (regardless of the instruction's result type) and the tests |
| // check the reason for rejection. |
| - const char *Error = checkInstruction(BBI); |
| + const char *Error = checkInstruction(C, AI, BBI); |
| // Check the instruction's result type. |
| if (!Error && !(PNaClABITypeChecker::isValidScalarType(Inst->getType()) || |
| isNormalizedPtr(Inst) || |