Index: src/IceTargetLoweringX8632.cpp |
diff --git a/src/IceTargetLoweringX8632.cpp b/src/IceTargetLoweringX8632.cpp |
index 00db25a5a29823b3411b6b030f5fd174184aeb55..df73036c626b8652627f90f2661258d4e5b2ab71 100644 |
--- a/src/IceTargetLoweringX8632.cpp |
+++ b/src/IceTargetLoweringX8632.cpp |
@@ -114,6 +114,7 @@ const size_t TableTypeX8632AttributesSize = |
Type getInVectorElementType(Type Ty) { |
assert(isVectorType(Ty)); |
size_t Index = static_cast<size_t>(Ty); |
+ (void)Index; |
assert(Index < TableTypeX8632AttributesSize); |
return TableTypeX8632Attributes[Ty].InVectorElementType; |
} |
@@ -2036,6 +2037,7 @@ void TargetX8632::lowerCast(const InstCast *Inst) { |
Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem); |
Type DestType = Dest->getType(); |
Type SrcType = Src0RM->getType(); |
+ (void)DestType; |
assert((DestType == IceType_i32 && SrcType == IceType_f32) || |
(DestType == IceType_f32 && SrcType == IceType_i32)); |
// a.i32 = bitcast b.f32 ==> |
@@ -2734,8 +2736,8 @@ void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { |
// Make sure the atomic load isn't elided when unused, by adding a FakeUse. |
// Since lowerLoad may fuse the load w/ an arithmetic instruction, |
// insert the FakeUse on the last-inserted instruction's dest. |
- Context.insert(InstFakeUse::create(Func, |
- Context.getLastInserted()->getDest())); |
+        Context.insert( |
+            InstFakeUse::create(Func, Context.getLastInserted()->getDest())); |
[Review comment — Jim Stichnoth, 2014/07/30 18:30:58, on the two added lines above: "sorry, this snuck in despite 'make format-diff' :)"]
return; |
} |
case Intrinsics::AtomicRMW: |
@@ -3127,6 +3129,7 @@ void TargetX8632::lowerAtomicRMW(Variable *Dest, uint32_t Operation, |
return; |
} |
// Otherwise, we need a cmpxchg loop. |
+ (void)NeedsCmpxchg; |
assert(NeedsCmpxchg); |
expandAtomicRMWAsCmpxchg(Op_Lo, Op_Hi, Dest, Ptr, Val); |
} |