Index: lib/CodeGen/CGBuiltin.cpp
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index a655966d6e32e34423c096f59efd96da9b942cf8..b2decffefafe37d465c870bc75015f00c98cf8a3 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -20,6 +20,7 @@
 #include "clang/Basic/TargetBuiltins.h"
 #include "clang/Basic/TargetInfo.h"
 #include "llvm/IR/DataLayout.h"
+#include "llvm/IR/InlineAsm.h" // @LOCALMOD
 #include "llvm/IR/Intrinsics.h"
 using namespace clang;
@@ -1034,7 +1035,27 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     // any way to safely use it... but in practice, it mostly works
     // to use it with non-atomic loads and stores to get acquire/release
     // semantics.
+    // @LOCALMOD-START
+    // For PNaCl, surround __sync_synchronize with compiler fences. This
+    // should enforce ordering of more than just atomic memory accesses,
+    // though it won't guarantee that all accesses (e.g. those to
+    // non-escaping objects) will not be reordered.
+    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+    std::string AsmString; // Empty.
+    std::string Constraints("~{memory}");
+    bool HasSideEffect = true;
+    if (getTarget().getTriple().getArch() == llvm::Triple::le32)
eliben, 2013/08/06 16:26:56:
  It would be cleaner to have a single LOCALMOD if-br…

JF, 2013/08/07 17:58:28:
  Done, and as suggested offline the code now uses T…
+      Builder.CreateCall(
+          llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect))->
+          addAttribute(llvm::AttributeSet::FunctionIndex,
+                       llvm::Attribute::NoUnwind);
     Builder.CreateFence(llvm::SequentiallyConsistent);
+    if (getTarget().getTriple().getArch() == llvm::Triple::le32)
+      Builder.CreateCall(
+          llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect))->
+          addAttribute(llvm::AttributeSet::FunctionIndex,
+                       llvm::Attribute::NoUnwind);
+    // @LOCALMOD-END
     return RValue::get(0);
   }
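
For reference, a minimal usage sketch of what this change buys callers (the scenario and names below are illustrative, not taken from this CL). On le32 the builtin now lowers to an empty inline asm with a "~{memory}" clobber (a compiler-only fence), the seq_cst hardware fence, and a second compiler fence, so surrounding loads and stores cannot be moved across the builtin at compile time:

    // Illustrative sketch only, not part of this CL: a plain flag handoff
    // where __sync_synchronize() must keep the two stores ordered.
    static int data;
    static volatile int ready;

    void producer(void) {
      data = 42;
      // With this patch, on le32 the builtin emits, in order:
      //   call void asm sideeffect "", "~{memory}"()   ; compiler fence
      //   fence seq_cst                                ; hardware fence
      //   call void asm sideeffect "", "~{memory}"()   ; compiler fence
      __sync_synchronize();
      ready = 1; // Not reorderable before the store to 'data'.
    }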