Index: lib/CodeGen/TargetInfo.cpp
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 6fe0de3e8c347cfba57903949330c4f40a76c96d..9882c395be90e080c4048301b9326cde3c78a8fe 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -1114,10 +1114,15 @@ class X86_64ABIInfo : public ABIInfo {
   }
 
   bool HasAVX;
+  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32-bit pointers on
+  // 64-bit hardware.
+  bool Has64BitPointers;
 
 public:
   X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
-      ABIInfo(CGT), HasAVX(hasavx) {}
+      ABIInfo(CGT), HasAVX(hasavx),
+      Has64BitPointers(CGT.getDataLayout().getPointerSize() == 8) {
+  }
 
   bool isPassedUsingAVXType(QualType type) const {
     unsigned neededInt, neededSSE;
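The new flag keys off the module data layout, where getPointerSize() reports the pointer width in bytes rather than bits. A minimal sketch of that check (not part of the patch), assuming a recent LLVM tree where the class lives in llvm/IR/DataLayout.h and using made-up layout strings purely for illustration:

  #include "llvm/IR/DataLayout.h"
  #include <iostream>

  int main() {
    llvm::DataLayout LP64("e-p:64:64");  // conventional x86-64: 8-byte pointers
    llvm::DataLayout ILP32("e-p:32:32"); // x32/NaCl style: 4-byte pointers
    std::cout << LP64.getPointerSize() << "\n";  // prints 8  -> Has64BitPointers would be true
    std::cout << ILP32.getPointerSize() << "\n"; // prints 4  -> Has64BitPointers would be false
    return 0;
  }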
@@ -1154,7 +1159,7 @@ public:
 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
 public:
   X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
-    : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
+    : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
 
   const X86_64ABIInfo &getABIInfo() const {
     return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
@@ -1327,7 +1332,10 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
       Hi = Integer;
     } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
       Current = Integer;
-    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
+    } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
+               (k == BuiltinType::LongDouble &&
+                getContext().getTargetInfo().getTriple().getOS() ==
+                llvm::Triple::NativeClient)) {
       Current = SSE;
     } else if (k == BuiltinType::LongDouble) {
       Lo = X87;
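The practical effect of the extra LongDouble case above, as a hedged illustration rather than anything taken from the patch (it assumes long double on x86-64 NaCl is a 64-bit IEEE double, which is what classifying it as SSE implies):

  // On conventional x86-64 System V, 'y' is classified X87/X87UP and passed
  // on the stack; with the NaCl branch above, 'y' is classified SSE and
  // passed in an XMM register, just like 'x'.
  extern "C" double scale(double x, long double y) {
    return x * static_cast<double>(y);
  }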
@@ -1350,7 +1358,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
   }
 
   if (Ty->isMemberPointerType()) {
-    if (Ty->isMemberFunctionPointerType())
+    if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
       Lo = Hi = Integer;
     else
       Current = Integer;
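Why member function pointers care about pointer width: under the Itanium C++ ABI they are a {ptr, adj} pair of pointer-sized words (strictly, ptrdiff_t-sized, which matches the pointer width on the targets discussed here). A hedged sketch, assuming that ABI:

  struct S { int f(int); };
  typedef int (S::*MemFn)(int);
  // 16 bytes with 64-bit pointers -> spans two eightbytes (Lo = Hi = Integer);
  // 8 bytes with 32-bit pointers (x32/NaCl) -> one eightbyte (Current = Integer).
  static_assert(sizeof(MemFn) == 2 * sizeof(void *),
                "pair of pointer-sized fields under the Itanium ABI");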
@@ -1413,7 +1421,10 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
         Lo = Hi = Integer;
     } else if (ET == getContext().FloatTy)
       Current = SSE;
-    else if (ET == getContext().DoubleTy)
+    else if (ET == getContext().DoubleTy ||
+             (ET == getContext().LongDoubleTy &&
+              getContext().getTargetInfo().getTriple().getOS() ==
+              llvm::Triple::NativeClient))
       Lo = Hi = SSE;
     else if (ET == getContext().LongDoubleTy)
       Current = ComplexX87;
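The complex case mirrors the scalar long double change above. A hedged sketch (not part of the patch), using Clang's _Complex extension in C++ and again assuming NaCl's 64-bit long double:

  // With 64-bit long double (NaCl), both halves are doubles, so the value
  // classifies as SSE/SSE exactly like _Complex double; with the usual
  // 80-bit long double it takes the existing ComplexX87 path instead.
  extern "C" _Complex long double negate(_Complex long double z) {
    return -z;
  }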
@@ -1861,7 +1872,8 @@ GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
   // returning an 8-byte unit starting with it. See if we can safely use it.
   if (IROffset == 0) {
     // Pointers and int64's always fill the 8-byte unit.
-    if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64))
+    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
+        IRType->isIntegerTy(64))
       return IRType;
 
     // If we have a 1/2/4-byte integer, we can use it only if the rest of the
@@ -1871,8 +1883,10 @@
     // have to do this analysis on the source type because we can't depend on
     // unions being lowered a specific way etc.
     if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
-        IRType->isIntegerTy(32)) {
-      unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth();
+        IRType->isIntegerTy(32) ||
+        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
+      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
+          cast<llvm::IntegerType>(IRType)->getBitWidth();
       if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                 SourceOffset*8+64, getContext()))