; RUN: pnacl-llc -mtriple=x86_64-unknown-nacl -filetype=asm %s -O0 -o - \
; RUN: | FileCheck %s

; RUN: pnacl-llc -mtriple=x86_64-unknown-nacl -filetype=asm %s -O2 -o - \
; RUN: | FileCheck %s

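; The sandboxed memory operands appear in the generated asm as
; "nacl:<disp>(<base>,...)"; the CHECK patterns below inspect the
; displacement portion of those operands.
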
; Check that we don't try to fold a negative displacement into a memory
; reference.
define i16 @negativedisp(i32 %b) {
; CHECK: negativedisp
  %a = alloca [1 x i16], align 2
  %add = add nsw i32 1073741824, %b
  %arrayidx = getelementptr inbounds [1 x i16]* %a, i32 0, i32 %add
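; The constant part of the index, 1073741824, scales by the i16 element size
; to a byte offset of 2147483648, which would show up as the wrapped
; displacement -2147483648 if it were folded into the address.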
; CHECK-NOT: nacl:-2147483648(
  %c = load i16* %arrayidx, align 2
  ret i16 %c
}

@main.m2 = internal constant [1 x [1 x i32]] [[1 x i32] [i32 -60417067]], align 4
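
; @largeconst computes array indices from large constants; the checks below
; ensure the resulting constant is not folded into the sandboxed memory
; operand, which should keep %r15 as its base.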
define i1 @largeconst() nounwind {
; CHECK: largeconst
entry:
  %retval = alloca i32, align 4
  %i = alloca i32, align 4
  %j = alloca i32, align 4
  %madat = alloca i32*, align 4
  store i32 0, i32* %retval
  store i32 -270770481, i32* %i, align 4
  store i32 -1912319477, i32* %j, align 4
  %0 = load i32* %j, align 4
  %mul = mul nsw i32 %0, 233468377
  %add = add nsw i32 %mul, 689019309
  %1 = load i32* %i, align 4
  %mul1 = mul nsw i32 %1, 947877507
  %add2 = add nsw i32 %mul1, 1574375955
  %arrayidx = getelementptr inbounds [1 x i32]* getelementptr inbounds ([1 x [1 x i32]]* @main.m2, i32 0, i32 0), i32 %add2
  %2 = bitcast [1 x i32]* %arrayidx to i32*
  %arrayidx3 = getelementptr inbounds i32* %2, i32 %add
  store i32* %arrayidx3, i32** %madat, align 4
; Ensure the large constant doesn't get folded into the load
; CHECK: nacl:(%r15
  %3 = load i32** %madat, align 4
  %4 = load i32* %3, align 4
  %conv = zext i32 %4 to i64
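; The same index arithmetic is repeated so that a second, independent load of
; @main.m2 can be checked below.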
  %5 = load i32* %j, align 4
  %mul4 = mul nsw i32 %5, 233468377
  %add5 = add nsw i32 %mul4, 689019309
  %6 = load i32* %i, align 4
  %mul6 = mul nsw i32 %6, 947877507
  %add7 = add nsw i32 %mul6, 1574375955
  %arrayidx8 = getelementptr inbounds [1 x i32]* getelementptr inbounds ([1 x [1 x i32]]* @main.m2, i32 0, i32 0), i32 %add7
  %7 = bitcast [1 x i32]* %arrayidx8 to i32*
  %arrayidx9 = getelementptr inbounds i32* %7, i32 %add5
; Ensure the large constant doesn't get folded into the load
; CHECK: nacl:(%r15
  %8 = load i32* %arrayidx9, align 4
  %conv10 = zext i32 %8 to i64
  %mul11 = mul nsw i64 3795428823, %conv10
  %9 = load i32* %j, align 4
  %mul12 = mul nsw i32 %9, 233468377
  %add13 = add nsw i32 %mul12, 689019309
  %conv14 = sext i32 %add13 to i64
  %rem = srem i64 %conv14, 4294967295
  %xor = xor i64 2597389499, %rem
  %mul15 = mul nsw i64 %xor, 3795428823
  %sub = sub nsw i64 %mul11, %mul15
  %add16 = add nsw i64 %sub, 3829710203
  %mul17 = mul nsw i64 %add16, 2824337475
  %add18 = add nsw i64 %mul17, 2376483023
  %cmp = icmp eq i64 %conv, %add18
  ret i1 %cmp
}


@main.array = private unnamed_addr constant [1 x i64] [i64 1438933078946427748], align 8

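; @largeconst_frameindex is the same check, but the array lives on the stack,
; so the address is formed from a frame index rather than a global.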
define i1 @largeconst_frameindex() nounwind {
; CHECK: largeconst_frameindex
entry:
  %retval = alloca i32, align 4
  %r_Ng = alloca i64, align 8
  %i = alloca i32, align 4
  %adat = alloca i64*, align 4
  %array = alloca [1 x i64], align 8
  store i32 0, i32* %retval
  store i32 -270770481, i32* %i, align 4
  %0 = bitcast [1 x i64]* %array to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* bitcast ([1 x i64]* @main.array to i8*), i32 8, i32 8, i1 false)
  store i32 -270770481, i32* %i, align 4
  %1 = load i32* %i, align 4
  %mul = mul i32 %1, 947877507
  %add = add i32 %mul, 1574375955
  %2 = bitcast [1 x i64]* %array to i64*
  %arrayidx = getelementptr inbounds i64* %2, i32 %add
; Ensure the large constant didn't get folded into the load
; CHECK: nacl:(%r15
  %3 = load i64* %arrayidx, align 8
  %add1 = add i64 %3, -5707596139582126917
  %4 = load i32* %i, align 4
  %mul2 = mul i32 %4, 947877507
  %add3 = add i32 %mul2, 1574375955
  %5 = bitcast [1 x i64]* %array to i64*
  %arrayidx4 = getelementptr inbounds i64* %5, i32 %add3
  store i64 %add1, i64* %arrayidx4, align 8
  %6 = load i32* %i, align 4
  %mul5 = mul nsw i32 %6, 947877507
  %add6 = add nsw i32 %mul5, 1574375955
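; Same check as above, but here the array base is addressed through the frame
; index directly instead of through a bitcast pointer.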
  %arrayidx7 = getelementptr inbounds [1 x i64]* %array, i32 0, i32 %add6
; CHECK: nacl:(%r15
  %7 = load i64* %arrayidx7, align 8
  %add8 = add i64 %7, -5707596139582126917
  %8 = load i32* %i, align 4
  %mul9 = mul nsw i32 %8, 947877507
  %add10 = add nsw i32 %mul9, 1574375955
  %arrayidx11 = getelementptr inbounds [1 x i64]* %array, i32 0, i32 %add10
  store i64 %add8, i64* %arrayidx11, align 8
  %9 = load i32* %i, align 4
  %mul12 = mul nsw i32 %9, 947877507
  %add13 = add nsw i32 %mul12, 1574375955
  %10 = bitcast [1 x i64]* %array to i64*
  %arrayidx14 = getelementptr inbounds i64* %10, i32 %add13
  store i64* %arrayidx14, i64** %adat, align 4
  %11 = load i64** %adat, align 4
  %12 = load i64* %11, align 8
  %mul15 = mul i64 %12, -1731288434922394955
  %add16 = add i64 %mul15, -7745351015538694962
  store i64 %add16, i64* %r_Ng, align 8
  ret i1 0
}

declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind