OLD | NEW |
1 ; RUN: opt -S -instcombine < %s | FileCheck %s | 1 ; RUN: opt -S -instcombine < %s | FileCheck %s |
2 ; <rdar://problem/8558713> | 2 ; <rdar://problem/8558713> |
3 | 3 |
| 4 ; @LOCALMOD-BEGIN |
| 5 ; PNaCl does not support the with.overflow intrinsics in its stable |
| 6 ; ABI, so these optimizations are disabled. |
| 7 |
| 8 ; RUN: opt -S -instcombine < %s | FileCheck %s -check-prefix=PNACL |
| 9 ; PNACL-NOT: with.overflow |
| 10 |
4 declare void @throwAnExceptionOrWhatever() | 11 declare void @throwAnExceptionOrWhatever() |
5 | 12 |
6 ; CHECK: @test1 | 13 ; CHECK: @test1 |
7 define i32 @test1(i32 %a, i32 %b) nounwind ssp { | 14 define i32 @test1(i32 %a, i32 %b) nounwind ssp { |
8 entry: | 15 entry: |
9 ; CHECK-NOT: sext | 16 ; C;HECK-NOT: sext |
10 %conv = sext i32 %a to i64 | 17 %conv = sext i32 %a to i64 |
11 %conv2 = sext i32 %b to i64 | 18 %conv2 = sext i32 %b to i64 |
12 %add = add nsw i64 %conv2, %conv | 19 %add = add nsw i64 %conv2, %conv |
13 %add.off = add i64 %add, 2147483648 | 20 %add.off = add i64 %add, 2147483648 |
14 ; CHECK: llvm.sadd.with.overflow.i32 | 21 ; C;HECK: llvm.sadd.with.overflow.i32 |
15 %0 = icmp ugt i64 %add.off, 4294967295 | 22 %0 = icmp ugt i64 %add.off, 4294967295 |
16 br i1 %0, label %if.then, label %if.end | 23 br i1 %0, label %if.then, label %if.end |
17 | 24 |
18 if.then: | 25 if.then: |
19 tail call void @throwAnExceptionOrWhatever() nounwind | 26 tail call void @throwAnExceptionOrWhatever() nounwind |
20 br label %if.end | 27 br label %if.end |
21 | 28 |
22 if.end: | 29 if.end: |
23 ; CHECK-NOT: trunc | 30 ; C;HECK-NOT: trunc |
24 %conv9 = trunc i64 %add to i32 | 31 %conv9 = trunc i64 %add to i32 |
25 ; CHECK: ret i32 | 32 ; C;HECK: ret i32 |
26 ret i32 %conv9 | 33 ret i32 %conv9 |
27 } | 34 } |
28 | 35 |
29 ; CHECK: @test2 | 36 ; CHECK: @test2 |
30 ; This form should not be promoted for two reasons: 1) it is unprofitable to | 37 ; This form should not be promoted for two reasons: 1) it is unprofitable to |
31 ; promote it since the add.off instruction has another use, and 2) it is unsafe | 38 ; promote it since the add.off instruction has another use, and 2) it is unsafe |
32 ; because the add-with-off makes the high bits of the original add live. | 39 ; because the add-with-off makes the high bits of the original add live. |
33 define i32 @test2(i32 %a, i32 %b, i64* %P) nounwind ssp { | 40 define i32 @test2(i32 %a, i32 %b, i64* %P) nounwind ssp { |
34 entry: | 41 entry: |
35 %conv = sext i32 %a to i64 | 42 %conv = sext i32 %a to i64 |
(...skipping 43 matching lines...)
79 ; CHECK: @test4 | 86 ; CHECK: @test4 |
80 ; Should be able to form an i8 sadd computed in an i32. | 87 ; Should be able to form an i8 sadd computed in an i32. |
81 define zeroext i8 @test4(i8 signext %a, i8 signext %b) nounwind ssp { | 88 define zeroext i8 @test4(i8 signext %a, i8 signext %b) nounwind ssp { |
82 entry: | 89 entry: |
83 %conv = sext i8 %a to i32 | 90 %conv = sext i8 %a to i32 |
84 %conv2 = sext i8 %b to i32 | 91 %conv2 = sext i8 %b to i32 |
85 %add = add nsw i32 %conv2, %conv | 92 %add = add nsw i32 %conv2, %conv |
86 %add4 = add nsw i32 %add, 128 | 93 %add4 = add nsw i32 %add, 128 |
87 %cmp = icmp ugt i32 %add4, 255 | 94 %cmp = icmp ugt i32 %add4, 255 |
88 br i1 %cmp, label %if.then, label %if.end | 95 br i1 %cmp, label %if.then, label %if.end |
89 ; CHECK: llvm.sadd.with.overflow.i8 | 96 ; C;HECK: llvm.sadd.with.overflow.i8 |
90 if.then: ; preds = %entry | 97 if.then: ; preds = %entry |
91 tail call void @throwAnExceptionOrWhatever() nounwind | 98 tail call void @throwAnExceptionOrWhatever() nounwind |
92 unreachable | 99 unreachable |
93 | 100 |
94 if.end: ; preds = %entry | 101 if.end: ; preds = %entry |
95 %conv7 = trunc i32 %add to i8 | 102 %conv7 = trunc i32 %add to i8 |
96 ret i8 %conv7 | 103 ret i8 %conv7 |
97 ; CHECK: ret i8 | 104 ; CHECK: ret i8 |
98 } | 105 } |
99 | 106 |
100 ; CHECK: @test5 | 107 ; CHECK: @test5 |
101 ; CHECK: llvm.uadd.with.overflow | 108 ; C;HECK: llvm.uadd.with.overflow |
102 ; CHECK: ret i64 | 109 ; CHECK: ret i64 |
103 define i64 @test5(i64 %a, i64 %b) nounwind ssp { | 110 define i64 @test5(i64 %a, i64 %b) nounwind ssp { |
104 entry: | 111 entry: |
105 %add = add i64 %b, %a | 112 %add = add i64 %b, %a |
106 %cmp = icmp ult i64 %add, %a | 113 %cmp = icmp ult i64 %add, %a |
107 %Q = select i1 %cmp, i64 %b, i64 42 | 114 %Q = select i1 %cmp, i64 %b, i64 42 |
108 ret i64 %Q | 115 ret i64 %Q |
109 } | 116 } |
110 | 117 |
111 ; CHECK: @test6 | 118 ; CHECK: @test6 |
112 ; CHECK: llvm.uadd.with.overflow | 119 ; C;HECK: llvm.uadd.with.overflow |
113 ; CHECK: ret i64 | 120 ; CHECK: ret i64 |
114 define i64 @test6(i64 %a, i64 %b) nounwind ssp { | 121 define i64 @test6(i64 %a, i64 %b) nounwind ssp { |
115 entry: | 122 entry: |
116 %add = add i64 %b, %a | 123 %add = add i64 %b, %a |
117 %cmp = icmp ult i64 %add, %b | 124 %cmp = icmp ult i64 %add, %b |
118 %Q = select i1 %cmp, i64 %b, i64 42 | 125 %Q = select i1 %cmp, i64 %b, i64 42 |
119 ret i64 %Q | 126 ret i64 %Q |
120 } | 127 } |
121 | 128 |
122 ; CHECK: @test7 | 129 ; CHECK: @test7 |
123 ; CHECK: llvm.uadd.with.overflow | 130 ; C;HECK: llvm.uadd.with.overflow |
124 ; CHECK: ret i64 | 131 ; CHECK: ret i64 |
125 define i64 @test7(i64 %a, i64 %b) nounwind ssp { | 132 define i64 @test7(i64 %a, i64 %b) nounwind ssp { |
126 entry: | 133 entry: |
127 %add = add i64 %b, %a | 134 %add = add i64 %b, %a |
128 %cmp = icmp ugt i64 %b, %add | 135 %cmp = icmp ugt i64 %b, %add |
129 %Q = select i1 %cmp, i64 %b, i64 42 | 136 %Q = select i1 %cmp, i64 %b, i64 42 |
130 ret i64 %Q | 137 ret i64 %Q |
131 } | 138 } |
132 | 139 |
133 ; CHECK: @test8 | 140 ; CHECK: @test8 |
(...skipping 12 matching lines...)
146 br i1 %0, label %if.then, label %if.end | 153 br i1 %0, label %if.then, label %if.end |
147 | 154 |
148 if.then: | 155 if.then: |
149 tail call void @throwAnExceptionOrWhatever() nounwind | 156 tail call void @throwAnExceptionOrWhatever() nounwind |
150 br label %if.end | 157 br label %if.end |
151 | 158 |
152 if.end: | 159 if.end: |
153 %conv9 = trunc i64 %add to i32 | 160 %conv9 = trunc i64 %add to i32 |
154 ret i32 %conv9 | 161 ret i32 %conv9 |
155 } | 162 } |
| 163 |
| 164 ; @LOCALMOD-END |
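For reference, a minimal standalone sketch of the signed form that the neutered checks in @test1 originally guarded: with the transformation enabled, InstCombine rewrites the sext/add/range-check pattern into a call to llvm.sadd.with.overflow.i32 and branches on the overflow bit. The function name @test1_promoted is illustrative only and is not part of this patch.

; Illustrative sketch (not part of the patch): the promoted form of @test1
; that the disabled optimization would normally produce.
declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
declare void @throwAnExceptionOrWhatever()

define i32 @test1_promoted(i32 %a, i32 %b) nounwind ssp {
entry:
  %sadd = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %b, i32 %a)
  %sum = extractvalue { i32, i1 } %sadd, 0
  %ovf = extractvalue { i32, i1 } %sadd, 1
  br i1 %ovf, label %if.then, label %if.end

if.then:
  tail call void @throwAnExceptionOrWhatever() nounwind
  br label %if.end

if.end:
  ret i32 %sum
}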
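Similarly, a sketch of the unsigned form behind the llvm.uadd.with.overflow checks in @test5 through @test7, where the "a + b < a" comparison is the unsigned-overflow idiom. The name @test5_promoted is assumed for illustration, not taken from the patch.

; Illustrative sketch (not part of the patch): the promoted form of @test5,
; using only the overflow flag of the intrinsic in the select.
declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)

define i64 @test5_promoted(i64 %a, i64 %b) nounwind ssp {
entry:
  %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %b, i64 %a)
  %ovf = extractvalue { i64, i1 } %uadd, 1
  %Q = select i1 %ovf, i64 %b, i64 42
  ret i64 %Q
}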