; Test dumping binary operations.

; RUN: llvm-as < %s | pnacl-freeze | pnacl-bccompress --remove-abbreviations \
; RUN:   | pnacl-bcdis | FileCheck %s

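; Binary instructions appear in the dump as records <2, op1, op2, opcode>,
; where 2 is the binop record code, the operands are encoded as relative
; value indices, and the final field selects the operator. As the records
; below show, the opcodes are: 0 = add, 1 = sub, 2 = mul, 3 = udiv,
; 4 = sdiv, 5 = urem, 6 = srem, 7 = shl, 8 = lshr, 9 = ashr, 10 = and,
; 11 = or, 12 = xor. The floating point forms reuse the same encoding
; (fadd = 0, fsub = 1, fmul = 2, fdiv = 4, frem = 6).
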
; Test integer binary operators.
define internal void @IntOps(i64 %p0) {
  ; Define different sized integer ops.
  %v0 = trunc i64 %p0 to i8
  %v1 = trunc i64 %p0 to i16
  %v2 = trunc i64 %p0 to i32
  %v3 = zext i32 %v2 to i64
  %v4 = trunc i32 %v2 to i1

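  ; The constant arguments below are emitted into the function's constants
  ; block, so the dump names them %c0 .. %c4 rather than showing literals.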
  %v5 = add i8 %v0, 1
  %v6 = add i16 %v1, 2
  %v7 = add i32 %v2, 3
  %v8 = add i64 %v3, 4

; CHECK: {{.*}}| 3: <2, 5, 8, 0> | %v5 = add i8 %v0, %c2;
; CHECK-NEXT: {{.*}}| 3: <2, 5, 8, 0> | %v6 = add i16 %v1, %c3;
; CHECK-NEXT: {{.*}}| 3: <2, 5, 11, 0> | %v7 = add i32 %v2, %c1;
; CHECK-NEXT: {{.*}}| 3: <2, 5, 13, 0> | %v8 = add i64 %v3, %c0;

  %v9 = sub i8 1, %v0
  %v10 = sub i16 2, %v1
  %v11 = sub i32 3, %v2
  %v12 = sub i64 4, %v3

; CHECK-NEXT: {{.*}}| 3: <2, 12, 9, 1> | %v9 = sub i8 %c2, %v0;
; CHECK-NEXT: {{.*}}| 3: <2, 12, 9, 1> | %v10 = sub i16 %c3, %v1;
; CHECK-NEXT: {{.*}}| 3: <2, 15, 9, 1> | %v11 = sub i32 %c1, %v2;
; CHECK-NEXT: {{.*}}| 3: <2, 17, 9, 1> | %v12 = sub i64 %c0, %v3;

  %v13 = mul i8 %v0, 1
  %v14 = mul i16 %v1, 2
  %v15 = mul i32 %v2, 3
  %v16 = mul i64 %v3, 4

; CHECK-NEXT: {{.*}}| 3: <2, 13, 16, 2> | %v13 = mul i8 %v0, %c2;
; CHECK-NEXT: {{.*}}| 3: <2, 13, 16, 2> | %v14 = mul i16 %v1, %c3;
; CHECK-NEXT: {{.*}}| 3: <2, 13, 19, 2> | %v15 = mul i32 %v2, %c1;
; CHECK-NEXT: {{.*}}| 3: <2, 13, 21, 2> | %v16 = mul i64 %v3, %c0;

  %v17 = udiv i8 %v0, 1
  %v18 = udiv i16 %v1, 2
  %v19 = udiv i32 %v2, 3
  %v20 = udiv i64 %v3, 4

; CHECK-NEXT: {{.*}}| 3: <2, 17, 20, 3> | %v17 = udiv i8 %v0, %c2;
; CHECK-NEXT: {{.*}}| 3: <2, 17, 20, 3> | %v18 = udiv i16 %v1, %c3;
; CHECK-NEXT: {{.*}}| 3: <2, 17, 23, 3> | %v19 = udiv i32 %v2, %c1;
; CHECK-NEXT: {{.*}}| 3: <2, 17, 25, 3> | %v20 = udiv i64 %v3, %c0;

  %v21 = sdiv i8 1, %v0
  %v22 = sdiv i16 2, %v1
  %v23 = sdiv i32 3, %v2
  %v24 = sdiv i64 4, %v3

; CHECK-NEXT: {{.*}}| 3: <2, 24, 21, 4> | %v21 = sdiv i8 %c2, %v0;
; CHECK-NEXT: {{.*}}| 3: <2, 24, 21, 4> | %v22 = sdiv i16 %c3, %v1;
; CHECK-NEXT: {{.*}}| 3: <2, 27, 21, 4> | %v23 = sdiv i32 %c1, %v2;
; CHECK-NEXT: {{.*}}| 3: <2, 29, 21, 4> | %v24 = sdiv i64 %c0, %v3;

  %v25 = urem i8 1, %v0
  %v26 = urem i16 2, %v1
  %v27 = urem i32 3, %v2
  %v28 = urem i64 4, %v3

; CHECK-NEXT: {{.*}}| 3: <2, 28, 25, 5> | %v25 = urem i8 %c2, %v0;
; CHECK-NEXT: {{.*}}| 3: <2, 28, 25, 5> | %v26 = urem i16 %c3, %v1;
; CHECK-NEXT: {{.*}}| 3: <2, 31, 25, 5> | %v27 = urem i32 %c1, %v2;
; CHECK-NEXT: {{.*}}| 3: <2, 33, 25, 5> | %v28 = urem i64 %c0, %v3;

  %v29 = srem i8 %v0, 1
  %v30 = srem i16 %v1, 2
  %v31 = srem i32 %v2, 3
  %v32 = srem i64 %v3, 4

; CHECK-NEXT: {{.*}}| 3: <2, 29, 32, 6> | %v29 = srem i8 %v0, %c2;
; CHECK-NEXT: {{.*}}| 3: <2, 29, 32, 6> | %v30 = srem i16 %v1, %c3;
; CHECK-NEXT: {{.*}}| 3: <2, 29, 35, 6> | %v31 = srem i32 %v2, %c1;
; CHECK-NEXT: {{.*}}| 3: <2, 29, 37, 6> | %v32 = srem i64 %v3, %c0;

  %v33 = shl i8 1, %v0
  %v34 = shl i16 2, %v1
  %v35 = shl i32 3, %v2
  %v36 = shl i64 4, %v3

; CHECK-NEXT: {{.*}}| 3: <2, 36, 33, 7> | %v33 = shl i8 %c2, %v0;
; CHECK-NEXT: {{.*}}| 3: <2, 36, 33, 7> | %v34 = shl i16 %c3, %v1;
; CHECK-NEXT: {{.*}}| 3: <2, 39, 33, 7> | %v35 = shl i32 %c1, %v2;
; CHECK-NEXT: {{.*}}| 3: <2, 41, 33, 7> | %v36 = shl i64 %c0, %v3;

  %v37 = lshr i8 %v0, 1
  %v38 = lshr i16 %v1, 2
  %v39 = lshr i32 %v2, 3
  %v40 = lshr i64 %v3, 4

; CHECK-NEXT: {{.*}}| 3: <2, 37, 40, 8> | %v37 = lshr i8 %v0, %c2;
; CHECK-NEXT: {{.*}}| 3: <2, 37, 40, 8> | %v38 = lshr i16 %v1, %c3;
; CHECK-NEXT: {{.*}}| 3: <2, 37, 43, 8> | %v39 = lshr i32 %v2, %c1;
; CHECK-NEXT: {{.*}}| 3: <2, 37, 45, 8> | %v40 = lshr i64 %v3, %c0;

  %v41 = ashr i8 %v0, 1
  %v42 = ashr i16 %v1, 2
  %v43 = ashr i32 %v2, 3
  %v44 = ashr i64 %v3, 4

; CHECK-NEXT: {{.*}}| 3: <2, 41, 44, 9> | %v41 = ashr i8 %v0, %c2;
; CHECK-NEXT: {{.*}}| 3: <2, 41, 44, 9> | %v42 = ashr i16 %v1, %c3;
; CHECK-NEXT: {{.*}}| 3: <2, 41, 47, 9> | %v43 = ashr i32 %v2, %c1;
; CHECK-NEXT: {{.*}}| 3: <2, 41, 49, 9> | %v44 = ashr i64 %v3, %c0;

  %v45 = and i1 %v4, 0
  %v46 = and i8 %v0, 1
  %v47 = and i16 %v1, 2
  %v48 = and i32 %v2, 3
  %v49 = and i64 %v3, 4

; CHECK-NEXT: {{.*}}| 3: <2, 41, 46, 10> | %v45 = and i1 %v4, %c4;
; CHECK-NEXT: {{.*}}| 3: <2, 46, 49, 10> | %v46 = and i8 %v0, %c2;
; CHECK-NEXT: {{.*}}| 3: <2, 46, 49, 10> | %v47 = and i16 %v1, %c3;
; CHECK-NEXT: {{.*}}| 3: <2, 46, 52, 10> | %v48 = and i32 %v2, %c1;
; CHECK-NEXT: {{.*}}| 3: <2, 46, 54, 10> | %v49 = and i64 %v3, %c0;

  %v50 = or i1 0, %v4
  %v51 = or i8 1, %v0
  %v52 = or i16 2, %v1
  %v53 = or i32 3, %v2
  %v54 = or i64 4, %v3

; CHECK-NEXT: {{.*}}| 3: <2, 51, 46, 11> | %v50 = or i1 %c4, %v4;
; CHECK-NEXT: {{.*}}| 3: <2, 54, 51, 11> | %v51 = or i8 %c2, %v0;
; CHECK-NEXT: {{.*}}| 3: <2, 54, 51, 11> | %v52 = or i16 %c3, %v1;
; CHECK-NEXT: {{.*}}| 3: <2, 57, 51, 11> | %v53 = or i32 %c1, %v2;
; CHECK-NEXT: {{.*}}| 3: <2, 59, 51, 11> | %v54 = or i64 %c0, %v3;

  %v55 = xor i1 %v4, 0
  %v56 = xor i8 %v0, 1
  %v57 = xor i16 %v1, 2
  %v58 = xor i32 %v2, 3
  %v59 = xor i64 %v3, 4

; CHECK-NEXT: {{.*}}| 3: <2, 51, 56, 12> | %v55 = xor i1 %v4, %c4;
; CHECK-NEXT: {{.*}}| 3: <2, 56, 59, 12> | %v56 = xor i8 %v0, %c2;
; CHECK-NEXT: {{.*}}| 3: <2, 56, 59, 12> | %v57 = xor i16 %v1, %c3;
; CHECK-NEXT: {{.*}}| 3: <2, 56, 62, 12> | %v58 = xor i32 %v2, %c1;
; CHECK-NEXT: {{.*}}| 3: <2, 56, 64, 12> | %v59 = xor i64 %v3, %c0;

  ret void
}



; Tests integer vector binary operations.
define internal void @IntVecOps(<16 x i8> %p0, <8 x i16> %p1, <4 x i32> %p2,
                                <4 x i1> %p3, <8 x i1> %p4, <16 x i1> %p5) {

; CHECK: | | %b0:

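  ; Every op below uses the same parameter for both operands, so the two
  ; relative operand indices in each record are equal.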
  %v0 = add <16 x i8> %p0, %p0
  %v1 = add <8 x i16> %p1, %p1
  %v2 = add <4 x i32> %p2, %p2

; CHECK-NEXT: {{.*}}| 3: <2, 6, 6, 0> | %v0 = add <16 x i8> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 6, 6, 0> | %v1 = add <8 x i16> %p1, %p1;
; CHECK-NEXT: {{.*}}| 3: <2, 6, 6, 0> | %v2 = add <4 x i32> %p2, %p2;

  %v3 = sub <16 x i8> %p0, %p0
  %v4 = sub <8 x i16> %p1, %p1
  %v5 = sub <4 x i32> %p2, %p2

; CHECK-NEXT: {{.*}}| 3: <2, 9, 9, 1> | %v3 = sub <16 x i8> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 9, 9, 1> | %v4 = sub <8 x i16> %p1, %p1;
; CHECK-NEXT: {{.*}}| 3: <2, 9, 9, 1> | %v5 = sub <4 x i32> %p2, %p2;

  %v6 = mul <16 x i8> %p0, %p0
  %v7 = mul <8 x i16> %p1, %p1
  %v8 = mul <4 x i32> %p2, %p2

; CHECK-NEXT: {{.*}}| 3: <2, 12, 12, 2> | %v6 = mul <16 x i8> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 12, 12, 2> | %v7 = mul <8 x i16> %p1, %p1;
; CHECK-NEXT: {{.*}}| 3: <2, 12, 12, 2> | %v8 = mul <4 x i32> %p2, %p2;

  %v9 = sdiv <16 x i8> %p0, %p0
  %v10 = sdiv <8 x i16> %p1, %p1
  %v11 = sdiv <4 x i32> %p2, %p2

; CHECK-NEXT: {{.*}}| 3: <2, 15, 15, 4> | %v9 = sdiv <16 x i8> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 15, 15, 4> | %v10 = sdiv <8 x i16> %p1, %p1;
; CHECK-NEXT: {{.*}}| 3: <2, 15, 15, 4> | %v11 = sdiv <4 x i32> %p2, %p2;

  %v12 = udiv <16 x i8> %p0, %p0
  %v13 = udiv <8 x i16> %p1, %p1
  %v14 = udiv <4 x i32> %p2, %p2

; CHECK-NEXT: {{.*}}| 3: <2, 18, 18, 3> | %v12 = udiv <16 x i8> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 18, 18, 3> | %v13 = udiv <8 x i16> %p1, %p1;
; CHECK-NEXT: {{.*}}| 3: <2, 18, 18, 3> | %v14 = udiv <4 x i32> %p2, %p2;

  %v15 = srem <16 x i8> %p0, %p0
  %v16 = srem <8 x i16> %p1, %p1
  %v17 = srem <4 x i32> %p2, %p2

; CHECK-NEXT: {{.*}}| 3: <2, 21, 21, 6> | %v15 = srem <16 x i8> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 21, 21, 6> | %v16 = srem <8 x i16> %p1, %p1;
; CHECK-NEXT: {{.*}}| 3: <2, 21, 21, 6> | %v17 = srem <4 x i32> %p2, %p2;

  %v18 = urem <16 x i8> %p0, %p0
  %v19 = urem <8 x i16> %p1, %p1
  %v20 = urem <4 x i32> %p2, %p2

; CHECK-NEXT: {{.*}}| 3: <2, 24, 24, 5> | %v18 = urem <16 x i8> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 24, 24, 5> | %v19 = urem <8 x i16> %p1, %p1;
; CHECK-NEXT: {{.*}}| 3: <2, 24, 24, 5> | %v20 = urem <4 x i32> %p2, %p2;

  %v21 = shl <16 x i8> %p0, %p0
  %v22 = shl <8 x i16> %p1, %p1
  %v23 = shl <4 x i32> %p2, %p2

; CHECK-NEXT: {{.*}}| 3: <2, 27, 27, 7> | %v21 = shl <16 x i8> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 27, 27, 7> | %v22 = shl <8 x i16> %p1, %p1;
; CHECK-NEXT: {{.*}}| 3: <2, 27, 27, 7> | %v23 = shl <4 x i32> %p2, %p2;

  %v24 = lshr <16 x i8> %p0, %p0
  %v25 = lshr <8 x i16> %p1, %p1
  %v26 = lshr <4 x i32> %p2, %p2

; CHECK-NEXT: {{.*}}| 3: <2, 30, 30, 8> | %v24 = lshr <16 x i8> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 30, 30, 8> | %v25 = lshr <8 x i16> %p1, %p1;
; CHECK-NEXT: {{.*}}| 3: <2, 30, 30, 8> | %v26 = lshr <4 x i32> %p2, %p2;

  %v27 = ashr <16 x i8> %p0, %p0
  %v28 = ashr <8 x i16> %p1, %p1
  %v29 = ashr <4 x i32> %p2, %p2

; CHECK-NEXT: {{.*}}| 3: <2, 33, 33, 9> | %v27 = ashr <16 x i8> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 33, 33, 9> | %v28 = ashr <8 x i16> %p1, %p1;
; CHECK-NEXT: {{.*}}| 3: <2, 33, 33, 9> | %v29 = ashr <4 x i32> %p2, %p2;

  %v30 = and <16 x i8> %p0, %p0
  %v31 = and <8 x i16> %p1, %p1
  %v32 = and <4 x i32> %p2, %p2
  %v34 = and <4 x i1> %p3, %p3
  %v35 = and <8 x i1> %p4, %p4
  %v36 = and <16 x i1> %p5, %p5

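; %v33 is skipped above; pnacl-bcdis numbers values densely, so the names
; in the dump are shifted down by one from here on.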
; CHECK-NEXT: {{.*}}| 3: <2, 36, 36, 10> | %v30 = and <16 x i8> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 36, 36, 10> | %v31 = and <8 x i16> %p1, %p1;
; CHECK-NEXT: {{.*}}| 3: <2, 36, 36, 10> | %v32 = and <4 x i32> %p2, %p2;
; CHECK-NEXT: {{.*}}| 3: <2, 36, 36, 10> | %v33 = and <4 x i1> %p3, %p3;
; CHECK-NEXT: {{.*}}| 3: <2, 36, 36, 10> | %v34 = and <8 x i1> %p4, %p4;
; CHECK-NEXT: {{.*}}| 3: <2, 36, 36, 10> | %v35 = and <16 x i1> %p5, %p5;

  %v37 = or <16 x i8> %p0, %p0
  %v38 = or <8 x i16> %p1, %p1
  %v39 = or <4 x i32> %p2, %p2
  %v41 = or <4 x i1> %p3, %p3
  %v42 = or <8 x i1> %p4, %p4
  %v43 = or <16 x i1> %p5, %p5

; CHECK-NEXT: {{.*}}| 3: <2, 42, 42, 11> | %v36 = or <16 x i8> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 42, 42, 11> | %v37 = or <8 x i16> %p1, %p1;
; CHECK-NEXT: {{.*}}| 3: <2, 42, 42, 11> | %v38 = or <4 x i32> %p2, %p2;
; CHECK-NEXT: {{.*}}| 3: <2, 42, 42, 11> | %v39 = or <4 x i1> %p3, %p3;
; CHECK-NEXT: {{.*}}| 3: <2, 42, 42, 11> | %v40 = or <8 x i1> %p4, %p4;
; CHECK-NEXT: {{.*}}| 3: <2, 42, 42, 11> | %v41 = or <16 x i1> %p5, %p5;

  %v44 = xor <16 x i8> %p0, %p0
  %v45 = xor <8 x i16> %p1, %p1
  %v46 = xor <4 x i32> %p2, %p2
  %v48 = xor <4 x i1> %p3, %p3
  %v49 = xor <8 x i1> %p4, %p4
  %v50 = xor <16 x i1> %p5, %p5
  ret void

; CHECK-NEXT: {{.*}}| 3: <2, 48, 48, 12> | %v42 = xor <16 x i8> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 48, 48, 12> | %v43 = xor <8 x i16> %p1, %p1;
; CHECK-NEXT: {{.*}}| 3: <2, 48, 48, 12> | %v44 = xor <4 x i32> %p2, %p2;
; CHECK-NEXT: {{.*}}| 3: <2, 48, 48, 12> | %v45 = xor <4 x i1> %p3, %p3;
; CHECK-NEXT: {{.*}}| 3: <2, 48, 48, 12> | %v46 = xor <8 x i1> %p4, %p4;
; CHECK-NEXT: {{.*}}| 3: <2, 48, 48, 12> | %v47 = xor <16 x i1> %p5, %p5;
; CHECK-NEXT: {{.*}}| 3: <10> | ret void;

}



; Test floating point binary operations.
define internal void @FloatOps(float %p0, double %p1) {

; CHECK: | | %b0:

  %v0 = fadd float %p0, 1.0
  %v1 = fadd double %p1, 2.0

; CHECK-NEXT: {{.*}}| 3: <2, 4, 2, 0> | %v0 = fadd float %p0, %c0;
; CHECK-NEXT: {{.*}}| 3: <2, 4, 2, 0> | %v1 = fadd double %p1, %c1;

  %v2 = fsub float %p0, 1.0
  %v3 = fsub double %p1, 2.0

; CHECK-NEXT: {{.*}}| 3: <2, 6, 4, 1> | %v2 = fsub float %p0, %c0;
; CHECK-NEXT: {{.*}}| 3: <2, 6, 4, 1> | %v3 = fsub double %p1, %c1;

  %v4 = fmul float %p0, 1.0
  %v5 = fmul double %p1, 2.0

; CHECK-NEXT: {{.*}}| 3: <2, 8, 6, 2> | %v4 = fmul float %p0, %c0;
; CHECK-NEXT: {{.*}}| 3: <2, 8, 6, 2> | %v5 = fmul double %p1, %c1;

  %v6 = fdiv float %p0, 1.0
  %v7 = fdiv double %p1, 2.0

; CHECK-NEXT: {{.*}}| 3: <2, 10, 8, 4> | %v6 = fdiv float %p0, %c0;
; CHECK-NEXT: {{.*}}| 3: <2, 10, 8, 4> | %v7 = fdiv double %p1, %c1;

  %v8 = frem float %p0, 1.0
  %v9 = frem double %p1, 2.0
  ret void

; CHECK-NEXT: {{.*}}| 3: <2, 12, 10, 6> | %v8 = frem float %p0, %c0;
; CHECK-NEXT: {{.*}}| 3: <2, 12, 10, 6> | %v9 = frem double %p1, %c1;
; CHECK-NEXT: {{.*}}| 3: <10> | ret void;

}


; Tests floating point vector binary operations.
define internal void @VecFloatOps(<4 x float> %p0) {

; CHECK: | | %b0:

  %v0 = fadd <4 x float> %p0, %p0
  %v2 = fsub <4 x float> %p0, %p0
  %v4 = fmul <4 x float> %p0, %p0
  %v6 = fdiv <4 x float> %p0, %p0
  %v8 = frem <4 x float> %p0, %p0
  ret void

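; The source skips the odd names (%v1, %v3, %v5, %v7); the dump renumbers
; the results densely as %v0 through %v4.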
; CHECK-NEXT: {{.*}}| 3: <2, 1, 1, 0> | %v0 = fadd <4 x float> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 2, 2, 1> | %v1 = fsub <4 x float> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 3, 3, 2> | %v2 = fmul <4 x float> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 4, 4, 4> | %v3 = fdiv <4 x float> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <2, 5, 5, 6> | %v4 = frem <4 x float> %p0, %p0;
; CHECK-NEXT: {{.*}}| 3: <10> | ret void;

}