OLD | NEW |
(Empty) | |
| 1 ; Tests the icmp instruction. |
| 2 |
| 3 ; RUN: llvm-as < %s | pnacl-freeze | pnacl-bccompress --remove-abbreviations \ |
| 4 ; RUN: | pnacl-bcdis | FileCheck %s |
| 5 |
| 6 ; Test icmp on primitive types. |
| 7 define void @SimpleIcmpOps() { |
| 8 |
| 9 ; CHECK: {{.*}}| 1: <65535, 11, 2> | constants { // BlockID = 11 |
| 10 ; CHECK-NEXT: {{.*}}| 3: <1, 0> | i1: |
| 11 ; CHECK-NEXT: {{.*}}| 3: <4, 0> | %c0 = i1 0; |
| 12 ; CHECK-NEXT: {{.*}}| 3: <1, 4> | i8: |
| 13 ; CHECK-NEXT: {{.*}}| 3: <4, 2> | %c1 = i8 1; |
| 14 ; CHECK-NEXT: {{.*}}| 3: <1, 6> | i16: |
| 15 ; CHECK-NEXT: {{.*}}| 3: <4, 4> | %c2 = i16 2; |
| 16 ; CHECK-NEXT: {{.*}}| 3: <1, 8> | i32: |
| 17 ; CHECK-NEXT: {{.*}}| 3: <4, 6> | %c3 = i32 3; |
| 18 ; CHECK-NEXT: {{.*}}| 3: <1, 10> | i64: |
| 19 ; CHECK-NEXT: {{.*}}| 3: <4, 8> | %c4 = i64 4; |
| 20 ; CHECK-NEXT: {{.*}}| 0: <65534> | } |
| 21 ; CHECK-NEXT: | | %b0: |
| 22 |
| 23 %v0 = icmp eq i1 0, 0 |
| 24 %v1 = icmp eq i8 1, 1 |
| 25 %v2 = icmp eq i16 2, 2 |
| 26 %v3 = icmp eq i32 3, 3 |
| 27 %v4 = icmp eq i64 4, 4 |
| 28 |
| 29 ; CHECK-NEXT: {{.*}}| 3: <28, 5, 5, 32> | %v0 = icmp eq i1 %c0, %c0; |
| 30 ; CHECK-NEXT: {{.*}}| 3: <28, 5, 5, 32> | %v1 = icmp eq i8 %c1, %c1; |
| 31 ; CHECK-NEXT: {{.*}}| 3: <28, 5, 5, 32> | %v2 = icmp eq i16 %c2, %c2; |
| 32 ; CHECK-NEXT: {{.*}}| 3: <28, 5, 5, 32> | %v3 = icmp eq i32 %c3, %c3; |
| 33 ; CHECK-NEXT: {{.*}}| 3: <28, 5, 5, 32> | %v4 = icmp eq i64 %c4, %c4; |
| 34 |
| 35 %v5 = icmp ne i1 0, 0 |
| 36 %v6 = icmp ne i8 1, 1 |
| 37 %v7 = icmp ne i16 2, 2 |
| 38 %v8 = icmp ne i32 3, 3 |
| 39 %v9 = icmp ne i64 4, 4 |
| 40 |
| 41 ; CHECK-NEXT: {{.*}}| 3: <28, 10, 10, 33> | %v5 = icmp ne i1 %c0, %c0; |
| 42 ; CHECK-NEXT: {{.*}}| 3: <28, 10, 10, 33> | %v6 = icmp ne i8 %c1, %c1; |
| 43 ; CHECK-NEXT: {{.*}}| 3: <28, 10, 10, 33> | %v7 = icmp ne i16 %c2, %c2; |
| 44 ; CHECK-NEXT: {{.*}}| 3: <28, 10, 10, 33> | %v8 = icmp ne i32 %c3, %c3; |
| 45 ; CHECK-NEXT: {{.*}}| 3: <28, 10, 10, 33> | %v9 = icmp ne i64 %c4, %c4; |
| 46 |
| 47 %v10 = icmp ugt i1 0, 0 |
| 48 %v11 = icmp ugt i8 1, 1 |
| 49 %v12 = icmp ugt i16 2, 2 |
| 50 %v13 = icmp ugt i32 3, 3 |
| 51 %v14 = icmp ugt i64 4, 4 |
| 52 |
| 53 ; CHECK-NEXT: {{.*}}| 3: <28, 15, 15, 34> | %v10 = icmp ugt i1 %c0, %c0; |
| 54 ; CHECK-NEXT: {{.*}}| 3: <28, 15, 15, 34> | %v11 = icmp ugt i8 %c1, %c1; |
| 55 ; CHECK-NEXT: {{.*}}| 3: <28, 15, 15, 34> | %v12 = icmp ugt i16 %c2, %c2; |
| 56 ; CHECK-NEXT: {{.*}}| 3: <28, 15, 15, 34> | %v13 = icmp ugt i32 %c3, %c3; |
| 57 ; CHECK-NEXT: {{.*}}| 3: <28, 15, 15, 34> | %v14 = icmp ugt i64 %c4, %c4; |
| 58 |
| 59 %v15 = icmp uge i1 0, 0 |
| 60 %v16 = icmp uge i8 1, 1 |
| 61 %v17 = icmp uge i16 2, 2 |
| 62 %v18 = icmp uge i32 3, 3 |
| 63 %v19 = icmp uge i64 4, 4 |
| 64 |
| 65 ; CHECK-NEXT: {{.*}}| 3: <28, 20, 20, 35> | %v15 = icmp uge i1 %c0, %c0; |
| 66 ; CHECK-NEXT: {{.*}}| 3: <28, 20, 20, 35> | %v16 = icmp uge i8 %c1, %c1; |
| 67 ; CHECK-NEXT: {{.*}}| 3: <28, 20, 20, 35> | %v17 = icmp uge i16 %c2, %c2; |
| 68 ; CHECK-NEXT: {{.*}}| 3: <28, 20, 20, 35> | %v18 = icmp uge i32 %c3, %c3; |
| 69 ; CHECK-NEXT: {{.*}}| 3: <28, 20, 20, 35> | %v19 = icmp uge i64 %c4, %c4; |
| 70 |
| 71 %v20 = icmp ult i1 0, 0 |
| 72 %v21 = icmp ult i8 1, 1 |
| 73 %v22 = icmp ult i16 2, 2 |
| 74 %v23 = icmp ult i32 3, 3 |
| 75 %v24 = icmp ult i64 4, 4 |
| 76 |
| 77 ; CHECK-NEXT: {{.*}}| 3: <28, 25, 25, 36> | %v20 = icmp ult i1 %c0, %c0; |
| 78 ; CHECK-NEXT: {{.*}}| 3: <28, 25, 25, 36> | %v21 = icmp ult i8 %c1, %c1; |
| 79 ; CHECK-NEXT: {{.*}}| 3: <28, 25, 25, 36> | %v22 = icmp ult i16 %c2, %c2; |
| 80 ; CHECK-NEXT: {{.*}}| 3: <28, 25, 25, 36> | %v23 = icmp ult i32 %c3, %c3; |
| 81 ; CHECK-NEXT: {{.*}}| 3: <28, 25, 25, 36> | %v24 = icmp ult i64 %c4, %c4; |
| 82 |
| 83 %v25 = icmp ule i1 0, 0 |
| 84 %v26 = icmp ule i8 1, 1 |
| 85 %v27 = icmp ule i16 2, 2 |
| 86 %v28 = icmp ule i32 3, 3 |
| 87 %v29 = icmp ule i64 4, 4 |
| 88 |
| 89 ; CHECK-NEXT: {{.*}}| 3: <28, 30, 30, 37> | %v25 = icmp ule i1 %c0, %c0; |
| 90 ; CHECK-NEXT: {{.*}}| 3: <28, 30, 30, 37> | %v26 = icmp ule i8 %c1, %c1; |
| 91 ; CHECK-NEXT: {{.*}}| 3: <28, 30, 30, 37> | %v27 = icmp ule i16 %c2, %c2; |
| 92 ; CHECK-NEXT: {{.*}}| 3: <28, 30, 30, 37> | %v28 = icmp ule i32 %c3, %c3; |
| 93 ; CHECK-NEXT: {{.*}}| 3: <28, 30, 30, 37> | %v29 = icmp ule i64 %c4, %c4; |
| 94 |
| 95 %v30 = icmp sgt i1 0, 0 |
| 96 %v31 = icmp sgt i8 1, 1 |
| 97 %v32 = icmp sgt i16 2, 2 |
| 98 %v33 = icmp sgt i32 3, 3 |
| 99 %v34 = icmp sgt i64 4, 4 |
| 100 |
| 101 ; CHECK-NEXT: {{.*}}| 3: <28, 35, 35, 38> | %v30 = icmp sgt i1 %c0, %c0; |
| 102 ; CHECK-NEXT: {{.*}}| 3: <28, 35, 35, 38> | %v31 = icmp sgt i8 %c1, %c1; |
| 103 ; CHECK-NEXT: {{.*}}| 3: <28, 35, 35, 38> | %v32 = icmp sgt i16 %c2, %c2; |
| 104 ; CHECK-NEXT: {{.*}}| 3: <28, 35, 35, 38> | %v33 = icmp sgt i32 %c3, %c3; |
| 105 ; CHECK-NEXT: {{.*}}| 3: <28, 35, 35, 38> | %v34 = icmp sgt i64 %c4, %c4; |
| 106 |
| 107 %v35 = icmp sge i1 0, 0 |
| 108 %v36 = icmp sge i8 1, 1 |
| 109 %v37 = icmp sge i16 2, 2 |
| 110 %v38 = icmp sge i32 3, 3 |
| 111 %v39 = icmp sge i64 4, 4 |
| 112 |
| 113 ; CHECK-NEXT: {{.*}}| 3: <28, 40, 40, 39> | %v35 = icmp sge i1 %c0, %c0; |
| 114 ; CHECK-NEXT: {{.*}}| 3: <28, 40, 40, 39> | %v36 = icmp sge i8 %c1, %c1; |
| 115 ; CHECK-NEXT: {{.*}}| 3: <28, 40, 40, 39> | %v37 = icmp sge i16 %c2, %c2; |
| 116 ; CHECK-NEXT: {{.*}}| 3: <28, 40, 40, 39> | %v38 = icmp sge i32 %c3, %c3; |
| 117 ; CHECK-NEXT: {{.*}}| 3: <28, 40, 40, 39> | %v39 = icmp sge i64 %c4, %c4; |
| 118 |
| 119 %v40 = icmp slt i1 0, 0 |
| 120 %v41 = icmp slt i8 1, 1 |
| 121 %v42 = icmp slt i16 2, 2 |
| 122 %v43 = icmp slt i32 3, 3 |
| 123 %v44 = icmp slt i64 4, 4 |
| 124 |
| 125 ; CHECK-NEXT: {{.*}}| 3: <28, 45, 45, 40> | %v40 = icmp slt i1 %c0, %c0; |
| 126 ; CHECK-NEXT: {{.*}}| 3: <28, 45, 45, 40> | %v41 = icmp slt i8 %c1, %c1; |
| 127 ; CHECK-NEXT: {{.*}}| 3: <28, 45, 45, 40> | %v42 = icmp slt i16 %c2, %c2; |
| 128 ; CHECK-NEXT: {{.*}}| 3: <28, 45, 45, 40> | %v43 = icmp slt i32 %c3, %c3; |
| 129 ; CHECK-NEXT: {{.*}}| 3: <28, 45, 45, 40> | %v44 = icmp slt i64 %c4, %c4; |
| 130 |
| 131 %v45 = icmp sle i1 0, 0 |
| 132 %v46 = icmp sle i8 1, 1 |
| 133 %v47 = icmp sle i16 2, 2 |
| 134 %v48 = icmp sle i32 3, 3 |
| 135 %v49 = icmp sle i64 4, 4 |
| 136 |
| 137 ; CHECK-NEXT: {{.*}}| 3: <28, 50, 50, 41> | %v45 = icmp sle i1 %c0, %c0; |
| 138 ; CHECK-NEXT: {{.*}}| 3: <28, 50, 50, 41> | %v46 = icmp sle i8 %c1, %c1; |
| 139 ; CHECK-NEXT: {{.*}}| 3: <28, 50, 50, 41> | %v47 = icmp sle i16 %c2, %c2; |
| 140 ; CHECK-NEXT: {{.*}}| 3: <28, 50, 50, 41> | %v48 = icmp sle i32 %c3, %c3; |
| 141 ; CHECK-NEXT: {{.*}}| 3: <28, 50, 50, 41> | %v49 = icmp sle i64 %c4, %c4; |
| 142 |
| 143 ; Verify that the icmp results have type i1. |
| 144 %v50 = and i1 %v0, %v1 |
| 145 %v51 = and i1 %v2, %v3 |
| 146 %v52 = and i1 %v4, %v5 |
| 147 %v53 = and i1 %v6, %v7 |
| 148 %v54 = and i1 %v8, %v9 |
| 149 %v55 = and i1 %v10, %v11 |
| 150 %v56 = and i1 %v12, %v13 |
| 151 %v57 = and i1 %v14, %v15 |
| 152 %v58 = and i1 %v16, %v17 |
| 153 %v59 = and i1 %v18, %v19 |
| 154 %v60 = and i1 %v20, %v21 |
| 155 %v61 = and i1 %v22, %v23 |
| 156 %v62 = and i1 %v24, %v25 |
| 157 %v63 = and i1 %v26, %v27 |
| 158 %v64 = and i1 %v28, %v29 |
| 159 %v65 = and i1 %v30, %v31 |
| 160 %v66 = and i1 %v32, %v33 |
| 161 %v67 = and i1 %v34, %v35 |
| 162 %v68 = and i1 %v36, %v37 |
| 163 %v69 = and i1 %v38, %v39 |
| 164 ret void |
| 165 |
| 166 ; CHECK-NEXT: {{.*}}| 3: <2, 50, 49, 10> | %v50 = and i1 %v0, %v1; |
| 167 ; CHECK-NEXT: {{.*}}| 3: <2, 49, 48, 10> | %v51 = and i1 %v2, %v3; |
| 168 ; CHECK-NEXT: {{.*}}| 3: <2, 48, 47, 10> | %v52 = and i1 %v4, %v5; |
| 169 ; CHECK-NEXT: {{.*}}| 3: <2, 47, 46, 10> | %v53 = and i1 %v6, %v7; |
| 170 ; CHECK-NEXT: {{.*}}| 3: <2, 46, 45, 10> | %v54 = and i1 %v8, %v9; |
| 171 ; CHECK-NEXT: {{.*}}| 3: <2, 45, 44, 10> | %v55 = and i1 %v10, %v11; |
| 172 ; CHECK-NEXT: {{.*}}| 3: <2, 44, 43, 10> | %v56 = and i1 %v12, %v13; |
| 173 ; CHECK-NEXT: {{.*}}| 3: <2, 43, 42, 10> | %v57 = and i1 %v14, %v15; |
| 174 ; CHECK-NEXT: {{.*}}| 3: <2, 42, 41, 10> | %v58 = and i1 %v16, %v17; |
| 175 ; CHECK-NEXT: {{.*}}| 3: <2, 41, 40, 10> | %v59 = and i1 %v18, %v19; |
| 176 ; CHECK-NEXT: {{.*}}| 3: <2, 40, 39, 10> | %v60 = and i1 %v20, %v21; |
| 177 ; CHECK-NEXT: {{.*}}| 3: <2, 39, 38, 10> | %v61 = and i1 %v22, %v23; |
| 178 ; CHECK-NEXT: {{.*}}| 3: <2, 38, 37, 10> | %v62 = and i1 %v24, %v25; |
| 179 ; CHECK-NEXT: {{.*}}| 3: <2, 37, 36, 10> | %v63 = and i1 %v26, %v27; |
| 180 ; CHECK-NEXT: {{.*}}| 3: <2, 36, 35, 10> | %v64 = and i1 %v28, %v29; |
| 181 ; CHECK-NEXT: {{.*}}| 3: <2, 35, 34, 10> | %v65 = and i1 %v30, %v31; |
| 182 ; CHECK-NEXT: {{.*}}| 3: <2, 34, 33, 10> | %v66 = and i1 %v32, %v33; |
| 183 ; CHECK-NEXT: {{.*}}| 3: <2, 33, 32, 10> | %v67 = and i1 %v34, %v35; |
| 184 ; CHECK-NEXT: {{.*}}| 3: <2, 32, 31, 10> | %v68 = and i1 %v36, %v37; |
| 185 ; CHECK-NEXT: {{.*}}| 3: <2, 31, 30, 10> | %v69 = and i1 %v38, %v39; |
| 186 ; CHECK-NEXT: {{.*}}| 3: <10> | ret void; |
| 187 |
| 188 } |
| 189 |
| 190 |
| 191 ; Tests integer vector compares. |
| 192 define void @VectorIcmpOps(<16 x i8> %p0, <8 x i16> %p1, <4 x i32> %p2, |
| 193 <16 x i1> %p3, <8 x i1> %p4, <4 x i1> %p5) { |
| 194 |
| 195 ; CHECK: {{.*}}| 3: <1, 1> | blocks 1; |
| 196 ; CHECK-NEXT: | | %b0: |
| 197 |
| 198 %v0 = icmp eq <16 x i8> %p0, %p0 |
| 199 %v1 = icmp eq <8 x i16> %p1, %p1 |
| 200 %v2 = icmp eq <4 x i32> %p2, %p2 |
| 201 |
| 202 ; CHECK-NEXT: {{.*}}| 3: <28, 6, 6, 32> | %v0 = icmp eq <16 x i8> %p0, %p0; |
| 203 ; CHECK-NEXT: {{.*}}| 3: <28, 6, 6, 32> | %v1 = icmp eq <8 x i16> %p1, %p1; |
| 204 ; CHECK-NEXT: {{.*}}| 3: <28, 6, 6, 32> | %v2 = icmp eq <4 x i32> %p2, %p2; |
| 205 |
| 206 %v3 = icmp ne <16 x i8> %p0, %p0 |
| 207 %v4 = icmp ne <8 x i16> %p1, %p1 |
| 208 %v5 = icmp ne <4 x i32> %p2, %p2 |
| 209 |
| 210 ; CHECK-NEXT: {{.*}}| 3: <28, 9, 9, 33> | %v3 = icmp ne <16 x i8> %p0, %p0; |
| 211 ; CHECK-NEXT: {{.*}}| 3: <28, 9, 9, 33> | %v4 = icmp ne <8 x i16> %p1, %p1; |
| 212 ; CHECK-NEXT: {{.*}}| 3: <28, 9, 9, 33> | %v5 = icmp ne <4 x i32> %p2, %p2; |
| 213 |
| 214 %v6 = icmp ugt <16 x i8> %p0, %p0 |
| 215 %v7 = icmp ugt <8 x i16> %p1, %p1 |
| 216 %v8 = icmp ugt <4 x i32> %p2, %p2 |
| 217 |
| 218 ; CHECK-NEXT: {{.*}}| 3: <28, 12, 12, 34> | %v6 = icmp ugt <16 x i8> %p0, %p0; |
| 219 ; CHECK-NEXT: {{.*}}| 3: <28, 12, 12, 34> | %v7 = icmp ugt <8 x i16> %p1, %p1; |
| 220 ; CHECK-NEXT: {{.*}}| 3: <28, 12, 12, 34> | %v8 = icmp ugt <4 x i32> %p2, %p2; |
| 221 |
| 222 %v9 = icmp uge <16 x i8> %p0, %p0 |
| 223 %v10 = icmp uge <8 x i16> %p1, %p1 |
| 224 %v11 = icmp uge <4 x i32> %p2, %p2 |
| 225 |
| 226 ; CHECK-NEXT: {{.*}}| 3: <28, 15, 15, 35> | %v9 = icmp uge <16 x i8> %p0, %p0; |
| 227 ; CHECK-NEXT: {{.*}}| 3: <28, 15, 15, 35> | %v10 = icmp uge <8 x i16> %p1, %p1; |
| 228 ; CHECK-NEXT: {{.*}}| 3: <28, 15, 15, 35> | %v11 = icmp uge <4 x i32> %p2, %p2; |
| 229 |
| 230 %v12 = icmp ult <16 x i8> %p0, %p0 |
| 231 %v13 = icmp ult <8 x i16> %p1, %p1 |
| 232 %v14 = icmp ult <4 x i32> %p2, %p2 |
| 233 |
| 234 ; CHECK-NEXT: {{.*}}| 3: <28, 18, 18, 36> | %v12 = icmp ult <16 x i8> %p0, %p0; |
| 235 ; CHECK-NEXT: {{.*}}| 3: <28, 18, 18, 36> | %v13 = icmp ult <8 x i16> %p1, %p1; |
| 236 ; CHECK-NEXT: {{.*}}| 3: <28, 18, 18, 36> | %v14 = icmp ult <4 x i32> %p2, %p2; |
| 237 |
| 238 %v15 = icmp ule <16 x i8> %p0, %p0 |
| 239 %v16 = icmp ule <8 x i16> %p1, %p1 |
| 240 %v17 = icmp ule <4 x i32> %p2, %p2 |
| 241 |
| 242 ; CHECK-NEXT: {{.*}}| 3: <28, 21, 21, 37> | %v15 = icmp ule <16 x i8> %p0, %p0; |
| 243 ; CHECK-NEXT: {{.*}}| 3: <28, 21, 21, 37> | %v16 = icmp ule <8 x i16> %p1, %p1; |
| 244 ; CHECK-NEXT: {{.*}}| 3: <28, 21, 21, 37> | %v17 = icmp ule <4 x i32> %p2, %p2; |
| 245 |
| 246 %v18 = icmp sgt <16 x i8> %p0, %p0 |
| 247 %v19 = icmp sgt <8 x i16> %p1, %p1 |
| 248 %v20 = icmp sgt <4 x i32> %p2, %p2 |
| 249 |
| 250 ; CHECK-NEXT: {{.*}}| 3: <28, 24, 24, 38> | %v18 = icmp sgt <16 x i8> %p0, %p0; |
| 251 ; CHECK-NEXT: {{.*}}| 3: <28, 24, 24, 38> | %v19 = icmp sgt <8 x i16> %p1, %p1; |
| 252 ; CHECK-NEXT: {{.*}}| 3: <28, 24, 24, 38> | %v20 = icmp sgt <4 x i32> %p2, %p2; |
| 253 |
| 254 %v21 = icmp sge <16 x i8> %p0, %p0 |
| 255 %v22 = icmp sge <8 x i16> %p1, %p1 |
| 256 %v23 = icmp sge <4 x i32> %p2, %p2 |
| 257 |
| 258 ; CHECK-NEXT: {{.*}}| 3: <28, 27, 27, 39> | %v21 = icmp sge <16 x i8> %p0, %p0; |
| 259 ; CHECK-NEXT: {{.*}}| 3: <28, 27, 27, 39> | %v22 = icmp sge <8 x i16> %p1, %p1; |
| 260 ; CHECK-NEXT: {{.*}}| 3: <28, 27, 27, 39> | %v23 = icmp sge <4 x i32> %p2, %p2; |
| 261 |
| 262 %v24 = icmp slt <16 x i8> %p0, %p0 |
| 263 %v25 = icmp slt <8 x i16> %p1, %p1 |
| 264 %v26 = icmp slt <4 x i32> %p2, %p2 |
| 265 |
| 266 ; CHECK-NEXT: {{.*}}| 3: <28, 30, 30, 40> | %v24 = icmp slt <16 x i8> %p0, %p0; |
| 267 ; CHECK-NEXT: {{.*}}| 3: <28, 30, 30, 40> | %v25 = icmp slt <8 x i16> %p1, %p1; |
| 268 ; CHECK-NEXT: {{.*}}| 3: <28, 30, 30, 40> | %v26 = icmp slt <4 x i32> %p2, %p2; |
| 269 |
| 270 %v27 = icmp sle <16 x i8> %p0, %p0 |
| 271 %v28 = icmp sle <8 x i16> %p1, %p1 |
| 272 %v29 = icmp sle <4 x i32> %p2, %p2 |
| 273 |
| 274 ; CHECK-NEXT: {{.*}}| 3: <28, 33, 33, 41> | %v27 = icmp sle <16 x i8> %p0, %p0; |
| 275 ; CHECK-NEXT: {{.*}}| 3: <28, 33, 33, 41> | %v28 = icmp sle <8 x i16> %p1, %p1; |
| 276 ; CHECK-NEXT: {{.*}}| 3: <28, 33, 33, 41> | %v29 = icmp sle <4 x i32> %p2, %p2; |
| 277 |
| 278 ; Verify that the result types are vectors of the right size. |
| 279 %v30 = and <16 x i1> %v0, %v3 |
| 280 %v31 = and <16 x i1> %v6, %v9 |
| 281 %v32 = and <16 x i1> %v12, %v15 |
| 282 %v33 = and <16 x i1> %v18, %v21 |
| 283 %v34 = and <16 x i1> %v24, %v27 |
| 284 |
| 285 ; CHECK-NEXT: {{.*}}| 3: <2, 30, 27, 10> | %v30 = and <16 x i1> %v0, %v3; |
| 286 ; CHECK-NEXT: {{.*}}| 3: <2, 25, 22, 10> | %v31 = and <16 x i1> %v6, %v9; |
| 287 ; CHECK-NEXT: {{.*}}| 3: <2, 20, 17, 10> | %v32 = and <16 x i1> %v12, %v15; |
| 288 ; CHECK-NEXT: {{.*}}| 3: <2, 15, 12, 10> | %v33 = and <16 x i1> %v18, %v21; |
| 289 ; CHECK-NEXT: {{.*}}| 3: <2, 10, 7, 10> | %v34 = and <16 x i1> %v24, %v27; |
| 290 |
| 291 %v35 = and <8 x i1> %v1, %v4 |
| 292 %v36 = and <8 x i1> %v7, %v10 |
| 293 %v37 = and <8 x i1> %v13, %v16 |
| 294 %v38 = and <8 x i1> %v19, %v22 |
| 295 %v39 = and <8 x i1> %v25, %v28 |
| 296 |
| 297 ; CHECK-NEXT: {{.*}}| 3: <2, 34, 31, 10> | %v35 = and <8 x i1> %v1, %v4; |
| 298 ; CHECK-NEXT: {{.*}}| 3: <2, 29, 26, 10> | %v36 = and <8 x i1> %v7, %v10; |
| 299 ; CHECK-NEXT: {{.*}}| 3: <2, 24, 21, 10> | %v37 = and <8 x i1> %v13, %v16; |
| 300 ; CHECK-NEXT: {{.*}}| 3: <2, 19, 16, 10> | %v38 = and <8 x i1> %v19, %v22; |
| 301 ; CHECK-NEXT: {{.*}}| 3: <2, 14, 11, 10> | %v39 = and <8 x i1> %v25, %v28; |
| 302 |
| 303 |
| 304 %v40 = and <4 x i1> %v2, %v5 |
| 305 %v41 = and <4 x i1> %v8, %v11 |
| 306 %v42 = and <4 x i1> %v14, %v17 |
| 307 %v43 = and <4 x i1> %v20, %v23 |
| 308 %v44 = and <4 x i1> %v26, %v29 |
| 309 ret void |
| 310 |
| 311 ; CHECK-NEXT: {{.*}}| 3: <2, 38, 35, 10> | %v40 = and <4 x i1> %v2, %v5; |
| 312 ; CHECK-NEXT: {{.*}}| 3: <2, 33, 30, 10> | %v41 = and <4 x i1> %v8, %v11; |
| 313 ; CHECK-NEXT: {{.*}}| 3: <2, 28, 25, 10> | %v42 = and <4 x i1> %v14, %v17; |
| 314 ; CHECK-NEXT: {{.*}}| 3: <2, 23, 20, 10> | %v43 = and <4 x i1> %v20, %v23; |
| 315 ; CHECK-NEXT: {{.*}}| 3: <2, 18, 15, 10> | %v44 = and <4 x i1> %v26, %v29; |
| 316 ; CHECK-NEXT: {{.*}}| 3: <10> | ret void; |
| 317 |
| 318 } |