Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(192)

Side by Side Diff: tests_lit/llvm2ice_tests/nacl-atomic-intrinsics.ll

Issue 509233002: Convert lit tests to check disassembled assembly. (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: add comment Created 6 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 ; This tests each of the supported NaCl atomic instructions for every 1 ; This tests each of the supported NaCl atomic instructions for every
2 ; size allowed. 2 ; size allowed.
3 3
4 ; RUN: %llvm2ice -O2 --verbose none %s | FileCheck %s
5 ; RUN: %llvm2ice -O2 --verbose none %s | FileCheck %s --check-prefix=CHECKO2
6 ; RUN: %llvm2ice -Om1 --verbose none %s | FileCheck %s
7 ; RUN: %llvm2ice -O2 --verbose none %s \ 4 ; RUN: %llvm2ice -O2 --verbose none %s \
8 ; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj 5 ; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj \
6 ; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - | FileCheck %s
7 ; RUN: %llvm2ice -O2 --verbose none %s \
8 ; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj \
9 ; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - \
10 ; RUN: | FileCheck --check-prefix=CHECKO2 %s
9 ; RUN: %llvm2ice -Om1 --verbose none %s \ 11 ; RUN: %llvm2ice -Om1 --verbose none %s \
10 ; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj 12 ; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj \
13 ; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - | FileCheck %s
11 ; RUN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s 14 ; RUN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
12 ; RUN: %llvm2iceinsts %s | %szdiff %s | FileCheck --check-prefix=DUMP %s 15 ; RUN: %llvm2iceinsts %s | %szdiff %s | FileCheck --check-prefix=DUMP %s
13 ; RUN: %llvm2iceinsts --pnacl %s | %szdiff %s \ 16 ; RUN: %llvm2iceinsts --pnacl %s | %szdiff %s \
14 ; RUN: | FileCheck --check-prefix=DUMP %s 17 ; RUN: | FileCheck --check-prefix=DUMP %s
15 18
19 ; TODO(jvoung): Uh... normally pnacl-llc is not supposed to separate the
20 ; lock from its instruction w/ bundle padding, but when processing .s
21 ; files with llvm-mc it seems be ocassionally wrong!
22 ; https://code.google.com/p/nativeclient/issues/detail?id=3929
23 ; That makes the current "lock" checks avoid using CHECK-NEXT.
24
16 declare i8 @llvm.nacl.atomic.load.i8(i8*, i32) 25 declare i8 @llvm.nacl.atomic.load.i8(i8*, i32)
17 declare i16 @llvm.nacl.atomic.load.i16(i16*, i32) 26 declare i16 @llvm.nacl.atomic.load.i16(i16*, i32)
18 declare i32 @llvm.nacl.atomic.load.i32(i32*, i32) 27 declare i32 @llvm.nacl.atomic.load.i32(i32*, i32)
19 declare i64 @llvm.nacl.atomic.load.i64(i64*, i32) 28 declare i64 @llvm.nacl.atomic.load.i64(i64*, i32)
20 declare void @llvm.nacl.atomic.store.i8(i8, i8*, i32) 29 declare void @llvm.nacl.atomic.store.i8(i8, i8*, i32)
21 declare void @llvm.nacl.atomic.store.i16(i16, i16*, i32) 30 declare void @llvm.nacl.atomic.store.i16(i16, i16*, i32)
22 declare void @llvm.nacl.atomic.store.i32(i32, i32*, i32) 31 declare void @llvm.nacl.atomic.store.i32(i32, i32*, i32)
23 declare void @llvm.nacl.atomic.store.i64(i64, i64*, i32) 32 declare void @llvm.nacl.atomic.store.i64(i64, i64*, i32)
24 declare i8 @llvm.nacl.atomic.rmw.i8(i32, i8*, i8, i32) 33 declare i8 @llvm.nacl.atomic.rmw.i8(i32, i8*, i8, i32)
25 declare i16 @llvm.nacl.atomic.rmw.i16(i32, i16*, i16, i32) 34 declare i16 @llvm.nacl.atomic.rmw.i16(i32, i16*, i16, i32)
(...skipping 168 matching lines...) Expand 10 before | Expand all | Expand 10 after
194 define i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) { 203 define i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) {
195 entry: 204 entry:
196 %trunc = trunc i32 %v to i8 205 %trunc = trunc i32 %v to i8
197 %ptr = inttoptr i32 %iptr to i8* 206 %ptr = inttoptr i32 %iptr to i8*
198 ; "1" is an atomic add, and "6" is sequential consistency. 207 ; "1" is an atomic add, and "6" is sequential consistency.
199 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 6) 208 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 6)
200 %a_ext = zext i8 %a to i32 209 %a_ext = zext i8 %a to i32
201 ret i32 %a_ext 210 ret i32 %a_ext
202 } 211 }
203 ; CHECK-LABEL: test_atomic_rmw_add_8 212 ; CHECK-LABEL: test_atomic_rmw_add_8
204 ; CHECK: lock xadd byte {{.*}}, [[REG:.*]] 213 ; CHECK: lock
214 ; CHECK-NEXT: xadd byte {{.*}}, [[REG:.*]]
205 ; CHECK: mov {{.*}}, [[REG]] 215 ; CHECK: mov {{.*}}, [[REG]]
206 216
207 define i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) { 217 define i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) {
208 entry: 218 entry:
209 %trunc = trunc i32 %v to i16 219 %trunc = trunc i32 %v to i16
210 %ptr = inttoptr i32 %iptr to i16* 220 %ptr = inttoptr i32 %iptr to i16*
211 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 1, i16* %ptr, i16 %trunc, i32 6) 221 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 1, i16* %ptr, i16 %trunc, i32 6)
212 %a_ext = zext i16 %a to i32 222 %a_ext = zext i16 %a to i32
213 ret i32 %a_ext 223 ret i32 %a_ext
214 } 224 }
215 ; CHECK-LABEL: test_atomic_rmw_add_16 225 ; CHECK-LABEL: test_atomic_rmw_add_16
216 ; CHECK: lock xadd word {{.*}}, [[REG:.*]] 226 ; CHECK: lock
227 ; CHECK-NEXT: xadd word {{.*}}, [[REG:.*]]
217 ; CHECK: mov {{.*}}, [[REG]] 228 ; CHECK: mov {{.*}}, [[REG]]
218 229
219 define i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) { 230 define i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) {
220 entry: 231 entry:
221 %ptr = inttoptr i32 %iptr to i32* 232 %ptr = inttoptr i32 %iptr to i32*
222 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6) 233 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6)
223 ret i32 %a 234 ret i32 %a
224 } 235 }
225 ; CHECK-LABEL: test_atomic_rmw_add_32 236 ; CHECK-LABEL: test_atomic_rmw_add_32
226 ; CHECK: lock xadd dword {{.*}}, [[REG:.*]] 237 ; CHECK: lock
238 ; CHECK-NEXT: xadd dword {{.*}}, [[REG:.*]]
227 ; CHECK: mov {{.*}}, [[REG]] 239 ; CHECK: mov {{.*}}, [[REG]]
228 240
229 define i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) { 241 define i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) {
230 entry: 242 entry:
231 %ptr = inttoptr i32 %iptr to i64* 243 %ptr = inttoptr i32 %iptr to i64*
232 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6) 244 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6)
233 ret i64 %a 245 ret i64 %a
234 } 246 }
235 ; CHECK-LABEL: test_atomic_rmw_add_64 247 ; CHECK-LABEL: test_atomic_rmw_add_64
236 ; CHECK: push ebx 248 ; CHECK: push ebx
237 ; CHECK: mov eax, dword ptr [{{.*}}] 249 ; CHECK: mov eax, dword ptr [{{.*}}]
238 ; CHECK: mov edx, dword ptr [{{.*}}+4] 250 ; CHECK: mov edx, dword ptr [{{.*}} + 4]
239 ; CHECK: .L[[LABEL:.*]]:
240 ; CHECK: mov ebx, eax 251 ; CHECK: mov ebx, eax
241 ; RHS of add cannot be any of the e[abcd]x regs because they are 252 ; RHS of add cannot be any of the e[abcd]x regs because they are
242 ; clobbered in the loop, and the RHS needs to be remain live. 253 ; clobbered in the loop, and the RHS needs to be remain live.
243 ; CHECK: add ebx, {{.*e.[^x]}} 254 ; CHECK: add ebx, {{.*e.[^x]}}
244 ; CHECK: mov ecx, edx 255 ; CHECK: mov ecx, edx
245 ; CHECK: adc ecx, {{.*e.[^x]}} 256 ; CHECK: adc ecx, {{.*e.[^x]}}
246 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired). 257 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired).
247 ; It can be esi, edi, or ebp though, for example (so we need to be careful 258 ; It can be esi, edi, or ebp though, for example (so we need to be careful
248 ; about rejecting eb* and ed*.) 259 ; about rejecting eb* and ed*.)
249 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 260 ; CHECK: lock
250 ; CHECK: jne .L[[LABEL]] 261 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}]
262 ; CHECK: jne -{{[0-9]}}
251 263
252 ; Test with some more register pressure. When we have an alloca, ebp is 264 ; Test with some more register pressure. When we have an alloca, ebp is
253 ; used to manage the stack frame, so it cannot be used as a register either. 265 ; used to manage the stack frame, so it cannot be used as a register either.
254 declare void @use_ptr(i32 %iptr) 266 define void @use_ptr(i32 %iptr) {
267 entry:
268 ret void
269 }
255 270
256 define i64 @test_atomic_rmw_add_64_alloca(i32 %iptr, i64 %v) { 271 define i64 @test_atomic_rmw_add_64_alloca(i32 %iptr, i64 %v) {
257 entry: 272 entry:
258 %alloca_ptr = alloca i8, i32 16, align 16 273 %alloca_ptr = alloca i8, i32 16, align 16
259 %ptr = inttoptr i32 %iptr to i64* 274 %ptr = inttoptr i32 %iptr to i64*
260 %old = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6) 275 %old = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6)
261 store i8 0, i8* %alloca_ptr, align 1 276 store i8 0, i8* %alloca_ptr, align 1
262 store i8 1, i8* %alloca_ptr, align 1 277 store i8 1, i8* %alloca_ptr, align 1
263 store i8 2, i8* %alloca_ptr, align 1 278 store i8 2, i8* %alloca_ptr, align 1
264 store i8 3, i8* %alloca_ptr, align 1 279 store i8 3, i8* %alloca_ptr, align 1
265 %__5 = ptrtoint i8* %alloca_ptr to i32 280 %__5 = ptrtoint i8* %alloca_ptr to i32
266 call void @use_ptr(i32 %__5) 281 call void @use_ptr(i32 %__5)
267 ret i64 %old 282 ret i64 %old
268 } 283 }
269 ; CHECK-LABEL: test_atomic_rmw_add_64_alloca 284 ; CHECK-LABEL: test_atomic_rmw_add_64_alloca
270 ; CHECK: push ebx 285 ; CHECK: push ebx
271 ; CHECK-DAG: mov edx 286 ; CHECK-DAG: mov edx
272 ; CHECK-DAG: mov eax 287 ; CHECK-DAG: mov eax
273 ; CHECK-DAG: mov ecx 288 ; CHECK-DAG: mov ecx
274 ; CHECK-DAG: mov ebx 289 ; CHECK-DAG: mov ebx
275 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired). 290 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired).
276 ; It also cannot be ebp since we use that for alloca. Also make sure it's 291 ; It also cannot be ebp since we use that for alloca. Also make sure it's
277 ; not esp, since that's the stack pointer and mucking with it will break 292 ; not esp, since that's the stack pointer and mucking with it will break
278 ; the later use_ptr function call. 293 ; the later use_ptr function call.
279 ; That pretty much leaves esi, or edi as the only viable registers. 294 ; That pretty much leaves esi, or edi as the only viable registers.
280 ; CHECK: lock cmpxchg8b qword ptr [e{{[ds]}}i] 295 ; CHECK: lock
296 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{[ds]}}i]
281 ; CHECK: call use_ptr 297 ; CHECK: call use_ptr
282 298
283 define i32 @test_atomic_rmw_add_32_ignored(i32 %iptr, i32 %v) { 299 define i32 @test_atomic_rmw_add_32_ignored(i32 %iptr, i32 %v) {
284 entry: 300 entry:
285 %ptr = inttoptr i32 %iptr to i32* 301 %ptr = inttoptr i32 %iptr to i32*
286 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6) 302 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6)
287 ret i32 %v 303 ret i32 %v
288 } 304 }
289 ; Technically this could use "lock add" instead of "lock xadd", if liveness 305 ; Technically this could use "lock add" instead of "lock xadd", if liveness
290 ; tells us that the destination variable is dead. 306 ; tells us that the destination variable is dead.
291 ; CHECK-LABEL: test_atomic_rmw_add_32_ignored 307 ; CHECK-LABEL: test_atomic_rmw_add_32_ignored
292 ; CHECK: lock xadd dword {{.*}}, [[REG:.*]] 308 ; CHECK: lock
309 ; CHECK-NEXT: xadd dword {{.*}}, [[REG:.*]]
293 310
294 ; Atomic RMW 64 needs to be expanded into its own loop. 311 ; Atomic RMW 64 needs to be expanded into its own loop.
295 ; Make sure that works w/ non-trivial function bodies. 312 ; Make sure that works w/ non-trivial function bodies.
296 define i64 @test_atomic_rmw_add_64_loop(i32 %iptr, i64 %v) { 313 define i64 @test_atomic_rmw_add_64_loop(i32 %iptr, i64 %v) {
297 entry: 314 entry:
298 %x = icmp ult i64 %v, 100 315 %x = icmp ult i64 %v, 100
299 br i1 %x, label %err, label %loop 316 br i1 %x, label %err, label %loop
300 317
301 loop: 318 loop:
302 %v_next = phi i64 [ %v, %entry ], [ %next, %loop ] 319 %v_next = phi i64 [ %v, %entry ], [ %next, %loop ]
303 %ptr = inttoptr i32 %iptr to i64* 320 %ptr = inttoptr i32 %iptr to i64*
304 %next = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v_next, i32 6) 321 %next = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v_next, i32 6)
305 %success = icmp eq i64 %next, 100 322 %success = icmp eq i64 %next, 100
306 br i1 %success, label %done, label %loop 323 br i1 %success, label %done, label %loop
307 324
308 done: 325 done:
309 ret i64 %next 326 ret i64 %next
310 327
311 err: 328 err:
312 ret i64 0 329 ret i64 0
313 } 330 }
314 ; CHECK-LABEL: test_atomic_rmw_add_64_loop 331 ; CHECK-LABEL: test_atomic_rmw_add_64_loop
315 ; CHECK: push ebx 332 ; CHECK: push ebx
316 ; CHECK-LABEL: .Ltest_atomic_rmw_add_64_loop{{.*}}loop
317 ; CHECK: mov eax, dword ptr [{{.*}}] 333 ; CHECK: mov eax, dword ptr [{{.*}}]
318 ; CHECK: mov edx, dword ptr [{{.*}}+4] 334 ; CHECK: mov edx, dword ptr [{{.*}} + 4]
319 ; CHECK: .L[[LABEL:.*]]:
320 ; CHECK: mov ebx, eax 335 ; CHECK: mov ebx, eax
321 ; CHECK: add ebx, {{.*e.[^x]}} 336 ; CHECK: add ebx, {{.*e.[^x]}}
322 ; CHECK: mov ecx, edx 337 ; CHECK: mov ecx, edx
323 ; CHECK: adc ecx, {{.*e.[^x]}} 338 ; CHECK: adc ecx, {{.*e.[^x]}}
324 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 339 ; CHECK: lock
325 ; CHECK: jne .L[[LABEL]] 340 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}]
326 ; CHECK-LABEL: .Ltest_atomic_rmw_add_64_loop{{.*}}done 341 ; CHECK: jne -{{[0-9]}}
327 342
328 ;; sub 343 ;; sub
329 344
330 define i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) { 345 define i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) {
331 entry: 346 entry:
332 %trunc = trunc i32 %v to i8 347 %trunc = trunc i32 %v to i8
333 %ptr = inttoptr i32 %iptr to i8* 348 %ptr = inttoptr i32 %iptr to i8*
334 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 2, i8* %ptr, i8 %trunc, i32 6) 349 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 2, i8* %ptr, i8 %trunc, i32 6)
335 %a_ext = zext i8 %a to i32 350 %a_ext = zext i8 %a to i32
336 ret i32 %a_ext 351 ret i32 %a_ext
337 } 352 }
338 ; CHECK-LABEL: test_atomic_rmw_sub_8 353 ; CHECK-LABEL: test_atomic_rmw_sub_8
339 ; CHECK: neg [[REG:.*]] 354 ; CHECK: neg [[REG:.*]]
340 ; CHECK: lock xadd byte {{.*}}, [[REG]] 355 ; CHECK: lock
356 ; Should be using NEXT: see issue 3929
357 ; CHECK: xadd byte {{.*}}, [[REG]]
341 ; CHECK: mov {{.*}}, [[REG]] 358 ; CHECK: mov {{.*}}, [[REG]]
342 359
343 define i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) { 360 define i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) {
344 entry: 361 entry:
345 %trunc = trunc i32 %v to i16 362 %trunc = trunc i32 %v to i16
346 %ptr = inttoptr i32 %iptr to i16* 363 %ptr = inttoptr i32 %iptr to i16*
347 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 2, i16* %ptr, i16 %trunc, i32 6) 364 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 2, i16* %ptr, i16 %trunc, i32 6)
348 %a_ext = zext i16 %a to i32 365 %a_ext = zext i16 %a to i32
349 ret i32 %a_ext 366 ret i32 %a_ext
350 } 367 }
351 ; CHECK-LABEL: test_atomic_rmw_sub_16 368 ; CHECK-LABEL: test_atomic_rmw_sub_16
352 ; CHECK: neg [[REG:.*]] 369 ; CHECK: neg [[REG:.*]]
353 ; CHECK: lock xadd word {{.*}}, [[REG]] 370 ; CHECK: lock
371 ; CHECK-NEXT: xadd word {{.*}}, [[REG]]
354 ; CHECK: mov {{.*}}, [[REG]] 372 ; CHECK: mov {{.*}}, [[REG]]
355 373
356 define i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) { 374 define i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) {
357 entry: 375 entry:
358 %ptr = inttoptr i32 %iptr to i32* 376 %ptr = inttoptr i32 %iptr to i32*
359 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6) 377 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6)
360 ret i32 %a 378 ret i32 %a
361 } 379 }
362 ; CHECK-LABEL: test_atomic_rmw_sub_32 380 ; CHECK-LABEL: test_atomic_rmw_sub_32
363 ; CHECK: neg [[REG:.*]] 381 ; CHECK: neg [[REG:.*]]
364 ; CHECK: lock xadd dword {{.*}}, [[REG]] 382 ; CHECK: lock
383 ; CHECK-NEXT: xadd dword {{.*}}, [[REG]]
365 ; CHECK: mov {{.*}}, [[REG]] 384 ; CHECK: mov {{.*}}, [[REG]]
366 385
367 define i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) { 386 define i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) {
368 entry: 387 entry:
369 %ptr = inttoptr i32 %iptr to i64* 388 %ptr = inttoptr i32 %iptr to i64*
370 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i64* %ptr, i64 %v, i32 6) 389 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i64* %ptr, i64 %v, i32 6)
371 ret i64 %a 390 ret i64 %a
372 } 391 }
373 ; CHECK-LABEL: test_atomic_rmw_sub_64 392 ; CHECK-LABEL: test_atomic_rmw_sub_64
374 ; CHECK: push ebx 393 ; CHECK: push ebx
375 ; CHECK: mov eax, dword ptr [{{.*}}] 394 ; CHECK: mov eax, dword ptr [{{.*}}]
376 ; CHECK: mov edx, dword ptr [{{.*}}+4] 395 ; CHECK: mov edx, dword ptr [{{.*}} + 4]
377 ; CHECK: .L[[LABEL:.*]]:
378 ; CHECK: mov ebx, eax 396 ; CHECK: mov ebx, eax
379 ; CHECK: sub ebx, {{.*e.[^x]}} 397 ; CHECK: sub ebx, {{.*e.[^x]}}
380 ; CHECK: mov ecx, edx 398 ; CHECK: mov ecx, edx
381 ; CHECK: sbb ecx, {{.*e.[^x]}} 399 ; CHECK: sbb ecx, {{.*e.[^x]}}
382 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 400 ; CHECK: lock
383 ; CHECK: jne .L[[LABEL]] 401 ; Should be using NEXT: see issue 3929
402 ; CHECK: cmpxchg8b qword ptr [e{{.[^x]}}]
403 ; CHECK: jne -{{[0-9]}}
384 404
385 405
386 define i32 @test_atomic_rmw_sub_32_ignored(i32 %iptr, i32 %v) { 406 define i32 @test_atomic_rmw_sub_32_ignored(i32 %iptr, i32 %v) {
387 entry: 407 entry:
388 %ptr = inttoptr i32 %iptr to i32* 408 %ptr = inttoptr i32 %iptr to i32*
389 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6) 409 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6)
390 ret i32 %v 410 ret i32 %v
391 } 411 }
392 ; Could use "lock sub" instead of "neg; lock xadd" 412 ; Could use "lock sub" instead of "neg; lock xadd"
393 ; CHECK-LABEL: test_atomic_rmw_sub_32_ignored 413 ; CHECK-LABEL: test_atomic_rmw_sub_32_ignored
394 ; CHECK: neg [[REG:.*]] 414 ; CHECK: neg [[REG:.*]]
395 ; CHECK: lock xadd dword {{.*}}, [[REG]] 415 ; CHECK: lock
416 ; CHECK-NEXT: xadd dword {{.*}}, [[REG]]
396 417
397 ;; or 418 ;; or
398 419
399 define i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) { 420 define i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) {
400 entry: 421 entry:
401 %trunc = trunc i32 %v to i8 422 %trunc = trunc i32 %v to i8
402 %ptr = inttoptr i32 %iptr to i8* 423 %ptr = inttoptr i32 %iptr to i8*
403 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i8* %ptr, i8 %trunc, i32 6) 424 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i8* %ptr, i8 %trunc, i32 6)
404 %a_ext = zext i8 %a to i32 425 %a_ext = zext i8 %a to i32
405 ret i32 %a_ext 426 ret i32 %a_ext
406 } 427 }
407 ; CHECK-LABEL: test_atomic_rmw_or_8 428 ; CHECK-LABEL: test_atomic_rmw_or_8
408 ; CHECK: mov al, byte ptr 429 ; CHECK: mov al, byte ptr
409 ; CHECK: .L[[LABEL:.*]]:
410 ; Dest cannot be eax here, because eax is used for the old value. Also want 430 ; Dest cannot be eax here, because eax is used for the old value. Also want
411 ; to make sure that cmpxchg's source is the same register. 431 ; to make sure that cmpxchg's source is the same register.
412 ; CHECK: or [[REG:[^a].]] 432 ; CHECK: or [[REG:[^a].]]
413 ; CHECK: lock cmpxchg byte ptr [e{{[^a].}}], [[REG]] 433 ; CHECK: lock
414 ; CHECK: jne .L[[LABEL]] 434 ; CHECK-NEXT: cmpxchg byte ptr [e{{[^a].}}], [[REG]]
435 ; CHECK: jne -{{[0-9]}}
415 436
416 define i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) { 437 define i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) {
417 entry: 438 entry:
418 %trunc = trunc i32 %v to i16 439 %trunc = trunc i32 %v to i16
419 %ptr = inttoptr i32 %iptr to i16* 440 %ptr = inttoptr i32 %iptr to i16*
420 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i16* %ptr, i16 %trunc, i32 6) 441 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i16* %ptr, i16 %trunc, i32 6)
421 %a_ext = zext i16 %a to i32 442 %a_ext = zext i16 %a to i32
422 ret i32 %a_ext 443 ret i32 %a_ext
423 } 444 }
424 ; CHECK-LABEL: test_atomic_rmw_or_16 445 ; CHECK-LABEL: test_atomic_rmw_or_16
425 ; CHECK: mov ax, word ptr 446 ; CHECK: mov ax, word ptr
426 ; CHECK: .L[[LABEL:.*]]:
427 ; CHECK: or [[REG:[^a].]] 447 ; CHECK: or [[REG:[^a].]]
428 ; CHECK: lock cmpxchg word ptr [e{{[^a].}}], [[REG]] 448 ; CHECK: lock
429 ; CHECK: jne .L[[LABEL]] 449 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}], [[REG]]
450 ; CHECK: jne -{{[0-9]}}
430 451
431 define i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) { 452 define i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) {
432 entry: 453 entry:
433 %ptr = inttoptr i32 %iptr to i32* 454 %ptr = inttoptr i32 %iptr to i32*
434 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6) 455 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6)
435 ret i32 %a 456 ret i32 %a
436 } 457 }
437 ; CHECK-LABEL: test_atomic_rmw_or_32 458 ; CHECK-LABEL: test_atomic_rmw_or_32
438 ; CHECK: mov eax, dword ptr 459 ; CHECK: mov eax, dword ptr
439 ; CHECK: .L[[LABEL:.*]]:
440 ; CHECK: or [[REG:e[^a].]] 460 ; CHECK: or [[REG:e[^a].]]
441 ; CHECK: lock cmpxchg dword ptr [e{{[^a].}}], [[REG]] 461 ; CHECK: lock
442 ; CHECK: jne .L[[LABEL]] 462 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}], [[REG]]
463 ; CHECK: jne -{{[0-9]}}
443 464
444 define i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) { 465 define i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) {
445 entry: 466 entry:
446 %ptr = inttoptr i32 %iptr to i64* 467 %ptr = inttoptr i32 %iptr to i64*
447 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i64* %ptr, i64 %v, i32 6) 468 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i64* %ptr, i64 %v, i32 6)
448 ret i64 %a 469 ret i64 %a
449 } 470 }
450 ; CHECK-LABEL: test_atomic_rmw_or_64 471 ; CHECK-LABEL: test_atomic_rmw_or_64
451 ; CHECK: push ebx 472 ; CHECK: push ebx
452 ; CHECK: mov eax, dword ptr [{{.*}}] 473 ; CHECK: mov eax, dword ptr [{{.*}}]
453 ; CHECK: mov edx, dword ptr [{{.*}}+4] 474 ; CHECK: mov edx, dword ptr [{{.*}} + 4]
454 ; CHECK: .L[[LABEL:.*]]:
455 ; CHECK: mov ebx, eax 475 ; CHECK: mov ebx, eax
456 ; CHECK: or ebx, {{.*e.[^x]}} 476 ; CHECK: or ebx, {{.*e.[^x]}}
457 ; CHECK: mov ecx, edx 477 ; CHECK: mov ecx, edx
458 ; CHECK: or ecx, {{.*e.[^x]}} 478 ; CHECK: or ecx, {{.*e.[^x]}}
459 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 479 ; CHECK: lock
460 ; CHECK: jne .L[[LABEL]] 480 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}]
481 ; CHECK: jne -{{[0-9]}}
461 482
462 define i32 @test_atomic_rmw_or_32_ignored(i32 %iptr, i32 %v) { 483 define i32 @test_atomic_rmw_or_32_ignored(i32 %iptr, i32 %v) {
463 entry: 484 entry:
464 %ptr = inttoptr i32 %iptr to i32* 485 %ptr = inttoptr i32 %iptr to i32*
465 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6) 486 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6)
466 ret i32 %v 487 ret i32 %v
467 } 488 }
468 ; CHECK-LABEL: test_atomic_rmw_or_32_ignored 489 ; CHECK-LABEL: test_atomic_rmw_or_32_ignored
469 ; Could just "lock or", if we inspect the liveness information first. 490 ; Could just "lock or", if we inspect the liveness information first.
470 ; Would also need a way to introduce "lock"'edness to binary 491 ; Would also need a way to introduce "lock"'edness to binary
471 ; operators without introducing overhead on the more common binary ops. 492 ; operators without introducing overhead on the more common binary ops.
472 ; CHECK: mov eax, dword ptr 493 ; CHECK: mov eax, dword ptr
473 ; CHECK: .L[[LABEL:.*]]:
474 ; CHECK: or [[REG:e[^a].]] 494 ; CHECK: or [[REG:e[^a].]]
475 ; CHECK: lock cmpxchg dword ptr [e{{[^a].}}], [[REG]] 495 ; CHECK: lock
476 ; CHECK: jne .L[[LABEL]] 496 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}], [[REG]]
497 ; CHECK: jne -{{[0-9]}}
477 498
478 ;; and 499 ;; and
479 500
480 define i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) { 501 define i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) {
481 entry: 502 entry:
482 %trunc = trunc i32 %v to i8 503 %trunc = trunc i32 %v to i8
483 %ptr = inttoptr i32 %iptr to i8* 504 %ptr = inttoptr i32 %iptr to i8*
484 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 4, i8* %ptr, i8 %trunc, i32 6) 505 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 4, i8* %ptr, i8 %trunc, i32 6)
485 %a_ext = zext i8 %a to i32 506 %a_ext = zext i8 %a to i32
486 ret i32 %a_ext 507 ret i32 %a_ext
487 } 508 }
488 ; CHECK-LABEL: test_atomic_rmw_and_8 509 ; CHECK-LABEL: test_atomic_rmw_and_8
489 ; CHECK: mov al, byte ptr 510 ; CHECK: mov al, byte ptr
490 ; CHECK: .L[[LABEL:.*]]:
491 ; CHECK: and [[REG:[^a].]] 511 ; CHECK: and [[REG:[^a].]]
492 ; CHECK: lock cmpxchg byte ptr [e{{[^a].}}], [[REG]] 512 ; CHECK: lock
493 ; CHECK: jne .L[[LABEL]] 513 ; CHECK-NEXT: cmpxchg byte ptr [e{{[^a].}}], [[REG]]
514 ; CHECK: jne -{{[0-9]}}
494 515
495 define i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) { 516 define i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) {
496 entry: 517 entry:
497 %trunc = trunc i32 %v to i16 518 %trunc = trunc i32 %v to i16
498 %ptr = inttoptr i32 %iptr to i16* 519 %ptr = inttoptr i32 %iptr to i16*
499 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 4, i16* %ptr, i16 %trunc, i32 6) 520 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 4, i16* %ptr, i16 %trunc, i32 6)
500 %a_ext = zext i16 %a to i32 521 %a_ext = zext i16 %a to i32
501 ret i32 %a_ext 522 ret i32 %a_ext
502 } 523 }
503 ; CHECK-LABEL: test_atomic_rmw_and_16 524 ; CHECK-LABEL: test_atomic_rmw_and_16
504 ; CHECK: mov ax, word ptr 525 ; CHECK: mov ax, word ptr
505 ; CHECK: .L[[LABEL:.*]]:
506 ; CHECK: and 526 ; CHECK: and
507 ; CHECK: lock cmpxchg word ptr [e{{[^a].}}] 527 ; CHECK: lock
508 ; CHECK: jne .L[[LABEL]] 528 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}]
529 ; CHECK: jne -{{[0-9]}}
509 530
510 define i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) { 531 define i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) {
511 entry: 532 entry:
512 %ptr = inttoptr i32 %iptr to i32* 533 %ptr = inttoptr i32 %iptr to i32*
513 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6) 534 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6)
514 ret i32 %a 535 ret i32 %a
515 } 536 }
516 ; CHECK-LABEL: test_atomic_rmw_and_32 537 ; CHECK-LABEL: test_atomic_rmw_and_32
517 ; CHECK: mov eax, dword ptr 538 ; CHECK: mov eax, dword ptr
518 ; CHECK: .L[[LABEL:.*]]:
519 ; CHECK: and 539 ; CHECK: and
520 ; CHECK: lock cmpxchg dword ptr [e{{[^a].}}] 540 ; CHECK: lock
521 ; CHECK: jne .L[[LABEL]] 541 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}]
542 ; CHECK: jne -{{[0-9]}}
522 543
523 define i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) { 544 define i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) {
524 entry: 545 entry:
525 %ptr = inttoptr i32 %iptr to i64* 546 %ptr = inttoptr i32 %iptr to i64*
526 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i64* %ptr, i64 %v, i32 6) 547 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i64* %ptr, i64 %v, i32 6)
527 ret i64 %a 548 ret i64 %a
528 } 549 }
529 ; CHECK-LABEL: test_atomic_rmw_and_64 550 ; CHECK-LABEL: test_atomic_rmw_and_64
530 ; CHECK: push ebx 551 ; CHECK: push ebx
531 ; CHECK: mov eax, dword ptr [{{.*}}] 552 ; CHECK: mov eax, dword ptr [{{.*}}]
532 ; CHECK: mov edx, dword ptr [{{.*}}+4] 553 ; CHECK: mov edx, dword ptr [{{.*}} + 4]
533 ; CHECK: .L[[LABEL:.*]]:
534 ; CHECK: mov ebx, eax 554 ; CHECK: mov ebx, eax
535 ; CHECK: and ebx, {{.*e.[^x]}} 555 ; CHECK: and ebx, {{.*e.[^x]}}
536 ; CHECK: mov ecx, edx 556 ; CHECK: mov ecx, edx
537 ; CHECK: and ecx, {{.*e.[^x]}} 557 ; CHECK: and ecx, {{.*e.[^x]}}
538 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 558 ; CHECK: lock
539 ; CHECK: jne .L[[LABEL]] 559 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}]
560 ; CHECK: jne -{{[0-9]}}
540 561
541 define i32 @test_atomic_rmw_and_32_ignored(i32 %iptr, i32 %v) { 562 define i32 @test_atomic_rmw_and_32_ignored(i32 %iptr, i32 %v) {
542 entry: 563 entry:
543 %ptr = inttoptr i32 %iptr to i32* 564 %ptr = inttoptr i32 %iptr to i32*
544 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6) 565 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6)
545 ret i32 %v 566 ret i32 %v
546 } 567 }
547 ; CHECK-LABEL: test_atomic_rmw_and_32_ignored 568 ; CHECK-LABEL: test_atomic_rmw_and_32_ignored
548 ; Could just "lock and" 569 ; Could just "lock and"
549 ; CHECK: mov eax, dword ptr 570 ; CHECK: mov eax, dword ptr
550 ; CHECK: .L[[LABEL:.*]]:
551 ; CHECK: and 571 ; CHECK: and
552 ; CHECK: lock cmpxchg dword ptr [e{{[^a].}}] 572 ; CHECK: lock
553 ; CHECK: jne .L[[LABEL]] 573 ; Should be using NEXT: see issue 3929
574 ; CHECK: cmpxchg dword ptr [e{{[^a].}}]
575 ; CHECK: jne -{{[0-9]}}
554 576
555 ;; xor 577 ;; xor
556 578
557 define i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) { 579 define i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) {
558 entry: 580 entry:
559 %trunc = trunc i32 %v to i8 581 %trunc = trunc i32 %v to i8
560 %ptr = inttoptr i32 %iptr to i8* 582 %ptr = inttoptr i32 %iptr to i8*
561 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 5, i8* %ptr, i8 %trunc, i32 6) 583 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 5, i8* %ptr, i8 %trunc, i32 6)
562 %a_ext = zext i8 %a to i32 584 %a_ext = zext i8 %a to i32
563 ret i32 %a_ext 585 ret i32 %a_ext
564 } 586 }
565 ; CHECK-LABEL: test_atomic_rmw_xor_8 587 ; CHECK-LABEL: test_atomic_rmw_xor_8
566 ; CHECK: mov al, byte ptr 588 ; CHECK: mov al, byte ptr
567 ; CHECK: .L[[LABEL:.*]]:
568 ; CHECK: xor [[REG:[^a].]] 589 ; CHECK: xor [[REG:[^a].]]
569 ; CHECK: lock cmpxchg byte ptr [e{{[^a].}}], [[REG]] 590 ; CHECK: lock
570 ; CHECK: jne .L[[LABEL]] 591 ; CHECK-NEXT: cmpxchg byte ptr [e{{[^a].}}], [[REG]]
592 ; CHECK: jne -{{[0-9]}}
571 593
572 define i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) { 594 define i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) {
573 entry: 595 entry:
574 %trunc = trunc i32 %v to i16 596 %trunc = trunc i32 %v to i16
575 %ptr = inttoptr i32 %iptr to i16* 597 %ptr = inttoptr i32 %iptr to i16*
576 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 5, i16* %ptr, i16 %trunc, i32 6) 598 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 5, i16* %ptr, i16 %trunc, i32 6)
577 %a_ext = zext i16 %a to i32 599 %a_ext = zext i16 %a to i32
578 ret i32 %a_ext 600 ret i32 %a_ext
579 } 601 }
580 ; CHECK-LABEL: test_atomic_rmw_xor_16 602 ; CHECK-LABEL: test_atomic_rmw_xor_16
581 ; CHECK: mov ax, word ptr 603 ; CHECK: mov ax, word ptr
582 ; CHECK: .L[[LABEL:.*]]:
583 ; CHECK: xor 604 ; CHECK: xor
584 ; CHECK: lock cmpxchg word ptr [e{{[^a].}}] 605 ; CHECK: lock
585 ; CHECK: jne .L[[LABEL]] 606 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}]
607 ; CHECK: jne -{{[0-9]}}
586 608
587 609
588 define i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) { 610 define i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) {
589 entry: 611 entry:
590 %ptr = inttoptr i32 %iptr to i32* 612 %ptr = inttoptr i32 %iptr to i32*
591 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6) 613 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6)
592 ret i32 %a 614 ret i32 %a
593 } 615 }
594 ; CHECK-LABEL: test_atomic_rmw_xor_32 616 ; CHECK-LABEL: test_atomic_rmw_xor_32
595 ; CHECK: mov eax, dword ptr 617 ; CHECK: mov eax, dword ptr
596 ; CHECK: .L[[LABEL:.*]]:
597 ; CHECK: xor 618 ; CHECK: xor
598 ; CHECK: lock cmpxchg dword ptr [e{{[^a].}}] 619 ; CHECK: lock
599 ; CHECK: jne .L[[LABEL]] 620 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}]
621 ; CHECK: jne -{{[0-9]}}
600 622
601 define i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) { 623 define i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) {
602 entry: 624 entry:
603 %ptr = inttoptr i32 %iptr to i64* 625 %ptr = inttoptr i32 %iptr to i64*
604 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i64* %ptr, i64 %v, i32 6) 626 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i64* %ptr, i64 %v, i32 6)
605 ret i64 %a 627 ret i64 %a
606 } 628 }
607 ; CHECK-LABEL: test_atomic_rmw_xor_64 629 ; CHECK-LABEL: test_atomic_rmw_xor_64
608 ; CHECK: push ebx 630 ; CHECK: push ebx
609 ; CHECK: mov eax, dword ptr [{{.*}}] 631 ; CHECK: mov eax, dword ptr [{{.*}}]
610 ; CHECK: mov edx, dword ptr [{{.*}}+4] 632 ; CHECK: mov edx, dword ptr [{{.*}} + 4]
611 ; CHECK: .L[[LABEL:.*]]:
612 ; CHECK: mov ebx, eax 633 ; CHECK: mov ebx, eax
613 ; CHECK: or ebx, {{.*e.[^x]}} 634 ; CHECK: or ebx, {{.*e.[^x]}}
614 ; CHECK: mov ecx, edx 635 ; CHECK: mov ecx, edx
615 ; CHECK: or ecx, {{.*e.[^x]}} 636 ; CHECK: or ecx, {{.*e.[^x]}}
616 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 637 ; CHECK: lock
617 ; CHECK: jne .L[[LABEL]] 638 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}]
639 ; CHECK: jne -{{[0-9]}}
618 640
619 define i32 @test_atomic_rmw_xor_32_ignored(i32 %iptr, i32 %v) { 641 define i32 @test_atomic_rmw_xor_32_ignored(i32 %iptr, i32 %v) {
620 entry: 642 entry:
621 %ptr = inttoptr i32 %iptr to i32* 643 %ptr = inttoptr i32 %iptr to i32*
622 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6) 644 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6)
623 ret i32 %v 645 ret i32 %v
624 } 646 }
625 ; CHECK-LABEL: test_atomic_rmw_xor_32_ignored 647 ; CHECK-LABEL: test_atomic_rmw_xor_32_ignored
626 ; CHECK: mov eax, dword ptr 648 ; CHECK: mov eax, dword ptr
627 ; CHECK: .L[[LABEL:.*]]:
628 ; CHECK: xor 649 ; CHECK: xor
629 ; CHECK: lock cmpxchg dword ptr [e{{[^a].}}] 650 ; CHECK: lock
630 ; CHECK: jne .L[[LABEL]] 651 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}]
652 ; CHECK: jne -{{[0-9]}}
631 653
632 ;; exchange 654 ;; exchange
633 655
634 define i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) { 656 define i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) {
635 entry: 657 entry:
636 %trunc = trunc i32 %v to i8 658 %trunc = trunc i32 %v to i8
637 %ptr = inttoptr i32 %iptr to i8* 659 %ptr = inttoptr i32 %iptr to i8*
638 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i8* %ptr, i8 %trunc, i32 6) 660 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i8* %ptr, i8 %trunc, i32 6)
639 %a_ext = zext i8 %a to i32 661 %a_ext = zext i8 %a to i32
640 ret i32 %a_ext 662 ret i32 %a_ext
(...skipping 26 matching lines...) Expand all
667 %ptr = inttoptr i32 %iptr to i64* 689 %ptr = inttoptr i32 %iptr to i64*
668 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i64* %ptr, i64 %v, i32 6) 690 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i64* %ptr, i64 %v, i32 6)
669 ret i64 %a 691 ret i64 %a
670 } 692 }
671 ; CHECK-LABEL: test_atomic_rmw_xchg_64 693 ; CHECK-LABEL: test_atomic_rmw_xchg_64
672 ; CHECK: push ebx 694 ; CHECK: push ebx
673 ; CHECK-DAG: mov edx 695 ; CHECK-DAG: mov edx
674 ; CHECK-DAG: mov eax 696 ; CHECK-DAG: mov eax
675 ; CHECK-DAG: mov ecx 697 ; CHECK-DAG: mov ecx
676 ; CHECK-DAG: mov ebx 698 ; CHECK-DAG: mov ebx
677 ; CHECK: .L[[LABEL:.*]]: 699 ; CHECK: lock
678 ; CHECK: lock cmpxchg8b qword ptr [{{e.[^x]}}] 700 ; CHECK-NEXT: cmpxchg8b qword ptr [{{e.[^x]}}]
679 ; CHECK: jne .L[[LABEL]] 701 ; CHECK: jne -{{[0-9]}}
680 702
681 define i32 @test_atomic_rmw_xchg_32_ignored(i32 %iptr, i32 %v) { 703 define i32 @test_atomic_rmw_xchg_32_ignored(i32 %iptr, i32 %v) {
682 entry: 704 entry:
683 %ptr = inttoptr i32 %iptr to i32* 705 %ptr = inttoptr i32 %iptr to i32*
684 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6) 706 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6)
685 ret i32 %v 707 ret i32 %v
686 } 708 }
687 ; In this case, ignoring the return value doesn't help. The xchg is 709 ; In this case, ignoring the return value doesn't help. The xchg is
688 ; used to do an atomic store. 710 ; used to do an atomic store.
689 ; CHECK-LABEL: test_atomic_rmw_xchg_32_ignored 711 ; CHECK-LABEL: test_atomic_rmw_xchg_32_ignored
690 ; CHECK: xchg dword ptr {{.*}}, [[REG:.*]] 712 ; CHECK: xchg dword ptr {{.*}}, [[REG:.*]]
691 713
692 ;;;; Cmpxchg 714 ;;;; Cmpxchg
693 715
694 define i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) { 716 define i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) {
695 entry: 717 entry:
696 %trunc_exp = trunc i32 %expected to i8 718 %trunc_exp = trunc i32 %expected to i8
697 %trunc_des = trunc i32 %desired to i8 719 %trunc_des = trunc i32 %desired to i8
698 %ptr = inttoptr i32 %iptr to i8* 720 %ptr = inttoptr i32 %iptr to i8*
699 %old = call i8 @llvm.nacl.atomic.cmpxchg.i8(i8* %ptr, i8 %trunc_exp, 721 %old = call i8 @llvm.nacl.atomic.cmpxchg.i8(i8* %ptr, i8 %trunc_exp,
700 i8 %trunc_des, i32 6, i32 6) 722 i8 %trunc_des, i32 6, i32 6)
701 %old_ext = zext i8 %old to i32 723 %old_ext = zext i8 %old to i32
702 ret i32 %old_ext 724 ret i32 %old_ext
703 } 725 }
704 ; CHECK-LABEL: test_atomic_cmpxchg_8 726 ; CHECK-LABEL: test_atomic_cmpxchg_8
705 ; CHECK: mov al, {{.*}} 727 ; CHECK: mov al, {{.*}}
706 ; Need to check that eax isn't used as the address register or the desired. 728 ; Need to check that eax isn't used as the address register or the desired.
707 ; since it is already used as the *expected* register. 729 ; since it is already used as the *expected* register.
708 ; CHECK: lock cmpxchg byte ptr [e{{[^a].}}], {{[^a]}} 730 ; CHECK: lock
731 ; CHECK-NEXT: cmpxchg byte ptr [e{{[^a].}}], {{[^a]}}
709 732
710 define i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) { 733 define i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) {
711 entry: 734 entry:
712 %trunc_exp = trunc i32 %expected to i16 735 %trunc_exp = trunc i32 %expected to i16
713 %trunc_des = trunc i32 %desired to i16 736 %trunc_des = trunc i32 %desired to i16
714 %ptr = inttoptr i32 %iptr to i16* 737 %ptr = inttoptr i32 %iptr to i16*
715 %old = call i16 @llvm.nacl.atomic.cmpxchg.i16(i16* %ptr, i16 %trunc_exp, 738 %old = call i16 @llvm.nacl.atomic.cmpxchg.i16(i16* %ptr, i16 %trunc_exp,
716 i16 %trunc_des, i32 6, i32 6) 739 i16 %trunc_des, i32 6, i32 6)
717 %old_ext = zext i16 %old to i32 740 %old_ext = zext i16 %old to i32
718 ret i32 %old_ext 741 ret i32 %old_ext
719 } 742 }
720 ; CHECK-LABEL: test_atomic_cmpxchg_16 743 ; CHECK-LABEL: test_atomic_cmpxchg_16
721 ; CHECK: mov ax, {{.*}} 744 ; CHECK: mov ax, {{.*}}
722 ; CHECK: lock cmpxchg word ptr [e{{[^a].}}], {{[^a]}} 745 ; CHECK: lock
746 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}], {{[^a]}}
723 747
724 define i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) { 748 define i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) {
725 entry: 749 entry:
726 %ptr = inttoptr i32 %iptr to i32* 750 %ptr = inttoptr i32 %iptr to i32*
727 %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected, 751 %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
728 i32 %desired, i32 6, i32 6) 752 i32 %desired, i32 6, i32 6)
729 ret i32 %old 753 ret i32 %old
730 } 754 }
731 ; CHECK-LABEL: test_atomic_cmpxchg_32 755 ; CHECK-LABEL: test_atomic_cmpxchg_32
732 ; CHECK: mov eax, {{.*}} 756 ; CHECK: mov eax, {{.*}}
733 ; CHECK: lock cmpxchg dword ptr [e{{[^a].}}], e{{[^a]}} 757 ; CHECK: lock
758 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}], e{{[^a]}}
734 759
735 define i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) { 760 define i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) {
736 entry: 761 entry:
737 %ptr = inttoptr i32 %iptr to i64* 762 %ptr = inttoptr i32 %iptr to i64*
738 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, 763 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
739 i64 %desired, i32 6, i32 6) 764 i64 %desired, i32 6, i32 6)
740 ret i64 %old 765 ret i64 %old
741 } 766 }
742 ; CHECK-LABEL: test_atomic_cmpxchg_64 767 ; CHECK-LABEL: test_atomic_cmpxchg_64
743 ; CHECK: push ebx 768 ; CHECK: push ebx
744 ; CHECK-DAG: mov edx 769 ; CHECK-DAG: mov edx
745 ; CHECK-DAG: mov eax 770 ; CHECK-DAG: mov eax
746 ; CHECK-DAG: mov ecx 771 ; CHECK-DAG: mov ecx
747 ; CHECK-DAG: mov ebx 772 ; CHECK-DAG: mov ebx
748 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 773 ; CHECK: lock
774 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}]
749 ; edx and eax are already the return registers, so they don't actually 775 ; edx and eax are already the return registers, so they don't actually
750 ; need to be reshuffled via movs. The next test stores the result 776 ; need to be reshuffled via movs. The next test stores the result
751 ; somewhere, so in that case they do need to be mov'ed. 777 ; somewhere, so in that case they do need to be mov'ed.
752 778
753 ; Test a case where %old really does need to be copied out of edx:eax. 779 ; Test a case where %old really does need to be copied out of edx:eax.
754 define void @test_atomic_cmpxchg_64_store(i32 %ret_iptr, i32 %iptr, i64 %expected, i64 %desired) { 780
755 entry: 781 entry:
756 %ptr = inttoptr i32 %iptr to i64* 782 %ptr = inttoptr i32 %iptr to i64*
757 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, 783 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
758 i64 %desired, i32 6, i32 6) 784 i64 %desired, i32 6, i32 6)
759 %__6 = inttoptr i32 %ret_iptr to i64* 785 %__6 = inttoptr i32 %ret_iptr to i64*
760 store i64 %old, i64* %__6, align 1 786 store i64 %old, i64* %__6, align 1
761 ret void 787 ret void
762 } 788 }
763 ; CHECK-LABEL: test_atomic_cmpxchg_64_store 789 ; CHECK-LABEL: test_atomic_cmpxchg_64_store
764 ; CHECK: push ebx 790 ; CHECK: push ebx
765 ; CHECK-DAG: mov edx 791 ; CHECK-DAG: mov edx
766 ; CHECK-DAG: mov eax 792 ; CHECK-DAG: mov eax
767 ; CHECK-DAG: mov ecx 793 ; CHECK-DAG: mov ecx
768 ; CHECK-DAG: mov ebx 794 ; CHECK-DAG: mov ebx
769 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 795 ; CHECK: lock
796 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}]
770 ; CHECK: mov {{.*}}, edx 797 ; CHECK: mov {{.*}}, edx
771 ; CHECK: mov {{.*}}, eax 798 ; CHECK: mov {{.*}}, eax
772 799
773 ; Test with some more register pressure. When we have an alloca, ebp is 800 ; Test with some more register pressure. When we have an alloca, ebp is
774 ; used to manage the stack frame, so it cannot be used as a register either. 801 ; used to manage the stack frame, so it cannot be used as a register either.
775 define i64 @test_atomic_cmpxchg_64_alloca(i32 %iptr, i64 %expected, i64 %desired) { 802
776 entry: 803 entry:
777 %alloca_ptr = alloca i8, i32 16, align 16 804 %alloca_ptr = alloca i8, i32 16, align 16
778 %ptr = inttoptr i32 %iptr to i64* 805 %ptr = inttoptr i32 %iptr to i64*
779 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, 806 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
(...skipping 10 matching lines...) Expand all
790 ; CHECK: push ebx 817 ; CHECK: push ebx
791 ; CHECK-DAG: mov edx 818 ; CHECK-DAG: mov edx
792 ; CHECK-DAG: mov eax 819 ; CHECK-DAG: mov eax
793 ; CHECK-DAG: mov ecx 820 ; CHECK-DAG: mov ecx
794 ; CHECK-DAG: mov ebx 821 ; CHECK-DAG: mov ebx
795 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired). 822 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired).
796 ; It also cannot be ebp since we use that for alloca. Also make sure it's 823 ; It also cannot be ebp since we use that for alloca. Also make sure it's
797 ; not esp, since that's the stack pointer and mucking with it will break 824 ; not esp, since that's the stack pointer and mucking with it will break
798 ; the later use_ptr function call. 825 ; the later use_ptr function call.
799 ; That pretty much leaves esi, or edi as the only viable registers. 826 ; That pretty much leaves esi, or edi as the only viable registers.
800 ; CHECK: lock cmpxchg8b qword ptr [e{{[ds]}}i] 827 ; CHECK: lock
828 ; Should be using NEXT: see issue 3929
829 ; CHECK: cmpxchg8b qword ptr [e{{[ds]}}i]
801 ; CHECK: call use_ptr 830 ; CHECK: call use_ptr
802 831
803 define i32 @test_atomic_cmpxchg_32_ignored(i32 %iptr, i32 %expected, i32 %desired) { 832
804 entry: 833 entry:
805 %ptr = inttoptr i32 %iptr to i32* 834 %ptr = inttoptr i32 %iptr to i32*
806 %ignored = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected, 835 %ignored = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
807 i32 %desired, i32 6, i32 6) 836 i32 %desired, i32 6, i32 6)
808 ret i32 0 837 ret i32 0
809 } 838 }
810 ; CHECK-LABEL: test_atomic_cmpxchg_32_ignored 839 ; CHECK-LABEL: test_atomic_cmpxchg_32_ignored
811 ; CHECK: mov eax, {{.*}} 840 ; CHECK: mov eax, {{.*}}
812 ; CHECK: lock cmpxchg dword ptr [e{{[^a].}}] 841 ; CHECK: lock
842 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}]
813 843
814 define i64 @test_atomic_cmpxchg_64_ignored(i32 %iptr, i64 %expected, i64 %desired) { 844
815 entry: 845 entry:
816 %ptr = inttoptr i32 %iptr to i64* 846 %ptr = inttoptr i32 %iptr to i64*
817 %ignored = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, 847 %ignored = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
818 i64 %desired, i32 6, i32 6) 848 i64 %desired, i32 6, i32 6)
819 ret i64 0 849 ret i64 0
820 } 850 }
821 ; CHECK-LABEL: test_atomic_cmpxchg_64_ignored 851 ; CHECK-LABEL: test_atomic_cmpxchg_64_ignored
822 ; CHECK: push ebx 852 ; CHECK: push ebx
823 ; CHECK-DAG: mov edx 853 ; CHECK-DAG: mov edx
824 ; CHECK-DAG: mov eax 854 ; CHECK-DAG: mov eax
825 ; CHECK-DAG: mov ecx 855 ; CHECK-DAG: mov ecx
826 ; CHECK-DAG: mov ebx 856 ; CHECK-DAG: mov ebx
827 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 857 ; CHECK: lock
858 ; Should be using NEXT: see issue 3929
859 ; CHECK: cmpxchg8b qword ptr [e{{.[^x]}}]
828 860
829 ;;;; Fence and is-lock-free. 861 ;;;; Fence and is-lock-free.
830 862
831 define void @test_atomic_fence() { 863 define void @test_atomic_fence() {
832 entry: 864 entry:
833 call void @llvm.nacl.atomic.fence(i32 6) 865 call void @llvm.nacl.atomic.fence(i32 6)
834 ret void 866 ret void
835 } 867 }
836 ; CHECK-LABEL: test_atomic_fence 868 ; CHECK-LABEL: test_atomic_fence
837 ; CHECK: mfence 869 ; CHECK: mfence
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
896 ret i32 %z 928 ret i32 %z
897 } 929 }
898 ; CHECK-LABEL: test_atomic_is_lock_free_can_dce 930 ; CHECK-LABEL: test_atomic_is_lock_free_can_dce
899 ; CHECK: mov {{.*}}, 1 931 ; CHECK: mov {{.*}}, 1
900 ; CHECK: ret 932 ; CHECK: ret
901 ; CHECK: add 933 ; CHECK: add
902 ; CHECK: ret 934 ; CHECK: ret
903 935
904 ; ERRORS-NOT: ICE translation error 936 ; ERRORS-NOT: ICE translation error
905 ; DUMP-NOT: SZ 937 ; DUMP-NOT: SZ
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698