Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(246)

Side by Side Diff: tests_lit/llvm2ice_tests/nacl-atomic-intrinsics.ll

Issue 1531623007: Add option to force filetype=asm for testing (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Code review fixes. Tighter ABI checks. Created 4 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 ; This tests each of the supported NaCl atomic instructions for every 1 ; This tests each of the supported NaCl atomic instructions for every
2 ; size allowed. 2 ; size allowed.
3 3
4 ; RUN: %p2i -i %s --filetype=obj --disassemble --args -O2 \ 4 ; RUN: %p2i -i %s --filetype=obj --disassemble --args -O2 \
5 ; RUN: -allow-externally-defined-symbols | FileCheck %s 5 ; RUN: -allow-externally-defined-symbols | FileCheck %s
6 ; RUN: %p2i -i %s --filetype=obj --disassemble --args -O2 \ 6 ; RUN: %p2i -i %s --filetype=obj --disassemble --args -O2 \
7 ; RUN: -allow-externally-defined-symbols | FileCheck --check-prefix=O2 %s 7 ; RUN: -allow-externally-defined-symbols | FileCheck --check-prefix=O2 %s
8 ; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 \ 8 ; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 \
9 ; RUN: -allow-externally-defined-symbols | FileCheck %s 9 ; RUN: -allow-externally-defined-symbols | FileCheck %s
10 10
(...skipping 28 matching lines...) Expand all
39 declare i32 @llvm.nacl.atomic.rmw.i32(i32, i32*, i32, i32) 39 declare i32 @llvm.nacl.atomic.rmw.i32(i32, i32*, i32, i32)
40 declare i64 @llvm.nacl.atomic.rmw.i64(i32, i64*, i64, i32) 40 declare i64 @llvm.nacl.atomic.rmw.i64(i32, i64*, i64, i32)
41 declare i8 @llvm.nacl.atomic.cmpxchg.i8(i8*, i8, i8, i32, i32) 41 declare i8 @llvm.nacl.atomic.cmpxchg.i8(i8*, i8, i8, i32, i32)
42 declare i16 @llvm.nacl.atomic.cmpxchg.i16(i16*, i16, i16, i32, i32) 42 declare i16 @llvm.nacl.atomic.cmpxchg.i16(i16*, i16, i16, i32, i32)
43 declare i32 @llvm.nacl.atomic.cmpxchg.i32(i32*, i32, i32, i32, i32) 43 declare i32 @llvm.nacl.atomic.cmpxchg.i32(i32*, i32, i32, i32, i32)
44 declare i64 @llvm.nacl.atomic.cmpxchg.i64(i64*, i64, i64, i32, i32) 44 declare i64 @llvm.nacl.atomic.cmpxchg.i64(i64*, i64, i64, i32, i32)
45 declare void @llvm.nacl.atomic.fence(i32) 45 declare void @llvm.nacl.atomic.fence(i32)
46 declare void @llvm.nacl.atomic.fence.all() 46 declare void @llvm.nacl.atomic.fence.all()
47 declare i1 @llvm.nacl.atomic.is.lock.free(i32, i8*) 47 declare i1 @llvm.nacl.atomic.is.lock.free(i32, i8*)
48 48
49 @Global8 = internal global [1 x i8] zeroinitializer, align 1 49 @SzGlobal8 = internal global [1 x i8] zeroinitializer, align 1
50 @Global16 = internal global [2 x i8] zeroinitializer, align 2 50 @SzGlobal16 = internal global [2 x i8] zeroinitializer, align 2
51 @Global32 = internal global [4 x i8] zeroinitializer, align 4 51 @SzGlobal32 = internal global [4 x i8] zeroinitializer, align 4
52 @Global64 = internal global [8 x i8] zeroinitializer, align 8 52 @SzGlobal64 = internal global [8 x i8] zeroinitializer, align 8
53 53
54 ; NOTE: The LLC equivalents for 16-bit atomic operations are expanded 54 ; NOTE: The LLC equivalents for 16-bit atomic operations are expanded
55 ; as 32-bit operations. For Subzero, assume that real 16-bit operations 55 ; as 32-bit operations. For Subzero, assume that real 16-bit operations
56 ; will be usable (the validator will be fixed): 56 ; will be usable (the validator will be fixed):
57 ; https://code.google.com/p/nativeclient/issues/detail?id=2981 57 ; https://code.google.com/p/nativeclient/issues/detail?id=2981
58 58
59 ;;; Load 59 ;;; Load
60 60
61 ; x86 guarantees load/store to be atomic if naturally aligned. 61 ; x86 guarantees load/store to be atomic if naturally aligned.
62 ; The PNaCl IR requires all atomic accesses to be naturally aligned. 62 ; The PNaCl IR requires all atomic accesses to be naturally aligned.
(...skipping 280 matching lines...) Expand 10 before | Expand all | Expand 10 after
343 ; ARM32: ldrexd r{{[0-9]+}}, r{{[0-9]+}}, [r{{[0-9]+}}] 343 ; ARM32: ldrexd r{{[0-9]+}}, r{{[0-9]+}}, [r{{[0-9]+}}]
344 ; ARM32: adds 344 ; ARM32: adds
345 ; ARM32-NEXT: adc 345 ; ARM32-NEXT: adc
346 ; ARM32: strexd r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}, [r{{[0-9]+}}] 346 ; ARM32: strexd r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}, [r{{[0-9]+}}]
347 ; ARM32: bne 347 ; ARM32: bne
348 ; ARM32: dmb 348 ; ARM32: dmb
349 349
350 ; Same test as above, but with a global address to test FakeUse issues. 350 ; Same test as above, but with a global address to test FakeUse issues.
351 define internal i64 @test_atomic_rmw_add_64_global(i64 %v) { 351 define internal i64 @test_atomic_rmw_add_64_global(i64 %v) {
352 entry: 352 entry:
353 %ptr = bitcast [8 x i8]* @Global64 to i64* 353 %ptr = bitcast [8 x i8]* @SzGlobal64 to i64*
354 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6) 354 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6)
355 ret i64 %a 355 ret i64 %a
356 } 356 }
357 ; CHECK-LABEL: test_atomic_rmw_add_64_global 357 ; CHECK-LABEL: test_atomic_rmw_add_64_global
358 ; ARM32-LABEL: test_atomic_rmw_add_64_global 358 ; ARM32-LABEL: test_atomic_rmw_add_64_global
359 ; ARM32: dmb 359 ; ARM32: dmb
360 ; ARM32: ldrexd r{{[0-9]+}}, r{{[0-9]+}}, [r{{[0-9]+}}] 360 ; ARM32: ldrexd r{{[0-9]+}}, r{{[0-9]+}}, [r{{[0-9]+}}]
361 ; ARM32: adds 361 ; ARM32: adds
362 ; ARM32-NEXT: adc 362 ; ARM32-NEXT: adc
363 ; ARM32: strexd r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}, [r{{[0-9]+}}] 363 ; ARM32: strexd r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}, [r{{[0-9]+}}]
(...skipping 224 matching lines...) Expand 10 before | Expand all | Expand 10 after
588 ; ARM32: ldrexb 588 ; ARM32: ldrexb
589 ; ARM32: orr 589 ; ARM32: orr
590 ; ARM32: strexb 590 ; ARM32: strexb
591 ; ARM32: bne 591 ; ARM32: bne
592 ; ARM32: dmb 592 ; ARM32: dmb
593 593
594 ; Same test as above, but with a global address to test FakeUse issues. 594 ; Same test as above, but with a global address to test FakeUse issues.
595 define internal i32 @test_atomic_rmw_or_8_global(i32 %v) { 595 define internal i32 @test_atomic_rmw_or_8_global(i32 %v) {
596 entry: 596 entry:
597 %trunc = trunc i32 %v to i8 597 %trunc = trunc i32 %v to i8
598 %ptr = bitcast [1 x i8]* @Global8 to i8* 598 %ptr = bitcast [1 x i8]* @SzGlobal8 to i8*
599 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i8* %ptr, i8 %trunc, i32 6) 599 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i8* %ptr, i8 %trunc, i32 6)
600 %a_ext = zext i8 %a to i32 600 %a_ext = zext i8 %a to i32
601 ret i32 %a_ext 601 ret i32 %a_ext
602 } 602 }
603 ; CHECK-LABEL: test_atomic_rmw_or_8_global 603 ; CHECK-LABEL: test_atomic_rmw_or_8_global
604 ; ARM32-LABEL: test_atomic_rmw_or_8_global 604 ; ARM32-LABEL: test_atomic_rmw_or_8_global
605 ; ARM32: movw [[PTR:r[0-9]+]], #:lower16:Global8 605 ; ARM32: movw [[PTR:r[0-9]+]], #:lower16:SzGlobal8
606 ; ARM32: movt [[PTR]], #:upper16:Global8 606 ; ARM32: movt [[PTR]], #:upper16:SzGlobal8
607 ; ARM32: dmb 607 ; ARM32: dmb
608 ; ARM32: ldrexb r{{[0-9]+}}, {{[[]}}[[PTR]]{{[]]}} 608 ; ARM32: ldrexb r{{[0-9]+}}, {{[[]}}[[PTR]]{{[]]}}
609 ; ARM32: orr 609 ; ARM32: orr
610 ; ARM32: strexb 610 ; ARM32: strexb
611 ; ARM32: bne 611 ; ARM32: bne
612 ; ARM32: dmb 612 ; ARM32: dmb
613 613
614 define internal i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) { 614 define internal i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) {
615 entry: 615 entry:
616 %trunc = trunc i32 %v to i16 616 %trunc = trunc i32 %v to i16
(...skipping 12 matching lines...) Expand all
629 ; ARM32: ldrexh 629 ; ARM32: ldrexh
630 ; ARM32: orr 630 ; ARM32: orr
631 ; ARM32: strexh 631 ; ARM32: strexh
632 ; ARM32: bne 632 ; ARM32: bne
633 ; ARM32: dmb 633 ; ARM32: dmb
634 634
635 ; Same test as above, but with a global address to test FakeUse issues. 635 ; Same test as above, but with a global address to test FakeUse issues.
636 define internal i32 @test_atomic_rmw_or_16_global(i32 %v) { 636 define internal i32 @test_atomic_rmw_or_16_global(i32 %v) {
637 entry: 637 entry:
638 %trunc = trunc i32 %v to i16 638 %trunc = trunc i32 %v to i16
639 %ptr = bitcast [2 x i8]* @Global16 to i16* 639 %ptr = bitcast [2 x i8]* @SzGlobal16 to i16*
640 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i16* %ptr, i16 %trunc, i32 6) 640 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i16* %ptr, i16 %trunc, i32 6)
641 %a_ext = zext i16 %a to i32 641 %a_ext = zext i16 %a to i32
642 ret i32 %a_ext 642 ret i32 %a_ext
643 } 643 }
644 ; CHECK-LABEL: test_atomic_rmw_or_16_global 644 ; CHECK-LABEL: test_atomic_rmw_or_16_global
645 ; ARM32-LABEL: test_atomic_rmw_or_16_global 645 ; ARM32-LABEL: test_atomic_rmw_or_16_global
646 ; ARM32: movw [[PTR:r[0-9]+]], #:lower16:Global16 646 ; ARM32: movw [[PTR:r[0-9]+]], #:lower16:SzGlobal16
647 ; ARM32: movt [[PTR]], #:upper16:Global16 647 ; ARM32: movt [[PTR]], #:upper16:SzGlobal16
648 ; ARM32: dmb 648 ; ARM32: dmb
649 ; ARM32: ldrexh r{{[0-9]+}}, {{[[]}}[[PTR]]{{[]]}} 649 ; ARM32: ldrexh r{{[0-9]+}}, {{[[]}}[[PTR]]{{[]]}}
650 ; ARM32: orr 650 ; ARM32: orr
651 ; ARM32: strexh 651 ; ARM32: strexh
652 ; ARM32: bne 652 ; ARM32: bne
653 ; ARM32: dmb 653 ; ARM32: dmb
654 654
655 define internal i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) { 655 define internal i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) {
656 entry: 656 entry:
657 %ptr = inttoptr i32 %iptr to i32* 657 %ptr = inttoptr i32 %iptr to i32*
658 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6) 658 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6)
659 ret i32 %a 659 ret i32 %a
660 } 660 }
661 ; CHECK-LABEL: test_atomic_rmw_or_32 661 ; CHECK-LABEL: test_atomic_rmw_or_32
662 ; CHECK: mov eax,DWORD PTR 662 ; CHECK: mov eax,DWORD PTR
663 ; CHECK: or [[REG:e[^a].]] 663 ; CHECK: or [[REG:e[^a].]]
664 ; CHECK: lock cmpxchg DWORD PTR [e{{[^a].}}],[[REG]] 664 ; CHECK: lock cmpxchg DWORD PTR [e{{[^a].}}],[[REG]]
665 ; CHECK: jne 665 ; CHECK: jne
666 ; ARM32-LABEL: test_atomic_rmw_or_32 666 ; ARM32-LABEL: test_atomic_rmw_or_32
667 ; ARM32: dmb 667 ; ARM32: dmb
668 ; ARM32: ldrex 668 ; ARM32: ldrex
669 ; ARM32: orr 669 ; ARM32: orr
670 ; ARM32: strex 670 ; ARM32: strex
671 ; ARM32: bne 671 ; ARM32: bne
672 ; ARM32: dmb 672 ; ARM32: dmb
673 673
674 ; Same test as above, but with a global address to test FakeUse issues. 674 ; Same test as above, but with a global address to test FakeUse issues.
675 define internal i32 @test_atomic_rmw_or_32_global(i32 %v) { 675 define internal i32 @test_atomic_rmw_or_32_global(i32 %v) {
676 entry: 676 entry:
677 %ptr = bitcast [4 x i8]* @Global32 to i32* 677 %ptr = bitcast [4 x i8]* @SzGlobal32 to i32*
678 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6) 678 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6)
679 ret i32 %a 679 ret i32 %a
680 } 680 }
681 ; CHECK-LABEL: test_atomic_rmw_or_32_global 681 ; CHECK-LABEL: test_atomic_rmw_or_32_global
682 ; ARM32-LABEL: test_atomic_rmw_or_32_global 682 ; ARM32-LABEL: test_atomic_rmw_or_32_global
683 ; ARM32: movw [[PTR:r[0-9]+]], #:lower16:Global32 683 ; ARM32: movw [[PTR:r[0-9]+]], #:lower16:SzGlobal32
684 ; ARM32: movt [[PTR]], #:upper16:Global32 684 ; ARM32: movt [[PTR]], #:upper16:SzGlobal32
685 ; ARM32: dmb 685 ; ARM32: dmb
686 ; ARM32: ldrex r{{[0-9]+}}, {{[[]}}[[PTR]]{{[]]}} 686 ; ARM32: ldrex r{{[0-9]+}}, {{[[]}}[[PTR]]{{[]]}}
687 ; ARM32: orr 687 ; ARM32: orr
688 ; ARM32: strex 688 ; ARM32: strex
689 ; ARM32: bne 689 ; ARM32: bne
690 ; ARM32: dmb 690 ; ARM32: dmb
691 691
692 define internal i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) { 692 define internal i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) {
693 entry: 693 entry:
694 %ptr = inttoptr i32 %iptr to i64* 694 %ptr = inttoptr i32 %iptr to i64*
(...skipping 691 matching lines...) Expand 10 before | Expand all | Expand 10 after
1386 1386
1387 ; Test the liveness / register allocation properties of the xadd instruction. 1387 ; Test the liveness / register allocation properties of the xadd instruction.
1388 ; Make sure we model that the Src register is modified and therefore it can't 1388 ; Make sure we model that the Src register is modified and therefore it can't
1389 ; share a register with an overlapping live range, even if the result of the 1389 ; share a register with an overlapping live range, even if the result of the
1390 ; xadd instruction is unused. 1390 ; xadd instruction is unused.
1391 define internal void @test_xadd_regalloc() { 1391 define internal void @test_xadd_regalloc() {
1392 entry: 1392 entry:
1393 br label %body 1393 br label %body
1394 body: 1394 body:
1395 %i = phi i32 [ 1, %entry ], [ %i_plus_1, %body ] 1395 %i = phi i32 [ 1, %entry ], [ %i_plus_1, %body ]
1396 %g = bitcast [4 x i8]* @Global32 to i32* 1396 %g = bitcast [4 x i8]* @SzGlobal32 to i32*
1397 %unused = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %g, i32 %i, i32 6) 1397 %unused = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %g, i32 %i, i32 6)
1398 %i_plus_1 = add i32 %i, 1 1398 %i_plus_1 = add i32 %i, 1
1399 %cmp = icmp eq i32 %i_plus_1, 1001 1399 %cmp = icmp eq i32 %i_plus_1, 1001
1400 br i1 %cmp, label %done, label %body 1400 br i1 %cmp, label %done, label %body
1401 done: 1401 done:
1402 ret void 1402 ret void
1403 } 1403 }
1404 ; O2-LABEL: test_xadd_regalloc 1404 ; O2-LABEL: test_xadd_regalloc
1405 ;;; Some register will be used in the xadd instruction. 1405 ;;; Some register will be used in the xadd instruction.
1406 ; O2: lock xadd DWORD PTR {{.*}},[[REG:e..]] 1406 ; O2: lock xadd DWORD PTR {{.*}},[[REG:e..]]
1407 ;;; Make sure that register isn't used again, e.g. as the induction variable. 1407 ;;; Make sure that register isn't used again, e.g. as the induction variable.
1408 ; O2-NOT: [[REG]] 1408 ; O2-NOT: [[REG]]
1409 ; O2: ret 1409 ; O2: ret
1410 1410
1411 ; Do the same test for the xchg instruction instead of xadd. 1411 ; Do the same test for the xchg instruction instead of xadd.
1412 define internal void @test_xchg_regalloc() { 1412 define internal void @test_xchg_regalloc() {
1413 entry: 1413 entry:
1414 br label %body 1414 br label %body
1415 body: 1415 body:
1416 %i = phi i32 [ 1, %entry ], [ %i_plus_1, %body ] 1416 %i = phi i32 [ 1, %entry ], [ %i_plus_1, %body ]
1417 %g = bitcast [4 x i8]* @Global32 to i32* 1417 %g = bitcast [4 x i8]* @SzGlobal32 to i32*
1418 %unused = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %g, i32 %i, i32 6) 1418 %unused = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %g, i32 %i, i32 6)
1419 %i_plus_1 = add i32 %i, 1 1419 %i_plus_1 = add i32 %i, 1
1420 %cmp = icmp eq i32 %i_plus_1, 1001 1420 %cmp = icmp eq i32 %i_plus_1, 1001
1421 br i1 %cmp, label %done, label %body 1421 br i1 %cmp, label %done, label %body
1422 done: 1422 done:
1423 ret void 1423 ret void
1424 } 1424 }
1425 ; O2-LABEL: test_xchg_regalloc 1425 ; O2-LABEL: test_xchg_regalloc
1426 ;;; Some register will be used in the xchg instruction. 1426 ;;; Some register will be used in the xchg instruction.
1427 ; O2: xchg DWORD PTR {{.*}},[[REG:e..]] 1427 ; O2: xchg DWORD PTR {{.*}},[[REG:e..]]
1428 ;;; Make sure that register isn't used again, e.g. as the induction variable. 1428 ;;; Make sure that register isn't used again, e.g. as the induction variable.
1429 ; O2-NOT: [[REG]] 1429 ; O2-NOT: [[REG]]
1430 ; O2: ret 1430 ; O2: ret
1431 1431
1432 ; Same test for cmpxchg. 1432 ; Same test for cmpxchg.
1433 define internal void @test_cmpxchg_regalloc() { 1433 define internal void @test_cmpxchg_regalloc() {
1434 entry: 1434 entry:
1435 br label %body 1435 br label %body
1436 body: 1436 body:
1437 %i = phi i32 [ 1, %entry ], [ %i_plus_1, %body ] 1437 %i = phi i32 [ 1, %entry ], [ %i_plus_1, %body ]
1438 %g = bitcast [4 x i8]* @Global32 to i32* 1438 %g = bitcast [4 x i8]* @SzGlobal32 to i32*
1439 %unused = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %g, i32 %i, i32 %i, i32 6, i32 6) 1439 %unused = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %g, i32 %i, i32 %i, i32 6, i32 6)
1440 %i_plus_1 = add i32 %i, 1 1440 %i_plus_1 = add i32 %i, 1
1441 %cmp = icmp eq i32 %i_plus_1, 1001 1441 %cmp = icmp eq i32 %i_plus_1, 1001
1442 br i1 %cmp, label %done, label %body 1442 br i1 %cmp, label %done, label %body
1443 done: 1443 done:
1444 ret void 1444 ret void
1445 } 1445 }
1446 ; O2-LABEL: test_cmpxchg_regalloc 1446 ; O2-LABEL: test_cmpxchg_regalloc
1447 ;;; eax and some other register will be used in the cmpxchg instruction. 1447 ;;; eax and some other register will be used in the cmpxchg instruction.
1448 ; O2: lock cmpxchg DWORD PTR {{.*}},[[REG:e..]] 1448 ; O2: lock cmpxchg DWORD PTR {{.*}},[[REG:e..]]
1449 ;;; Make sure eax isn't used again, e.g. as the induction variable. 1449 ;;; Make sure eax isn't used again, e.g. as the induction variable.
1450 ; O2-NOT: eax 1450 ; O2-NOT: eax
1451 ; O2: ret 1451 ; O2: ret
1452 1452
1453 ; Same test for cmpxchg8b. 1453 ; Same test for cmpxchg8b.
1454 define internal void @test_cmpxchg8b_regalloc() { 1454 define internal void @test_cmpxchg8b_regalloc() {
1455 entry: 1455 entry:
1456 br label %body 1456 br label %body
1457 body: 1457 body:
1458 %i = phi i32 [ 1, %entry ], [ %i_plus_1, %body ] 1458 %i = phi i32 [ 1, %entry ], [ %i_plus_1, %body ]
1459 %g = bitcast [8 x i8]* @Global64 to i64* 1459 %g = bitcast [8 x i8]* @SzGlobal64 to i64*
1460 %i_64 = zext i32 %i to i64 1460 %i_64 = zext i32 %i to i64
1461 %unused = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %g, i64 %i_64, i64 %i_64, i32 6, i32 6) 1461 %unused = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %g, i64 %i_64, i64 %i_64, i32 6, i32 6)
1462 %i_plus_1 = add i32 %i, 1 1462 %i_plus_1 = add i32 %i, 1
1463 %cmp = icmp eq i32 %i_plus_1, 1001 1463 %cmp = icmp eq i32 %i_plus_1, 1001
1464 br i1 %cmp, label %done, label %body 1464 br i1 %cmp, label %done, label %body
1465 done: 1465 done:
1466 ret void 1466 ret void
1467 } 1467 }
1468 ; O2-LABEL: test_cmpxchg8b_regalloc 1468 ; O2-LABEL: test_cmpxchg8b_regalloc
1469 ;;; eax and some other register will be used in the cmpxchg instruction. 1469 ;;; eax and some other register will be used in the cmpxchg instruction.
1470 ; O2: lock cmpxchg8b QWORD PTR 1470 ; O2: lock cmpxchg8b QWORD PTR
1471 ;;; Make sure eax/ecx/edx/ebx aren't used again, e.g. as the induction variable. 1471 ;;; Make sure eax/ecx/edx/ebx aren't used again, e.g. as the induction variable.
1472 ; O2-NOT: {{eax|ecx|edx|ebx}} 1472 ; O2-NOT: {{eax|ecx|edx|ebx}}
1473 ; O2: pop ebx 1473 ; O2: pop ebx
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698