OLD | NEW |
1 ; This tests the NaCl intrinsics not related to atomic operations. | 1 ; This tests the NaCl intrinsics not related to atomic operations. |
2 | 2 |
3 ; RUN: %llvm2ice -O2 --verbose none %s | FileCheck %s | 3 ; RUN: %llvm2ice -O2 --verbose none %s | FileCheck %s |
4 ; RUN: %llvm2ice -Om1 --verbose none %s | FileCheck %s | 4 ; RUN: %llvm2ice -Om1 --verbose none %s | FileCheck %s |
5 | 5 |
6 ; Do another run w/ O2 and a different check-prefix (otherwise O2 and Om1 | 6 ; Do another run w/ O2 and a different check-prefix (otherwise O2 and Om1 |
7 ; share the same "CHECK" prefix). This separate run helps check that | 7 ; share the same "CHECK" prefix). This separate run helps check that |
8 ; some code is optimized out. | 8 ; some code is optimized out. |
9 ; RUN: %llvm2ice -O2 --verbose none %s | FileCheck %s --check-prefix=CHECKO2REM | 9 ; RUN: %llvm2ice -O2 --verbose none %s | FileCheck %s --check-prefix=CHECKO2REM |
10 | 10 |
(...skipping 13 matching lines...) |
24 declare void @llvm.trap() | 24 declare void @llvm.trap() |
25 declare i16 @llvm.bswap.i16(i16) | 25 declare i16 @llvm.bswap.i16(i16) |
26 declare i32 @llvm.bswap.i32(i32) | 26 declare i32 @llvm.bswap.i32(i32) |
27 declare i64 @llvm.bswap.i64(i64) | 27 declare i64 @llvm.bswap.i64(i64) |
28 declare i32 @llvm.ctlz.i32(i32, i1) | 28 declare i32 @llvm.ctlz.i32(i32, i1) |
29 declare i64 @llvm.ctlz.i64(i64, i1) | 29 declare i64 @llvm.ctlz.i64(i64, i1) |
30 declare i32 @llvm.cttz.i32(i32, i1) | 30 declare i32 @llvm.cttz.i32(i32, i1) |
31 declare i64 @llvm.cttz.i64(i64, i1) | 31 declare i64 @llvm.cttz.i64(i64, i1) |
32 declare i32 @llvm.ctpop.i32(i32) | 32 declare i32 @llvm.ctpop.i32(i32) |
33 declare i64 @llvm.ctpop.i64(i64) | 33 declare i64 @llvm.ctpop.i64(i64) |
| 34 declare i8* @llvm.stacksave() |
| 35 declare void @llvm.stackrestore(i8*) |
34 | 36 |
35 define i32 @test_nacl_read_tp() { | 37 define i32 @test_nacl_read_tp() { |
36 entry: | 38 entry: |
37 %ptr = call i8* @llvm.nacl.read.tp() | 39 %ptr = call i8* @llvm.nacl.read.tp() |
38 %__1 = ptrtoint i8* %ptr to i32 | 40 %__1 = ptrtoint i8* %ptr to i32 |
39 ret i32 %__1 | 41 ret i32 %__1 |
40 } | 42 } |
41 ; CHECK-LABEL: test_nacl_read_tp | 43 ; CHECK-LABEL: test_nacl_read_tp |
42 ; CHECK: mov e{{.*}}, dword ptr gs:[0] | 44 ; CHECK: mov e{{.*}}, dword ptr gs:[0] |
43 ; CHECKO2REM-LABEL: test_nacl_read_tp | 45 ; CHECKO2REM-LABEL: test_nacl_read_tp |
(...skipping 343 matching lines...) |
387 entry: | 389 entry: |
388 %r_i64 = call i64 @llvm.ctpop.i64(i64 %x) | 390 %r_i64 = call i64 @llvm.ctpop.i64(i64 %x) |
389 %r = trunc i64 %r_i64 to i32 | 391 %r = trunc i64 %r_i64 to i32 |
390 ret i32 %r | 392 ret i32 %r |
391 } | 393 } |
392 ; If there is a trunc, then the mov {{.*}}, 0 is dead and gets optimized out. | 394 ; If there is a trunc, then the mov {{.*}}, 0 is dead and gets optimized out. |
393 ; CHECKO2REM-LABEL: test_popcount_64_ret_i32 | 395 ; CHECKO2REM-LABEL: test_popcount_64_ret_i32 |
394 ; CHECKO2REM: call __popcountdi2 | 396 ; CHECKO2REM: call __popcountdi2 |
395 ; CHECKO2REM-NOT: mov {{.*}}, 0 | 397 ; CHECKO2REM-NOT: mov {{.*}}, 0 |
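; A rough sketch of the lowering the comment above describes, assuming the
; usual x86-32 calling convention (register names here are illustrative only,
; not something the test pins down):
;   call __popcountdi2   ; 32-bit popcount result comes back in eax
;   mov ecx, 0           ; upper half of the widened i64 result
; Once the result is truncated back to i32, only the low half is used, so the
; zeroing mov is dead and -O2 removes it, which is what CHECKO2REM-NOT checks.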
396 | 398 |
| 399 define void @test_stacksave_noalloca() { |
| 400 entry: |
| 401 %sp = call i8* @llvm.stacksave() |
| 402 call void @llvm.stackrestore(i8* %sp) |
| 403 ret void |
| 404 } |
| 405 ; CHECK-LABEL: test_stacksave_noalloca |
| 406 ; CHECK: mov {{.*}}, esp |
| 407 ; CHECK: mov esp, {{.*}} |
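; A minimal sketch of the two moves the CHECK lines above are matching; only
; the use of esp is checked, the scratch register here is an assumption:
;   mov eax, esp         ; llvm.stacksave: capture the current stack pointer
;   mov esp, eax         ; llvm.stackrestore: put it back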
| 408 |
| 409 declare i32 @foo(i32 %x) |
| 410 |
| 411 define void @test_stacksave_multiple(i32 %x) { |
| 412 entry: |
| 413 %x_4 = mul i32 %x, 4 |
| 414 %sp1 = call i8* @llvm.stacksave() |
| 415 %tmp1 = alloca i8, i32 %x_4, align 4 |
| 416 |
| 417 %sp2 = call i8* @llvm.stacksave() |
| 418 %tmp2 = alloca i8, i32 %x_4, align 4 |
| 419 |
| 420 %y = call i32 @foo(i32 %x) |
| 421 |
| 422 %sp3 = call i8* @llvm.stacksave() |
| 423 %tmp3 = alloca i8, i32 %x_4, align 4 |
| 424 |
| 425 %__9 = bitcast i8* %tmp1 to i32* |
| 426 store i32 %y, i32* %__9, align 1 |
| 427 |
| 428 %__10 = bitcast i8* %tmp2 to i32* |
| 429 store i32 %x, i32* %__10, align 1 |
| 430 |
| 431 %__11 = bitcast i8* %tmp3 to i32* |
| 432 store i32 %x, i32* %__11, align 1 |
| 433 |
| 434 call void @llvm.stackrestore(i8* %sp1) |
| 435 ret void |
| 436 } |
| 437 ; CHECK-LABEL: test_stacksave_multiple |
| 438 ; At least 3 copies of esp, but probably more from having to do the allocas. |
| 439 ; CHECK: mov {{.*}}, esp |
| 440 ; CHECK: mov {{.*}}, esp |
| 441 ; CHECK: mov {{.*}}, esp |
| 442 ; CHECK: mov esp, {{.*}} |
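; A rough sketch of why at least three "mov {{.*}}, esp" are expected here:
; each llvm.stacksave copies esp into a register, and each dynamic alloca
; adjusts esp as well (the sequence and registers below are illustrative, not
; the exact llvm2ice output):
;   mov eax, esp         ; %sp1
;   sub esp, <size>      ; alloca %tmp1 (likewise for %tmp2 and %tmp3)
;   mov ecx, esp         ; %sp2, and later %sp3
;   ...
;   mov esp, eax         ; stackrestore(%sp1) frees all three allocations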
397 | 443 |
398 ; ERRORS-NOT: ICE translation error | 444 ; ERRORS-NOT: ICE translation error |
399 ; DUMP-NOT: SZ | 445 ; DUMP-NOT: SZ |