OLD | NEW |
1 ; RUN: pnacl-llc -mtriple=armv7-unknown-nacl -filetype=obj %s -o - \ | 1 ; RUN: pnacl-llc -mtriple=armv7-unknown-nacl -filetype=obj %s -o - \ |
2 ; RUN: | llvm-objdump -disassemble -triple armv7 - | FileCheck %s | 2 ; RUN: | llvm-objdump -disassemble -triple armv7 - | FileCheck %s |
3 | 3 |
4 define i32 @foo(i32 %aa, i32 %bb) nounwind { | 4 define i32 @foo(i32 %aa, i32 %bb) nounwind { |
5 entry: | 5 entry: |
6 %aa.addr = alloca i32, align 4 | 6 %aa.addr = alloca i32, align 4 |
7 %bb.addr = alloca i32, align 4 | 7 %bb.addr = alloca i32, align 4 |
8 %cc = alloca i32, align 4 | 8 %cc = alloca i32, align 4 |
9 %dd = alloca i32, align 4 | 9 %dd = alloca i32, align 4 |
10 store i32 %aa, i32* %aa.addr, align 4 | 10 store i32 %aa, i32* %aa.addr, align 4 |
11 store i32 %bb, i32* %bb.addr, align 4 | 11 store i32 %bb, i32* %bb.addr, align 4 |
12 %0 = load i32* %aa.addr, align 4 | 12 %0 = load i32, i32* %aa.addr, align 4 |
13 %1 = load i32* %bb.addr, align 4 | 13 %1 = load i32, i32* %bb.addr, align 4 |
14 %mul = mul nsw i32 %0, %1 | 14 %mul = mul nsw i32 %0, %1 |
15 store i32 %mul, i32* %cc, align 4 | 15 store i32 %mul, i32* %cc, align 4 |
16 %sub = sub nsw i32 %mul, %1 | 16 %sub = sub nsw i32 %mul, %1 |
17 store i32 %sub, i32* %dd, align 4 | 17 store i32 %sub, i32* %dd, align 4 |
18 %2 = load i32* %dd, align 4 | 18 %2 = load i32, i32* %dd, align 4 |
19 ret i32 %2 | 19 ret i32 %2 |
20 | 20 |
21 ; This checks two things: | 21 ; This checks two things: |
22 ; 1. bx lr is sandboxed by prepending a bic | 22 ; 1. bx lr is sandboxed by prepending a bic |
23 ; 2. The bic/bx pair doesn't straddle a 16-byte bundle boundary, hence the nop | 23 ; 2. The bic/bx pair doesn't straddle a 16-byte bundle boundary, hence the nop |
24 ; CHECK: nop | 24 ; CHECK: nop |
25 ; CHECK-NEXT: {{.*}}0:{{.*}}bic lr, lr, #3221225487 | 25 ; CHECK-NEXT: {{.*}}0:{{.*}}bic lr, lr, #-1073741809 |
26 ; CHECK-NEXT: bx lr | 26 ; CHECK-NEXT: bx lr |
27 | 27 |
28 } | 28 } |
29 | 29 |
30 define i32 @bar(i32 %aa, i32 %bb) nounwind { | 30 define i32 @bar(i32 %aa, i32 %bb) nounwind { |
31 entry: | 31 entry: |
32 | 32 |
33 ; Check that the function start is padded with nops to start at a bundle | 33 ; Check that the function start is padded with nops to start at a bundle |
34 ; boundary | 34 ; boundary |
35 ; CHECK: nop | 35 ; CHECK: nop |
36 ; CHECK-LABEL: bar: | 36 ; CHECK-LABEL: bar: |
37 ; CHECK-NEXT: {{.*}}0:{{.*}}push | 37 ; CHECK-NEXT: {{.*}}0:{{.*}}push |
38 | 38 |
39 %aa.addr = alloca i32, align 4 | 39 %aa.addr = alloca i32, align 4 |
40 %bb.addr = alloca i32, align 4 | 40 %bb.addr = alloca i32, align 4 |
41 store i32 %aa, i32* %aa.addr, align 4 | 41 store i32 %aa, i32* %aa.addr, align 4 |
42 store i32 %bb, i32* %bb.addr, align 4 | 42 store i32 %bb, i32* %bb.addr, align 4 |
43 %0 = load i32* %aa.addr, align 4 | 43 %0 = load i32, i32* %aa.addr, align 4 |
44 %mul = mul nsw i32 %0, 19 | 44 %mul = mul nsw i32 %0, 19 |
45 %call = call i32 @foo(i32 %mul, i32 7) | 45 %call = call i32 @foo(i32 %mul, i32 7) |
46 | 46 |
47 ; Check that the call is padded to be at the end of a bundle | 47 ; Check that the call is padded to be at the end of a bundle |
48 ; CHECK: {{.*}}8:{{.*}}nop | 48 ; CHECK: {{.*}}8:{{.*}}nop |
49 ; CHECK-NEXT: {{.*}}c:{{.*}}bl | 49 ; CHECK-NEXT: {{.*}}c:{{.*}}bl |
50 | 50 |
51 %1 = load i32* %bb.addr, align 4 | 51 %1 = load i32, i32* %bb.addr, align 4 |
52 %mul1 = mul nsw i32 %1, 31 | 52 %mul1 = mul nsw i32 %1, 31 |
53 %2 = load i32* %bb.addr, align 4 | 53 %2 = load i32, i32* %bb.addr, align 4 |
54 %div = sdiv i32 %2, 7 | 54 %div = sdiv i32 %2, 7 |
55 %add = add nsw i32 %div, 191 | 55 %add = add nsw i32 %div, 191 |
56 %call2 = call i32 @foo(i32 %mul1, i32 %add) | 56 %call2 = call i32 @foo(i32 %mul1, i32 %add) |
57 | 57 |
58 ; Check that the call is padded to be at the end of a bundle | 58 ; Check that the call is padded to be at the end of a bundle |
59 ; CHECK: {{.*}}8:{{.*}}nop | 59 ; CHECK: {{.*}}8:{{.*}}nop |
60 ; CHECK-NEXT: {{.*}}c:{{.*}}bl | 60 ; CHECK-NEXT: {{.*}}c:{{.*}}bl |
61 | 61 |
62 %add3 = add nsw i32 %call, %call2 | 62 %add3 = add nsw i32 %call, %call2 |
63 ret i32 %add3 | 63 ret i32 %add3 |
64 } | 64 } |
65 | 65 |
OLD | NEW |