OLD | NEW |
1 ; RUN: pnacl-llc -mtriple=armv7-unknown-nacl -mattr=+neon -filetype=obj %s -o - \ | 1 ; RUN: pnacl-llc -mtriple=armv7-unknown-nacl -mattr=+neon -filetype=obj %s -o - \ |
2 ; RUN: | llvm-objdump -disassemble -triple armv7 - | FileCheck %s | 2 ; RUN: | llvm-objdump -disassemble -triple armv7 - | FileCheck %s |
3 | 3 |
4 define <8 x i8> @vld1i8(i8* %A) nounwind { | 4 define <8 x i8> @vld1i8(i8* %A) nounwind { |
5 %tmp1 = call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %A, i32 16) | 5 %tmp1 = call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %A, i32 16) |
6 ; CHECK: bic r0, r0, #3221225472 | 6 ; CHECK: bic r0, r0, #-1073741824 |
7 ; CHECK-NEXT: vld1.8 {{{d[0-9]+}}}, [r0:64] | 7 ; CHECK-NEXT: vld1.8 {{{d[0-9]+}}}, [r0:64] |
8 ret <8 x i8> %tmp1 | 8 ret <8 x i8> %tmp1 |
9 } | 9 } |
10 | 10 |
11 define <4 x i16> @vld1i16(i16* %A) nounwind { | 11 define <4 x i16> @vld1i16(i16* %A) nounwind { |
12 %tmp0 = bitcast i16* %A to i8* | 12 %tmp0 = bitcast i16* %A to i8* |
13 %tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 1) | 13 %tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 1) |
14 ; CHECK: bic r0, r0, #3221225472 | 14 ; CHECK: bic r0, r0, #-1073741824 |
15 ; CHECK-NEXT: vld1.16 {{{d[0-9]+}}}, [r0] | 15 ; CHECK-NEXT: vld1.16 {{{d[0-9]+}}}, [r0] |
16 ret <4 x i16> %tmp1 | 16 ret <4 x i16> %tmp1 |
17 } | 17 } |
18 | 18 |
19 define <2 x i32> @vld1i32(i32* %A) nounwind { | 19 define <2 x i32> @vld1i32(i32* %A) nounwind { |
20 %tmp0 = bitcast i32* %A to i8* | 20 %tmp0 = bitcast i32* %A to i8* |
21 %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 1) | 21 %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 1) |
22 ; CHECK: bic r0, r0, #3221225472 | 22 ; CHECK: bic r0, r0, #-1073741824 |
23 ; CHECK-NEXT: vld1.32 {{{d[0-9]+}}}, [r0] | 23 ; CHECK-NEXT: vld1.32 {{{d[0-9]+}}}, [r0] |
24 ret <2 x i32> %tmp1 | 24 ret <2 x i32> %tmp1 |
25 } | 25 } |
26 | 26 |
27 ; Insert useless arguments here just for the sake of moving | 27 ; Insert useless arguments here just for the sake of moving |
28 ; %A further down the rN chain (testing how sandboxing detects | 28 ; %A further down the rN chain (testing how sandboxing detects |
29 ; the correct register and not just the default r0) | 29 ; the correct register and not just the default r0) |
30 define <1 x i64> @vld1i64(i32 %foo, i32 %bar, i32 %baz, | 30 define <1 x i64> @vld1i64(i32 %foo, i32 %bar, i32 %baz, |
31 i64* %A) nounwind { | 31 i64* %A) nounwind { |
32 %tmp0 = bitcast i64* %A to i8* | 32 %tmp0 = bitcast i64* %A to i8* |
33 %tmp1 = call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %tmp0, i32 1) | 33 %tmp1 = call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %tmp0, i32 1) |
34 ; CHECK: bic r3, r3, #3221225472 | 34 ; CHECK: bic r3, r3, #-1073741824 |
35 ; CHECK-NEXT: vld1.64 {{{d[0-9]+}}}, [r3] | 35 ; CHECK-NEXT: vld1.64 {{{d[0-9]+}}}, [r3] |
36 ret <1 x i64> %tmp1 | 36 ret <1 x i64> %tmp1 |
37 } | 37 } |
38 | 38 |
39 define <16 x i8> @vld1Qi8(i8* %A) nounwind { | 39 define <16 x i8> @vld1Qi8(i8* %A) nounwind { |
40 %tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A, i32 8) | 40 %tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A, i32 8) |
41 ; CHECK: bic r0, r0, #3221225472 | 41 ; CHECK: bic r0, r0, #-1073741824 |
42 ; CHECK-NEXT: vld1.8 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:64] | 42 ; CHECK-NEXT: vld1.8 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:64] |
43 ret <16 x i8> %tmp1 | 43 ret <16 x i8> %tmp1 |
44 } | 44 } |
45 | 45 |
46 define <8 x i16> @vld1Qi16(i16* %A) nounwind { | 46 define <8 x i16> @vld1Qi16(i16* %A) nounwind { |
47 %tmp0 = bitcast i16* %A to i8* | 47 %tmp0 = bitcast i16* %A to i8* |
48 %tmp1 = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %tmp0, i32 32) | 48 %tmp1 = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %tmp0, i32 32) |
49 ; CHECK: bic r0, r0, #3221225472 | 49 ; CHECK: bic r0, r0, #-1073741824 |
50 ; CHECK-NEXT: vld1.16 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128] | 50 ; CHECK-NEXT: vld1.16 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0:128] |
51 ret <8 x i16> %tmp1 | 51 ret <8 x i16> %tmp1 |
52 } | 52 } |
53 | 53 |
54 define <4 x i32> @vld1Qi32(i32* %A) nounwind { | 54 define <4 x i32> @vld1Qi32(i32* %A) nounwind { |
55 %tmp0 = bitcast i32* %A to i8* | 55 %tmp0 = bitcast i32* %A to i8* |
56 %tmp1 = call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %tmp0, i32 1) | 56 %tmp1 = call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %tmp0, i32 1) |
57 ; CHECK: bic r0, r0, #3221225472 | 57 ; CHECK: bic r0, r0, #-1073741824 |
58 ; CHECK-NEXT: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0] | 58 ; CHECK-NEXT: vld1.32 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0] |
59 ret <4 x i32> %tmp1 | 59 ret <4 x i32> %tmp1 |
60 } | 60 } |
61 | 61 |
62 define <2 x i64> @vld1Qi64(i64* %A) nounwind { | 62 define <2 x i64> @vld1Qi64(i64* %A) nounwind { |
63 %tmp0 = bitcast i64* %A to i8* | 63 %tmp0 = bitcast i64* %A to i8* |
64 %tmp1 = call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %tmp0, i32 1) | 64 %tmp1 = call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %tmp0, i32 1) |
65 ; CHECK: bic r0, r0, #3221225472 | 65 ; CHECK: bic r0, r0, #-1073741824 |
66 ; CHECK-NEXT: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0] | 66 ; CHECK-NEXT: vld1.64 {{{d[0-9]+}}, {{d[0-9]+}}}, [r0] |
67 ret <2 x i64> %tmp1 | 67 ret <2 x i64> %tmp1 |
68 } | 68 } |
69 | 69 |
70 declare <8 x i8> @llvm.arm.neon.vld1.v8i8(i8*, i32) nounwind readonly | 70 declare <8 x i8> @llvm.arm.neon.vld1.v8i8(i8*, i32) nounwind readonly |
71 declare <4 x i16> @llvm.arm.neon.vld1.v4i16(i8*, i32) nounwind readonly | 71 declare <4 x i16> @llvm.arm.neon.vld1.v4i16(i8*, i32) nounwind readonly |
72 declare <2 x i32> @llvm.arm.neon.vld1.v2i32(i8*, i32) nounwind readonly | 72 declare <2 x i32> @llvm.arm.neon.vld1.v2i32(i8*, i32) nounwind readonly |
73 declare <2 x float> @llvm.arm.neon.vld1.v2f32(i8*, i32) nounwind readonly | 73 declare <2 x float> @llvm.arm.neon.vld1.v2f32(i8*, i32) nounwind readonly |
74 declare <1 x i64> @llvm.arm.neon.vld1.v1i64(i8*, i32) nounwind readonly | 74 declare <1 x i64> @llvm.arm.neon.vld1.v1i64(i8*, i32) nounwind readonly |
75 | 75 |
76 declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8*, i32) nounwind readonly | 76 declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8*, i32) nounwind readonly |
77 declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32) nounwind readonly | 77 declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32) nounwind readonly |
78 declare <4 x i32> @llvm.arm.neon.vld1.v4i32(i8*, i32) nounwind readonly | 78 declare <4 x i32> @llvm.arm.neon.vld1.v4i32(i8*, i32) nounwind readonly |
79 declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly | 79 declare <4 x float> @llvm.arm.neon.vld1.v4f32(i8*, i32) nounwind readonly |
80 declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*, i32) nounwind readonly | 80 declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*, i32) nounwind readonly |
81 | 81 |
82 define <16 x i8> @vld1Qi8_update(i8** %ptr) nounwind { | 82 define <16 x i8> @vld1Qi8_update(i8** %ptr) nounwind { |
83 %A = load i8** %ptr | 83 %A = load i8*, i8** %ptr |
84 %tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A, i32 8) | 84 %tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A, i32 8) |
85 ; CHECK: bic r1, r1, #3221225472 | 85 ; CHECK: bic r1, r1, #-1073741824 |
86 ; CHECK-NEXT: vld1.8 {{{d[0-9]+}}, {{d[0-9]+}}}, [r1:64]! | 86 ; CHECK-NEXT: vld1.8 {{{d[0-9]+}}, {{d[0-9]+}}}, [r1:64]! |
87 %tmp2 = getelementptr i8* %A, i32 16 | 87 %tmp2 = getelementptr i8, i8* %A, i32 16 |
88 store i8* %tmp2, i8** %ptr | 88 store i8* %tmp2, i8** %ptr |
89 ret <16 x i8> %tmp1 | 89 ret <16 x i8> %tmp1 |
90 } | 90 } |
91 | 91 |
92 define <4 x i16> @vld1i16_update(i16** %ptr) nounwind { | 92 define <4 x i16> @vld1i16_update(i16** %ptr) nounwind { |
93 %A = load i16** %ptr | 93 %A = load i16*, i16** %ptr |
94 %tmp0 = bitcast i16* %A to i8* | 94 %tmp0 = bitcast i16* %A to i8* |
95 %tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 1) | 95 %tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 1) |
96 ; CHECK: bic r1, r1, #3221225472 | 96 ; CHECK: bic r1, r1, #-1073741824 |
97 ; CHECK-NEXT: vld1.16 {{{d[0-9]+}}}, [r1]! | 97 ; CHECK-NEXT: vld1.16 {{{d[0-9]+}}}, [r1]! |
98 %tmp2 = getelementptr i16* %A, i32 4 | 98 %tmp2 = getelementptr i16, i16* %A, i32 4 |
99 store i16* %tmp2, i16** %ptr | 99 store i16* %tmp2, i16** %ptr |
100 ret <4 x i16> %tmp1 | 100 ret <4 x i16> %tmp1 |
101 } | 101 } |
102 | 102 |
103 define <2 x i32> @vld1i32_update(i32** %ptr, i32 %inc) nounwind { | 103 define <2 x i32> @vld1i32_update(i32** %ptr, i32 %inc) nounwind { |
104 %A = load i32** %ptr | 104 %A = load i32*, i32** %ptr |
105 %tmp0 = bitcast i32* %A to i8* | 105 %tmp0 = bitcast i32* %A to i8* |
106 %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 1) | 106 %tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 1) |
107 ; CHECK: bic r2, r2, #3221225472 | 107 ; CHECK: bic r2, r2, #-1073741824 |
108 ; CHECK-NEXT: vld1.32 {{{d[0-9]+}}}, [r2], r1 | 108 ; CHECK-NEXT: vld1.32 {{{d[0-9]+}}}, [r2], r1 |
109 %tmp2 = getelementptr i32* %A, i32 %inc | 109 %tmp2 = getelementptr i32, i32* %A, i32 %inc |
110 store i32* %tmp2, i32** %ptr | 110 store i32* %tmp2, i32** %ptr |
111 ret <2 x i32> %tmp1 | 111 ret <2 x i32> %tmp1 |
112 } | 112 } |
113 | 113 |
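
A minimal sketch, in C and not part of the test itself, of why the CHECK lines above change from #3221225472 to #-1073741824: both spellings name the same 32-bit BIC immediate 0xC0000000, printed unsigned by older llvm-objdump and signed by newer ones. The example address and the 1 GiB figure below are illustrative assumptions, not values taken from the test.

#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* NaCl ARM sandboxing masks the address register with
       bic rN, rN, #0xC0000000 before each NEON load.        */
    uint32_t mask_unsigned = 3221225472u;   /* 0xC0000000, old objdump rendering */
    int32_t  mask_signed   = -1073741824;   /* same bit pattern, new rendering   */

    /* Identical encodings: both print as 0xC0000000. */
    printf("unsigned: 0x%08X\n", mask_unsigned);
    printf("signed:   0x%08X\n", (uint32_t)mask_signed);

    /* Clearing the top two bits keeps the address below 0x40000000 (1 GiB). */
    uint32_t addr = 0xDEADBEEFu;             /* hypothetical example address */
    printf("masked:   0x%08X\n", addr & ~mask_unsigned);   /* 0x1EADBEEF */
    return 0;
}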