| OLD | NEW |
| 1 ; RUN: pnacl-llc -mtriple=armv7-unknown-nacl -mattr=+neon -filetype=obj %s -o - \ | 1 ; RUN: pnacl-llc -mtriple=armv7-unknown-nacl -mattr=+neon -filetype=obj %s -o - \ |
| 2 ; RUN: | llvm-objdump -disassemble -triple armv7 - | FileCheck %s | 2 ; RUN: | llvm-objdump -disassemble -triple armv7 - | FileCheck %s |
| 3 | 3 |
| 4 %struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> } | 4 %struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> } |
| 5 %struct.__neon_int4x16x2_t = type { <4 x i16>, <4 x i16> } | 5 %struct.__neon_int4x16x2_t = type { <4 x i16>, <4 x i16> } |
| 6 %struct.__neon_int2x32x2_t = type { <2 x i32>, <2 x i32> } | 6 %struct.__neon_int2x32x2_t = type { <2 x i32>, <2 x i32> } |
| 7 | 7 |
| 8 declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly | 8 declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly |
| 9 declare %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly | 9 declare %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly |
| 10 declare %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly | 10 declare %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly |
| 11 | 11 |
| 12 %struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> } | 12 %struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> } |
| 13 %struct.__neon_int16x4x3_t = type { <4 x i16>, <4 x i16>, <4 x i16> } | 13 %struct.__neon_int16x4x3_t = type { <4 x i16>, <4 x i16>, <4 x i16> } |
| 14 | 14 |
| 15 declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly | 15 declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly |
| 16 declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly | 16 declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly |
| 17 | 17 |
| 18 %struct.__neon_int16x4x4_t = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } | 18 %struct.__neon_int16x4x4_t = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } |
| 19 %struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } | 19 %struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } |
| 20 | 20 |
| 21 declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly | 21 declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32, i32) nounwind readonly |
| 22 declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly | 22 declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32) nounwind readonly |
| 23 | 23 |
| 24 define <8 x i8> @vld1dupi8(i32 %foo, i32 %bar, | 24 define <8 x i8> @vld1dupi8(i32 %foo, i32 %bar, |
| 25 i8* %A) nounwind { | 25 i8* %A) nounwind { |
| 26 %tmp1 = load i8* %A, align 8 | 26 %tmp1 = load i8, i8* %A, align 8 |
| 27 %tmp2 = insertelement <8 x i8> undef, i8 %tmp1, i32 0 | 27 %tmp2 = insertelement <8 x i8> undef, i8 %tmp1, i32 0 |
| 28 %tmp3 = shufflevector <8 x i8> %tmp2, <8 x i8> undef, <8 x i32> zeroinitializer | 28 %tmp3 = shufflevector <8 x i8> %tmp2, <8 x i8> undef, <8 x i32> zeroinitializer |
| 29 ; CHECK: bic r2, r2, #3221225472 | 29 ; CHECK: bic r2, r2, #-1073741824 |
| 30 ; CHECK-NEXT: vld1.8 {{{d[0-9]+\[\]}}}, [r2] | 30 ; CHECK-NEXT: vld1.8 {{{d[0-9]+\[\]}}}, [r2] |
| 31 ret <8 x i8> %tmp3 | 31 ret <8 x i8> %tmp3 |
| 32 } | 32 } |
| 33 | 33 |
| 34 define <4 x i16> @vld1dupi16(i16* %A) nounwind { | 34 define <4 x i16> @vld1dupi16(i16* %A) nounwind { |
| 35 %tmp1 = load i16* %A, align 8 | 35 %tmp1 = load i16, i16* %A, align 8 |
| 36 %tmp2 = insertelement <4 x i16> undef, i16 %tmp1, i32 0 | 36 %tmp2 = insertelement <4 x i16> undef, i16 %tmp1, i32 0 |
| 37 %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> undef, <4 x i32> zeroinitializer | 37 %tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> undef, <4 x i32> zeroinitializer |
| 38 ; CHECK: bic r0, r0, #3221225472 | 38 ; CHECK: bic r0, r0, #-1073741824 |
| 39 ; CHECK-NEXT: vld1.16 {{{d[0-9]+\[\]}}}, [r0:16] | 39 ; CHECK-NEXT: vld1.16 {{{d[0-9]+\[\]}}}, [r0:16] |
| 40 ret <4 x i16> %tmp3 | 40 ret <4 x i16> %tmp3 |
| 41 } | 41 } |
| 42 | 42 |
| 43 define <2 x i32> @vld1dupi32(i32* %A) nounwind { | 43 define <2 x i32> @vld1dupi32(i32* %A) nounwind { |
| 44 %tmp1 = load i32* %A, align 8 | 44 %tmp1 = load i32, i32* %A, align 8 |
| 45 %tmp2 = insertelement <2 x i32> undef, i32 %tmp1, i32 0 | 45 %tmp2 = insertelement <2 x i32> undef, i32 %tmp1, i32 0 |
| 46 %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> undef, <2 x i32> zeroinitializer | 46 %tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> undef, <2 x i32> zeroinitializer |
| 47 ; CHECK: bic r0, r0, #3221225472 | 47 ; CHECK: bic r0, r0, #-1073741824 |
| 48 ; CHECK-NEXT: vld1.32 {{{d[0-9]+\[\]}}}, [r0:32] | 48 ; CHECK-NEXT: vld1.32 {{{d[0-9]+\[\]}}}, [r0:32] |
| 49 ret <2 x i32> %tmp3 | 49 ret <2 x i32> %tmp3 |
| 50 } | 50 } |
| 51 | 51 |
| 52 define <16 x i8> @vld1dupQi8(i8* %A) nounwind { | 52 define <16 x i8> @vld1dupQi8(i8* %A) nounwind { |
| 53 %tmp1 = load i8* %A, align 8 | 53 %tmp1 = load i8, i8* %A, align 8 |
| 54 %tmp2 = insertelement <16 x i8> undef, i8 %tmp1, i32 0 | 54 %tmp2 = insertelement <16 x i8> undef, i8 %tmp1, i32 0 |
| 55 %tmp3 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <16 x i32> zeroinitializer | 55 %tmp3 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <16 x i32> zeroinitializer |
| 56 ; CHECK: bic r0, r0, #3221225472 | 56 ; CHECK: bic r0, r0, #-1073741824 |
| 57 ; CHECK-NEXT: vld1.8 {{{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}}, [r0] | 57 ; CHECK-NEXT: vld1.8 {{{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}}, [r0] |
| 58 ret <16 x i8> %tmp3 | 58 ret <16 x i8> %tmp3 |
| 59 } | 59 } |
| 60 | 60 |
| 61 define <8 x i8> @vld2dupi8(i8* %A) nounwind { | 61 define <8 x i8> @vld2dupi8(i8* %A) nounwind { |
| 62 %tmp0 = tail call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> undef, <8 x i8> undef, i32 0, i32 1) | 62 %tmp0 = tail call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> undef, <8 x i8> undef, i32 0, i32 1) |
| 63 ; CHECK: bic r0, r0, #3221225472 | 63 ; CHECK: bic r0, r0, #-1073741824 |
| 64 ; CHECK-NEXT: vld2.8 {{{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}}, [r0] | 64 ; CHECK-NEXT: vld2.8 {{{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}}, [r0] |
| 65 %tmp1 = extractvalue %struct.__neon_int8x8x2_t %tmp0, 0 | 65 %tmp1 = extractvalue %struct.__neon_int8x8x2_t %tmp0, 0 |
| 66 %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> zeroinitializer | 66 %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> zeroinitializer |
| 67 %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp0, 1 | 67 %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp0, 1 |
| 68 %tmp4 = shufflevector <8 x i8> %tmp3, <8 x i8> undef, <8 x i32> zeroinitializer | 68 %tmp4 = shufflevector <8 x i8> %tmp3, <8 x i8> undef, <8 x i32> zeroinitializer |
| 69 %tmp5 = add <8 x i8> %tmp2, %tmp4 | 69 %tmp5 = add <8 x i8> %tmp2, %tmp4 |
| 70 ret <8 x i8> %tmp5 | 70 ret <8 x i8> %tmp5 |
| 71 } | 71 } |
| 72 | 72 |
| 73 define <4 x i16> @vld2dupi16(i8* %A) nounwind { | 73 define <4 x i16> @vld2dupi16(i8* %A) nounwind { |
| 74 %tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i8* %A, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2) | 74 %tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16(i8* %A, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2) |
| 75 ; CHECK: bic r0, r0, #3221225472 | 75 ; CHECK: bic r0, r0, #-1073741824 |
| 76 ; CHECK-NEXT: vld2.16 {{{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}}, [r0] | 76 ; CHECK-NEXT: vld2.16 {{{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}}, [r0] |
| 77 %tmp1 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 0 | 77 %tmp1 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 0 |
| 78 %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer | 78 %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer |
| 79 %tmp3 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 1 | 79 %tmp3 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 1 |
| 80 %tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer | 80 %tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer |
| 81 %tmp5 = add <4 x i16> %tmp2, %tmp4 | 81 %tmp5 = add <4 x i16> %tmp2, %tmp4 |
| 82 ret <4 x i16> %tmp5 | 82 ret <4 x i16> %tmp5 |
| 83 } | 83 } |
| 84 | 84 |
| 85 define <2 x i32> @vld2dupi32(i8* %A) nounwind { | 85 define <2 x i32> @vld2dupi32(i8* %A) nounwind { |
| 86 %tmp0 = tail call %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i8* %A, <2 x i32> undef, <2 x i32> undef, i32 0, i32 16) | 86 %tmp0 = tail call %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32(i8* %A, <2 x i32> undef, <2 x i32> undef, i32 0, i32 16) |
| 87 ; CHECK: bic r0, r0, #3221225472 | 87 ; CHECK: bic r0, r0, #-1073741824 |
| 88 ; CHECK-NEXT: vld2.32 {{{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}}, [r0:64] | 88 ; CHECK-NEXT: vld2.32 {{{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}}, [r0:64] |
| 89 %tmp1 = extractvalue %struct.__neon_int2x32x2_t %tmp0, 0 | 89 %tmp1 = extractvalue %struct.__neon_int2x32x2_t %tmp0, 0 |
| 90 %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer | 90 %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer |
| 91 %tmp3 = extractvalue %struct.__neon_int2x32x2_t %tmp0, 1 | 91 %tmp3 = extractvalue %struct.__neon_int2x32x2_t %tmp0, 1 |
| 92 %tmp4 = shufflevector <2 x i32> %tmp3, <2 x i32> undef, <2 x i32> zeroinitializer | 92 %tmp4 = shufflevector <2 x i32> %tmp3, <2 x i32> undef, <2 x i32> zeroinitializer |
| 93 %tmp5 = add <2 x i32> %tmp2, %tmp4 | 93 %tmp5 = add <2 x i32> %tmp2, %tmp4 |
| 94 ret <2 x i32> %tmp5 | 94 ret <2 x i32> %tmp5 |
| 95 } | 95 } |
| 96 | 96 |
| 97 define <4 x i16> @vld3dupi16(i8* %A) nounwind { | 97 define <4 x i16> @vld3dupi16(i8* %A) nounwind { |
| 98 %tmp0 = tail call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8* %A, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 8) | 98 %tmp0 = tail call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8* %A, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 8) |
| 99 ; CHECK: bic r0, r0, #3221225472 | 99 ; CHECK: bic r0, r0, #-1073741824 |
| 100 ; CHECK-NEXT: vld3.16 {{{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}}, [r0] | 100 ; CHECK-NEXT: vld3.16 {{{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}}, [r0] |
| 101 %tmp1 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 0 | 101 %tmp1 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 0 |
| 102 %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer | 102 %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer |
| 103 %tmp3 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 1 | 103 %tmp3 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 1 |
| 104 %tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer | 104 %tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer |
| 105 %tmp5 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 2 | 105 %tmp5 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 2 |
| 106 %tmp6 = shufflevector <4 x i16> %tmp5, <4 x i16> undef, <4 x i32> zeroinitializer | 106 %tmp6 = shufflevector <4 x i16> %tmp5, <4 x i16> undef, <4 x i32> zeroinitializer |
| 107 %tmp7 = add <4 x i16> %tmp2, %tmp4 | 107 %tmp7 = add <4 x i16> %tmp2, %tmp4 |
| 108 %tmp8 = add <4 x i16> %tmp7, %tmp6 | 108 %tmp8 = add <4 x i16> %tmp7, %tmp6 |
| 109 ret <4 x i16> %tmp8 | 109 ret <4 x i16> %tmp8 |
| 110 } | 110 } |
| 111 | 111 |
| 112 define <2 x i32> @vld4dupi32(i8* %A) nounwind { | 112 define <2 x i32> @vld4dupi32(i8* %A) nounwind { |
| 113 %tmp0 = tail call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8* %A, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 8) | 113 %tmp0 = tail call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8* %A, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 8) |
| 114 ; CHECK: bic r0, r0, #3221225472 | 114 ; CHECK: bic r0, r0, #-1073741824 |
| 115 ; CHECK-NEXT: vld4.32 {{{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}}, [r0:64] | 115 ; CHECK-NEXT: vld4.32 {{{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}}, [r0:64] |
| 116 %tmp1 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 0 | 116 %tmp1 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 0 |
| 117 %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer | 117 %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer |
| 118 %tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 1 | 118 %tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 1 |
| 119 %tmp4 = shufflevector <2 x i32> %tmp3, <2 x i32> undef, <2 x i32> zeroinitializer | 119 %tmp4 = shufflevector <2 x i32> %tmp3, <2 x i32> undef, <2 x i32> zeroinitializer |
| 120 %tmp5 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 2 | 120 %tmp5 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 2 |
| 121 %tmp6 = shufflevector <2 x i32> %tmp5, <2 x i32> undef, <2 x i32> zeroinitializer | 121 %tmp6 = shufflevector <2 x i32> %tmp5, <2 x i32> undef, <2 x i32> zeroinitializer |
| 122 %tmp7 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 3 | 122 %tmp7 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 3 |
| 123 %tmp8 = shufflevector <2 x i32> %tmp7, <2 x i32> undef, <2 x i32> zeroinitializer | 123 %tmp8 = shufflevector <2 x i32> %tmp7, <2 x i32> undef, <2 x i32> zeroinitializer |
| 124 %tmp9 = add <2 x i32> %tmp2, %tmp4 | 124 %tmp9 = add <2 x i32> %tmp2, %tmp4 |
| 125 %tmp10 = add <2 x i32> %tmp6, %tmp8 | 125 %tmp10 = add <2 x i32> %tmp6, %tmp8 |
| 126 %tmp11 = add <2 x i32> %tmp9, %tmp10 | 126 %tmp11 = add <2 x i32> %tmp9, %tmp10 |
| 127 ret <2 x i32> %tmp11 | 127 ret <2 x i32> %tmp11 |
| 128 } | 128 } |
| 129 | 129 |
| 130 ;Check for a post-increment updating load. | 130 ;Check for a post-increment updating load. |
| 131 define <4 x i16> @vld4dupi16_update(i16** %ptr) nounwind { | 131 define <4 x i16> @vld4dupi16_update(i16** %ptr) nounwind { |
| 132 %A = load i16** %ptr | 132 %A = load i16*, i16** %ptr |
| 133 %A2 = bitcast i16* %A to i8* | 133 %A2 = bitcast i16* %A to i8* |
| 134 %tmp0 = tail call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8* %A2, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 1) | 134 %tmp0 = tail call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8* %A2, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 1) |
| 135 ; CHECK: bic r1, r1, #3221225472 | 135 ; CHECK: bic r1, r1, #-1073741824 |
| 136 ; CHECK-NEXT: vld4.16 {{{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}}, [r1]! | 136 ; CHECK-NEXT: vld4.16 {{{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}, {{d[0-9]+\[\]}}}, [r1]! |
| 137 %tmp1 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 0 | 137 %tmp1 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 0 |
| 138 %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer | 138 %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer |
| 139 %tmp3 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 1 | 139 %tmp3 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 1 |
| 140 %tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer | 140 %tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer |
| 141 %tmp5 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 2 | 141 %tmp5 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 2 |
| 142 %tmp6 = shufflevector <4 x i16> %tmp5, <4 x i16> undef, <4 x i32> zeroinitializer | 142 %tmp6 = shufflevector <4 x i16> %tmp5, <4 x i16> undef, <4 x i32> zeroinitializer |
| 143 %tmp7 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 3 | 143 %tmp7 = extractvalue %struct.__neon_int16x4x4_t %tmp0, 3 |
| 144 %tmp8 = shufflevector <4 x i16> %tmp7, <4 x i16> undef, <4 x i32> zeroinitializer | 144 %tmp8 = shufflevector <4 x i16> %tmp7, <4 x i16> undef, <4 x i32> zeroinitializer |
| 145 %tmp9 = add <4 x i16> %tmp2, %tmp4 | 145 %tmp9 = add <4 x i16> %tmp2, %tmp4 |
| 146 %tmp10 = add <4 x i16> %tmp6, %tmp8 | 146 %tmp10 = add <4 x i16> %tmp6, %tmp8 |
| 147 %tmp11 = add <4 x i16> %tmp9, %tmp10 | 147 %tmp11 = add <4 x i16> %tmp9, %tmp10 |
| 148 %tmp12 = getelementptr i16* %A, i32 4 | 148 %tmp12 = getelementptr i16, i16* %A, i32 4 |
| 149 store i16* %tmp12, i16** %ptr | 149 store i16* %tmp12, i16** %ptr |
| 150 ret <4 x i16> %tmp11 | 150 ret <4 x i16> %tmp11 |
| 151 } | 151 } |