OLD | NEW |
; Show that we know how to translate lsl.

; NOTE: We use -O2 to get rid of memory stores.

; REQUIRES: allow_dump

; Compile using standalone assembler.
; RUN: %p2i --filetype=asm -i %s --target=arm32 --args -O2 \
; RUN:   | FileCheck %s --check-prefix=ASM
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
54 | 54 |
55 ; ASM-NEXT: lsl r0, r0, r1 | 55 ; ASM-NEXT: lsl r0, r0, r1 |
56 ; DIS-NEXT: 10: e1a00110 | 56 ; DIS-NEXT: 10: e1a00110 |
57 ; IASM-NEXT: .byte 0x10 | 57 ; IASM-NEXT: .byte 0x10 |
58 ; IASM-NEXT: .byte 0x1 | 58 ; IASM-NEXT: .byte 0x1 |
59 ; IASM-NEXT: .byte 0xa0 | 59 ; IASM-NEXT: .byte 0xa0 |
60 ; IASM-NEXT: .byte 0xe1 | 60 ; IASM-NEXT: .byte 0xe1 |
61 | 61 |
62 ret i32 %shl | 62 ret i32 %shl |
63 } | 63 } |
| 64 |
; Check that a <4 x i32> shl with a variable shift amount is scalarized
; into one register-shifted lsl per lane. The DIS/IASM checks pin the
; exact ARM encoding (e1a00110 = lsl r0, r0, r1) at the expected offset.
define internal <4 x i32> @ShlVec(<4 x i32> %a, <4 x i32> %b) {
; ASM-LABEL:ShlVec:
; DIS-LABEL:00000020 <ShlVec>:
; IASM-LABEL:ShlVec:

entry:
; ASM-NEXT:.LShlVec$entry:
; IASM-NEXT:.LShlVec$entry:

  %shl = shl <4 x i32> %a, %b

; One lsl per vector lane (4 lanes).
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; DIS: 28: e1a00110

  ret <4 x i32> %shl
}
| 84 |
; Check that a <8 x i16> shl with a variable shift amount is scalarized
; into one register-shifted lsl per lane (8 lanes).
define internal <8 x i16> @ShlVeci16(<8 x i16> %a, <8 x i16> %b) {
; ASM-LABEL:ShlVeci16:

entry:

  %v = shl <8 x i16> %a, %b

; One lsl per vector lane (8 lanes).
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1

  ret <8 x i16> %v
}
| 103 |
; Check that a <16 x i8> shl with a variable shift amount is scalarized
; into one register-shifted lsl per lane (16 lanes).
define internal <16 x i8> @ShlVeci8(<16 x i8> %a, <16 x i8> %b) {
; ASM-LABEL:ShlVeci8:

entry:

  %v = shl <16 x i8> %a, %b

; One lsl per vector lane (16 lanes).
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1
; ASM: lsl r0, r0, r1

  ret <16 x i8> %v
}
OLD | NEW |