Chromium Code Reviews — side-by-side diff
| OLD | NEW |
|---|---|
|---|---|
| 1 ; Show that we know how to translate lsr. | 1 ; Show that we know how to translate lsr. |
| 2 | 2 |
| 3 ; NOTE: We use -O2 to get rid of memory stores. | 3 ; NOTE: We use -O2 to get rid of memory stores. |
| 4 | 4 |
| 5 ; REQUIRES: allow_dump | 5 ; REQUIRES: allow_dump |
| 6 | 6 |
| 7 ; Compile using standalone assembler. | 7 ; Compile using standalone assembler. |
| 8 ; RUN: %p2i --filetype=asm -i %s --target=arm32 --args -O2 \ | 8 ; RUN: %p2i --filetype=asm -i %s --target=arm32 --args -O2 \ |
| 9 ; RUN: | FileCheck %s --check-prefix=ASM | 9 ; RUN: | FileCheck %s --check-prefix=ASM |
| 10 | 10 |
| (...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 66 ; ASM-LABEL:LshrVec: | 66 ; ASM-LABEL:LshrVec: |
| 67 ; DIS-LABEL:00000020 <LshrVec>: | 67 ; DIS-LABEL:00000020 <LshrVec>: |
| 68 ; IASM-LABEL:LshrVec: | 68 ; IASM-LABEL:LshrVec: |
| 69 | 69 |
| 70 entry: | 70 entry: |
| 71 ; ASM-NEXT:.LLshrVec$entry: | 71 ; ASM-NEXT:.LLshrVec$entry: |
| 72 ; IASM-NEXT:.LLshrVec$entry: | 72 ; IASM-NEXT:.LLshrVec$entry: |
| 73 | 73 |
| 74 %v = lshr <4 x i32> %a, %b | 74 %v = lshr <4 x i32> %a, %b |
| 75 | 75 |
| 76 ; ASM: lsr r0, r0, r1 | 76 ; ASM: vneg.s32 q1, q1 |
| 77 ; ASM: lsr r0, r0, r1 | 77 ; ASM-NEXT: vshl.u32 q0, q0, q1 |
| 78 ; ASM: lsr r0, r0, r1 | 78 ; DIS: 20: f3b923c2 |
| 79 ; ASM: lsr r0, r0, r1 | 79 ; DIS: 24: f3220440 |
| 80 ; DIS: 28: e1a00130 | 80 ; IASM: .byte 0xc2 |
|
[Review comment thread on this hunk]
Karl (2016/04/13 16:17:37): Same here (and below).
John (2016/04/15 13:20:57): Done.
| |
| 81 ; IASM-NEXT: .byte 0x23 | |
| 82 ; IASM-NEXT: .byte 0xb9 | |
| 83 ; IASM-NEXT: .byte 0xf3 | |
| 84 ; IASM-NEXT: .byte 0x40 | |
| 85 ; IASM-NEXT: .byte 0x4 | |
| 86 ; IASM-NEXT: .byte 0x22 | |
| 87 ; IASM-NEXT: .byte 0xf3 | |
| 81 | 88 |
| 82 ret <4 x i32> %v | 89 ret <4 x i32> %v |
| 83 } | 90 } |
| 84 | 91 |
| 85 define internal <8 x i16> @LshrVeci16(<8 x i16> %a, <8 x i16> %b) { | 92 define internal <8 x i16> @LshrVeci16(<8 x i16> %a, <8 x i16> %b) { |
| 86 ; ASM-LABEL:LshrVeci16: | 93 ; ASM-LABEL:LshrVeci16: |
| 87 | 94 |
| 88 entry: | 95 entry: |
| 89 | 96 |
| 90 %v = lshr <8 x i16> %a, %b | 97 %v = lshr <8 x i16> %a, %b |
| 91 | 98 |
| 92 ; ASM: lsr r0, r0, r1 | 99 ; ASM: vneg.s16 q1, q1 |
| 93 ; ASM: lsr r0, r0, r1 | 100 ; ASM-NEXT: vshl.u16 q0, q0, q1 |
| 94 ; ASM: lsr r0, r0, r1 | 101 ; DIS: 30: f3b523c2 |
| 95 ; ASM: lsr r0, r0, r1 | 102 ; DIS: 34: f3120440 |
| 96 ; ASM: lsr r0, r0, r1 | 103 ; IASM: .byte 0xc2 |
| 97 ; ASM: lsr r0, r0, r1 | 104 ; IASM-NEXT: .byte 0x23 |
| 98 ; ASM: lsr r0, r0, r1 | 105 ; IASM-NEXT: .byte 0xb5 |
| 99 ; ASM: lsr r0, r0, r1 | 106 ; IASM-NEXT: .byte 0xf3 |
| 107 ; IASM-NEXT: .byte 0x40 | |
| 108 ; IASM-NEXT: .byte 0x4 | |
| 109 ; IASM-NEXT: .byte 0x12 | |
| 110 ; IASM-NEXT: .byte 0xf3 | |
| 100 | 111 |
| 101 ret <8 x i16> %v | 112 ret <8 x i16> %v |
| 102 } | 113 } |
| 103 | 114 |
| 104 define internal <16 x i8> @LshrVeci8(<16 x i8> %a, <16 x i8> %b) { | 115 define internal <16 x i8> @LshrVeci8(<16 x i8> %a, <16 x i8> %b) { |
| 105 ; ASM-LABEL:LshrVeci8: | 116 ; ASM-LABEL:LshrVeci8: |
| 106 | 117 |
| 107 entry: | 118 entry: |
| 108 | 119 |
| 109 %v = lshr <16 x i8> %a, %b | 120 %v = lshr <16 x i8> %a, %b |
| 110 | 121 |
| 111 ; ASM: lsr r0, r0, r1 | 122 ; ASM: vneg.s8 q1, q1 |
| 112 ; ASM: lsr r0, r0, r1 | 123 ; ASM-NEXT: vshl.u8 q0, q0, q1 |
| 113 ; ASM: lsr r0, r0, r1 | 124 ; DIS: 40: f3b123c2 |
| 114 ; ASM: lsr r0, r0, r1 | 125 ; DIS: 44: f3020440 |
| 115 ; ASM: lsr r0, r0, r1 | 126 ; IASM: .byte 0xc2 |
| 116 ; ASM: lsr r0, r0, r1 | 127 ; IASM-NEXT: .byte 0x23 |
| 117 ; ASM: lsr r0, r0, r1 | 128 ; IASM-NEXT: .byte 0xb1 |
| 118 ; ASM: lsr r0, r0, r1 | 129 ; IASM-NEXT: .byte 0xf3 |
| 119 ; ASM: lsr r0, r0, r1 | 130 ; IASM-NEXT: .byte 0x40 |
| 120 ; ASM: lsr r0, r0, r1 | 131 ; IASM-NEXT: .byte 0x4 |
| 121 ; ASM: lsr r0, r0, r1 | 132 ; IASM-NEXT: .byte 0x2 |
| 122 ; ASM: lsr r0, r0, r1 | 133 ; IASM-NEXT: .byte 0xf3 |
| 123 ; ASM: lsr r0, r0, r1 | |
| 124 ; ASM: lsr r0, r0, r1 | |
| 125 ; ASM: lsr r0, r0, r1 | |
| 126 ; ASM: lsr r0, r0, r1 | |
| 127 | 134 |
| 128 ret <16 x i8> %v | 135 ret <16 x i8> %v |
| 129 } | 136 } |
| OLD | NEW |