Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 ; Show that we know how to translate asr | 1 ; Show that we know how to translate asr |

> **Jim Stichnoth** 2016/02/17 20:59:51
> end sentence with period. (for consistency with ot…
>
> **Eric Holk** 2016/02/17 22:31:56
> Done.

| OLD | NEW |
|---|---|
| 2 | 2 |
| 3 ; NOTE: We use -O2 to get rid of memory stores. | 3 ; NOTE: We use -O2 to get rid of memory stores. |
| 4 | 4 |
| 5 ; REQUIRES: allow_dump | 5 ; REQUIRES: allow_dump |
| 6 | 6 |
| 7 ; Compile using standalone assembler. | 7 ; Compile using standalone assembler. |
| 8 ; RUN: %p2i --filetype=asm -i %s --target=arm32 --args -O2 \ | 8 ; RUN: %p2i --filetype=asm -i %s --target=arm32 --args -O2 \ |
| 9 ; RUN: | FileCheck %s --check-prefix=ASM | 9 ; RUN: | FileCheck %s --check-prefix=ASM |
| 10 | 10 |
| 11 ; Show bytes in assembled standalone code. | 11 ; Show bytes in assembled standalone code. |

*(...skipping 42 matching lines...)*
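The 42 elided lines presumably contain the remaining RUN invocations for the other two check prefixes used below, DIS (checks against a disassembly of the assembled object) and IASM (checks against the integrated assembler's byte-level output), along with the start of the scalar test function whose tail is shown next.

| OLD | NEW |
|---|---|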
| 54 | 54 |
| 55 ; ASM-NEXT: asr r0, r0, r1 | 55 ; ASM-NEXT: asr r0, r0, r1 |
| 56 ; DIS-NEXT: 10: e1a00150 | 56 ; DIS-NEXT: 10: e1a00150 |
| 57 ; IASM-NEXT: .byte 0x50 | 57 ; IASM-NEXT: .byte 0x50 |
| 58 ; IASM-NEXT: .byte 0x1 | 58 ; IASM-NEXT: .byte 0x1 |
| 59 ; IASM-NEXT: .byte 0xa0 | 59 ; IASM-NEXT: .byte 0xa0 |
| 60 ; IASM-NEXT: .byte 0xe1 | 60 ; IASM-NEXT: .byte 0xe1 |
| 61 | 61 |
| 62 ret i32 %v | 62 ret i32 %v |
| 63 } | 63 } |
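For reference, `e1a00150` is the ARM encoding of `mov r0, r0, asr r1`, the form that assemblers print with the `asr r0, r0, r1` alias, and the IASM bytes are simply that same word in little-endian order. A minimal decoding sketch (illustrative only, not part of the CL; the helper name is made up):

```python
# Decode the expected word 0xe1a00150 as an ARM "MOV (register-shifted
# register)" instruction, which is what `asr r0, r0, r1` assembles to.
# Bit 4 set with bit 7 clear selects the register-shifted form.

def decode_mov_reg_shifted(word: int) -> str:
    cond = (word >> 28) & 0xF        # 0xE = AL (always)
    opcode = (word >> 21) & 0xF      # 0b1101 = MOV
    rd = (word >> 12) & 0xF          # destination register
    rs = (word >> 8) & 0xF           # register holding the shift amount
    shift_type = (word >> 5) & 0x3   # 0b10 = ASR
    rm = word & 0xF                  # register being shifted
    assert cond == 0xE and opcode == 0b1101 and shift_type == 0b10
    return f"asr r{rd}, r{rm}, r{rs}"

print(decode_mov_reg_shifted(0xE1A00150))  # -> asr r0, r0, r1

# The IASM bytes 0x50 0x01 0xa0 0xe1 are the same word, little-endian:
assert int.from_bytes(bytes([0x50, 0x01, 0xA0, 0xE1]), "little") == 0xE1A00150
```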
| OLD | NEW |
|---|---|
| | 64 |
| | 65 define internal <4 x i32> @AshrVeci32(<4 x i32> %a, <4 x i32> %b) { |
| | 66 ; ASM-LABEL:AshrVeci32: |
| | 67 ; DIS-LABEL:00000020 <AshrVeci32>: |
| | 68 ; IASM-LABEL:AshrVeci32: |
| | 69 |
| | 70 entry: |
| | 71 |
| | 72 %v = ashr <4 x i32> %a, %b |
| | 73 |
| | 74 ; ASM: asr r0, r0, r1 |
| | 75 ; ASM: asr r0, r0, r1 |
| | 76 ; ASM: asr r0, r0, r1 |
| | 77 ; ASM: asr r0, r0, r1 |
| | 78 ; DIS: 28: e1a00150 |
| | 79 ; DIS: 38: e1a00150 |
| | 80 ; DIS: 48: e1a00150 |
| | 81 ; DIS: 58: e1a00150 |
| | 82 |
| | 83 ret <4 x i32> %v |
| | 84 } |
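The four `asr` checks (one per lane, with the DIS addresses 0x10 apart, presumably leaving room for lane extract/insert moves in between) are consistent with a scalarized lowering of the vector shift. A small sketch of the per-lane semantics being tested (assumption: scalarization; `ashr_lane` and `ashr_vector` are illustrative names, not part of Subzero):

```python
# Lane-wise arithmetic shift right, modeling LLVM's ashr on a vector
# one scalar shift per lane, matching the four checks above.

def ashr_lane(value: int, amount: int, width: int) -> int:
    """Arithmetic shift right of one width-bit lane."""
    sign_bit = 1 << (width - 1)
    signed = (value & (sign_bit - 1)) - (value & sign_bit)  # reinterpret as signed
    return (signed >> amount) & ((1 << width) - 1)          # wrap back to width bits

def ashr_vector(a, b, width):
    return [ashr_lane(x, y, width) for x, y in zip(a, b)]

# <4 x i32>: the top bit is the sign, so 0x80000000 >> 4 stays negative.
assert ashr_vector([0x80000000, 16, 3, 7], [4, 2, 1, 0], 32) == \
       [0xF8000000, 4, 1, 7]
# The same helper models the <8 x i16> and <16 x i8> tests below:
assert ashr_lane(0x8000, 1, 16) == 0xC000
assert ashr_lane(0x80, 1, 8) == 0xC0
```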
| OLD | NEW |
|---|---|
| | 85 |
| | 86 define internal <8 x i16> @AshrVeci16(<8 x i16> %a, <8 x i16> %b) { |
| | 87 ; ASM-LABEL:AshrVeci16: |
| | 88 |
| | 89 entry: |
| | 90 |
| | 91 %v = ashr <8 x i16> %a, %b |
| | 92 |
| | 93 ; ASM: asr r0, r0, r1 |
| | 94 ; ASM: asr r0, r0, r1 |
| | 95 ; ASM: asr r0, r0, r1 |
| | 96 ; ASM: asr r0, r0, r1 |
| | 97 ; ASM: asr r0, r0, r1 |
| | 98 ; ASM: asr r0, r0, r1 |
| | 99 ; ASM: asr r0, r0, r1 |
| | 100 ; ASM: asr r0, r0, r1 |
| | 101 |
| | 102 ret <8 x i16> %v |
| | 103 } |
| | 104 |
| | 105 define internal <16 x i8> @AshrVeci8(<16 x i8> %a, <16 x i8> %b) { |
| | 106 ; ASM-LABEL:AshrVeci8: |
| | 107 |
| | 108 entry: |
| | 109 |
| | 110 %v = ashr <16 x i8> %a, %b |
| | 111 |
| | 112 ; ASM: asr r0, r0, r1 |
| | 113 ; ASM: asr r0, r0, r1 |
| | 114 ; ASM: asr r0, r0, r1 |
| | 115 ; ASM: asr r0, r0, r1 |
| | 116 ; ASM: asr r0, r0, r1 |
| | 117 ; ASM: asr r0, r0, r1 |
| | 118 ; ASM: asr r0, r0, r1 |
| | 119 ; ASM: asr r0, r0, r1 |
| | 120 ; ASM: asr r0, r0, r1 |
| | 121 ; ASM: asr r0, r0, r1 |
| | 122 ; ASM: asr r0, r0, r1 |
| | 123 ; ASM: asr r0, r0, r1 |
| | 124 ; ASM: asr r0, r0, r1 |
| | 125 ; ASM: asr r0, r0, r1 |
| | 126 ; ASM: asr r0, r0, r1 |
| | 127 ; ASM: asr r0, r0, r1 |
| | 128 |
| | 129 ret <16 x i8> %v |
| | 130 } |
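Across the three vector tests the number of expected `asr` instructions matches the lane count (4 for <4 x i32>, 8 for <8 x i16>, 16 for <16 x i8>), again consistent with per-lane scalarization. For the i16 and i8 cases a per-lane shift in a 32-bit core register presumably also requires sign-extending the lane first (e.g. sxth/sxtb); the checks here only count the shifts themselves.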