| OLD | NEW |
| (Empty) |
; Tests basics and corner cases of arm32 sandboxing, using -Om1 in
; the hope that the output will remain stable. When packing bundles,
; we try to limit to a few instructions with well known sizes and
; minimal use of registers and stack slots in the lowering sequence.

; RUN: %p2i -i %s --sandbox --filetype=asm --target=arm32 --assemble \
; RUN:   --disassemble --args -Om1 -allow-externally-defined-symbols \
; RUN:   -ffunction-sections | FileCheck %s
| 10 declare void @call_target() | |
| 11 @global_short = internal global [2 x i8] zeroinitializer | |
| 12 | |
; A direct call sequence uses the right mask and register-call sequence.
define internal void @test_direct_call() {
entry:
  call void @call_target()
  ret void
}
; CHECK-LABEL: test_direct_call
; CHECK: nop
; The bl must be the last instruction of its 16-byte bundle: it sits at
; offset 0xc and the following instruction starts at 0x10 (bundle boundary),
; so the return address is bundle-aligned.
; CHECK: c: {{.*}} bl {{.*}} call_target
; CHECK-NEXT: 10:
; An indirect call sequence uses the right mask and register-call sequence.
define internal void @test_indirect_call(i32 %target) {
entry:
  %__1 = inttoptr i32 %target to void ()*
  call void %__1()
  ret void
}
; CHECK-LABEL: test_indirect_call
; CHECK: ldr [[REG:.*]], [sp,
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; The mask-and-branch pair must stay inside one bundle: bic at 0x18,
; blx at 0x1c, next bundle at 0x20.
; CHECK: 18: {{.*}} bic [[REG]], [[REG]], {{.*}} 0xc000000f
; CHECK-NEXT: blx [[REG]]
; Fixed: this directive was spelled "CHECk-NEXT" (lowercase k), which
; FileCheck silently ignores, so the bundle-boundary check never ran.
; CHECK-NEXT: 20:
; A return sequence uses the right mask / branch (bic lr + bx lr) sequence.
define internal void @test_ret() {
entry:
  ret void
}
; CHECK-LABEL: test_ret
; With no frame to tear down, the masked return is the whole body,
; starting at offset 0.
; CHECK: 0: {{.*}} bic lr, lr, {{.*}} 0xc000000f
; CHECK-NEXT: bx lr
; Bundle lock without padding: the whole lowered sequence packs into
; bundles with no nops needed before the locked return pair.
define internal void @bundle_lock_without_padding() {
entry:
  %addr_short = bitcast [2 x i8]* @global_short to i16*
  store i16 0, i16* %addr_short, align 1
  ret void
}
; CHECK-LABEL: bundle_lock_without_padding
; movw/movt materialize the global's address, movw the zero value.
; CHECK: 0: {{.*}} movw
; CHECK-NEXT: movt
; CHECK-NEXT: movw
; CHECK-NEXT: strh
; bic/bx follow immediately — no nop padding was required.
; CHECK-NEXT: bic lr, lr, {{.*}} 0xc000000f
; CHECK-NEXT: {{.*}} bx lr
; Bundle lock with padding: a nop must be inserted so the locked
; bic/bx pair does not straddle a bundle boundary.
define internal void @bundle_lock_with_padding() {
entry:
  call void @call_target()
  ; bundle boundary
  store i16 0, i16* undef, align 1 ; 3 insts
  store i16 0, i16* undef, align 1 ; 3 insts
  store i16 0, i16* undef, align 1 ; 3 insts
  ; SP adjustment + pop
  ; nop
  ; bundle boundary
  ret void
}
; CHECK-LABEL: bundle_lock_with_padding
; pop lands at 0x38; one nop pads to 0x40 so bic+bx share a bundle.
; CHECK: 38: {{.*}} pop
; CHECK-NEXT: nop
; CHECK-NEXT: bic lr, lr, {{.*}} 0xc000000f
; CHECK-NEXT: {{.*}} bx lr
; Bundle lock align_to_end without any padding: the store plus the
; second call already end exactly on a bundle boundary, so no nops.
define internal void @bundle_lock_align_to_end_padding_0() {
entry:
  call void @call_target()
  ; bundle boundary
  store i16 0, i16* undef, align 1
  call void @call_target()
  ; bundle boundary
  ret void
}
; CHECK-LABEL: bundle_lock_align_to_end_padding_0
; First bl ends its bundle at 0x10 (bl at offset 0xc).
; CHECK: c: {{.*}} bl {{.*}} call_target
; CHECK-NEXT: movw
; CHECK-NEXT: movw
; CHECK-NEXT: strh
; The second bl falls in the last slot with zero padding required.
; CHECK-NEXT: bl {{.*}} call_target
; CHECK-NEXT: add
; CHECK-NEXT: pop
; CHECK-NEXT: bic lr, lr, {{.*}} 0xc000000f
; CHECK-NEXT: {{.*}} bx lr
; Bundle lock align_to_end with one bunch of padding: a single nop
; pushes the call into the last slot of its bundle.
define internal void @bundle_lock_align_to_end_padding_1() {
entry:
  call void @call_target()
  ; bundle boundary
  store i16 0, i16* undef, align 1
  store i16 0, i16* undef, align 1
  ; bundle boundary
  call void @call_target()
  ; bundle boundary
  ret void
}
; CHECK-LABEL: bundle_lock_align_to_end_padding_1
; CHECK: c: {{.*}} bl {{.*}} call_target
; Two 3-instruction stores (movw/movw/strh each) follow the call.
; CHECK-NEXT: movw
; CHECK-NEXT: movw
; CHECK-NEXT: strh
; CHECK-NEXT: movw
; CHECK-NEXT: movw
; CHECK-NEXT: strh
; One nop of padding aligns the bl to the end of the bundle.
; CHECK-NEXT: nop
; CHECK-NEXT: bl {{.*}} call_target
; CHECK-NEXT: add
; CHECK-NEXT: pop
; CHECK-NEXT: bic lr, lr, {{.*}} 0xc000000f
; CHECK-NEXT: {{.*}} bx lr
; Bundle lock align_to_end with two bunches of padding: two nops are
; needed before the masked indirect call so bic+blx end the bundle.
define internal void @bundle_lock_align_to_end_padding_2(i32 %target) {
entry:
  call void @call_target()
  ; bundle boundary
  %__1 = inttoptr i32 %target to void ()*
  store i8 0, i8* undef, align 1
  call void %__1()
  ret void
}
; CHECK-LABEL: bundle_lock_align_to_end_padding_2
; CHECK: c: {{.*}} bl {{.*}} call_target
; CHECK-NEXT: movw
; CHECK-NEXT: movw
; CHECK-NEXT: strb
; Two nops at 0x20/0x24 pad so that bic (0x28) and blx (0x2c) form the
; locked pair ending exactly on the next bundle boundary.
; CHECK: 20: {{.*}} nop
; CHECK-NEXT: nop
; CHECK-NEXT: bic [[REG:r[0-9]+]], [[REG]], {{.*}} 0xc000000f
; CHECK-NEXT: {{.*}} blx [[REG]]
| OLD | NEW |