; Tests basics and corner cases of arm32 sandboxing, using -Om1 in the hope that
; the output will remain stable. When packing bundles, we try to limit to a few
; instructions with well known sizes and minimal use of registers and stack
; slots in the lowering sequence.

; REQUIRES: allow_dump, target_arm32
; RUN: %p2i -i %s --sandbox --filetype=asm --target=arm32 --assemble \
; RUN:   --disassemble --args -Om1 -allow-externally-defined-symbols \
; RUN:   -ffunction-sections | FileCheck %s
declare void @call_target()
declare void @call_target1(i32 %arg)
@global_short = internal global [2 x i8] zeroinitializer

; A direct call sequence uses the right mask and register-call sequence.
define internal void @test_direct_call() {
entry:
  call void @call_target()
  ret void
}
; CHECK-LABEL: test_direct_call
; CHECK: sub sp,
; CHECK-NEXT: bic sp, sp, {{.*}} ; 0xc0000000
; The bl ends at offset ...c and the following instruction lands at offset
; ...0, i.e. the call is placed so the return address is bundle-aligned.
; CHECK: {{[0-9]*}}c: {{.*}} bl {{.*}} call_target
; CHECK-NEXT: {{[0-9]*}}0:
| 26 |
; An indirect call sequence uses the right mask and register-call sequence.
define internal void @test_indirect_call(i32 %target) {
entry:
  %__1 = inttoptr i32 %target to void ()*
  call void %__1()
  ret void
}
; CHECK-LABEL: test_indirect_call
; CHECK: sub sp,
; The stack pointer is masked exactly once in the prologue.
; CHECK: bic sp, sp, {{.*}} ; 0xc0000000
; CHECK-NOT: bic sp, sp, {{.*}} ; 0xc0000000
; CHECK: ldr [[REG:r[0-9]+]], [sp,
; CHECK-NEXT: nop
; The target register is masked at offset ...8 so that bic + blx share a
; bundle; the instruction after blx lands on a bundle boundary (...0).
; CHECK: {{[0-9]+}}8: {{.*}} bic [[REG:r[0-9]+]], [[REG]], {{.*}} 0xc000000f
; CHECK-NEXT: blx [[REG]]
; CHECK-NEXT: {{[0-9]+}}0:
| 43 |
; A return sequence uses the right mask / branch sequence: lr is masked
; with 0xc000000f immediately before the bx.
define internal void @test_ret() {
entry:
  ret void
}
; CHECK-LABEL: test_ret
; CHECK: 0: {{.*}} bic lr, lr, {{.*}} 0xc000000f
; CHECK-NEXT: bx lr
| 52 |
; Bundle lock without padding: the masked strh sequence starts at offset 0,
; so only the single nop already present is needed to keep bic + strh inside
; one bundle — no extra padding is inserted.
define internal void @bundle_lock_without_padding() {
entry:
  %addr_short = bitcast [2 x i8]* @global_short to i16*
  store i16 0, i16* %addr_short, align 1
  ret void
}
; CHECK-LABEL: bundle_lock_without_padding
; CHECK: 0: {{.*}} movw
; CHECK-NEXT: movt
; CHECK-NEXT: movw
; CHECK-NEXT: nop
; CHECK-NEXT: bic [[REG:r[0-9]+]], {{.*}} 0xc0000000
; CHECK-NEXT: strh {{.*}}, {{[[]}}[[REG]]
; CHECK-NEXT: bic lr, lr, {{.*}} ; 0xc000000f
; CHECK-NEXT: {{.*}} bx lr
| 69 |
; Bundle lock with padding. The three stores plus the epilogue leave the
; pop ending mid-bundle (offset ...48 + 4), so a nop is inserted before the
; bic lr / bx lr pair to keep that locked pair within a single bundle.
define internal void @bundle_lock_with_padding() {
entry:
  call void @call_target()
  ; bundle boundary
  store i16 0, i16* undef, align 1 ; 3 insts
  store i16 0, i16* undef, align 1 ; 3 insts
  store i16 0, i16* undef, align 1 ; 3 insts
  ; SP adjustment + pop
  ; nop
  ; bundle boundary
  ret void
}
; CHECK-LABEL: bundle_lock_with_padding
; CHECK: 48: {{.*}} pop
; CHECK-NEXT: nop
; CHECK-NEXT: bic lr, {{.*}} 0xc000000f
; CHECK-NEXT: {{.*}} bx lr
| 88 |
; Bundle lock align_to_end without any padding: the store sequence between
; the two calls already places the second bl at offset ...c, so no nops are
; needed to bundle-align its return address.
define internal void @bundle_lock_align_to_end_padding_0() {
entry:
  call void @call_target()
  ; bundle boundary
  store i16 0, i16* undef, align 1
  call void @call_target()
  ; bundle boundary
  ret void
}
; CHECK-LABEL: bundle_lock_align_to_end_padding_0
; CHECK: c: {{.*}} bl {{.*}} call_target
; CHECK-NEXT: movw
; CHECK-NEXT: movw
; CHECK-NEXT: bic [[REG:r[0-9]+]]
; CHECK-NEXT: strh {{.*}}, {{[[]}}[[REG]]
; CHECK: {{[0-9]+}}c: {{.*}} bl {{.*}} call_target
; CHECK-NEXT: add sp
; CHECK-NEXT: bic sp, {{.*}} 0xc0000000
; CHECK-NEXT: pop
; CHECK: {{[0-9]+}}0: {{.*}} bic lr, lr, {{.*}} 0xc000000f
; CHECK-NEXT: {{.*}} bx lr
| 111 |
; Bundle lock align_to_end with one bunch of padding: a single nop is
; emitted before the bl so the call ends exactly at a bundle boundary.
define internal void @bundle_lock_align_to_end_padding_1() {
entry:
  call void @call_target()
  ; bundle boundary
  store i32 65536, i32* undef, align 1
  ; bundle boundary
  call void @call_target()
  ; bundle boundary
  ret void
}
; CHECK-LABEL: bundle_lock_align_to_end_padding_1
; CHECK: {{[0-9]*}}c: {{.*}} bl {{.*}} call_target
; CHECK-NEXT: movw [[BASE:r[0-9]+]]
; CHECK-NEXT: movw [[REG:r[0-9]+]], #0
; CHECK-NEXT: movt [[REG]], #1
; CHECK-NEXT: nop
; CHECK-NEXT: bic [[BASE]], [[BASE]], {{.*}} 0xc0000000
; CHECK-NEXT: str [[REG]], {{[[]}}[[BASE]]
; CHECK-NEXT: nop
; CHECK-NEXT: bl {{.*}} call_target
; CHECK: {{[0-9]+}}0: {{.*}} bic lr, lr, {{.*}} 0xc000000f
; CHECK-NEXT: {{.*}} bx lr
| 135 |
; Bundle lock align_to_end with two bunches of padding: two nops before the
; direct bl, and two more nops before the bic/blx pair of the indirect call,
; so each call's return address is bundle-aligned.
define internal void @bundle_lock_align_to_end_padding_2(i32 %target) {
entry:
  call void @call_target1(i32 1)
  ; bundle boundary
  %__1 = inttoptr i32 %target to void (i32, i32, i32)*
  call void %__1(i32 2, i32 3, i32 4)
  ret void
}
; CHECK-LABEL: bundle_lock_align_to_end_padding_2
; CHECK: {{[0-9]+}}0:
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: bl {{.*}} call_target
; CHECK: {{[0-9]+}}c: {{.*}} movw r2, #4
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: bic [[REG:r[0-9]+]], [[REG]], {{.*}} 0xc000000f
; CHECK-NEXT: {{.*}} blx [[REG]]