| OLD | NEW |
| 1 ; Tests basics and corner cases of x86-32 sandboxing, using -Om1 in | 1 ; Tests basics and corner cases of x86-32 sandboxing, using -Om1 in |
| 2 ; the hope that the output will remain stable. When packing bundles, | 2 ; the hope that the output will remain stable. When packing bundles, |
| 3 ; we try to limit to a few instructions with well known sizes and | 3 ; we try to limit to a few instructions with well known sizes and |
| 4 ; minimal use of registers and stack slots in the lowering sequence. | 4 ; minimal use of registers and stack slots in the lowering sequence. |
| 5 | 5 |
| 6 ; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 \ | 6 ; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 \ |
| 7 ; RUN: -allow-externally-defined-symbols \ |
| 7 ; RUN: -ffunction-sections -sandbox | FileCheck %s | 8 ; RUN: -ffunction-sections -sandbox | FileCheck %s |
| 8 | 9 |
| 9 declare void @call_target() | 10 declare void @call_target() |
| 10 @global_byte = internal global [1 x i8] zeroinitializer | 11 @global_byte = internal global [1 x i8] zeroinitializer |
| 11 @global_short = internal global [2 x i8] zeroinitializer | 12 @global_short = internal global [2 x i8] zeroinitializer |
| 12 @global_int = internal global [4 x i8] zeroinitializer | 13 @global_int = internal global [4 x i8] zeroinitializer |
| 13 | 14 |
| 14 ; A direct call sequence uses the right mask and register-call sequence. | 15 ; A direct call sequence uses the right mask and register-call sequence. |
| 15 define void @test_direct_call() { | 16 define internal void @test_direct_call() { |
| 16 entry: | 17 entry: |
| 17 call void @call_target() | 18 call void @call_target() |
| 18 ret void | 19 ret void |
| 19 } | 20 } |
| 20 ; CHECK-LABEL: test_direct_call | 21 ; CHECK-LABEL: test_direct_call |
| 21 ; CHECK: nop | 22 ; CHECK: nop |
| 22 ; CHECK: 1b: {{.*}} call 1c | 23 ; CHECK: 1b: {{.*}} call 1c |
| 23 ; CHECK-NEXT: 20: | 24 ; CHECK-NEXT: 20: |
| 24 | 25 |
| 25 ; An indirect call sequence uses the right mask and register-call sequence. | 26 ; An indirect call sequence uses the right mask and register-call sequence. |
| 26 define void @test_indirect_call(i32 %target) { | 27 define internal void @test_indirect_call(i32 %target) { |
| 27 entry: | 28 entry: |
| 28 %__1 = inttoptr i32 %target to void ()* | 29 %__1 = inttoptr i32 %target to void ()* |
| 29 call void %__1() | 30 call void %__1() |
| 30 ret void | 31 ret void |
| 31 } | 32 } |
| 32 ; CHECK-LABEL: test_indirect_call | 33 ; CHECK-LABEL: test_indirect_call |
| 33 ; CHECK: mov [[REG:.*]],DWORD PTR [esp | 34 ; CHECK: mov [[REG:.*]],DWORD PTR [esp |
| 34 ; CHECK-NEXT: nop | 35 ; CHECK-NEXT: nop |
| 35 ; CHECK: 1b: {{.*}} and [[REG]],0xffffffe0 | 36 ; CHECK: 1b: {{.*}} and [[REG]],0xffffffe0 |
| 36 ; CHECK-NEXT: call [[REG]] | 37 ; CHECK-NEXT: call [[REG]] |
| 37 ; CHECK-NEXT: 20: | 38 ; CHECK-NEXT: 20: |
| 38 | 39 |
| 39 ; A return sequence uses the right pop / mask / jmp sequence. | 40 ; A return sequence uses the right pop / mask / jmp sequence. |
| 40 define void @test_ret() { | 41 define internal void @test_ret() { |
| 41 entry: | 42 entry: |
| 42 ret void | 43 ret void |
| 43 } | 44 } |
| 44 ; CHECK-LABEL: test_ret | 45 ; CHECK-LABEL: test_ret |
| 45 ; CHECK: pop ecx | 46 ; CHECK: pop ecx |
| 46 ; CHECK-NEXT: and ecx,0xffffffe0 | 47 ; CHECK-NEXT: and ecx,0xffffffe0 |
| 47 ; CHECK-NEXT: jmp ecx | 48 ; CHECK-NEXT: jmp ecx |
| 48 | 49 |
| 49 ; A perfectly packed bundle should not have nops at the end. | 50 ; A perfectly packed bundle should not have nops at the end. |
| 50 define void @packed_bundle() { | 51 define internal void @packed_bundle() { |
| 51 entry: | 52 entry: |
| 52 call void @call_target() | 53 call void @call_target() |
| 53 ; bundle boundary | 54 ; bundle boundary |
| 54 %addr_byte = bitcast [1 x i8]* @global_byte to i8* | 55 %addr_byte = bitcast [1 x i8]* @global_byte to i8* |
| 55 %addr_short = bitcast [2 x i8]* @global_short to i16* | 56 %addr_short = bitcast [2 x i8]* @global_short to i16* |
| 56 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction | 57 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction |
| 57 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 58 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
| 58 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction | 59 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction |
| 59 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 60 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
| 60 ; bundle boundary | 61 ; bundle boundary |
| 61 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction | 62 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction |
| 62 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 63 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
| 63 ret void | 64 ret void |
| 64 } | 65 } |
| 65 ; CHECK-LABEL: packed_bundle | 66 ; CHECK-LABEL: packed_bundle |
| 66 ; CHECK: call | 67 ; CHECK: call |
| 67 ; CHECK-NEXT: 20: {{.*}} mov BYTE PTR | 68 ; CHECK-NEXT: 20: {{.*}} mov BYTE PTR |
| 68 ; CHECK-NEXT: 27: {{.*}} mov WORD PTR | 69 ; CHECK-NEXT: 27: {{.*}} mov WORD PTR |
| 69 ; CHECK-NEXT: 30: {{.*}} mov BYTE PTR | 70 ; CHECK-NEXT: 30: {{.*}} mov BYTE PTR |
| 70 ; CHECK-NEXT: 37: {{.*}} mov WORD PTR | 71 ; CHECK-NEXT: 37: {{.*}} mov WORD PTR |
| 71 ; CHECK-NEXT: 40: {{.*}} mov BYTE PTR | 72 ; CHECK-NEXT: 40: {{.*}} mov BYTE PTR |
| 72 ; CHECK-NEXT: 47: {{.*}} mov WORD PTR | 73 ; CHECK-NEXT: 47: {{.*}} mov WORD PTR |
| 73 | 74 |
| 74 ; An imperfectly packed bundle should have one or more nops at the end. | 75 ; An imperfectly packed bundle should have one or more nops at the end. |
| 75 define void @nonpacked_bundle() { | 76 define internal void @nonpacked_bundle() { |
| 76 entry: | 77 entry: |
| 77 call void @call_target() | 78 call void @call_target() |
| 78 ; bundle boundary | 79 ; bundle boundary |
| 79 %addr_short = bitcast [2 x i8]* @global_short to i16* | 80 %addr_short = bitcast [2 x i8]* @global_short to i16* |
| 80 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 81 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
| 81 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 82 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
| 82 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 83 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
| 83 ; nop padding | 84 ; nop padding |
| 84 ; bundle boundary | 85 ; bundle boundary |
| 85 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 86 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
| 86 ret void | 87 ret void |
| 87 } | 88 } |
| 88 ; CHECK-LABEL: nonpacked_bundle | 89 ; CHECK-LABEL: nonpacked_bundle |
| 89 ; CHECK: call | 90 ; CHECK: call |
| 90 ; CHECK-NEXT: 20: {{.*}} mov WORD PTR | 91 ; CHECK-NEXT: 20: {{.*}} mov WORD PTR |
| 91 ; CHECK-NEXT: 29: {{.*}} mov WORD PTR | 92 ; CHECK-NEXT: 29: {{.*}} mov WORD PTR |
| 92 ; CHECK-NEXT: 32: {{.*}} mov WORD PTR | 93 ; CHECK-NEXT: 32: {{.*}} mov WORD PTR |
| 93 ; CHECK-NEXT: 3b: {{.*}} nop | 94 ; CHECK-NEXT: 3b: {{.*}} nop |
| 94 ; CHECK: 40: {{.*}} mov WORD PTR | 95 ; CHECK: 40: {{.*}} mov WORD PTR |
| 95 | 96 |
| 96 ; A zero-byte instruction (e.g. local label definition) at a bundle | 97 ; A zero-byte instruction (e.g. local label definition) at a bundle |
| 97 ; boundary should not trigger nop padding. | 98 ; boundary should not trigger nop padding. |
| 98 define void @label_at_boundary(i32 %arg, float %farg1, float %farg2) { | 99 define internal void @label_at_boundary(i32 %arg, float %farg1, float %farg2) { |
| 99 entry: | 100 entry: |
| 100 %argi8 = trunc i32 %arg to i8 | 101 %argi8 = trunc i32 %arg to i8 |
| 101 call void @call_target() | 102 call void @call_target() |
| 102 ; bundle boundary | 103 ; bundle boundary |
| 103 %addr_short = bitcast [2 x i8]* @global_short to i16* | 104 %addr_short = bitcast [2 x i8]* @global_short to i16* |
| 104 %addr_int = bitcast [4 x i8]* @global_int to i32* | 105 %addr_int = bitcast [4 x i8]* @global_int to i32* |
| 105 store i32 0, i32* %addr_int, align 1 ; 10-byte instruction | 106 store i32 0, i32* %addr_int, align 1 ; 10-byte instruction |
| 106 %blah = select i1 true, i8 %argi8, i8 %argi8 ; 22-byte lowering sequence | 107 %blah = select i1 true, i8 %argi8, i8 %argi8 ; 22-byte lowering sequence |
| 107 ; label is here | 108 ; label is here |
| 108 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 109 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
| 109 ret void | 110 ret void |
| 110 } | 111 } |
| 111 ; CHECK-LABEL: label_at_boundary | 112 ; CHECK-LABEL: label_at_boundary |
| 112 ; CHECK: call | 113 ; CHECK: call |
| 113 ; We rely on a particular 7-instruction 22-byte Om1 lowering sequence | 114 ; We rely on a particular 7-instruction 22-byte Om1 lowering sequence |
| 114 ; for select. | 115 ; for select. |
| 115 ; CHECK-NEXT: 20: {{.*}} mov DWORD PTR | 116 ; CHECK-NEXT: 20: {{.*}} mov DWORD PTR |
| 116 ; CHECK-NEXT: 2a: {{.*}} mov {{.*}},0x1 | 117 ; CHECK-NEXT: 2a: {{.*}} mov {{.*}},0x1 |
| 117 ; CHECK-NEXT: 2c: {{.*}} cmp {{.*}},0x0 | 118 ; CHECK-NEXT: 2c: {{.*}} cmp {{.*}},0x0 |
| 118 ; CHECK-NEXT: 2e: {{.*}} mov {{.*}},BYTE PTR | 119 ; CHECK-NEXT: 2e: {{.*}} mov {{.*}},BYTE PTR |
| 119 ; CHECK-NEXT: 32: {{.*}} mov BYTE PTR | 120 ; CHECK-NEXT: 32: {{.*}} mov BYTE PTR |
| 120 ; CHECK-NEXT: 36: {{.*}} jne 40 | 121 ; CHECK-NEXT: 36: {{.*}} jne 40 |
| 121 ; CHECK-NEXT: 38: {{.*}} mov {{.*}},BYTE PTR | 122 ; CHECK-NEXT: 38: {{.*}} mov {{.*}},BYTE PTR |
| 122 ; CHECK-NEXT: 3c: {{.*}} mov BYTE PTR | 123 ; CHECK-NEXT: 3c: {{.*}} mov BYTE PTR |
| 123 ; CHECK-NEXT: 40: {{.*}} mov WORD PTR | 124 ; CHECK-NEXT: 40: {{.*}} mov WORD PTR |
| 124 | 125 |
| 125 ; Bundle lock without padding. | 126 ; Bundle lock without padding. |
| 126 define void @bundle_lock_without_padding() { | 127 define internal void @bundle_lock_without_padding() { |
| 127 entry: | 128 entry: |
| 128 %addr_short = bitcast [2 x i8]* @global_short to i16* | 129 %addr_short = bitcast [2 x i8]* @global_short to i16* |
| 129 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 130 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
| 130 ret void | 131 ret void |
| 131 } | 132 } |
| 132 ; CHECK-LABEL: bundle_lock_without_padding | 133 ; CHECK-LABEL: bundle_lock_without_padding |
| 133 ; CHECK: mov WORD PTR | 134 ; CHECK: mov WORD PTR |
| 134 ; CHECK-NEXT: pop ecx | 135 ; CHECK-NEXT: pop ecx |
| 135 ; CHECK-NEXT: and ecx,0xffffffe0 | 136 ; CHECK-NEXT: and ecx,0xffffffe0 |
| 136 ; CHECK-NEXT: jmp ecx | 137 ; CHECK-NEXT: jmp ecx |
| 137 | 138 |
| 138 ; Bundle lock with padding. | 139 ; Bundle lock with padding. |
| 139 define void @bundle_lock_with_padding() { | 140 define internal void @bundle_lock_with_padding() { |
| 140 entry: | 141 entry: |
| 141 call void @call_target() | 142 call void @call_target() |
| 142 ; bundle boundary | 143 ; bundle boundary |
| 143 %addr_byte = bitcast [1 x i8]* @global_byte to i8* | 144 %addr_byte = bitcast [1 x i8]* @global_byte to i8* |
| 144 %addr_short = bitcast [2 x i8]* @global_short to i16* | 145 %addr_short = bitcast [2 x i8]* @global_short to i16* |
| 145 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction | 146 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction |
| 146 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 147 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
| 147 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 148 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
| 148 ret void | 149 ret void |
| 149 ; 3 bytes to restore stack pointer | 150 ; 3 bytes to restore stack pointer |
| (...skipping 10 matching lines...) |
| 160 ; CHECK-NEXT: 20: {{.*}} mov BYTE PTR | 161 ; CHECK-NEXT: 20: {{.*}} mov BYTE PTR |
| 161 ; CHECK-NEXT: 27: {{.*}} mov WORD PTR | 162 ; CHECK-NEXT: 27: {{.*}} mov WORD PTR |
| 162 ; CHECK-NEXT: 30: {{.*}} mov WORD PTR | 163 ; CHECK-NEXT: 30: {{.*}} mov WORD PTR |
| 163 ; CHECK-NEXT: 39: {{.*}} add esp, | 164 ; CHECK-NEXT: 39: {{.*}} add esp, |
| 164 ; CHECK-NEXT: 3c: {{.*}} pop ecx | 165 ; CHECK-NEXT: 3c: {{.*}} pop ecx |
| 165 ; CHECK-NEXT: 3d: {{.*}} nop | 166 ; CHECK-NEXT: 3d: {{.*}} nop |
| 166 ; CHECK-NEXT: 40: {{.*}} and ecx,0xffffffe0 | 167 ; CHECK-NEXT: 40: {{.*}} and ecx,0xffffffe0 |
| 167 ; CHECK-NEXT: 43: {{.*}} jmp ecx | 168 ; CHECK-NEXT: 43: {{.*}} jmp ecx |
| 168 | 169 |
| 169 ; Bundle lock align_to_end without any padding. | 170 ; Bundle lock align_to_end without any padding. |
| 170 define void @bundle_lock_align_to_end_padding_0() { | 171 define internal void @bundle_lock_align_to_end_padding_0() { |
| 171 entry: | 172 entry: |
| 172 call void @call_target() | 173 call void @call_target() |
| 173 ; bundle boundary | 174 ; bundle boundary |
| 174 %addr_short = bitcast [2 x i8]* @global_short to i16* | 175 %addr_short = bitcast [2 x i8]* @global_short to i16* |
| 175 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 176 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
| 176 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 177 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
| 177 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 178 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
| 178 call void @call_target() ; 5-byte instruction | 179 call void @call_target() ; 5-byte instruction |
| 179 ret void | 180 ret void |
| 180 } | 181 } |
| 181 ; CHECK-LABEL: bundle_lock_align_to_end_padding_0 | 182 ; CHECK-LABEL: bundle_lock_align_to_end_padding_0 |
| 182 ; CHECK: call | 183 ; CHECK: call |
| 183 ; CHECK-NEXT: 20: {{.*}} mov WORD PTR | 184 ; CHECK-NEXT: 20: {{.*}} mov WORD PTR |
| 184 ; CHECK-NEXT: 29: {{.*}} mov WORD PTR | 185 ; CHECK-NEXT: 29: {{.*}} mov WORD PTR |
| 185 ; CHECK-NEXT: 32: {{.*}} mov WORD PTR | 186 ; CHECK-NEXT: 32: {{.*}} mov WORD PTR |
| 186 ; CHECK-NEXT: 3b: {{.*}} call | 187 ; CHECK-NEXT: 3b: {{.*}} call |
| 187 | 188 |
| 188 ; Bundle lock align_to_end with one bunch of padding. | 189 ; Bundle lock align_to_end with one bunch of padding. |
| 189 define void @bundle_lock_align_to_end_padding_1() { | 190 define internal void @bundle_lock_align_to_end_padding_1() { |
| 190 entry: | 191 entry: |
| 191 call void @call_target() | 192 call void @call_target() |
| 192 ; bundle boundary | 193 ; bundle boundary |
| 193 %addr_byte = bitcast [1 x i8]* @global_byte to i8* | 194 %addr_byte = bitcast [1 x i8]* @global_byte to i8* |
| 194 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction | 195 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction |
| 195 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction | 196 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction |
| 196 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction | 197 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction |
| 197 call void @call_target() ; 5-byte instruction | 198 call void @call_target() ; 5-byte instruction |
| 198 ret void | 199 ret void |
| 199 } | 200 } |
| 200 ; CHECK-LABEL: bundle_lock_align_to_end_padding_1 | 201 ; CHECK-LABEL: bundle_lock_align_to_end_padding_1 |
| 201 ; CHECK: call | 202 ; CHECK: call |
| 202 ; CHECK-NEXT: 20: {{.*}} mov BYTE PTR | 203 ; CHECK-NEXT: 20: {{.*}} mov BYTE PTR |
| 203 ; CHECK-NEXT: 27: {{.*}} mov BYTE PTR | 204 ; CHECK-NEXT: 27: {{.*}} mov BYTE PTR |
| 204 ; CHECK-NEXT: 2e: {{.*}} mov BYTE PTR | 205 ; CHECK-NEXT: 2e: {{.*}} mov BYTE PTR |
| 205 ; CHECK-NEXT: 35: {{.*}} nop | 206 ; CHECK-NEXT: 35: {{.*}} nop |
| 206 ; CHECK: 3b: {{.*}} call | 207 ; CHECK: 3b: {{.*}} call |
| 207 | 208 |
| 208 ; Bundle lock align_to_end with two bunches of padding. | 209 ; Bundle lock align_to_end with two bunches of padding. |
| 209 define void @bundle_lock_align_to_end_padding_2(i32 %target) { | 210 define internal void @bundle_lock_align_to_end_padding_2(i32 %target) { |
| 210 entry: | 211 entry: |
| 211 call void @call_target() | 212 call void @call_target() |
| 212 ; bundle boundary | 213 ; bundle boundary |
| 213 %addr_byte = bitcast [1 x i8]* @global_byte to i8* | 214 %addr_byte = bitcast [1 x i8]* @global_byte to i8* |
| 214 %addr_short = bitcast [2 x i8]* @global_short to i16* | 215 %addr_short = bitcast [2 x i8]* @global_short to i16* |
| 215 %__1 = inttoptr i32 %target to void ()* | 216 %__1 = inttoptr i32 %target to void ()* |
| 216 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction | 217 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction |
| 217 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 218 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
| 218 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 219 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
| 219 call void %__1() | 220 call void %__1() |
| (...skipping 15 matching lines...) |
| 235 ; CHECK-NEXT: 39: {{.*}} mov [[REG:.*]],DWORD PTR [esp | 236 ; CHECK-NEXT: 39: {{.*}} mov [[REG:.*]],DWORD PTR [esp |
| 236 ; CHECK-NEXT: 3d: {{.*}} nop | 237 ; CHECK-NEXT: 3d: {{.*}} nop |
| 237 ; CHECK: 40: {{.*}} nop | 238 ; CHECK: 40: {{.*}} nop |
| 238 ; CHECK: 5b: {{.*}} and [[REG]],0xffffffe0 | 239 ; CHECK: 5b: {{.*}} and [[REG]],0xffffffe0 |
| 239 ; CHECK-NEXT: 5e: {{.*}} call [[REG]] | 240 ; CHECK-NEXT: 5e: {{.*}} call [[REG]] |
| 240 | 241 |
| 241 ; Stack adjustment state during an argument push sequence gets | 242 ; Stack adjustment state during an argument push sequence gets |
| 242 ; properly checkpointed and restored during the two passes, as | 243 ; properly checkpointed and restored during the two passes, as |
| 243 ; observed by the stack adjustment for accessing stack-allocated | 244 ; observed by the stack adjustment for accessing stack-allocated |
| 244 ; variables. | 245 ; variables. |
| 245 define void @checkpoint_restore_stack_adjustment(i32 %arg) { | 246 define internal void @checkpoint_restore_stack_adjustment(i32 %arg) { |
| 246 entry: | 247 entry: |
| 247 call void @call_target() | 248 call void @call_target() |
| 248 ; bundle boundary | 249 ; bundle boundary |
| 249 call void @checkpoint_restore_stack_adjustment(i32 %arg) | 250 call void @checkpoint_restore_stack_adjustment(i32 %arg) |
| 250 ret void | 251 ret void |
| 251 } | 252 } |
| 252 ; CHECK-LABEL: checkpoint_restore_stack_adjustment | 253 ; CHECK-LABEL: checkpoint_restore_stack_adjustment |
| 253 ; CHECK: call | 254 ; CHECK: call |
| 254 ; CHECK: sub esp,0x10 | 255 ; CHECK: sub esp,0x10 |
| 255 ; The address of %arg should be [esp+0x20], not [esp+0x30]. | 256 ; The address of %arg should be [esp+0x20], not [esp+0x30]. |
| 256 ; CHECK-NEXT: mov [[REG:.*]],DWORD PTR [esp+0x20] | 257 ; CHECK-NEXT: mov [[REG:.*]],DWORD PTR [esp+0x20] |
| 257 ; CHECK-NEXT: mov DWORD PTR [esp],[[REG]] | 258 ; CHECK-NEXT: mov DWORD PTR [esp],[[REG]] |
| 258 ; CHECK: call | 259 ; CHECK: call |
| 259 ; CHECK: add esp,0x10 | 260 ; CHECK: add esp,0x10 |
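
For readers skimming the CHECK lines: `test_indirect_call` and `test_ret` expect the usual NaCl x86-32 masking of computed branch targets before the transfer (`and REG,0xffffffe0`, then `call REG` or `jmp ecx`), emitted inside a bundle lock so no bundle boundary can fall between the mask and the transfer. A minimal sketch of what that mask does, assuming 32-byte bundles; the names below are illustrative, not Subzero APIs:

```python
# Models only the effect of the "and REG,0xffffffe0" in the CHECK lines for
# test_indirect_call and test_ret; this is not Subzero code.
BUNDLE_SIZE = 32
BUNDLE_MASK = 0xFFFFFFFF & ~(BUNDLE_SIZE - 1)   # == 0xffffffe0

def mask_branch_target(addr: int) -> int:
    """Clear the low 5 bits so an indirect call/jmp (or a return through
    pop ecx / and ecx / jmp ecx) can only land on a 32-byte bundle boundary."""
    return addr & BUNDLE_MASK

assert mask_branch_target(0x1003_7FE7) == 0x1003_7FE0
assert mask_branch_target(0x20) == 0x20   # aligned targets are unchanged
```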
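The bundle-packing tests (`packed_bundle`, `nonpacked_bundle`, `label_at_boundary`, and the `bundle_lock_align_to_end_*` cases) all follow from the same padding arithmetic over 32-byte bundles. Below is a rough model of that arithmetic which reproduces the CHECK offsets; it assumes 32-byte bundles (implied by the 0xffffffe0 mask and the 0x20/0x40-aligned offsets) and the function names are hypothetical, not Subzero's:

```python
# A rough model of the nop-padding arithmetic behind the CHECK offsets above.
# Not Subzero code; names are made up for illustration.
BUNDLE_SIZE = 32

def pad_to_boundary(offset: int) -> int:
    """Nop bytes needed to reach the next bundle boundary."""
    return -offset % BUNDLE_SIZE

def place(offset: int, size: int, align_to_end: bool = False):
    """Return (placed_offset, nop_padding) for an instruction of `size` bytes.

    Normally an instruction may not straddle a bundle boundary, so it is
    pushed to the next boundary if it would.  With align_to_end (used for
    calls, so the return address lands on a boundary) it is padded until it
    ends exactly on a boundary.  Zero-size items such as local label
    definitions never trigger padding (see label_at_boundary).
    """
    if size == 0:
        return offset, 0
    if align_to_end:
        pad = pad_to_boundary(offset + size)
        return offset + pad, pad
    if offset // BUNDLE_SIZE != (offset + size - 1) // BUNDLE_SIZE:
        pad = pad_to_boundary(offset)
        return offset + pad, pad
    return offset, 0

# packed_bundle: 7+9+7+9 bytes fill the bundle at 0x20 exactly, so the six
# stores land at 0x20, 0x27, 0x30, 0x37, 0x40, 0x47 with no nops.
offsets = []
offset = 0x20
for size in (7, 9, 7, 9, 7, 9):
    offset, _ = place(offset, size)
    offsets.append(offset)
    offset += size
assert offsets == [0x20, 0x27, 0x30, 0x37, 0x40, 0x47]

# nonpacked_bundle: after 9+9+9 bytes the next 9-byte store would straddle
# 0x40, so 5 nop bytes are emitted at 0x3b and the store lands at 0x40.
offset, pad = place(0x3b, 9)
assert (offset, pad) == (0x40, 5)

# bundle_lock_align_to_end_padding_1: three 7-byte stores end at 0x35; the
# 5-byte call must end on a boundary, so 6 nop bytes push it to 0x3b.
offset, pad = place(0x35, 5, align_to_end=True)
assert (offset, pad) == (0x3b, 6)
```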