OLD | NEW |
1 ; Tests basics and corner cases of x86-32 sandboxing, using -Om1 in | 1 ; Tests basics and corner cases of x86-32 sandboxing, using -Om1 in |
2 ; the hope that the output will remain stable. When packing bundles, | 2 ; the hope that the output will remain stable. When packing bundles, |
3 ; we try to limit to a few instructions with well known sizes and | 3 ; we try to limit to a few instructions with well known sizes and |
4 ; minimal use of registers and stack slots in the lowering sequence. | 4 ; minimal use of registers and stack slots in the lowering sequence. |
5 | 5 |
6 ; RUN: %p2i -i %s --sandbox --filetype=obj --disassemble --args -Om1 \ | 6 ; RUN: %p2i -i %s --sandbox --filetype=obj --disassemble --args -Om1 \ |
7 ; RUN: -allow-externally-defined-symbols \ | 7 ; RUN: -allow-externally-defined-symbols \ |
8 ; RUN: -ffunction-sections | FileCheck %s | 8 ; RUN: -ffunction-sections | FileCheck %s |
9 | 9 |
| 10 ; RUN: %p2i -i %s --sandbox --filetype=obj --disassemble --target=x8664 \ |
| 11 ; RUN: --args -Om1 -allow-externally-defined-symbols \ |
| 12 ; RUN: -ffunction-sections | FileCheck %s --check-prefix X8664 |
| 13 |
10 declare void @call_target() | 14 declare void @call_target() |
11 @global_byte = internal global [1 x i8] zeroinitializer | 15 @global_byte = internal global [1 x i8] zeroinitializer |
12 @global_short = internal global [2 x i8] zeroinitializer | 16 @global_short = internal global [2 x i8] zeroinitializer |
13 @global_int = internal global [4 x i8] zeroinitializer | 17 @global_int = internal global [4 x i8] zeroinitializer |
14 | 18 |
15 ; A direct call sequence uses the right mask and register-call sequence. | 19 ; A direct call sequence uses the right mask and register-call sequence. |
16 define internal void @test_direct_call() { | 20 define internal void @test_direct_call() { |
17 entry: | 21 entry: |
18 call void @call_target() | 22 call void @call_target() |
19 ret void | 23 ret void |
20 } | 24 } |
21 ; CHECK-LABEL: test_direct_call | 25 ; CHECK-LABEL: test_direct_call |
22 ; CHECK: nop | 26 ; CHECK: nop |
23 ; CHECK: 1b: {{.*}} call 1c | 27 ; CHECK: 1b: {{.*}} call 1c |
24 ; CHECK-NEXT: 20: | 28 ; CHECK-NEXT: 20: |
| 29 ; X8664-LABEL: test_direct_call |
| 30 ; X8664: push {{.*}}$local$__0 |
| 31 ; X8664: jmp {{.*}} call_target |
| 32 ; X8664: {{0+}}20 <{{.*}}$local$__0>: |
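A minimal sketch (not taken from the test) of the alignment arithmetic behind the x86-32 checks above, assuming the usual 32-byte NaCl bundles and a 5-byte e8 rel32 call encoding: nops are inserted in front of the call so that it ends exactly at a bundle boundary, which is why the call sits at 0x1b and the following instruction at 0x20.

    # Hypothetical helper, not part of the test: nop bytes needed so that a
    # 5-byte direct call ends exactly on a 32-byte bundle boundary.
    BUNDLE_SIZE = 32
    CALL_SIZE = 5  # e8 rel32

    def nop_padding(offset):
        return (-(offset + CALL_SIZE)) % BUNDLE_SIZE

    # A call emitted at 0x1b needs no further padding: its return address is
    # the bundle-aligned 0x20 that the CHECK-NEXT line verifies.
    assert nop_padding(0x1b) == 0
    assert (0x1b + CALL_SIZE) % BUNDLE_SIZE == 0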
25 | 33 |
26 ; An indirect call sequence uses the right mask and register-call sequence. | 34 ; An indirect call sequence uses the right mask and register-call sequence. |
27 define internal void @test_indirect_call(i32 %target) { | 35 define internal void @test_indirect_call(i32 %target) { |
28 entry: | 36 entry: |
29 %__1 = inttoptr i32 %target to void ()* | 37 %__1 = inttoptr i32 %target to void ()* |
30 call void %__1() | 38 call void %__1() |
31 ret void | 39 ret void |
32 } | 40 } |
33 ; CHECK-LABEL: test_indirect_call | 41 ; CHECK-LABEL: test_indirect_call |
34 ; CHECK: mov [[REG:.*]],DWORD PTR [esp | 42 ; CHECK: mov [[REG:.*]],DWORD PTR [esp |
35 ; CHECK-NEXT: nop | 43 ; CHECK-NEXT: nop |
36 ; CHECK: 1b: {{.*}} and [[REG]],0xffffffe0 | 44 ; CHECK: 1b: {{.*}} and [[REG]],0xffffffe0 |
37 ; CHECK-NEXT: call [[REG]] | 45 ; CHECK-NEXT: call [[REG]] |
38 ; CHECK-NEXT: 20: | 46 ; CHECK-NEXT: 20: |
| 47 ; X8664-LABEL: test_indirect_call |
| 48 ; X8664: push {{.*}}$local$__0 |
| 49 ; X8664: {{.*}} and e[[REG:..]],0xffffffe0 |
| 50 ; X8664: add r[[REG]],r15 |
| 51 ; X8664: jmp r[[REG]] |
| 52 ; X8664: {{0+}}20 <{{.*}}$local$__0>: |
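A small Python sketch (illustrative, not from the test) of the masking these checks verify, assuming 32-byte bundles: on x86-32 the low five bits of the target are cleared so indirect control flow can only reach a bundle start; on x86-64 the masked 32-bit value is additionally rebased on r15, the sandbox base register.

    # Names below are invented for illustration.
    BUNDLE_MASK = 0xffffffe0  # clears the low 5 bits (32-byte alignment)

    def sandboxed_call_target_x8632(target):
        # and REG,0xffffffe0 ; call REG
        return target & BUNDLE_MASK

    def sandboxed_call_target_x8664(target, r15_base):
        # and eREG,0xffffffe0 (zero-extends), then add rREG,r15 ; jmp rREG
        return r15_base + (target & BUNDLE_MASK)

    assert sandboxed_call_target_x8632(0x12345678) == 0x12345660
    assert sandboxed_call_target_x8664(0x12345678, 0x100000000000) == 0x100012345660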
39 | 53 |
40 ; A return sequences uses the right pop / mask / jmp sequence. | 54 ; A return sequence uses the right pop / mask / jmp sequence. |
41 define internal void @test_ret() { | 55 define internal void @test_ret() { |
42 entry: | 56 entry: |
43 ret void | 57 ret void |
44 } | 58 } |
45 ; CHECK-LABEL: test_ret | 59 ; CHECK-LABEL: test_ret |
46 ; CHECK: pop ecx | 60 ; CHECK: pop ecx |
47 ; CHECK-NEXT: and ecx,0xffffffe0 | 61 ; CHECK-NEXT: and ecx,0xffffffe0 |
48 ; CHECK-NEXT: jmp ecx | 62 ; CHECK-NEXT: jmp ecx |
| 63 ; X8664-LABEL: test_ret |
| 64 ; X8664: pop rcx |
| 65 ; X8664: and ecx,0xffffffe0 |
| 66 ; X8664: add rcx,r15 |
| 67 ; X8664: jmp rcx |
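The return lowering applies the same idea to the popped return address. One x86-64 detail the X8664 lines depend on: the and is performed on ecx, and a 32-bit operation zero-extends into rcx, so the subsequent add rcx,r15 keeps the jump target inside the region based at r15. A hedged sketch of that zero-extension:

    # Illustrative only: model 'pop rcx; and ecx,0xffffffe0; add rcx,r15; jmp rcx'.
    def sandboxed_return_target(popped_rcx, r15_base):
        ecx = (popped_rcx & 0xffffffff) & 0xffffffe0  # 32-bit and zero-extends
        return r15_base + ecx                         # add rcx,r15

    # High bits smuggled into the popped value are simply discarded.
    assert sandboxed_return_target(0xdeadbeef12345678, 0x100000000000) == 0x100012345660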
49 | 68 |
50 ; A perfectly packed bundle should not have nops at the end. | 69 ; A perfectly packed bundle should not have nops at the end. |
51 define internal void @packed_bundle() { | 70 define internal void @packed_bundle() { |
52 entry: | 71 entry: |
53 call void @call_target() | 72 call void @call_target() |
54 ; bundle boundary | 73 ; bundle boundary |
55 %addr_byte = bitcast [1 x i8]* @global_byte to i8* | 74 %addr_byte = bitcast [1 x i8]* @global_byte to i8* |
56 %addr_short = bitcast [2 x i8]* @global_short to i16* | 75 %addr_short = bitcast [2 x i8]* @global_short to i16* |
57 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction | 76 store i8 0, i8* %addr_byte, align 1 ; 7-byte instruction |
58 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction | 77 store i16 0, i16* %addr_short, align 1 ; 9-byte instruction |
(...skipping 173 matching lines...)
232 ; CHECK: call | 251 ; CHECK: call |
233 ; CHECK-NEXT: 20: {{.*}} mov BYTE PTR | 252 ; CHECK-NEXT: 20: {{.*}} mov BYTE PTR |
234 ; CHECK-NEXT: 27: {{.*}} mov WORD PTR | 253 ; CHECK-NEXT: 27: {{.*}} mov WORD PTR |
235 ; CHECK-NEXT: 30: {{.*}} mov WORD PTR | 254 ; CHECK-NEXT: 30: {{.*}} mov WORD PTR |
236 ; CHECK-NEXT: 39: {{.*}} mov [[REG:.*]],DWORD PTR [esp | 255 ; CHECK-NEXT: 39: {{.*}} mov [[REG:.*]],DWORD PTR [esp |
237 ; CHECK-NEXT: 3d: {{.*}} nop | 256 ; CHECK-NEXT: 3d: {{.*}} nop |
238 ; CHECK: 40: {{.*}} nop | 257 ; CHECK: 40: {{.*}} nop |
239 ; CHECK: 5b: {{.*}} and [[REG]],0xffffffe0 | 258 ; CHECK: 5b: {{.*}} and [[REG]],0xffffffe0 |
240 ; CHECK-NEXT: 5e: {{.*}} call [[REG]] | 259 ; CHECK-NEXT: 5e: {{.*}} call [[REG]] |
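The offsets in the CHECK lines directly above are consistent with the instruction sizes annotated in the IR (a 7-byte byte store and 9-byte halfword stores); a quick illustrative check of that arithmetic, using only values visible in the checks:

    # Offsets and sizes as they appear in the CHECK lines / IR comments above.
    offsets = [0x20, 0x27, 0x30, 0x39]
    sizes   = [7, 9, 9]           # mov BYTE PTR, then two mov WORD PTR stores
    for off, size, nxt in zip(offsets, sizes, offsets[1:]):
        assert off + size == nxt
    assert 0x5e + 2 == 0x60       # the 2-byte 'call REG' ends on a bundle boundary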
241 | 260 |
| 261 ; Tests the pad_to_end bundle alignment with no padding bytes needed. |
| 262 define internal void @bundle_lock_pad_to_end_padding_0(i32 %arg0, i32 %arg1, |
| 263 i32 %arg3, i32 %arg4, |
| 264 i32 %arg5, i32 %arg6) { |
| 265 call void @call_target() |
| 266 ; bundle boundary |
| 267 %x = add i32 %arg5, %arg6 ; 12 bytes |
| 268 %y = trunc i32 %x to i16 ; 10 bytes |
| 269 call void @call_target() ; 10 bytes |
| 270 ; bundle boundary |
| 271 ret void |
| 272 } |
| 273 ; X8664-LABEL: bundle_lock_pad_to_end_padding_0 |
| 274 ; X8664: <{{.*}}$local$__0>: |
| 275 ; X8664: 56: {{.*}} push {{.*}}$local$__1 |
| 276 ; X8664: 5b: {{.*}} jmp {{.*}} call_target |
| 277 ; X8664: 60: {{.*}} add |
| 278 |
| 279 ; Tests the pad_to_end bundle alignment with 11 padding bytes needed, and some |
| 280 ; instructions before the call. |
| 281 define internal void @bundle_lock_pad_to_end_padding_11(i32 %arg0, i32 %arg1, |
| 282 i32 %arg3, i32 %arg4, |
| 283 i32 %arg5, i32 %arg6) { |
| 284 call void @call_target() |
| 285 ; bundle boundary |
| 286 %x = add i32 %arg5, %arg6 ; 11 bytes |
| 287 call void @call_target() ; 10 bytes |
| 288 ; 11 bytes of nop |
| 289 ; bundle boundary |
| 290 ret void |
| 291 } |
| 292 ; X8664-LABEL: bundle_lock_pad_to_end_padding_11 |
| 293 ; X8664: <{{.*}}$local$__0>: |
| 294 ; X8664: 4b: {{.*}} push {{.*}}$local$__1 |
| 295 ; X8664: 50: {{.*}} jmp {{.*}} call_target |
| 296 ; X8664: 55: {{.*}} nop |
| 297 ; X8664: 5d: {{.*}} nop |
| 298 ; X8664: 60: {{.*}} add |
| 299 |
| 300 ; Tests the pad_to_end bundle alignment with 22 padding bytes needed, and no |
| 301 ; instructions before the call. |
| 302 define internal void @bundle_lock_pad_to_end_padding_22(i32 %arg0, i32 %arg1, |
| 303 i32 %arg3, i32 %arg4, |
| 304 i32 %arg5, i32 %arg6) { |
| 305 call void @call_target() |
| 306 ; bundle boundary |
| 307 call void @call_target() ; 10 bytes |
| 308 ; 22 bytes of nop |
| 309 ; bundle boundary |
| 310 ret void |
| 311 } |
| 312 ; X8664-LABEL: bundle_lock_pad_to_end_padding_22 |
| 313 ; X8664: <{{.*}}$local$__0>: |
| 314 ; X8664: 40: {{.*}} push {{.*}}$local$__1 |
| 315 ; X8664: 45: {{.*}} jmp {{.*}} call_target |
| 316 ; X8664: 4a: {{.*}} nop |
| 317 ; X8664: 52: {{.*}} nop |
| 318 ; X8664: 5a: {{.*}} nop |
| 319 ; X8664: 60: {{.*}} add |
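A short sketch (not from the test) of the padding arithmetic the three pad_to_end tests annotate, assuming 32-byte bundles: the locked call sequence must end exactly at a bundle boundary, so whatever the preceding instructions leave unfilled is topped up with nops, matching the byte counts in the IR comments.

    # Hedged helper: nop bytes needed to reach the next 32-byte boundary.
    def pad_to_end(used_bytes, bundle=32):
        return (-used_bytes) % bundle

    assert pad_to_end(12 + 10 + 10) == 0    # bundle_lock_pad_to_end_padding_0
    assert pad_to_end(11 + 10) == 11        # bundle_lock_pad_to_end_padding_11
    assert pad_to_end(10) == 22             # bundle_lock_pad_to_end_padding_22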
| 320 |
242 ; Stack adjustment state during an argument push sequence gets | 321 ; Stack adjustment state during an argument push sequence gets |
243 ; properly checkpointed and restored during the two passes, as | 322 ; properly checkpointed and restored during the two passes, as |
244 ; observed by the stack adjustment for accessing stack-allocated | 323 ; observed by the stack adjustment for accessing stack-allocated |
245 ; variables. | 324 ; variables. |
246 define internal void @checkpoint_restore_stack_adjustment(i32 %arg) { | 325 define internal void @checkpoint_restore_stack_adjustment(i32 %arg) { |
247 entry: | 326 entry: |
248 call void @call_target() | 327 call void @call_target() |
249 ; bundle boundary | 328 ; bundle boundary |
250 call void @checkpoint_restore_stack_adjustment(i32 %arg) | 329 call void @checkpoint_restore_stack_adjustment(i32 %arg) |
251 ret void | 330 ret void |
252 } | 331 } |
253 ; CHECK-LABEL: checkpoint_restore_stack_adjustment | 332 ; CHECK-LABEL: checkpoint_restore_stack_adjustment |
254 ; CHECK: sub esp,0x1c | 333 ; CHECK: sub esp,0x1c |
255 ; CHECK: call | 334 ; CHECK: call |
256 ; The address of %arg should be [esp+0x20], not [esp+0x30]. | 335 ; The address of %arg should be [esp+0x20], not [esp+0x30]. |
257 ; CHECK-NEXT: mov [[REG:.*]],DWORD PTR [esp+0x20] | 336 ; CHECK-NEXT: mov [[REG:.*]],DWORD PTR [esp+0x20] |
258 ; CHECK-NEXT: mov DWORD PTR [esp],[[REG]] | 337 ; CHECK-NEXT: mov DWORD PTR [esp],[[REG]] |
259 ; CHECK: call | 338 ; CHECK: call |
260 ; CHECK: add esp,0x1c | 339 ; CHECK: add esp,0x1c |
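The offset the comment above insists on follows from simple frame arithmetic, sketched here under the assumption that only the 0x1c-byte allocation and the 4-byte return address sit between esp and %arg; the rejected [esp+0x30] is what would result if the in-flight argument-push adjustment were not checkpointed and restored between the two lowering passes.

    # Illustrative frame arithmetic for checkpoint_restore_stack_adjustment.
    FRAME_ALLOC = 0x1c   # sub esp,0x1c in the prologue
    RET_ADDR    = 4      # pushed by the caller's call instruction
    assert FRAME_ALLOC + RET_ADDR == 0x20   # hence DWORD PTR [esp+0x20]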
| 340 |