Chromium Code Reviews

Index: tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
diff --git a/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll b/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
index 4b0127b7b47e8d1b5b32b236807254e2b21a1edb..8c1db6dac377904d458961167477f30c8f0571a9 100644
--- a/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
+++ b/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
@@ -3,9 +3,12 @@
; (unlike the non-"all" variety of nacl.atomic.fence, which only
; applies to atomic load/stores).
;
-; RUN: %llvm2ice -O2 --verbose none %s | FileCheck %s
; RUN: %llvm2ice -O2 --verbose none %s \
-; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj
+; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj \
+; RUN: | llvm-objdump -d -symbolize -x86-asm-syntax=intel - | FileCheck %s
+
+; TODO(jvoung): llvm-objdump doesn't symbolize global symbols well, so we
+; have [0] == g32_a, [4] == g32_b, [8] == g32_c, etc.
declare void @llvm.nacl.atomic.fence.all()
declare i32 @llvm.nacl.atomic.load.i32(i32*, i32)
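For reference, the new RUN pipeline corresponds roughly to the shell invocation below (a sketch only: llvm2ice and nacl-atomic-fence-all.ll stand in for the lit substitutions %llvm2ice and %s, and the tools are assumed to be on PATH):

    llvm2ice -O2 --verbose none nacl-atomic-fence-all.ll \
      | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj \
      | llvm-objdump -d -symbolize -x86-asm-syntax=intel - \
      | FileCheck nacl-atomic-fence-all.ll

The CHECK lines now match llvm-objdump's disassembly of the assembled object rather than llvm2ice's textual assembly, which is why the symbolic g32_* operands below turn into raw dword ptr [offset] forms.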
@@ -45,15 +48,15 @@ entry:
; CHECK: mov {{.*}}, esp
; CHECK: mov dword ptr {{.*}}, 999
; atomic store (w/ its own mfence)
-; CHECK: lea {{.*}}, g32_a
+; CHECK: lea {{.*}}, dword ptr [0]
; The load + add are optimized into one everywhere.
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
; CHECK: mfence
-; CHECK: lea {{.*}}, g32_b
+; CHECK: lea {{.*}}, dword ptr [4]
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
-; CHECK: lea {{.*}}, g32_c
+; CHECK: lea {{.*}}, dword ptr [8]
; CHECK: add {{.*}}, dword ptr
; CHECK: mfence
; CHECK: mov dword ptr
@@ -88,14 +91,14 @@ entry:
; CHECK: mov {{.*}}, esp
; CHECK: mov dword ptr {{.*}}, 999
; atomic store (w/ its own mfence)
-; CHECK: lea {{.*}}, g32_a
+; CHECK: lea {{.*}}, dword ptr [0]
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
; CHECK: mfence
-; CHECK: lea {{.*}}, g32_b
+; CHECK: lea {{.*}}, dword ptr [4]
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
-; CHECK: lea {{.*}}, g32_c
+; CHECK: lea {{.*}}, dword ptr [8]
; CHECK: mfence
; Load + add can still be optimized into one instruction
; because it is not separated by a fence.
@@ -132,11 +135,11 @@ entry:
; CHECK: mov {{.*}}, esp
; CHECK: mov dword ptr {{.*}}, 999
; atomic store (w/ its own mfence)
-; CHECK: lea {{.*}}, g32_a
+; CHECK: lea {{.*}}, dword ptr [0]
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
; CHECK: mfence
-; CHECK: lea {{.*}}, g32_b
+; CHECK: lea {{.*}}, dword ptr [4]
; This load + add are no longer optimized into one,
; though perhaps it should be legal as long as
; the load stays on the same side of the fence.
@@ -144,7 +147,7 @@ entry:
; CHECK: mfence
; CHECK: add {{.*}}, 1
; CHECK: mov dword ptr
-; CHECK: lea {{.*}}, g32_c
+; CHECK: lea {{.*}}, dword ptr [8]
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
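To make the folding that these comments describe concrete, here is a purely illustrative contrast (register names and exact operands are invented; only the shape, a memory read folded into the add versus a separate load split from its add by the mfence, mirrors the CHECK patterns above):

    ; no fence between the load and the add: the load of g32_b is folded
    lea ecx, dword ptr [4]
    add eax, dword ptr [ecx]    ; eax assumed to already hold the other addend
    mov dword ptr [ecx], eax

    ; fence between the load and the add: the load stays before the mfence,
    ; so it is emitted as a separate mov and the add happens afterwards
    lea ecx, dword ptr [4]
    mov eax, dword ptr [ecx]
    mfence
    add eax, 1
    mov dword ptr [ecx], eax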
@@ -184,7 +187,7 @@ entry:
ret i32 %b1234
}
; CHECK-LABEL: could_have_fused_loads
-; CHECK: lea {{.*}}, g32_d
+; CHECK: lea {{.*}},
|
Jim Stichnoth (2014/08/28 20:19:23):
Should g32_d turn into "dword ptr [12]"?

jvoung (off chromium) (2014/08/29 00:51:20):
This is actually going to be "dword ptr [0]", beca
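A quick way to check where the g32_* globals actually land is to keep the assembled object and dump its symbols and relocations rather than relying on the symbolized disassembly (again a sketch with assumed paths and output file name; -t prints the symbol table and -r the relocations):

    llvm2ice -O2 --verbose none nacl-atomic-fence-all.ll \
      | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj -o fence.o
    llvm-objdump -t fence.o
    llvm-objdump -r fence.o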
; CHECK: mov {{.*}}, byte ptr
; CHECK: mov {{.*}}, byte ptr
; CHECK: mov {{.*}}, byte ptr
@@ -208,7 +211,7 @@ branch2:
ret i32 %z
}
; CHECK-LABEL: could_have_hoisted_loads
-; CHECK: lea {{.*}}, g32_d
+; CHECK: lea {{.*}},
; CHECK: je {{.*}}
; CHECK: jmp {{.*}}
; CHECK: mov {{.*}}, dword ptr