Index: tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
diff --git a/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll b/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
index 08d9a3b3a4d92cfb1dc16c8c1981f1fcbc240cf1..d29acf3c12201e6d8f3f7c1e7931c2d8d2db8885 100644
--- a/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
+++ b/tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll
@@ -6,11 +6,11 @@
; TODO(kschimpf) Find out why lc2i is needed.
; RUN: %lc2i -i %s --args -O2 --verbose none \
; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj \
-; RUN: | llvm-objdump -d -symbolize -x86-asm-syntax=intel - | FileCheck %s
+; RUN: | llvm-objdump -d -r -symbolize -x86-asm-syntax=intel - | FileCheck %s
; TODO(jvoung): llvm-objdump doesn't symbolize global symbols well, so we
-; have [0] == g32_a, [4] == g32_b, [8] == g32_c.
-; g32_d is also [0] because it's in the .data section instead of .bss.
+; have 0 == g32_a, 4 == g32_b, 8 == g32_c.
+; g32_d is also 0 because it's in the .data section instead of .bss.
declare void @llvm.nacl.atomic.fence.all()
declare i32 @llvm.nacl.atomic.load.i32(i32*, i32)
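The `-r` added to the llvm-objdump invocation interleaves relocation records with the disassembly, so the checks below can anchor on `R_386_32` entries instead of on bracketed absolute addresses. A rough sketch of the output shape this produces (offsets, encodings, and symbol names are illustrative only):

    0: a1 00 00 00 00                mov eax, dword ptr [0]
             1: R_386_32 g32_a

The relocation record is printed on the line directly below the instruction it applies to, which is what the `CHECK-NEXT: R_386_32` lines below rely on.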
@@ -50,15 +50,18 @@ entry:
; CHECK: mov {{.*}}, esp
; CHECK: mov dword ptr {{.*}}, 999
; atomic store (w/ its own mfence)
-; CHECK: dword ptr [0]
+; CHECK: mov {{.*}}, 0
+; CHECK-NEXT: R_386_32
; The load + add are optimized into one everywhere.
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
; CHECK: mfence
-; CHECK: dword ptr [4]
+; CHECK: mov {{.*}}, 4
+; CHECK-NEXT: R_386_32
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
-; CHECK: dword ptr [8]
+; CHECK: mov {{.*}}, 8
+; CHECK-NEXT: R_386_32
; CHECK: add {{.*}}, dword ptr
; CHECK: mfence
; CHECK: mov dword ptr
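Each rewritten check matches the instruction that materializes a global's address: before relocation the immediate is just the symbol's offset within its section (0, 4, or 8), and the paired `CHECK-NEXT` pins an absolute relocation to that exact instruction. Since FileCheck's CHECK-NEXT only matches on the very next input line, the distilled pattern (immediate illustrative) is:

    ; CHECK:      mov {{.*}}, 0
    ; CHECK-NEXT: R_386_32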
@@ -93,14 +96,17 @@ entry:
; CHECK: mov {{.*}}, esp
; CHECK: mov dword ptr {{.*}}, 999
; atomic store (w/ its own mfence)
-; CHECK: dword ptr [0]
+; CHECK: mov {{.*}}, 0
+; CHECK-NEXT: R_386_32
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
; CHECK: mfence
-; CHECK: dword ptr [4]
+; CHECK: mov {{.*}}, 4
+; CHECK-NEXT: R_386_32
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
-; CHECK: dword ptr [8]
+; CHECK: mov {{.*}}, 8
+; CHECK-NEXT: R_386_32
; CHECK: mfence
; Load + add can still be optimized into one instruction
; because it is not separated by a fence.
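The "optimized into one" wording refers to folding the loaded value into the arithmetic instruction as an x86 memory operand; an intervening mfence forces the load and the use apart. A sketch of the two shapes, with registers and symbol names illustrative:

    ; fused: the load is a memory operand of the add
    add eax, dword ptr [g32_b]

    ; un-fused: a fence between load and use blocks the fold
    mov ecx, dword ptr [g32_b]
    mfence
    add ecx, 1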
@@ -137,11 +143,13 @@ entry:
; CHECK: mov {{.*}}, esp
; CHECK: mov dword ptr {{.*}}, 999
; atomic store (w/ its own mfence)
-; CHECK: dword ptr [0]
+; CHECK: mov {{.*}}, 0
+; CHECK-NEXT: R_386_32
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
; CHECK: mfence
-; CHECK: dword ptr [4]
+; CHECK: mov {{.*}}, 4
+; CHECK-NEXT: R_386_32
; This load + add are no longer optimized into one,
; though perhaps it should be legal as long as
; the load stays on the same side of the fence.
@@ -149,7 +157,8 @@ entry:
; CHECK: mfence
; CHECK: add {{.*}}, 1
; CHECK: mov dword ptr
-; CHECK: dword ptr [8]
+; CHECK: mov {{.*}}, 8
+; CHECK-NEXT: R_386_32
; CHECK: add {{.*}}, dword ptr
; CHECK: mov dword ptr
@@ -189,7 +198,8 @@ entry:
ret i32 %b1234
}
; CHECK-LABEL: could_have_fused_loads
-; CHECK: dword ptr [0]
+; CHECK: mov {{.*}}, 0
+; CHECK-NEXT: R_386_32
; CHECK: mov {{.*}}, byte ptr
; CHECK: mov {{.*}}, byte ptr
; CHECK: mov {{.*}}, byte ptr
@@ -213,7 +223,8 @@ branch2:
ret i32 %z
}
; CHECK-LABEL: could_have_hoisted_loads
-; CHECK: dword ptr [0]
+; CHECK: mov {{.*}}, 0
+; CHECK-NEXT: R_386_32
; CHECK: jne {{.*}}
; CHECK: mov {{.*}}, dword ptr
; CHECK: ret
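For debugging check failures, the RUN pipeline can be reproduced by hand. A sketch, assuming %lc2i (the driver script that lit substitutes, per the TODO at the top) has already produced textual asm in fence.s, a hypothetical file name:

    llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj fence.s -o fence.o
    llvm-objdump -d -r -symbolize -x86-asm-syntax=intel fence.o \
      | FileCheck tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll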