Chromium Code Reviews

Diff: tests_lit/llvm2ice_tests/nacl-atomic-fence-all.ll

Issue 756543002: Subzero: Fix new issues after the LLVM 3.5 merge. (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Also use `llvm-config --system-libs` Created 6 years ago
; Test that loads/stores don't move across a nacl.atomic.fence.all.
; This should apply to both atomic and non-atomic loads/stores
; (unlike the non-"all" variety of nacl.atomic.fence, which only
; applies to atomic loads/stores).
;
; RUN: %p2i -i %s --args -O2 --verbose none \
; RUN:   | llvm-mc -triple=i686-none-nacl -filetype=obj \
; RUN:   | llvm-objdump -d -r -symbolize -x86-asm-syntax=intel - | FileCheck %s

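For context, the distinction drawn above maps to two separate PNaCl intrinsics. A minimal sketch of their declarations (the fence.all form matches the calls in this file; the i32 memory-order parameter on the non-"all" fence is an assumption from the PNaCl ABI, not something shown in this diff):

declare void @llvm.nacl.atomic.fence.all()  ; orders atomic AND non-atomic loads/stores
declare void @llvm.nacl.atomic.fence(i32)   ; assumed form: orders atomic ops only, takes a memory-order i32
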
; TODO(jvoung): llvm-objdump doesn't symbolize global symbols well, so we
(...skipping 31 matching lines...)
  store i32 %l_c2, i32* %p_c, align 1

  ret i32 %l_c2
}
; CHECK-LABEL: test_fused_load_add_a
; alloca store
; CHECK: mov {{.*}}, esp
; CHECK: mov dword ptr {{.*}}, 999
; atomic store (w/ its own mfence)
; The load + add are optimized into one everywhere.
-; CHECK: add {{.*}}, dword ptr [0]
+; CHECK: add {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mov dword ptr
; CHECK: mfence
-; CHECK: add {{.*}}, dword ptr [4]
+; CHECK: add {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mov dword ptr
-; CHECK: add {{.*}}, dword ptr [8]
+; CHECK: add {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mfence
; CHECK: mov dword ptr

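The "optimized into one" the CHECKs above rely on is load folding: a load whose only consumer is an add can become the add's memory operand. A minimal sketch of the pattern, with hypothetical %p and %r names (not taken from the patch):

  %l = load i32* %p, align 1  ; load feeding a single add...
  %sum = add i32 %l, %r       ; ...folds at -O2 into one `add <reg>, dword ptr [<mem>]`
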
; Test with the fence moved up a bit.
define i32 @test_fused_load_add_b() {
entry:
  %p_alloca = alloca i8, i32 4, align 4
  %p_alloca_bc = bitcast i8* %p_alloca to i32*
  store i32 999, i32* %p_alloca_bc, align 1
(...skipping 14 matching lines...)
  %l_c2 = add i32 %l_c, 1
  store i32 %l_c2, i32* %p_c, align 1

  ret i32 %l_c2
}
; CHECK-LABEL: test_fused_load_add_b
; alloca store
; CHECK: mov {{.*}}, esp
; CHECK: mov dword ptr {{.*}}, 999
; atomic store (w/ its own mfence)
-; CHECK: add {{.*}}, dword ptr [0]
+; CHECK: add {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mov dword ptr
; CHECK: mfence
-; CHECK: add {{.*}}, dword ptr [4]
+; CHECK: add {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mov dword ptr
; CHECK: mfence
; Load + add can still be optimized into one instruction
; because it is not separated by a fence.
-; CHECK: add {{.*}}, dword ptr [8]
+; CHECK: add {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mov dword ptr

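On the "atomic store (w/ its own mfence)" comments: a sequentially consistent atomic store lowers to a plain mov followed by an mfence, which is why the tests expect an mfence they never wrote explicitly. A rough sketch (the intrinsic signature and the constant 6 for seq_cst are assumptions from the PNaCl ABI; the elided lines above presumably contain the real call):

  call void @llvm.nacl.atomic.store.i32(i32 %v, i32* %g, i32 6)
  ; expected lowering: mov dword ptr [<mem>], <reg>
  ;                    mfence
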
; Test with the fence splitting a load/add.
define i32 @test_fused_load_add_c() {
entry:
  %p_alloca = alloca i8, i32 4, align 4
  %p_alloca_bc = bitcast i8* %p_alloca to i32*
  store i32 999, i32* %p_alloca_bc, align 1

(...skipping 13 matching lines...)
  %l_c2 = add i32 %l_c, 1
  store i32 %l_c2, i32* %p_c, align 1

  ret i32 %l_c2
}
; CHECK-LABEL: test_fused_load_add_c
; alloca store
; CHECK: mov {{.*}}, esp
; CHECK: mov dword ptr {{.*}}, 999
; atomic store (w/ its own mfence)
-; CHECK: add {{.*}}, dword ptr [0]
+; CHECK: add {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mov dword ptr
; CHECK: mfence
; This load + add are no longer optimized into one,
; though perhaps it should be legal as long as
; the load stays on the same side of the fence.
-; CHECK: mov {{.*}}, dword ptr [4]
+; CHECK: mov {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mfence
; CHECK: add {{.*}}, 1
; CHECK: mov dword ptr
-; CHECK: add {{.*}}, dword ptr [8]
+; CHECK: add {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mov dword ptr

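The shape being tested in this variant, sketched with hypothetical names: the fence lands between the load and its add, so the load must stay a separate mov and the add takes an immediate rather than a memory operand:

  %l = load i32* %p, align 1
  call void @llvm.nacl.atomic.fence.all()  ; fence splits the load from the add
  %l2 = add i32 %l, 1                      ; can no longer fold the load
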
; Test where a bunch of i8 loads could have been fused into one
; i32 load, but a fence blocks that.
define i32 @could_have_fused_loads() {
entry:
  %ptr1 = bitcast [4 x i8]* @g32_d to i8*
  %b1 = load i8* %ptr1, align 1
(...skipping 19 matching lines...)
  %b12 = or i32 %b1.ext, %b2.shift
  %b3.ext = zext i8 %b3 to i32
  %b3.shift = shl i32 %b3.ext, 16
  %b123 = or i32 %b12, %b3.shift
  %b4.ext = zext i8 %b4 to i32
  %b4.shift = shl i32 %b4.ext, 24
  %b1234 = or i32 %b123, %b4.shift
  ret i32 %b1234
}
; CHECK-LABEL: could_have_fused_loads
-; CHECK: mov {{.*}}, byte ptr [12]
+; CHECK: mov {{.*}}, byte ptr
; CHECK-NEXT: R_386_32
; CHECK: mov {{.*}}, byte ptr
; CHECK: mov {{.*}}, byte ptr
; CHECK: mfence
; CHECK: mov {{.*}}, byte ptr

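For reference, the fused form the fence rules out would replace the four byte loads and the shift/or reassembly with a single i32 load; a hypothetical sketch, never emitted here precisely because the mfence sits between the byte loads:

  %ptr = bitcast [4 x i8]* @g32_d to i32*
  %b1234 = load i32* %ptr, align 1  ; one 4-byte load instead of four 1-byte loads
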
; Test where an identical load from two branches could have been hoisted
; up, and then the code merged, but a fence prevents it.
define i32 @could_have_hoisted_loads(i32 %x) {
entry:
  %ptr = bitcast [4 x i8]* @g32_d to i32*
  %cmp = icmp eq i32 %x, 1
  br i1 %cmp, label %branch1, label %branch2
branch1:
  %y = load i32* %ptr, align 1
  ret i32 %y
branch2:
  call void @llvm.nacl.atomic.fence.all()
  %z = load i32* %ptr, align 1
  ret i32 %z
}
; CHECK-LABEL: could_have_hoisted_loads
; CHECK: jne {{.*}}
-; CHECK: mov {{.*}}, dword ptr [12]
+; CHECK: mov {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: ret
; CHECK: mfence
-; CHECK: mov {{.*}}, dword ptr [12]
+; CHECK: mov {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: ret
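For reference, the hoist that the fence forbids would look roughly like this (a hypothetical transformed version of @could_have_hoisted_loads, not code from the patch): the load moves into the entry block, which would reorder it before the fence in branch2:

entry:
  %v = load i32* %ptr, align 1  ; hoisted above the branch, and the fence
  %cmp = icmp eq i32 %x, 1
  br i1 %cmp, label %branch1, label %branch2
branch1:
  ret i32 %v
branch2:
  ret i32 %v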
