Chromium Code Reviews

Side by Side Diff: tests_lit/llvm2ice_tests/nacl-atomic-intrinsics.ll

Issue 914263005: Subzero: switch from llvm-objdump to objdump for lit tests (for LLVM merge) (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: Created 5 years, 10 months ago
1 ; This tests each of the supported NaCl atomic instructions for every 1 ; This tests each of the supported NaCl atomic instructions for every
2 ; size allowed. 2 ; size allowed.
3 3
4 ; RUN: %p2i -i %s --args -O2 --verbose none \ 4 ; RUN: %p2i -i %s --assemble --disassemble --args -O2 --verbose none \
5 ; RUN: | llvm-mc -triple=i686-none-nacl -filetype=obj \
6 ; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - \
7 ; RUN: | FileCheck %s 5 ; RUN: | FileCheck %s
8 ; RUN: %p2i -i %s --args -O2 --verbose none \ 6 ; RUN: %p2i -i %s --assemble --disassemble --args -O2 --verbose none \
9 ; RUN: | llvm-mc -triple=i686-none-nacl -filetype=obj \
10 ; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - \
11 ; RUN: | FileCheck --check-prefix=CHECKO2 %s 7 ; RUN: | FileCheck --check-prefix=CHECKO2 %s
12 ; RUN: %p2i -i %s --args -Om1 --verbose none \ 8 ; RUN: %p2i -i %s --assemble --disassemble --args -Om1 --verbose none \
13 ; RUN: | llvm-mc -triple=i686-none-nacl -filetype=obj \
14 ; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - \
15 ; RUN: | FileCheck %s 9 ; RUN: | FileCheck %s
16 10
17 declare i8 @llvm.nacl.atomic.load.i8(i8*, i32) 11 declare i8 @llvm.nacl.atomic.load.i8(i8*, i32)
18 declare i16 @llvm.nacl.atomic.load.i16(i16*, i32) 12 declare i16 @llvm.nacl.atomic.load.i16(i16*, i32)
19 declare i32 @llvm.nacl.atomic.load.i32(i32*, i32) 13 declare i32 @llvm.nacl.atomic.load.i32(i32*, i32)
20 declare i64 @llvm.nacl.atomic.load.i64(i64*, i32) 14 declare i64 @llvm.nacl.atomic.load.i64(i64*, i32)
21 declare void @llvm.nacl.atomic.store.i8(i8, i8*, i32) 15 declare void @llvm.nacl.atomic.store.i8(i8, i8*, i32)
22 declare void @llvm.nacl.atomic.store.i16(i16, i16*, i32) 16 declare void @llvm.nacl.atomic.store.i16(i16, i16*, i32)
23 declare void @llvm.nacl.atomic.store.i32(i32, i32*, i32) 17 declare void @llvm.nacl.atomic.store.i32(i32, i32*, i32)
24 declare void @llvm.nacl.atomic.store.i64(i64, i64*, i32) 18 declare void @llvm.nacl.atomic.store.i64(i64, i64*, i32)
(...skipping 21 matching lines...)
46 40
47 define i32 @test_atomic_load_8(i32 %iptr) { 41 define i32 @test_atomic_load_8(i32 %iptr) {
48 entry: 42 entry:
49 %ptr = inttoptr i32 %iptr to i8* 43 %ptr = inttoptr i32 %iptr to i8*
50 ; parameter value "6" is for the sequential consistency memory order. 44 ; parameter value "6" is for the sequential consistency memory order.
51 %i = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 6) 45 %i = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 6)
52 %r = zext i8 %i to i32 46 %r = zext i8 %i to i32
53 ret i32 %r 47 ret i32 %r
54 } 48 }
55 ; CHECK-LABEL: test_atomic_load_8 49 ; CHECK-LABEL: test_atomic_load_8
56 ; CHECK: mov {{.*}}, dword 50 ; CHECK: mov {{.*}},DWORD
57 ; CHECK: mov {{.*}}, byte 51 ; CHECK: mov {{.*}},byte
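; As an aside for readers mapping the intrinsic back to source semantics: the
; memory-order argument 6 used throughout this file is sequential consistency
; (per the comment in test_atomic_load_8), so the call above corresponds
; roughly to the C11 sketch below. The helper name is made up; this only
; illustrates the intrinsic's meaning, not the Subzero lowering being tested.

#include <stdatomic.h>
#include <stdint.h>

/* Rough C11 counterpart of @llvm.nacl.atomic.load.i8 with order 6 (seq_cst). */
static uint32_t test_atomic_load_8_c(_Atomic uint8_t *ptr) {
    uint8_t i = atomic_load_explicit(ptr, memory_order_seq_cst);
    return (uint32_t)i;  /* mirrors the zext to i32 in the IR above */
}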
58 52
59 define i32 @test_atomic_load_16(i32 %iptr) { 53 define i32 @test_atomic_load_16(i32 %iptr) {
60 entry: 54 entry:
61 %ptr = inttoptr i32 %iptr to i16* 55 %ptr = inttoptr i32 %iptr to i16*
62 %i = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 6) 56 %i = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 6)
63 %r = zext i16 %i to i32 57 %r = zext i16 %i to i32
64 ret i32 %r 58 ret i32 %r
65 } 59 }
66 ; CHECK-LABEL: test_atomic_load_16 60 ; CHECK-LABEL: test_atomic_load_16
67 ; CHECK: mov {{.*}}, dword 61 ; CHECK: mov {{.*}},DWORD
68 ; CHECK: mov {{.*}}, word 62 ; CHECK: mov {{.*}},word
69 63
70 define i32 @test_atomic_load_32(i32 %iptr) { 64 define i32 @test_atomic_load_32(i32 %iptr) {
71 entry: 65 entry:
72 %ptr = inttoptr i32 %iptr to i32* 66 %ptr = inttoptr i32 %iptr to i32*
73 %r = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6) 67 %r = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)
74 ret i32 %r 68 ret i32 %r
75 } 69 }
76 ; CHECK-LABEL: test_atomic_load_32 70 ; CHECK-LABEL: test_atomic_load_32
77 ; CHECK: mov {{.*}}, dword 71 ; CHECK: mov {{.*}},DWORD
78 ; CHECK: mov {{.*}}, dword 72 ; CHECK: mov {{.*}},DWORD
79 73
80 define i64 @test_atomic_load_64(i32 %iptr) { 74 define i64 @test_atomic_load_64(i32 %iptr) {
81 entry: 75 entry:
82 %ptr = inttoptr i32 %iptr to i64* 76 %ptr = inttoptr i32 %iptr to i64*
83 %r = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6) 77 %r = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6)
84 ret i64 %r 78 ret i64 %r
85 } 79 }
86 ; CHECK-LABEL: test_atomic_load_64 80 ; CHECK-LABEL: test_atomic_load_64
87 ; CHECK: movq x{{.*}}, qword 81 ; CHECK: movq x{{.*}},QWORD
88 ; CHECK: movq qword {{.*}}, x{{.*}} 82 ; CHECK: movq QWORD {{.*}},x{{.*}}
89 83
90 define i32 @test_atomic_load_32_with_arith(i32 %iptr) { 84 define i32 @test_atomic_load_32_with_arith(i32 %iptr) {
91 entry: 85 entry:
92 br label %next 86 br label %next
93 87
94 next: 88 next:
95 %ptr = inttoptr i32 %iptr to i32* 89 %ptr = inttoptr i32 %iptr to i32*
96 %r = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6) 90 %r = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)
97 %r2 = add i32 %r, 32 91 %r2 = add i32 %r, 32
98 ret i32 %r2 92 ret i32 %r2
99 } 93 }
100 ; CHECK-LABEL: test_atomic_load_32_with_arith 94 ; CHECK-LABEL: test_atomic_load_32_with_arith
101 ; CHECK: mov {{.*}}, dword 95 ; CHECK: mov {{.*}},DWORD
102 ; The next instruction may be a separate load or folded into an add. 96 ; The next instruction may be a separate load or folded into an add.
103 ; 97 ;
104 ; In O2 mode, we know that the load and add are going to be fused. 98 ; In O2 mode, we know that the load and add are going to be fused.
105 ; CHECKO2-LABEL: test_atomic_load_32_with_arith 99 ; CHECKO2-LABEL: test_atomic_load_32_with_arith
106 ; CHECKO2: mov {{.*}}, dword 100 ; CHECKO2: mov {{.*}}, DWORD
107 ; CHECKO2: add {{.*}}, dword 101 ; CHECKO2: add {{.*}}, DWORD
108 102
109 define i32 @test_atomic_load_32_ignored(i32 %iptr) { 103 define i32 @test_atomic_load_32_ignored(i32 %iptr) {
110 entry: 104 entry:
111 %ptr = inttoptr i32 %iptr to i32* 105 %ptr = inttoptr i32 %iptr to i32*
112 %ignored = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6) 106 %ignored = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6)
113 ret i32 0 107 ret i32 0
114 } 108 }
115 ; CHECK-LABEL: test_atomic_load_32_ignored 109 ; CHECK-LABEL: test_atomic_load_32_ignored
116 ; CHECK: mov {{.*}}, dword 110 ; CHECK: mov {{.*}},DWORD
117 ; CHECK: mov {{.*}}, dword 111 ; CHECK: mov {{.*}},DWORD
118 ; CHECKO2-LABEL: test_atomic_load_32_ignored 112 ; CHECKO2-LABEL: test_atomic_load_32_ignored
119 ; CHECKO2: mov {{.*}}, dword 113 ; CHECKO2: mov {{.*}}, DWORD
120 ; CHECKO2: mov {{.*}}, dword 114 ; CHECKO2: mov {{.*}}, DWORD
121 115
122 define i64 @test_atomic_load_64_ignored(i32 %iptr) { 116 define i64 @test_atomic_load_64_ignored(i32 %iptr) {
123 entry: 117 entry:
124 %ptr = inttoptr i32 %iptr to i64* 118 %ptr = inttoptr i32 %iptr to i64*
125 %ignored = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6) 119 %ignored = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6)
126 ret i64 0 120 ret i64 0
127 } 121 }
128 ; CHECK-LABEL: test_atomic_load_64_ignored 122 ; CHECK-LABEL: test_atomic_load_64_ignored
129 ; CHECK: movq x{{.*}}, qword 123 ; CHECK: movq x{{.*}},QWORD
130 ; CHECK: movq qword {{.*}}, x{{.*}} 124 ; CHECK: movq QWORD {{.*}},x{{.*}}
131 125
132 ;;; Store 126 ;;; Store
133 127
134 define void @test_atomic_store_8(i32 %iptr, i32 %v) { 128 define void @test_atomic_store_8(i32 %iptr, i32 %v) {
135 entry: 129 entry:
136 %truncv = trunc i32 %v to i8 130 %truncv = trunc i32 %v to i8
137 %ptr = inttoptr i32 %iptr to i8* 131 %ptr = inttoptr i32 %iptr to i8*
138 call void @llvm.nacl.atomic.store.i8(i8 %truncv, i8* %ptr, i32 6) 132 call void @llvm.nacl.atomic.store.i8(i8 %truncv, i8* %ptr, i32 6)
139 ret void 133 ret void
140 } 134 }
(...skipping 12 matching lines...)
153 ; CHECK: mov word 147 ; CHECK: mov word
154 ; CHECK: mfence 148 ; CHECK: mfence
155 149
156 define void @test_atomic_store_32(i32 %iptr, i32 %v) { 150 define void @test_atomic_store_32(i32 %iptr, i32 %v) {
157 entry: 151 entry:
158 %ptr = inttoptr i32 %iptr to i32* 152 %ptr = inttoptr i32 %iptr to i32*
159 call void @llvm.nacl.atomic.store.i32(i32 %v, i32* %ptr, i32 6) 153 call void @llvm.nacl.atomic.store.i32(i32 %v, i32* %ptr, i32 6)
160 ret void 154 ret void
161 } 155 }
162 ; CHECK-LABEL: test_atomic_store_32 156 ; CHECK-LABEL: test_atomic_store_32
163 ; CHECK: mov dword 157 ; CHECK: mov DWORD
164 ; CHECK: mfence 158 ; CHECK: mfence
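; The mov followed by mfence checked here is the expected x86 shape for a
; sequentially consistent store. At the source level the intrinsic call
; corresponds roughly to the following C11 sketch (hypothetical helper name,
; an illustration of the semantics rather than the lowering itself):

#include <stdatomic.h>
#include <stdint.h>

/* Rough C11 counterpart of @llvm.nacl.atomic.store.i32 with order 6 (seq_cst);
   the CHECK lines above expect a plain store followed by an mfence. */
static void test_atomic_store_32_c(_Atomic uint32_t *ptr, uint32_t v) {
    atomic_store_explicit(ptr, v, memory_order_seq_cst);
}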
165 159
166 define void @test_atomic_store_64(i32 %iptr, i64 %v) { 160 define void @test_atomic_store_64(i32 %iptr, i64 %v) {
167 entry: 161 entry:
168 %ptr = inttoptr i32 %iptr to i64* 162 %ptr = inttoptr i32 %iptr to i64*
169 call void @llvm.nacl.atomic.store.i64(i64 %v, i64* %ptr, i32 6) 163 call void @llvm.nacl.atomic.store.i64(i64 %v, i64* %ptr, i32 6)
170 ret void 164 ret void
171 } 165 }
172 ; CHECK-LABEL: test_atomic_store_64 166 ; CHECK-LABEL: test_atomic_store_64
173 ; CHECK: movq x{{.*}}, qword 167 ; CHECK: movq x{{.*}},QWORD
174 ; CHECK: movq qword {{.*}}, x{{.*}} 168 ; CHECK: movq QWORD {{.*}},x{{.*}}
175 ; CHECK: mfence 169 ; CHECK: mfence
176 170
177 define void @test_atomic_store_64_const(i32 %iptr) { 171 define void @test_atomic_store_64_const(i32 %iptr) {
178 entry: 172 entry:
179 %ptr = inttoptr i32 %iptr to i64* 173 %ptr = inttoptr i32 %iptr to i64*
180 call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i64* %ptr, i32 6) 174 call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i64* %ptr, i32 6)
181 ret void 175 ret void
182 } 176 }
183 ; CHECK-LABEL: test_atomic_store_64_const 177 ; CHECK-LABEL: test_atomic_store_64_const
184 ; CHECK: mov {{.*}}, 1942892530 178 ; CHECK: mov {{.*}},1942892530
185 ; CHECK: mov {{.*}}, 2874 179 ; CHECK: mov {{.*}},2874
186 ; CHECK: movq x{{.*}}, qword 180 ; CHECK: movq x{{.*}},QWORD
187 ; CHECK: movq qword {{.*}}, x{{.*}} 181 ; CHECK: movq QWORD {{.*}},x{{.*}}
188 ; CHECK: mfence 182 ; CHECK: mfence
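; The two immediates come from splitting the 64-bit constant into 32-bit
; halves: 12345678901234 = 2874 * 2^32 + 1942892530, so 1942892530 is the low
; dword and 2874 the high dword that the CHECK lines above expect before the
; movq/mfence sequence.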
189 183
190 184
191 ;;; RMW 185 ;;; RMW
192 186
193 ;; add 187 ;; add
194 188
195 define i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) { 189 define i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) {
196 entry: 190 entry:
197 %trunc = trunc i32 %v to i8 191 %trunc = trunc i32 %v to i8
198 %ptr = inttoptr i32 %iptr to i8* 192 %ptr = inttoptr i32 %iptr to i8*
199 ; "1" is an atomic add, and "6" is sequential consistency. 193 ; "1" is an atomic add, and "6" is sequential consistency.
200 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 6) 194 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 6)
201 %a_ext = zext i8 %a to i32 195 %a_ext = zext i8 %a to i32
202 ret i32 %a_ext 196 ret i32 %a_ext
203 } 197 }
204 ; CHECK-LABEL: test_atomic_rmw_add_8 198 ; CHECK-LABEL: test_atomic_rmw_add_8
205 ; CHECK: lock 199 ; CHECK: lock
206 ; CHECK-NEXT: xadd byte {{.*}}, [[REG:.*]] 200 ; CHECK-NEXT: xadd byte {{.*}}, [[REG:.*]]
207 ; CHECK: {{mov|movzx}} {{.*}}, [[REG]] 201 ; CHECK: {{mov|movzx}} {{.*}},[[REG]]
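; For orientation, the first operand of the @llvm.nacl.atomic.rmw.* intrinsics
; selects the operation; as exercised by the tests in this file, 1 is add,
; 2 sub, 3 or, 4 and, 5 xor, and 6 exchange. The call above therefore
; corresponds roughly to a C11 fetch_add, sketched below with a made-up helper
; name (source-level meaning only, not the lowering under test).

#include <stdatomic.h>
#include <stdint.h>

/* Rough C11 counterpart of the rmw call above (op 1 = add, order 6 = seq_cst);
   like the CHECKed xadd, it yields the value that was in memory beforehand. */
static uint32_t test_atomic_rmw_add_8_c(_Atomic uint8_t *ptr, uint8_t v) {
    uint8_t old = atomic_fetch_add_explicit(ptr, v, memory_order_seq_cst);
    return (uint32_t)old;  /* mirrors the zext to i32 in the IR above */
}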
208 202
209 define i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) { 203 define i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) {
210 entry: 204 entry:
211 %trunc = trunc i32 %v to i16 205 %trunc = trunc i32 %v to i16
212 %ptr = inttoptr i32 %iptr to i16* 206 %ptr = inttoptr i32 %iptr to i16*
213 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 1, i16* %ptr, i16 %trunc, i32 6) 207 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 1, i16* %ptr, i16 %trunc, i32 6)
214 %a_ext = zext i16 %a to i32 208 %a_ext = zext i16 %a to i32
215 ret i32 %a_ext 209 ret i32 %a_ext
216 } 210 }
217 ; CHECK-LABEL: test_atomic_rmw_add_16 211 ; CHECK-LABEL: test_atomic_rmw_add_16
218 ; CHECK: lock 212 ; CHECK: lock
219 ; CHECK-NEXT: xadd word {{.*}}, [[REG:.*]] 213 ; CHECK-NEXT: xadd word {{.*}}, [[REG:.*]]
220 ; CHECK: {{mov|movzx}} {{.*}}, [[REG]] 214 ; CHECK: {{mov|movzx}} {{.*}},[[REG]]
221 215
222 define i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) { 216 define i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) {
223 entry: 217 entry:
224 %ptr = inttoptr i32 %iptr to i32* 218 %ptr = inttoptr i32 %iptr to i32*
225 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6) 219 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6)
226 ret i32 %a 220 ret i32 %a
227 } 221 }
228 ; CHECK-LABEL: test_atomic_rmw_add_32 222 ; CHECK-LABEL: test_atomic_rmw_add_32
229 ; CHECK: lock 223 ; CHECK: lock
230 ; CHECK-NEXT: xadd dword {{.*}}, [[REG:.*]] 224 ; CHECK-NEXT: xadd DWORD {{.*}}, [[REG:.*]]
231 ; CHECK: mov {{.*}}, [[REG]] 225 ; CHECK: mov {{.*}},[[REG]]
232 226
233 define i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) { 227 define i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) {
234 entry: 228 entry:
235 %ptr = inttoptr i32 %iptr to i64* 229 %ptr = inttoptr i32 %iptr to i64*
236 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6) 230 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6)
237 ret i64 %a 231 ret i64 %a
238 } 232 }
239 ; CHECK-LABEL: test_atomic_rmw_add_64 233 ; CHECK-LABEL: test_atomic_rmw_add_64
240 ; CHECK: push ebx 234 ; CHECK: push ebx
241 ; CHECK: mov eax, dword ptr [{{.*}}] 235 ; CHECK: mov eax,DWORD ptr [{{.*}}]
242 ; CHECK: mov edx, dword ptr [{{.*}} + 4] 236 ; CHECK: mov edx,DWORD ptr [{{.*}}+4]
243 ; CHECK: mov ebx, eax 237 ; CHECK: mov ebx,eax
244 ; RHS of add cannot be any of the e[abcd]x regs because they are 238 ; RHS of add cannot be any of the e[abcd]x regs because they are
245 ; clobbered in the loop, and the RHS needs to remain live. 239 ; clobbered in the loop, and the RHS needs to remain live.
246 ; CHECK: add ebx, {{.*e.[^x]}} 240 ; CHECK: add ebx, {{.*e.[^x]}}
247 ; CHECK: mov ecx, edx 241 ; CHECK: mov ecx,edx
248 ; CHECK: adc ecx, {{.*e.[^x]}} 242 ; CHECK: adc ecx, {{.*e.[^x]}}
249 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired). 243 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired).
250 ; It can be esi, edi, or ebp though, for example (so we need to be careful 244 ; It can be esi, edi, or ebp though, for example (so we need to be careful
251 ; about rejecting eb* and ed*.) 245 ; about rejecting eb* and ed*.)
252 ; CHECK: lock 246 ; CHECK: lock
253 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] 247 ; CHECK-NEXT: cmpxchg8b QWORD PTR [e{{.[^x]}}]
254 ; CHECK: jne -{{[0-9]}} 248 ; CHECK: jne-{{[0-9]}}
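; The CHECK lines above spell out the expansion for 64-bit RMW on x86-32: load
; the old value into edx:eax, build the new value in ecx:ebx, then retry with
; lock cmpxchg8b until no other writer intervened. Below is a C-level sketch
; of that algorithm (hypothetical helper, illustrative only; the real lowering
; is the register-allocated loop checked above).

#include <stdatomic.h>
#include <stdint.h>

/* Sketch of the compare-exchange retry loop used for 64-bit atomic add. */
static uint64_t rmw_add_64_sketch(_Atomic uint64_t *ptr, uint64_t v) {
    uint64_t old = atomic_load_explicit(ptr, memory_order_relaxed); /* mov eax / mov edx */
    uint64_t desired;
    do {
        desired = old + v;  /* mov ebx,eax; add ebx,...; mov ecx,edx; adc ecx,... */
    } while (!atomic_compare_exchange_weak_explicit(
                 ptr, &old, desired,
                 memory_order_seq_cst, memory_order_seq_cst)); /* lock cmpxchg8b; jne */
    return old;
}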
255 249
256 ; Test with some more register pressure. When we have an alloca, ebp is 250 ; Test with some more register pressure. When we have an alloca, ebp is
257 ; used to manage the stack frame, so it cannot be used as a register either. 251 ; used to manage the stack frame, so it cannot be used as a register either.
258 define void @use_ptr(i32 %iptr) { 252 define void @use_ptr(i32 %iptr) {
259 entry: 253 entry:
260 ret void 254 ret void
261 } 255 }
262 256
263 define i64 @test_atomic_rmw_add_64_alloca(i32 %iptr, i64 %v) { 257 define i64 @test_atomic_rmw_add_64_alloca(i32 %iptr, i64 %v) {
264 entry: 258 entry:
(...skipping 13 matching lines...)
278 ; CHECK-DAG: mov edx 272 ; CHECK-DAG: mov edx
279 ; CHECK-DAG: mov eax 273 ; CHECK-DAG: mov eax
280 ; CHECK-DAG: mov ecx 274 ; CHECK-DAG: mov ecx
281 ; CHECK-DAG: mov ebx 275 ; CHECK-DAG: mov ebx
282 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired). 276 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired).
283 ; It also cannot be ebp since we use that for alloca. Also make sure it's 277 ; It also cannot be ebp since we use that for alloca. Also make sure it's
284 ; not esp, since that's the stack pointer and mucking with it will break 278 ; not esp, since that's the stack pointer and mucking with it will break
285 ; the later use_ptr function call. 279 ; the later use_ptr function call.
286 ; That pretty much leaves esi or edi as the only viable registers. 280 ; That pretty much leaves esi or edi as the only viable registers.
287 ; CHECK: lock 281 ; CHECK: lock
288 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{[ds]}}i] 282 ; CHECK-NEXT: cmpxchg8b QWORD PTR [e{{[ds]}}i]
289 ; CHECK: call use_ptr 283 ; CHECK: call
284 ; CHECK-NEXT: R_{{.*}} use_ptr
290 285
291 define i32 @test_atomic_rmw_add_32_ignored(i32 %iptr, i32 %v) { 286 define i32 @test_atomic_rmw_add_32_ignored(i32 %iptr, i32 %v) {
292 entry: 287 entry:
293 %ptr = inttoptr i32 %iptr to i32* 288 %ptr = inttoptr i32 %iptr to i32*
294 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6) 289 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6)
295 ret i32 %v 290 ret i32 %v
296 } 291 }
297 ; Technically this could use "lock add" instead of "lock xadd", if liveness 292 ; Technically this could use "lock add" instead of "lock xadd", if liveness
298 ; tells us that the destination variable is dead. 293 ; tells us that the destination variable is dead.
299 ; CHECK-LABEL: test_atomic_rmw_add_32_ignored 294 ; CHECK-LABEL: test_atomic_rmw_add_32_ignored
300 ; CHECK: lock 295 ; CHECK: lock
301 ; CHECK-NEXT: xadd dword {{.*}}, [[REG:.*]] 296 ; CHECK-NEXT: xadd DWORD {{.*}}, [[REG:.*]]
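; In C11 terms this is a fetch_add whose result is discarded; as the comment
; above notes, with liveness information the lowering could drop the
; value-returning lock xadd in favor of a plain lock add. A hypothetical
; sketch of the source-level shape (made-up helper name):

#include <stdatomic.h>
#include <stdint.h>

/* The old value is dead here, so lock add (which returns nothing) would do;
   the current lowering still emits lock xadd, as the CHECK lines show. */
static void rmw_add_32_ignored_sketch(_Atomic uint32_t *ptr, uint32_t v) {
    (void)atomic_fetch_add_explicit(ptr, v, memory_order_seq_cst);
}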
302 297
303 ; Atomic RMW 64 needs to be expanded into its own loop. 298 ; Atomic RMW 64 needs to be expanded into its own loop.
304 ; Make sure that works w/ non-trivial function bodies. 299 ; Make sure that works w/ non-trivial function bodies.
305 define i64 @test_atomic_rmw_add_64_loop(i32 %iptr, i64 %v) { 300 define i64 @test_atomic_rmw_add_64_loop(i32 %iptr, i64 %v) {
306 entry: 301 entry:
307 %x = icmp ult i64 %v, 100 302 %x = icmp ult i64 %v, 100
308 br i1 %x, label %err, label %loop 303 br i1 %x, label %err, label %loop
309 304
310 loop: 305 loop:
311 %v_next = phi i64 [ %v, %entry ], [ %next, %loop ] 306 %v_next = phi i64 [ %v, %entry ], [ %next, %loop ]
312 %ptr = inttoptr i32 %iptr to i64* 307 %ptr = inttoptr i32 %iptr to i64*
313 %next = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v_next, i32 6) 308 %next = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v_next, i32 6)
314 %success = icmp eq i64 %next, 100 309 %success = icmp eq i64 %next, 100
315 br i1 %success, label %done, label %loop 310 br i1 %success, label %done, label %loop
316 311
317 done: 312 done:
318 ret i64 %next 313 ret i64 %next
319 314
320 err: 315 err:
321 ret i64 0 316 ret i64 0
322 } 317 }
323 ; CHECK-LABEL: test_atomic_rmw_add_64_loop 318 ; CHECK-LABEL: test_atomic_rmw_add_64_loop
324 ; CHECK: push ebx 319 ; CHECK: push ebx
325 ; CHECK: mov eax, dword ptr [{{.*}}] 320 ; CHECK: mov eax,DWORD ptr [{{.*}}]
326 ; CHECK: mov edx, dword ptr [{{.*}} + 4] 321 ; CHECK: mov edx,DWORD ptr [{{.*}}+4]
327 ; CHECK: mov ebx, eax 322 ; CHECK: mov ebx,eax
328 ; CHECK: add ebx, {{.*e.[^x]}} 323 ; CHECK: add ebx, {{.*e.[^x]}}
329 ; CHECK: mov ecx, edx 324 ; CHECK: mov ecx,edx
330 ; CHECK: adc ecx, {{.*e.[^x]}} 325 ; CHECK: adc ecx, {{.*e.[^x]}}
331 ; CHECK: lock 326 ; CHECK: lock
332 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] 327 ; CHECK-NEXT: cmpxchg8b QWORD PTR [e{{.[^x]}}]
333 ; CHECK: jne -{{[0-9]}} 328 ; CHECK: jne-{{[0-9]}}
334 329
335 ;; sub 330 ;; sub
336 331
337 define i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) { 332 define i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) {
338 entry: 333 entry:
339 %trunc = trunc i32 %v to i8 334 %trunc = trunc i32 %v to i8
340 %ptr = inttoptr i32 %iptr to i8* 335 %ptr = inttoptr i32 %iptr to i8*
341 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 2, i8* %ptr, i8 %trunc, i32 6) 336 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 2, i8* %ptr, i8 %trunc, i32 6)
342 %a_ext = zext i8 %a to i32 337 %a_ext = zext i8 %a to i32
343 ret i32 %a_ext 338 ret i32 %a_ext
344 } 339 }
345 ; CHECK-LABEL: test_atomic_rmw_sub_8 340 ; CHECK-LABEL: test_atomic_rmw_sub_8
346 ; CHECK: neg [[REG:.*]] 341 ; CHECK: neg [[REG:.*]]
347 ; CHECK: lock 342 ; CHECK: lock
348 ; CHECK-NEXT: xadd byte {{.*}}, [[REG]] 343 ; CHECK-NEXT: xadd byte {{.*}}, [[REG]]
349 ; CHECK: {{mov|movzx}} {{.*}}, [[REG]] 344 ; CHECK: {{mov|movzx}} {{.*}},[[REG]]
350 345
351 define i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) { 346 define i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) {
352 entry: 347 entry:
353 %trunc = trunc i32 %v to i16 348 %trunc = trunc i32 %v to i16
354 %ptr = inttoptr i32 %iptr to i16* 349 %ptr = inttoptr i32 %iptr to i16*
355 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 2, i16* %ptr, i16 %trunc, i32 6) 350 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 2, i16* %ptr, i16 %trunc, i32 6)
356 %a_ext = zext i16 %a to i32 351 %a_ext = zext i16 %a to i32
357 ret i32 %a_ext 352 ret i32 %a_ext
358 } 353 }
359 ; CHECK-LABEL: test_atomic_rmw_sub_16 354 ; CHECK-LABEL: test_atomic_rmw_sub_16
360 ; CHECK: neg [[REG:.*]] 355 ; CHECK: neg [[REG:.*]]
361 ; CHECK: lock 356 ; CHECK: lock
362 ; CHECK-NEXT: xadd word {{.*}}, [[REG]] 357 ; CHECK-NEXT: xadd word {{.*}}, [[REG]]
363 ; CHECK: {{mov|movzx}} {{.*}}, [[REG]] 358 ; CHECK: {{mov|movzx}} {{.*}},[[REG]]
364 359
365 define i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) { 360 define i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) {
366 entry: 361 entry:
367 %ptr = inttoptr i32 %iptr to i32* 362 %ptr = inttoptr i32 %iptr to i32*
368 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6) 363 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6)
369 ret i32 %a 364 ret i32 %a
370 } 365 }
371 ; CHECK-LABEL: test_atomic_rmw_sub_32 366 ; CHECK-LABEL: test_atomic_rmw_sub_32
372 ; CHECK: neg [[REG:.*]] 367 ; CHECK: neg [[REG:.*]]
373 ; CHECK: lock 368 ; CHECK: lock
374 ; CHECK-NEXT: xadd dword {{.*}}, [[REG]] 369 ; CHECK-NEXT: xadd DWORD {{.*}}, [[REG]]
375 ; CHECK: mov {{.*}}, [[REG]] 370 ; CHECK: mov {{.*}},[[REG]]
376 371
377 define i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) { 372 define i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) {
378 entry: 373 entry:
379 %ptr = inttoptr i32 %iptr to i64* 374 %ptr = inttoptr i32 %iptr to i64*
380 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i64* %ptr, i64 %v, i32 6) 375 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i64* %ptr, i64 %v, i32 6)
381 ret i64 %a 376 ret i64 %a
382 } 377 }
383 ; CHECK-LABEL: test_atomic_rmw_sub_64 378 ; CHECK-LABEL: test_atomic_rmw_sub_64
384 ; CHECK: push ebx 379 ; CHECK: push ebx
385 ; CHECK: mov eax, dword ptr [{{.*}}] 380 ; CHECK: mov eax,DWORD ptr [{{.*}}]
386 ; CHECK: mov edx, dword ptr [{{.*}} + 4] 381 ; CHECK: mov edx,DWORD ptr [{{.*}}+4]
387 ; CHECK: mov ebx, eax 382 ; CHECK: mov ebx,eax
388 ; CHECK: sub ebx, {{.*e.[^x]}} 383 ; CHECK: sub ebx, {{.*e.[^x]}}
389 ; CHECK: mov ecx, edx 384 ; CHECK: mov ecx,edx
390 ; CHECK: sbb ecx, {{.*e.[^x]}} 385 ; CHECK: sbb ecx, {{.*e.[^x]}}
391 ; CHECK: lock 386 ; CHECK: lock
392 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] 387 ; CHECK-NEXT: cmpxchg8b QWORD PTR [e{{.[^x]}}]
393 ; CHECK: jne -{{[0-9]}} 388 ; CHECK: jne-{{[0-9]}}
394 389
395 390
396 define i32 @test_atomic_rmw_sub_32_ignored(i32 %iptr, i32 %v) { 391 define i32 @test_atomic_rmw_sub_32_ignored(i32 %iptr, i32 %v) {
397 entry: 392 entry:
398 %ptr = inttoptr i32 %iptr to i32* 393 %ptr = inttoptr i32 %iptr to i32*
399 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6) 394 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6)
400 ret i32 %v 395 ret i32 %v
401 } 396 }
402 ; Could use "lock sub" instead of "neg; lock xadd" 397 ; Could use "lock sub" instead of "neg; lock xadd"
403 ; CHECK-LABEL: test_atomic_rmw_sub_32_ignored 398 ; CHECK-LABEL: test_atomic_rmw_sub_32_ignored
404 ; CHECK: neg [[REG:.*]] 399 ; CHECK: neg [[REG:.*]]
405 ; CHECK: lock 400 ; CHECK: lock
406 ; CHECK-NEXT: xadd dword {{.*}}, [[REG]] 401 ; CHECK-NEXT: xadd DWORD {{.*}}, [[REG]]
407 402
408 ;; or 403 ;; or
409 404
410 define i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) { 405 define i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) {
411 entry: 406 entry:
412 %trunc = trunc i32 %v to i8 407 %trunc = trunc i32 %v to i8
413 %ptr = inttoptr i32 %iptr to i8* 408 %ptr = inttoptr i32 %iptr to i8*
414 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i8* %ptr, i8 %trunc, i32 6) 409 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i8* %ptr, i8 %trunc, i32 6)
415 %a_ext = zext i8 %a to i32 410 %a_ext = zext i8 %a to i32
416 ret i32 %a_ext 411 ret i32 %a_ext
417 } 412 }
418 ; CHECK-LABEL: test_atomic_rmw_or_8 413 ; CHECK-LABEL: test_atomic_rmw_or_8
419 ; CHECK: mov al, byte ptr 414 ; CHECK: mov al,BYTE PTR
420 ; Dest cannot be eax here, because eax is used for the old value. Also want 415 ; Dest cannot be eax here, because eax is used for the old value. Also want
421 ; to make sure that cmpxchg's source is the same register. 416 ; to make sure that cmpxchg's source is the same register.
422 ; CHECK: or [[REG:[^a].]] 417 ; CHECK: or [[REG:[^a].]]
423 ; CHECK: lock 418 ; CHECK: lock
424 ; CHECK-NEXT: cmpxchg byte ptr [e{{[^a].}}], [[REG]] 419 ; CHECK-NEXT: cmpxchg BYTE PTR [e{{[^a].}}], [[REG]]
425 ; CHECK: jne -{{[0-9]}} 420 ; CHECK: jne-{{[0-9]}}
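; Unlike add and sub, which map onto lock xadd, the bitwise RMW operations have
; no x86 instruction that also returns the old value, so the lowering checked
; above is a compare-exchange retry loop. A C-level sketch of that shape
; (hypothetical helper, illustrative only):

#include <stdatomic.h>
#include <stdint.h>

/* Sketch of the cmpxchg retry loop used for fetch-or on i8. */
static uint8_t rmw_or_8_sketch(_Atomic uint8_t *ptr, uint8_t v) {
    uint8_t old = atomic_load_explicit(ptr, memory_order_relaxed);  /* mov al, byte ptr */
    while (!atomic_compare_exchange_weak_explicit(
               ptr, &old, (uint8_t)(old | v),                       /* or; lock cmpxchg */
               memory_order_seq_cst, memory_order_seq_cst)) {
        /* on failure, cmpxchg reloads the current value into old; retry (jne) */
    }
    return old;
}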
426 421
427 define i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) { 422 define i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) {
428 entry: 423 entry:
429 %trunc = trunc i32 %v to i16 424 %trunc = trunc i32 %v to i16
430 %ptr = inttoptr i32 %iptr to i16* 425 %ptr = inttoptr i32 %iptr to i16*
431 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i16* %ptr, i16 %trunc, i32 6) 426 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i16* %ptr, i16 %trunc, i32 6)
432 %a_ext = zext i16 %a to i32 427 %a_ext = zext i16 %a to i32
433 ret i32 %a_ext 428 ret i32 %a_ext
434 } 429 }
435 ; CHECK-LABEL: test_atomic_rmw_or_16 430 ; CHECK-LABEL: test_atomic_rmw_or_16
436 ; CHECK: mov ax, word ptr 431 ; CHECK: mov ax,word ptr
437 ; CHECK: or [[REG:[^a].]] 432 ; CHECK: or [[REG:[^a].]]
438 ; CHECK: lock 433 ; CHECK: lock
439 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}], [[REG]] 434 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}], [[REG]]
440 ; CHECK: jne -{{[0-9]}} 435 ; CHECK: jne-{{[0-9]}}
441 436
442 define i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) { 437 define i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) {
443 entry: 438 entry:
444 %ptr = inttoptr i32 %iptr to i32* 439 %ptr = inttoptr i32 %iptr to i32*
445 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6) 440 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6)
446 ret i32 %a 441 ret i32 %a
447 } 442 }
448 ; CHECK-LABEL: test_atomic_rmw_or_32 443 ; CHECK-LABEL: test_atomic_rmw_or_32
449 ; CHECK: mov eax, dword ptr 444 ; CHECK: mov eax,DWORD ptr
450 ; CHECK: or [[REG:e[^a].]] 445 ; CHECK: or [[REG:e[^a].]]
451 ; CHECK: lock 446 ; CHECK: lock
452 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}], [[REG]] 447 ; CHECK-NEXT: cmpxchg DWORD PTR [e{{[^a].}}], [[REG]]
453 ; CHECK: jne -{{[0-9]}} 448 ; CHECK: jne-{{[0-9]}}
454 449
455 define i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) { 450 define i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) {
456 entry: 451 entry:
457 %ptr = inttoptr i32 %iptr to i64* 452 %ptr = inttoptr i32 %iptr to i64*
458 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i64* %ptr, i64 %v, i32 6) 453 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i64* %ptr, i64 %v, i32 6)
459 ret i64 %a 454 ret i64 %a
460 } 455 }
461 ; CHECK-LABEL: test_atomic_rmw_or_64 456 ; CHECK-LABEL: test_atomic_rmw_or_64
462 ; CHECK: push ebx 457 ; CHECK: push ebx
463 ; CHECK: mov eax, dword ptr [{{.*}}] 458 ; CHECK: mov eax,DWORD ptr [{{.*}}]
464 ; CHECK: mov edx, dword ptr [{{.*}} + 4] 459 ; CHECK: mov edx,DWORD ptr [{{.*}}+4]
465 ; CHECK: mov ebx, eax 460 ; CHECK: mov ebx,eax
466 ; CHECK: or ebx, {{.*e.[^x]}} 461 ; CHECK: or ebx, {{.*e.[^x]}}
467 ; CHECK: mov ecx, edx 462 ; CHECK: mov ecx,edx
468 ; CHECK: or ecx, {{.*e.[^x]}} 463 ; CHECK: or ecx, {{.*e.[^x]}}
469 ; CHECK: lock 464 ; CHECK: lock
470 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] 465 ; CHECK-NEXT: cmpxchg8b QWORD PTR [e{{.[^x]}}]
471 ; CHECK: jne -{{[0-9]}} 466 ; CHECK: jne-{{[0-9]}}
472 467
473 define i32 @test_atomic_rmw_or_32_ignored(i32 %iptr, i32 %v) { 468 define i32 @test_atomic_rmw_or_32_ignored(i32 %iptr, i32 %v) {
474 entry: 469 entry:
475 %ptr = inttoptr i32 %iptr to i32* 470 %ptr = inttoptr i32 %iptr to i32*
476 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6) 471 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6)
477 ret i32 %v 472 ret i32 %v
478 } 473 }
479 ; CHECK-LABEL: test_atomic_rmw_or_32_ignored 474 ; CHECK-LABEL: test_atomic_rmw_or_32_ignored
480 ; Could just "lock or", if we inspect the liveness information first. 475 ; Could just "lock or", if we inspect the liveness information first.
481 ; Would also need a way to introduce "lock"'edness to binary 476 ; Would also need a way to introduce "lock"'edness to binary
482 ; operators without introducing overhead on the more common binary ops. 477 ; operators without introducing overhead on the more common binary ops.
483 ; CHECK: mov eax, dword ptr 478 ; CHECK: mov eax,DWORD ptr
484 ; CHECK: or [[REG:e[^a].]] 479 ; CHECK: or [[REG:e[^a].]]
485 ; CHECK: lock 480 ; CHECK: lock
486 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}], [[REG]] 481 ; CHECK-NEXT: cmpxchg DWORD PTR [e{{[^a].}}], [[REG]]
487 ; CHECK: jne -{{[0-9]}} 482 ; CHECK: jne-{{[0-9]}}
488 483
489 ;; and 484 ;; and
490 485
491 define i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) { 486 define i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) {
492 entry: 487 entry:
493 %trunc = trunc i32 %v to i8 488 %trunc = trunc i32 %v to i8
494 %ptr = inttoptr i32 %iptr to i8* 489 %ptr = inttoptr i32 %iptr to i8*
495 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 4, i8* %ptr, i8 %trunc, i32 6) 490 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 4, i8* %ptr, i8 %trunc, i32 6)
496 %a_ext = zext i8 %a to i32 491 %a_ext = zext i8 %a to i32
497 ret i32 %a_ext 492 ret i32 %a_ext
498 } 493 }
499 ; CHECK-LABEL: test_atomic_rmw_and_8 494 ; CHECK-LABEL: test_atomic_rmw_and_8
500 ; CHECK: mov al, byte ptr 495 ; CHECK: mov al,BYTE PTR
501 ; CHECK: and [[REG:[^a].]] 496 ; CHECK: and [[REG:[^a].]]
502 ; CHECK: lock 497 ; CHECK: lock
503 ; CHECK-NEXT: cmpxchg byte ptr [e{{[^a].}}], [[REG]] 498 ; CHECK-NEXT: cmpxchg BYTE PTR [e{{[^a].}}], [[REG]]
504 ; CHECK: jne -{{[0-9]}} 499 ; CHECK: jne-{{[0-9]}}
505 500
506 define i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) { 501 define i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) {
507 entry: 502 entry:
508 %trunc = trunc i32 %v to i16 503 %trunc = trunc i32 %v to i16
509 %ptr = inttoptr i32 %iptr to i16* 504 %ptr = inttoptr i32 %iptr to i16*
510 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 4, i16* %ptr, i16 %trunc, i32 6) 505 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 4, i16* %ptr, i16 %trunc, i32 6)
511 %a_ext = zext i16 %a to i32 506 %a_ext = zext i16 %a to i32
512 ret i32 %a_ext 507 ret i32 %a_ext
513 } 508 }
514 ; CHECK-LABEL: test_atomic_rmw_and_16 509 ; CHECK-LABEL: test_atomic_rmw_and_16
515 ; CHECK: mov ax, word ptr 510 ; CHECK: mov ax,word ptr
516 ; CHECK: and 511 ; CHECK: and
517 ; CHECK: lock 512 ; CHECK: lock
518 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}] 513 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}]
519 ; CHECK: jne -{{[0-9]}} 514 ; CHECK: jne-{{[0-9]}}
520 515
521 define i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) { 516 define i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) {
522 entry: 517 entry:
523 %ptr = inttoptr i32 %iptr to i32* 518 %ptr = inttoptr i32 %iptr to i32*
524 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6) 519 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6)
525 ret i32 %a 520 ret i32 %a
526 } 521 }
527 ; CHECK-LABEL: test_atomic_rmw_and_32 522 ; CHECK-LABEL: test_atomic_rmw_and_32
528 ; CHECK: mov eax, dword ptr 523 ; CHECK: mov eax,DWORD ptr
529 ; CHECK: and 524 ; CHECK: and
530 ; CHECK: lock 525 ; CHECK: lock
531 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}] 526 ; CHECK-NEXT: cmpxchg DWORD PTR [e{{[^a].}}]
532 ; CHECK: jne -{{[0-9]}} 527 ; CHECK: jne-{{[0-9]}}
533 528
534 define i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) { 529 define i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) {
535 entry: 530 entry:
536 %ptr = inttoptr i32 %iptr to i64* 531 %ptr = inttoptr i32 %iptr to i64*
537 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i64* %ptr, i64 %v, i32 6) 532 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i64* %ptr, i64 %v, i32 6)
538 ret i64 %a 533 ret i64 %a
539 } 534 }
540 ; CHECK-LABEL: test_atomic_rmw_and_64 535 ; CHECK-LABEL: test_atomic_rmw_and_64
541 ; CHECK: push ebx 536 ; CHECK: push ebx
542 ; CHECK: mov eax, dword ptr [{{.*}}] 537 ; CHECK: mov eax,DWORD ptr [{{.*}}]
543 ; CHECK: mov edx, dword ptr [{{.*}} + 4] 538 ; CHECK: mov edx,DWORD ptr [{{.*}}+4]
544 ; CHECK: mov ebx, eax 539 ; CHECK: mov ebx,eax
545 ; CHECK: and ebx, {{.*e.[^x]}} 540 ; CHECK: and ebx, {{.*e.[^x]}}
546 ; CHECK: mov ecx, edx 541 ; CHECK: mov ecx,edx
547 ; CHECK: and ecx, {{.*e.[^x]}} 542 ; CHECK: and ecx, {{.*e.[^x]}}
548 ; CHECK: lock 543 ; CHECK: lock
549 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] 544 ; CHECK-NEXT: cmpxchg8b QWORD PTR [e{{.[^x]}}]
550 ; CHECK: jne -{{[0-9]}} 545 ; CHECK: jne-{{[0-9]}}
551 546
552 define i32 @test_atomic_rmw_and_32_ignored(i32 %iptr, i32 %v) { 547 define i32 @test_atomic_rmw_and_32_ignored(i32 %iptr, i32 %v) {
553 entry: 548 entry:
554 %ptr = inttoptr i32 %iptr to i32* 549 %ptr = inttoptr i32 %iptr to i32*
555 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6) 550 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6)
556 ret i32 %v 551 ret i32 %v
557 } 552 }
558 ; CHECK-LABEL: test_atomic_rmw_and_32_ignored 553 ; CHECK-LABEL: test_atomic_rmw_and_32_ignored
559 ; Could just "lock and" 554 ; Could just "lock and"
560 ; CHECK: mov eax, dword ptr 555 ; CHECK: mov eax,DWORD ptr
561 ; CHECK: and 556 ; CHECK: and
562 ; CHECK: lock 557 ; CHECK: lock
563 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}] 558 ; CHECK-NEXT: cmpxchg DWORD PTR [e{{[^a].}}]
564 ; CHECK: jne -{{[0-9]}} 559 ; CHECK: jne-{{[0-9]}}
565 560
566 ;; xor 561 ;; xor
567 562
568 define i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) { 563 define i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) {
569 entry: 564 entry:
570 %trunc = trunc i32 %v to i8 565 %trunc = trunc i32 %v to i8
571 %ptr = inttoptr i32 %iptr to i8* 566 %ptr = inttoptr i32 %iptr to i8*
572 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 5, i8* %ptr, i8 %trunc, i32 6) 567 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 5, i8* %ptr, i8 %trunc, i32 6)
573 %a_ext = zext i8 %a to i32 568 %a_ext = zext i8 %a to i32
574 ret i32 %a_ext 569 ret i32 %a_ext
575 } 570 }
576 ; CHECK-LABEL: test_atomic_rmw_xor_8 571 ; CHECK-LABEL: test_atomic_rmw_xor_8
577 ; CHECK: mov al, byte ptr 572 ; CHECK: mov al,BYTE PTR
578 ; CHECK: xor [[REG:[^a].]] 573 ; CHECK: xor [[REG:[^a].]]
579 ; CHECK: lock 574 ; CHECK: lock
580 ; CHECK-NEXT: cmpxchg byte ptr [e{{[^a].}}], [[REG]] 575 ; CHECK-NEXT: cmpxchg BYTE PTR [e{{[^a].}}], [[REG]]
581 ; CHECK: jne -{{[0-9]}} 576 ; CHECK: jne-{{[0-9]}}
582 577
583 define i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) { 578 define i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) {
584 entry: 579 entry:
585 %trunc = trunc i32 %v to i16 580 %trunc = trunc i32 %v to i16
586 %ptr = inttoptr i32 %iptr to i16* 581 %ptr = inttoptr i32 %iptr to i16*
587 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 5, i16* %ptr, i16 %trunc, i32 6) 582 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 5, i16* %ptr, i16 %trunc, i32 6)
588 %a_ext = zext i16 %a to i32 583 %a_ext = zext i16 %a to i32
589 ret i32 %a_ext 584 ret i32 %a_ext
590 } 585 }
591 ; CHECK-LABEL: test_atomic_rmw_xor_16 586 ; CHECK-LABEL: test_atomic_rmw_xor_16
592 ; CHECK: mov ax, word ptr 587 ; CHECK: mov ax,word ptr
593 ; CHECK: xor 588 ; CHECK: xor
594 ; CHECK: lock 589 ; CHECK: lock
595 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}] 590 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}]
596 ; CHECK: jne -{{[0-9]}} 591 ; CHECK: jne-{{[0-9]}}
597 592
598 593
599 define i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) { 594 define i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) {
600 entry: 595 entry:
601 %ptr = inttoptr i32 %iptr to i32* 596 %ptr = inttoptr i32 %iptr to i32*
602 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6) 597 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6)
603 ret i32 %a 598 ret i32 %a
604 } 599 }
605 ; CHECK-LABEL: test_atomic_rmw_xor_32 600 ; CHECK-LABEL: test_atomic_rmw_xor_32
606 ; CHECK: mov eax, dword ptr 601 ; CHECK: mov eax,DWORD ptr
607 ; CHECK: xor 602 ; CHECK: xor
608 ; CHECK: lock 603 ; CHECK: lock
609 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}] 604 ; CHECK-NEXT: cmpxchg DWORD PTR [e{{[^a].}}]
610 ; CHECK: jne -{{[0-9]}} 605 ; CHECK: jne-{{[0-9]}}
611 606
612 define i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) { 607 define i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) {
613 entry: 608 entry:
614 %ptr = inttoptr i32 %iptr to i64* 609 %ptr = inttoptr i32 %iptr to i64*
615 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i64* %ptr, i64 %v, i32 6) 610 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i64* %ptr, i64 %v, i32 6)
616 ret i64 %a 611 ret i64 %a
617 } 612 }
618 ; CHECK-LABEL: test_atomic_rmw_xor_64 613 ; CHECK-LABEL: test_atomic_rmw_xor_64
619 ; CHECK: push ebx 614 ; CHECK: push ebx
620 ; CHECK: mov eax, dword ptr [{{.*}}] 615 ; CHECK: mov eax,DWORD ptr [{{.*}}]
621 ; CHECK: mov edx, dword ptr [{{.*}} + 4] 616 ; CHECK: mov edx,DWORD ptr [{{.*}}+4]
622 ; CHECK: mov ebx, eax 617 ; CHECK: mov ebx,eax
623 ; CHECK: or ebx, {{.*e.[^x]}} 618 ; CHECK: or ebx, {{.*e.[^x]}}
624 ; CHECK: mov ecx, edx 619 ; CHECK: mov ecx,edx
625 ; CHECK: or ecx, {{.*e.[^x]}} 620 ; CHECK: or ecx, {{.*e.[^x]}}
626 ; CHECK: lock 621 ; CHECK: lock
627 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] 622 ; CHECK-NEXT: cmpxchg8b QWORD PTR [e{{.[^x]}}]
628 ; CHECK: jne -{{[0-9]}} 623 ; CHECK: jne-{{[0-9]}}
629 624
630 define i32 @test_atomic_rmw_xor_32_ignored(i32 %iptr, i32 %v) { 625 define i32 @test_atomic_rmw_xor_32_ignored(i32 %iptr, i32 %v) {
631 entry: 626 entry:
632 %ptr = inttoptr i32 %iptr to i32* 627 %ptr = inttoptr i32 %iptr to i32*
633 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6) 628 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6)
634 ret i32 %v 629 ret i32 %v
635 } 630 }
636 ; CHECK-LABEL: test_atomic_rmw_xor_32_ignored 631 ; CHECK-LABEL: test_atomic_rmw_xor_32_ignored
637 ; CHECK: mov eax, dword ptr 632 ; CHECK: mov eax,DWORD ptr
638 ; CHECK: xor 633 ; CHECK: xor
639 ; CHECK: lock 634 ; CHECK: lock
640 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}] 635 ; CHECK-NEXT: cmpxchg DWORD PTR [e{{[^a].}}]
641 ; CHECK: jne -{{[0-9]}} 636 ; CHECK: jne-{{[0-9]}}
642 637
643 ;; exchange 638 ;; exchange
644 639
645 define i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) { 640 define i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) {
646 entry: 641 entry:
647 %trunc = trunc i32 %v to i8 642 %trunc = trunc i32 %v to i8
648 %ptr = inttoptr i32 %iptr to i8* 643 %ptr = inttoptr i32 %iptr to i8*
649 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i8* %ptr, i8 %trunc, i32 6) 644 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i8* %ptr, i8 %trunc, i32 6)
650 %a_ext = zext i8 %a to i32 645 %a_ext = zext i8 %a to i32
651 ret i32 %a_ext 646 ret i32 %a_ext
652 } 647 }
653 ; CHECK-LABEL: test_atomic_rmw_xchg_8 648 ; CHECK-LABEL: test_atomic_rmw_xchg_8
654 ; CHECK: xchg byte ptr {{.*}}, [[REG:.*]] 649 ; CHECK: xchg BYTE PTR {{.*}}, [[REG:.*]]
655 650
656 define i32 @test_atomic_rmw_xchg_16(i32 %iptr, i32 %v) { 651 define i32 @test_atomic_rmw_xchg_16(i32 %iptr, i32 %v) {
657 entry: 652 entry:
658 %trunc = trunc i32 %v to i16 653 %trunc = trunc i32 %v to i16
659 %ptr = inttoptr i32 %iptr to i16* 654 %ptr = inttoptr i32 %iptr to i16*
660 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 6, i16* %ptr, i16 %trunc, i32 6) 655 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 6, i16* %ptr, i16 %trunc, i32 6)
661 %a_ext = zext i16 %a to i32 656 %a_ext = zext i16 %a to i32
662 ret i32 %a_ext 657 ret i32 %a_ext
663 } 658 }
664 ; CHECK-LABEL: test_atomic_rmw_xchg_16 659 ; CHECK-LABEL: test_atomic_rmw_xchg_16
665 ; CHECK: xchg word ptr {{.*}}, [[REG:.*]] 660 ; CHECK: xchg word ptr {{.*}}, [[REG:.*]]
666 661
667 define i32 @test_atomic_rmw_xchg_32(i32 %iptr, i32 %v) { 662 define i32 @test_atomic_rmw_xchg_32(i32 %iptr, i32 %v) {
668 entry: 663 entry:
669 %ptr = inttoptr i32 %iptr to i32* 664 %ptr = inttoptr i32 %iptr to i32*
670 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6) 665 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6)
671 ret i32 %a 666 ret i32 %a
672 } 667 }
673 ; CHECK-LABEL: test_atomic_rmw_xchg_32 668 ; CHECK-LABEL: test_atomic_rmw_xchg_32
674 ; CHECK: xchg dword ptr {{.*}}, [[REG:.*]] 669 ; CHECK: xchg DWORD PTR {{.*}}, [[REG:.*]]
675 670
676 define i64 @test_atomic_rmw_xchg_64(i32 %iptr, i64 %v) { 671 define i64 @test_atomic_rmw_xchg_64(i32 %iptr, i64 %v) {
677 entry: 672 entry:
678 %ptr = inttoptr i32 %iptr to i64* 673 %ptr = inttoptr i32 %iptr to i64*
679 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i64* %ptr, i64 %v, i32 6) 674 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i64* %ptr, i64 %v, i32 6)
680 ret i64 %a 675 ret i64 %a
681 } 676 }
682 ; CHECK-LABEL: test_atomic_rmw_xchg_64 677 ; CHECK-LABEL: test_atomic_rmw_xchg_64
683 ; CHECK: push ebx 678 ; CHECK: push ebx
684 ; CHECK-DAG: mov edx 679 ; CHECK-DAG: mov edx
685 ; CHECK-DAG: mov eax 680 ; CHECK-DAG: mov eax
686 ; CHECK-DAG: mov ecx 681 ; CHECK-DAG: mov ecx
687 ; CHECK-DAG: mov ebx 682 ; CHECK-DAG: mov ebx
688 ; CHECK: lock 683 ; CHECK: lock
689 ; CHECK-NEXT: cmpxchg8b qword ptr [{{e.[^x]}}] 684 ; CHECK-NEXT: cmpxchg8b QWORD PTR [{{e.[^x]}}]
690 ; CHECK: jne -{{[0-9]}} 685 ; CHECK: jne-{{[0-9]}}
691 686
692 define i32 @test_atomic_rmw_xchg_32_ignored(i32 %iptr, i32 %v) { 687 define i32 @test_atomic_rmw_xchg_32_ignored(i32 %iptr, i32 %v) {
693 entry: 688 entry:
694 %ptr = inttoptr i32 %iptr to i32* 689 %ptr = inttoptr i32 %iptr to i32*
695 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6) 690 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6)
696 ret i32 %v 691 ret i32 %v
697 } 692 }
698 ; In this case, ignoring the return value doesn't help. The xchg is 693 ; In this case, ignoring the return value doesn't help. The xchg is
699 ; used to do an atomic store. 694 ; used to do an atomic store.
700 ; CHECK-LABEL: test_atomic_rmw_xchg_32_ignored 695 ; CHECK-LABEL: test_atomic_rmw_xchg_32_ignored
701 ; CHECK: xchg dword ptr {{.*}}, [[REG:.*]] 696 ; CHECK: xchg DWORD PTR {{.*}}, [[REG:.*]]
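; As the comment says, discarding the result does not help here: an exchange
; whose old value is ignored still has to perform the atomic store, and the
; implicitly locked xchg is that store. Roughly, in C11 terms (hypothetical
; helper, sketch only):

#include <stdatomic.h>
#include <stdint.h>

/* Dropping the returned value leaves a seq_cst store to perform; the xchg
   instruction checked above is what carries it out. */
static void rmw_xchg_32_ignored_sketch(_Atomic uint32_t *ptr, uint32_t v) {
    (void)atomic_exchange_explicit(ptr, v, memory_order_seq_cst);
}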
702 697
703 ;;;; Cmpxchg 698 ;;;; Cmpxchg
704 699
705 define i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) { 700 define i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) {
706 entry: 701 entry:
707 %trunc_exp = trunc i32 %expected to i8 702 %trunc_exp = trunc i32 %expected to i8
708 %trunc_des = trunc i32 %desired to i8 703 %trunc_des = trunc i32 %desired to i8
709 %ptr = inttoptr i32 %iptr to i8* 704 %ptr = inttoptr i32 %iptr to i8*
710 %old = call i8 @llvm.nacl.atomic.cmpxchg.i8(i8* %ptr, i8 %trunc_exp, 705 %old = call i8 @llvm.nacl.atomic.cmpxchg.i8(i8* %ptr, i8 %trunc_exp,
711 i8 %trunc_des, i32 6, i32 6) 706 i8 %trunc_des, i32 6, i32 6)
712 %old_ext = zext i8 %old to i32 707 %old_ext = zext i8 %old to i32
713 ret i32 %old_ext 708 ret i32 %old_ext
714 } 709 }
715 ; CHECK-LABEL: test_atomic_cmpxchg_8 710 ; CHECK-LABEL: test_atomic_cmpxchg_8
716 ; CHECK: mov eax, {{.*}} 711 ; CHECK: mov eax,{{.*}}
717 ; Need to check that eax isn't used as the address register or the desired, 712 ; Need to check that eax isn't used as the address register or the desired,
718 ; since it is already used as the *expected* register. 713 ; since it is already used as the *expected* register.
719 ; CHECK: lock 714 ; CHECK: lock
720 ; CHECK-NEXT: cmpxchg byte ptr [e{{[^a].}}], {{[^a]}}l 715 ; CHECK-NEXT: cmpxchg BYTE PTR [e{{[^a].}}], {{[^a]}}l
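; For reference, the cmpxchg intrinsic takes (ptr, expected, desired, success
; order, failure order) and returns the value that was in memory, which x86
; cmpxchg conveniently leaves in eax. A rough C11 counterpart of the i8 case
; above (made-up helper name; C11 instead returns a bool and writes the
; observed value back into expected):

#include <stdatomic.h>
#include <stdint.h>

/* 'expected' ends up holding the value that was in memory in both the success
   and failure cases, which is what the intrinsic returns. */
static uint32_t cmpxchg_8_sketch(_Atomic uint8_t *ptr, uint8_t expected, uint8_t desired) {
    atomic_compare_exchange_strong_explicit(ptr, &expected, desired,
                                            memory_order_seq_cst,
                                            memory_order_seq_cst);
    return (uint32_t)expected;  /* mirrors the zext of %old in the IR above */
}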
721 716
722 define i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) { 717 define i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) {
723 entry: 718 entry:
724 %trunc_exp = trunc i32 %expected to i16 719 %trunc_exp = trunc i32 %expected to i16
725 %trunc_des = trunc i32 %desired to i16 720 %trunc_des = trunc i32 %desired to i16
726 %ptr = inttoptr i32 %iptr to i16* 721 %ptr = inttoptr i32 %iptr to i16*
727 %old = call i16 @llvm.nacl.atomic.cmpxchg.i16(i16* %ptr, i16 %trunc_exp, 722 %old = call i16 @llvm.nacl.atomic.cmpxchg.i16(i16* %ptr, i16 %trunc_exp,
728 i16 %trunc_des, i32 6, i32 6) 723 i16 %trunc_des, i32 6, i32 6)
729 %old_ext = zext i16 %old to i32 724 %old_ext = zext i16 %old to i32
730 ret i32 %old_ext 725 ret i32 %old_ext
731 } 726 }
732 ; CHECK-LABEL: test_atomic_cmpxchg_16 727 ; CHECK-LABEL: test_atomic_cmpxchg_16
733 ; CHECK: mov eax, {{.*}} 728 ; CHECK: mov eax,{{.*}}
734 ; CHECK: lock 729 ; CHECK: lock
735 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}], {{[^a]}}x 730 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}], {{[^a]}}x
736 731
737 define i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) { 732 define i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) {
738 entry: 733 entry:
739 %ptr = inttoptr i32 %iptr to i32* 734 %ptr = inttoptr i32 %iptr to i32*
740 %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected, 735 %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
741 i32 %desired, i32 6, i32 6) 736 i32 %desired, i32 6, i32 6)
742 ret i32 %old 737 ret i32 %old
743 } 738 }
744 ; CHECK-LABEL: test_atomic_cmpxchg_32 739 ; CHECK-LABEL: test_atomic_cmpxchg_32
745 ; CHECK: mov eax, {{.*}} 740 ; CHECK: mov eax,{{.*}}
746 ; CHECK: lock 741 ; CHECK: lock
747 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}], e{{[^a]}} 742 ; CHECK-NEXT: cmpxchg DWORD PTR [e{{[^a].}}], e{{[^a]}}
748 743
749 define i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) { 744 define i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) {
750 entry: 745 entry:
751 %ptr = inttoptr i32 %iptr to i64* 746 %ptr = inttoptr i32 %iptr to i64*
752 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, 747 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
753 i64 %desired, i32 6, i32 6) 748 i64 %desired, i32 6, i32 6)
754 ret i64 %old 749 ret i64 %old
755 } 750 }
756 ; CHECK-LABEL: test_atomic_cmpxchg_64 751 ; CHECK-LABEL: test_atomic_cmpxchg_64
757 ; CHECK: push ebx 752 ; CHECK: push ebx
758 ; CHECK-DAG: mov edx 753 ; CHECK-DAG: mov edx
759 ; CHECK-DAG: mov eax 754 ; CHECK-DAG: mov eax
760 ; CHECK-DAG: mov ecx 755 ; CHECK-DAG: mov ecx
761 ; CHECK-DAG: mov ebx 756 ; CHECK-DAG: mov ebx
762 ; CHECK: lock 757 ; CHECK: lock
763 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] 758 ; CHECK-NEXT: cmpxchg8b QWORD PTR [e{{.[^x]}}]
764 ; edx and eax are already the return registers, so they don't actually 759 ; edx and eax are already the return registers, so they don't actually
765 ; need to be reshuffled via movs. The next test stores the result 760 ; need to be reshuffled via movs. The next test stores the result
766 ; somewhere, so in that case they do need to be mov'ed. 761 ; somewhere, so in that case they do need to be mov'ed.
767 762
768 ; Test a case where %old really does need to be copied out of edx:eax. 763 ; Test a case where %old really does need to be copied out of edx:eax.
769 define void @test_atomic_cmpxchg_64_store(i32 %ret_iptr, i32 %iptr, i64 %expected, i64 %desired) { 764 define void @test_atomic_cmpxchg_64_store(i32 %ret_iptr, i32 %iptr, i64 %expected, i64 %desired) {
770 entry: 765 entry:
771 %ptr = inttoptr i32 %iptr to i64* 766 %ptr = inttoptr i32 %iptr to i64*
772 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, 767 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
773 i64 %desired, i32 6, i32 6) 768 i64 %desired, i32 6, i32 6)
774 %__6 = inttoptr i32 %ret_iptr to i64* 769 %__6 = inttoptr i32 %ret_iptr to i64*
775 store i64 %old, i64* %__6, align 1 770 store i64 %old, i64* %__6, align 1
776 ret void 771 ret void
777 } 772 }
778 ; CHECK-LABEL: test_atomic_cmpxchg_64_store 773 ; CHECK-LABEL: test_atomic_cmpxchg_64_store
779 ; CHECK: push ebx 774 ; CHECK: push ebx
780 ; CHECK-DAG: mov edx 775 ; CHECK-DAG: mov edx
781 ; CHECK-DAG: mov eax 776 ; CHECK-DAG: mov eax
782 ; CHECK-DAG: mov ecx 777 ; CHECK-DAG: mov ecx
783 ; CHECK-DAG: mov ebx 778 ; CHECK-DAG: mov ebx
784 ; CHECK: lock 779 ; CHECK: lock
785 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] 780 ; CHECK-NEXT: cmpxchg8b QWORD PTR [e{{.[^x]}}]
786 ; CHECK-DAG: mov {{.*}}, edx 781 ; CHECK-DAG: mov {{.*}}, edx
787 ; CHECK-DAG: mov {{.*}}, eax 782 ; CHECK-DAG: mov {{.*}}, eax
788 783
789 ; Test with some more register pressure. When we have an alloca, ebp is 784 ; Test with some more register pressure. When we have an alloca, ebp is
790 ; used to manage the stack frame, so it cannot be used as a register either. 785 ; used to manage the stack frame, so it cannot be used as a register either.
791 define i64 @test_atomic_cmpxchg_64_alloca(i32 %iptr, i64 %expected, i64 %desired) { 786 define i64 @test_atomic_cmpxchg_64_alloca(i32 %iptr, i64 %expected, i64 %desired) {
792 entry: 787 entry:
793 %alloca_ptr = alloca i8, i32 16, align 16 788 %alloca_ptr = alloca i8, i32 16, align 16
794 %ptr = inttoptr i32 %iptr to i64* 789 %ptr = inttoptr i32 %iptr to i64*
795 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, 790 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
(...skipping 11 matching lines...)
807 ; CHECK-DAG: mov edx 802 ; CHECK-DAG: mov edx
808 ; CHECK-DAG: mov eax 803 ; CHECK-DAG: mov eax
809 ; CHECK-DAG: mov ecx 804 ; CHECK-DAG: mov ecx
810 ; CHECK-DAG: mov ebx 805 ; CHECK-DAG: mov ebx
811 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired). 806 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired).
812 ; It also cannot be ebp since we use that for alloca. Also make sure it's 807 ; It also cannot be ebp since we use that for alloca. Also make sure it's
813 ; not esp, since that's the stack pointer and mucking with it will break 808 ; not esp, since that's the stack pointer and mucking with it will break
814 ; the later use_ptr function call. 809 ; the later use_ptr function call.
815 ; That pretty much leaves esi or edi as the only viable registers. 810 ; That pretty much leaves esi or edi as the only viable registers.
816 ; CHECK: lock 811 ; CHECK: lock
817 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{[ds]}}i] 812 ; CHECK-NEXT: cmpxchg8b QWORD PTR [e{{[ds]}}i]
818 ; CHECK: call use_ptr 813 ; CHECK: call
814 ; CHECK-NEXT: R_{{.*}} use_ptr
819 815
820 define i32 @test_atomic_cmpxchg_32_ignored(i32 %iptr, i32 %expected, i32 %desired) { 816 define i32 @test_atomic_cmpxchg_32_ignored(i32 %iptr, i32 %expected, i32 %desired) {
821 entry: 817 entry:
822 %ptr = inttoptr i32 %iptr to i32* 818 %ptr = inttoptr i32 %iptr to i32*
823 %ignored = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected, 819 %ignored = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
824 i32 %desired, i32 6, i32 6) 820 i32 %desired, i32 6, i32 6)
825 ret i32 0 821 ret i32 0
826 } 822 }
827 ; CHECK-LABEL: test_atomic_cmpxchg_32_ignored 823 ; CHECK-LABEL: test_atomic_cmpxchg_32_ignored
828 ; CHECK: mov eax, {{.*}} 824 ; CHECK: mov eax,{{.*}}
829 ; CHECK: lock 825 ; CHECK: lock
830 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}] 826 ; CHECK-NEXT: cmpxchg DWORD PTR [e{{[^a].}}]
831 827
832 define i64 @test_atomic_cmpxchg_64_ignored(i32 %iptr, i64 %expected, i64 %desired) { 828 define i64 @test_atomic_cmpxchg_64_ignored(i32 %iptr, i64 %expected, i64 %desired) {
833 entry: 829 entry:
834 %ptr = inttoptr i32 %iptr to i64* 830 %ptr = inttoptr i32 %iptr to i64*
835 %ignored = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, 831 %ignored = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
836 i64 %desired, i32 6, i32 6) 832 i64 %desired, i32 6, i32 6)
837 ret i64 0 833 ret i64 0
838 } 834 }
839 ; CHECK-LABEL: test_atomic_cmpxchg_64_ignored 835 ; CHECK-LABEL: test_atomic_cmpxchg_64_ignored
840 ; CHECK: push ebx 836 ; CHECK: push ebx
841 ; CHECK-DAG: mov edx 837 ; CHECK-DAG: mov edx
842 ; CHECK-DAG: mov eax 838 ; CHECK-DAG: mov eax
843 ; CHECK-DAG: mov ecx 839 ; CHECK-DAG: mov ecx
844 ; CHECK-DAG: mov ebx 840 ; CHECK-DAG: mov ebx
845 ; CHECK: lock 841 ; CHECK: lock
846 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] 842 ; CHECK-NEXT: cmpxchg8b QWORD PTR [e{{.[^x]}}]
847 843
848 ;;;; Fence and is-lock-free. 844 ;;;; Fence and is-lock-free.
849 845
850 define void @test_atomic_fence() { 846 define void @test_atomic_fence() {
851 entry: 847 entry:
852 call void @llvm.nacl.atomic.fence(i32 6) 848 call void @llvm.nacl.atomic.fence(i32 6)
853 ret void 849 ret void
854 } 850 }
855 ; CHECK-LABEL: test_atomic_fence 851 ; CHECK-LABEL: test_atomic_fence
856 ; CHECK: mfence 852 ; CHECK: mfence
857 853
858 define void @test_atomic_fence_all() { 854 define void @test_atomic_fence_all() {
859 entry: 855 entry:
860 call void @llvm.nacl.atomic.fence.all() 856 call void @llvm.nacl.atomic.fence.all()
861 ret void 857 ret void
862 } 858 }
863 ; CHECK-LABEL: test_atomic_fence_all 859 ; CHECK-LABEL: test_atomic_fence_all
864 ; CHECK: mfence 860 ; CHECK: mfence
865 861
866 define i32 @test_atomic_is_lock_free(i32 %iptr) { 862 define i32 @test_atomic_is_lock_free(i32 %iptr) {
867 entry: 863 entry:
868 %ptr = inttoptr i32 %iptr to i8* 864 %ptr = inttoptr i32 %iptr to i8*
869 %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr) 865 %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr)
870 %r = zext i1 %i to i32 866 %r = zext i1 %i to i32
871 ret i32 %r 867 ret i32 %r
872 } 868 }
873 ; CHECK-LABEL: test_atomic_is_lock_free 869 ; CHECK-LABEL: test_atomic_is_lock_free
874 ; CHECK: mov {{.*}}, 1 870 ; CHECK: mov {{.*}},1
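; llvm.nacl.atomic.is.lock.free takes a byte size and an address, similar in
; shape to the GCC/Clang __atomic_is_lock_free builtin; for a 4-byte object
; Subzero answers true at translation time, which is the constant 1 the CHECK
; above expects (and 0 for the unsupported 7-byte query in the next test).
; A hypothetical C-level illustration, assuming that builtin mapping:

#include <stdbool.h>

/* Same query expressed with the (size, pointer) builtin; shown only to
   clarify what the intrinsic asks, not how Subzero implements it. */
static bool is_lock_free_4(void *ptr) {
    return __atomic_is_lock_free(4, ptr);
}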
875 871
876 define i32 @test_not_lock_free(i32 %iptr) { 872 define i32 @test_not_lock_free(i32 %iptr) {
877 entry: 873 entry:
878 %ptr = inttoptr i32 %iptr to i8* 874 %ptr = inttoptr i32 %iptr to i8*
879 %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 7, i8* %ptr) 875 %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 7, i8* %ptr)
880 %r = zext i1 %i to i32 876 %r = zext i1 %i to i32
881 ret i32 %r 877 ret i32 %r
882 } 878 }
883 ; CHECK-LABEL: test_not_lock_free 879 ; CHECK-LABEL: test_not_lock_free
884 ; CHECK: mov {{.*}}, 0 880 ; CHECK: mov {{.*}},0
885 881
886 define i32 @test_atomic_is_lock_free_ignored(i32 %iptr) { 882 define i32 @test_atomic_is_lock_free_ignored(i32 %iptr) {
887 entry: 883 entry:
888 %ptr = inttoptr i32 %iptr to i8* 884 %ptr = inttoptr i32 %iptr to i8*
889 %ignored = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr) 885 %ignored = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr)
890 ret i32 0 886 ret i32 0
891 } 887 }
892 ; CHECK-LABEL: test_atomic_is_lock_free_ignored 888 ; CHECK-LABEL: test_atomic_is_lock_free_ignored
893 ; CHECK: mov {{.*}}, 0 889 ; CHECK: mov {{.*}},0
894 ; This can get optimized out, because it's side-effect-free. 890 ; This can get optimized out, because it's side-effect-free.
895 ; CHECKO2-LABEL: test_atomic_is_lock_free_ignored 891 ; CHECKO2-LABEL: test_atomic_is_lock_free_ignored
896 ; CHECKO2-NOT: mov {{.*}}, 1 892 ; CHECKO2-NOT: mov {{.*}}, 1
897 ; CHECKO2: mov {{.*}}, 0 893 ; CHECKO2: mov {{.*}}, 0
898 894
899 ; TODO(jvoung): at some point we can take advantage of the 895 ; TODO(jvoung): at some point we can take advantage of the
900 ; fact that nacl.atomic.is.lock.free will resolve to a constant 896 ; fact that nacl.atomic.is.lock.free will resolve to a constant
901 ; (which adds DCE opportunities). Once we optimize, the test expectations 897 ; (which adds DCE opportunities). Once we optimize, the test expectations
902 ; for this case should change. 898 ; for this case should change.
903 define i32 @test_atomic_is_lock_free_can_dce(i32 %iptr, i32 %x, i32 %y) { 899 define i32 @test_atomic_is_lock_free_can_dce(i32 %iptr, i32 %x, i32 %y) {
904 entry: 900 entry:
905 %ptr = inttoptr i32 %iptr to i8* 901 %ptr = inttoptr i32 %iptr to i8*
906 %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr) 902 %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr)
907 %i_ext = zext i1 %i to i32 903 %i_ext = zext i1 %i to i32
908 %cmp = icmp eq i32 %i_ext, 1 904 %cmp = icmp eq i32 %i_ext, 1
909 br i1 %cmp, label %lock_free, label %not_lock_free 905 br i1 %cmp, label %lock_free, label %not_lock_free
910 lock_free: 906 lock_free:
911 ret i32 %i_ext 907 ret i32 %i_ext
912 908
913 not_lock_free: 909 not_lock_free:
914 %z = add i32 %x, %y 910 %z = add i32 %x, %y
915 ret i32 %z 911 ret i32 %z
916 } 912 }
917 ; CHECK-LABEL: test_atomic_is_lock_free_can_dce 913 ; CHECK-LABEL: test_atomic_is_lock_free_can_dce
918 ; CHECK: mov {{.*}}, 1 914 ; CHECK: mov {{.*}},1
919 ; CHECK: ret 915 ; CHECK: ret
920 ; CHECK: add 916 ; CHECK: add
921 ; CHECK: ret 917 ; CHECK: ret