Chromium Code Reviews

Side by Side Diff: tests_lit/llvm2ice_tests/nacl-atomic-intrinsics.ll

Issue 509233002: Convert lit tests to check disassembled assembly. (Closed) Base URL: https://chromium.googlesource.com/native_client/pnacl-subzero.git@master
Patch Set: reorder some CALLTARGETS-LABEL Created 6 years, 3 months ago
1 ; This tests each of the supported NaCl atomic instructions for every 1 ; This tests each of the supported NaCl atomic instructions for every
2 ; size allowed. 2 ; size allowed.
3 3
4 ; RUN: %llvm2ice -O2 --verbose none %s | FileCheck %s
5 ; RUN: %llvm2ice -O2 --verbose none %s | FileCheck %s --check-prefix=CHECKO2
6 ; RUN: %llvm2ice -Om1 --verbose none %s | FileCheck %s
7 ; RUN: %llvm2ice -O2 --verbose none %s \ 4 ; RUN: %llvm2ice -O2 --verbose none %s \
8 ; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj 5 ; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj \
6 ; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - | FileCheck %s
7 ; RUN: %llvm2ice -O2 --verbose none %s \
8 ; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj \
9 ; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - \
10 ; RUN: | FileCheck --check-prefix=CHECKO2 %s
9 ; RUN: %llvm2ice -Om1 --verbose none %s \ 11 ; RUN: %llvm2ice -Om1 --verbose none %s \
10 ; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj 12 ; RUN: | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj \
13 ; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - | FileCheck %s
11 ; RUN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s 14 ; RUN: %llvm2ice --verbose none %s | FileCheck --check-prefix=ERRORS %s
12 ; RUN: %llvm2iceinsts %s | %szdiff %s | FileCheck --check-prefix=DUMP %s 15 ; RUN: %llvm2iceinsts %s | %szdiff %s | FileCheck --check-prefix=DUMP %s
13 ; RUN: %llvm2iceinsts --pnacl %s | %szdiff %s \ 16 ; RUN: %llvm2iceinsts --pnacl %s | %szdiff %s \
14 ; RUN: | FileCheck --check-prefix=DUMP %s 17 ; RUN: | FileCheck --check-prefix=DUMP %s
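; How the new RUN pipeline above fits together (a sketch, assuming %llvm2ice,
; %llvm2iceinsts and %szdiff are lit substitutions for the Subzero translator
; and helper scripts set up by the test suite): the textual assembly emitted by
; llvm2ice is assembled into a NaCl object file with llvm-mc and then
; disassembled again with llvm-objdump, so FileCheck now matches the
; disassembler's output rather than llvm2ice's own assembly printer.
; Run by hand, the first RUN pair is roughly:
;   llvm2ice -O2 --verbose none nacl-atomic-intrinsics.ll \
;     | llvm-mc -triple=i686-none-nacl -x86-asm-syntax=intel -filetype=obj \
;     | llvm-objdump -d --symbolize -x86-asm-syntax=intel - \
;     | FileCheck nacl-atomic-intrinsics.ll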
15 18
19 ; TODO(jvoung): Uh... normally pnacl-llc is not supposed to separate the
20 ; lock from its instruction w/ bundle padding, but when processing .s
21 ; files with llvm-mc it seems to be occasionally wrong!
22 ; https://code.google.com/p/nativeclient/issues/detail?id=3929
23 ; That makes the current "lock" checks avoid using CHECK-NEXT.
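; A hypothetical disassembly showing the failure mode being worked around
; (the exact padding chosen by the bundle aligner may differ):
;     lock
;     nop                           <-- padding wrongly inserted here
;     xadd byte ptr [eax], cl
; With padding between the prefix and its instruction, a "CHECK-NEXT: xadd"
; right after "CHECK: lock" would fail, hence the plain CHECK (or a "Should
; be using NEXT" note) on the lines that follow a lock check below.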
24
16 declare i8 @llvm.nacl.atomic.load.i8(i8*, i32) 25 declare i8 @llvm.nacl.atomic.load.i8(i8*, i32)
17 declare i16 @llvm.nacl.atomic.load.i16(i16*, i32) 26 declare i16 @llvm.nacl.atomic.load.i16(i16*, i32)
18 declare i32 @llvm.nacl.atomic.load.i32(i32*, i32) 27 declare i32 @llvm.nacl.atomic.load.i32(i32*, i32)
19 declare i64 @llvm.nacl.atomic.load.i64(i64*, i32) 28 declare i64 @llvm.nacl.atomic.load.i64(i64*, i32)
20 declare void @llvm.nacl.atomic.store.i8(i8, i8*, i32) 29 declare void @llvm.nacl.atomic.store.i8(i8, i8*, i32)
21 declare void @llvm.nacl.atomic.store.i16(i16, i16*, i32) 30 declare void @llvm.nacl.atomic.store.i16(i16, i16*, i32)
22 declare void @llvm.nacl.atomic.store.i32(i32, i32*, i32) 31 declare void @llvm.nacl.atomic.store.i32(i32, i32*, i32)
23 declare void @llvm.nacl.atomic.store.i64(i64, i64*, i32) 32 declare void @llvm.nacl.atomic.store.i64(i64, i64*, i32)
24 declare i8 @llvm.nacl.atomic.rmw.i8(i32, i8*, i8, i32) 33 declare i8 @llvm.nacl.atomic.rmw.i8(i32, i8*, i8, i32)
25 declare i16 @llvm.nacl.atomic.rmw.i16(i32, i16*, i16, i32) 34 declare i16 @llvm.nacl.atomic.rmw.i16(i32, i16*, i16, i32)
(...skipping 168 matching lines...)
194 define i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) { 203 define i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) {
195 entry: 204 entry:
196 %trunc = trunc i32 %v to i8 205 %trunc = trunc i32 %v to i8
197 %ptr = inttoptr i32 %iptr to i8* 206 %ptr = inttoptr i32 %iptr to i8*
198 ; "1" is an atomic add, and "6" is sequential consistency. 207 ; "1" is an atomic add, and "6" is sequential consistency.
199 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 6) 208 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 6)
200 %a_ext = zext i8 %a to i32 209 %a_ext = zext i8 %a to i32
201 ret i32 %a_ext 210 ret i32 %a_ext
202 } 211 }
203 ; CHECK-LABEL: test_atomic_rmw_add_8 212 ; CHECK-LABEL: test_atomic_rmw_add_8
204 ; CHECK: lock xadd byte {{.*}}, [[REG:.*]] 213 ; CHECK: lock
214 ; CHECK-NEXT: xadd byte {{.*}}, [[REG:.*]]
205 ; CHECK: mov {{.*}}, [[REG]] 215 ; CHECK: mov {{.*}}, [[REG]]
206 216
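; For reference, inferred from the calls in this file: the leading i32
; operand of @llvm.nacl.atomic.rmw.* selects the operation (1 add, 2 sub,
; 3 or, 4 and, 5 xor, 6 xchg), and the trailing i32 operand(s) of the atomic
; intrinsics give the memory ordering, where 6 means sequentially consistent
; (cmpxchg passes two ordering operands).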
207 define i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) { 217 define i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) {
208 entry: 218 entry:
209 %trunc = trunc i32 %v to i16 219 %trunc = trunc i32 %v to i16
210 %ptr = inttoptr i32 %iptr to i16* 220 %ptr = inttoptr i32 %iptr to i16*
211 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 1, i16* %ptr, i16 %trunc, i32 6) 221 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 1, i16* %ptr, i16 %trunc, i32 6)
212 %a_ext = zext i16 %a to i32 222 %a_ext = zext i16 %a to i32
213 ret i32 %a_ext 223 ret i32 %a_ext
214 } 224 }
215 ; CHECK-LABEL: test_atomic_rmw_add_16 225 ; CHECK-LABEL: test_atomic_rmw_add_16
216 ; CHECK: lock xadd word {{.*}}, [[REG:.*]] 226 ; CHECK: lock
227 ; Should be using NEXT: see issue 3929
228 ; CHECK: xadd word {{.*}}, [[REG:.*]]
217 ; CHECK: mov {{.*}}, [[REG]] 229 ; CHECK: mov {{.*}}, [[REG]]
218 230
219 define i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) { 231 define i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) {
220 entry: 232 entry:
221 %ptr = inttoptr i32 %iptr to i32* 233 %ptr = inttoptr i32 %iptr to i32*
222 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6) 234 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6)
223 ret i32 %a 235 ret i32 %a
224 } 236 }
225 ; CHECK-LABEL: test_atomic_rmw_add_32 237 ; CHECK-LABEL: test_atomic_rmw_add_32
226 ; CHECK: lock xadd dword {{.*}}, [[REG:.*]] 238 ; CHECK: lock
239 ; CHECK-NEXT: xadd dword {{.*}}, [[REG:.*]]
227 ; CHECK: mov {{.*}}, [[REG]] 240 ; CHECK: mov {{.*}}, [[REG]]
228 241
229 define i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) { 242 define i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) {
230 entry: 243 entry:
231 %ptr = inttoptr i32 %iptr to i64* 244 %ptr = inttoptr i32 %iptr to i64*
232 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6) 245 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6)
233 ret i64 %a 246 ret i64 %a
234 } 247 }
235 ; CHECK-LABEL: test_atomic_rmw_add_64 248 ; CHECK-LABEL: test_atomic_rmw_add_64
236 ; CHECK: push ebx 249 ; CHECK: push ebx
237 ; CHECK: mov eax, dword ptr [{{.*}}] 250 ; CHECK: mov eax, dword ptr [{{.*}}]
238 ; CHECK: mov edx, dword ptr [{{.*}}+4] 251 ; CHECK: mov edx, dword ptr [{{.*}} + 4]
239 ; CHECK: .L[[LABEL:.*]]:
240 ; CHECK: mov ebx, eax 252 ; CHECK: mov ebx, eax
241 ; RHS of add cannot be any of the e[abcd]x regs because they are 253 ; RHS of add cannot be any of the e[abcd]x regs because they are
242 ; clobbered in the loop, and the RHS needs to remain live. 254 ; clobbered in the loop, and the RHS needs to remain live.
243 ; CHECK: add ebx, {{.*e.[^x]}} 255 ; CHECK: add ebx, {{.*e.[^x]}}
244 ; CHECK: mov ecx, edx 256 ; CHECK: mov ecx, edx
245 ; CHECK: adc ecx, {{.*e.[^x]}} 257 ; CHECK: adc ecx, {{.*e.[^x]}}
246 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired). 258 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired).
247 ; It can be esi, edi, or ebp though, for example (so we need to be careful 259 ; It can be esi, edi, or ebp though, for example (so we need to be careful
248 ; about rejecting eb* and ed*.) 260 ; about rejecting eb* and ed*.)
249 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 261 ; CHECK: lock
250 ; CHECK: jne .L[[LABEL]] 262 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}]
263 ; CHECK: jne -{{[0-9]}}
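; For context, the expected lowering is a standard cmpxchg8b retry loop; a
; sketch with illustrative register choices (the allocator may pick others,
; subject to the constraints noted above):
;     mov  eax, dword ptr [esi]        ; old value, low half
;     mov  edx, dword ptr [esi + 4]    ; old value, high half
;   retry:
;     mov  ebx, eax
;     add  ebx, edi                    ; new low  = old low  + v low
;     mov  ecx, edx
;     adc  ecx, ebp                    ; new high = old high + v high + carry
;     lock cmpxchg8b qword ptr [esi]   ; succeeds only if [esi] still == edx:eax
;     jne  retry                       ; on failure edx:eax holds the fresh value
; The CHECK lines above only pin down the pieces of this pattern that matter.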
251 264
252 ; Test with some more register pressure. When we have an alloca, ebp is 265 ; Test with some more register pressure. When we have an alloca, ebp is
253 ; used to manage the stack frame, so it cannot be used as a register either. 266 ; used to manage the stack frame, so it cannot be used as a register either.
254 declare void @use_ptr(i32 %iptr) 267 define void @use_ptr(i32 %iptr) {
268 entry:
269 ret void
270 }
255 271
256 define i64 @test_atomic_rmw_add_64_alloca(i32 %iptr, i64 %v) { 272 define i64 @test_atomic_rmw_add_64_alloca(i32 %iptr, i64 %v) {
257 entry: 273 entry:
258 %alloca_ptr = alloca i8, i32 16, align 16 274 %alloca_ptr = alloca i8, i32 16, align 16
259 %ptr = inttoptr i32 %iptr to i64* 275 %ptr = inttoptr i32 %iptr to i64*
260 %old = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6) 276 %old = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6)
261 store i8 0, i8* %alloca_ptr, align 1 277 store i8 0, i8* %alloca_ptr, align 1
262 store i8 1, i8* %alloca_ptr, align 1 278 store i8 1, i8* %alloca_ptr, align 1
263 store i8 2, i8* %alloca_ptr, align 1 279 store i8 2, i8* %alloca_ptr, align 1
264 store i8 3, i8* %alloca_ptr, align 1 280 store i8 3, i8* %alloca_ptr, align 1
265 %__5 = ptrtoint i8* %alloca_ptr to i32 281 %__5 = ptrtoint i8* %alloca_ptr to i32
266 call void @use_ptr(i32 %__5) 282 call void @use_ptr(i32 %__5)
267 ret i64 %old 283 ret i64 %old
268 } 284 }
269 ; CHECK-LABEL: test_atomic_rmw_add_64_alloca 285 ; CHECK-LABEL: test_atomic_rmw_add_64_alloca
270 ; CHECK: push ebx 286 ; CHECK: push ebx
271 ; CHECK-DAG: mov edx 287 ; CHECK-DAG: mov edx
272 ; CHECK-DAG: mov eax 288 ; CHECK-DAG: mov eax
273 ; CHECK-DAG: mov ecx 289 ; CHECK-DAG: mov ecx
274 ; CHECK-DAG: mov ebx 290 ; CHECK-DAG: mov ebx
275 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired). 291 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired).
276 ; It also cannot be ebp since we use that for alloca. Also make sure it's 292 ; It also cannot be ebp since we use that for alloca. Also make sure it's
277 ; not esp, since that's the stack pointer and mucking with it will break 293 ; not esp, since that's the stack pointer and mucking with it will break
278 ; the later use_ptr function call. 294 ; the later use_ptr function call.
279 ; That pretty much leaves esi or edi as the only viable registers. 295 ; That pretty much leaves esi or edi as the only viable registers.
280 ; CHECK: lock cmpxchg8b qword ptr [e{{[ds]}}i] 296 ; CHECK: lock
297 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{[ds]}}i]
281 ; CHECK: call use_ptr 298 ; CHECK: call use_ptr
282 299
283 define i32 @test_atomic_rmw_add_32_ignored(i32 %iptr, i32 %v) { 300 define i32 @test_atomic_rmw_add_32_ignored(i32 %iptr, i32 %v) {
284 entry: 301 entry:
285 %ptr = inttoptr i32 %iptr to i32* 302 %ptr = inttoptr i32 %iptr to i32*
286 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6) 303 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6)
287 ret i32 %v 304 ret i32 %v
288 } 305 }
289 ; Technically this could use "lock add" instead of "lock xadd", if liveness 306 ; Technically this could use "lock add" instead of "lock xadd", if liveness
290 ; tells us that the destination variable is dead. 307 ; tells us that the destination variable is dead.
291 ; CHECK-LABEL: test_atomic_rmw_add_32_ignored 308 ; CHECK-LABEL: test_atomic_rmw_add_32_ignored
292 ; CHECK: lock xadd dword {{.*}}, [[REG:.*]] 309 ; CHECK: lock
310 ; CHECK-NEXT: xadd dword {{.*}}, [[REG:.*]]
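; Sketch of the difference (illustrative operands): when the old value is
; live,
;     mov  ecx, edi
;     lock xadd dword ptr [eax], ecx   ; ecx receives the old value
; but when the result is dead, a plain locked RMW would suffice:
;     lock add dword ptr [eax], edi
; which avoids tying up a register for the returned old value.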
293 311
294 ; Atomic RMW 64 needs to be expanded into its own loop. 312 ; Atomic RMW 64 needs to be expanded into its own loop.
295 ; Make sure that works w/ non-trivial function bodies. 313 ; Make sure that works w/ non-trivial function bodies.
296 define i64 @test_atomic_rmw_add_64_loop(i32 %iptr, i64 %v) { 314 define i64 @test_atomic_rmw_add_64_loop(i32 %iptr, i64 %v) {
297 entry: 315 entry:
298 %x = icmp ult i64 %v, 100 316 %x = icmp ult i64 %v, 100
299 br i1 %x, label %err, label %loop 317 br i1 %x, label %err, label %loop
300 318
301 loop: 319 loop:
302 %v_next = phi i64 [ %v, %entry ], [ %next, %loop ] 320 %v_next = phi i64 [ %v, %entry ], [ %next, %loop ]
303 %ptr = inttoptr i32 %iptr to i64* 321 %ptr = inttoptr i32 %iptr to i64*
304 %next = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v_next, i32 6) 322 %next = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v_next, i32 6)
305 %success = icmp eq i64 %next, 100 323 %success = icmp eq i64 %next, 100
306 br i1 %success, label %done, label %loop 324 br i1 %success, label %done, label %loop
307 325
308 done: 326 done:
309 ret i64 %next 327 ret i64 %next
310 328
311 err: 329 err:
312 ret i64 0 330 ret i64 0
313 } 331 }
314 ; CHECK-LABEL: test_atomic_rmw_add_64_loop 332 ; CHECK-LABEL: test_atomic_rmw_add_64_loop
315 ; CHECK: push ebx 333 ; CHECK: push ebx
316 ; CHECK-LABEL: .Ltest_atomic_rmw_add_64_loop{{.*}}loop
317 ; CHECK: mov eax, dword ptr [{{.*}}] 334 ; CHECK: mov eax, dword ptr [{{.*}}]
318 ; CHECK: mov edx, dword ptr [{{.*}}+4] 335 ; CHECK: mov edx, dword ptr [{{.*}} + 4]
319 ; CHECK: .L[[LABEL:.*]]:
320 ; CHECK: mov ebx, eax 336 ; CHECK: mov ebx, eax
321 ; CHECK: add ebx, {{.*e.[^x]}} 337 ; CHECK: add ebx, {{.*e.[^x]}}
322 ; CHECK: mov ecx, edx 338 ; CHECK: mov ecx, edx
323 ; CHECK: adc ecx, {{.*e.[^x]}} 339 ; CHECK: adc ecx, {{.*e.[^x]}}
324 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 340 ; CHECK: lock
325 ; CHECK: jne .L[[LABEL]] 341 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}]
326 ; CHECK-LABEL: .Ltest_atomic_rmw_add_64_loop{{.*}}done 342 ; CHECK: jne -{{[0-9]}}
327 343
328 ;; sub 344 ;; sub
329 345
330 define i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) { 346 define i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) {
331 entry: 347 entry:
332 %trunc = trunc i32 %v to i8 348 %trunc = trunc i32 %v to i8
333 %ptr = inttoptr i32 %iptr to i8* 349 %ptr = inttoptr i32 %iptr to i8*
334 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 2, i8* %ptr, i8 %trunc, i32 6) 350 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 2, i8* %ptr, i8 %trunc, i32 6)
335 %a_ext = zext i8 %a to i32 351 %a_ext = zext i8 %a to i32
336 ret i32 %a_ext 352 ret i32 %a_ext
337 } 353 }
338 ; CHECK-LABEL: test_atomic_rmw_sub_8 354 ; CHECK-LABEL: test_atomic_rmw_sub_8
339 ; CHECK: neg [[REG:.*]] 355 ; CHECK: neg [[REG:.*]]
340 ; CHECK: lock xadd byte {{.*}}, [[REG]] 356 ; CHECK: lock
357 ; Should be using NEXT: see issue 3929
358 ; CHECK: xadd byte {{.*}}, [[REG]]
341 ; CHECK: mov {{.*}}, [[REG]] 359 ; CHECK: mov {{.*}}, [[REG]]
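; For context: x86 has no "xsub", so an atomic subtract that returns the old
; value is lowered by negating the operand and reusing lock xadd
; (illustrative registers):
;     mov  ecx, edi
;     neg  ecx
;     lock xadd dword ptr [eax], ecx   ; memory := old - v, ecx := old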
342 360
343 define i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) { 361 define i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) {
344 entry: 362 entry:
345 %trunc = trunc i32 %v to i16 363 %trunc = trunc i32 %v to i16
346 %ptr = inttoptr i32 %iptr to i16* 364 %ptr = inttoptr i32 %iptr to i16*
347 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 2, i16* %ptr, i16 %trunc, i32 6) 365 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 2, i16* %ptr, i16 %trunc, i32 6)
348 %a_ext = zext i16 %a to i32 366 %a_ext = zext i16 %a to i32
349 ret i32 %a_ext 367 ret i32 %a_ext
350 } 368 }
351 ; CHECK-LABEL: test_atomic_rmw_sub_16 369 ; CHECK-LABEL: test_atomic_rmw_sub_16
352 ; CHECK: neg [[REG:.*]] 370 ; CHECK: neg [[REG:.*]]
353 ; CHECK: lock xadd word {{.*}}, [[REG]] 371 ; CHECK: lock
372 ; CHECK-NEXT: xadd word {{.*}}, [[REG]]
354 ; CHECK: mov {{.*}}, [[REG]] 373 ; CHECK: mov {{.*}}, [[REG]]
355 374
356 define i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) { 375 define i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) {
357 entry: 376 entry:
358 %ptr = inttoptr i32 %iptr to i32* 377 %ptr = inttoptr i32 %iptr to i32*
359 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6) 378 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6)
360 ret i32 %a 379 ret i32 %a
361 } 380 }
362 ; CHECK-LABEL: test_atomic_rmw_sub_32 381 ; CHECK-LABEL: test_atomic_rmw_sub_32
363 ; CHECK: neg [[REG:.*]] 382 ; CHECK: neg [[REG:.*]]
364 ; CHECK: lock xadd dword {{.*}}, [[REG]] 383 ; CHECK: lock
384 ; CHECK-NEXT: xadd dword {{.*}}, [[REG]]
365 ; CHECK: mov {{.*}}, [[REG]] 385 ; CHECK: mov {{.*}}, [[REG]]
366 386
367 define i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) { 387 define i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) {
368 entry: 388 entry:
369 %ptr = inttoptr i32 %iptr to i64* 389 %ptr = inttoptr i32 %iptr to i64*
370 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i64* %ptr, i64 %v, i32 6) 390 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i64* %ptr, i64 %v, i32 6)
371 ret i64 %a 391 ret i64 %a
372 } 392 }
373 ; CHECK-LABEL: test_atomic_rmw_sub_64 393 ; CHECK-LABEL: test_atomic_rmw_sub_64
374 ; CHECK: push ebx 394 ; CHECK: push ebx
375 ; CHECK: mov eax, dword ptr [{{.*}}] 395 ; CHECK: mov eax, dword ptr [{{.*}}]
376 ; CHECK: mov edx, dword ptr [{{.*}}+4] 396 ; CHECK: mov edx, dword ptr [{{.*}} + 4]
377 ; CHECK: .L[[LABEL:.*]]:
378 ; CHECK: mov ebx, eax 397 ; CHECK: mov ebx, eax
379 ; CHECK: sub ebx, {{.*e.[^x]}} 398 ; CHECK: sub ebx, {{.*e.[^x]}}
380 ; CHECK: mov ecx, edx 399 ; CHECK: mov ecx, edx
381 ; CHECK: sbb ecx, {{.*e.[^x]}} 400 ; CHECK: sbb ecx, {{.*e.[^x]}}
382 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 401 ; CHECK: lock
383 ; CHECK: jne .L[[LABEL]] 402 ; Should be using NEXT: see issue 3929
403 ; CHECK: cmpxchg8b qword ptr [e{{.[^x]}}]
404 ; CHECK: jne -{{[0-9]}}
384 405
385 406
386 define i32 @test_atomic_rmw_sub_32_ignored(i32 %iptr, i32 %v) { 407 define i32 @test_atomic_rmw_sub_32_ignored(i32 %iptr, i32 %v) {
387 entry: 408 entry:
388 %ptr = inttoptr i32 %iptr to i32* 409 %ptr = inttoptr i32 %iptr to i32*
389 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6) 410 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6)
390 ret i32 %v 411 ret i32 %v
391 } 412 }
392 ; Could use "lock sub" instead of "neg; lock xadd" 413 ; Could use "lock sub" instead of "neg; lock xadd"
393 ; CHECK-LABEL: test_atomic_rmw_sub_32_ignored 414 ; CHECK-LABEL: test_atomic_rmw_sub_32_ignored
394 ; CHECK: neg [[REG:.*]] 415 ; CHECK: neg [[REG:.*]]
395 ; CHECK: lock xadd dword {{.*}}, [[REG]] 416 ; CHECK: lock
417 ; CHECK-NEXT: xadd dword {{.*}}, [[REG]]
396 418
397 ;; or 419 ;; or
398 420
399 define i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) { 421 define i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) {
400 entry: 422 entry:
401 %trunc = trunc i32 %v to i8 423 %trunc = trunc i32 %v to i8
402 %ptr = inttoptr i32 %iptr to i8* 424 %ptr = inttoptr i32 %iptr to i8*
403 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i8* %ptr, i8 %trunc, i32 6) 425 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i8* %ptr, i8 %trunc, i32 6)
404 %a_ext = zext i8 %a to i32 426 %a_ext = zext i8 %a to i32
405 ret i32 %a_ext 427 ret i32 %a_ext
406 } 428 }
407 ; CHECK-LABEL: test_atomic_rmw_or_8 429 ; CHECK-LABEL: test_atomic_rmw_or_8
408 ; CHECK: mov al, byte ptr 430 ; CHECK: mov al, byte ptr
409 ; CHECK: .L[[LABEL:.*]]:
410 ; Dest cannot be eax here, because eax is used for the old value. Also want 431 ; Dest cannot be eax here, because eax is used for the old value. Also want
411 ; to make sure that cmpxchg's source is the same register. 432 ; to make sure that cmpxchg's source is the same register.
412 ; CHECK: or [[REG:[^a].]] 433 ; CHECK: or [[REG:[^a].]]
413 ; CHECK: lock cmpxchg byte ptr [e{{[^a].}}], [[REG]] 434 ; CHECK: lock
414 ; CHECK: jne .L[[LABEL]] 435 ; CHECK-NEXT: cmpxchg byte ptr [e{{[^a].}}], [[REG]]
436 ; CHECK: jne -{{[0-9]}}
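; For context, the expected pattern is a cmpxchg retry loop; a sketch with
; illustrative registers:
;     mov  al, byte ptr [ecx]          ; al = old value (cmpxchg compares against al)
;   retry:
;     mov  dl, al
;     or   dl, bl                      ; dl = old | v
;     lock cmpxchg byte ptr [ecx], dl
;     jne  retry                       ; on failure al is reloaded with the current value
; Keeping the destination of the "or" out of al is what the [[REG:[^a].]]
; pattern above enforces.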
415 437
416 define i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) { 438 define i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) {
417 entry: 439 entry:
418 %trunc = trunc i32 %v to i16 440 %trunc = trunc i32 %v to i16
419 %ptr = inttoptr i32 %iptr to i16* 441 %ptr = inttoptr i32 %iptr to i16*
420 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i16* %ptr, i16 %trunc, i32 6) 442 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i16* %ptr, i16 %trunc, i32 6)
421 %a_ext = zext i16 %a to i32 443 %a_ext = zext i16 %a to i32
422 ret i32 %a_ext 444 ret i32 %a_ext
423 } 445 }
424 ; CHECK-LABEL: test_atomic_rmw_or_16 446 ; CHECK-LABEL: test_atomic_rmw_or_16
425 ; CHECK: mov ax, word ptr 447 ; CHECK: mov ax, word ptr
426 ; CHECK: .L[[LABEL:.*]]:
427 ; CHECK: or [[REG:[^a].]] 448 ; CHECK: or [[REG:[^a].]]
428 ; CHECK: lock cmpxchg word ptr [e{{[^a].}}], [[REG]] 449 ; CHECK: lock
429 ; CHECK: jne .L[[LABEL]] 450 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}], [[REG]]
451 ; CHECK: jne -{{[0-9]}}
430 452
431 define i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) { 453 define i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) {
432 entry: 454 entry:
433 %ptr = inttoptr i32 %iptr to i32* 455 %ptr = inttoptr i32 %iptr to i32*
434 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6) 456 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6)
435 ret i32 %a 457 ret i32 %a
436 } 458 }
437 ; CHECK-LABEL: test_atomic_rmw_or_32 459 ; CHECK-LABEL: test_atomic_rmw_or_32
438 ; CHECK: mov eax, dword ptr 460 ; CHECK: mov eax, dword ptr
439 ; CHECK: .L[[LABEL:.*]]:
440 ; CHECK: or [[REG:e[^a].]] 461 ; CHECK: or [[REG:e[^a].]]
441 ; CHECK: lock cmpxchg dword ptr [e{{[^a].}}], [[REG]] 462 ; CHECK: lock
442 ; CHECK: jne .L[[LABEL]] 463 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}], [[REG]]
464 ; CHECK: jne -{{[0-9]}}
443 465
444 define i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) { 466 define i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) {
445 entry: 467 entry:
446 %ptr = inttoptr i32 %iptr to i64* 468 %ptr = inttoptr i32 %iptr to i64*
447 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i64* %ptr, i64 %v, i32 6) 469 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i64* %ptr, i64 %v, i32 6)
448 ret i64 %a 470 ret i64 %a
449 } 471 }
450 ; CHECK-LABEL: test_atomic_rmw_or_64 472 ; CHECK-LABEL: test_atomic_rmw_or_64
451 ; CHECK: push ebx 473 ; CHECK: push ebx
452 ; CHECK: mov eax, dword ptr [{{.*}}] 474 ; CHECK: mov eax, dword ptr [{{.*}}]
453 ; CHECK: mov edx, dword ptr [{{.*}}+4] 475 ; CHECK: mov edx, dword ptr [{{.*}} + 4]
454 ; CHECK: .L[[LABEL:.*]]:
455 ; CHECK: mov ebx, eax 476 ; CHECK: mov ebx, eax
456 ; CHECK: or ebx, {{.*e.[^x]}} 477 ; CHECK: or ebx, {{.*e.[^x]}}
457 ; CHECK: mov ecx, edx 478 ; CHECK: mov ecx, edx
458 ; CHECK: or ecx, {{.*e.[^x]}} 479 ; CHECK: or ecx, {{.*e.[^x]}}
459 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 480 ; CHECK: lock
460 ; CHECK: jne .L[[LABEL]] 481 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}]
482 ; CHECK: jne -{{[0-9]}}
461 483
462 define i32 @test_atomic_rmw_or_32_ignored(i32 %iptr, i32 %v) { 484 define i32 @test_atomic_rmw_or_32_ignored(i32 %iptr, i32 %v) {
463 entry: 485 entry:
464 %ptr = inttoptr i32 %iptr to i32* 486 %ptr = inttoptr i32 %iptr to i32*
465 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6) 487 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6)
466 ret i32 %v 488 ret i32 %v
467 } 489 }
468 ; CHECK-LABEL: test_atomic_rmw_or_32_ignored 490 ; CHECK-LABEL: test_atomic_rmw_or_32_ignored
469 ; Could just "lock or", if we inspect the liveness information first. 491 ; Could just "lock or", if we inspect the liveness information first.
470 ; Would also need a way to introduce "lock"'edness to binary 492 ; Would also need a way to introduce "lock"'edness to binary
471 ; operators without introducing overhead on the more common binary ops. 493 ; operators without introducing overhead on the more common binary ops.
472 ; CHECK: mov eax, dword ptr 494 ; CHECK: mov eax, dword ptr
473 ; CHECK: .L[[LABEL:.*]]:
474 ; CHECK: or [[REG:e[^a].]] 495 ; CHECK: or [[REG:e[^a].]]
475 ; CHECK: lock cmpxchg dword ptr [e{{[^a].}}], [[REG]] 496 ; CHECK: lock
476 ; CHECK: jne .L[[LABEL]] 497 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}], [[REG]]
498 ; CHECK: jne -{{[0-9]}}
477 499
478 ;; and 500 ;; and
479 501
480 define i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) { 502 define i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) {
481 entry: 503 entry:
482 %trunc = trunc i32 %v to i8 504 %trunc = trunc i32 %v to i8
483 %ptr = inttoptr i32 %iptr to i8* 505 %ptr = inttoptr i32 %iptr to i8*
484 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 4, i8* %ptr, i8 %trunc, i32 6) 506 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 4, i8* %ptr, i8 %trunc, i32 6)
485 %a_ext = zext i8 %a to i32 507 %a_ext = zext i8 %a to i32
486 ret i32 %a_ext 508 ret i32 %a_ext
487 } 509 }
488 ; CHECK-LABEL: test_atomic_rmw_and_8 510 ; CHECK-LABEL: test_atomic_rmw_and_8
489 ; CHECK: mov al, byte ptr 511 ; CHECK: mov al, byte ptr
490 ; CHECK: .L[[LABEL:.*]]:
491 ; CHECK: and [[REG:[^a].]] 512 ; CHECK: and [[REG:[^a].]]
492 ; CHECK: lock cmpxchg byte ptr [e{{[^a].}}], [[REG]] 513 ; CHECK: lock
493 ; CHECK: jne .L[[LABEL]] 514 ; CHECK-NEXT: cmpxchg byte ptr [e{{[^a].}}], [[REG]]
515 ; CHECK: jne -{{[0-9]}}
494 516
495 define i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) { 517 define i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) {
496 entry: 518 entry:
497 %trunc = trunc i32 %v to i16 519 %trunc = trunc i32 %v to i16
498 %ptr = inttoptr i32 %iptr to i16* 520 %ptr = inttoptr i32 %iptr to i16*
499 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 4, i16* %ptr, i16 %trunc, i32 6) 521 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 4, i16* %ptr, i16 %trunc, i32 6)
500 %a_ext = zext i16 %a to i32 522 %a_ext = zext i16 %a to i32
501 ret i32 %a_ext 523 ret i32 %a_ext
502 } 524 }
503 ; CHECK-LABEL: test_atomic_rmw_and_16 525 ; CHECK-LABEL: test_atomic_rmw_and_16
504 ; CHECK: mov ax, word ptr 526 ; CHECK: mov ax, word ptr
505 ; CHECK: .L[[LABEL:.*]]:
506 ; CHECK: and 527 ; CHECK: and
507 ; CHECK: lock cmpxchg word ptr [e{{[^a].}}] 528 ; CHECK: lock
508 ; CHECK: jne .L[[LABEL]] 529 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}]
530 ; CHECK: jne -{{[0-9]}}
509 531
510 define i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) { 532 define i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) {
511 entry: 533 entry:
512 %ptr = inttoptr i32 %iptr to i32* 534 %ptr = inttoptr i32 %iptr to i32*
513 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6) 535 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6)
514 ret i32 %a 536 ret i32 %a
515 } 537 }
516 ; CHECK-LABEL: test_atomic_rmw_and_32 538 ; CHECK-LABEL: test_atomic_rmw_and_32
517 ; CHECK: mov eax, dword ptr 539 ; CHECK: mov eax, dword ptr
518 ; CHECK: .L[[LABEL:.*]]:
519 ; CHECK: and 540 ; CHECK: and
520 ; CHECK: lock cmpxchg dword ptr [e{{[^a].}}] 541 ; CHECK: lock
521 ; CHECK: jne .L[[LABEL]] 542 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}]
543 ; CHECK: jne -{{[0-9]}}
522 544
523 define i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) { 545 define i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) {
524 entry: 546 entry:
525 %ptr = inttoptr i32 %iptr to i64* 547 %ptr = inttoptr i32 %iptr to i64*
526 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i64* %ptr, i64 %v, i32 6) 548 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i64* %ptr, i64 %v, i32 6)
527 ret i64 %a 549 ret i64 %a
528 } 550 }
529 ; CHECK-LABEL: test_atomic_rmw_and_64 551 ; CHECK-LABEL: test_atomic_rmw_and_64
530 ; CHECK: push ebx 552 ; CHECK: push ebx
531 ; CHECK: mov eax, dword ptr [{{.*}}] 553 ; CHECK: mov eax, dword ptr [{{.*}}]
532 ; CHECK: mov edx, dword ptr [{{.*}}+4] 554 ; CHECK: mov edx, dword ptr [{{.*}} + 4]
533 ; CHECK: .L[[LABEL:.*]]:
534 ; CHECK: mov ebx, eax 555 ; CHECK: mov ebx, eax
535 ; CHECK: and ebx, {{.*e.[^x]}} 556 ; CHECK: and ebx, {{.*e.[^x]}}
536 ; CHECK: mov ecx, edx 557 ; CHECK: mov ecx, edx
537 ; CHECK: and ecx, {{.*e.[^x]}} 558 ; CHECK: and ecx, {{.*e.[^x]}}
538 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 559 ; CHECK: lock
539 ; CHECK: jne .L[[LABEL]] 560 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}]
561 ; CHECK: jne -{{[0-9]}}
540 562
541 define i32 @test_atomic_rmw_and_32_ignored(i32 %iptr, i32 %v) { 563 define i32 @test_atomic_rmw_and_32_ignored(i32 %iptr, i32 %v) {
542 entry: 564 entry:
543 %ptr = inttoptr i32 %iptr to i32* 565 %ptr = inttoptr i32 %iptr to i32*
544 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6) 566 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6)
545 ret i32 %v 567 ret i32 %v
546 } 568 }
547 ; CHECK-LABEL: test_atomic_rmw_and_32_ignored 569 ; CHECK-LABEL: test_atomic_rmw_and_32_ignored
548 ; Could just "lock and" 570 ; Could just "lock and"
549 ; CHECK: mov eax, dword ptr 571 ; CHECK: mov eax, dword ptr
550 ; CHECK: .L[[LABEL:.*]]:
551 ; CHECK: and 572 ; CHECK: and
552 ; CHECK: lock cmpxchg dword ptr [e{{[^a].}}] 573 ; CHECK: lock
553 ; CHECK: jne .L[[LABEL]] 574 ; Should be using NEXT: see issue 3929
575 ; CHECK: cmpxchg dword ptr [e{{[^a].}}]
576 ; CHECK: jne -{{[0-9]}}
554 577
555 ;; xor 578 ;; xor
556 579
557 define i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) { 580 define i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) {
558 entry: 581 entry:
559 %trunc = trunc i32 %v to i8 582 %trunc = trunc i32 %v to i8
560 %ptr = inttoptr i32 %iptr to i8* 583 %ptr = inttoptr i32 %iptr to i8*
561 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 5, i8* %ptr, i8 %trunc, i32 6) 584 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 5, i8* %ptr, i8 %trunc, i32 6)
562 %a_ext = zext i8 %a to i32 585 %a_ext = zext i8 %a to i32
563 ret i32 %a_ext 586 ret i32 %a_ext
564 } 587 }
565 ; CHECK-LABEL: test_atomic_rmw_xor_8 588 ; CHECK-LABEL: test_atomic_rmw_xor_8
566 ; CHECK: mov al, byte ptr 589 ; CHECK: mov al, byte ptr
567 ; CHECK: .L[[LABEL:.*]]:
568 ; CHECK: xor [[REG:[^a].]] 590 ; CHECK: xor [[REG:[^a].]]
569 ; CHECK: lock cmpxchg byte ptr [e{{[^a].}}], [[REG]] 591 ; CHECK: lock
570 ; CHECK: jne .L[[LABEL]] 592 ; CHECK-NEXT: cmpxchg byte ptr [e{{[^a].}}], [[REG]]
593 ; CHECK: jne -{{[0-9]}}
571 594
572 define i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) { 595 define i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) {
573 entry: 596 entry:
574 %trunc = trunc i32 %v to i16 597 %trunc = trunc i32 %v to i16
575 %ptr = inttoptr i32 %iptr to i16* 598 %ptr = inttoptr i32 %iptr to i16*
576 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 5, i16* %ptr, i16 %trunc, i32 6) 599 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 5, i16* %ptr, i16 %trunc, i32 6)
577 %a_ext = zext i16 %a to i32 600 %a_ext = zext i16 %a to i32
578 ret i32 %a_ext 601 ret i32 %a_ext
579 } 602 }
580 ; CHECK-LABEL: test_atomic_rmw_xor_16 603 ; CHECK-LABEL: test_atomic_rmw_xor_16
581 ; CHECK: mov ax, word ptr 604 ; CHECK: mov ax, word ptr
582 ; CHECK: .L[[LABEL:.*]]:
583 ; CHECK: xor 605 ; CHECK: xor
584 ; CHECK: lock cmpxchg word ptr [e{{[^a].}}] 606 ; CHECK: lock
585 ; CHECK: jne .L[[LABEL]] 607 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}]
608 ; CHECK: jne -{{[0-9]}}
586 609
587 610
588 define i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) { 611 define i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) {
589 entry: 612 entry:
590 %ptr = inttoptr i32 %iptr to i32* 613 %ptr = inttoptr i32 %iptr to i32*
591 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6) 614 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6)
592 ret i32 %a 615 ret i32 %a
593 } 616 }
594 ; CHECK-LABEL: test_atomic_rmw_xor_32 617 ; CHECK-LABEL: test_atomic_rmw_xor_32
595 ; CHECK: mov eax, dword ptr 618 ; CHECK: mov eax, dword ptr
596 ; CHECK: .L[[LABEL:.*]]:
597 ; CHECK: xor 619 ; CHECK: xor
598 ; CHECK: lock cmpxchg dword ptr [e{{[^a].}}] 620 ; CHECK: lock
599 ; CHECK: jne .L[[LABEL]] 621 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}]
622 ; CHECK: jne -{{[0-9]}}
600 623
601 define i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) { 624 define i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) {
602 entry: 625 entry:
603 %ptr = inttoptr i32 %iptr to i64* 626 %ptr = inttoptr i32 %iptr to i64*
604 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i64* %ptr, i64 %v, i32 6) 627 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i64* %ptr, i64 %v, i32 6)
605 ret i64 %a 628 ret i64 %a
606 } 629 }
607 ; CHECK-LABEL: test_atomic_rmw_xor_64 630 ; CHECK-LABEL: test_atomic_rmw_xor_64
608 ; CHECK: push ebx 631 ; CHECK: push ebx
609 ; CHECK: mov eax, dword ptr [{{.*}}] 632 ; CHECK: mov eax, dword ptr [{{.*}}]
610 ; CHECK: mov edx, dword ptr [{{.*}}+4] 633 ; CHECK: mov edx, dword ptr [{{.*}} + 4]
611 ; CHECK: .L[[LABEL:.*]]:
612 ; CHECK: mov ebx, eax 634 ; CHECK: mov ebx, eax
613 ; CHECK: or ebx, {{.*e.[^x]}} 635 ; CHECK: or ebx, {{.*e.[^x]}}
614 ; CHECK: mov ecx, edx 636 ; CHECK: mov ecx, edx
615 ; CHECK: or ecx, {{.*e.[^x]}} 637 ; CHECK: or ecx, {{.*e.[^x]}}
616 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 638 ; CHECK: lock
617 ; CHECK: jne .L[[LABEL]] 639 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}]
640 ; CHECK: jne -{{[0-9]}}
618 641
619 define i32 @test_atomic_rmw_xor_32_ignored(i32 %iptr, i32 %v) { 642 define i32 @test_atomic_rmw_xor_32_ignored(i32 %iptr, i32 %v) {
620 entry: 643 entry:
621 %ptr = inttoptr i32 %iptr to i32* 644 %ptr = inttoptr i32 %iptr to i32*
622 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6) 645 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6)
623 ret i32 %v 646 ret i32 %v
624 } 647 }
625 ; CHECK-LABEL: test_atomic_rmw_xor_32_ignored 648 ; CHECK-LABEL: test_atomic_rmw_xor_32_ignored
626 ; CHECK: mov eax, dword ptr 649 ; CHECK: mov eax, dword ptr
627 ; CHECK: .L[[LABEL:.*]]:
628 ; CHECK: xor 650 ; CHECK: xor
629 ; CHECK: lock cmpxchg dword ptr [e{{[^a].}}] 651 ; CHECK: lock
630 ; CHECK: jne .L[[LABEL]] 652 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}]
653 ; CHECK: jne -{{[0-9]}}
631 654
632 ;; exchange 655 ;; exchange
633 656
634 define i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) { 657 define i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) {
635 entry: 658 entry:
636 %trunc = trunc i32 %v to i8 659 %trunc = trunc i32 %v to i8
637 %ptr = inttoptr i32 %iptr to i8* 660 %ptr = inttoptr i32 %iptr to i8*
638 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i8* %ptr, i8 %trunc, i32 6) 661 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i8* %ptr, i8 %trunc, i32 6)
639 %a_ext = zext i8 %a to i32 662 %a_ext = zext i8 %a to i32
640 ret i32 %a_ext 663 ret i32 %a_ext
(...skipping 26 matching lines...)
667 %ptr = inttoptr i32 %iptr to i64* 690 %ptr = inttoptr i32 %iptr to i64*
668 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i64* %ptr, i64 %v, i32 6) 691 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i64* %ptr, i64 %v, i32 6)
669 ret i64 %a 692 ret i64 %a
670 } 693 }
671 ; CHECK-LABEL: test_atomic_rmw_xchg_64 694 ; CHECK-LABEL: test_atomic_rmw_xchg_64
672 ; CHECK: push ebx 695 ; CHECK: push ebx
673 ; CHECK-DAG: mov edx 696 ; CHECK-DAG: mov edx
674 ; CHECK-DAG: mov eax 697 ; CHECK-DAG: mov eax
675 ; CHECK-DAG: mov ecx 698 ; CHECK-DAG: mov ecx
676 ; CHECK-DAG: mov ebx 699 ; CHECK-DAG: mov ebx
677 ; CHECK: .L[[LABEL:.*]]: 700 ; CHECK: lock
678 ; CHECK: lock cmpxchg8b qword ptr [{{e.[^x]}}] 701 ; CHECK-NEXT: cmpxchg8b qword ptr [{{e.[^x]}}]
679 ; CHECK: jne .L[[LABEL]] 702 ; CHECK: jne -{{[0-9]}}
680 703
681 define i32 @test_atomic_rmw_xchg_32_ignored(i32 %iptr, i32 %v) { 704 define i32 @test_atomic_rmw_xchg_32_ignored(i32 %iptr, i32 %v) {
682 entry: 705 entry:
683 %ptr = inttoptr i32 %iptr to i32* 706 %ptr = inttoptr i32 %iptr to i32*
684 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6) 707 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6)
685 ret i32 %v 708 ret i32 %v
686 } 709 }
687 ; In this case, ignoring the return value doesn't help. The xchg is 710 ; In this case, ignoring the return value doesn't help. The xchg is
688 ; used to do an atomic store. 711 ; used to do an atomic store.
689 ; CHECK-LABEL: test_atomic_rmw_xchg_32_ignored 712 ; CHECK-LABEL: test_atomic_rmw_xchg_32_ignored
690 ; CHECK: xchg dword ptr {{.*}}, [[REG:.*]] 713 ; CHECK: xchg dword ptr {{.*}}, [[REG:.*]]
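; Note that no lock prefix is expected here: an xchg with a memory operand is
; implicitly locked on x86, so the single instruction already provides the
; atomicity needed for this store-like use.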
691 714
692 ;;;; Cmpxchg 715 ;;;; Cmpxchg
693 716
694 define i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) { 717 define i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) {
695 entry: 718 entry:
696 %trunc_exp = trunc i32 %expected to i8 719 %trunc_exp = trunc i32 %expected to i8
697 %trunc_des = trunc i32 %desired to i8 720 %trunc_des = trunc i32 %desired to i8
698 %ptr = inttoptr i32 %iptr to i8* 721 %ptr = inttoptr i32 %iptr to i8*
699 %old = call i8 @llvm.nacl.atomic.cmpxchg.i8(i8* %ptr, i8 %trunc_exp, 722 %old = call i8 @llvm.nacl.atomic.cmpxchg.i8(i8* %ptr, i8 %trunc_exp,
700 i8 %trunc_des, i32 6, i32 6) 723 i8 %trunc_des, i32 6, i32 6)
701 %old_ext = zext i8 %old to i32 724 %old_ext = zext i8 %old to i32
702 ret i32 %old_ext 725 ret i32 %old_ext
703 } 726 }
704 ; CHECK-LABEL: test_atomic_cmpxchg_8 727 ; CHECK-LABEL: test_atomic_cmpxchg_8
705 ; CHECK: mov al, {{.*}} 728 ; CHECK: mov al, {{.*}}
706 ; Need to check that eax isn't used as the address register or the desired, 729 ; Need to check that eax isn't used as the address register or the desired,
707 ; since it is already used as the *expected* register. 730 ; since it is already used as the *expected* register.
708 ; CHECK: lock cmpxchg byte ptr [e{{[^a].}}], {{[^a]}} 731 ; CHECK: lock
732 ; CHECK-NEXT: cmpxchg byte ptr [e{{[^a].}}], {{[^a]}}
709 733
710 define i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) { 734 define i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) {
711 entry: 735 entry:
712 %trunc_exp = trunc i32 %expected to i16 736 %trunc_exp = trunc i32 %expected to i16
713 %trunc_des = trunc i32 %desired to i16 737 %trunc_des = trunc i32 %desired to i16
714 %ptr = inttoptr i32 %iptr to i16* 738 %ptr = inttoptr i32 %iptr to i16*
715 %old = call i16 @llvm.nacl.atomic.cmpxchg.i16(i16* %ptr, i16 %trunc_exp, 739 %old = call i16 @llvm.nacl.atomic.cmpxchg.i16(i16* %ptr, i16 %trunc_exp,
716 i16 %trunc_des, i32 6, i32 6) 740 i16 %trunc_des, i32 6, i32 6)
717 %old_ext = zext i16 %old to i32 741 %old_ext = zext i16 %old to i32
718 ret i32 %old_ext 742 ret i32 %old_ext
719 } 743 }
720 ; CHECK-LABEL: test_atomic_cmpxchg_16 744 ; CHECK-LABEL: test_atomic_cmpxchg_16
721 ; CHECK: mov ax, {{.*}} 745 ; CHECK: mov ax, {{.*}}
722 ; CHECK: lock cmpxchg word ptr [e{{[^a].}}], {{[^a]}} 746 ; CHECK: lock
747 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}], {{[^a]}}
723 748
724 define i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) { 749 define i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) {
725 entry: 750 entry:
726 %ptr = inttoptr i32 %iptr to i32* 751 %ptr = inttoptr i32 %iptr to i32*
727 %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected, 752 %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
728 i32 %desired, i32 6, i32 6) 753 i32 %desired, i32 6, i32 6)
729 ret i32 %old 754 ret i32 %old
730 } 755 }
731 ; CHECK-LABEL: test_atomic_cmpxchg_32 756 ; CHECK-LABEL: test_atomic_cmpxchg_32
732 ; CHECK: mov eax, {{.*}} 757 ; CHECK: mov eax, {{.*}}
733 ; CHECK: lock cmpxchg dword ptr [e{{[^a].}}], e{{[^a]}} 758 ; CHECK: lock
759 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}], e{{[^a]}}
734 760
735 define i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) { 761 define i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) {
736 entry: 762 entry:
737 %ptr = inttoptr i32 %iptr to i64* 763 %ptr = inttoptr i32 %iptr to i64*
738 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, 764 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
739 i64 %desired, i32 6, i32 6) 765 i64 %desired, i32 6, i32 6)
740 ret i64 %old 766 ret i64 %old
741 } 767 }
742 ; CHECK-LABEL: test_atomic_cmpxchg_64 768 ; CHECK-LABEL: test_atomic_cmpxchg_64
743 ; CHECK: push ebx 769 ; CHECK: push ebx
744 ; CHECK-DAG: mov edx 770 ; CHECK-DAG: mov edx
745 ; CHECK-DAG: mov eax 771 ; CHECK-DAG: mov eax
746 ; CHECK-DAG: mov ecx 772 ; CHECK-DAG: mov ecx
747 ; CHECK-DAG: mov ebx 773 ; CHECK-DAG: mov ebx
748 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 774 ; CHECK: lock
775 ; Should be using NEXT: see issue 3929
776 ; CHECK: cmpxchg8b qword ptr [e{{.[^x]}}]
749 ; edx and eax are already the return registers, so they don't actually 777 ; edx and eax are already the return registers, so they don't actually
750 ; need to be reshuffled via movs. The next test stores the result 778 ; need to be reshuffled via movs. The next test stores the result
751 ; somewhere, so in that case they do need to be mov'ed. 779 ; somewhere, so in that case they do need to be mov'ed.
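; (cmpxchg8b has a fixed register convention: edx:eax holds the expected
; value and receives the old value, while ecx:ebx holds the desired value.
; Since an i64 is also returned in edx:eax on x86-32, the result is already
; in the right place in this test.)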
752 780
753 ; Test a case where %old really does need to be copied out of edx:eax. 781 ; Test a case where %old really does need to be copied out of edx:eax.
754 define void @test_atomic_cmpxchg_64_store(i32 %ret_iptr, i32 %iptr, i64 %expected, i64 %desired) { 782 define void @test_atomic_cmpxchg_64_store(i32 %ret_iptr, i32 %iptr, i64 %expected, i64 %desired) {
755 entry: 783 entry:
756 %ptr = inttoptr i32 %iptr to i64* 784 %ptr = inttoptr i32 %iptr to i64*
757 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, 785 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
758 i64 %desired, i32 6, i32 6) 786 i64 %desired, i32 6, i32 6)
759 %__6 = inttoptr i32 %ret_iptr to i64* 787 %__6 = inttoptr i32 %ret_iptr to i64*
760 store i64 %old, i64* %__6, align 1 788 store i64 %old, i64* %__6, align 1
761 ret void 789 ret void
762 } 790 }
763 ; CHECK-LABEL: test_atomic_cmpxchg_64_store 791 ; CHECK-LABEL: test_atomic_cmpxchg_64_store
764 ; CHECK: push ebx 792 ; CHECK: push ebx
765 ; CHECK-DAG: mov edx 793 ; CHECK-DAG: mov edx
766 ; CHECK-DAG: mov eax 794 ; CHECK-DAG: mov eax
767 ; CHECK-DAG: mov ecx 795 ; CHECK-DAG: mov ecx
768 ; CHECK-DAG: mov ebx 796 ; CHECK-DAG: mov ebx
769 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 797 ; CHECK: lock
798 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}]
770 ; CHECK: mov {{.*}}, edx 799 ; CHECK: mov {{.*}}, edx
771 ; CHECK: mov {{.*}}, eax 800 ; CHECK: mov {{.*}}, eax
772 801
773 ; Test with some more register pressure. When we have an alloca, ebp is 802 ; Test with some more register pressure. When we have an alloca, ebp is
774 ; used to manage the stack frame, so it cannot be used as a register either. 803 ; used to manage the stack frame, so it cannot be used as a register either.
776 define i64 @test_atomic_cmpxchg_64_alloca(i32 %iptr, i64 %expected, i64 %desired) { 804 define i64 @test_atomic_cmpxchg_64_alloca(i32 %iptr, i64 %expected, i64 %desired) {
776 entry: 805 entry:
777 %alloca_ptr = alloca i8, i32 16, align 16 806 %alloca_ptr = alloca i8, i32 16, align 16
778 %ptr = inttoptr i32 %iptr to i64* 807 %ptr = inttoptr i32 %iptr to i64*
779 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, 808 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
(...skipping 10 matching lines...)
790 ; CHECK: push ebx 819 ; CHECK: push ebx
791 ; CHECK-DAG: mov edx 820 ; CHECK-DAG: mov edx
792 ; CHECK-DAG: mov eax 821 ; CHECK-DAG: mov eax
793 ; CHECK-DAG: mov ecx 822 ; CHECK-DAG: mov ecx
794 ; CHECK-DAG: mov ebx 823 ; CHECK-DAG: mov ebx
795 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired). 824 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired).
796 ; It also cannot be ebp since we use that for alloca. Also make sure it's 825 ; It also cannot be ebp since we use that for alloca. Also make sure it's
797 ; not esp, since that's the stack pointer and mucking with it will break 826 ; not esp, since that's the stack pointer and mucking with it will break
798 ; the later use_ptr function call. 827 ; the later use_ptr function call.
799 ; That pretty much leaves esi or edi as the only viable registers. 828 ; That pretty much leaves esi or edi as the only viable registers.
800 ; CHECK: lock cmpxchg8b qword ptr [e{{[ds]}}i] 829 ; CHECK: lock
830 ; Should be using NEXT: see issue 3929
831 ; CHECK: cmpxchg8b qword ptr [e{{[ds]}}i]
801 ; CHECK: call use_ptr 832 ; CHECK: call use_ptr
802 833
803 define i32 @test_atomic_cmpxchg_32_ignored(i32 %iptr, i32 %expected, i32 %desired) { 834 define i32 @test_atomic_cmpxchg_32_ignored(i32 %iptr, i32 %expected, i32 %desired) {
804 entry: 835 entry:
805 %ptr = inttoptr i32 %iptr to i32* 836 %ptr = inttoptr i32 %iptr to i32*
806 %ignored = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected, 837 %ignored = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
807 i32 %desired, i32 6, i32 6) 838 i32 %desired, i32 6, i32 6)
808 ret i32 0 839 ret i32 0
809 } 840 }
810 ; CHECK-LABEL: test_atomic_cmpxchg_32_ignored 841 ; CHECK-LABEL: test_atomic_cmpxchg_32_ignored
811 ; CHECK: mov eax, {{.*}} 842 ; CHECK: mov eax, {{.*}}
812 ; CHECK: lock cmpxchg dword ptr [e{{[^a].}}] 843 ; CHECK: lock
844 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}]
813 845
814 define i64 @test_atomic_cmpxchg_64_ignored(i32 %iptr, i64 %expected, i64 %desired) { 846 define i64 @test_atomic_cmpxchg_64_ignored(i32 %iptr, i64 %expected, i64 %desired) {
815 entry: 847 entry:
816 %ptr = inttoptr i32 %iptr to i64* 848 %ptr = inttoptr i32 %iptr to i64*
817 %ignored = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, 849 %ignored = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
818 i64 %desired, i32 6, i32 6) 850 i64 %desired, i32 6, i32 6)
819 ret i64 0 851 ret i64 0
820 } 852 }
821 ; CHECK-LABEL: test_atomic_cmpxchg_64_ignored 853 ; CHECK-LABEL: test_atomic_cmpxchg_64_ignored
822 ; CHECK: push ebx 854 ; CHECK: push ebx
823 ; CHECK-DAG: mov edx 855 ; CHECK-DAG: mov edx
824 ; CHECK-DAG: mov eax 856 ; CHECK-DAG: mov eax
825 ; CHECK-DAG: mov ecx 857 ; CHECK-DAG: mov ecx
826 ; CHECK-DAG: mov ebx 858 ; CHECK-DAG: mov ebx
827 ; CHECK: lock cmpxchg8b qword ptr [e{{.[^x]}}] 859 ; CHECK: lock
860 ; Should be using NEXT: see issue 3929
861 ; CHECK: cmpxchg8b qword ptr [e{{.[^x]}}]
828 862
829 ;;;; Fence and is-lock-free. 863 ;;;; Fence and is-lock-free.
830 864
831 define void @test_atomic_fence() { 865 define void @test_atomic_fence() {
832 entry: 866 entry:
833 call void @llvm.nacl.atomic.fence(i32 6) 867 call void @llvm.nacl.atomic.fence(i32 6)
834 ret void 868 ret void
835 } 869 }
836 ; CHECK-LABEL: test_atomic_fence 870 ; CHECK-LABEL: test_atomic_fence
837 ; CHECK: mfence 871 ; CHECK: mfence
(...skipping 58 matching lines...)
896 ret i32 %z 930 ret i32 %z
897 } 931 }
898 ; CHECK-LABEL: test_atomic_is_lock_free_can_dce 932 ; CHECK-LABEL: test_atomic_is_lock_free_can_dce
899 ; CHECK: mov {{.*}}, 1 933 ; CHECK: mov {{.*}}, 1
900 ; CHECK: ret 934 ; CHECK: ret
901 ; CHECK: add 935 ; CHECK: add
902 ; CHECK: ret 936 ; CHECK: ret
903 937
904 ; ERRORS-NOT: ICE translation error 938 ; ERRORS-NOT: ICE translation error
905 ; DUMP-NOT: SZ 939 ; DUMP-NOT: SZ