OLD | NEW |
1 ; Tests to make sure intrinsics are automatically upgraded. | 1 ; Tests to make sure intrinsics are automatically upgraded. |
2 ; RUN: llvm-as < %s | llvm-dis | FileCheck %s | 2 ; RUN: llvm-as < %s | llvm-dis | FileCheck %s |
3 | 3 |
4 | 4 |
5 declare <4 x float> @llvm.x86.sse.loadu.ps(i8*) nounwind readnone | 5 declare <4 x float> @llvm.x86.sse.loadu.ps(i8*) nounwind readnone |
6 declare <16 x i8> @llvm.x86.sse2.loadu.dq(i8*) nounwind readnone | 6 declare <16 x i8> @llvm.x86.sse2.loadu.dq(i8*) nounwind readnone |
7 declare <2 x double> @llvm.x86.sse2.loadu.pd(double*) nounwind readnone | 7 declare <2 x double> @llvm.x86.sse2.loadu.pd(double*) nounwind readnone |
8 define void @test_loadu(i8* %a, double* %b) { | 8 define void @test_loadu(i8* %a, double* %b) { |
9 %v0 = call <4 x float> @llvm.x86.sse.loadu.ps(i8* %a) | 9 %v0 = call <4 x float> @llvm.x86.sse.loadu.ps(i8* %a) |
10 %v1 = call <16 x i8> @llvm.x86.sse2.loadu.dq(i8* %a) | 10 %v1 = call <16 x i8> @llvm.x86.sse2.loadu.dq(i8* %a) |
(...skipping 14 matching lines...) |
25 ; CHECK: store{{.*}}nontemporal | 25 ; CHECK: store{{.*}}nontemporal |
26 call void @llvm.x86.sse.movnt.ps(i8* %B, <4 x float> %A) | 26 call void @llvm.x86.sse.movnt.ps(i8* %B, <4 x float> %A) |
27 ; CHECK: store{{.*}}nontemporal | 27 ; CHECK: store{{.*}}nontemporal |
28 call void @llvm.x86.sse2.movnt.dq(i8* %B, <2 x double> %C) | 28 call void @llvm.x86.sse2.movnt.dq(i8* %B, <2 x double> %C) |
29 ; CHECK: store{{.*}}nontemporal | 29 ; CHECK: store{{.*}}nontemporal |
30 call void @llvm.x86.sse2.movnt.pd(i8* %B, <2 x double> %C) | 30 call void @llvm.x86.sse2.movnt.pd(i8* %B, <2 x double> %C) |
31 ; CHECK: store{{.*}}nontemporal | 31 ; CHECK: store{{.*}}nontemporal |
32 call void @llvm.x86.sse2.movnt.i(i8* %B, i32 %D) | 32 call void @llvm.x86.sse2.movnt.i(i8* %B, i32 %D) |
33 ret void | 33 ret void |
34 } | 34 } |
35 | |
36 declare void @llvm.prefetch(i8*, i32, i32) nounwind | |
37 | |
38 define void @p(i8* %ptr) { | |
39 ; CHECK: llvm.prefetch(i8* %ptr, i32 0, i32 1, i32 1) | |
40 tail call void @llvm.prefetch(i8* %ptr, i32 0, i32 1) | |
41 ret void | |
42 } | |
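Note on the expected output: the CHECK patterns above only match fragments of the upgraded IR. A hedged sketch of what the auto-upgrader is expected to produce for these calls, written in the typed-pointer syntax of this era, is given below; the value names (%p0, %p1), the metadata slot !0, and the alignments are illustrative assumptions, not taken from the test. The loadu intrinsics should become unaligned loads through a bitcast pointer, the movnt intrinsics should become stores tagged with !nontemporal metadata (which is all the CHECK lines pin down), and the three-operand llvm.prefetch call should gain a trailing data-cache operand, matching the CHECK in @p.

; illustrative upgraded forms (hypothetical names and alignments)
%p0 = bitcast i8* %a to <4 x float>*
%v0 = load <4 x float>* %p0, align 1                              ; from llvm.x86.sse.loadu.ps

%p1 = bitcast i8* %B to <4 x float>*
store <4 x float> %A, <4 x float>* %p1, align 16, !nontemporal !0 ; from llvm.x86.sse.movnt.ps

tail call void @llvm.prefetch(i8* %ptr, i32 0, i32 1, i32 1)      ; fourth operand (data cache) appended

!0 = metadata !{i32 1}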