OLD | NEW |
1 ; RUN: opt -nacl-rewrite-atomics -S < %s | FileCheck %s | 1 ; RUN: opt -nacl-rewrite-atomics -S < %s | FileCheck %s |
2 ; | 2 ; |
3 ; Validate that sequentially consistent atomic loads/stores get rewritten into | 3 ; Validate that sequentially consistent atomic loads/stores get rewritten into |
4 ; NaCl atomic builtins with sequentially-consistent memory ordering (enum value | 4 ; NaCl atomic builtins with sequentially-consistent memory ordering (enum value |
5 ; 6). | 5 ; 6). |
6 | 6 |
7 target datalayout = "p:32:32:32" | 7 target datalayout = "p:32:32:32" |
8 | 8 |
9 ; CHECK-LABEL: @test_atomic_load_i8 | 9 ; CHECK-LABEL: @test_atomic_load_i8 |
10 define zeroext i8 @test_atomic_load_i8(i8* %ptr) { | 10 define zeroext i8 @test_atomic_load_i8(i8* %ptr) { |
11 ; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 6) | 11 ; CHECK-NEXT: %res = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 6) |
12 %res = load atomic i8* %ptr seq_cst, align 1 | 12 %res = load atomic i8, i8* %ptr seq_cst, align 1 |
13 ret i8 %res ; CHECK-NEXT: ret i8 %res | 13 ret i8 %res ; CHECK-NEXT: ret i8 %res |
14 } | 14 } |
15 | 15 |
16 ; CHECK-LABEL: @test_atomic_store_i8 | 16 ; CHECK-LABEL: @test_atomic_store_i8 |
17 define void @test_atomic_store_i8(i8* %ptr, i8 zeroext %value) { | 17 define void @test_atomic_store_i8(i8* %ptr, i8 zeroext %value) { |
18 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i8(i8 %value, i8* %ptr, i32 6) | 18 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i8(i8 %value, i8* %ptr, i32 6) |
19 store atomic i8 %value, i8* %ptr seq_cst, align 1 | 19 store atomic i8 %value, i8* %ptr seq_cst, align 1 |
20 ret void ; CHECK-NEXT: ret void | 20 ret void ; CHECK-NEXT: ret void |
21 } | 21 } |
22 | 22 |
23 ; CHECK-LABEL: @test_atomic_load_i16 | 23 ; CHECK-LABEL: @test_atomic_load_i16 |
24 define zeroext i16 @test_atomic_load_i16(i16* %ptr) { | 24 define zeroext i16 @test_atomic_load_i16(i16* %ptr) { |
25 ; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 6) | 25 ; CHECK-NEXT: %res = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 6) |
26 %res = load atomic i16* %ptr seq_cst, align 2 | 26 %res = load atomic i16, i16* %ptr seq_cst, align 2 |
27 ret i16 %res ; CHECK-NEXT: ret i16 %res | 27 ret i16 %res ; CHECK-NEXT: ret i16 %res |
28 } | 28 } |
29 | 29 |
30 ; CHECK-LABEL: @test_atomic_store_i16 | 30 ; CHECK-LABEL: @test_atomic_store_i16 |
31 define void @test_atomic_store_i16(i16* %ptr, i16 zeroext %value) { | 31 define void @test_atomic_store_i16(i16* %ptr, i16 zeroext %value) { |
32 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i16(i16 %value, i16* %ptr, i32 6) | 32 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i16(i16 %value, i16* %ptr, i32 6) |
33 store atomic i16 %value, i16* %ptr seq_cst, align 2 | 33 store atomic i16 %value, i16* %ptr seq_cst, align 2 |
34 ret void ; CHECK-NEXT: ret void | 34 ret void ; CHECK-NEXT: ret void |
35 } | 35 } |
36 | 36 |
37 ; CHECK-LABEL: @test_atomic_load_i32 | 37 ; CHECK-LABEL: @test_atomic_load_i32 |
38 define i32 @test_atomic_load_i32(i32* %ptr) { | 38 define i32 @test_atomic_load_i32(i32* %ptr) { |
39 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6) | 39 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6) |
40 %res = load atomic i32* %ptr seq_cst, align 4 | 40 %res = load atomic i32, i32* %ptr seq_cst, align 4 |
41 ret i32 %res ; CHECK-NEXT: ret i32 %res | 41 ret i32 %res ; CHECK-NEXT: ret i32 %res |
42 } | 42 } |
43 | 43 |
44 ; CHECK-LABEL: @test_atomic_store_i32 | 44 ; CHECK-LABEL: @test_atomic_store_i32 |
45 define void @test_atomic_store_i32(i32* %ptr, i32 %value) { | 45 define void @test_atomic_store_i32(i32* %ptr, i32 %value) { |
46 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value, i32* %ptr, i32 6) | 46 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value, i32* %ptr, i32 6) |
47 store atomic i32 %value, i32* %ptr seq_cst, align 4 | 47 store atomic i32 %value, i32* %ptr seq_cst, align 4 |
48 ret void ; CHECK-NEXT: ret void | 48 ret void ; CHECK-NEXT: ret void |
49 } | 49 } |
50 | 50 |
51 ; CHECK-LABEL: @test_atomic_load_i64 | 51 ; CHECK-LABEL: @test_atomic_load_i64 |
52 define i64 @test_atomic_load_i64(i64* %ptr) { | 52 define i64 @test_atomic_load_i64(i64* %ptr) { |
53 ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6) | 53 ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6) |
54 %res = load atomic i64* %ptr seq_cst, align 8 | 54 %res = load atomic i64, i64* %ptr seq_cst, align 8 |
55 ret i64 %res ; CHECK-NEXT: ret i64 %res | 55 ret i64 %res ; CHECK-NEXT: ret i64 %res |
56 } | 56 } |
57 | 57 |
58 ; CHECK-LABEL: @test_atomic_store_i64 | 58 ; CHECK-LABEL: @test_atomic_store_i64 |
59 define void @test_atomic_store_i64(i64* %ptr, i64 %value) { | 59 define void @test_atomic_store_i64(i64* %ptr, i64 %value) { |
60 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %value, i64* %ptr, i32 6) | 60 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %value, i64* %ptr, i32 6) |
61 store atomic i64 %value, i64* %ptr seq_cst, align 8 | 61 store atomic i64 %value, i64* %ptr seq_cst, align 8 |
62 ret void ; CHECK-NEXT: ret void | 62 ret void ; CHECK-NEXT: ret void |
63 } | 63 } |
64 | 64 |
65 ; CHECK-LABEL: @test_atomic_load_float | |
66 define float @test_atomic_load_float(float* %ptr) { | |
67 ; CHECK-NEXT: %ptr.cast = bitcast float* %ptr to i32* | |
68 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6) | |
69 ; CHECK-NEXT: %res.cast = bitcast i32 %res to float | |
70 %res = load atomic float* %ptr seq_cst, align 4 | |
71 ret float %res ; CHECK-NEXT: ret float %res.cast | |
72 } | |
73 | |
74 ; CHECK-LABEL: @test_atomic_store_float | |
75 define void @test_atomic_store_float(float* %ptr, float %value) { | |
76 ; CHECK-NEXT: %ptr.cast = bitcast float* %ptr to i32* | |
77 ; CHECK-NEXT: %value.cast = bitcast float %value to i32 | |
78 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr.cast, i32 6) | |
79 store atomic float %value, float* %ptr seq_cst, align 4 | |
80 ret void ; CHECK-NEXT: ret void | |
81 } | |
82 | |
83 ; CHECK-LABEL: @test_atomic_load_double | |
84 define double @test_atomic_load_double(double* %ptr) { | |
85 ; CHECK-NEXT: %ptr.cast = bitcast double* %ptr to i64* | |
86 ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr.cast, i32 6) | |
87 ; CHECK-NEXT: %res.cast = bitcast i64 %res to double | |
88 %res = load atomic double* %ptr seq_cst, align 8 | |
89 ret double %res ; CHECK-NEXT: ret double %res.cast | |
90 } | |
91 | |
92 ; CHECK-LABEL: @test_atomic_store_double | |
93 define void @test_atomic_store_double(double* %ptr, double %value) { | |
94 ; CHECK-NEXT: %ptr.cast = bitcast double* %ptr to i64* | |
95 ; CHECK-NEXT: %value.cast = bitcast double %value to i64 | |
96 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %value.cast, i64* %ptr.cast, i32 6) | |
97 store atomic double %value, double* %ptr seq_cst, align 8 | |
98 ret void ; CHECK-NEXT: ret void | |
99 } | |
100 | |
101 ; CHECK-LABEL: @test_atomic_load_i32_pointer | 65 ; CHECK-LABEL: @test_atomic_load_i32_pointer |
102 define i32* @test_atomic_load_i32_pointer(i32** %ptr) { | 66 define i32* @test_atomic_load_i32_pointer(i32** %ptr) { |
103 ; CHECK-NEXT: %ptr.cast = bitcast i32** %ptr to i32* | 67 ; CHECK-NEXT: %ptr.cast = bitcast i32** %ptr to i32* |
104 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6) | 68 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6) |
105 ; CHECK-NEXT: %res.cast = inttoptr i32 %res to i32* | 69 ; CHECK-NEXT: %res.cast = inttoptr i32 %res to i32* |
106 %res = load atomic i32** %ptr seq_cst, align 4 | 70 %res = load atomic i32*, i32** %ptr seq_cst, align 4 |
107 ret i32* %res ; CHECK-NEXT: ret i32* %res.cast | 71 ret i32* %res ; CHECK-NEXT: ret i32* %res.cast |
108 } | 72 } |
109 | 73 |
110 ; CHECK-LABEL: @test_atomic_store_i32_pointer | 74 ; CHECK-LABEL: @test_atomic_store_i32_pointer |
111 define void @test_atomic_store_i32_pointer(i32** %ptr, i32* %value) { | 75 define void @test_atomic_store_i32_pointer(i32** %ptr, i32* %value) { |
112 ; CHECK-NEXT: %ptr.cast = bitcast i32** %ptr to i32* | 76 ; CHECK-NEXT: %ptr.cast = bitcast i32** %ptr to i32* |
113 ; CHECK-NEXT: %value.cast = ptrtoint i32* %value to i32 | 77 ; CHECK-NEXT: %value.cast = ptrtoint i32* %value to i32 |
114 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr.cast, i32 6) | 78 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr.cast, i32 6) |
115 store atomic i32* %value, i32** %ptr seq_cst, align 4 | 79 store atomic i32* %value, i32** %ptr seq_cst, align 4 |
116 ret void ; CHECK-NEXT: ret void | 80 ret void ; CHECK-NEXT: ret void |
117 } | 81 } |
118 | 82 |
119 ; CHECK-LABEL: @test_atomic_load_double_pointer | 83 ; CHECK-LABEL: @test_atomic_load_double_pointer |
120 define double* @test_atomic_load_double_pointer(double** %ptr) { | 84 define double* @test_atomic_load_double_pointer(double** %ptr) { |
121 ; CHECK-NEXT: %ptr.cast = bitcast double** %ptr to i32* | 85 ; CHECK-NEXT: %ptr.cast = bitcast double** %ptr to i32* |
122 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6) | 86 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6) |
123 ; CHECK-NEXT: %res.cast = inttoptr i32 %res to double* | 87 ; CHECK-NEXT: %res.cast = inttoptr i32 %res to double* |
124 %res = load atomic double** %ptr seq_cst, align 4 | 88 %res = load atomic double*, double** %ptr seq_cst, align 4 |
125 ret double* %res ; CHECK-NEXT: ret double* %res.cast | 89 ret double* %res ; CHECK-NEXT: ret double* %res.cast |
126 } | 90 } |
127 | 91 |
128 ; CHECK-LABEL: @test_atomic_store_double_pointer | 92 ; CHECK-LABEL: @test_atomic_store_double_pointer |
129 define void @test_atomic_store_double_pointer(double** %ptr, double* %value) { | 93 define void @test_atomic_store_double_pointer(double** %ptr, double* %value) { |
130 ; CHECK-NEXT: %ptr.cast = bitcast double** %ptr to i32* | 94 ; CHECK-NEXT: %ptr.cast = bitcast double** %ptr to i32* |
131 ; CHECK-NEXT: %value.cast = ptrtoint double* %value to i32 | 95 ; CHECK-NEXT: %value.cast = ptrtoint double* %value to i32 |
132 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr.cast, i32 6) | 96 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr.cast, i32 6) |
133 store atomic double* %value, double** %ptr seq_cst, align 4 | 97 store atomic double* %value, double** %ptr seq_cst, align 4 |
134 ret void ; CHECK-NEXT: ret void | 98 ret void ; CHECK-NEXT: ret void |
135 } | 99 } |
136 | |
137 ; CHECK-LABEL: @test_atomic_load_v4i8 | |
138 define <4 x i8> @test_atomic_load_v4i8(<4 x i8>* %ptr) { | |
139 ; CHECK-NEXT: %ptr.cast = bitcast <4 x i8>* %ptr to i32* | |
140 ; CHECK-NEXT: %res = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr.cast, i32 6) | |
141 ; CHECK-NEXT: %res.cast = bitcast i32 %res to <4 x i8> | |
142 %res = load atomic <4 x i8>* %ptr seq_cst, align 8 | |
143 ret <4 x i8> %res ; CHECK-NEXT: ret <4 x i8> %res.cast | |
144 } | |
145 | |
146 ; CHECK-LABEL: @test_atomic_store_v4i8 | |
147 define void @test_atomic_store_v4i8(<4 x i8>* %ptr, <4 x i8> %value) { | |
148 ; CHECK-NEXT: %ptr.cast = bitcast <4 x i8>* %ptr to i32* | |
149 ; CHECK-NEXT: %value.cast = bitcast <4 x i8> %value to i32 | |
150 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i32(i32 %value.cast, i32* %ptr.cast, i32 6) | |
151 store atomic <4 x i8> %value, <4 x i8>* %ptr seq_cst, align 8 | |
152 ret void ; CHECK-NEXT: ret void | |
153 } | |
154 | |
155 ; CHECK-LABEL: @test_atomic_load_v4i16 | |
156 define <4 x i16> @test_atomic_load_v4i16(<4 x i16>* %ptr) { | |
157 ; CHECK-NEXT: %ptr.cast = bitcast <4 x i16>* %ptr to i64* | |
158 ; CHECK-NEXT: %res = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr.cast, i32 6) | |
159 ; CHECK-NEXT: %res.cast = bitcast i64 %res to <4 x i16> | |
160 %res = load atomic <4 x i16>* %ptr seq_cst, align 8 | |
161 ret <4 x i16> %res ; CHECK-NEXT: ret <4 x i16> %res.cast | |
162 } | |
163 | |
164 ; CHECK-LABEL: @test_atomic_store_v4i16 | |
165 define void @test_atomic_store_v4i16(<4 x i16>* %ptr, <4 x i16> %value) { | |
166 ; CHECK-NEXT: %ptr.cast = bitcast <4 x i16>* %ptr to i64* | |
167 ; CHECK-NEXT: %value.cast = bitcast <4 x i16> %value to i64 | |
168 ; CHECK-NEXT: call void @llvm.nacl.atomic.store.i64(i64 %value.cast, i64* %ptr.cast, i32 6) | |
169 store atomic <4 x i16> %value, <4 x i16>* %ptr seq_cst, align 8 | |
170 ret void ; CHECK-NEXT: ret void | |
171 } | |