OLD | NEW |
1 ; This tests each of the supported NaCl atomic instructions for every | 1 ; This tests each of the supported NaCl atomic instructions for every |
2 ; size allowed. | 2 ; size allowed. |
3 | 3 |
4 ; RUN: %p2i -i %s --args -O2 --verbose none \ | 4 ; RUN: %p2i -i %s --assemble --disassemble --args -O2 --verbose none \ |
5 ; RUN: | llvm-mc -triple=i686-none-nacl -filetype=obj \ | |
6 ; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - \ | |
7 ; RUN: | FileCheck %s | 5 ; RUN: | FileCheck %s |
8 ; RUN: %p2i -i %s --args -O2 --verbose none \ | 6 ; RUN: %p2i -i %s --assemble --disassemble --args -O2 --verbose none \ |
9 ; RUN: | llvm-mc -triple=i686-none-nacl -filetype=obj \ | 7 ; RUN: | FileCheck --check-prefix=O2 %s |
10 ; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - \ | 8 ; RUN: %p2i -i %s --assemble --disassemble --args -Om1 --verbose none \ |
11 ; RUN: | FileCheck --check-prefix=CHECKO2 %s | |
12 ; RUN: %p2i -i %s --args -Om1 --verbose none \ | |
13 ; RUN: | llvm-mc -triple=i686-none-nacl -filetype=obj \ | |
14 ; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - \ | |
15 ; RUN: | FileCheck %s | 9 ; RUN: | FileCheck %s |
16 | 10 |
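; Context for the rewritten checks below: the new RUN lines assemble and
; disassemble inside %p2i instead of piping through llvm-mc and llvm-objdump,
; so every updated check uses the integrated disassembler's spelling: DWORD PTR
; instead of dword ptr, no space after the comma, hex immediates (0x1),
; relocations spelled R_*, and named branch targets instead of raw offsets.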
17 declare i8 @llvm.nacl.atomic.load.i8(i8*, i32) | 11 declare i8 @llvm.nacl.atomic.load.i8(i8*, i32) |
18 declare i16 @llvm.nacl.atomic.load.i16(i16*, i32) | 12 declare i16 @llvm.nacl.atomic.load.i16(i16*, i32) |
19 declare i32 @llvm.nacl.atomic.load.i32(i32*, i32) | 13 declare i32 @llvm.nacl.atomic.load.i32(i32*, i32) |
20 declare i64 @llvm.nacl.atomic.load.i64(i64*, i32) | 14 declare i64 @llvm.nacl.atomic.load.i64(i64*, i32) |
21 declare void @llvm.nacl.atomic.store.i8(i8, i8*, i32) | 15 declare void @llvm.nacl.atomic.store.i8(i8, i8*, i32) |
22 declare void @llvm.nacl.atomic.store.i16(i16, i16*, i32) | 16 declare void @llvm.nacl.atomic.store.i16(i16, i16*, i32) |
23 declare void @llvm.nacl.atomic.store.i32(i32, i32*, i32) | 17 declare void @llvm.nacl.atomic.store.i32(i32, i32*, i32) |
24 declare void @llvm.nacl.atomic.store.i64(i64, i64*, i32) | 18 declare void @llvm.nacl.atomic.store.i64(i64, i64*, i32) |
(...skipping 21 matching lines...) |
46 | 40 |
47 define i32 @test_atomic_load_8(i32 %iptr) { | 41 define i32 @test_atomic_load_8(i32 %iptr) { |
48 entry: | 42 entry: |
49 %ptr = inttoptr i32 %iptr to i8* | 43 %ptr = inttoptr i32 %iptr to i8* |
50 ; parameter value "6" is for the sequential consistency memory order. | 44 ; parameter value "6" is for the sequential consistency memory order. |
51 %i = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 6) | 45 %i = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 6) |
52 %r = zext i8 %i to i32 | 46 %r = zext i8 %i to i32 |
53 ret i32 %r | 47 ret i32 %r |
54 } | 48 } |
55 ; CHECK-LABEL: test_atomic_load_8 | 49 ; CHECK-LABEL: test_atomic_load_8 |
56 ; CHECK: mov {{.*}}, dword | 50 ; CHECK: mov {{.*}},DWORD |
57 ; CHECK: mov {{.*}}, byte | 51 ; CHECK: mov {{.*}},BYTE |
58 | 52 |
59 define i32 @test_atomic_load_16(i32 %iptr) { | 53 define i32 @test_atomic_load_16(i32 %iptr) { |
60 entry: | 54 entry: |
61 %ptr = inttoptr i32 %iptr to i16* | 55 %ptr = inttoptr i32 %iptr to i16* |
62 %i = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 6) | 56 %i = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 6) |
63 %r = zext i16 %i to i32 | 57 %r = zext i16 %i to i32 |
64 ret i32 %r | 58 ret i32 %r |
65 } | 59 } |
66 ; CHECK-LABEL: test_atomic_load_16 | 60 ; CHECK-LABEL: test_atomic_load_16 |
67 ; CHECK: mov {{.*}}, dword | 61 ; CHECK: mov {{.*}},DWORD |
68 ; CHECK: mov {{.*}}, word | 62 ; CHECK: mov {{.*}},WORD |
69 | 63 |
70 define i32 @test_atomic_load_32(i32 %iptr) { | 64 define i32 @test_atomic_load_32(i32 %iptr) { |
71 entry: | 65 entry: |
72 %ptr = inttoptr i32 %iptr to i32* | 66 %ptr = inttoptr i32 %iptr to i32* |
73 %r = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6) | 67 %r = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6) |
74 ret i32 %r | 68 ret i32 %r |
75 } | 69 } |
76 ; CHECK-LABEL: test_atomic_load_32 | 70 ; CHECK-LABEL: test_atomic_load_32 |
77 ; CHECK: mov {{.*}}, dword | 71 ; CHECK: mov {{.*}},DWORD |
78 ; CHECK: mov {{.*}}, dword | 72 ; CHECK: mov {{.*}},DWORD |
79 | 73 |
80 define i64 @test_atomic_load_64(i32 %iptr) { | 74 define i64 @test_atomic_load_64(i32 %iptr) { |
81 entry: | 75 entry: |
82 %ptr = inttoptr i32 %iptr to i64* | 76 %ptr = inttoptr i32 %iptr to i64* |
83 %r = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6) | 77 %r = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6) |
84 ret i64 %r | 78 ret i64 %r |
85 } | 79 } |
86 ; CHECK-LABEL: test_atomic_load_64 | 80 ; CHECK-LABEL: test_atomic_load_64 |
87 ; CHECK: movq x{{.*}}, qword | 81 ; CHECK: movq x{{.*}},QWORD |
88 ; CHECK: movq qword {{.*}}, x{{.*}} | 82 ; CHECK: movq QWORD {{.*}},x{{.*}} |
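; The movq pair above is the x86-32 recipe for a lock-free 64-bit load: a
; single SSE2 movq is atomic, where a pair of 32-bit movs would not be. A
; minimal sketch of the expected shape (registers and stack slot are
; illustrative, not checked):
;   movq xmm0,QWORD PTR [ecx]      ; one atomic 64-bit load
;   movq QWORD PTR [esp],xmm0      ; spill to a local slot
;   mov  eax,DWORD PTR [esp]       ; reassemble the i64 in edx:eax
;   mov  edx,DWORD PTR [esp+0x4]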
89 | 83 |
90 define i32 @test_atomic_load_32_with_arith(i32 %iptr) { | 84 define i32 @test_atomic_load_32_with_arith(i32 %iptr) { |
91 entry: | 85 entry: |
92 br label %next | 86 br label %next |
93 | 87 |
94 next: | 88 next: |
95 %ptr = inttoptr i32 %iptr to i32* | 89 %ptr = inttoptr i32 %iptr to i32* |
96 %r = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6) | 90 %r = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6) |
97 %r2 = add i32 %r, 32 | 91 %r2 = add i32 %r, 32 |
98 ret i32 %r2 | 92 ret i32 %r2 |
99 } | 93 } |
100 ; CHECK-LABEL: test_atomic_load_32_with_arith | 94 ; CHECK-LABEL: test_atomic_load_32_with_arith |
101 ; CHECK: mov {{.*}}, dword | 95 ; CHECK: mov {{.*}},DWORD |
102 ; The next instruction may be a separate load or folded into an add. | 96 ; The next instruction may be a separate load or folded into an add. |
103 ; | 97 ; |
104 ; In O2 mode, we know that the load and add are going to be fused. | 98 ; In O2 mode, we know that the load and add are going to be fused. |
105 ; CHECKO2-LABEL: test_atomic_load_32_with_arith | 99 ; O2-LABEL: test_atomic_load_32_with_arith |
106 ; CHECKO2: mov {{.*}}, dword | 100 ; O2: mov {{.*}},DWORD |
107 ; CHECKO2: add {{.*}}, dword | 101 ; O2: add {{.*}},DWORD |
108 | 102 |
109 define i32 @test_atomic_load_32_ignored(i32 %iptr) { | 103 define i32 @test_atomic_load_32_ignored(i32 %iptr) { |
110 entry: | 104 entry: |
111 %ptr = inttoptr i32 %iptr to i32* | 105 %ptr = inttoptr i32 %iptr to i32* |
112 %ignored = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6) | 106 %ignored = call i32 @llvm.nacl.atomic.load.i32(i32* %ptr, i32 6) |
113 ret i32 0 | 107 ret i32 0 |
114 } | 108 } |
115 ; CHECK-LABEL: test_atomic_load_32_ignored | 109 ; CHECK-LABEL: test_atomic_load_32_ignored |
116 ; CHECK: mov {{.*}}, dword | 110 ; CHECK: mov {{.*}},DWORD |
117 ; CHECK: mov {{.*}}, dword | 111 ; CHECK: mov {{.*}},DWORD |
118 ; CHECKO2-LABEL: test_atomic_load_32_ignored | 112 ; O2-LABEL: test_atomic_load_32_ignored |
119 ; CHECKO2: mov {{.*}}, dword | 113 ; O2: mov {{.*}},DWORD |
120 ; CHECKO2: mov {{.*}}, dword | 114 ; O2: mov {{.*}},DWORD |
121 | 115 |
122 define i64 @test_atomic_load_64_ignored(i32 %iptr) { | 116 define i64 @test_atomic_load_64_ignored(i32 %iptr) { |
123 entry: | 117 entry: |
124 %ptr = inttoptr i32 %iptr to i64* | 118 %ptr = inttoptr i32 %iptr to i64* |
125 %ignored = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6) | 119 %ignored = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 6) |
126 ret i64 0 | 120 ret i64 0 |
127 } | 121 } |
128 ; CHECK-LABEL: test_atomic_load_64_ignored | 122 ; CHECK-LABEL: test_atomic_load_64_ignored |
129 ; CHECK: movq x{{.*}}, qword | 123 ; CHECK: movq x{{.*}},QWORD |
130 ; CHECK: movq qword {{.*}}, x{{.*}} | 124 ; CHECK: movq QWORD {{.*}},x{{.*}} |
131 | 125 |
132 ;;; Store | 126 ;;; Store |
133 | 127 |
134 define void @test_atomic_store_8(i32 %iptr, i32 %v) { | 128 define void @test_atomic_store_8(i32 %iptr, i32 %v) { |
135 entry: | 129 entry: |
136 %truncv = trunc i32 %v to i8 | 130 %truncv = trunc i32 %v to i8 |
137 %ptr = inttoptr i32 %iptr to i8* | 131 %ptr = inttoptr i32 %iptr to i8* |
138 call void @llvm.nacl.atomic.store.i8(i8 %truncv, i8* %ptr, i32 6) | 132 call void @llvm.nacl.atomic.store.i8(i8 %truncv, i8* %ptr, i32 6) |
139 ret void | 133 ret void |
140 } | 134 } |
141 ; CHECK-LABEL: test_atomic_store_8 | 135 ; CHECK-LABEL: test_atomic_store_8 |
142 ; CHECK: mov byte | 136 ; CHECK: mov BYTE |
143 ; CHECK: mfence | 137 ; CHECK: mfence |
144 | 138 |
145 define void @test_atomic_store_16(i32 %iptr, i32 %v) { | 139 define void @test_atomic_store_16(i32 %iptr, i32 %v) { |
146 entry: | 140 entry: |
147 %truncv = trunc i32 %v to i16 | 141 %truncv = trunc i32 %v to i16 |
148 %ptr = inttoptr i32 %iptr to i16* | 142 %ptr = inttoptr i32 %iptr to i16* |
149 call void @llvm.nacl.atomic.store.i16(i16 %truncv, i16* %ptr, i32 6) | 143 call void @llvm.nacl.atomic.store.i16(i16 %truncv, i16* %ptr, i32 6) |
150 ret void | 144 ret void |
151 } | 145 } |
152 ; CHECK-LABEL: test_atomic_store_16 | 146 ; CHECK-LABEL: test_atomic_store_16 |
153 ; CHECK: mov word | 147 ; CHECK: mov WORD |
154 ; CHECK: mfence | 148 ; CHECK: mfence |
155 | 149 |
156 define void @test_atomic_store_32(i32 %iptr, i32 %v) { | 150 define void @test_atomic_store_32(i32 %iptr, i32 %v) { |
157 entry: | 151 entry: |
158 %ptr = inttoptr i32 %iptr to i32* | 152 %ptr = inttoptr i32 %iptr to i32* |
159 call void @llvm.nacl.atomic.store.i32(i32 %v, i32* %ptr, i32 6) | 153 call void @llvm.nacl.atomic.store.i32(i32 %v, i32* %ptr, i32 6) |
160 ret void | 154 ret void |
161 } | 155 } |
162 ; CHECK-LABEL: test_atomic_store_32 | 156 ; CHECK-LABEL: test_atomic_store_32 |
163 ; CHECK: mov dword | 157 ; CHECK: mov DWORD |
164 ; CHECK: mfence | 158 ; CHECK: mfence |
165 | 159 |
166 define void @test_atomic_store_64(i32 %iptr, i64 %v) { | 160 define void @test_atomic_store_64(i32 %iptr, i64 %v) { |
167 entry: | 161 entry: |
168 %ptr = inttoptr i32 %iptr to i64* | 162 %ptr = inttoptr i32 %iptr to i64* |
169 call void @llvm.nacl.atomic.store.i64(i64 %v, i64* %ptr, i32 6) | 163 call void @llvm.nacl.atomic.store.i64(i64 %v, i64* %ptr, i32 6) |
170 ret void | 164 ret void |
171 } | 165 } |
172 ; CHECK-LABEL: test_atomic_store_64 | 166 ; CHECK-LABEL: test_atomic_store_64 |
173 ; CHECK: movq x{{.*}}, qword | 167 ; CHECK: movq x{{.*}},QWORD |
174 ; CHECK: movq qword {{.*}}, x{{.*}} | 168 ; CHECK: movq QWORD {{.*}},x{{.*}} |
175 ; CHECK: mfence | 169 ; CHECK: mfence |
176 | 170 |
177 define void @test_atomic_store_64_const(i32 %iptr) { | 171 define void @test_atomic_store_64_const(i32 %iptr) { |
178 entry: | 172 entry: |
179 %ptr = inttoptr i32 %iptr to i64* | 173 %ptr = inttoptr i32 %iptr to i64* |
180 call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i64* %ptr, i32 6) | 174 call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i64* %ptr, i32 6) |
181 ret void | 175 ret void |
182 } | 176 } |
183 ; CHECK-LABEL: test_atomic_store_64_const | 177 ; CHECK-LABEL: test_atomic_store_64_const |
184 ; CHECK: mov {{.*}}, 1942892530 | 178 ; CHECK: mov {{.*}},0x73ce2ff2 |
185 ; CHECK: mov {{.*}}, 2874 | 179 ; CHECK: mov {{.*}},0xb3a |
186 ; CHECK: movq x{{.*}}, qword | 180 ; CHECK: movq x{{.*}},QWORD |
187 ; CHECK: movq qword {{.*}}, x{{.*}} | 181 ; CHECK: movq QWORD {{.*}},x{{.*}} |
188 ; CHECK: mfence | 182 ; CHECK: mfence |
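; The two immediates above are simply the halves of the i64 constant:
; 12345678901234 = 0xb3a73ce2ff2, so the low word is 0x73ce2ff2 (1942892530,
; as the old decimal-syntax checks spelled it) and the high word is 0xb3a
; (2874).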
189 | 183 |
190 | 184 |
191 ;;; RMW | 185 ;;; RMW |
192 | 186 |
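; A key to the @llvm.nacl.atomic.rmw.* signature as the calls below use it:
; the first i32 selects the operation (1 = add, 2 = sub, 3 = or, 4 = and,
; 5 = xor, 6 = xchg), and the trailing i32 is the memory order (always 6,
; sequential consistency, in these tests).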
193 ;; add | 187 ;; add |
194 | 188 |
195 define i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) { | 189 define i32 @test_atomic_rmw_add_8(i32 %iptr, i32 %v) { |
196 entry: | 190 entry: |
197 %trunc = trunc i32 %v to i8 | 191 %trunc = trunc i32 %v to i8 |
198 %ptr = inttoptr i32 %iptr to i8* | 192 %ptr = inttoptr i32 %iptr to i8* |
199 ; "1" is an atomic add, and "6" is sequential consistency. | 193 ; "1" is an atomic add, and "6" is sequential consistency. |
200 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 6) | 194 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 6) |
201 %a_ext = zext i8 %a to i32 | 195 %a_ext = zext i8 %a to i32 |
202 ret i32 %a_ext | 196 ret i32 %a_ext |
203 } | 197 } |
204 ; CHECK-LABEL: test_atomic_rmw_add_8 | 198 ; CHECK-LABEL: test_atomic_rmw_add_8 |
205 ; CHECK: lock | 199 ; CHECK: lock xadd BYTE {{.*}},[[REG:.*]] |
206 ; CHECK-NEXT: xadd byte {{.*}}, [[REG:.*]] | 200 ; CHECK: {{mov|movzx}} {{.*}},[[REG]] |
207 ; CHECK: {{mov|movzx}} {{.*}}, [[REG]] | |
208 | 201 |
209 define i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) { | 202 define i32 @test_atomic_rmw_add_16(i32 %iptr, i32 %v) { |
210 entry: | 203 entry: |
211 %trunc = trunc i32 %v to i16 | 204 %trunc = trunc i32 %v to i16 |
212 %ptr = inttoptr i32 %iptr to i16* | 205 %ptr = inttoptr i32 %iptr to i16* |
213 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 1, i16* %ptr, i16 %trunc, i32 6) | 206 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 1, i16* %ptr, i16 %trunc, i32 6) |
214 %a_ext = zext i16 %a to i32 | 207 %a_ext = zext i16 %a to i32 |
215 ret i32 %a_ext | 208 ret i32 %a_ext |
216 } | 209 } |
217 ; CHECK-LABEL: test_atomic_rmw_add_16 | 210 ; CHECK-LABEL: test_atomic_rmw_add_16 |
218 ; CHECK: lock | 211 ; CHECK: lock xadd WORD {{.*}},[[REG:.*]] |
219 ; CHECK-NEXT: xadd word {{.*}}, [[REG:.*]] | 212 ; CHECK: {{mov|movzx}} {{.*}},[[REG]] |
220 ; CHECK: {{mov|movzx}} {{.*}}, [[REG]] | |
221 | 213 |
222 define i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) { | 214 define i32 @test_atomic_rmw_add_32(i32 %iptr, i32 %v) { |
223 entry: | 215 entry: |
224 %ptr = inttoptr i32 %iptr to i32* | 216 %ptr = inttoptr i32 %iptr to i32* |
225 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6) | 217 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6) |
226 ret i32 %a | 218 ret i32 %a |
227 } | 219 } |
228 ; CHECK-LABEL: test_atomic_rmw_add_32 | 220 ; CHECK-LABEL: test_atomic_rmw_add_32 |
229 ; CHECK: lock | 221 ; CHECK: lock xadd DWORD {{.*}},[[REG:.*]] |
230 ; CHECK-NEXT: xadd dword {{.*}}, [[REG:.*]] | 222 ; CHECK: mov {{.*}},[[REG]] |
231 ; CHECK: mov {{.*}}, [[REG]] | |
232 | 223 |
233 define i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) { | 224 define i64 @test_atomic_rmw_add_64(i32 %iptr, i64 %v) { |
234 entry: | 225 entry: |
235 %ptr = inttoptr i32 %iptr to i64* | 226 %ptr = inttoptr i32 %iptr to i64* |
236 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6) | 227 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6) |
237 ret i64 %a | 228 ret i64 %a |
238 } | 229 } |
239 ; CHECK-LABEL: test_atomic_rmw_add_64 | 230 ; CHECK-LABEL: test_atomic_rmw_add_64 |
240 ; CHECK: push ebx | 231 ; CHECK: push ebx |
241 ; CHECK: mov eax, dword ptr [{{.*}}] | 232 ; CHECK: mov eax,DWORD PTR [{{.*}}] |
242 ; CHECK: mov edx, dword ptr [{{.*}} + 4] | 233 ; CHECK: mov edx,DWORD PTR [{{.*}}+0x4] |
243 ; CHECK: mov ebx, eax | 234 ; CHECK: [[LABEL:[^ ]*]]: {{.*}} mov ebx,eax |
244 ; RHS of add cannot be any of the e[abcd]x regs because they are | 235 ; RHS of add cannot be any of the e[abcd]x regs because they are |
245 ; clobbered in the loop, and the RHS needs to remain live. | 236 ; clobbered in the loop, and the RHS needs to remain live. |
246 ; CHECK: add ebx, {{.*e.[^x]}} | 237 ; CHECK: add ebx,{{.*e.[^x]}} |
247 ; CHECK: mov ecx, edx | 238 ; CHECK: mov ecx,edx |
248 ; CHECK: adc ecx, {{.*e.[^x]}} | 239 ; CHECK: adc ecx,{{.*e.[^x]}} |
249 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired). | 240 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired). |
250 ; It can be esi, edi, or ebp though, for example (so we need to be careful | 241 ; It can be esi, edi, or ebp though, for example (so we need to be careful |
251 ; about rejecting eb* and ed*). | 242 ; about rejecting eb* and ed*). |
252 ; CHECK: lock | 243 ; CHECK: lock cmpxchg8b QWORD PTR [e{{.[^x]}} |
253 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] | 244 ; CHECK: jne [[LABEL]] |
254 ; CHECK: jne -{{[0-9]}} | |
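; For readers of the checks above: cmpxchg8b compares edx:eax with the
; 64-bit memory operand and, on a mismatch, reloads the current value into
; edx:eax, so the lowering forms a retry loop. A minimal sketch, with the
; pointer in ebp and the addend in esi:edi purely for illustration:
;   mov  eax,DWORD PTR [ebp]       ; old.lo
;   mov  edx,DWORD PTR [ebp+0x4]   ; old.hi
; retry:
;   mov  ebx,eax
;   add  ebx,esi                   ; desired.lo = old.lo + v.lo
;   mov  ecx,edx
;   adc  ecx,edi                   ; desired.hi = old.hi + v.hi + carry
;   lock cmpxchg8b QWORD PTR [ebp]
;   jne  retry                     ; on exit edx:eax holds the old value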
255 | 245 |
256 ; Test with some more register pressure. When we have an alloca, ebp is | 246 ; Test with some more register pressure. When we have an alloca, ebp is |
257 ; used to manage the stack frame, so it cannot be used as a register either. | 247 ; used to manage the stack frame, so it cannot be used as a register either. |
258 define void @use_ptr(i32 %iptr) { | 248 declare void @use_ptr(i32 %iptr) |
259 entry: | |
260 ret void | |
261 } | |
262 | 249 |
263 define i64 @test_atomic_rmw_add_64_alloca(i32 %iptr, i64 %v) { | 250 define i64 @test_atomic_rmw_add_64_alloca(i32 %iptr, i64 %v) { |
264 entry: | 251 entry: |
265 %alloca_ptr = alloca i8, i32 16, align 16 | 252 %alloca_ptr = alloca i8, i32 16, align 16 |
266 %ptr = inttoptr i32 %iptr to i64* | 253 %ptr = inttoptr i32 %iptr to i64* |
267 %old = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6) | 254 %old = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 6) |
268 store i8 0, i8* %alloca_ptr, align 1 | 255 store i8 0, i8* %alloca_ptr, align 1 |
269 store i8 1, i8* %alloca_ptr, align 1 | 256 store i8 1, i8* %alloca_ptr, align 1 |
270 store i8 2, i8* %alloca_ptr, align 1 | 257 store i8 2, i8* %alloca_ptr, align 1 |
271 store i8 3, i8* %alloca_ptr, align 1 | 258 store i8 3, i8* %alloca_ptr, align 1 |
272 %__5 = ptrtoint i8* %alloca_ptr to i32 | 259 %__5 = ptrtoint i8* %alloca_ptr to i32 |
273 call void @use_ptr(i32 %__5) | 260 call void @use_ptr(i32 %__5) |
274 ret i64 %old | 261 ret i64 %old |
275 } | 262 } |
276 ; CHECK-LABEL: test_atomic_rmw_add_64_alloca | 263 ; CHECK-LABEL: test_atomic_rmw_add_64_alloca |
277 ; CHECK: push ebx | 264 ; CHECK: push ebx |
278 ; CHECK-DAG: mov edx | 265 ; CHECK-DAG: mov edx |
279 ; CHECK-DAG: mov eax | 266 ; CHECK-DAG: mov eax |
280 ; CHECK-DAG: mov ecx | 267 ; CHECK-DAG: mov ecx |
281 ; CHECK-DAG: mov ebx | 268 ; CHECK-DAG: mov ebx |
282 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired). | 269 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired). |
283 ; It also cannot be ebp since we use that for alloca. Also make sure it's | 270 ; It also cannot be ebp since we use that for alloca. Also make sure it's |
284 ; not esp, since that's the stack pointer and mucking with it will break | 271 ; not esp, since that's the stack pointer and mucking with it will break |
285 ; the later use_ptr function call. | 272 ; the later use_ptr function call. |
286 ; That pretty much leaves esi or edi as the only viable registers. | 273 ; That pretty much leaves esi or edi as the only viable registers. |
287 ; CHECK: lock | 274 ; CHECK: lock cmpxchg8b QWORD PTR [e{{[ds]}}i] |
288 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{[ds]}}i] | 275 ; CHECK: call {{.*}} R_{{.*}} use_ptr |
289 ; CHECK: call use_ptr | |
290 | 276 |
291 define i32 @test_atomic_rmw_add_32_ignored(i32 %iptr, i32 %v) { | 277 define i32 @test_atomic_rmw_add_32_ignored(i32 %iptr, i32 %v) { |
292 entry: | 278 entry: |
293 %ptr = inttoptr i32 %iptr to i32* | 279 %ptr = inttoptr i32 %iptr to i32* |
294 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6) | 280 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 1, i32* %ptr, i32 %v, i32 6) |
295 ret i32 %v | 281 ret i32 %v |
296 } | 282 } |
297 ; Technically this could use "lock add" instead of "lock xadd", if liveness | 283 ; Technically this could use "lock add" instead of "lock xadd", if liveness |
298 ; tells us that the destination variable is dead. | 284 ; tells us that the destination variable is dead. |
299 ; CHECK-LABEL: test_atomic_rmw_add_32_ignored | 285 ; CHECK-LABEL: test_atomic_rmw_add_32_ignored |
300 ; CHECK: lock | 286 ; CHECK: lock xadd DWORD {{.*}},[[REG:.*]] |
301 ; CHECK-NEXT: xadd dword {{.*}}, [[REG:.*]] | |
302 | 287 |
303 ; Atomic RMW 64 needs to be expanded into its own loop. | 288 ; Atomic RMW 64 needs to be expanded into its own loop. |
304 ; Make sure that works w/ non-trivial function bodies. | 289 ; Make sure that works w/ non-trivial function bodies. |
305 define i64 @test_atomic_rmw_add_64_loop(i32 %iptr, i64 %v) { | 290 define i64 @test_atomic_rmw_add_64_loop(i32 %iptr, i64 %v) { |
306 entry: | 291 entry: |
307 %x = icmp ult i64 %v, 100 | 292 %x = icmp ult i64 %v, 100 |
308 br i1 %x, label %err, label %loop | 293 br i1 %x, label %err, label %loop |
309 | 294 |
310 loop: | 295 loop: |
311 %v_next = phi i64 [ %v, %entry ], [ %next, %loop ] | 296 %v_next = phi i64 [ %v, %entry ], [ %next, %loop ] |
312 %ptr = inttoptr i32 %iptr to i64* | 297 %ptr = inttoptr i32 %iptr to i64* |
313 %next = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v_next, i32 6) | 298 %next = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v_next, i32 6) |
314 %success = icmp eq i64 %next, 100 | 299 %success = icmp eq i64 %next, 100 |
315 br i1 %success, label %done, label %loop | 300 br i1 %success, label %done, label %loop |
316 | 301 |
317 done: | 302 done: |
318 ret i64 %next | 303 ret i64 %next |
319 | 304 |
320 err: | 305 err: |
321 ret i64 0 | 306 ret i64 0 |
322 } | 307 } |
323 ; CHECK-LABEL: test_atomic_rmw_add_64_loop | 308 ; CHECK-LABEL: test_atomic_rmw_add_64_loop |
324 ; CHECK: push ebx | 309 ; CHECK: push ebx |
325 ; CHECK: mov eax, dword ptr [{{.*}}] | 310 ; CHECK: mov eax,DWORD PTR [{{.*}}] |
326 ; CHECK: mov edx, dword ptr [{{.*}} + 4] | 311 ; CHECK: mov edx,DWORD PTR [{{.*}}+0x4] |
327 ; CHECK: mov ebx, eax | 312 ; CHECK: [[LABEL:[^ ]*]]: {{.*}} mov ebx,eax |
328 ; CHECK: add ebx, {{.*e.[^x]}} | 313 ; CHECK: add ebx,{{.*e.[^x]}} |
329 ; CHECK: mov ecx, edx | 314 ; CHECK: mov ecx,edx |
330 ; CHECK: adc ecx, {{.*e.[^x]}} | 315 ; CHECK: adc ecx,{{.*e.[^x]}} |
331 ; CHECK: lock | 316 ; CHECK: lock cmpxchg8b QWORD PTR [e{{.[^x]}}+0x0] |
332 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] | 317 ; CHECK: jne [[LABEL]] |
333 ; CHECK: jne -{{[0-9]}} | |
334 | 318 |
335 ;; sub | 319 ;; sub |
336 | 320 |
337 define i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) { | 321 define i32 @test_atomic_rmw_sub_8(i32 %iptr, i32 %v) { |
338 entry: | 322 entry: |
339 %trunc = trunc i32 %v to i8 | 323 %trunc = trunc i32 %v to i8 |
340 %ptr = inttoptr i32 %iptr to i8* | 324 %ptr = inttoptr i32 %iptr to i8* |
341 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 2, i8* %ptr, i8 %trunc, i32 6) | 325 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 2, i8* %ptr, i8 %trunc, i32 6) |
342 %a_ext = zext i8 %a to i32 | 326 %a_ext = zext i8 %a to i32 |
343 ret i32 %a_ext | 327 ret i32 %a_ext |
344 } | 328 } |
345 ; CHECK-LABEL: test_atomic_rmw_sub_8 | 329 ; CHECK-LABEL: test_atomic_rmw_sub_8 |
346 ; CHECK: neg [[REG:.*]] | 330 ; CHECK: neg [[REG:.*]] |
347 ; CHECK: lock | 331 ; CHECK: lock xadd BYTE {{.*}},[[REG]] |
348 ; CHECK-NEXT: xadd byte {{.*}}, [[REG]] | 332 ; CHECK: {{mov|movzx}} {{.*}},[[REG]] |
349 ; CHECK: {{mov|movzx}} {{.*}}, [[REG]] | |
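; Subtraction reuses the add lowering: x - v == x + (-v), so the operand is
; negated once up front and lock xadd does the rest. Sketched with
; illustrative registers:
;   neg   cl                       ; cl = -v
;   lock xadd BYTE PTR [edx],cl    ; cl receives the old value
;   movzx eax,cl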
350 | 333 |
351 define i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) { | 334 define i32 @test_atomic_rmw_sub_16(i32 %iptr, i32 %v) { |
352 entry: | 335 entry: |
353 %trunc = trunc i32 %v to i16 | 336 %trunc = trunc i32 %v to i16 |
354 %ptr = inttoptr i32 %iptr to i16* | 337 %ptr = inttoptr i32 %iptr to i16* |
355 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 2, i16* %ptr, i16 %trunc, i32 6) | 338 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 2, i16* %ptr, i16 %trunc, i32 6) |
356 %a_ext = zext i16 %a to i32 | 339 %a_ext = zext i16 %a to i32 |
357 ret i32 %a_ext | 340 ret i32 %a_ext |
358 } | 341 } |
359 ; CHECK-LABEL: test_atomic_rmw_sub_16 | 342 ; CHECK-LABEL: test_atomic_rmw_sub_16 |
360 ; CHECK: neg [[REG:.*]] | 343 ; CHECK: neg [[REG:.*]] |
361 ; CHECK: lock | 344 ; CHECK: lock xadd WORD {{.*}},[[REG]] |
362 ; CHECK-NEXT: xadd word {{.*}}, [[REG]] | 345 ; CHECK: {{mov|movzx}} {{.*}},[[REG]] |
363 ; CHECK: {{mov|movzx}} {{.*}}, [[REG]] | |
364 | 346 |
365 define i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) { | 347 define i32 @test_atomic_rmw_sub_32(i32 %iptr, i32 %v) { |
366 entry: | 348 entry: |
367 %ptr = inttoptr i32 %iptr to i32* | 349 %ptr = inttoptr i32 %iptr to i32* |
368 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6) | 350 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6) |
369 ret i32 %a | 351 ret i32 %a |
370 } | 352 } |
371 ; CHECK-LABEL: test_atomic_rmw_sub_32 | 353 ; CHECK-LABEL: test_atomic_rmw_sub_32 |
372 ; CHECK: neg [[REG:.*]] | 354 ; CHECK: neg [[REG:.*]] |
373 ; CHECK: lock | 355 ; CHECK: lock xadd DWORD {{.*}},[[REG]] |
374 ; CHECK-NEXT: xadd dword {{.*}}, [[REG]] | 356 ; CHECK: mov {{.*}},[[REG]] |
375 ; CHECK: mov {{.*}}, [[REG]] | |
376 | 357 |
377 define i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) { | 358 define i64 @test_atomic_rmw_sub_64(i32 %iptr, i64 %v) { |
378 entry: | 359 entry: |
379 %ptr = inttoptr i32 %iptr to i64* | 360 %ptr = inttoptr i32 %iptr to i64* |
380 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i64* %ptr, i64 %v, i32 6) | 361 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 2, i64* %ptr, i64 %v, i32 6) |
381 ret i64 %a | 362 ret i64 %a |
382 } | 363 } |
383 ; CHECK-LABEL: test_atomic_rmw_sub_64 | 364 ; CHECK-LABEL: test_atomic_rmw_sub_64 |
384 ; CHECK: push ebx | 365 ; CHECK: push ebx |
385 ; CHECK: mov eax, dword ptr [{{.*}}] | 366 ; CHECK: mov eax,DWORD PTR [{{.*}}] |
386 ; CHECK: mov edx, dword ptr [{{.*}} + 4] | 367 ; CHECK: mov edx,DWORD PTR [{{.*}}+0x4] |
387 ; CHECK: mov ebx, eax | 368 ; CHECK: [[LABEL:[^ ]*]]: {{.*}} mov ebx,eax |
388 ; CHECK: sub ebx, {{.*e.[^x]}} | 369 ; CHECK: sub ebx,{{.*e.[^x]}} |
389 ; CHECK: mov ecx, edx | 370 ; CHECK: mov ecx,edx |
390 ; CHECK: sbb ecx, {{.*e.[^x]}} | 371 ; CHECK: sbb ecx,{{.*e.[^x]}} |
391 ; CHECK: lock | 372 ; CHECK: lock cmpxchg8b QWORD PTR [e{{.[^x]}} |
392 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] | 373 ; CHECK: jne [[LABEL]] |
393 ; CHECK: jne -{{[0-9]}} | |
394 | 374 |
395 | 375 |
396 define i32 @test_atomic_rmw_sub_32_ignored(i32 %iptr, i32 %v) { | 376 define i32 @test_atomic_rmw_sub_32_ignored(i32 %iptr, i32 %v) { |
397 entry: | 377 entry: |
398 %ptr = inttoptr i32 %iptr to i32* | 378 %ptr = inttoptr i32 %iptr to i32* |
399 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6) | 379 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 2, i32* %ptr, i32 %v, i32 6) |
400 ret i32 %v | 380 ret i32 %v |
401 } | 381 } |
402 ; Could use "lock sub" instead of "neg; lock xadd" | 382 ; Could use "lock sub" instead of "neg; lock xadd" |
403 ; CHECK-LABEL: test_atomic_rmw_sub_32_ignored | 383 ; CHECK-LABEL: test_atomic_rmw_sub_32_ignored |
404 ; CHECK: neg [[REG:.*]] | 384 ; CHECK: neg [[REG:.*]] |
405 ; CHECK: lock | 385 ; CHECK: lock xadd DWORD {{.*}},[[REG]] |
406 ; CHECK-NEXT: xadd dword {{.*}}, [[REG]] | |
407 | 386 |
408 ;; or | 387 ;; or |
409 | 388 |
410 define i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) { | 389 define i32 @test_atomic_rmw_or_8(i32 %iptr, i32 %v) { |
411 entry: | 390 entry: |
412 %trunc = trunc i32 %v to i8 | 391 %trunc = trunc i32 %v to i8 |
413 %ptr = inttoptr i32 %iptr to i8* | 392 %ptr = inttoptr i32 %iptr to i8* |
414 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i8* %ptr, i8 %trunc, i32 6) | 393 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 3, i8* %ptr, i8 %trunc, i32 6) |
415 %a_ext = zext i8 %a to i32 | 394 %a_ext = zext i8 %a to i32 |
416 ret i32 %a_ext | 395 ret i32 %a_ext |
417 } | 396 } |
418 ; CHECK-LABEL: test_atomic_rmw_or_8 | 397 ; CHECK-LABEL: test_atomic_rmw_or_8 |
419 ; CHECK: mov al, byte ptr | 398 ; CHECK: mov al,BYTE PTR |
420 ; Dest cannot be eax here, because eax is used for the old value. Also want | 399 ; Dest cannot be eax here, because eax is used for the old value. Also want |
421 ; to make sure that cmpxchg's source is the same register. | 400 ; to make sure that cmpxchg's source is the same register. |
422 ; CHECK: or [[REG:[^a].]] | 401 ; CHECK: or [[REG:[^a].]] |
423 ; CHECK: lock | 402 ; CHECK: lock cmpxchg BYTE PTR [e{{[^a].}}],[[REG]] |
424 ; CHECK-NEXT: cmpxchg byte ptr [e{{[^a].}}], [[REG]] | 403 ; CHECK: jne |
425 ; CHECK: jne -{{[0-9]}} | |
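; The bitwise RMW ops have no xadd-style instruction, so they lower to a
; load plus a lock cmpxchg retry loop with eax pinned as the expected/old
; value, which is exactly what the register constraints above enforce. A
; rough sketch (ecx, bl, and dl chosen arbitrarily):
;   mov  al,BYTE PTR [ecx]
; retry:
;   mov  dl,al
;   or   dl,bl                     ; compute the desired value
;   lock cmpxchg BYTE PTR [ecx],dl ; succeeds only if [ecx] still equals al
;   jne  retry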
426 | 404 |
427 define i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) { | 405 define i32 @test_atomic_rmw_or_16(i32 %iptr, i32 %v) { |
428 entry: | 406 entry: |
429 %trunc = trunc i32 %v to i16 | 407 %trunc = trunc i32 %v to i16 |
430 %ptr = inttoptr i32 %iptr to i16* | 408 %ptr = inttoptr i32 %iptr to i16* |
431 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i16* %ptr, i16 %trunc, i32 6) | 409 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 3, i16* %ptr, i16 %trunc, i32 6) |
432 %a_ext = zext i16 %a to i32 | 410 %a_ext = zext i16 %a to i32 |
433 ret i32 %a_ext | 411 ret i32 %a_ext |
434 } | 412 } |
435 ; CHECK-LABEL: test_atomic_rmw_or_16 | 413 ; CHECK-LABEL: test_atomic_rmw_or_16 |
436 ; CHECK: mov ax, word ptr | 414 ; CHECK: mov ax,WORD PTR |
437 ; CHECK: or [[REG:[^a].]] | 415 ; CHECK: or [[REG:[^a].]] |
438 ; CHECK: lock | 416 ; CHECK: lock cmpxchg WORD PTR [e{{[^a].}}],[[REG]] |
439 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}], [[REG]] | 417 ; CHECK: jne |
440 ; CHECK: jne -{{[0-9]}} | |
441 | 418 |
442 define i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) { | 419 define i32 @test_atomic_rmw_or_32(i32 %iptr, i32 %v) { |
443 entry: | 420 entry: |
444 %ptr = inttoptr i32 %iptr to i32* | 421 %ptr = inttoptr i32 %iptr to i32* |
445 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6) | 422 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6) |
446 ret i32 %a | 423 ret i32 %a |
447 } | 424 } |
448 ; CHECK-LABEL: test_atomic_rmw_or_32 | 425 ; CHECK-LABEL: test_atomic_rmw_or_32 |
449 ; CHECK: mov eax, dword ptr | 426 ; CHECK: mov eax,DWORD PTR |
450 ; CHECK: or [[REG:e[^a].]] | 427 ; CHECK: or [[REG:e[^a].]] |
451 ; CHECK: lock | 428 ; CHECK: lock cmpxchg DWORD PTR [e{{[^a].}}],[[REG]] |
452 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}], [[REG]] | 429 ; CHECK: jne |
453 ; CHECK: jne -{{[0-9]}} | |
454 | 430 |
455 define i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) { | 431 define i64 @test_atomic_rmw_or_64(i32 %iptr, i64 %v) { |
456 entry: | 432 entry: |
457 %ptr = inttoptr i32 %iptr to i64* | 433 %ptr = inttoptr i32 %iptr to i64* |
458 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i64* %ptr, i64 %v, i32 6) | 434 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 3, i64* %ptr, i64 %v, i32 6) |
459 ret i64 %a | 435 ret i64 %a |
460 } | 436 } |
461 ; CHECK-LABEL: test_atomic_rmw_or_64 | 437 ; CHECK-LABEL: test_atomic_rmw_or_64 |
462 ; CHECK: push ebx | 438 ; CHECK: push ebx |
463 ; CHECK: mov eax, dword ptr [{{.*}}] | 439 ; CHECK: mov eax,DWORD PTR [{{.*}}] |
464 ; CHECK: mov edx, dword ptr [{{.*}} + 4] | 440 ; CHECK: mov edx,DWORD PTR [{{.*}}+0x4] |
465 ; CHECK: mov ebx, eax | 441 ; CHECK: [[LABEL:[^ ]*]]: {{.*}} mov ebx,eax |
466 ; CHECK: or ebx, {{.*e.[^x]}} | 442 ; CHECK: or ebx,{{.*e.[^x]}} |
467 ; CHECK: mov ecx, edx | 443 ; CHECK: mov ecx,edx |
468 ; CHECK: or ecx, {{.*e.[^x]}} | 444 ; CHECK: or ecx,{{.*e.[^x]}} |
469 ; CHECK: lock | 445 ; CHECK: lock cmpxchg8b QWORD PTR [e{{.[^x]}} |
470 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] | 446 ; CHECK: jne [[LABEL]] |
471 ; CHECK: jne -{{[0-9]}} | |
472 | 447 |
473 define i32 @test_atomic_rmw_or_32_ignored(i32 %iptr, i32 %v) { | 448 define i32 @test_atomic_rmw_or_32_ignored(i32 %iptr, i32 %v) { |
474 entry: | 449 entry: |
475 %ptr = inttoptr i32 %iptr to i32* | 450 %ptr = inttoptr i32 %iptr to i32* |
476 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6) | 451 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 3, i32* %ptr, i32 %v, i32 6) |
477 ret i32 %v | 452 ret i32 %v |
478 } | 453 } |
479 ; CHECK-LABEL: test_atomic_rmw_or_32_ignored | 454 ; CHECK-LABEL: test_atomic_rmw_or_32_ignored |
480 ; Could just "lock or", if we inspect the liveness information first. | 455 ; Could just "lock or", if we inspect the liveness information first. |
481 ; Would also need a way to introduce "lock"'edness to binary | 456 ; Would also need a way to introduce "lock"'edness to binary |
482 ; operators without introducing overhead on the more common binary ops. | 457 ; operators without introducing overhead on the more common binary ops. |
483 ; CHECK: mov eax, dword ptr | 458 ; CHECK: mov eax,DWORD PTR |
484 ; CHECK: or [[REG:e[^a].]] | 459 ; CHECK: or [[REG:e[^a].]] |
485 ; CHECK: lock | 460 ; CHECK: lock cmpxchg DWORD PTR [e{{[^a].}}],[[REG]] |
486 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}], [[REG]] | 461 ; CHECK: jne |
487 ; CHECK: jne -{{[0-9]}} | |
488 | 462 |
489 ;; and | 463 ;; and |
490 | 464 |
491 define i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) { | 465 define i32 @test_atomic_rmw_and_8(i32 %iptr, i32 %v) { |
492 entry: | 466 entry: |
493 %trunc = trunc i32 %v to i8 | 467 %trunc = trunc i32 %v to i8 |
494 %ptr = inttoptr i32 %iptr to i8* | 468 %ptr = inttoptr i32 %iptr to i8* |
495 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 4, i8* %ptr, i8 %trunc, i32 6) | 469 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 4, i8* %ptr, i8 %trunc, i32 6) |
496 %a_ext = zext i8 %a to i32 | 470 %a_ext = zext i8 %a to i32 |
497 ret i32 %a_ext | 471 ret i32 %a_ext |
498 } | 472 } |
499 ; CHECK-LABEL: test_atomic_rmw_and_8 | 473 ; CHECK-LABEL: test_atomic_rmw_and_8 |
500 ; CHECK: mov al, byte ptr | 474 ; CHECK: mov al,BYTE PTR |
501 ; CHECK: and [[REG:[^a].]] | 475 ; CHECK: and [[REG:[^a].]] |
502 ; CHECK: lock | 476 ; CHECK: lock cmpxchg BYTE PTR [e{{[^a].}}],[[REG]] |
503 ; CHECK-NEXT: cmpxchg byte ptr [e{{[^a].}}], [[REG]] | 477 ; CHECK: jne |
504 ; CHECK: jne -{{[0-9]}} | |
505 | 478 |
506 define i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) { | 479 define i32 @test_atomic_rmw_and_16(i32 %iptr, i32 %v) { |
507 entry: | 480 entry: |
508 %trunc = trunc i32 %v to i16 | 481 %trunc = trunc i32 %v to i16 |
509 %ptr = inttoptr i32 %iptr to i16* | 482 %ptr = inttoptr i32 %iptr to i16* |
510 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 4, i16* %ptr, i16 %trunc, i32 6) | 483 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 4, i16* %ptr, i16 %trunc, i32 6) |
511 %a_ext = zext i16 %a to i32 | 484 %a_ext = zext i16 %a to i32 |
512 ret i32 %a_ext | 485 ret i32 %a_ext |
513 } | 486 } |
514 ; CHECK-LABEL: test_atomic_rmw_and_16 | 487 ; CHECK-LABEL: test_atomic_rmw_and_16 |
515 ; CHECK: mov ax, word ptr | 488 ; CHECK: mov ax,WORD PTR |
516 ; CHECK: and | 489 ; CHECK: and |
517 ; CHECK: lock | 490 ; CHECK: lock cmpxchg WORD PTR [e{{[^a].}}] |
518 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}] | 491 ; CHECK: jne |
519 ; CHECK: jne -{{[0-9]}} | |
520 | 492 |
521 define i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) { | 493 define i32 @test_atomic_rmw_and_32(i32 %iptr, i32 %v) { |
522 entry: | 494 entry: |
523 %ptr = inttoptr i32 %iptr to i32* | 495 %ptr = inttoptr i32 %iptr to i32* |
524 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6) | 496 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6) |
525 ret i32 %a | 497 ret i32 %a |
526 } | 498 } |
527 ; CHECK-LABEL: test_atomic_rmw_and_32 | 499 ; CHECK-LABEL: test_atomic_rmw_and_32 |
528 ; CHECK: mov eax, dword ptr | 500 ; CHECK: mov eax,DWORD PTR |
529 ; CHECK: and | 501 ; CHECK: and |
530 ; CHECK: lock | 502 ; CHECK: lock cmpxchg DWORD PTR [e{{[^a].}}] |
531 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}] | 503 ; CHECK: jne |
532 ; CHECK: jne -{{[0-9]}} | |
533 | 504 |
534 define i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) { | 505 define i64 @test_atomic_rmw_and_64(i32 %iptr, i64 %v) { |
535 entry: | 506 entry: |
536 %ptr = inttoptr i32 %iptr to i64* | 507 %ptr = inttoptr i32 %iptr to i64* |
537 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i64* %ptr, i64 %v, i32 6) | 508 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 4, i64* %ptr, i64 %v, i32 6) |
538 ret i64 %a | 509 ret i64 %a |
539 } | 510 } |
540 ; CHECK-LABEL: test_atomic_rmw_and_64 | 511 ; CHECK-LABEL: test_atomic_rmw_and_64 |
541 ; CHECK: push ebx | 512 ; CHECK: push ebx |
542 ; CHECK: mov eax, dword ptr [{{.*}}] | 513 ; CHECK: mov eax,DWORD PTR [{{.*}}] |
543 ; CHECK: mov edx, dword ptr [{{.*}} + 4] | 514 ; CHECK: mov edx,DWORD PTR [{{.*}}+0x4] |
544 ; CHECK: mov ebx, eax | 515 ; CHECK: [[LABEL:[^ ]*]]: {{.*}} mov ebx,eax |
545 ; CHECK: and ebx, {{.*e.[^x]}} | 516 ; CHECK: and ebx,{{.*e.[^x]}} |
546 ; CHECK: mov ecx, edx | 517 ; CHECK: mov ecx,edx |
547 ; CHECK: and ecx, {{.*e.[^x]}} | 518 ; CHECK: and ecx,{{.*e.[^x]}} |
548 ; CHECK: lock | 519 ; CHECK: lock cmpxchg8b QWORD PTR [e{{.[^x]}} |
549 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] | 520 ; CHECK: jne [[LABEL]] |
550 ; CHECK: jne -{{[0-9]}} | |
551 | 521 |
552 define i32 @test_atomic_rmw_and_32_ignored(i32 %iptr, i32 %v) { | 522 define i32 @test_atomic_rmw_and_32_ignored(i32 %iptr, i32 %v) { |
553 entry: | 523 entry: |
554 %ptr = inttoptr i32 %iptr to i32* | 524 %ptr = inttoptr i32 %iptr to i32* |
555 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6) | 525 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 4, i32* %ptr, i32 %v, i32 6) |
556 ret i32 %v | 526 ret i32 %v |
557 } | 527 } |
558 ; CHECK-LABEL: test_atomic_rmw_and_32_ignored | 528 ; CHECK-LABEL: test_atomic_rmw_and_32_ignored |
559 ; Could just "lock and" | 529 ; Could just "lock and" |
560 ; CHECK: mov eax, dword ptr | 530 ; CHECK: mov eax,DWORD PTR |
561 ; CHECK: and | 531 ; CHECK: and |
562 ; CHECK: lock | 532 ; CHECK: lock cmpxchg DWORD PTR [e{{[^a].}}] |
563 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}] | 533 ; CHECK: jne |
564 ; CHECK: jne -{{[0-9]}} | |
565 | 534 |
566 ;; xor | 535 ;; xor |
567 | 536 |
568 define i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) { | 537 define i32 @test_atomic_rmw_xor_8(i32 %iptr, i32 %v) { |
569 entry: | 538 entry: |
570 %trunc = trunc i32 %v to i8 | 539 %trunc = trunc i32 %v to i8 |
571 %ptr = inttoptr i32 %iptr to i8* | 540 %ptr = inttoptr i32 %iptr to i8* |
572 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 5, i8* %ptr, i8 %trunc, i32 6) | 541 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 5, i8* %ptr, i8 %trunc, i32 6) |
573 %a_ext = zext i8 %a to i32 | 542 %a_ext = zext i8 %a to i32 |
574 ret i32 %a_ext | 543 ret i32 %a_ext |
575 } | 544 } |
576 ; CHECK-LABEL: test_atomic_rmw_xor_8 | 545 ; CHECK-LABEL: test_atomic_rmw_xor_8 |
577 ; CHECK: mov al, byte ptr | 546 ; CHECK: mov al,BYTE PTR |
578 ; CHECK: xor [[REG:[^a].]] | 547 ; CHECK: xor [[REG:[^a].]] |
579 ; CHECK: lock | 548 ; CHECK: lock cmpxchg BYTE PTR [e{{[^a].}}],[[REG]] |
580 ; CHECK-NEXT: cmpxchg byte ptr [e{{[^a].}}], [[REG]] | 549 ; CHECK: jne |
581 ; CHECK: jne -{{[0-9]}} | |
582 | 550 |
583 define i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) { | 551 define i32 @test_atomic_rmw_xor_16(i32 %iptr, i32 %v) { |
584 entry: | 552 entry: |
585 %trunc = trunc i32 %v to i16 | 553 %trunc = trunc i32 %v to i16 |
586 %ptr = inttoptr i32 %iptr to i16* | 554 %ptr = inttoptr i32 %iptr to i16* |
587 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 5, i16* %ptr, i16 %trunc, i32 6) | 555 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 5, i16* %ptr, i16 %trunc, i32 6) |
588 %a_ext = zext i16 %a to i32 | 556 %a_ext = zext i16 %a to i32 |
589 ret i32 %a_ext | 557 ret i32 %a_ext |
590 } | 558 } |
591 ; CHECK-LABEL: test_atomic_rmw_xor_16 | 559 ; CHECK-LABEL: test_atomic_rmw_xor_16 |
592 ; CHECK: mov ax, word ptr | 560 ; CHECK: mov ax,WORD PTR |
593 ; CHECK: xor | 561 ; CHECK: xor |
594 ; CHECK: lock | 562 ; CHECK: lock cmpxchg WORD PTR [e{{[^a].}}] |
595 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}] | 563 ; CHECK: jne |
596 ; CHECK: jne -{{[0-9]}} | |
597 | 564 |
598 | 565 |
599 define i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) { | 566 define i32 @test_atomic_rmw_xor_32(i32 %iptr, i32 %v) { |
600 entry: | 567 entry: |
601 %ptr = inttoptr i32 %iptr to i32* | 568 %ptr = inttoptr i32 %iptr to i32* |
602 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6) | 569 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6) |
603 ret i32 %a | 570 ret i32 %a |
604 } | 571 } |
605 ; CHECK-LABEL: test_atomic_rmw_xor_32 | 572 ; CHECK-LABEL: test_atomic_rmw_xor_32 |
606 ; CHECK: mov eax, dword ptr | 573 ; CHECK: mov eax,DWORD PTR |
607 ; CHECK: xor | 574 ; CHECK: xor |
608 ; CHECK: lock | 575 ; CHECK: lock cmpxchg DWORD PTR [e{{[^a].}}] |
609 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}] | 576 ; CHECK: jne |
610 ; CHECK: jne -{{[0-9]}} | |
611 | 577 |
612 define i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) { | 578 define i64 @test_atomic_rmw_xor_64(i32 %iptr, i64 %v) { |
613 entry: | 579 entry: |
614 %ptr = inttoptr i32 %iptr to i64* | 580 %ptr = inttoptr i32 %iptr to i64* |
615 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i64* %ptr, i64 %v, i32 6) | 581 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 5, i64* %ptr, i64 %v, i32 6) |
616 ret i64 %a | 582 ret i64 %a |
617 } | 583 } |
618 ; CHECK-LABEL: test_atomic_rmw_xor_64 | 584 ; CHECK-LABEL: test_atomic_rmw_xor_64 |
619 ; CHECK: push ebx | 585 ; CHECK: push ebx |
620 ; CHECK: mov eax, dword ptr [{{.*}}] | 586 ; CHECK: mov eax,DWORD PTR [{{.*}}] |
621 ; CHECK: mov edx, dword ptr [{{.*}} + 4] | 587 ; CHECK: mov edx,DWORD PTR [{{.*}}+0x4] |
622 ; CHECK: mov ebx, eax | 588 ; CHECK: mov ebx,eax |
623 ; CHECK: xor ebx, {{.*e.[^x]}} | 589 ; CHECK: xor ebx,{{.*e.[^x]}} |
624 ; CHECK: mov ecx, edx | 590 ; CHECK: mov ecx,edx |
625 ; CHECK: xor ecx, {{.*e.[^x]}} | 591 ; CHECK: xor ecx,{{.*e.[^x]}} |
626 ; CHECK: lock | 592 ; CHECK: lock cmpxchg8b QWORD PTR [e{{.[^x]}} |
627 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] | 593 ; CHECK: jne |
628 ; CHECK: jne -{{[0-9]}} | |
629 | 594 |
630 define i32 @test_atomic_rmw_xor_32_ignored(i32 %iptr, i32 %v) { | 595 define i32 @test_atomic_rmw_xor_32_ignored(i32 %iptr, i32 %v) { |
631 entry: | 596 entry: |
632 %ptr = inttoptr i32 %iptr to i32* | 597 %ptr = inttoptr i32 %iptr to i32* |
633 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6) | 598 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 5, i32* %ptr, i32 %v, i32 6) |
634 ret i32 %v | 599 ret i32 %v |
635 } | 600 } |
636 ; CHECK-LABEL: test_atomic_rmw_xor_32_ignored | 601 ; CHECK-LABEL: test_atomic_rmw_xor_32_ignored |
637 ; CHECK: mov eax, dword ptr | 602 ; CHECK: mov eax,DWORD PTR |
638 ; CHECK: xor | 603 ; CHECK: xor |
639 ; CHECK: lock | 604 ; CHECK: lock cmpxchg DWORD PTR [e{{[^a].}}] |
640 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}] | 605 ; CHECK: jne |
641 ; CHECK: jne -{{[0-9]}} | |
642 | 606 |
643 ;; exchange | 607 ;; exchange |
644 | 608 |
645 define i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) { | 609 define i32 @test_atomic_rmw_xchg_8(i32 %iptr, i32 %v) { |
646 entry: | 610 entry: |
647 %trunc = trunc i32 %v to i8 | 611 %trunc = trunc i32 %v to i8 |
648 %ptr = inttoptr i32 %iptr to i8* | 612 %ptr = inttoptr i32 %iptr to i8* |
649 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i8* %ptr, i8 %trunc, i32 6) | 613 %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 6, i8* %ptr, i8 %trunc, i32 6) |
650 %a_ext = zext i8 %a to i32 | 614 %a_ext = zext i8 %a to i32 |
651 ret i32 %a_ext | 615 ret i32 %a_ext |
652 } | 616 } |
653 ; CHECK-LABEL: test_atomic_rmw_xchg_8 | 617 ; CHECK-LABEL: test_atomic_rmw_xchg_8 |
654 ; CHECK: xchg byte ptr {{.*}}, [[REG:.*]] | 618 ; CHECK: xchg BYTE PTR {{.*}},[[REG:.*]] |
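; No lock prefix is expected in these xchg checks: xchg with a memory operand
; is implicitly locked on x86, so the bare instruction is already sequentially
; consistent.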
655 | 619 |
656 define i32 @test_atomic_rmw_xchg_16(i32 %iptr, i32 %v) { | 620 define i32 @test_atomic_rmw_xchg_16(i32 %iptr, i32 %v) { |
657 entry: | 621 entry: |
658 %trunc = trunc i32 %v to i16 | 622 %trunc = trunc i32 %v to i16 |
659 %ptr = inttoptr i32 %iptr to i16* | 623 %ptr = inttoptr i32 %iptr to i16* |
660 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 6, i16* %ptr, i16 %trunc, i32 6) | 624 %a = call i16 @llvm.nacl.atomic.rmw.i16(i32 6, i16* %ptr, i16 %trunc, i32 6) |
661 %a_ext = zext i16 %a to i32 | 625 %a_ext = zext i16 %a to i32 |
662 ret i32 %a_ext | 626 ret i32 %a_ext |
663 } | 627 } |
664 ; CHECK-LABEL: test_atomic_rmw_xchg_16 | 628 ; CHECK-LABEL: test_atomic_rmw_xchg_16 |
665 ; CHECK: xchg word ptr {{.*}}, [[REG:.*]] | 629 ; CHECK: xchg WORD PTR {{.*}},[[REG:.*]] |
666 | 630 |
667 define i32 @test_atomic_rmw_xchg_32(i32 %iptr, i32 %v) { | 631 define i32 @test_atomic_rmw_xchg_32(i32 %iptr, i32 %v) { |
668 entry: | 632 entry: |
669 %ptr = inttoptr i32 %iptr to i32* | 633 %ptr = inttoptr i32 %iptr to i32* |
670 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6) | 634 %a = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6) |
671 ret i32 %a | 635 ret i32 %a |
672 } | 636 } |
673 ; CHECK-LABEL: test_atomic_rmw_xchg_32 | 637 ; CHECK-LABEL: test_atomic_rmw_xchg_32 |
674 ; CHECK: xchg dword ptr {{.*}}, [[REG:.*]] | 638 ; CHECK: xchg DWORD PTR {{.*}},[[REG:.*]] |
675 | 639 |
676 define i64 @test_atomic_rmw_xchg_64(i32 %iptr, i64 %v) { | 640 define i64 @test_atomic_rmw_xchg_64(i32 %iptr, i64 %v) { |
677 entry: | 641 entry: |
678 %ptr = inttoptr i32 %iptr to i64* | 642 %ptr = inttoptr i32 %iptr to i64* |
679 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i64* %ptr, i64 %v, i32 6) | 643 %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 6, i64* %ptr, i64 %v, i32 6) |
680 ret i64 %a | 644 ret i64 %a |
681 } | 645 } |
682 ; CHECK-LABEL: test_atomic_rmw_xchg_64 | 646 ; CHECK-LABEL: test_atomic_rmw_xchg_64 |
683 ; CHECK: push ebx | 647 ; CHECK: push ebx |
684 ; CHECK-DAG: mov edx | 648 ; CHECK-DAG: mov edx |
685 ; CHECK-DAG: mov eax | 649 ; CHECK-DAG: mov eax |
686 ; CHECK-DAG: mov ecx | 650 ; CHECK-DAG: mov ecx |
687 ; CHECK-DAG: mov ebx | 651 ; CHECK-DAG: mov ebx |
688 ; CHECK: lock | 652 ; CHECK: lock cmpxchg8b QWORD PTR [{{e.[^x]}} |
689 ; CHECK-NEXT: cmpxchg8b qword ptr [{{e.[^x]}}] | 653 ; CHECK: jne |
690 ; CHECK: jne -{{[0-9]}} | |
691 | 654 |
692 define i32 @test_atomic_rmw_xchg_32_ignored(i32 %iptr, i32 %v) { | 655 define i32 @test_atomic_rmw_xchg_32_ignored(i32 %iptr, i32 %v) { |
693 entry: | 656 entry: |
694 %ptr = inttoptr i32 %iptr to i32* | 657 %ptr = inttoptr i32 %iptr to i32* |
695 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6) | 658 %ignored = call i32 @llvm.nacl.atomic.rmw.i32(i32 6, i32* %ptr, i32 %v, i32 6) |
696 ret i32 %v | 659 ret i32 %v |
697 } | 660 } |
698 ; In this case, ignoring the return value doesn't help. The xchg is | 661 ; In this case, ignoring the return value doesn't help. The xchg is |
699 ; used to do an atomic store. | 662 ; used to do an atomic store. |
700 ; CHECK-LABEL: test_atomic_rmw_xchg_32_ignored | 663 ; CHECK-LABEL: test_atomic_rmw_xchg_32_ignored |
701 ; CHECK: xchg dword ptr {{.*}}, [[REG:.*]] | 664 ; CHECK: xchg DWORD PTR {{.*}},[[REG:.*]] |
702 | 665 |
703 ;;;; Cmpxchg | 666 ;;;; Cmpxchg |
704 | 667 |
705 define i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) { | 668 define i32 @test_atomic_cmpxchg_8(i32 %iptr, i32 %expected, i32 %desired) { |
706 entry: | 669 entry: |
707 %trunc_exp = trunc i32 %expected to i8 | 670 %trunc_exp = trunc i32 %expected to i8 |
708 %trunc_des = trunc i32 %desired to i8 | 671 %trunc_des = trunc i32 %desired to i8 |
709 %ptr = inttoptr i32 %iptr to i8* | 672 %ptr = inttoptr i32 %iptr to i8* |
710 %old = call i8 @llvm.nacl.atomic.cmpxchg.i8(i8* %ptr, i8 %trunc_exp, | 673 %old = call i8 @llvm.nacl.atomic.cmpxchg.i8(i8* %ptr, i8 %trunc_exp, |
711 i8 %trunc_des, i32 6, i32 6) | 674 i8 %trunc_des, i32 6, i32 6) |
712 %old_ext = zext i8 %old to i32 | 675 %old_ext = zext i8 %old to i32 |
713 ret i32 %old_ext | 676 ret i32 %old_ext |
714 } | 677 } |
715 ; CHECK-LABEL: test_atomic_cmpxchg_8 | 678 ; CHECK-LABEL: test_atomic_cmpxchg_8 |
716 ; CHECK: mov eax, {{.*}} | 679 ; CHECK: mov eax,{{.*}} |
717 ; Need to check that eax isn't used as the address register or the desired, | 680 ; Need to check that eax isn't used as the address register or the desired, |
718 ; since it is already used as the *expected* register. | 681 ; since it is already used as the *expected* register. |
719 ; CHECK: lock | 682 ; CHECK: lock cmpxchg BYTE PTR [e{{[^a].}}],{{[^a]}}l |
720 ; CHECK-NEXT: cmpxchg byte ptr [e{{[^a].}}], {{[^a]}}l | |
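; lock cmpxchg hard-codes the accumulator: al/ax/eax carries the expected
; value in and the observed value out, hence the requirement above that
; neither the address nor the desired operand lands in eax. A minimal sketch
; (ecx and dl illustrative):
;   mov  eax,esi                   ; expected
;   lock cmpxchg BYTE PTR [ecx],dl ; dl holds desired; ZF set on success
;   movzx eax,al                   ; al now holds the observed old value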
721 | 683 |
722 define i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) { | 684 define i32 @test_atomic_cmpxchg_16(i32 %iptr, i32 %expected, i32 %desired) { |
723 entry: | 685 entry: |
724 %trunc_exp = trunc i32 %expected to i16 | 686 %trunc_exp = trunc i32 %expected to i16 |
725 %trunc_des = trunc i32 %desired to i16 | 687 %trunc_des = trunc i32 %desired to i16 |
726 %ptr = inttoptr i32 %iptr to i16* | 688 %ptr = inttoptr i32 %iptr to i16* |
727 %old = call i16 @llvm.nacl.atomic.cmpxchg.i16(i16* %ptr, i16 %trunc_exp, | 689 %old = call i16 @llvm.nacl.atomic.cmpxchg.i16(i16* %ptr, i16 %trunc_exp, |
728 i16 %trunc_des, i32 6, i32 6) | 690 i16 %trunc_des, i32 6, i32 6) |
729 %old_ext = zext i16 %old to i32 | 691 %old_ext = zext i16 %old to i32 |
730 ret i32 %old_ext | 692 ret i32 %old_ext |
731 } | 693 } |
732 ; CHECK-LABEL: test_atomic_cmpxchg_16 | 694 ; CHECK-LABEL: test_atomic_cmpxchg_16 |
733 ; CHECK: mov eax, {{.*}} | 695 ; CHECK: mov eax,{{.*}} |
734 ; CHECK: lock | 696 ; CHECK: lock cmpxchg WORD PTR [e{{[^a].}}],{{[^a]}}x |
735 ; CHECK-NEXT: cmpxchg word ptr [e{{[^a].}}], {{[^a]}}x | |
736 | 697 |
737 define i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) { | 698 define i32 @test_atomic_cmpxchg_32(i32 %iptr, i32 %expected, i32 %desired) { |
738 entry: | 699 entry: |
739 %ptr = inttoptr i32 %iptr to i32* | 700 %ptr = inttoptr i32 %iptr to i32* |
740 %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected, | 701 %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected, |
741 i32 %desired, i32 6, i32 6) | 702 i32 %desired, i32 6, i32 6) |
742 ret i32 %old | 703 ret i32 %old |
743 } | 704 } |
744 ; CHECK-LABEL: test_atomic_cmpxchg_32 | 705 ; CHECK-LABEL: test_atomic_cmpxchg_32 |
745 ; CHECK: mov eax, {{.*}} | 706 ; CHECK: mov eax,{{.*}} |
746 ; CHECK: lock | 707 ; CHECK: lock cmpxchg DWORD PTR [e{{[^a].}}],e{{[^a]}} |
747 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}], e{{[^a]}} | |
748 | 708 |
749 define i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) { | 709 define i64 @test_atomic_cmpxchg_64(i32 %iptr, i64 %expected, i64 %desired) { |
750 entry: | 710 entry: |
751 %ptr = inttoptr i32 %iptr to i64* | 711 %ptr = inttoptr i32 %iptr to i64* |
752 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, | 712 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, |
753 i64 %desired, i32 6, i32 6) | 713 i64 %desired, i32 6, i32 6) |
754 ret i64 %old | 714 ret i64 %old |
755 } | 715 } |
756 ; CHECK-LABEL: test_atomic_cmpxchg_64 | 716 ; CHECK-LABEL: test_atomic_cmpxchg_64 |
757 ; CHECK: push ebx | 717 ; CHECK: push ebx |
758 ; CHECK-DAG: mov edx | 718 ; CHECK-DAG: mov edx |
759 ; CHECK-DAG: mov eax | 719 ; CHECK-DAG: mov eax |
760 ; CHECK-DAG: mov ecx | 720 ; CHECK-DAG: mov ecx |
761 ; CHECK-DAG: mov ebx | 721 ; CHECK-DAG: mov ebx |
762 ; CHECK: lock | 722 ; CHECK: lock cmpxchg8b QWORD PTR [e{{.[^x]}}+0x0] |
763 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] | |
764 ; edx and eax are already the return registers, so they don't actually | 723 ; edx and eax are already the return registers, so they don't actually |
765 ; need to be reshuffled via movs. The next test stores the result | 724 ; need to be reshuffled via movs. The next test stores the result |
766 ; somewhere, so in that case they do need to be mov'ed. | 725 ; somewhere, so in that case they do need to be mov'ed. |
767 | 726 |
768 ; Test a case where %old really does need to be copied out of edx:eax. | 727 ; Test a case where %old really does need to be copied out of edx:eax. |
769 define void @test_atomic_cmpxchg_64_store(i32 %ret_iptr, i32 %iptr, i64 %expected, i64 %desired) { | 728 define void @test_atomic_cmpxchg_64_store(i32 %ret_iptr, i32 %iptr, i64 %expected, i64 %desired) { |
770 entry: | 729 entry: |
771 %ptr = inttoptr i32 %iptr to i64* | 730 %ptr = inttoptr i32 %iptr to i64* |
772 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, | 731 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, |
773 i64 %desired, i32 6, i32 6) | 732 i64 %desired, i32 6, i32 6) |
774 %__6 = inttoptr i32 %ret_iptr to i64* | 733 %__6 = inttoptr i32 %ret_iptr to i64* |
775 store i64 %old, i64* %__6, align 1 | 734 store i64 %old, i64* %__6, align 1 |
776 ret void | 735 ret void |
777 } | 736 } |
778 ; CHECK-LABEL: test_atomic_cmpxchg_64_store | 737 ; CHECK-LABEL: test_atomic_cmpxchg_64_store |
779 ; CHECK: push ebx | 738 ; CHECK: push ebx |
780 ; CHECK-DAG: mov edx | 739 ; CHECK-DAG: mov edx |
781 ; CHECK-DAG: mov eax | 740 ; CHECK-DAG: mov eax |
782 ; CHECK-DAG: mov ecx | 741 ; CHECK-DAG: mov ecx |
783 ; CHECK-DAG: mov ebx | 742 ; CHECK-DAG: mov ebx |
784 ; CHECK: lock | 743 ; CHECK: lock cmpxchg8b QWORD PTR [e{{.[^x]}} |
785 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] | 744 ; CHECK-DAG: mov {{.*}},edx |
786 ; CHECK-DAG: mov {{.*}}, edx | 745 ; CHECK-DAG: mov {{.*}},eax |
787 ; CHECK-DAG: mov {{.*}}, eax | |
788 | 746 |
789 ; Test with some more register pressure. When we have an alloca, ebp is | 747 ; Test with some more register pressure. When we have an alloca, ebp is |
790 ; used to manage the stack frame, so it cannot be used as a register either. | 748 ; used to manage the stack frame, so it cannot be used as a register either. |
791 define i64 @test_atomic_cmpxchg_64_alloca(i32 %iptr, i64 %expected, i64 %desired) { | 749 define i64 @test_atomic_cmpxchg_64_alloca(i32 %iptr, i64 %expected, i64 %desired) { |
792 entry: | 750 entry: |
793 %alloca_ptr = alloca i8, i32 16, align 16 | 751 %alloca_ptr = alloca i8, i32 16, align 16 |
794 %ptr = inttoptr i32 %iptr to i64* | 752 %ptr = inttoptr i32 %iptr to i64* |
795 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, | 753 %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, |
796 i64 %desired, i32 6, i32 6) | 754 i64 %desired, i32 6, i32 6) |
797 store i8 0, i8* %alloca_ptr, align 1 | 755 store i8 0, i8* %alloca_ptr, align 1 |
798 store i8 1, i8* %alloca_ptr, align 1 | 756 store i8 1, i8* %alloca_ptr, align 1 |
799 store i8 2, i8* %alloca_ptr, align 1 | 757 store i8 2, i8* %alloca_ptr, align 1 |
800 store i8 3, i8* %alloca_ptr, align 1 | 758 store i8 3, i8* %alloca_ptr, align 1 |
801 %__6 = ptrtoint i8* %alloca_ptr to i32 | 759 %__6 = ptrtoint i8* %alloca_ptr to i32 |
802 call void @use_ptr(i32 %__6) | 760 call void @use_ptr(i32 %__6) |
803 ret i64 %old | 761 ret i64 %old |
804 } | 762 } |
805 ; CHECK-LABEL: test_atomic_cmpxchg_64_alloca | 763 ; CHECK-LABEL: test_atomic_cmpxchg_64_alloca |
806 ; CHECK: push ebx | 764 ; CHECK: push ebx |
807 ; CHECK-DAG: mov edx | 765 ; CHECK-DAG: mov edx |
808 ; CHECK-DAG: mov eax | 766 ; CHECK-DAG: mov eax |
809 ; CHECK-DAG: mov ecx | 767 ; CHECK-DAG: mov ecx |
810 ; CHECK-DAG: mov ebx | 768 ; CHECK-DAG: mov ebx |
811 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired). | 769 ; Ptr cannot be eax, ebx, ecx, or edx (used up for the expected and desired). |
812 ; It also cannot be ebp since we use that for alloca. Also make sure it's | 770 ; It also cannot be ebp since we use that for alloca. Also make sure it's |
813 ; not esp, since that's the stack pointer and mucking with it will break | 771 ; not esp, since that's the stack pointer and mucking with it will break |
814 ; the later use_ptr function call. | 772 ; the later use_ptr function call. |
815 ; That pretty much leaves esi or edi as the only viable registers. | 773 ; That pretty much leaves esi or edi as the only viable registers. |
816 ; CHECK: lock | 774 ; CHECK: lock cmpxchg8b QWORD PTR [e{{[ds]}}i] |
817 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{[ds]}}i] | 775 ; CHECK: call {{.*}} R_{{.*}} use_ptr |
818 ; CHECK: call use_ptr | |
819 | 776 |
820 define i32 @test_atomic_cmpxchg_32_ignored(i32 %iptr, i32 %expected, i32 %desired) { | 777 define i32 @test_atomic_cmpxchg_32_ignored(i32 %iptr, i32 %expected, i32 %desired) { |
821 entry: | 778 entry: |
822 %ptr = inttoptr i32 %iptr to i32* | 779 %ptr = inttoptr i32 %iptr to i32* |
823 %ignored = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected, | 780 %ignored = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected, |
824 i32 %desired, i32 6, i32 6) | 781 i32 %desired, i32 6, i32 6) |
825 ret i32 0 | 782 ret i32 0 |
826 } | 783 } |
827 ; CHECK-LABEL: test_atomic_cmpxchg_32_ignored | 784 ; CHECK-LABEL: test_atomic_cmpxchg_32_ignored |
828 ; CHECK: mov eax, {{.*}} | 785 ; CHECK: mov eax,{{.*}} |
829 ; CHECK: lock | 786 ; CHECK: lock cmpxchg DWORD PTR [e{{[^a].}}] |
830 ; CHECK-NEXT: cmpxchg dword ptr [e{{[^a].}}] | |
831 | 787 |
832 define i64 @test_atomic_cmpxchg_64_ignored(i32 %iptr, i64 %expected, i64 %desired) { | 788 define i64 @test_atomic_cmpxchg_64_ignored(i32 %iptr, i64 %expected, i64 %desired) { |
833 entry: | 789 entry: |
834 %ptr = inttoptr i32 %iptr to i64* | 790 %ptr = inttoptr i32 %iptr to i64* |
835 %ignored = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, | 791 %ignored = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, |
836 i64 %desired, i32 6, i32 6) | 792 i64 %desired, i32 6, i32 6) |
837 ret i64 0 | 793 ret i64 0 |
838 } | 794 } |
839 ; CHECK-LABEL: test_atomic_cmpxchg_64_ignored | 795 ; CHECK-LABEL: test_atomic_cmpxchg_64_ignored |
840 ; CHECK: push ebx | 796 ; CHECK: push ebx |
841 ; CHECK-DAG: mov edx | 797 ; CHECK-DAG: mov edx |
842 ; CHECK-DAG: mov eax | 798 ; CHECK-DAG: mov eax |
843 ; CHECK-DAG: mov ecx | 799 ; CHECK-DAG: mov ecx |
844 ; CHECK-DAG: mov ebx | 800 ; CHECK-DAG: mov ebx |
845 ; CHECK: lock | 801 ; CHECK: lock cmpxchg8b QWORD PTR [e{{.[^x]}}+0x0] |
846 ; CHECK-NEXT: cmpxchg8b qword ptr [e{{.[^x]}}] | |
847 | 802 |
848 ;;;; Fence and is-lock-free. | 803 ;;;; Fence and is-lock-free. |
849 | 804 |
850 define void @test_atomic_fence() { | 805 define void @test_atomic_fence() { |
851 entry: | 806 entry: |
852 call void @llvm.nacl.atomic.fence(i32 6) | 807 call void @llvm.nacl.atomic.fence(i32 6) |
853 ret void | 808 ret void |
854 } | 809 } |
855 ; CHECK-LABEL: test_atomic_fence | 810 ; CHECK-LABEL: test_atomic_fence |
856 ; CHECK: mfence | 811 ; CHECK: mfence |
857 | 812 |
858 define void @test_atomic_fence_all() { | 813 define void @test_atomic_fence_all() { |
859 entry: | 814 entry: |
860 call void @llvm.nacl.atomic.fence.all() | 815 call void @llvm.nacl.atomic.fence.all() |
861 ret void | 816 ret void |
862 } | 817 } |
863 ; CHECK-LABEL: test_atomic_fence_all | 818 ; CHECK-LABEL: test_atomic_fence_all |
864 ; CHECK: mfence | 819 ; CHECK: mfence |
865 | 820 |
866 define i32 @test_atomic_is_lock_free(i32 %iptr) { | 821 define i32 @test_atomic_is_lock_free(i32 %iptr) { |
867 entry: | 822 entry: |
868 %ptr = inttoptr i32 %iptr to i8* | 823 %ptr = inttoptr i32 %iptr to i8* |
869 %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr) | 824 %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr) |
870 %r = zext i1 %i to i32 | 825 %r = zext i1 %i to i32 |
871 ret i32 %r | 826 ret i32 %r |
872 } | 827 } |
873 ; CHECK-LABEL: test_atomic_is_lock_free | 828 ; CHECK-LABEL: test_atomic_is_lock_free |
874 ; CHECK: mov {{.*}}, 1 | 829 ; CHECK: mov {{.*}},0x1 |
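; Because the size operand is a compile-time constant, is.lock.free folds to
; an immediate: supported sizes (the 4 above) become 0x1 and unsupported ones
; (the 7 below) become 0x0, with no runtime test emitted.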
875 | 830 |
876 define i32 @test_not_lock_free(i32 %iptr) { | 831 define i32 @test_not_lock_free(i32 %iptr) { |
877 entry: | 832 entry: |
878 %ptr = inttoptr i32 %iptr to i8* | 833 %ptr = inttoptr i32 %iptr to i8* |
879 %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 7, i8* %ptr) | 834 %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 7, i8* %ptr) |
880 %r = zext i1 %i to i32 | 835 %r = zext i1 %i to i32 |
881 ret i32 %r | 836 ret i32 %r |
882 } | 837 } |
883 ; CHECK-LABEL: test_not_lock_free | 838 ; CHECK-LABEL: test_not_lock_free |
884 ; CHECK: mov {{.*}}, 0 | 839 ; CHECK: mov {{.*}},0x0 |
885 | 840 |
886 define i32 @test_atomic_is_lock_free_ignored(i32 %iptr) { | 841 define i32 @test_atomic_is_lock_free_ignored(i32 %iptr) { |
887 entry: | 842 entry: |
888 %ptr = inttoptr i32 %iptr to i8* | 843 %ptr = inttoptr i32 %iptr to i8* |
889 %ignored = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr) | 844 %ignored = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr) |
890 ret i32 0 | 845 ret i32 0 |
891 } | 846 } |
892 ; CHECK-LABEL: test_atomic_is_lock_free_ignored | 847 ; CHECK-LABEL: test_atomic_is_lock_free_ignored |
893 ; CHECK: mov {{.*}}, 0 | 848 ; CHECK: mov {{.*}},0x0 |
894 ; This can get optimized out, because it's side-effect-free. | 849 ; This can get optimized out, because it's side-effect-free. |
895 ; CHECKO2-LABEL: test_atomic_is_lock_free_ignored | 850 ; O2-LABEL: test_atomic_is_lock_free_ignored |
896 ; CHECKO2-NOT: mov {{.*}}, 1 | 851 ; O2-NOT: mov {{.*}},0x1 |
897 ; CHECKO2: mov {{.*}}, 0 | 852 ; O2: mov {{.*}},0x0 |
898 | 853 |
899 ; TODO(jvoung): at some point we can take advantage of the | 854 ; TODO(jvoung): at some point we can take advantage of the |
900 ; fact that nacl.atomic.is.lock.free will resolve to a constant | 855 ; fact that nacl.atomic.is.lock.free will resolve to a constant |
901 ; (which adds DCE opportunities). Once we optimize, the test expectations | 856 ; (which adds DCE opportunities). Once we optimize, the test expectations |
902 ; for this case should change. | 857 ; for this case should change. |
903 define i32 @test_atomic_is_lock_free_can_dce(i32 %iptr, i32 %x, i32 %y) { | 858 define i32 @test_atomic_is_lock_free_can_dce(i32 %iptr, i32 %x, i32 %y) { |
904 entry: | 859 entry: |
905 %ptr = inttoptr i32 %iptr to i8* | 860 %ptr = inttoptr i32 %iptr to i8* |
906 %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr) | 861 %i = call i1 @llvm.nacl.atomic.is.lock.free(i32 4, i8* %ptr) |
907 %i_ext = zext i1 %i to i32 | 862 %i_ext = zext i1 %i to i32 |
908 %cmp = icmp eq i32 %i_ext, 1 | 863 %cmp = icmp eq i32 %i_ext, 1 |
909 br i1 %cmp, label %lock_free, label %not_lock_free | 864 br i1 %cmp, label %lock_free, label %not_lock_free |
910 lock_free: | 865 lock_free: |
911 ret i32 %i_ext | 866 ret i32 %i_ext |
912 | 867 |
913 not_lock_free: | 868 not_lock_free: |
914 %z = add i32 %x, %y | 869 %z = add i32 %x, %y |
915 ret i32 %z | 870 ret i32 %z |
916 } | 871 } |
917 ; CHECK-LABEL: test_atomic_is_lock_free_can_dce | 872 ; CHECK-LABEL: test_atomic_is_lock_free_can_dce |
918 ; CHECK: mov {{.*}}, 1 | 873 ; CHECK: mov {{.*}},0x1 |
919 ; CHECK: ret | 874 ; CHECK: ret |
920 ; CHECK: add | 875 ; CHECK: add |
921 ; CHECK: ret | 876 ; CHECK: ret |