OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2016 the V8 project authors. All rights reserved. | |
2 // Redistribution and use in source and binary forms, with or without | |
3 // modification, are permitted provided that the following conditions are | |
4 // met: | |
5 // | |
6 // * Redistributions of source code must retain the above copyright | |
7 // notice, this list of conditions and the following disclaimer. | |
8 // * Redistributions in binary form must reproduce the above | |
9 // copyright notice, this list of conditions and the following | |
10 // disclaimer in the documentation and/or other materials provided | |
11 // with the distribution. | |
12 // * Neither the name of Google Inc. nor the names of its | |
13 // contributors may be used to endorse or promote products derived | |
14 // from this software without specific prior written permission. | |
15 // | |
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
27 | |
28 #include "src/v8.h" | |
29 #include "test/cctest/cctest.h" | |
30 | |
31 #include "src/arm/simulator-arm.h" | |
32 #include "src/disassembler.h" | |
33 #include "src/factory.h" | |
34 #include "src/macro-assembler.h" | |
35 | |
36 #if defined(USE_SIMULATOR) | |
37 | |
38 #ifndef V8_TARGET_LITTLE_ENDIAN | |
39 #error Expected ARM to be little-endian | |
40 #endif | |
41 | |
42 using namespace v8::base; | |
43 using namespace v8::internal; | |
44 | |
45 // Define these function prototypes to match JSEntryFunction in execution.cc. | |
46 typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4); | |
47 typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4); | |
48 | |
49 #define __ assm. | |
50 | |
// Describes one memory operation to be assembled into a generated test
// routine: the kind of access (plain/exclusive load or store, or none), its
// width, the byte offset from the TestData base, and — for stores — the
// immediate value to write.
struct MemoryAccess {
  enum class Kind {
    None,
    Load,
    LoadExcl,
    Store,
    StoreExcl,
  };

  enum class Size {
    Byte,
    HalfWord,
    Word,
  };

  MemoryAccess() {}
  MemoryAccess(Kind kind, Size size, size_t offset, int value = 0)
      : kind(kind), size(size), offset(offset), value(value) {}

  // All members are initialized here so that a default-constructed
  // MemoryAccess (used as "no access") has no indeterminate fields; instances
  // are passed and copied by value throughout the tests.
  Kind kind = Kind::None;
  Size size = Size::Byte;
  size_t offset = 0;
  int value = 0;
};
75 | |
// Target block for the generated memory accesses. The anonymous union lets a
// single 32-bit slot be read/written at byte, half-word or word width; |dummy|
// provides a second, distinct address within the same structure for the
// "different address" test cases.
struct TestData {
  // |dummy| is zero-initialized so the struct has no indeterminate bytes;
  // the tests only use its address (via offsetof), not its initial value.
  explicit TestData(int w) : w(w), dummy(0) {}

  union {
    int32_t w;
    int16_t h;
    int8_t b;
  };
  int dummy;
};
86 | |
87 static void AssembleMemoryAccess(Assembler* assembler, MemoryAccess access, | |
88 Register addr_reg, Register value_reg, | |
jbramley
2017/01/04 17:45:41
I think it would be more natural to order them lik
binji
2017/01/04 20:35:53
Done.
| |
89 Register dest_reg = r0) { | |
jbramley
2017/01/04 17:45:41
Shouldn't the default be no_reg?
binji
2017/01/04 20:35:53
I ended up just removing the default and using an
| |
90 Assembler& assm = *assembler; | |
91 __ add(addr_reg, r0, Operand(access.offset)); | |
92 | |
93 switch (access.kind) { | |
94 case MemoryAccess::Kind::None: | |
95 break; | |
96 | |
97 case MemoryAccess::Kind::Load: | |
98 switch (access.size) { | |
99 case MemoryAccess::Size::Byte: | |
100 __ ldrb(value_reg, MemOperand(addr_reg)); | |
101 break; | |
102 | |
103 case MemoryAccess::Size::HalfWord: | |
104 __ ldrh(value_reg, MemOperand(addr_reg)); | |
105 break; | |
106 | |
107 case MemoryAccess::Size::Word: | |
108 __ ldr(value_reg, MemOperand(addr_reg)); | |
109 break; | |
110 } | |
111 break; | |
112 | |
113 case MemoryAccess::Kind::LoadExcl: | |
114 switch (access.size) { | |
115 case MemoryAccess::Size::Byte: | |
116 __ ldrexb(value_reg, addr_reg); | |
117 break; | |
118 | |
119 case MemoryAccess::Size::HalfWord: | |
120 __ ldrexh(value_reg, addr_reg); | |
121 break; | |
122 | |
123 case MemoryAccess::Size::Word: | |
124 __ ldrex(value_reg, addr_reg); | |
125 break; | |
126 } | |
127 break; | |
128 | |
129 case MemoryAccess::Kind::Store: | |
130 switch (access.size) { | |
131 case MemoryAccess::Size::Byte: | |
132 __ mov(value_reg, Operand(access.value)); | |
133 __ strb(value_reg, MemOperand(addr_reg)); | |
134 break; | |
135 | |
136 case MemoryAccess::Size::HalfWord: | |
137 __ mov(value_reg, Operand(access.value)); | |
138 __ strh(value_reg, MemOperand(addr_reg)); | |
139 break; | |
140 | |
141 case MemoryAccess::Size::Word: | |
142 __ mov(value_reg, Operand(access.value)); | |
143 __ str(value_reg, MemOperand(addr_reg)); | |
144 break; | |
145 } | |
146 break; | |
147 | |
148 case MemoryAccess::Kind::StoreExcl: | |
149 switch (access.size) { | |
150 case MemoryAccess::Size::Byte: | |
151 __ mov(value_reg, Operand(access.value)); | |
152 __ strexb(dest_reg, value_reg, addr_reg); | |
153 break; | |
154 | |
155 case MemoryAccess::Size::HalfWord: | |
156 __ mov(value_reg, Operand(access.value)); | |
157 __ strexh(dest_reg, value_reg, addr_reg); | |
158 break; | |
159 | |
160 case MemoryAccess::Size::Word: | |
161 __ mov(value_reg, Operand(access.value)); | |
162 __ strex(dest_reg, value_reg, addr_reg); | |
163 break; | |
164 } | |
165 break; | |
166 } | |
167 } | |
168 | |
169 static void TestInvalidateExclusiveAccess( | |
170 TestData initial_data, MemoryAccess access1, MemoryAccess access2, | |
171 MemoryAccess access3, int expected_res, TestData expected_data) { | |
172 Isolate* isolate = CcTest::i_isolate(); | |
173 HandleScope scope(isolate); | |
174 | |
175 Assembler assm(isolate, NULL, 0); | |
176 | |
177 DCHECK(access1.kind == MemoryAccess::Kind::LoadExcl); | |
178 DCHECK(access3.kind == MemoryAccess::Kind::StoreExcl); | |
179 | |
180 AssembleMemoryAccess(&assm, access1, r1, r1); | |
181 AssembleMemoryAccess(&assm, access2, r1, r2, r3); | |
182 AssembleMemoryAccess(&assm, access3, r1, r3, r0); | |
183 | |
184 __ mov(pc, Operand(lr)); | |
185 | |
186 CodeDesc desc; | |
187 assm.GetCode(&desc); | |
188 Handle<Code> code = isolate->factory()->NewCode( | |
189 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | |
190 F3 f = FUNCTION_CAST<F3>(code->entry()); | |
191 TestData t = initial_data; | |
192 | |
193 int res = | |
194 reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0)); | |
195 CHECK_EQ(expected_res, res); | |
196 switch (access3.size) { | |
197 case MemoryAccess::Size::Byte: | |
198 CHECK_EQ(expected_data.b, t.b); | |
199 break; | |
200 | |
201 case MemoryAccess::Size::HalfWord: | |
202 CHECK_EQ(expected_data.h, t.h); | |
203 break; | |
204 | |
205 case MemoryAccess::Size::Word: | |
206 CHECK_EQ(expected_data.w, t.w); | |
207 break; | |
208 } | |
209 } | |
210 | |
211 TEST(simulator_invalidate_exclusive_access) { | |
212 using Kind = MemoryAccess::Kind; | |
213 using Size = MemoryAccess::Size; | |
214 | |
215 MemoryAccess ldrex_w(Kind::LoadExcl, Size::Word, offsetof(TestData, w)); | |
216 MemoryAccess strex_w(Kind::StoreExcl, Size::Word, offsetof(TestData, w), 7); | |
217 | |
218 // Address mismatch. | |
219 TestInvalidateExclusiveAccess( | |
220 TestData(1), ldrex_w, | |
221 MemoryAccess(Kind::LoadExcl, Size::Word, offsetof(TestData, dummy)), | |
222 strex_w, 1, TestData(1)); | |
223 | |
224 // Size mismatch. | |
225 TestInvalidateExclusiveAccess( | |
226 TestData(1), ldrex_w, MemoryAccess(), | |
227 MemoryAccess(Kind::StoreExcl, Size::HalfWord, offsetof(TestData, w), 7), | |
228 1, TestData(1)); | |
229 | |
230 // Load between ldrex/strex. | |
231 TestInvalidateExclusiveAccess( | |
232 TestData(1), ldrex_w, | |
233 MemoryAccess(Kind::Load, Size::Word, offsetof(TestData, dummy)), strex_w, | |
234 1, TestData(1)); | |
235 | |
236 // Store between ldrex/strex. | |
237 TestInvalidateExclusiveAccess( | |
238 TestData(1), ldrex_w, | |
239 MemoryAccess(Kind::Store, Size::Word, offsetof(TestData, dummy)), strex_w, | |
240 1, TestData(1)); | |
241 | |
242 // Match | |
243 TestInvalidateExclusiveAccess(TestData(1), ldrex_w, MemoryAccess(), strex_w, | |
244 0, TestData(7)); | |
245 } | |
246 | |
247 static int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data, | |
248 MemoryAccess access) { | |
249 HandleScope scope(isolate); | |
250 Assembler assm(isolate, NULL, 0); | |
251 AssembleMemoryAccess(&assm, access, r1, r2, r0); | |
252 __ mov(pc, Operand(lr)); | |
jbramley
2017/01/04 17:45:41
"bx(lr)" would be better.
binji
2017/01/04 20:35:53
Done.
| |
253 | |
254 CodeDesc desc; | |
255 assm.GetCode(&desc); | |
256 Handle<Code> code = isolate->factory()->NewCode( | |
257 desc, Code::ComputeFlags(Code::STUB), Handle<Code>()); | |
258 F3 f = FUNCTION_CAST<F3>(code->entry()); | |
259 | |
260 return reinterpret_cast<int>( | |
261 CALL_GENERATED_CODE(isolate, f, test_data, 0, 0, 0, 0)); | |
262 } | |
263 | |
264 class MemoryAccessThread : public v8::base::Thread { | |
265 public: | |
266 MemoryAccessThread() | |
267 : Thread(Options("MemoryAccessThread")), | |
268 test_data_(NULL), | |
269 is_finished_(false), | |
270 has_request_(false), | |
271 did_request_(false) {} | |
272 | |
273 virtual void Run() { | |
274 v8::Isolate::CreateParams create_params; | |
275 create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); | |
276 v8::Isolate* isolate = v8::Isolate::New(create_params); | |
277 Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate); | |
278 v8::Isolate::Scope scope(isolate); | |
279 | |
280 v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_); | |
281 while (!is_finished_) { | |
282 while (!(has_request_ || is_finished_)) { | |
283 has_request_cv_.Wait(&mutex_); | |
284 } | |
285 | |
286 if (is_finished_) { | |
287 break; | |
288 } | |
289 | |
290 ExecuteMemoryAccess(i_isolate, test_data_, access_); | |
291 has_request_ = false; | |
292 did_request_ = true; | |
293 did_request_cv_.NotifyOne(); | |
294 } | |
295 } | |
296 | |
297 void NextAndWait(TestData* test_data, MemoryAccess access) { | |
298 DCHECK(!has_request_); | |
299 v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_); | |
300 test_data_ = test_data; | |
301 access_ = access; | |
302 has_request_ = true; | |
303 has_request_cv_.NotifyOne(); | |
304 while (!did_request_) { | |
305 did_request_cv_.Wait(&mutex_); | |
306 } | |
307 did_request_ = false; | |
308 } | |
309 | |
310 void Finish() { | |
311 v8::base::LockGuard<v8::base::Mutex> lock_guard(&mutex_); | |
312 is_finished_ = true; | |
313 has_request_cv_.NotifyOne(); | |
314 } | |
315 | |
316 private: | |
317 TestData* test_data_; | |
318 MemoryAccess access_; | |
319 bool is_finished_; | |
320 bool has_request_; | |
321 bool did_request_; | |
322 v8::base::Mutex mutex_; | |
323 v8::base::ConditionVariable has_request_cv_; | |
324 v8::base::ConditionVariable did_request_cv_; | |
325 }; | |
326 | |
327 TEST(simulator_invalidate_exclusive_access_threaded) { | |
328 using Kind = MemoryAccess::Kind; | |
329 using Size = MemoryAccess::Size; | |
330 | |
331 Isolate* isolate = CcTest::i_isolate(); | |
332 HandleScope scope(isolate); | |
333 | |
334 TestData test_data(1); | |
335 | |
336 MemoryAccessThread thread; | |
337 thread.Start(); | |
338 | |
339 MemoryAccess ldrex_w(Kind::LoadExcl, Size::Word, offsetof(TestData, w)); | |
340 MemoryAccess strex_w(Kind::StoreExcl, Size::Word, offsetof(TestData, w), 7); | |
341 | |
342 // Exclusive store completed by another thread first. | |
343 test_data = TestData(1); | |
344 thread.NextAndWait(&test_data, MemoryAccess(Kind::LoadExcl, Size::Word, | |
345 offsetof(TestData, w))); | |
346 ExecuteMemoryAccess(isolate, &test_data, ldrex_w); | |
347 thread.NextAndWait(&test_data, MemoryAccess(Kind::StoreExcl, Size::Word, | |
348 offsetof(TestData, w), 5)); | |
349 CHECK_EQ(1, ExecuteMemoryAccess(isolate, &test_data, strex_w)); | |
350 CHECK_EQ(5, test_data.w); | |
351 | |
352 // Exclusive store completed by another thread; different address, but masked | |
353 // to same | |
354 test_data = TestData(1); | |
355 ExecuteMemoryAccess(isolate, &test_data, ldrex_w); | |
356 thread.NextAndWait(&test_data, MemoryAccess(Kind::LoadExcl, Size::Word, | |
357 offsetof(TestData, dummy))); | |
358 thread.NextAndWait(&test_data, MemoryAccess(Kind::StoreExcl, Size::Word, | |
359 offsetof(TestData, dummy), 5)); | |
360 CHECK_EQ(1, ExecuteMemoryAccess(isolate, &test_data, strex_w)); | |
361 CHECK_EQ(1, test_data.w); | |
362 | |
363 // Test failure when store between ldrex/strex. | |
364 test_data = TestData(1); | |
365 ExecuteMemoryAccess(isolate, &test_data, ldrex_w); | |
366 thread.NextAndWait(&test_data, MemoryAccess(Kind::Store, Size::Word, | |
367 offsetof(TestData, dummy))); | |
368 CHECK_EQ(1, ExecuteMemoryAccess(isolate, &test_data, strex_w)); | |
369 CHECK_EQ(1, test_data.w); | |
370 | |
371 thread.Finish(); | |
372 thread.Join(); | |
373 } | |
374 | |
375 #undef __ | |
376 | |
377 #endif // USE_SIMULATOR | |
OLD | NEW |