Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(179)

Side by Side Diff: src/wasm/wasm-interpreter.cc

Issue 1972153002: [wasm] Implement an interpreter for WASM. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: stupid skip Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/wasm/wasm-interpreter.h"
6 #include "src/wasm/ast-decoder.h"
7 #include "src/wasm/decoder.h"
8 #include "src/wasm/wasm-external-refs.h"
9 #include "src/wasm/wasm-module.h"
10
11 #include "src/base/accounting-allocator.h"
12 #include "src/zone-containers.h"
13
14 namespace v8 {
15 namespace internal {
16 namespace wasm {
17
// Tracing macro for the interpreter: in debug builds, prints when
// --trace-wasm-interpreter is set; compiled out entirely otherwise.
#if DEBUG
#define TRACE(...)                                        \
  do {                                                    \
    if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
  } while (false)
#else
#define TRACE(...)
#endif
26
27 #define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF)
28
29 #define FOREACH_SIMPLE_BINOP(V) \
30 V(I32Add, uint32_t, +) \
31 V(I32Sub, uint32_t, -) \
32 V(I32Mul, uint32_t, *) \
33 V(I32And, uint32_t, &) \
34 V(I32Ior, uint32_t, |) \
35 V(I32Xor, uint32_t, ^) \
36 V(I32Eq, uint32_t, ==) \
37 V(I32Ne, uint32_t, !=) \
38 V(I32LtU, uint32_t, <) \
39 V(I32LeU, uint32_t, <=) \
40 V(I32GtU, uint32_t, >) \
41 V(I32GeU, uint32_t, >=) \
42 V(I32LtS, int32_t, <) \
43 V(I32LeS, int32_t, <=) \
44 V(I32GtS, int32_t, >) \
45 V(I32GeS, int32_t, >=) \
46 V(I64Add, uint64_t, +) \
47 V(I64Sub, uint64_t, -) \
48 V(I64Mul, uint64_t, *) \
49 V(I64And, uint64_t, &) \
50 V(I64Ior, uint64_t, |) \
51 V(I64Xor, uint64_t, ^) \
52 V(I64Eq, uint64_t, ==) \
53 V(I64Ne, uint64_t, !=) \
54 V(I64LtU, uint64_t, <) \
55 V(I64LeU, uint64_t, <=) \
56 V(I64GtU, uint64_t, >) \
57 V(I64GeU, uint64_t, >=) \
58 V(I64LtS, int64_t, <) \
59 V(I64LeS, int64_t, <=) \
60 V(I64GtS, int64_t, >) \
61 V(I64GeS, int64_t, >=) \
62 V(F32Add, float, +) \
63 V(F32Sub, float, -) \
64 V(F32Mul, float, *) \
65 V(F32Div, float, /) \
66 V(F32Eq, float, ==) \
67 V(F32Ne, float, !=) \
68 V(F32Lt, float, <) \
69 V(F32Le, float, <=) \
70 V(F32Gt, float, >) \
71 V(F32Ge, float, >=) \
72 V(F64Add, double, +) \
73 V(F64Sub, double, -) \
74 V(F64Mul, double, *) \
75 V(F64Div, double, /) \
76 V(F64Eq, double, ==) \
77 V(F64Ne, double, !=) \
78 V(F64Lt, double, <) \
79 V(F64Le, double, <=) \
80 V(F64Gt, double, >) \
81 V(F64Ge, double, >=)
82
83 #define FOREACH_OTHER_BINOP(V) \
84 V(I32DivS, int32_t) \
85 V(I32DivU, uint32_t) \
86 V(I32RemS, int32_t) \
87 V(I32RemU, uint32_t) \
88 V(I32Shl, uint32_t) \
89 V(I32ShrU, uint32_t) \
90 V(I32ShrS, int32_t) \
91 V(I64DivS, int64_t) \
92 V(I64DivU, uint64_t) \
93 V(I64RemS, int64_t) \
94 V(I64RemU, uint64_t) \
95 V(I64Shl, uint64_t) \
96 V(I64ShrU, uint64_t) \
97 V(I64ShrS, int64_t) \
98 V(I32Ror, int32_t) \
99 V(I32Rol, int32_t) \
100 V(I64Ror, int64_t) \
101 V(I64Rol, int64_t) \
102 V(F32Min, float) \
103 V(F32Max, float) \
104 V(F32CopySign, float) \
105 V(F64Min, double) \
106 V(F64Max, double) \
107 V(F64CopySign, double) \
108 V(I32AsmjsDivS, int32_t) \
109 V(I32AsmjsDivU, uint32_t) \
110 V(I32AsmjsRemS, int32_t) \
111 V(I32AsmjsRemU, uint32_t)
112
113 static inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
114 if (b == 0) {
115 *trap = kTrapDivByZero;
116 return 0;
117 }
118 if (b == -1 && a == 0x80000000) {
ahaas 2016/05/24 14:48:26 Use std::numeric_limits<int32_t>::min() here.
titzer 2016/05/24 15:09:52 Done.
119 *trap = kTrapDivUnrepresentable;
120 return 0;
121 }
122 return a / b;
123 }
124
125 static inline uint32_t ExecuteI32DivU(uint32_t a, uint32_t b,
126 TrapReason* trap) {
127 if (b == 0) {
128 *trap = kTrapDivByZero;
129 return 0;
130 }
131 return a / b;
132 }
133
134 static inline int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) {
135 if (b == 0) {
136 *trap = kTrapRemByZero;
137 return 0;
138 }
139 if (b == -1) return 0;
140 return a % b;
141 }
142
143 static inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b,
144 TrapReason* trap) {
145 if (b == 0) {
146 *trap = kTrapRemByZero;
147 return 0;
148 }
149 return a % b;
150 }
151
152 static inline uint32_t ExecuteI32Shl(uint32_t a, uint32_t b, TrapReason* trap) {
153 return a << (b & 0x1f);
154 }
155
156 static inline uint32_t ExecuteI32ShrU(uint32_t a, uint32_t b,
157 TrapReason* trap) {
158 return a >> (b & 0x1f);
159 }
160
161 static inline int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) {
162 return a >> (b & 0x1f);
163 }
164
165 static inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
166 if (b == 0) {
167 *trap = kTrapDivByZero;
168 return 0;
169 }
170 if (b == -1 && a == 0x8000000000000000ULL) {
ahaas 2016/05/24 14:48:26 Use std::numeric_limits<int64_t>::min() here.
titzer 2016/05/24 15:09:52 Done.
171 *trap = kTrapDivUnrepresentable;
172 return 0;
173 }
174 return a / b;
175 }
176
177 static inline uint64_t ExecuteI64DivU(uint64_t a, uint64_t b,
178 TrapReason* trap) {
179 if (b == 0) {
180 *trap = kTrapDivByZero;
181 return 0;
182 }
183 return a / b;
184 }
185
186 static inline int64_t ExecuteI64RemS(int64_t a, int64_t b, TrapReason* trap) {
187 if (b == 0) {
188 *trap = kTrapRemByZero;
189 return 0;
190 }
191 if (b == -1) return 0;
192 return a % b;
193 }
194
195 static inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b,
196 TrapReason* trap) {
197 if (b == 0) {
198 *trap = kTrapRemByZero;
199 return 0;
200 }
201 return a % b;
202 }
203
204 static inline uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) {
205 return a << (b & 0x3f);
206 }
207
208 static inline uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b,
209 TrapReason* trap) {
210 return a >> (b & 0x3f);
211 }
212
213 static inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
214 return a >> (b & 0x3f);
215 }
216
217 static inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
218 uint32_t shift = (b & 0x1f);
219 return (a >> shift) | (a << (32 - shift));
220 }
221
222 static inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
223 uint32_t shift = (b & 0x1f);
224 return (a << shift) | (a >> (32 - shift));
225 }
226
227 static inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
228 uint32_t shift = (b & 0x3f);
229 return (a >> shift) | (a << (64 - shift));
230 }
231
232 static inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
233 uint32_t shift = (b & 0x3f);
234 return (a << shift) | (a >> (64 - shift));
235 }
236
// Turns a (possibly signaling) float NaN into a quiet NaN by probing how
// this platform encodes quietness in bit 22 of the mantissa.
static float quiet(float a) {
  static const uint32_t kSignalingBit = 1 << 22;
  // Inspect the platform's own quiet NaN to learn the bit's polarity.
  uint32_t q = bit_cast<uint32_t>(std::numeric_limits<float>::quiet_NaN());
  if ((q & kSignalingBit) != 0) {
    // On some machines, the signaling bit set indicates it's a quiet NaN.
    return bit_cast<float>(bit_cast<uint32_t>(a) | kSignalingBit);
  } else {
    // On others, the signaling bit set indicates it's a signaling NaN.
    return bit_cast<float>(bit_cast<uint32_t>(a) & ~kSignalingBit);
  }
}
248
// Double-precision counterpart of quiet(float): quietness lives in bit 51
// of the mantissa.
static double quiet(double a) {
  static const uint64_t kSignalingBit = 1ULL << 51;
  // Inspect the platform's own quiet NaN to learn the bit's polarity.
  uint64_t q = bit_cast<uint64_t>(std::numeric_limits<double>::quiet_NaN());
  if ((q & kSignalingBit) != 0) {
    // On some machines, the signaling bit set indicates it's a quiet NaN.
    return bit_cast<double>(bit_cast<uint64_t>(a) | kSignalingBit);
  } else {
    // On others, the signaling bit set indicates it's a signaling NaN.
    return bit_cast<double>(bit_cast<uint64_t>(a) & ~kSignalingBit);
  }
}
260
261 static inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
262 if (std::isnan(a)) return quiet(a);
263 if (std::isnan(b)) return quiet(b);
264 return std::min(a, b);
265 }
266
267 static inline float ExecuteF32Max(float a, float b, TrapReason* trap) {
268 if (std::isnan(a)) return quiet(a);
269 if (std::isnan(b)) return quiet(b);
270 return std::max(a, b);
271 }
272
273 static inline float ExecuteF32CopySign(float a, float b, TrapReason* trap) {
274 return copysignf(a, b);
275 }
276
277 static inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
278 if (std::isnan(a)) return quiet(a);
279 if (std::isnan(b)) return quiet(b);
280 return std::min(a, b);
281 }
282
283 static inline double ExecuteF64Max(double a, double b, TrapReason* trap) {
284 if (std::isnan(a)) return quiet(a);
285 if (std::isnan(b)) return quiet(b);
286 return std::max(a, b);
287 }
288
289 static inline double ExecuteF64CopySign(double a, double b, TrapReason* trap) {
290 return copysign(a, b);
291 }
292
293 static inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b,
294 TrapReason* trap) {
295 if (b == 0) return 0;
296 if (b == -1 && a == 0x80000000) return static_cast<int32_t>(0x80000000);
ahaas 2016/05/24 14:48:27 Use std::numeric_limits<int32_t>::min() here.
titzer 2016/05/24 15:09:52 Done.
297 return a / b;
298 }
299
300 static inline uint32_t ExecuteI32AsmjsDivU(uint32_t a, uint32_t b,
301 TrapReason* trap) {
302 if (b == 0) return 0;
303 return a / b;
304 }
305
306 static inline int32_t ExecuteI32AsmjsRemS(int32_t a, int32_t b,
307 TrapReason* trap) {
308 if (b == 0) return 0;
309 if (b == -1) return 0;
310 return a % b;
311 }
312
313 static inline uint32_t ExecuteI32AsmjsRemU(uint32_t a, uint32_t b,
314 TrapReason* trap) {
315 if (b == 0) return 0;
316 return a % b;
317 }
318
// asm.js float-to-int conversions never trap; they delegate to V8's
// DoubleToInt32/DoubleToUint32 helpers (presumably implementing JavaScript
// ToInt32/ToUint32 wrapping semantics -- confirm against those helpers).
static inline int32_t ExecuteI32AsmjsSConvertF32(float a, TrapReason* trap) {
  return DoubleToInt32(a);
}

static inline uint32_t ExecuteI32AsmjsUConvertF32(float a, TrapReason* trap) {
  return DoubleToUint32(a);
}

static inline int32_t ExecuteI32AsmjsSConvertF64(double a, TrapReason* trap) {
  return DoubleToInt32(a);
}

static inline uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) {
  return DoubleToUint32(a);
}
334
335 #define FOREACH_OTHER_UNOP(V) \
336 V(I32Clz, uint32_t) \
337 V(I32Ctz, uint32_t) \
338 V(I32Popcnt, uint32_t) \
339 V(I32Eqz, uint32_t) \
340 V(I64Clz, uint64_t) \
341 V(I64Ctz, uint64_t) \
342 V(I64Popcnt, uint64_t) \
343 V(I64Eqz, uint64_t) \
344 V(F32Abs, float) \
345 V(F32Neg, float) \
346 V(F32Ceil, float) \
347 V(F32Floor, float) \
348 V(F32Trunc, float) \
349 V(F32NearestInt, float) \
350 V(F32Sqrt, float) \
351 V(F64Abs, double) \
352 V(F64Neg, double) \
353 V(F64Ceil, double) \
354 V(F64Floor, double) \
355 V(F64Trunc, double) \
356 V(F64NearestInt, double) \
357 V(F64Sqrt, double) \
358 V(I32SConvertF32, float) \
359 V(I32SConvertF64, double) \
360 V(I32UConvertF32, float) \
361 V(I32UConvertF64, double) \
362 V(I32ConvertI64, int64_t) \
363 V(I64SConvertF32, float) \
364 V(I64SConvertF64, double) \
365 V(I64UConvertF32, float) \
366 V(I64UConvertF64, double) \
367 V(I64SConvertI32, int32_t) \
368 V(I64UConvertI32, uint32_t) \
369 V(F32SConvertI32, int32_t) \
370 V(F32UConvertI32, uint32_t) \
371 V(F32SConvertI64, int64_t) \
372 V(F32UConvertI64, uint64_t) \
373 V(F32ConvertF64, double) \
374 V(F32ReinterpretI32, int32_t) \
375 V(F64SConvertI32, int32_t) \
376 V(F64UConvertI32, uint32_t) \
377 V(F64SConvertI64, int64_t) \
378 V(F64UConvertI64, uint64_t) \
379 V(F64ConvertF32, float) \
380 V(F64ReinterpretI64, int64_t) \
381 V(I32ReinterpretF32, float) \
382 V(I64ReinterpretF64, double) \
383 V(I32AsmjsSConvertF32, float) \
384 V(I32AsmjsUConvertF32, float) \
385 V(I32AsmjsSConvertF64, double) \
386 V(I32AsmjsUConvertF64, double)
387
// Counts the number of leading zero bits in {val}.
static int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) {
  return base::bits::CountLeadingZeros32(val);
}

// Counts the number of trailing zero bits in {val}.
static uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
  return base::bits::CountTrailingZeros32(val);
}

// Counts the number of set bits in {val} via the external-refs wrapper.
static uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
  return word32_popcnt_wrapper(&val);
}

// Returns 1 if {val} is zero, 0 otherwise.
static inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
  return val == 0 ? 1 : 0;
}

// 64-bit variants of the above.
static int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
  return base::bits::CountLeadingZeros64(val);
}

static inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
  return base::bits::CountTrailingZeros64(val);
}

static inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
  return word64_popcnt_wrapper(&val);
}

// Note: i64.eqz produces an i32 result (1 or 0).
static inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
  return val == 0 ? 1 : 0;
}
419
// Float abs/neg are done with bit operations so they only touch the sign
// bit and leave NaN payloads intact.
static inline float ExecuteF32Abs(float a, TrapReason* trap) {
  return bit_cast<float>(bit_cast<uint32_t>(a) & 0x7fffffff);
}

static inline float ExecuteF32Neg(float a, TrapReason* trap) {
  return bit_cast<float>(bit_cast<uint32_t>(a) ^ 0x80000000);
}

// Rounding and square root defer to the C library single-precision
// functions.
static inline float ExecuteF32Ceil(float a, TrapReason* trap) {
  return ceilf(a);
}

static inline float ExecuteF32Floor(float a, TrapReason* trap) {
  return floorf(a);
}

static inline float ExecuteF32Trunc(float a, TrapReason* trap) {
  return truncf(a);
}

// Rounds to the nearest integer (ties resolved by the current rounding
// mode, per nearbyintf).
static inline float ExecuteF32NearestInt(float a, TrapReason* trap) {
  return nearbyintf(a);
}

static inline float ExecuteF32Sqrt(float a, TrapReason* trap) {
  return sqrtf(a);
}

// Double-precision counterparts: sign-bit operations for abs/neg, C
// library functions for rounding and square root.
static inline double ExecuteF64Abs(double a, TrapReason* trap) {
  return bit_cast<double>(bit_cast<uint64_t>(a) & 0x7fffffffffffffff);
}

static inline double ExecuteF64Neg(double a, TrapReason* trap) {
  return bit_cast<double>(bit_cast<uint64_t>(a) ^ 0x8000000000000000);
}

static inline double ExecuteF64Ceil(double a, TrapReason* trap) {
  return ceil(a);
}

static inline double ExecuteF64Floor(double a, TrapReason* trap) {
  return floor(a);
}

static inline double ExecuteF64Trunc(double a, TrapReason* trap) {
  return trunc(a);
}

static inline double ExecuteF64NearestInt(double a, TrapReason* trap) {
  return nearbyint(a);
}

static inline double ExecuteF64Sqrt(double a, TrapReason* trap) {
  return sqrt(a);
}
475
476 static int32_t ExecuteI32SConvertF32(float a, TrapReason* trap) {
477 if (a < static_cast<float>(INT32_MAX) && a >= static_cast<float>(INT32_MIN)) {
478 return static_cast<int32_t>(a);
479 }
480 *trap = kTrapFloatUnrepresentable;
481 return 0;
482 }
483
484 static int32_t ExecuteI32SConvertF64(double a, TrapReason* trap) {
485 if (a < (static_cast<double>(INT32_MAX) + 1.0) &&
486 a > (static_cast<double>(INT32_MIN) - 1.0)) {
487 return static_cast<int32_t>(a);
488 }
489 *trap = kTrapFloatUnrepresentable;
490 return 0;
491 }
492
493 static uint32_t ExecuteI32UConvertF32(float a, TrapReason* trap) {
494 if (a < (static_cast<float>(UINT32_MAX) + 1.0) && a > -1) {
495 return static_cast<uint32_t>(a);
496 }
497 *trap = kTrapFloatUnrepresentable;
498 return 0;
499 }
500
501 static uint32_t ExecuteI32UConvertF64(double a, TrapReason* trap) {
502 if (a < (static_cast<float>(UINT32_MAX) + 1.0) && a > -1) {
503 return static_cast<uint32_t>(a);
504 }
505 *trap = kTrapFloatUnrepresentable;
506 return 0;
507 }
508
509 static inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
510 return static_cast<uint32_t>(a & 0xFFFFFFFF);
511 }
512
513 static int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
514 int64_t output;
515 if (!float32_to_int64_wrapper(&a, &output)) {
516 *trap = kTrapFloatUnrepresentable;
517 }
518 return output;
519 }
520
521 static int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
522 int64_t output;
523 if (!float64_to_int64_wrapper(&a, &output)) {
524 *trap = kTrapFloatUnrepresentable;
525 }
526 return output;
527 }
528
529 static uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
530 uint64_t output;
531 if (!float32_to_uint64_wrapper(&a, &output)) {
532 *trap = kTrapFloatUnrepresentable;
533 }
534 return output;
535 }
536
537 static uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
538 uint64_t output;
539 if (!float64_to_uint64_wrapper(&a, &output)) {
540 *trap = kTrapFloatUnrepresentable;
541 }
542 return output;
543 }
544
545 static inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
546 return static_cast<int64_t>(a);
547 }
548
549 static inline int64_t ExecuteI64UConvertI32(uint32_t a, TrapReason* trap) {
550 return static_cast<uint64_t>(a);
551 }
552
// Integer->float conversions; always succeed (possibly with rounding) and
// never trap.
static inline float ExecuteF32SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<float>(a);
}

static inline float ExecuteF32UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<float>(a);
}

// 64-bit integer -> float conversions are routed through the external-refs
// wrappers (see wasm-external-refs.h).
static inline float ExecuteF32SConvertI64(int64_t a, TrapReason* trap) {
  float output;
  int64_to_float32_wrapper(&a, &output);
  return output;
}

static inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
  float output;
  uint64_to_float32_wrapper(&a, &output);
  return output;
}

// Double -> float demotion.
static inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
  return static_cast<float>(a);
}

// Reinterpretations copy the bit pattern unchanged between integer and
// floating-point representations.
static inline float ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
  return bit_cast<float>(a);
}

static inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<double>(a);
}

static inline double ExecuteF64UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<double>(a);
}

// 64-bit integer -> double conversions via the external-refs wrappers.
static inline double ExecuteF64SConvertI64(int64_t a, TrapReason* trap) {
  double output;
  int64_to_float64_wrapper(&a, &output);
  return output;
}

static inline double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) {
  double output;
  uint64_to_float64_wrapper(&a, &output);
  return output;
}

// Float -> double promotion.
static inline double ExecuteF64ConvertF32(float a, TrapReason* trap) {
  return static_cast<double>(a);
}

static inline double ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
  return bit_cast<double>(a);
}

static inline int32_t ExecuteI32ReinterpretF32(float a, TrapReason* trap) {
  return bit_cast<int32_t>(a);
}

static inline int64_t ExecuteI64ReinterpretF64(double a, TrapReason* trap) {
  return bit_cast<int64_t>(a);
}
616
// Opcodes used internally by the interpreter (e.g. the breakpoint marker
// patched over an original bytecode); values live above the wasm opcode
// space.
enum InternalOpcode {
#define DECL_INTERNAL_ENUM(name, value) kInternal##name = value,
  FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_ENUM)
#undef DECL_INTERNAL_ENUM
};

// Returns a printable name for either an internal opcode or a regular
// wasm opcode.
static const char* OpcodeName(uint32_t val) {
  switch (val) {
#define DECL_INTERNAL_CASE(name, value) \
  case kInternal##name:                 \
    return "Internal" #name;
    FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_CASE)
#undef DECL_INTERNAL_CASE
  }
  return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val));
}

// Number of interpreter steps executed per Run() slice before re-checking
// the thread state.
static const int kRunSteps = 1000;
635
// A helper class to compute the control transfers for each bytecode offset.
// Control transfers allow Br, BrIf, BrTable, If, Else, and End bytecodes to
// be directly executed without the need to dynamically track blocks.
class ControlTransfers : public ZoneObject {
 public:
  // Maps a bytecode offset to its precomputed {ControlTransfer}.
  ControlTransferMap map_;

  // Performs a single scan over the function body [start..end), skipping
  // the encoded local declarations, and records a transfer entry for every
  // control-flow bytecode.
  ControlTransfers(Zone* zone, size_t locals_encoded_size, const byte* start,
                   const byte* end)
      : map_(zone) {
    // A control reference including from PC, from value depth, and whether
    // a value is explicitly passed (e.g. br/br_if/br_table with value).
    struct CRef {
      const byte* pc;
      sp_t value_depth;
      bool explicit_value;
    };

    // Represents a control flow label.
    struct CLabel : public ZoneObject {
      const byte* target;     // bound target PC, or nullptr while unbound
      size_t value_depth;     // value stack depth at label creation
      ZoneVector<CRef> refs;  // forward references, patched by Bind()

      CLabel(Zone* zone, size_t v)
          : target(nullptr), value_depth(v), refs(zone) {}

      // Bind this label to the given PC.
      void Bind(ControlTransferMap* map, const byte* start, const byte* pc,
                bool expect_value) {
        DCHECK_NULL(target);
        target = pc;
        // Resolve all forward references now that the target is known.
        for (auto from : refs) {
          auto pcdiff = static_cast<pcdiff_t>(target - from.pc);
          auto spdiff = static_cast<spdiff_t>(from.value_depth - value_depth);
          ControlTransfer::StackAction action = ControlTransfer::kNoAction;
          if (expect_value && !from.explicit_value) {
            // The target expects a value but this branch carries none:
            // either synthesize a void value or normalize the stack top.
            action = spdiff == 0 ? ControlTransfer::kPushVoid
                                 : ControlTransfer::kPopAndRepush;
          }
          pc_t offset = static_cast<size_t>(from.pc - start);
          (*map)[offset] = {pcdiff, spdiff, action};
        }
      }

      // Reference this label from the given location.
      void Ref(ControlTransferMap* map, const byte* start, CRef from) {
        DCHECK_GE(from.value_depth, value_depth);
        if (target) {
          // Backward reference (already-bound label, e.g. a loop header):
          // resolve immediately.
          auto pcdiff = static_cast<pcdiff_t>(target - from.pc);
          auto spdiff = static_cast<spdiff_t>(from.value_depth - value_depth);
          pc_t offset = static_cast<size_t>(from.pc - start);
          (*map)[offset] = {pcdiff, spdiff, ControlTransfer::kNoAction};
        } else {
          // Forward reference: remember it until the label is bound.
          refs.push_back(from);
        }
      }
    };

    // An entry in the control stack.
    struct Control {
      const byte* pc;
      CLabel* end_label;   // branch target at the end of the construct
      CLabel* else_label;  // non-null only for an if whose else is pending

      void Ref(ControlTransferMap* map, const byte* start, const byte* from_pc,
               size_t from_value_depth, bool explicit_value) {
        end_label->Ref(map, start, {from_pc, from_value_depth, explicit_value});
      }
    };

    // Compute the ControlTransfer map.
    // This works by maintaining a stack of control constructs similar to the
    // AST decoder. The {control_stack} allows matching {br,br_if,br_table}
    // bytecodes with their target, as well as determining whether the current
    // bytecodes are within the true or false block of an else.
    // The value stack depth is tracked as {value_depth} and is needed to
    // determine how many values to pop off the stack for explicit and
    // implicit control flow.

    std::vector<Control> control_stack;
    size_t value_depth = 0;
    Decoder decoder(start, end);  // for reading operands.
    const byte* pc = start + locals_encoded_size;

    while (pc < end) {
      WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
      TRACE("@%td: control %s (depth = %zu)\n", (pc - start),
            WasmOpcodes::OpcodeName(opcode), value_depth);
      switch (opcode) {
        case kExprBlock: {
          TRACE("control @%td $%zu: Block\n", (pc - start), value_depth);
          CLabel* label = new (zone) CLabel(zone, value_depth);
          control_stack.push_back({pc, label, nullptr});
          break;
        }
        case kExprLoop: {
          TRACE("control @%td $%zu: Loop\n", (pc - start), value_depth);
          // label1 is the break (end) target; label2 is bound here at the
          // loop header and serves as the backward-branch target.
          CLabel* label1 = new (zone) CLabel(zone, value_depth);
          CLabel* label2 = new (zone) CLabel(zone, value_depth);
          control_stack.push_back({pc, label1, nullptr});
          control_stack.push_back({pc, label2, nullptr});
          label2->Bind(&map_, start, pc, false);
          break;
        }
        case kExprIf: {
          TRACE("control @%td $%zu: If\n", (pc - start), value_depth);
          value_depth--;  // the if consumes its condition value
          CLabel* end_label = new (zone) CLabel(zone, value_depth);
          CLabel* else_label = new (zone) CLabel(zone, value_depth);
          control_stack.push_back({pc, end_label, else_label});
          // A false condition jumps to the else label (or past the end if
          // no else appears).
          else_label->Ref(&map_, start, {pc, value_depth, false});
          break;
        }
        case kExprElse: {
          Control* c = &control_stack.back();
          TRACE("control @%td $%zu: Else\n", (pc - start), value_depth);
          // Falling off the true arm jumps over the else arm to the end.
          c->end_label->Ref(&map_, start, {pc, value_depth, false});
          value_depth = c->end_label->value_depth;
          DCHECK_NOT_NULL(c->else_label);
          c->else_label->Bind(&map_, start, pc + 1, false);
          c->else_label = nullptr;
          break;
        }
        case kExprEnd: {
          Control* c = &control_stack.back();
          TRACE("control @%td $%zu: End\n", (pc - start), value_depth);
          if (c->end_label->target) {
            // only loops have bound labels.
            DCHECK_EQ(kExprLoop, *c->pc);
            control_stack.pop_back();
            c = &control_stack.back();
          }
          if (c->else_label) c->else_label->Bind(&map_, start, pc + 1, true);
          c->end_label->Ref(&map_, start, {pc, value_depth, false});
          c->end_label->Bind(&map_, start, pc + 1, true);
          value_depth = c->end_label->value_depth + 1;
          control_stack.pop_back();
          break;
        }
        case kExprBr: {
          BreakDepthOperand operand(&decoder, pc);
          TRACE("control @%td $%zu: Br[arity=%u, depth=%u]\n", (pc - start),
                value_depth, operand.arity, operand.depth);
          value_depth -= operand.arity;
          control_stack[control_stack.size() - operand.depth - 1].Ref(
              &map_, start, pc, value_depth, operand.arity > 0);
          value_depth++;
          break;
        }
        case kExprBrIf: {
          BreakDepthOperand operand(&decoder, pc);
          TRACE("control @%td $%zu: BrIf[arity=%u, depth=%u]\n", (pc - start),
                value_depth, operand.arity, operand.depth);
          // BrIf additionally consumes its condition (hence arity + 1).
          value_depth -= (operand.arity + 1);
          control_stack[control_stack.size() - operand.depth - 1].Ref(
              &map_, start, pc, value_depth, operand.arity > 0);
          value_depth++;
          break;
        }
        case kExprBrTable: {
          BranchTableOperand operand(&decoder, pc);
          TRACE("control @%td $%zu: BrTable[arity=%u count=%u]\n", (pc - start),
                value_depth, operand.arity, operand.table_count);
          value_depth -= (operand.arity + 1);
          // Record one transfer per table entry (including the default),
          // keyed by pc + i so each entry gets a distinct map slot.
          for (uint32_t i = 0; i < operand.table_count + 1; i++) {
            uint32_t target = operand.read_entry(&decoder, i);
            control_stack[control_stack.size() - target - 1].Ref(
                &map_, start, pc + i, value_depth, operand.arity > 0);
          }
          value_depth++;
          break;
        }
        default: {
          // Non-control bytecode: pops its operands and pushes one result.
          value_depth = value_depth - OpcodeArity(pc, end) + 1;
          break;
        }
      }

      pc += OpcodeLength(pc, end);
    }
  }

  // Returns the control transfer recorded for bytecode offset {from};
  // aborts if none was recorded (which would indicate a preprocessing bug).
  ControlTransfer Lookup(pc_t from) {
    auto result = map_.find(from);
    if (result == map_.end()) {
      V8_Fatal(__FILE__, __LINE__, "no control target for pc %zu", from);
    }
    return result->second;
  }
};
827
// Code and metadata needed to execute a function.
struct InterpreterCode {
  WasmFunction* function;     // wasm function
  AstLocalDecls locals;       // local declarations
  const byte* orig_start;     // start of original code
  const byte* orig_end;       // end of original code
  byte* start;                // start of (maybe altered) code
  byte* end;                  // end of (maybe altered) code
  ControlTransfers* targets;  // helper for control flow.

  // Returns a pointer into the (possibly altered) code at offset {pc}.
  const byte* at(pc_t pc) { return start + pc; }
};
840
841 // The main storage for interpreter code. It maps {WasmFunction} to the
842 // metadata needed to execute each function.
843 class CodeMap {
844 public:
845 Zone* zone_;
846 WasmModule* module_;
847 ZoneVector<InterpreterCode> interpreter_code_;
848
849 CodeMap(WasmModule* module, Zone* zone)
850 : zone_(zone), module_(module), interpreter_code_(zone) {
851 if (module == nullptr) return;
852 for (size_t i = 0; i < module->functions.size(); i++) {
853 WasmFunction* function = &module->functions[i];
854 const byte* code_start =
855 module->module_start + function->code_start_offset;
856 const byte* code_end = module->module_start + function->code_end_offset;
857 AddFunction(function, code_start, code_end);
858 }
859 }
860
861 InterpreterCode* FindCode(WasmFunction* function) {
862 if (function->func_index > interpreter_code_.size()) {
Clemens Hammacher 2016/05/24 12:47:03 You wanted to check for "<".
ahaas 2016/05/24 14:48:26 Do you mean "<"?
titzer 2016/05/24 14:50:47 Done.
863 InterpreterCode* code = &interpreter_code_[function->func_index];
864 if (code && code->function == function) return code;
Clemens Hammacher 2016/05/24 12:47:03 Why the null-check here, and not below? Also, you
ahaas 2016/05/24 14:48:27 What's the reason why you don't return Preprocess(
titzer 2016/05/24 14:50:47 I've changed this function so that it no longer se
865 }
866
867 for (size_t i = 0; i < interpreter_code_.size(); i++) {
868 if (interpreter_code_[i].function == function) {
869 return Preprocess(&interpreter_code_[i]);
870 }
871 }
872 return nullptr;
873 }
874
875 InterpreterCode* GetCode(uint32_t function_index) {
876 CHECK_LT(function_index, interpreter_code_.size());
877 return Preprocess(&interpreter_code_[function_index]);
878 }
879
880 InterpreterCode* GetIndirectCode(uint32_t indirect_index) {
881 if (indirect_index >= module_->function_table.size()) return nullptr;
882 uint32_t index = module_->function_table[indirect_index];
883 if (index >= interpreter_code_.size()) return nullptr;
884 return Preprocess(&interpreter_code_[index]);
ahaas 2016/05/24 14:48:26 I think "return GetCode(index);" would be nicer he
titzer 2016/05/24 15:09:52 Done.
885 }
886
887 InterpreterCode* Preprocess(InterpreterCode* code) {
888 if (code->targets == nullptr && code->start) {
889 // Compute the expr_ends map and the local declarations.
890 CHECK(DecodeLocalDecls(code->locals, code->start, code->end));
891 code->targets = new (zone_) ControlTransfers(
892 zone_, code->locals.decls_encoded_size, code->start, code->end);
893 }
894 return code;
895 }
896
897 int AddFunction(WasmFunction* function, const byte* code_start,
898 const byte* code_end) {
899 InterpreterCode code = {
900 function, AstLocalDecls(zone_), code_start,
901 code_end, const_cast<byte*>(code_start), const_cast<byte*>(code_end),
902 nullptr};
903
ahaas 2016/05/24 14:48:26 Is it true that you assume that function->func_ind
titzer 2016/05/24 15:09:52 Done.
904 interpreter_code_.push_back(code);
905 return static_cast<int>(interpreter_code_.size()) - 1;
906 }
907
908 bool SetFunctionCode(WasmFunction* function, const byte* start,
909 const byte* end) {
910 InterpreterCode* code = FindCode(function);
911 if (code == nullptr) return false;
912 code->targets = nullptr;
913 code->orig_start = start;
914 code->orig_end = end;
915 code->start = const_cast<byte*>(start);
916 code->end = const_cast<byte*>(end);
917 Preprocess(code);
918 return true;
919 }
920 };
921
// Responsible for executing code directly.
//
// A ThreadImpl owns one value stack ({stack_}) and one frame stack
// ({frames_}). Each Frame marks where its parameters/locals begin on the
// value stack; calls overlap the callee's parameters with the arguments the
// caller already pushed, so no copying happens on a call.
class ThreadImpl : public WasmInterpreter::Thread {
 public:
  ThreadImpl(Zone* zone, CodeMap* codemap, WasmModuleInstance* instance)
      : codemap_(codemap),
        instance_(instance),
        stack_(zone),
        frames_(zone),
        state_(WasmInterpreter::STOPPED),
        trap_reason_(kTrapCount) {}

  virtual ~ThreadImpl() {}

  //==========================================================================
  // Implementation of public interface for WasmInterpreter::Thread.
  //==========================================================================

  // Current execution state of this thread.
  virtual WasmInterpreter::State state() { return state_; }

  // Start a new activation of {function}: copies {args} onto the value
  // stack, pushes a frame, and zero-initializes the function's locals.
  // Does not begin execution; call Run() for that.
  virtual void PushFrame(WasmFunction* function, WasmVal* args) {
    InterpreterCode* code = codemap()->FindCode(function);
    CHECK_NOT_NULL(code);
    frames_.push_back({code, 0, 0, stack_.size()});
    for (size_t i = 0; i < function->sig->parameter_count(); i++) {
      stack_.push_back(args[i]);
    }
    // {ret_pc} starts just past the encoded local declarations.
    frames_.back().ret_pc = InitLocals(code);
    TRACE(" => push func#%u @%zu\n", code->function->func_index,
          frames_.back().ret_pc);
  }

  // Execute until the thread pauses, traps, or finishes. Each Execute()
  // call is bounded by {kRunSteps} instructions, so the loop resumes as
  // long as the state comes back STOPPED.
  virtual WasmInterpreter::State Run() {
    do {
      if (state_ == WasmInterpreter::STOPPED ||
          state_ == WasmInterpreter::PAUSED) {
        state_ = WasmInterpreter::RUNNING;
        Execute(frames_.back().code, frames_.back().ret_pc, kRunSteps);
      }
    } while (state_ == WasmInterpreter::STOPPED);
    return state_;
  }

  // Single-stepping is not implemented yet.
  virtual WasmInterpreter::State Step() {
    UNIMPLEMENTED();
    return WasmInterpreter::STOPPED;
  }

  virtual void Pause() { UNIMPLEMENTED(); }

  // Discard all frames and values and return to the STOPPED state.
  virtual void Reset() {
    TRACE("----- RESET -----\n");
    stack_.clear();
    frames_.clear();
    state_ = WasmInterpreter::STOPPED;
    trap_reason_ = kTrapCount;
  }

  virtual int GetFrameCount() { return static_cast<int>(frames_.size()); }

  virtual const WasmFrame* GetFrame(int index) {
    UNIMPLEMENTED();
    return nullptr;
  }

  virtual WasmFrame* GetMutableFrame(int index) {
    UNIMPLEMENTED();
    return nullptr;
  }

  // Returns the single return value of a FINISHED execution, or the
  // sentinel 0xdeadbeef if the thread TRAPPED.
  virtual WasmVal GetReturnValue() {
    if (state_ == WasmInterpreter::TRAPPED) return WasmVal(0xdeadbeef);
    CHECK_EQ(WasmInterpreter::FINISHED, state_);
    CHECK_EQ(1, stack_.size());
    return stack_[0];
  }

  // True once execution cannot be resumed (trap or normal completion).
  bool Terminated() {
    return state_ == WasmInterpreter::TRAPPED ||
           state_ == WasmInterpreter::FINISHED;
  }

 private:
  // Entries on the stack of functions being evaluated.
  struct Frame {
    InterpreterCode* code;
    pc_t call_pc;  // pc of the call instruction in the *caller*.
    pc_t ret_pc;   // pc to resume at in this frame.
    sp_t sp;       // value-stack index where this frame's params begin.

    // Limit of parameters.
    sp_t plimit() { return sp + code->function->sig->parameter_count(); }
    // Limit of locals.
    sp_t llimit() { return plimit() + code->locals.total_local_count; }
  };

  CodeMap* codemap_;
  WasmModuleInstance* instance_;
  ZoneVector<WasmVal> stack_;   // value stack, shared by all frames.
  ZoneVector<Frame> frames_;    // call-frame stack.
  WasmInterpreter::State state_;
  TrapReason trap_reason_;      // kTrapCount means "no trap".

  CodeMap* codemap() { return codemap_; }
  WasmModuleInstance* instance() { return instance_; }
  WasmModule* module() { return instance_->module; }

  // Transition to TRAPPED, recording the reason and the faulting pc.
  void DoTrap(TrapReason trap, pc_t pc) {
    state_ = WasmInterpreter::TRAPPED;
    trap_reason_ = trap;
    CommitPc(pc);
  }

  // Push a frame with arguments already on the stack.
  void PushFrame(InterpreterCode* code, pc_t call_pc, pc_t ret_pc) {
    CHECK_NOT_NULL(code);
    DCHECK(!frames_.empty());
    frames_.back().call_pc = call_pc;
    frames_.back().ret_pc = ret_pc;
    size_t arity = code->function->sig->parameter_count();
    DCHECK_GE(stack_.size(), arity);
    // The parameters will overlap the arguments already on the stack.
    frames_.push_back({code, 0, 0, stack_.size() - arity});
    // Note (code review): InitLocals() changes the value stack but not the
    // frame stack, so {ret_pc} is assigned after the push rather than in
    // the aggregate initializer above.
    frames_.back().ret_pc = InitLocals(code);
    TRACE(" => push func#%u @%zu\n", code->function->func_index,
          frames_.back().ret_pc);
  }

  // Zero-initialize all declared locals of {code} on the value stack and
  // return the pc just past the encoded local declarations.
  pc_t InitLocals(InterpreterCode* code) {
    for (auto p : code->locals.local_types) {
      WasmVal val;
      switch (p.first) {
        case kAstI32:
          val = WasmVal(static_cast<int32_t>(0));
          break;
        case kAstI64:
          val = WasmVal(static_cast<int64_t>(0));
          break;
        case kAstF32:
          val = WasmVal(static_cast<float>(0));
          break;
        case kAstF64:
          val = WasmVal(static_cast<double>(0));
          break;
        default:
          UNREACHABLE();
          break;
      }
      // p.second is the count of consecutive locals of this type.
      stack_.insert(stack_.end(), p.second, val);
    }
    return code->locals.decls_encoded_size;
  }

  // Persist the current pc into the top frame so execution can resume there.
  void CommitPc(pc_t pc) {
    if (!frames_.empty()) {
      frames_.back().ret_pc = pc;
    }
  }

  bool SkipBreakpoint(InterpreterCode* code, pc_t pc) {
    // TODO(titzer): skip a breakpoint if we are resuming from it, or it
    // is set for another thread only.
    return false;
  }

  // Pop the top frame, push the return value {val}, and restore the
  // caller's {code}/{pc}/{limit}. Returns false when the last frame was
  // popped (execution FINISHED).
  bool DoReturn(InterpreterCode** code, pc_t* pc, pc_t* limit, WasmVal val) {
    DCHECK_GT(frames_.size(), 0u);
    stack_.resize(frames_.back().sp);
    frames_.pop_back();
    if (frames_.size() == 0) {
      // A return from the top frame terminates the execution.
      state_ = WasmInterpreter::FINISHED;
      stack_.clear();
      stack_.push_back(val);
      TRACE(" => finish\n");
      return false;
    } else {
      // Return to caller frame.
      Frame* top = &frames_.back();
      *code = top->code;
      *pc = top->ret_pc;
      *limit = top->code->end - top->code->start;
      if (top->code->start[top->call_pc] == kExprCallIndirect ||
          (top->code->orig_start &&
           top->code->orig_start[top->call_pc] == kExprCallIndirect)) {
        // UGLY: An indirect call has the additional function index on the
        // stack. Note (code review): the index cannot be popped before the
        // call because the call's arguments sit above it on the stack, so
        // it is cleaned up here on return instead.
        stack_.pop_back();
      }
      TRACE(" => pop func#%u @%zu\n", (*code)->function->func_index, *pc);

      stack_.push_back(val);
      return true;
    }
  }

  // Adjust the program counter {pc} and the stack contents according to the
  // code's precomputed control transfer map. Returns the difference between
  // the new pc and the old pc.
  int DoControlTransfer(InterpreterCode* code, pc_t pc) {
    auto target = code->targets->Lookup(pc);
    switch (target.action) {
      case ControlTransfer::kNoAction:
        TRACE(" action [sp-%u]\n", target.spdiff);
        PopN(target.spdiff);
        break;
      case ControlTransfer::kPopAndRepush: {
        // Preserve the top-of-stack value across the spdiff pops.
        WasmVal val = Pop();
        TRACE(" action [pop x, sp-%u, push x]\n", target.spdiff - 1);
        DCHECK_GE(target.spdiff, 1u);
        PopN(target.spdiff - 1);
        Push(pc, val);
        break;
      }
      case ControlTransfer::kPushVoid:
        TRACE(" action [sp-%u, push void]\n", target.spdiff);
        PopN(target.spdiff);
        Push(pc, WasmVal());
        break;
    }
    return target.pcdiff;
  }

  // The main dispatch loop: interpret {code} starting at {pc}, executing at
  // most {max} instructions before pausing. Calls and returns switch
  // {code}/{pc}/{limit} in place and reset {decoder} rather than recursing.
  void Execute(InterpreterCode* code, pc_t pc, int max) {
    Decoder decoder(code->start, code->end);
    pc_t limit = code->end - code->start;
    while (true) {
      if (max-- <= 0) {
        // Maximum number of instructions reached.
        state_ = WasmInterpreter::PAUSED;
        return CommitPc(pc);
      }

      if (pc >= limit) {
        // Fell off end of code; do an implicit return.
        TRACE("@%-3zu: ImplicitReturn\n", pc);
        WasmVal val = PopArity(code->function->sig->return_count());
        if (!DoReturn(&code, &pc, &limit, val)) return;
        decoder.Reset(code->start, code->end);
        continue;
      }

      const char* skip = "";
      int len = 1;
      byte opcode = code->start[pc];
      byte orig = opcode;
      if (opcode == kInternalBreakpoint) {
        if (SkipBreakpoint(code, pc)) {
          // skip breakpoint by switching on original code.
          orig = code->orig_start[pc];
          skip = "[skip] ";
        } else {
          state_ = WasmInterpreter::PAUSED;
          return CommitPc(pc);
        }
      }

      USE(skip);
      TRACE("@%-3zu: %s%-24s:", pc, skip,
            WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
      TraceValueStack();
      TRACE("\n");

      switch (orig) {
        case kExprNop:
          Push(pc, WasmVal());
          break;
        case kExprBlock:
        case kExprLoop: {
          // Do nothing.
          break;
        }
        case kExprIf: {
          WasmVal cond = Pop();
          bool is_true = cond.to<uint32_t>() != 0;
          if (is_true) {
            // fall through to the true block.
            TRACE(" true => fallthrough\n");
          } else {
            len = DoControlTransfer(code, pc);
            TRACE(" false => @%zu\n", pc + len);
          }
          break;
        }
        case kExprElse: {
          // Reached only when the true arm completed; jump past the else.
          len = DoControlTransfer(code, pc);
          TRACE(" end => @%zu\n", pc + len);
          break;
        }
        case kExprSelect: {
          WasmVal cond = Pop();
          WasmVal fval = Pop();
          WasmVal tval = Pop();
          Push(pc, cond.to<int32_t>() != 0 ? tval : fval);
          break;
        }
        case kExprBr: {
          BreakDepthOperand operand(&decoder, code->at(pc));
          WasmVal val = PopArity(operand.arity);
          len = DoControlTransfer(code, pc);
          TRACE(" br => @%zu\n", pc + len);
          if (operand.arity > 0) Push(pc, val);
          break;
        }
        case kExprBrIf: {
          BreakDepthOperand operand(&decoder, code->at(pc));
          WasmVal cond = Pop();
          WasmVal val = PopArity(operand.arity);
          bool is_true = cond.to<uint32_t>() != 0;
          if (is_true) {
            len = DoControlTransfer(code, pc);
            TRACE(" br_if => @%zu\n", pc + len);
            if (operand.arity > 0) Push(pc, val);
          } else {
            TRACE(" false => fallthrough\n");
            len = 1 + operand.length;
            Push(pc, WasmVal());
          }
          break;
        }
        case kExprBrTable: {
          BranchTableOperand operand(&decoder, code->at(pc));
          uint32_t key = Pop().to<uint32_t>();
          WasmVal val = PopArity(operand.arity);
          // An out-of-range key selects the default target, which the
          // control-transfer map stores at index {table_count}.
          if (key >= operand.table_count) key = operand.table_count;
          len = DoControlTransfer(code, pc + key) + key;
          TRACE(" br[%u] => @%zu\n", key, pc + len);
          if (operand.arity > 0) Push(pc, val);
          break;
        }
        case kExprReturn: {
          ReturnArityOperand operand(&decoder, code->at(pc));
          WasmVal val = PopArity(operand.arity);
          if (!DoReturn(&code, &pc, &limit, val)) return;
          // Note (code review): the decoder is reset at each return/call
          // site (rather than inside DoReturn) to keep the pattern uniform
          // with the call cases below.
          decoder.Reset(code->start, code->end);
          continue;
        }
        case kExprUnreachable: {
          DoTrap(kTrapUnreachable, pc);
          return CommitPc(pc);
        }
        case kExprEnd: {
          len = DoControlTransfer(code, pc);
          DCHECK_EQ(1, len);
          break;
        }
        case kExprI8Const: {
          ImmI8Operand operand(&decoder, code->at(pc));
          Push(pc, WasmVal(operand.value));
          len = 1 + operand.length;
          break;
        }
        case kExprI32Const: {
          ImmI32Operand operand(&decoder, code->at(pc));
          Push(pc, WasmVal(operand.value));
          len = 1 + operand.length;
          break;
        }
        case kExprI64Const: {
          ImmI64Operand operand(&decoder, code->at(pc));
          Push(pc, WasmVal(operand.value));
          len = 1 + operand.length;
          break;
        }
        case kExprF32Const: {
          ImmF32Operand operand(&decoder, code->at(pc));
          Push(pc, WasmVal(operand.value));
          len = 1 + operand.length;
          break;
        }
        case kExprF64Const: {
          ImmF64Operand operand(&decoder, code->at(pc));
          Push(pc, WasmVal(operand.value));
          len = 1 + operand.length;
          break;
        }
        case kExprGetLocal: {
          LocalIndexOperand operand(&decoder, code->at(pc));
          // Locals live on the value stack, offset from the frame base.
          Push(pc, stack_[frames_.back().sp + operand.index]);
          len = 1 + operand.length;
          break;
        }
        case kExprSetLocal: {
          LocalIndexOperand operand(&decoder, code->at(pc));
          WasmVal val = Pop();
          stack_[frames_.back().sp + operand.index] = val;
          // set_local is an expression: it also yields the stored value.
          Push(pc, val);
          len = 1 + operand.length;
          break;
        }
        case kExprCallFunction: {
          CallFunctionOperand operand(&decoder, code->at(pc));
          InterpreterCode* target = codemap()->GetCode(operand.index);
          // TODO (code review): factor this call sequence into a DoCall()
          // helper shared with kExprCallIndirect.
          PushFrame(target, pc, pc + 1 + operand.length);
          code = target;
          decoder.Reset(code->start, code->end);
          pc = frames_.back().ret_pc;
          limit = code->end - code->start;
          continue;
        }
        case kExprCallIndirect: {
          CallIndirectOperand operand(&decoder, code->at(pc));
          // The callee index sits below the arguments on the stack.
          size_t index = stack_.size() - operand.arity - 1;
          DCHECK_LT(index, stack_.size());
          uint32_t table_index = stack_[index].to<uint32_t>();
          if (table_index >= module()->function_table.size()) {
            return DoTrap(kTrapFuncInvalid, pc);
          }
          uint16_t function_index = module()->function_table[table_index];
          InterpreterCode* target = codemap()->GetCode(function_index);
          DCHECK(target);
          if (target->function->sig_index != operand.index) {
            return DoTrap(kTrapFuncSigMismatch, pc);
          }

          PushFrame(target, pc, pc + 1 + operand.length);
          code = target;
          decoder.Reset(code->start, code->end);
          pc = frames_.back().ret_pc;
          limit = code->end - code->start;
          continue;
        }
        case kExprCallImport: {
          UNIMPLEMENTED();
          break;
        }
        case kExprLoadGlobal: {
          GlobalIndexOperand operand(&decoder, code->at(pc));
          WasmGlobal* global = &module()->globals[operand.index];
          byte* ptr = instance()->globals_start + global->offset;
          MachineType type = global->type;
          WasmVal val;
          // Sub-word globals are widened to i32 on the value stack.
          if (type == MachineType::Int8()) {
            val =
                WasmVal(static_cast<int32_t>(*reinterpret_cast<int8_t*>(ptr)));
          } else if (type == MachineType::Uint8()) {
            val =
                WasmVal(static_cast<int32_t>(*reinterpret_cast<uint8_t*>(ptr)));
          } else if (type == MachineType::Int16()) {
            val =
                WasmVal(static_cast<int32_t>(*reinterpret_cast<int16_t*>(ptr)));
          } else if (type == MachineType::Uint16()) {
            val = WasmVal(
                static_cast<int32_t>(*reinterpret_cast<uint16_t*>(ptr)));
          } else if (type == MachineType::Int32()) {
            val = WasmVal(*reinterpret_cast<int32_t*>(ptr));
          } else if (type == MachineType::Uint32()) {
            val = WasmVal(*reinterpret_cast<uint32_t*>(ptr));
          } else if (type == MachineType::Int64()) {
            val = WasmVal(*reinterpret_cast<int64_t*>(ptr));
          } else if (type == MachineType::Uint64()) {
            val = WasmVal(*reinterpret_cast<uint64_t*>(ptr));
          } else if (type == MachineType::Float32()) {
            val = WasmVal(*reinterpret_cast<float*>(ptr));
          } else if (type == MachineType::Float64()) {
            val = WasmVal(*reinterpret_cast<double*>(ptr));
          } else {
            UNREACHABLE();
          }
          Push(pc, val);
          len = 1 + operand.length;
          break;
        }
        case kExprStoreGlobal: {
          GlobalIndexOperand operand(&decoder, code->at(pc));
          WasmGlobal* global = &module()->globals[operand.index];
          byte* ptr = instance()->globals_start + global->offset;
          MachineType type = global->type;
          WasmVal val = Pop();
          // Sub-word globals are narrowed from the i32 stack value.
          if (type == MachineType::Int8()) {
            *reinterpret_cast<int8_t*>(ptr) =
                static_cast<int8_t>(val.to<int32_t>());
          } else if (type == MachineType::Uint8()) {
            *reinterpret_cast<uint8_t*>(ptr) =
                static_cast<uint8_t>(val.to<uint32_t>());
          } else if (type == MachineType::Int16()) {
            *reinterpret_cast<int16_t*>(ptr) =
                static_cast<int16_t>(val.to<int32_t>());
          } else if (type == MachineType::Uint16()) {
            *reinterpret_cast<uint16_t*>(ptr) =
                static_cast<uint16_t>(val.to<uint32_t>());
          } else if (type == MachineType::Int32()) {
            *reinterpret_cast<int32_t*>(ptr) = val.to<int32_t>();
          } else if (type == MachineType::Uint32()) {
            *reinterpret_cast<uint32_t*>(ptr) = val.to<uint32_t>();
          } else if (type == MachineType::Int64()) {
            *reinterpret_cast<int64_t*>(ptr) = val.to<int64_t>();
          } else if (type == MachineType::Uint64()) {
            *reinterpret_cast<uint64_t*>(ptr) = val.to<uint64_t>();
          } else if (type == MachineType::Float32()) {
            *reinterpret_cast<float*>(ptr) = val.to<float>();
          } else if (type == MachineType::Float64()) {
            *reinterpret_cast<double*>(ptr) = val.to<double>();
          } else {
            UNREACHABLE();
          }
          // store_global is an expression: it also yields the stored value.
          Push(pc, val);
          len = 1 + operand.length;
          break;
        }

// NOTE(review): the bounds check below assumes mem_size >= sizeof(mtype);
// otherwise the unsigned subtraction wraps around — confirm instances
// always have a minimum memory size.
#define LOAD_CASE(name, ctype, mtype) \
  case kExpr##name: { \
    MemoryAccessOperand operand(&decoder, code->at(pc)); \
    uint32_t index = Pop().to<uint32_t>(); \
    size_t effective_mem_size = instance()->mem_size - sizeof(mtype); \
    if (operand.offset > effective_mem_size || \
        index > (effective_mem_size - operand.offset)) { \
      return DoTrap(kTrapMemOutOfBounds, pc); \
    } \
    byte* addr = instance()->mem_start + operand.offset + index; \
    /* TODO(titzer): alignment, endianness for load mem */ \
    WasmVal result(static_cast<ctype>(*reinterpret_cast<mtype*>(addr))); \
    Push(pc, result); \
    len = 1 + operand.length; \
    break; \
  }

          LOAD_CASE(I32LoadMem8S, int32_t, int8_t);
          LOAD_CASE(I32LoadMem8U, int32_t, uint8_t);
          LOAD_CASE(I32LoadMem16S, int32_t, int16_t);
          LOAD_CASE(I32LoadMem16U, int32_t, uint16_t);
          LOAD_CASE(I64LoadMem8S, int64_t, int8_t);
          LOAD_CASE(I64LoadMem8U, int64_t, uint8_t);
          LOAD_CASE(I64LoadMem16S, int64_t, int16_t);
          LOAD_CASE(I64LoadMem16U, int64_t, uint16_t);
          LOAD_CASE(I64LoadMem32S, int64_t, int32_t);
          LOAD_CASE(I64LoadMem32U, int64_t, uint32_t);
          LOAD_CASE(I32LoadMem, int32_t, int32_t);
          LOAD_CASE(I64LoadMem, int64_t, int64_t);
          LOAD_CASE(F32LoadMem, float, float);
          LOAD_CASE(F64LoadMem, double, double);
#undef LOAD_CASE

// NOTE(review): same mem_size >= sizeof(mtype) assumption as LOAD_CASE.
#define STORE_CASE(name, ctype, mtype) \
  case kExpr##name: { \
    MemoryAccessOperand operand(&decoder, code->at(pc)); \
    WasmVal val = Pop(); \
    uint32_t index = Pop().to<uint32_t>(); \
    size_t effective_mem_size = instance()->mem_size - sizeof(mtype); \
    if (operand.offset > effective_mem_size || \
        index > (effective_mem_size - operand.offset)) { \
      return DoTrap(kTrapMemOutOfBounds, pc); \
    } \
    byte* addr = instance()->mem_start + operand.offset + index; \
    /* TODO(titzer): alignment, endianness for store mem */ \
    *reinterpret_cast<mtype*>(addr) = static_cast<mtype>(val.to<ctype>()); \
    Push(pc, val); \
    len = 1 + operand.length; \
    break; \
  }

          STORE_CASE(I32StoreMem8, int32_t, int8_t);
          STORE_CASE(I32StoreMem16, int32_t, int16_t);
          STORE_CASE(I64StoreMem8, int64_t, int8_t);
          STORE_CASE(I64StoreMem16, int64_t, int16_t);
          STORE_CASE(I64StoreMem32, int64_t, int32_t);
          STORE_CASE(I32StoreMem, int32_t, int32_t);
          STORE_CASE(I64StoreMem, int64_t, int64_t);
          STORE_CASE(F32StoreMem, float, float);
          STORE_CASE(F64StoreMem, double, double);
#undef STORE_CASE

// asm.js loads never trap: out-of-bounds reads produce {defval}
// (0 for integers, NaN for floats).
#define ASMJS_LOAD_CASE(name, ctype, mtype, defval) \
  case kExpr##name: { \
    uint32_t index = Pop().to<uint32_t>(); \
    ctype result; \
    if (index >= (instance()->mem_size - sizeof(mtype))) { \
      result = defval; \
    } else { \
      byte* addr = instance()->mem_start + index; \
      /* TODO(titzer): alignment for asmjs load mem? */ \
      result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
    } \
    Push(pc, WasmVal(result)); \
    break; \
  }
          ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
          ASMJS_LOAD_CASE(I32AsmjsLoadMem8U, int32_t, uint8_t, 0);
          ASMJS_LOAD_CASE(I32AsmjsLoadMem16S, int32_t, int16_t, 0);
          ASMJS_LOAD_CASE(I32AsmjsLoadMem16U, int32_t, uint16_t, 0);
          ASMJS_LOAD_CASE(I32AsmjsLoadMem, int32_t, int32_t, 0);
          ASMJS_LOAD_CASE(F32AsmjsLoadMem, float, float,
                          std::numeric_limits<float>::quiet_NaN());
          ASMJS_LOAD_CASE(F64AsmjsLoadMem, double, double,
                          std::numeric_limits<double>::quiet_NaN());
#undef ASMJS_LOAD_CASE

// asm.js stores never trap: out-of-bounds writes are silently dropped,
// but the value is still pushed as the expression result.
#define ASMJS_STORE_CASE(name, ctype, mtype) \
  case kExpr##name: { \
    WasmVal val = Pop(); \
    uint32_t index = Pop().to<uint32_t>(); \
    if (index < (instance()->mem_size - sizeof(mtype))) { \
      byte* addr = instance()->mem_start + index; \
      /* TODO(titzer): alignment for asmjs store mem? */ \
      *(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
    } \
    Push(pc, val); \
    break; \
  }

          ASMJS_STORE_CASE(I32AsmjsStoreMem8, int32_t, int8_t);
          ASMJS_STORE_CASE(I32AsmjsStoreMem16, int32_t, int16_t);
          ASMJS_STORE_CASE(I32AsmjsStoreMem, int32_t, int32_t);
          ASMJS_STORE_CASE(F32AsmjsStoreMem, float, float);
          ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
#undef ASMJS_STORE_CASE

        case kExprMemorySize: {
          Push(pc, WasmVal(static_cast<uint32_t>(instance()->mem_size)));
          break;
        }
// Binops expressible as a single C++ operator (see FOREACH_SIMPLE_BINOP).
#define EXECUTE_SIMPLE_BINOP(name, ctype, op) \
  case kExpr##name: { \
    WasmVal rval = Pop(); \
    WasmVal lval = Pop(); \
    WasmVal result(lval.to<ctype>() op rval.to<ctype>()); \
    Push(pc, result); \
    break; \
  }
          FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
#undef EXECUTE_SIMPLE_BINOP

// Binops with a dedicated Execute##name helper that may trap
// (e.g. division by zero).
#define EXECUTE_OTHER_BINOP(name, ctype) \
  case kExpr##name: { \
    TrapReason trap = kTrapCount; \
    volatile ctype rval = Pop().to<ctype>(); \
    volatile ctype lval = Pop().to<ctype>(); \
    WasmVal result(Execute##name(lval, rval, &trap)); \
    if (trap != kTrapCount) return DoTrap(trap, pc); \
    Push(pc, result); \
    break; \
  }
          FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
#undef EXECUTE_OTHER_BINOP

// Unops with a dedicated Execute##name helper that may trap.
#define EXECUTE_OTHER_UNOP(name, ctype) \
  case kExpr##name: { \
    TrapReason trap = kTrapCount; \
    volatile ctype val = Pop().to<ctype>(); \
    WasmVal result(Execute##name(val, &trap)); \
    if (trap != kTrapCount) return DoTrap(trap, pc); \
    Push(pc, result); \
    break; \
  }
          FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
#undef EXECUTE_OTHER_UNOP

        default:
          V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
                   code->start[pc], OpcodeName(code->start[pc]));
          UNREACHABLE();
      }

      pc += len;
    }
    UNREACHABLE();  // above decoding loop should run forever.
  }

  WasmVal Pop() {
    DCHECK_GT(stack_.size(), 0u);
    DCHECK_GT(frames_.size(), 0u);
    DCHECK_GT(stack_.size(), frames_.back().llimit());  // can't pop into locals
    WasmVal val = stack_.back();
    stack_.pop_back();
    return val;
  }

  void PopN(int n) {
    DCHECK_GE(stack_.size(), static_cast<size_t>(n));
    DCHECK_GT(frames_.size(), 0u);
    size_t nsize = stack_.size() - n;
    DCHECK_GE(nsize, frames_.back().llimit());  // can't pop into locals
    stack_.resize(nsize);
  }

  // Pop a value iff {arity} == 1; arity 0 yields a void value.
  WasmVal PopArity(size_t arity) {
    if (arity == 0) return WasmVal();
    CHECK_EQ(1, arity);
    return Pop();
  }

  void Push(pc_t pc, WasmVal val) {
    // TODO(titzer): store PC as well?
    stack_.push_back(val);
  }

  void TraceStack(const char* phase, pc_t pc) {
    if (FLAG_trace_wasm_interpreter) {
      PrintF("%s @%zu", phase, pc);
      UNIMPLEMENTED();
      PrintF("\n");
    }
  }

  // Debug-print the value stack of the top frame, labeling parameters (p),
  // locals (l) and operand-stack slots (s).
  void TraceValueStack() {
    Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr;
    sp_t sp = top ? top->sp : 0;
    sp_t plimit = top ? top->plimit() : 0;
    sp_t llimit = top ? top->llimit() : 0;
    if (FLAG_trace_wasm_interpreter) {
      for (size_t i = sp; i < stack_.size(); i++) {
        if (i < plimit)
          PrintF(" p%zu:", i);
        else if (i < llimit)
          PrintF(" l%zu:", i);
        else
          PrintF(" s%zu:", i);
        WasmVal val = stack_[i];
        switch (val.type) {
          case kAstI32:
            PrintF("i32:%d", val.to<int32_t>());
            break;
          case kAstI64:
            PrintF("i64:%" PRId64 "", val.to<int64_t>());
            break;
          case kAstF32:
            PrintF("f32:%f", val.to<float>());
            break;
          case kAstF64:
            PrintF("f64:%lf", val.to<double>());
            break;
          case kAstStmt:
            PrintF("void");
            break;
          default:
            UNREACHABLE();
            break;
        }
      }
    }
  }
};
1654
//============================================================================
// The implementation details of the interpreter.
//============================================================================
// Bundles the per-instance state (code map and threads) behind the public
// WasmInterpreter facade. Zone-allocated; destroyed with the zone.
class WasmInterpreterInternals : public ZoneObject {
 public:
  WasmModuleInstance* instance_;
  CodeMap codemap_;
  ZoneVector<ThreadImpl> threads_;

  WasmInterpreterInternals(Zone* zone, WasmModuleInstance* instance)
      : instance_(instance),
        // {instance} may be null in testing configurations.
        codemap_(instance_ ? instance_->module : nullptr, zone),
        threads_(zone) {
    // Currently a single interpreter thread per instance.
    threads_.push_back(ThreadImpl(zone, &codemap_, instance));
  }
};
1671
//============================================================================
// Implementation of the public interface of the interpreter.
//============================================================================
// All interpreter state lives in {internals_}, which is allocated inside
// {zone_} and therefore freed together with it; no explicit delete needed.
WasmInterpreter::WasmInterpreter(WasmModuleInstance* instance,
                                 base::AccountingAllocator* allocator)
    : zone_(allocator),
      internals_(new (&zone_) WasmInterpreterInternals(&zone_, instance)) {}

WasmInterpreter::~WasmInterpreter() {}
1681
// Run/Pause forward to the single interpreter thread (thread 0).
void WasmInterpreter::Run() { internals_->threads_[0].Run(); }

void WasmInterpreter::Pause() { internals_->threads_[0].Pause(); }
1685
1686 bool WasmInterpreter::SetBreakpoint(WasmFunction* function, int pc,
1687 bool enabled) {
1688 InterpreterCode* code = internals_->codemap_.FindCode(function);
1689 if (!code) return false;
1690 int size = static_cast<int>(code->end - code->start);
1691 // Check bounds for {pc}.
1692 if (pc < 0 || pc >= size) return false;
1693 // Make a copy of the code before enabling a breakpoint.
1694 if (enabled && code->orig_start == code->start) {
1695 code->start = reinterpret_cast<byte*>(zone_.New(size));
1696 memcpy(code->start, code->orig_start, size);
1697 code->end = code->start + size;
1698 }
1699 bool prev = code->start[pc] == kInternalBreakpoint;
1700 if (enabled) {
1701 code->start[pc] = kInternalBreakpoint;
1702 } else {
1703 code->start[pc] = code->orig_start[pc];
1704 }
1705 return prev;
1706 }
1707
1708 bool WasmInterpreter::GetBreakpoint(WasmFunction* function, int pc) {
1709 InterpreterCode* code = internals_->codemap_.FindCode(function);
1710 if (!code) return false;
1711 int size = static_cast<int>(code->end - code->start);
1712 // Check bounds for {pc}.
1713 if (pc < 0 || pc >= size) return false;
1714 // Check if a breakpoint is present at that place in the code.
1715 return code->start[pc] == kInternalBreakpoint;
1716 }
1717
// Per-function tracing is not implemented yet.
bool WasmInterpreter::SetTracing(WasmFunction* function, bool enabled) {
  UNIMPLEMENTED();
  return false;
}

int WasmInterpreter::GetThreadCount() {
  return 1;  // only one thread for now.
}
1726
WasmInterpreter::Thread& WasmInterpreter::GetThread(int id) {
  CHECK_EQ(0, id);  // only one thread for now.
  return internals_->threads_[id];
}
1731
// Frame inspection/mutation is not implemented yet; the getters return a
// void-typed (kAstStmt) WasmVal as a placeholder.
WasmVal WasmInterpreter::GetLocalVal(const WasmFrame* frame, int index) {
  CHECK_GE(index, 0);
  UNIMPLEMENTED();
  WasmVal none;
  none.type = kAstStmt;
  return none;
}

WasmVal WasmInterpreter::GetExprVal(const WasmFrame* frame, int pc) {
  UNIMPLEMENTED();
  WasmVal none;
  none.type = kAstStmt;
  return none;
}

void WasmInterpreter::SetLocalVal(WasmFrame* frame, int index, WasmVal val) {
  UNIMPLEMENTED();
}

void WasmInterpreter::SetExprVal(WasmFrame* frame, int pc, WasmVal val) {
  UNIMPLEMENTED();
}
1754
// Size in bytes of the instance's linear memory.
size_t WasmInterpreter::GetMemorySize() {
  return internals_->instance_->mem_size;
}

// Direct memory access from the embedder is not implemented yet.
WasmVal WasmInterpreter::ReadMemory(size_t offset) {
  UNIMPLEMENTED();
  return WasmVal();
}

void WasmInterpreter::WriteMemory(size_t offset, WasmVal val) {
  UNIMPLEMENTED();
}
1767
// Test-only hooks: register a function without code, attach code to a
// function, and expose the precomputed control-transfer map.
int WasmInterpreter::AddFunctionForTesting(WasmFunction* function) {
  return internals_->codemap_.AddFunction(function, nullptr, nullptr);
}

bool WasmInterpreter::SetFunctionCodeForTesting(WasmFunction* function,
                                                const byte* start,
                                                const byte* end) {
  return internals_->codemap_.SetFunctionCode(function, start, end);
}

ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
    Zone* zone, const byte* start, const byte* end) {
  ControlTransfers targets(zone, 0, start, end);
  return targets.map_;
}
1783
1784 } // namespace wasm
1785 } // namespace internal
1786 } // namespace v8
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698