Chromium Code Reviews
| 1 // Copyright 2016 the V8 project authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "src/wasm/wasm-interpreter.h" | |
| 6 #include "src/wasm/ast-decoder.h" | |
| 7 #include "src/wasm/decoder.h" | |
| 8 #include "src/wasm/wasm-external-refs.h" | |
| 9 #include "src/wasm/wasm-module.h" | |
| 10 | |
| 11 #include "src/base/accounting-allocator.h" | |
| 12 #include "src/zone-containers.h" | |
| 13 | |
| 14 namespace v8 { | |
| 15 namespace internal { | |
| 16 namespace wasm { | |
| 17 | |
| 18 #if DEBUG | |
| 19 #define TRACE(...) \ | |
| 20 do { \ | |
| 21 if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \ | |
| 22 } while (false) | |
| 23 #else | |
| 24 #define TRACE(...) | |
| 25 #endif | |
| 26 | |
| 27 #define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF) | |
| 28 | |
| 29 #define FOREACH_SIMPLE_BINOP(V) \ | |
| 30 V(I32Add, uint32_t, +) \ | |
| 31 V(I32Sub, uint32_t, -) \ | |
| 32 V(I32Mul, uint32_t, *) \ | |
| 33 V(I32And, uint32_t, &) \ | |
| 34 V(I32Ior, uint32_t, |) \ | |
| 35 V(I32Xor, uint32_t, ^) \ | |
| 36 V(I32Eq, uint32_t, ==) \ | |
| 37 V(I32Ne, uint32_t, !=) \ | |
| 38 V(I32LtU, uint32_t, <) \ | |
| 39 V(I32LeU, uint32_t, <=) \ | |
| 40 V(I32GtU, uint32_t, >) \ | |
| 41 V(I32GeU, uint32_t, >=) \ | |
| 42 V(I32LtS, int32_t, <) \ | |
| 43 V(I32LeS, int32_t, <=) \ | |
| 44 V(I32GtS, int32_t, >) \ | |
| 45 V(I32GeS, int32_t, >=) \ | |
| 46 V(I64Add, uint64_t, +) \ | |
| 47 V(I64Sub, uint64_t, -) \ | |
| 48 V(I64Mul, uint64_t, *) \ | |
| 49 V(I64And, uint64_t, &) \ | |
| 50 V(I64Ior, uint64_t, |) \ | |
| 51 V(I64Xor, uint64_t, ^) \ | |
| 52 V(I64Eq, uint64_t, ==) \ | |
| 53 V(I64Ne, uint64_t, !=) \ | |
| 54 V(I64LtU, uint64_t, <) \ | |
| 55 V(I64LeU, uint64_t, <=) \ | |
| 56 V(I64GtU, uint64_t, >) \ | |
| 57 V(I64GeU, uint64_t, >=) \ | |
| 58 V(I64LtS, int64_t, <) \ | |
| 59 V(I64LeS, int64_t, <=) \ | |
| 60 V(I64GtS, int64_t, >) \ | |
| 61 V(I64GeS, int64_t, >=) \ | |
| 62 V(F32Add, float, +) \ | |
| 63 V(F32Sub, float, -) \ | |
| 64 V(F32Mul, float, *) \ | |
| 65 V(F32Div, float, /) \ | |
| 66 V(F32Eq, float, ==) \ | |
| 67 V(F32Ne, float, !=) \ | |
| 68 V(F32Lt, float, <) \ | |
| 69 V(F32Le, float, <=) \ | |
| 70 V(F32Gt, float, >) \ | |
| 71 V(F32Ge, float, >=) \ | |
| 72 V(F64Add, double, +) \ | |
| 73 V(F64Sub, double, -) \ | |
| 74 V(F64Mul, double, *) \ | |
| 75 V(F64Div, double, /) \ | |
| 76 V(F64Eq, double, ==) \ | |
| 77 V(F64Ne, double, !=) \ | |
| 78 V(F64Lt, double, <) \ | |
| 79 V(F64Le, double, <=) \ | |
| 80 V(F64Gt, double, >) \ | |
| 81 V(F64Ge, double, >=) | |
| 82 | |
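These FOREACH_* lists are X-macros: the opcode table is written once, and later in this file a visitor macro (EXECUTE_SIMPLE_BINOP and friends) is substituted for V to generate one switch case per entry. A minimal sketch of the pattern follows, with a hypothetical DO_BINOP visitor and simplified Pop/Push helpers standing in for the real ones:

```cpp
// Hypothetical illustration of the X-macro pattern (not code from this CL).
#define DO_BINOP(name, ctype, op)   \
  case kExpr##name: {               \
    ctype rhs = Pop().to<ctype>();  \
    ctype lhs = Pop().to<ctype>();  \
    Push(WasmVal(lhs op rhs));      \
    break;                          \
  }

switch (opcode) {
  FOREACH_SIMPLE_BINOP(DO_BINOP)  // expands into one case per listed opcode
}
#undef DO_BINOP
```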
| 83 #define FOREACH_OTHER_BINOP(V) \ | |
| 84 V(I32DivS, int32_t, _) \ | |
|
Clemens Hammacher
2016/05/12 15:20:55
The third argument is never actually used, you could drop it.
titzer
2016/05/23 11:41:38
Good catch. Done.
| |
| 85 V(I32DivU, uint32_t, _) \ | |
| 86 V(I32RemS, int32_t, _) \ | |
| 87 V(I32RemU, uint32_t, _) \ | |
| 88 V(I32Shl, uint32_t, _) \ | |
| 89 V(I32ShrU, uint32_t, _) \ | |
| 90 V(I32ShrS, int32_t, _) \ | |
| 91 V(I64DivS, int64_t, _) \ | |
| 92 V(I64DivU, uint64_t, _) \ | |
| 93 V(I64RemS, int64_t, _) \ | |
| 94 V(I64RemU, uint64_t, _) \ | |
| 95 V(I64Shl, uint64_t, _) \ | |
| 96 V(I64ShrU, uint64_t, _) \ | |
| 97 V(I64ShrS, int64_t, _) \ | |
| 98 V(I32Ror, int32_t, _) \ | |
| 99 V(I32Rol, int32_t, _) \ | |
| 100 V(I64Ror, int64_t, _) \ | |
| 101 V(I64Rol, int64_t, _) \ | |
| 102 V(F32Min, float, _) \ | |
| 103 V(F32Max, float, _) \ | |
| 104 V(F32CopySign, float, _) \ | |
| 105 V(F64Min, double, _) \ | |
| 106 V(F64Max, double, _) \ | |
| 107 V(F64CopySign, double, _) \ | |
| 108 V(I32AsmjsDivS, int32_t, _) \ | |
| 109 V(I32AsmjsDivU, uint32_t, _) \ | |
| 110 V(I32AsmjsRemS, int32_t, _) \ | |
| 111 V(I32AsmjsRemU, uint32_t, _) | |
| 112 | |
| 113 static inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) { | |
| 114 if (b == 0) { | |
| 115 *trap = kTrapDivByZero; | |
| 116 return 0; | |
| 117 } | |
| 118 if (b == -1 && a == 0x80000000) { | |
| 119 *trap = kTrapDivUnrepresentable; | |
| 120 return 0; | |
| 121 } | |
| 122 return a / b; | |
| 123 } | |
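The two explicit checks cover exactly the cases where C++ integer division is undefined for int32_t: division by zero, and INT32_MIN / -1, whose mathematical result (2^31) is not representable. A minimal usage sketch, following the trap convention used by the interpreter loop below (kTrapCount doubles as the "no trap" sentinel):

```cpp
TrapReason trap = kTrapCount;  // sentinel: no trap yet
int32_t q = ExecuteI32DivS(INT32_MIN, -1, &trap);
if (trap != kTrapCount) {
  // trap == kTrapDivUnrepresentable; q is a dummy 0 and must not be used.
}
```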
| 124 | |
| 125 static inline uint32_t ExecuteI32DivU(uint32_t a, uint32_t b, | |
| 126 TrapReason* trap) { | |
| 127 if (b == 0) { | |
| 128 *trap = kTrapDivByZero; | |
| 129 return 0; | |
| 130 } | |
| 131 return a / b; | |
| 132 } | |
| 133 | |
| 134 static inline int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) { | |
| 135 if (b == 0) { | |
| 136 *trap = kTrapRemByZero; | |
| 137 return 0; | |
| 138 } | |
| 139 if (b == -1) return 0; | |
| 140 return a % b; | |
| 141 } | |
| 142 | |
| 143 static inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b, | |
| 144 TrapReason* trap) { | |
| 145 if (b == 0) { | |
| 146 *trap = kTrapRemByZero; | |
| 147 return 0; | |
| 148 } | |
| 149 return a % b; | |
| 150 } | |
| 151 | |
| 152 static inline uint32_t ExecuteI32Shl(uint32_t a, uint32_t b, TrapReason* trap) { | |
| 153 return a << (b & 0x1f); | |
| 154 } | |
| 155 | |
| 156 static inline uint32_t ExecuteI32ShrU(uint32_t a, uint32_t b, | |
| 157 TrapReason* trap) { | |
| 158 return a >> (b & 0x1f); | |
| 159 } | |
| 160 | |
| 161 static inline int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) { | |
| 162 return a >> (b & 0x1f); | |
| 163 } | |
| 164 | |
| 165 static inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) { | |
| 166 if (b == 0) { | |
| 167 *trap = kTrapDivByZero; | |
| 168 return 0; | |
| 169 } | |
| 170 if (b == -1 && a == 0x8000000000000000ULL) { | |
| 171 *trap = kTrapDivUnrepresentable; | |
| 172 return 0; | |
| 173 } | |
| 174 return a / b; | |
| 175 } | |
| 176 | |
| 177 static inline uint64_t ExecuteI64DivU(uint64_t a, uint64_t b, | |
| 178 TrapReason* trap) { | |
| 179 if (b == 0) { | |
| 180 *trap = kTrapDivByZero; | |
| 181 return 0; | |
| 182 } | |
| 183 return a / b; | |
| 184 } | |
| 185 | |
| 186 static inline int64_t ExecuteI64RemS(int64_t a, int64_t b, TrapReason* trap) { | |
| 187 if (b == 0) { | |
| 188 *trap = kTrapRemByZero; | |
| 189 return 0; | |
| 190 } | |
| 191 if (b == -1) return 0; | |
| 192 return a % b; | |
| 193 } | |
| 194 | |
| 195 static inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b, | |
| 196 TrapReason* trap) { | |
| 197 if (b == 0) { | |
| 198 *trap = kTrapRemByZero; | |
| 199 return 0; | |
| 200 } | |
| 201 return a % b; | |
| 202 } | |
| 203 | |
| 204 static inline uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) { | |
| 205 return a << (b & 0x3f); | |
| 206 } | |
| 207 | |
| 208 static inline uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b, | |
| 209 TrapReason* trap) { | |
| 210 return a >> (b & 0x3f); | |
| 211 } | |
| 212 | |
| 213 static inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) { | |
| 214 return a >> (b & 0x3f); | |
| 215 } | |
| 216 | |
| 217 static inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) { | |
| 218 uint32_t shift = (b & 0x1f); | |
| 219 return (a >> shift) | (a << (32 - shift)); | |
| 220 } | |
| 221 | |
| 222 static inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) { | |
| 223 uint32_t shift = (b & 0x1f); | |
| 224 return (a << shift) | (a >> (32 - shift)); | |
| 225 } | |
| 226 | |
| 227 static inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) { | |
| 228 uint32_t shift = (b & 0x3f); | |
| 229 return (a >> shift) | (a << (64 - shift)); | |
| 230 } | |
| 231 | |
| 232 static inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) { | |
| 233 uint32_t shift = (b & 0x3f); | |
| 234 return (a << shift) | (a >> (64 - shift)); | |
| 235 } | |
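Note that when the rotate amount is a multiple of the word width, `shift` is 0 and the `(32 - shift)` / `(64 - shift)` term becomes a full-width shift, which is undefined behaviour in C++. A common UB-free formulation masks both shift counts; a sketch (not the code in this CL):

```cpp
// Sketch of a rotate-right that is well-defined for shift == 0 as well.
static inline uint32_t RotateRight32(uint32_t a, uint32_t b) {
  uint32_t shift = b & 0x1f;
  return (a >> shift) | (a << ((32 - shift) & 0x1f));  // rotate by 0 yields a
}
```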
| 236 | |
| 237 static inline float ExecuteF32Min(float a, float b, TrapReason* trap) { | |
| 238 if (std::isnan(a)) return a - 0.0f; | |
| 239 if (std::isnan(b)) return b - 0.0f; | |
| 240 return std::min(a, b); | |
| 241 } | |
| 242 | |
| 243 static inline float ExecuteF32Max(float a, float b, TrapReason* trap) { | |
| 244 if (std::isnan(a)) return a - 0.0f; | |
| 245 if (std::isnan(b)) return b - 0.0f; | |
| 246 return std::max(a, b); | |
| 247 } | |
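The explicit NaN checks are needed because std::min/std::max silently drop a NaN on one side; the `a - 0.0f` arithmetic presumably turns a signaling NaN into a quiet one. A small illustration of the std::min pitfall:

```cpp
#include <algorithm>
#include <cmath>

void MinNanDemo() {
  float nan = std::nanf("");
  float one = 1.0f;
  // std::min(a, b) is (b < a) ? b : a; every comparison with NaN is false,
  // so the result depends on which side the NaN is on:
  bool lost = !std::isnan(std::min(one, nan));  // true: the NaN is dropped
  bool kept = std::isnan(std::min(nan, one));   // true: the NaN is returned
  (void)lost; (void)kept;
}
```

ExecuteF32Min/Max therefore test both operands so a NaN propagates regardless of position.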
| 248 | |
| 249 static inline float ExecuteF32CopySign(float a, float b, TrapReason* trap) { | |
| 250 uint32_t ia = bit_cast<uint32_t>(a) & 0x7fffffff; | |
|
ahaas
2016/05/13 12:18:55
There exists a {copysign} function in math.h
titzer
2016/05/23 11:41:37
Done.
| |
| 251 uint32_t ib = bit_cast<uint32_t>(b) & 0x80000000; | |
| 252 return bit_cast<float>(ia | ib); | |
| 253 } | |
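As the reviewer notes, the same effect is available from the standard `copysign` family in `<cmath>`. A sketch of the suggested form (the *Alt names are illustrative only):

```cpp
#include <cmath>

static inline float ExecuteF32CopySignAlt(float a, float b) {
  return std::copysign(a, b);  // magnitude of a, sign of b
}

static inline double ExecuteF64CopySignAlt(double a, double b) {
  return std::copysign(a, b);
}
```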
| 254 | |
| 255 static inline double ExecuteF64Min(double a, double b, TrapReason* trap) { | |
| 256 if (std::isnan(a)) return a - 0.0; | |
| 257 if (std::isnan(b)) return b - 0.0; | |
| 258 return std::min(a, b); | |
| 259 } | |
| 260 | |
| 261 static inline double ExecuteF64Max(double a, double b, TrapReason* trap) { | |
| 262 if (std::isnan(a)) return a - 0.0; | |
| 263 if (std::isnan(b)) return b - 0.0; | |
| 264 return std::max(a, b); | |
| 265 } | |
| 266 | |
| 267 static inline double ExecuteF64CopySign(double a, double b, TrapReason* trap) { | |
| 268 uint64_t ia = bit_cast<uint64_t>(a) & 0x7fffffffffffffffULL; | |
| 269 uint64_t ib = bit_cast<uint64_t>(b) & 0x8000000000000000ULL; | |
| 270 return bit_cast<double>(ia | ib); | |
| 271 } | |
| 272 | |
| 273 static inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b, | |
| 274 TrapReason* trap) { | |
| 275 if (b == 0) return 0; | |
| 276 if (b == -1 && a == 0x80000000) return static_cast<int32_t>(0x80000000); | |
| 277 return a / b; | |
| 278 } | |
| 279 | |
| 280 static inline uint32_t ExecuteI32AsmjsDivU(uint32_t a, uint32_t b, | |
| 281 TrapReason* trap) { | |
| 282 if (b == 0) return 0; | |
| 283 return a / b; | |
| 284 } | |
| 285 | |
| 286 static inline int32_t ExecuteI32AsmjsRemS(int32_t a, int32_t b, | |
| 287 TrapReason* trap) { | |
| 288 if (b == 0) return 0; | |
| 289 if (b == -1) return 0; | |
| 290 return a % b; | |
| 291 } | |
| 292 | |
| 293 static inline uint32_t ExecuteI32AsmjsRemU(uint32_t a, uint32_t b, | |
| 294 TrapReason* trap) { | |
| 295 if (b == 0) return 0; | |
| 296 return a % b; | |
| 297 } | |
| 298 | |
| 299 static inline int32_t ExecuteI32AsmjsSConvertF32(float a, TrapReason* trap) { | |
| 300 return DoubleToInt32(a); | |
| 301 } | |
| 302 | |
| 303 static inline uint32_t ExecuteI32AsmjsUConvertF32(float a, TrapReason* trap) { | |
| 304 return DoubleToUint32(a); | |
| 305 } | |
| 306 | |
| 307 static inline int32_t ExecuteI32AsmjsSConvertF64(double a, TrapReason* trap) { | |
| 308 return DoubleToInt32(a); | |
| 309 } | |
| 310 | |
| 311 static inline uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) { | |
| 312 return DoubleToUint32(a); | |
| 313 } | |
| 314 | |
| 315 #define FOREACH_OTHER_UNOP(V) \ | |
| 316 V(I32Clz, uint32_t, _) \ | |
|
Clemens Hammacher
2016/05/12 15:20:55
Same here: third argument can be dropped. Also, you could merge this list with the binop list above.
titzer
2016/05/23 11:41:37
The distinction between UNOP and BINOP operators is still needed, since they expand to different case bodies below.
| |
| 317 V(I32Ctz, uint32_t, _) \ | |
| 318 V(I32Popcnt, uint32_t, _) \ | |
| 319 V(I32Eqz, uint32_t, _) \ | |
| 320 V(I64Clz, uint64_t, _) \ | |
| 321 V(I64Ctz, uint64_t, _) \ | |
| 322 V(I64Popcnt, uint64_t, _) \ | |
| 323 V(I64Eqz, uint64_t, _) \ | |
| 324 V(F32Abs, float, _) \ | |
| 325 V(F32Neg, float, _) \ | |
| 326 V(F32Ceil, float, _) \ | |
| 327 V(F32Floor, float, _) \ | |
| 328 V(F32Trunc, float, _) \ | |
| 329 V(F32NearestInt, float, _) \ | |
| 330 V(F32Sqrt, float, _) \ | |
| 331 V(F64Abs, double, _) \ | |
| 332 V(F64Neg, double, _) \ | |
| 333 V(F64Ceil, double, _) \ | |
| 334 V(F64Floor, double, _) \ | |
| 335 V(F64Trunc, double, _) \ | |
| 336 V(F64NearestInt, double, _) \ | |
| 337 V(F64Sqrt, double, _) \ | |
| 338 V(I32SConvertF32, float, _) \ | |
| 339 V(I32SConvertF64, double, _) \ | |
| 340 V(I32UConvertF32, float, _) \ | |
| 341 V(I32UConvertF64, double, _) \ | |
| 342 V(I32ConvertI64, int64_t, _) \ | |
| 343 V(I64SConvertF32, float, _) \ | |
| 344 V(I64SConvertF64, double, _) \ | |
| 345 V(I64UConvertF32, float, _) \ | |
| 346 V(I64UConvertF64, double, _) \ | |
| 347 V(I64SConvertI32, int32_t, _) \ | |
| 348 V(I64UConvertI32, uint32_t, _) \ | |
| 349 V(F32SConvertI32, int32_t, _) \ | |
| 350 V(F32UConvertI32, uint32_t, _) \ | |
| 351 V(F32SConvertI64, int64_t, _) \ | |
| 352 V(F32UConvertI64, uint64_t, _) \ | |
| 353 V(F32ConvertF64, double, _) \ | |
| 354 V(F32ReinterpretI32, int32_t, _) \ | |
| 355 V(F64SConvertI32, int32_t, _) \ | |
| 356 V(F64UConvertI32, uint32_t, _) \ | |
| 357 V(F64SConvertI64, int64_t, _) \ | |
| 358 V(F64UConvertI64, uint64_t, _) \ | |
| 359 V(F64ConvertF32, float, _) \ | |
| 360 V(F64ReinterpretI64, int64_t, _) \ | |
| 361 V(I32ReinterpretF32, float, _) \ | |
| 362 V(I64ReinterpretF64, double, _) \ | |
| 363 V(I32AsmjsSConvertF32, float, _) \ | |
| 364 V(I32AsmjsUConvertF32, float, _) \ | |
| 365 V(I32AsmjsSConvertF64, double, _) \ | |
| 366 V(I32AsmjsUConvertF64, double, _) | |
| 367 | |
| 368 static int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) { | |
| 369 int32_t count = 0; | |
|
ahaas
2016/05/13 12:18:55
you could use bits::CountTrailingZeroes32 (bits.h)
titzer
2016/05/23 11:41:38
Done.
| |
| 370 if (val == 0) return 32; | |
| 371 while ((val & 0x80000000) == 0) { | |
| 372 count++; | |
| 373 val <<= 1; | |
| 374 } | |
| 375 return count; | |
| 376 } | |
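The loop is a portable bit-at-a-time count of leading zeros. As suggested in the review, V8's src/base/bits.h provides intrinsics-backed helpers for this; assuming `base::bits::CountLeadingZeros32(uint32_t)` exists with the usual signature, the function could reduce to a one-liner (a sketch, not code from this CL):

```cpp
#include "src/base/bits.h"

static int32_t ExecuteI32ClzAlt(uint32_t val) {
  // Assumption: CountLeadingZeros32 returns 32 for val == 0 (worth verifying).
  return base::bits::CountLeadingZeros32(val);
}
```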
| 377 | |
| 378 static uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) { | |
| 379 return word32_ctz_wrapper(&val); | |
| 380 } | |
| 381 | |
| 382 static uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) { | |
| 383 return word32_popcnt_wrapper(&val); | |
| 384 } | |
| 385 | |
| 386 static inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) { | |
| 387 return val == 0 ? 1 : 0; | |
| 388 } | |
| 389 | |
| 390 static int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) { | |
| 391 int count = 0; | |
| 392 if (val == 0) return 64; | |
| 393 while ((val & 0x8000000000000000ULL) == 0) { | |
| 394 count++; | |
| 395 val <<= 1; | |
| 396 } | |
| 397 return count; | |
| 398 } | |
| 399 | |
| 400 static inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) { | |
| 401 return word64_ctz_wrapper(&val); | |
| 402 } | |
| 403 | |
| 404 static inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) { | |
| 405 return word64_popcnt_wrapper(&val); | |
| 406 } | |
| 407 | |
| 408 static inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) { | |
| 409 return val == 0 ? 1 : 0; | |
| 410 } | |
| 411 | |
| 412 static inline float ExecuteF32Abs(float a, TrapReason* trap) { | |
| 413 return bit_cast<float>(bit_cast<uint32_t>(a) & 0x7fffffff); | |
| 414 } | |
| 415 | |
| 416 static inline float ExecuteF32Neg(float a, TrapReason* trap) { | |
| 417 return bit_cast<float>(bit_cast<uint32_t>(a) ^ 0x80000000); | |
| 418 } | |
| 419 | |
| 420 static inline float ExecuteF32Ceil(float a, TrapReason* trap) { | |
| 421 return ceilf(a); | |
| 422 } | |
| 423 | |
| 424 static inline float ExecuteF32Floor(float a, TrapReason* trap) { | |
| 425 return floorf(a); | |
| 426 } | |
| 427 | |
| 428 static inline float ExecuteF32Trunc(float a, TrapReason* trap) { | |
| 429 return truncf(a); | |
| 430 } | |
| 431 | |
| 432 static inline float ExecuteF32NearestInt(float a, TrapReason* trap) { | |
| 433 return nearbyintf(a); | |
| 434 } | |
| 435 | |
| 436 static inline float ExecuteF32Sqrt(float a, TrapReason* trap) { | |
| 437 return sqrtf(a); | |
| 438 } | |
| 439 | |
| 440 static inline double ExecuteF64Abs(double a, TrapReason* trap) { | |
| 441 return bit_cast<double>(bit_cast<uint64_t>(a) & 0x7fffffffffffffff); | |
| 442 } | |
| 443 | |
| 444 static inline double ExecuteF64Neg(double a, TrapReason* trap) { | |
| 445 return bit_cast<double>(bit_cast<uint64_t>(a) ^ 0x8000000000000000); | |
| 446 } | |
| 447 | |
| 448 static inline double ExecuteF64Ceil(double a, TrapReason* trap) { | |
| 449 return ceil(a); | |
| 450 } | |
| 451 | |
| 452 static inline double ExecuteF64Floor(double a, TrapReason* trap) { | |
| 453 return floor(a); | |
| 454 } | |
| 455 | |
| 456 static inline double ExecuteF64Trunc(double a, TrapReason* trap) { | |
| 457 return trunc(a); | |
| 458 } | |
| 459 | |
| 460 static inline double ExecuteF64NearestInt(double a, TrapReason* trap) { | |
| 461 return nearbyint(a); | |
| 462 } | |
| 463 | |
| 464 static inline double ExecuteF64Sqrt(double a, TrapReason* trap) { | |
| 465 return sqrt(a); | |
| 466 } | |
| 467 | |
| 468 static int32_t ExecuteI32SConvertF32(float a, TrapReason* trap) { | |
| 469 if (a < static_cast<float>(INT32_MAX) && a >= static_cast<float>(INT32_MIN)) { | |
| 470 return static_cast<int32_t>(a); | |
| 471 } | |
| 472 *trap = kTrapFloatUnrepresentable; | |
| 473 return 0; | |
| 474 } | |
| 475 | |
| 476 static int32_t ExecuteI32SConvertF64(double a, TrapReason* trap) { | |
| 477 if (a < (static_cast<double>(INT32_MAX) + 1.0) && | |
| 478 a > (static_cast<double>(INT32_MIN) - 1.0)) { | |
| 479 return static_cast<int32_t>(a); | |
| 480 } | |
| 481 *trap = kTrapFloatUnrepresentable; | |
| 482 return 0; | |
| 483 } | |
| 484 | |
| 485 static uint32_t ExecuteI32UConvertF32(float a, TrapReason* trap) { | |
| 486 if (a < (static_cast<float>(UINT32_MAX) + 1.0) && a > -1) { | |
| 487 return static_cast<uint32_t>(a); | |
| 488 } | |
| 489 *trap = kTrapFloatUnrepresentable; | |
| 490 return 0; | |
| 491 } | |
| 492 | |
| 493 static uint32_t ExecuteI32UConvertF64(double a, TrapReason* trap) { | |
| 494 if (a < (static_cast<float>(UINT32_MAX) + 1.0) && a > -1) { | |
| 495 return static_cast<uint32_t>(a); | |
| 496 } | |
| 497 *trap = kTrapFloatUnrepresentable; | |
| 498 return 0; | |
| 499 } | |
| 500 | |
| 501 static inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) { | |
| 502 return static_cast<uint32_t>(a & 0xFFFFFFFF); | |
| 503 } | |
| 504 | |
| 505 static int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) { | |
| 506 int64_t output; | |
| 507 if (!float32_to_int64_wrapper(&a, &output)) { | |
| 508 *trap = kTrapFloatUnrepresentable; | |
| 509 } | |
| 510 return output; | |
| 511 } | |
| 512 | |
| 513 static int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) { | |
| 514 int64_t output; | |
| 515 if (!float64_to_int64_wrapper(&a, &output)) { | |
| 516 *trap = kTrapFloatUnrepresentable; | |
| 517 } | |
| 518 return output; | |
| 519 } | |
| 520 | |
| 521 static uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) { | |
| 522 uint64_t output; | |
| 523 if (!float32_to_uint64_wrapper(&a, &output)) { | |
| 524 *trap = kTrapFloatUnrepresentable; | |
| 525 } | |
| 526 return output; | |
| 527 } | |
| 528 | |
| 529 static uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) { | |
| 530 uint64_t output; | |
| 531 if (!float64_to_uint64_wrapper(&a, &output)) { | |
| 532 *trap = kTrapFloatUnrepresentable; | |
| 533 } | |
| 534 return output; | |
| 535 } | |
| 536 | |
| 537 static inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) { | |
| 538 return static_cast<int64_t>(a); | |
| 539 } | |
| 540 | |
| 541 static inline int64_t ExecuteI64UConvertI32(uint32_t a, TrapReason* trap) { | |
| 542 return static_cast<uint64_t>(a); | |
| 543 } | |
| 544 | |
| 545 static inline float ExecuteF32SConvertI32(int32_t a, TrapReason* trap) { | |
| 546 return static_cast<float>(a); | |
| 547 } | |
| 548 | |
| 549 static inline float ExecuteF32UConvertI32(uint32_t a, TrapReason* trap) { | |
| 550 return static_cast<float>(a); | |
| 551 } | |
| 552 | |
| 553 static inline float ExecuteF32SConvertI64(int64_t a, TrapReason* trap) { | |
| 554 float output; | |
| 555 int64_to_float32_wrapper(&a, &output); | |
| 556 return output; | |
| 557 } | |
| 558 | |
| 559 static inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) { | |
| 560 float output; | |
| 561 uint64_to_float32_wrapper(&a, &output); | |
| 562 return output; | |
| 563 } | |
| 564 | |
| 565 static inline float ExecuteF32ConvertF64(double a, TrapReason* trap) { | |
| 566 return static_cast<float>(a); | |
| 567 } | |
| 568 | |
| 569 static inline float ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) { | |
| 570 return bit_cast<float>(a); | |
| 571 } | |
| 572 | |
| 573 static inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) { | |
| 574 return static_cast<double>(a); | |
| 575 } | |
| 576 | |
| 577 static inline double ExecuteF64UConvertI32(uint32_t a, TrapReason* trap) { | |
| 578 return static_cast<double>(a); | |
| 579 } | |
| 580 | |
| 581 static inline double ExecuteF64SConvertI64(int64_t a, TrapReason* trap) { | |
| 582 double output; | |
| 583 int64_to_float64_wrapper(&a, &output); | |
| 584 return output; | |
| 585 } | |
| 586 | |
| 587 static inline double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) { | |
| 588 double output; | |
| 589 uint64_to_float64_wrapper(&a, &output); | |
| 590 return output; | |
| 591 } | |
| 592 | |
| 593 static inline double ExecuteF64ConvertF32(float a, TrapReason* trap) { | |
| 594 return static_cast<double>(a); | |
| 595 } | |
| 596 | |
| 597 static inline double ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) { | |
| 598 return bit_cast<double>(a); | |
| 599 } | |
| 600 | |
| 601 static inline int32_t ExecuteI32ReinterpretF32(float a, TrapReason* trap) { | |
| 602 return bit_cast<int32_t>(a); | |
| 603 } | |
| 604 | |
| 605 static inline int64_t ExecuteI64ReinterpretF64(double a, TrapReason* trap) { | |
| 606 return bit_cast<int64_t>(a); | |
| 607 } | |
| 608 | |
| 609 enum InternalOpcode { | |
| 610 #define DECL_INTERNAL_ENUM(name, value) kInternal##name = value, | |
| 611 FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_ENUM) | |
| 612 #undef DECL_INTERNAL_ENUM | |
| 613 }; | |
| 614 | |
| 615 static const char* OpcodeName(uint32_t val) { | |
| 616 switch (val) { | |
| 617 #define DECL_INTERNAL_CASE(name, value) \ | |
| 618 case kInternal##name: \ | |
| 619 return "Internal" #name; | |
| 620 FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_CASE) | |
| 621 #undef DECL_INTERNAL_CASE | |
| 622 } | |
| 623 return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val)); | |
| 624 } | |
| 625 | |
| 626 static const int kRunSteps = 1000; | |
|
ahaas
2016/05/13 12:18:55
This declaration does not belong here. Is there a better place for it?
| |
| 627 | |
| 628 // A helper class to compute the control transfers for each bytecode offset. | |
| 629 // Control transfers allow Br, BrIf, BrTable, If, Else, and End bytecodes to | |
| 630 // be directly executed without the need to dynamically track blocks. | |
| 631 class ControlTransfers { | |
| 632 public: | |
| 633 ControlTransferMap map; | |
|
ahaas
2016/05/13 12:18:55
Can you call it map_ so that it's easier to distinguish from local variables?
titzer
2016/05/23 11:41:38
Done.
| |
| 634 | |
| 635 ControlTransfers(Zone* zone, size_t locals_encoded_size, const byte* start, | |
| 636 const byte* end) | |
| 637 : map(zone) { | |
| 638 // A control reference including from PC, from value depth, and whether | |
| 639 // a value is explicitly passed (e.g. br/br_if/br_table with value). | |
| 640 struct CRef { | |
|
ahaas
2016/05/13 12:18:55
The code would be easier to read if these structs were declared outside of the constructor.
titzer
2016/05/23 11:41:38
I can see that, but this way they are not visible outside of the constructor.
| |
| 641 const byte* pc; | |
| 642 sp_t value_depth; | |
| 643 bool explicit_value; | |
| 644 }; | |
| 645 | |
| 646 // Represents a control flow label. | |
| 647 struct CLabel : public ZoneObject { | |
| 648 const byte* target; | |
| 649 size_t value_depth; | |
| 650 ZoneVector<CRef> refs; | |
| 651 | |
| 652 CLabel(Zone* zone, size_t v) | |
| 653 : target(nullptr), value_depth(v), refs(zone) {} | |
| 654 | |
| 655 // Bind this label to the given PC. | |
| 656 void Bind(ControlTransferMap* map, const byte* start, const byte* pc, | |
| 657 bool expect_value) { | |
| 658 DCHECK_NULL(target); | |
| 659 target = pc; | |
| 660 for (auto from : refs) { | |
| 661 auto pcdiff = static_cast<pcdiff_t>(target - from.pc); | |
| 662 auto spdiff = static_cast<spdiff_t>(from.value_depth - value_depth); | |
| 663 ControlTransfer::StackAction action = ControlTransfer::kNoAction; | |
| 664 if (expect_value && !from.explicit_value) { | |
| 665 action = spdiff == 0 ? ControlTransfer::kPushVoid | |
| 666 : ControlTransfer::kPopAndRepush; | |
| 667 } | |
| 668 pc_t offset = static_cast<size_t>(from.pc - start); | |
| 669 (*map)[offset] = {pcdiff, spdiff, action}; | |
| 670 } | |
| 671 } | |
| 672 | |
| 673 // Reference this label from the given location. | |
| 674 void Ref(ControlTransferMap* map, const byte* start, CRef from) { | |
| 675 DCHECK_GE(from.value_depth, value_depth); | |
| 676 if (target) { | |
| 677 auto pcdiff = static_cast<pcdiff_t>(target - from.pc); | |
| 678 auto spdiff = static_cast<spdiff_t>(from.value_depth - value_depth); | |
| 679 pc_t offset = static_cast<size_t>(from.pc - start); | |
| 680 (*map)[offset] = {pcdiff, spdiff, ControlTransfer::kNoAction}; | |
| 681 } else { | |
| 682 refs.push_back(from); | |
| 683 } | |
| 684 } | |
| 685 }; | |
| 686 | |
| 687 // An entry in the control stack. | |
| 688 struct Control { | |
| 689 const byte* pc; | |
| 690 CLabel* end_label; | |
| 691 CLabel* else_label; | |
| 692 | |
| 693 void Ref(ControlTransferMap* map, const byte* start, const byte* from_pc, | |
| 694 size_t from_value_depth, bool explicit_value) { | |
| 695 end_label->Ref(map, start, {from_pc, from_value_depth, explicit_value}); | |
| 696 } | |
| 697 }; | |
| 698 | |
| 699 std::vector<Control> control_stack; | |
| 700 size_t value_depth = 0; | |
|
ahaas
2016/05/13 12:18:54
Please add a comment which describes how the following code works.
titzer
2016/05/23 11:41:38
Done.
| |
| 701 Decoder decoder(start, end); // for reading operands. | |
| 702 const byte* pc = start + locals_encoded_size; | |
| 703 | |
| 704 while (pc < end) { | |
| 705 WasmOpcode opcode = static_cast<WasmOpcode>(*pc); | |
| 706 TRACE("@%zu: control %s (depth = %zu)\n", (pc - start), | |
| 707 WasmOpcodes::OpcodeName(opcode), value_depth); | |
| 708 switch (opcode) { | |
| 709 case kExprBlock: { | |
| 710 TRACE("control @%zu $%zu: Block\n", (pc - start), value_depth); | |
| 711 CLabel* label = new (zone) CLabel(zone, value_depth); | |
| 712 control_stack.push_back({pc, label, nullptr}); | |
| 713 break; | |
| 714 } | |
| 715 case kExprLoop: { | |
| 716 TRACE("control @%zu $%zu: Loop\n", (pc - start), value_depth); | |
| 717 CLabel* label1 = new (zone) CLabel(zone, value_depth); | |
| 718 CLabel* label2 = new (zone) CLabel(zone, value_depth); | |
| 719 control_stack.push_back({pc, label1, nullptr}); | |
| 720 control_stack.push_back({pc, label2, nullptr}); | |
| 721 label2->Bind(&map, start, pc, false); | |
| 722 break; | |
| 723 } | |
| 724 case kExprIf: { | |
| 725 TRACE("control @%zu $%zu: If\n", (pc - start), value_depth); | |
| 726 value_depth--; | |
| 727 CLabel* end_label = new (zone) CLabel(zone, value_depth); | |
| 728 CLabel* else_label = new (zone) CLabel(zone, value_depth); | |
| 729 control_stack.push_back({pc, end_label, else_label}); | |
| 730 else_label->Ref(&map, start, {pc, value_depth, false}); | |
| 731 break; | |
| 732 } | |
| 733 case kExprElse: { | |
| 734 Control* c = &control_stack.back(); | |
| 735 TRACE("control @%zu $%zu: Else\n", (pc - start), value_depth); | |
| 736 c->end_label->Ref(&map, start, {pc, value_depth, false}); | |
| 737 value_depth = c->end_label->value_depth; | |
| 738 DCHECK_NOT_NULL(c->else_label); | |
| 739 c->else_label->Bind(&map, start, pc + 1, false); | |
| 740 c->else_label = nullptr; | |
| 741 break; | |
| 742 } | |
| 743 case kExprEnd: { | |
| 744 Control* c = &control_stack.back(); | |
| 745 TRACE("control @%zu $%zu: End\n", (pc - start), value_depth); | |
| 746 if (c->end_label->target) { | |
| 747 // only loops have bound labels. | |
| 748 DCHECK_EQ(kExprLoop, *c->pc); | |
| 749 control_stack.pop_back(); | |
| 750 c = &control_stack.back(); | |
| 751 } | |
| 752 if (c->else_label) c->else_label->Bind(&map, start, pc + 1, true); | |
| 753 c->end_label->Ref(&map, start, {pc, value_depth, false}); | |
| 754 c->end_label->Bind(&map, start, pc + 1, true); | |
| 755 value_depth = c->end_label->value_depth + 1; | |
| 756 control_stack.pop_back(); | |
| 757 break; | |
| 758 } | |
| 759 case kExprBr: { | |
| 760 BreakDepthOperand operand(&decoder, pc); | |
| 761 TRACE("control @%zu $%zu: Br[arity=%u, depth=%u]\n", (pc - start), | |
| 762 value_depth, operand.arity, operand.depth); | |
| 763 value_depth -= operand.arity; | |
| 764 control_stack[control_stack.size() - operand.depth - 1].Ref( | |
| 765 &map, start, pc, value_depth, operand.arity > 0); | |
| 766 value_depth++; | |
| 767 break; | |
| 768 } | |
| 769 case kExprBrIf: { | |
| 770 BreakDepthOperand operand(&decoder, pc); | |
| 771 TRACE("control @%zu $%zu: BrIf[arity=%u, depth=%u]\n", (pc - start), | |
| 772 value_depth, operand.arity, operand.depth); | |
| 773 value_depth -= (operand.arity + 1); | |
| 774 control_stack[control_stack.size() - operand.depth - 1].Ref( | |
| 775 &map, start, pc, value_depth, operand.arity > 0); | |
| 776 value_depth++; | |
| 777 break; | |
| 778 } | |
| 779 case kExprBrTable: { | |
| 780 BranchTableOperand operand(&decoder, pc); | |
| 781 TRACE("control @%zu $%zu: BrTable[arity=%u count=%u]\n", (pc - start), | |
| 782 value_depth, operand.arity, operand.table_count); | |
| 783 value_depth -= (operand.arity + 1); | |
| 784 for (uint32_t i = 0; i < operand.table_count + 1; i++) { | |
| 785 uint32_t target = operand.read_entry(&decoder, i); | |
| 786 control_stack[control_stack.size() - target - 1].Ref( | |
| 787 &map, start, pc + i, value_depth, operand.arity > 0); | |
| 788 } | |
| 789 value_depth++; | |
| 790 break; | |
| 791 } | |
| 792 default: { | |
| 793 value_depth = value_depth - OpcodeArity(pc, end) + 1; | |
| 794 break; | |
|
dougc
2016/05/13 13:23:27
Might the value_depth need to be checked against t
| |
| 795 } | |
| 796 } | |
| 797 | |
| 798 pc += OpcodeLength(pc, end); | |
| 799 } | |
| 800 } | |
| 801 | |
| 802 ControlTransfer Lookup(pc_t from) { | |
| 803 auto result = map.find(from); | |
| 804 if (result == map.end()) { | |
| 805 V8_Fatal(__FILE__, __LINE__, "no control target for pc %zu", from); | |
| 806 } | |
| 807 return result->second; | |
| 808 } | |
| 809 }; | |
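The net effect of this pass is a map from the pc of every control-flow bytecode to a precomputed (pcdiff, spdiff, action) triple, so the interpreter can take a branch without walking nested blocks at run time. Roughly, applying an entry looks like the sketch below (this is the kNoAction case; DoGoto() further down also handles the push/re-push actions):

```cpp
// Sketch: applying one precomputed transfer at bytecode offset `pc`.
ControlTransfer t = code->targets->Lookup(pc);
PopN(t.spdiff);   // drop the values the skipped blocks would have consumed
pc += t.pcdiff;   // jump straight to the branch target
```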
| 810 | |
| 811 // Code and metadata needed to execute a function. | |
| 812 struct InterpreterCode { | |
| 813 WasmFunction* function; // wasm function | |
| 814 AstLocalDecls locals; // local declarations | |
| 815 const byte* orig_start; // start of original code | |
| 816 const byte* orig_end; // end of original code | |
| 817 byte* start; // start of (maybe altered) code | |
| 818 byte* end; // end of (maybe altered) code | |
| 819 ControlTransfers* targets; // helper for control flow. | |
| 820 | |
| 821 const byte* at(pc_t pc) { return start + pc; } | |
| 822 }; | |
| 823 | |
| 824 // The main storage for interpreter code. It maps {WasmFunction} to the | |
| 825 // metadata needed to execute each function. | |
| 826 class CodeMap { | |
| 827 public: | |
| 828 Zone* zone_; | |
| 829 WasmModule* module_; | |
| 830 ZoneVector<InterpreterCode> interpreter_code_; | |
| 831 | |
| 832 CodeMap(WasmModule* module, Zone* zone) | |
| 833 : zone_(zone), module_(module), interpreter_code_(zone) { | |
| 834 if (module == nullptr) return; | |
| 835 for (size_t i = 0; i < module->functions.size(); i++) { | |
| 836 WasmFunction* function = &module->functions[i]; | |
| 837 const byte* code_start = | |
| 838 module->module_start + function->code_start_offset; | |
| 839 const byte* code_end = module->module_start + function->code_end_offset; | |
| 840 AddFunction(function, code_start, code_end); | |
| 841 } | |
| 842 } | |
| 843 | |
| 844 InterpreterCode* FindCode(WasmFunction* function) { | |
| 845 for (size_t i = 0; i < interpreter_code_.size(); i++) { | |
|
Clemens Hammacher
2016/05/12 15:20:55
In which situations is this different from GetCode?
titzer
2016/05/23 11:41:38
Good catch. I think this was written before function indices were available.
| |
| 846 if (interpreter_code_[i].function == function) { | |
| 847 return Preprocess(&interpreter_code_[i]); | |
| 848 } | |
| 849 } | |
| 850 return nullptr; | |
| 851 } | |
| 852 | |
| 853 InterpreterCode* GetCode(uint32_t function_index) { | |
| 854 CHECK_LT(function_index, interpreter_code_.size()); | |
| 855 return Preprocess(&interpreter_code_[function_index]); | |
| 856 } | |
| 857 | |
| 858 InterpreterCode* GetIndirectCode(uint32_t indirect_index) { | |
| 859 if (indirect_index >= module_->function_table.size()) return nullptr; | |
| 860 uint32_t index = module_->function_table[indirect_index]; | |
| 861 if (index >= interpreter_code_.size()) return nullptr; | |
| 862 return Preprocess(&interpreter_code_[index]); | |
| 863 } | |
| 864 | |
| 865 InterpreterCode* Preprocess(InterpreterCode* code) { | |
| 866 if (code->targets == nullptr && code->start) { | |
| 867 // Compute the expr_ends map and the local declarations. | |
| 868 ModuleEnv module_env; | |
| 869 module_env.module = module_; | |
| 870 CHECK(DecodeLocalDecls(code->locals, code->start, code->end)); | |
| 871 code->targets = new ControlTransfers( | |
| 872 zone_, code->locals.decls_encoded_size, code->start, code->end); | |
| 873 base::AccountingAllocator allocator; | |
|
Clemens Hammacher
2016/05/12 15:20:55
This does not seem to have any effect.
titzer
2016/05/23 11:41:38
Done.
| |
| 874 } | |
| 875 return code; | |
| 876 } | |
| 877 | |
| 878 int AddFunction(WasmFunction* function, const byte* code_start, | |
| 879 const byte* code_end) { | |
| 880 InterpreterCode code = { | |
| 881 function, AstLocalDecls(zone_), code_start, | |
| 882 code_end, const_cast<byte*>(code_start), const_cast<byte*>(code_end), | |
| 883 nullptr}; | |
| 884 | |
| 885 interpreter_code_.push_back(code); | |
| 886 return static_cast<int>(interpreter_code_.size()) - 1; | |
| 887 } | |
| 888 | |
| 889 bool SetFunctionCode(WasmFunction* function, const byte* start, | |
| 890 const byte* end) { | |
| 891 InterpreterCode* code = FindCode(function); | |
| 892 if (code == nullptr) return false; | |
| 893 code->targets = nullptr; | |
| 894 code->orig_start = start; | |
| 895 code->orig_end = end; | |
| 896 code->start = const_cast<byte*>(start); | |
| 897 code->end = const_cast<byte*>(end); | |
| 898 Preprocess(code); | |
| 899 return true; | |
| 900 } | |
| 901 }; | |
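CodeMap registers every function up front but lazily attaches the decoded local declarations and the ControlTransfers table the first time a function is requested. A hedged usage sketch, assuming a codemap pointer, a function index and a bytecode offset are in scope:

```cpp
// Sketch: how the interpreter uses CodeMap (names as in this CL).
InterpreterCode* code = codemap->GetCode(func_index);   // triggers Preprocess()
ControlTransfer t = code->targets->Lookup(pc);          // precomputed branch data
```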
| 902 | |
| 903 // Responsible for executing code directly. | |
| 904 class ThreadImpl : public WasmInterpreter::Thread { | |
| 905 public: | |
| 906 ThreadImpl(Zone* zone, CodeMap* codemap, WasmModuleInstance* instance) | |
| 907 : codemap_(codemap), | |
| 908 instance_(instance), | |
| 909 stack_(zone), | |
| 910 frames_(zone), | |
| 911 state_(WasmInterpreter::STOPPED), | |
| 912 trap_reason_(kTrapCount) {} | |
| 913 | |
| 914 virtual ~ThreadImpl() {} | |
| 915 | |
| 916 //========================================================================== | |
| 917 // Implementation of public interface for WasmInterpreter::Thread. | |
| 918 //========================================================================== | |
| 919 | |
| 920 virtual WasmInterpreter::State state() { return state_; } | |
| 921 | |
| 922 virtual void PushFrame(WasmFunction* function, WasmVal* args) { | |
| 923 InterpreterCode* code = codemap()->FindCode(function); | |
| 924 CHECK_NOT_NULL(code); | |
| 925 frames_.push_back({code, 0, 0, stack_.size()}); | |
| 926 for (size_t i = 0; i < function->sig->parameter_count(); i++) { | |
| 927 stack_.push_back(args[i]); | |
| 928 } | |
| 929 frames_.back().ret_pc = InitLocals(code); | |
| 930 TRACE(" => push func#%u @%zu\n", code->function->func_index, | |
| 931 frames_.back().ret_pc); | |
| 932 } | |
| 933 | |
| 934 virtual WasmInterpreter::State Run() { | |
| 935 do { | |
| 936 if (state_ == WasmInterpreter::STOPPED || | |
| 937 state_ == WasmInterpreter::PAUSED) { | |
| 938 state_ = WasmInterpreter::RUNNING; | |
| 939 Execute(frames_.back().code, frames_.back().ret_pc, kRunSteps); | |
| 940 } | |
| 941 } while (state_ == WasmInterpreter::STOPPED); | |
| 942 return state_; | |
| 943 } | |
| 944 | |
| 945 virtual WasmInterpreter::State Step() { | |
| 946 UNIMPLEMENTED(); | |
| 947 return WasmInterpreter::STOPPED; | |
| 948 } | |
| 949 | |
| 950 virtual void Pause() { UNIMPLEMENTED(); } | |
| 951 | |
| 952 virtual void Reset() { | |
| 953 TRACE("----- RESET -----\n"); | |
| 954 stack_.clear(); | |
| 955 frames_.clear(); | |
| 956 state_ = WasmInterpreter::STOPPED; | |
| 957 trap_reason_ = kTrapCount; | |
| 958 } | |
| 959 | |
| 960 virtual int GetFrameCount() { return static_cast<int>(frames_.size()); } | |
| 961 | |
| 962 virtual const WasmFrame& GetFrame(int index) { UNIMPLEMENTED(); } | |
| 963 | |
| 964 virtual WasmFrame& GetMutableFrame(int index) { UNIMPLEMENTED(); } | |
| 965 | |
| 966 virtual WasmVal GetReturnValue() { | |
| 967 if (state_ == WasmInterpreter::TRAPPED) return WasmVal(0xdeadbeef); | |
| 968 CHECK_EQ(WasmInterpreter::FINISHED, state_); | |
| 969 CHECK_EQ(1, stack_.size()); | |
| 970 return stack_[0]; | |
| 971 } | |
| 972 | |
| 973 bool Terminated() { | |
| 974 return state_ == WasmInterpreter::TRAPPED || | |
| 975 state_ == WasmInterpreter::FINISHED; | |
| 976 } | |
| 977 | |
| 978 private: | |
| 979 // Entries on the stack of functions being evaluated. | |
| 980 struct Frame { | |
| 981 InterpreterCode* code; | |
| 982 pc_t call_pc; | |
|
ahaas
2016/05/13 12:18:55
Instead of storing the call_pc you could just store whether the call was an indirect call.
titzer
2016/05/23 11:41:38
Ah. I was going to expose the "caller PC" to the debugger.
| |
| 983 pc_t ret_pc; | |
| 984 sp_t sp; | |
| 985 | |
| 986 // Limit of parameters. | |
| 987 sp_t plimit() { return sp + code->function->sig->parameter_count(); } | |
| 988 // Limit of locals. | |
| 989 sp_t llimit() { return plimit() + code->locals.total_local_count; } | |
| 990 }; | |
| 991 | |
| 992 CodeMap* codemap_; | |
| 993 WasmModuleInstance* instance_; | |
| 994 ZoneVector<WasmVal> stack_; | |
| 995 ZoneVector<Frame> frames_; | |
| 996 WasmInterpreter::State state_; | |
| 997 TrapReason trap_reason_; | |
| 998 | |
| 999 CodeMap* codemap() { return codemap_; } | |
| 1000 WasmModuleInstance* instance() { return instance_; } | |
| 1001 WasmModule* module() { return instance_->module; } | |
| 1002 | |
| 1003 void DoTrap(TrapReason trap, pc_t pc) { | |
| 1004 state_ = WasmInterpreter::TRAPPED; | |
| 1005 trap_reason_ = trap; | |
| 1006 CommitPc(pc); | |
| 1007 } | |
| 1008 | |
| 1009 // Push a frame with arguments already on the stack. | |
| 1010 void PushFrame(InterpreterCode* code, pc_t call_pc, pc_t ret_pc) { | |
| 1011 CHECK_NOT_NULL(code); | |
| 1012 DCHECK(!frames_.empty()); | |
| 1013 frames_.back().call_pc = call_pc; | |
| 1014 frames_.back().ret_pc = ret_pc; | |
| 1015 size_t arity = code->function->sig->parameter_count(); | |
| 1016 DCHECK_GE(stack_.size(), arity); | |
| 1017 // The parameters will overlap the arguments already on the stack. | |
| 1018 frames_.push_back({code, 0, 0, stack_.size() - arity}); | |
| 1019 frames_.back().ret_pc = InitLocals(code); | |
| 1020 TRACE(" => push func#%u @%zu\n", code->function->func_index, | |
| 1021 frames_.back().ret_pc); | |
| 1022 } | |
| 1023 | |
| 1024 pc_t InitLocals(InterpreterCode* code) { | |
| 1025 for (auto p : code->locals.local_types) { | |
| 1026 WasmVal val; | |
| 1027 switch (p.first) { | |
| 1028 case kAstI32: | |
| 1029 val = WasmVal(static_cast<int32_t>(0)); | |
| 1030 break; | |
| 1031 case kAstI64: | |
| 1032 val = WasmVal(static_cast<int64_t>(0)); | |
| 1033 break; | |
| 1034 case kAstF32: | |
| 1035 val = WasmVal(static_cast<float>(0)); | |
| 1036 break; | |
| 1037 case kAstF64: | |
| 1038 val = WasmVal(static_cast<double>(0)); | |
| 1039 break; | |
| 1040 default: | |
| 1041 UNREACHABLE(); | |
| 1042 break; | |
| 1043 } | |
| 1044 stack_.insert(stack_.end(), p.second, val); | |
| 1045 } | |
| 1046 return code->locals.decls_encoded_size; | |
| 1047 } | |
| 1048 | |
| 1049 void CommitPc(pc_t pc) { | |
| 1050 if (!frames_.empty()) { | |
| 1051 frames_.back().ret_pc = pc; | |
| 1052 } | |
| 1053 } | |
| 1054 | |
| 1055 bool SkipBreakpoint(InterpreterCode* code, pc_t pc) { | |
| 1056 // TODO(titzer): skip a breakpoint if we are resuming from it, or it | |
| 1057 // is set for another thread only. | |
| 1058 return false; | |
| 1059 } | |
| 1060 | |
| 1061 bool DoReturn(InterpreterCode** code, pc_t* pc, pc_t* limit, WasmVal val) { | |
| 1062 DCHECK_GT(frames_.size(), 0); | |
| 1063 stack_.resize(frames_.back().sp); | |
| 1064 frames_.pop_back(); | |
| 1065 if (frames_.size() == 0) { | |
| 1066 // A return from the top frame terminates the execution. | |
| 1067 state_ = WasmInterpreter::FINISHED; | |
| 1068 stack_.clear(); | |
| 1069 stack_.push_back(val); | |
| 1070 TRACE(" => finish\n"); | |
| 1071 return false; | |
| 1072 } else { | |
| 1073 // Return to caller frame. | |
| 1074 Frame* top = &frames_.back(); | |
| 1075 *code = top->code; | |
| 1076 *pc = top->ret_pc; | |
| 1077 *limit = top->code->end - top->code->start; | |
| 1078 if (top->code->start[top->call_pc] == kExprCallIndirect || | |
| 1079 (top->code->orig_start && | |
| 1080 top->code->orig_start[top->call_pc] == kExprCallIndirect)) { | |
| 1081 // UGLY: An indirect call has the additional function index on the | |
| 1082 // stack. | |
| 1083 stack_.pop_back(); | |
| 1084 } | |
| 1085 TRACE(" => pop func#%u @%zu\n", (*code)->function->func_index, *pc); | |
| 1086 | |
| 1087 stack_.push_back(val); | |
| 1088 return true; | |
| 1089 } | |
| 1090 } | |
| 1091 | |
| 1092 int DoGoto(InterpreterCode* code, pc_t pc) { | |
|
ahaas
2016/05/13 12:18:54
Could you mention in the function name that you do not actually perform the jump here?
titzer
2016/05/23 11:41:38
Done.
| |
| 1093 auto target = code->targets->Lookup(pc); | |
| 1094 switch (target.action) { | |
| 1095 case ControlTransfer::kNoAction: | |
| 1096 TRACE(" action [sp-%u]\n", target.spdiff); | |
| 1097 PopN(target.spdiff); | |
| 1098 break; | |
| 1099 case ControlTransfer::kPopAndRepush: { | |
| 1100 WasmVal val = Pop(); | |
| 1101 TRACE(" action [pop x, sp-%u, push x]\n", target.spdiff - 1); | |
| 1102 DCHECK_GE(target.spdiff, 1); | |
| 1103 PopN(target.spdiff - 1); | |
| 1104 Push(pc, val); | |
| 1105 break; | |
| 1106 } | |
| 1107 case ControlTransfer::kPushVoid: | |
| 1108 TRACE(" action [sp-%u, push void]\n", target.spdiff); | |
| 1109 PopN(target.spdiff); | |
| 1110 Push(pc, WasmVal()); | |
| 1111 break; | |
| 1112 } | |
| 1113 return target.pcdiff; | |
| 1114 } | |
| 1115 | |
| 1116 void Execute(InterpreterCode* code, pc_t pc, int max) { | |
| 1117 Decoder decoder(code->start, code->end); | |
| 1118 pc_t limit = code->end - code->start; | |
| 1119 while (true) { | |
| 1120 if (max-- <= 0) { | |
| 1121 // Maximum number of instructions reached. | |
| 1122 state_ = WasmInterpreter::PAUSED; | |
| 1123 return CommitPc(pc); | |
|
ahaas
2016/05/13 12:18:55
It's not necessary to have the {return} and the {CommitPc} call in one statement.
titzer
2016/05/23 11:41:37
Yeah, but it saves a line :-)
| |
| 1124 } | |
| 1125 | |
| 1126 if (pc >= limit) { | |
| 1127 // Fell off end of code; do an implicit return. | |
| 1128 TRACE("@%-3zu: ImplicitReturn\n", pc); | |
| 1129 WasmVal val = PopArity(code->function->sig->return_count()); | |
| 1130 if (!DoReturn(&code, &pc, &limit, val)) return; | |
| 1131 decoder.Reset(code->start, code->end); | |
| 1132 continue; | |
| 1133 } | |
| 1134 | |
| 1135 const char* skip = ""; | |
| 1136 int len = 1; | |
| 1137 byte opcode = code->start[pc]; | |
| 1138 byte orig = opcode; | |
| 1139 if (opcode == kInternalBreakpoint) { | |
| 1140 if (SkipBreakpoint(code, pc)) { | |
| 1141 // skip breakpoint by switching on original code. | |
| 1142 orig = code->orig_start[pc]; | |
| 1143 skip = "[skip] "; | |
| 1144 } else { | |
| 1145 state_ = WasmInterpreter::PAUSED; | |
| 1146 return CommitPc(pc); | |
|
ahaas
2016/05/13 12:18:55
Same here.
titzer
2016/05/23 11:41:38
Acknowledged.
| |
| 1147 } | |
| 1148 } | |
| 1149 | |
| 1150 TRACE("@%-3zu: %s%-24s:", pc, skip, | |
| 1151 WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig))); | |
| 1152 TraceValueStack(); | |
| 1153 TRACE("\n"); | |
| 1154 | |
| 1155 switch (orig) { | |
| 1156 case kExprNop: | |
| 1157 Push(pc, WasmVal()); | |
| 1158 break; | |
| 1159 case kExprBlock: | |
| 1160 case kExprLoop: { | |
| 1161 // Do nothing. | |
| 1162 break; | |
| 1163 } | |
| 1164 case kExprIf: { | |
| 1165 WasmVal cond = Pop(); | |
| 1166 bool is_true = cond.to<uint32_t>() != 0; | |
| 1167 if (is_true) { | |
| 1168 // fall through to the true block. | |
| 1169 TRACE(" true => fallthrough\n"); | |
| 1170 } else { | |
| 1171 len = DoGoto(code, pc); | |
| 1172 TRACE(" false => @%zu\n", pc + len); | |
| 1173 } | |
| 1174 break; | |
| 1175 } | |
| 1176 case kExprElse: { | |
| 1177 len = DoGoto(code, pc); | |
| 1178 TRACE(" end => @%zu\n", pc + len); | |
| 1179 break; | |
| 1180 } | |
| 1181 case kExprSelect: { | |
| 1182 WasmVal cond = Pop(); | |
| 1183 WasmVal fval = Pop(); | |
| 1184 WasmVal tval = Pop(); | |
| 1185 Push(pc, cond.to<int32_t>() != 0 ? tval : fval); | |
| 1186 break; | |
| 1187 } | |
| 1188 case kExprBr: { | |
| 1189 BreakDepthOperand operand(&decoder, code->at(pc)); | |
| 1190 WasmVal val = PopArity(operand.arity); | |
| 1191 len = DoGoto(code, pc); | |
| 1192 TRACE(" br => @%zu\n", pc + len); | |
| 1193 if (operand.arity > 0) Push(pc, val); | |
| 1194 break; | |
| 1195 } | |
| 1196 case kExprBrIf: { | |
| 1197 BreakDepthOperand operand(&decoder, code->at(pc)); | |
| 1198 WasmVal cond = Pop(); | |
| 1199 WasmVal val = PopArity(operand.arity); | |
| 1200 bool is_true = cond.to<uint32_t>() != 0; | |
| 1201 if (is_true) { | |
| 1202 len = DoGoto(code, pc); | |
| 1203 TRACE(" br_if => @%zu\n", pc + len); | |
| 1204 if (operand.arity > 0) Push(pc, val); | |
| 1205 } else { | |
| 1206 TRACE(" false => fallthrough\n"); | |
| 1207 len = 1 + operand.length; | |
| 1208 Push(pc, WasmVal()); | |
| 1209 } | |
| 1210 break; | |
| 1211 } | |
| 1212 case kExprBrTable: { | |
| 1213 BranchTableOperand operand(&decoder, code->at(pc)); | |
| 1214 uint32_t key = Pop().to<uint32_t>(); | |
| 1215 WasmVal val = PopArity(operand.arity); | |
| 1216 if (key >= operand.table_count) key = operand.table_count; | |
| 1217 len = DoGoto(code, pc + key) + key; | |
| 1218 TRACE(" br[%u] => @%zu\n", key, pc + len); | |
| 1219 if (operand.arity > 0) Push(pc, val); | |
| 1220 break; | |
| 1221 } | |
| 1222 case kExprReturn: { | |
| 1223 ReturnArityOperand operand(&decoder, code->at(pc)); | |
| 1224 WasmVal val = PopArity(operand.arity); | |
| 1225 if (!DoReturn(&code, &pc, &limit, val)) return; | |
| 1226 decoder.Reset(code->start, code->end); | |
| 1227 continue; | |
| 1228 } | |
| 1229 case kExprUnreachable: { | |
| 1230 DoTrap(kTrapUnreachable, pc); | |
| 1231 return CommitPc(pc); | |
|
ahaas
2016/05/13 12:18:55
Same here.
titzer
2016/05/23 11:41:38
Acknowledged.
| |
| 1232 } | |
| 1233 case kExprEnd: { | |
| 1234 len = DoGoto(code, pc); | |
| 1235 DCHECK_EQ(1, len); | |
| 1236 break; | |
| 1237 } | |
| 1238 case kExprI8Const: { | |
| 1239 ImmI8Operand operand(&decoder, code->at(pc)); | |
| 1240 Push(pc, WasmVal(operand.value)); | |
| 1241 len = 1 + operand.length; | |
| 1242 break; | |
| 1243 } | |
| 1244 case kExprI32Const: { | |
| 1245 ImmI32Operand operand(&decoder, code->at(pc)); | |
| 1246 Push(pc, WasmVal(operand.value)); | |
| 1247 len = 1 + operand.length; | |
| 1248 break; | |
| 1249 } | |
| 1250 case kExprI64Const: { | |
| 1251 ImmI64Operand operand(&decoder, code->at(pc)); | |
| 1252 Push(pc, WasmVal(operand.value)); | |
| 1253 len = 1 + operand.length; | |
| 1254 break; | |
| 1255 } | |
| 1256 case kExprF32Const: { | |
| 1257 ImmF32Operand operand(&decoder, code->at(pc)); | |
| 1258 Push(pc, WasmVal(operand.value)); | |
| 1259 len = 1 + operand.length; | |
| 1260 break; | |
| 1261 } | |
| 1262 case kExprF64Const: { | |
| 1263 ImmF64Operand operand(&decoder, code->at(pc)); | |
| 1264 Push(pc, WasmVal(operand.value)); | |
| 1265 len = 1 + operand.length; | |
| 1266 break; | |
| 1267 } | |
| 1268 case kExprGetLocal: { | |
| 1269 LocalIndexOperand operand(&decoder, code->at(pc)); | |
| 1270 Push(pc, stack_[frames_.back().sp + operand.index]); | |
| 1271 len = 1 + operand.length; | |
| 1272 break; | |
| 1273 } | |
| 1274 case kExprSetLocal: { | |
| 1275 LocalIndexOperand operand(&decoder, code->at(pc)); | |
| 1276 WasmVal val = Pop(); | |
| 1277 stack_[frames_.back().sp + operand.index] = val; | |
| 1278 Push(pc, val); | |
| 1279 len = 1 + operand.length; | |
| 1280 break; | |
| 1281 } | |
| 1282 case kExprCallFunction: { | |
| 1283 CallFunctionOperand operand(&decoder, code->at(pc)); | |
| 1284 InterpreterCode* target = codemap()->GetCode(operand.index); | |
| 1285 PushFrame(target, pc, pc + 1 + operand.length); | |
| 1286 code = target; | |
| 1287 decoder.Reset(code->start, code->end); | |
| 1288 pc = frames_.back().ret_pc; | |
| 1289 limit = code->end - code->start; | |
| 1290 continue; | |
| 1291 } | |
| 1292 case kExprCallIndirect: { | |
| 1293 CallIndirectOperand operand(&decoder, code->at(pc)); | |
| 1294 uint32_t table_index = | |
| 1295 stack_[stack_.size() - operand.arity - 1].to<uint32_t>(); | |
|
ahaas
2016/05/13 12:18:55
Can it be that {(stack_.size() - operand.arity - 1)} is out of bounds?
titzer
2016/05/23 11:41:38
That could result from invalid bytecode or an internal error.
| |
| 1296 if (table_index >= module()->function_table.size()) { | |
| 1297 return DoTrap(kTrapFuncInvalid, pc); | |
| 1298 } | |
| 1299 uint16_t function_index = module()->function_table[table_index]; | |
| 1300 InterpreterCode* target = codemap()->GetCode(function_index); | |
| 1301 DCHECK(target); | |
| 1302 if (target->function->sig_index != operand.index) { | |
| 1303 return DoTrap(kTrapFuncSigMismatch, pc); | |
| 1304 } | |
| 1305 | |
| 1306 PushFrame(target, pc, pc + 1 + operand.length); | |
| 1307 code = target; | |
| 1308 decoder.Reset(code->start, code->end); | |
| 1309 pc = frames_.back().ret_pc; | |
| 1310 limit = code->end - code->start; | |
| 1311 continue; | |
| 1312 } | |
| 1313 case kExprCallImport: { | |
| 1314 UNIMPLEMENTED(); | |
| 1315 break; | |
| 1316 } | |
| 1317 case kExprLoadGlobal: { | |
| 1318 GlobalIndexOperand operand(&decoder, code->at(pc)); | |
| 1319 WasmGlobal* global = &module()->globals[operand.index]; | |
| 1320 byte* ptr = instance()->globals_start + global->offset; | |
| 1321 MachineType type = global->type; | |
| 1322 WasmVal val; | |
| 1323 if (type == MachineType::Int8()) { | |
| 1324 val = | |
| 1325 WasmVal(static_cast<int32_t>(*reinterpret_cast<int8_t*>(ptr))); | |
| 1326 } else if (type == MachineType::Uint8()) { | |
| 1327 val = | |
| 1328 WasmVal(static_cast<int32_t>(*reinterpret_cast<uint8_t*>(ptr))); | |
| 1329 } else if (type == MachineType::Int16()) { | |
| 1330 val = | |
| 1331 WasmVal(static_cast<int32_t>(*reinterpret_cast<int16_t*>(ptr))); | |
| 1332 } else if (type == MachineType::Uint16()) { | |
| 1333 val = WasmVal( | |
| 1334 static_cast<int32_t>(*reinterpret_cast<uint16_t*>(ptr))); | |
| 1335 } else if (type == MachineType::Int32()) { | |
| 1336 val = WasmVal(*reinterpret_cast<int32_t*>(ptr)); | |
| 1337 } else if (type == MachineType::Uint32()) { | |
| 1338 val = WasmVal(*reinterpret_cast<uint32_t*>(ptr)); | |
| 1339 } else if (type == MachineType::Int64()) { | |
| 1340 val = WasmVal(*reinterpret_cast<int64_t*>(ptr)); | |
| 1341 } else if (type == MachineType::Uint64()) { | |
| 1342 val = WasmVal(*reinterpret_cast<uint64_t*>(ptr)); | |
| 1343 } else if (type == MachineType::Float32()) { | |
| 1344 val = WasmVal(*reinterpret_cast<float*>(ptr)); | |
| 1345 } else if (type == MachineType::Float64()) { | |
| 1346 val = WasmVal(*reinterpret_cast<double*>(ptr)); | |
| 1347 } else { | |
| 1348 UNREACHABLE(); | |
| 1349 } | |
| 1350 Push(pc, val); | |
| 1351 len = 1 + operand.length; | |
| 1352 break; | |
| 1353 } | |
| 1354 case kExprStoreGlobal: { | |
| 1355 GlobalIndexOperand operand(&decoder, code->at(pc)); | |
| 1356 WasmGlobal* global = &module()->globals[operand.index]; | |
| 1357 byte* ptr = instance()->globals_start + global->offset; | |
| 1358 MachineType type = global->type; | |
| 1359 WasmVal val = Pop(); | |
| 1360 if (type == MachineType::Int8()) { | |
| 1361 *reinterpret_cast<int8_t*>(ptr) = | |
| 1362 static_cast<int8_t>(val.to<int32_t>()); | |
| 1363 } else if (type == MachineType::Uint8()) { | |
| 1364 *reinterpret_cast<uint8_t*>(ptr) = | |
| 1365 static_cast<uint8_t>(val.to<uint32_t>()); | |
| 1366 } else if (type == MachineType::Int16()) { | |
| 1367 *reinterpret_cast<int16_t*>(ptr) = | |
| 1368 static_cast<int16_t>(val.to<int32_t>()); | |
| 1369 } else if (type == MachineType::Uint16()) { | |
| 1370 *reinterpret_cast<uint16_t*>(ptr) = | |
| 1371 static_cast<uint16_t>(val.to<uint32_t>()); | |
| 1372 } else if (type == MachineType::Int32()) { | |
| 1373 *reinterpret_cast<int32_t*>(ptr) = val.to<int32_t>(); | |
| 1374 } else if (type == MachineType::Uint32()) { | |
| 1375 *reinterpret_cast<uint32_t*>(ptr) = val.to<uint32_t>(); | |
| 1376 } else if (type == MachineType::Int64()) { | |
| 1377 *reinterpret_cast<int64_t*>(ptr) = val.to<int64_t>(); | |
| 1378 } else if (type == MachineType::Uint64()) { | |
| 1379 *reinterpret_cast<uint64_t*>(ptr) = val.to<uint64_t>(); | |
| 1380 } else if (type == MachineType::Float32()) { | |
| 1381 *reinterpret_cast<float*>(ptr) = val.to<float>(); | |
| 1382 } else if (type == MachineType::Float64()) { | |
| 1383 *reinterpret_cast<double*>(ptr) = val.to<double>(); | |
| 1384 } else { | |
| 1385 UNREACHABLE(); | |
| 1386 } | |
| 1387 Push(pc, val); | |
| 1388 len = 1 + operand.length; | |
| 1389 break; | |
| 1390 } | |
| 1391 | |
| 1392 #define LOAD_CASE(name, ctype, mtype) \ | |
| 1393 case kExpr##name: { \ | |
| 1394 MemoryAccessOperand operand(&decoder, code->at(pc)); \ | |
| 1395 uint32_t index = Pop().to<uint32_t>(); \ | |
| 1396 size_t effective_mem_size = instance()->mem_size - sizeof(mtype); \ | |
| 1397 if (operand.offset > effective_mem_size || \ | |
| 1398 index > (effective_mem_size - operand.offset)) { \ | |
| 1399 return DoTrap(kTrapMemOutOfBounds, pc); \ | |
| 1400 } \ | |
| 1401 byte* addr = instance()->mem_start + operand.offset + index; \ | |
| 1402 /* TODO(titzer): alignment, endianness for load mem */ \ | |
| 1403 WasmVal result(static_cast<ctype>(*reinterpret_cast<mtype*>(addr))); \ | |
| 1404 Push(pc, result); \ | |
| 1405 len = 1 + operand.length; \ | |
| 1406 break; \ | |
| 1407 } | |
| 1408 | |
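The bounds check first shrinks the memory size by the access width and then compares offset and index separately, which avoids the unsigned wrap-around that a naive `index + offset + sizeof(mtype) > mem_size` could hit. Written out as a standalone predicate (a sketch with an extra guard for memories smaller than the access, not code from this CL):

```cpp
// True when [index + offset, index + offset + access_size) fits in mem_size
// bytes, without any intermediate unsigned overflow.
static bool InBounds(size_t mem_size, uint32_t offset, uint32_t index,
                     size_t access_size) {
  if (access_size > mem_size) return false;
  size_t effective = mem_size - access_size;
  return offset <= effective && index <= effective - offset;
}
```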
| 1409 LOAD_CASE(I32LoadMem8S, int32_t, int8_t); | |
| 1410 LOAD_CASE(I32LoadMem8U, int32_t, uint8_t); | |
| 1411 LOAD_CASE(I32LoadMem16S, int32_t, int16_t); | |
| 1412 LOAD_CASE(I32LoadMem16U, int32_t, uint16_t); | |
| 1413 LOAD_CASE(I64LoadMem8S, int64_t, int8_t); | |
| 1414 LOAD_CASE(I64LoadMem8U, int64_t, uint8_t); | |
| 1415 LOAD_CASE(I64LoadMem16S, int64_t, int16_t); | |
| 1416 LOAD_CASE(I64LoadMem16U, int64_t, uint16_t); | |
| 1417 LOAD_CASE(I64LoadMem32S, int64_t, int32_t); | |
| 1418 LOAD_CASE(I64LoadMem32U, int64_t, uint32_t); | |
| 1419 LOAD_CASE(I32LoadMem, int32_t, int32_t); | |
|
ahaas
2016/05/13 12:18:55
I think the I32 cases should all be together.
titzer
2016/05/23 11:41:38
They are in the same order as the opcode declarations.
| |
| 1420 LOAD_CASE(I64LoadMem, int64_t, int64_t); | |
| 1421 LOAD_CASE(F32LoadMem, float, float); | |
| 1422 LOAD_CASE(F64LoadMem, double, double); | |
| 1423 | |
|
ahaas
2016/05/13 12:18:55
is there a reason why you left out the {#undef LOAD_CASE}?
titzer
2016/05/23 11:41:38
Done.
| |
| 1424 #define STORE_CASE(name, ctype, mtype) \ | |
| 1425 case kExpr##name: { \ | |
| 1426 MemoryAccessOperand operand(&decoder, code->at(pc)); \ | |
| 1427 WasmVal val = Pop(); \ | |
| 1428 uint32_t index = Pop().to<uint32_t>(); \ | |
| 1429 size_t effective_mem_size = instance()->mem_size - sizeof(mtype); \ | |
| 1430 if (operand.offset > effective_mem_size || \ | |
| 1431 index > (effective_mem_size - operand.offset)) { \ | |
| 1432 return DoTrap(kTrapMemOutOfBounds, pc); \ | |
| 1433 } \ | |
| 1434 byte* addr = instance()->mem_start + operand.offset + index; \ | |
| 1435 /* TODO(titzer): alignment, endianness for store mem */ \ | |
| 1436 *reinterpret_cast<mtype*>(addr) = static_cast<mtype>(val.to<ctype>()); \ | |
| 1437 Push(pc, val); \ | |
| 1438 len = 1 + operand.length; \ | |
| 1439 break; \ | |
| 1440 } | |
| 1441 | |
| 1442 STORE_CASE(I32StoreMem8, int32_t, int8_t); | |
| 1443 STORE_CASE(I32StoreMem16, int32_t, int16_t); | |
| 1444 STORE_CASE(I64StoreMem8, int64_t, int8_t); | |
| 1445 STORE_CASE(I64StoreMem16, int64_t, int16_t); | |
| 1446 STORE_CASE(I64StoreMem32, int64_t, int32_t); | |
| 1447 STORE_CASE(I32StoreMem, int32_t, int32_t); | |
| 1448 STORE_CASE(I64StoreMem, int64_t, int64_t); | |
| 1449 STORE_CASE(F32StoreMem, float, float); | |
| 1450 STORE_CASE(F64StoreMem, double, double); | |
| 1451 | |
| 1452 #define ASMJS_LOAD_CASE(name, ctype, mtype, defval) \ | |
| 1453 case kExpr##name: { \ | |
| 1454 uint32_t index = Pop().to<uint32_t>(); \ | |
| 1455 ctype result; \ | |
| 1456 if (index >= (instance()->mem_size - sizeof(mtype))) { \ | |
| 1457 result = defval; \ | |
| 1458 } else { \ | |
| 1459 byte* addr = instance()->mem_start + index; \ | |
| 1460 /* TODO(titzer): alignment for asmjs load mem? */ \ | |
| 1461 result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \ | |
| 1462 } \ | |
| 1463 Push(pc, WasmVal(result)); \ | |
| 1464 break; \ | |
| 1465 } | |
| 1466 | |
| 1467 ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0); | |
| 1468 ASMJS_LOAD_CASE(I32AsmjsLoadMem8U, int32_t, uint8_t, 0); | |
| 1469 ASMJS_LOAD_CASE(I32AsmjsLoadMem16S, int32_t, int16_t, 0); | |
| 1470 ASMJS_LOAD_CASE(I32AsmjsLoadMem16U, int32_t, uint16_t, 0); | |
| 1471 ASMJS_LOAD_CASE(I32AsmjsLoadMem, int32_t, int32_t, 0); | |
| 1472 ASMJS_LOAD_CASE(F32AsmjsLoadMem, float, float, | |
| 1473 std::numeric_limits<float>::quiet_NaN()); | |
| 1474 ASMJS_LOAD_CASE(F64AsmjsLoadMem, double, double, | |
| 1475 std::numeric_limits<double>::quiet_NaN()); | |
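Unlike the wasm load opcodes above, which trap on an out-of-bounds access, the asm.js variants fall back to a default value (0 for integers, quiet NaN for floats). A minimal standalone sketch of that rule, with illustrative names, using memcpy instead of the interpreter's reinterpret_cast to sidestep the alignment concern the TODO mentions:

  // Sketch of the asm.js out-of-bounds load rule; illustrative names only.
  #include <cstddef>
  #include <cstdint>
  #include <cstring>

  template <typename ctype, typename mtype>
  ctype AsmjsLoad(const uint8_t* mem, size_t mem_size, uint32_t index,
                  ctype defval) {
    if (index >= mem_size - sizeof(mtype)) return defval;  // no trap, just defval
    mtype raw;
    std::memcpy(&raw, mem + index, sizeof(mtype));  // memcpy sidesteps alignment
    return static_cast<ctype>(raw);
  }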
| 1476 | |
| 1477 #define ASMJS_STORE_CASE(name, ctype, mtype) \ | |
| 1478 case kExpr##name: { \ | |
| 1479 WasmVal val = Pop(); \ | |
| 1480 uint32_t index = Pop().to<uint32_t>(); \ | |
| 1481 if (index < (instance()->mem_size - sizeof(mtype))) { \ | |
| 1482 byte* addr = instance()->mem_start + index; \ | |
| 1483 /* TODO(titzer): alignment for asmjs store mem? */ \ | |
| 1484 *(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \ | |
| 1485 } \ | |
| 1486 Push(pc, val); \ | |
| 1487 break; \ | |
| 1488 } | |
| 1489 | |
| 1490 ASMJS_STORE_CASE(I32AsmjsStoreMem8, int32_t, int8_t); | |
| 1491 ASMJS_STORE_CASE(I32AsmjsStoreMem16, int32_t, int16_t); | |
| 1492 ASMJS_STORE_CASE(I32AsmjsStoreMem, int32_t, int32_t); | |
| 1493 ASMJS_STORE_CASE(F32AsmjsStoreMem, float, float); | |
| 1494 ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double); | |
| 1495 | |
| 1496 case kExprMemorySize: { | |
| 1497 Push(pc, WasmVal(static_cast<uint32_t>(instance()->mem_size))); | |
| 1498 break; | |
| 1499 } | |
| 1500 #define EXECUTE_SIMPLE_BINOP(name, ctype, op) \ | |
| 1501 case kExpr##name: { \ | |
| 1502 WasmVal rval = Pop(); \ | |
| 1503 WasmVal lval = Pop(); \ | |
| 1504 WasmVal result(lval.to<ctype>() op rval.to<ctype>()); \ | |
| 1505 Push(pc, result); \ | |
| 1506 break; \ | |
| 1507 } | |
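To make the X-macro mechanics concrete: together with the V(I32Add, uint32_t, +) entry in FOREACH_SIMPLE_BINOP, this macro expands to roughly the following case (sketch of the expansion, not separate source):

  case kExprI32Add: {
    WasmVal rval = Pop();
    WasmVal lval = Pop();
    WasmVal result(lval.to<uint32_t>() + rval.to<uint32_t>());
    Push(pc, result);
    break;
  }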
| 1508 #define EXECUTE_OTHER_BINOP(name, ctype, unused) \ | |
| 1509 case kExpr##name: { \ | |
| 1510 TrapReason trap = kTrapCount; \ | |
| 1511 volatile ctype rval = Pop().to<ctype>(); \ | |
| 1512 volatile ctype lval = Pop().to<ctype>(); \ | |
| 1513 WasmVal result(Execute##name(lval, rval, &trap)); \ | |
| 1514 if (trap != kTrapCount) return DoTrap(trap, pc); \ | |
| 1515 Push(pc, result); \ | |
| 1516 break; \ | |
| 1517 } | |
| 1518 #define EXECUTE_OTHER_UNOP(name, ctype, unused) \ | |
| 1519 case kExpr##name: { \ | |
| 1520 TrapReason trap = kTrapCount; \ | |
| 1521 volatile ctype rval = Pop().to<ctype>(); \ | |
|
ahaas
2016/05/13 12:18:55
not rval, just val.
titzer
2016/05/23 11:41:37
Done.
| |
| 1522 WasmVal result(Execute##name(rval, &trap)); \ | |
| 1523 if (trap != kTrapCount) return DoTrap(trap, pc); \ | |
| 1524 Push(pc, result); \ | |
| 1525 break; \ | |
| 1526 } | |
| 1527 | |
| 1528 FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP) | |
|
ahaas
2016/05/13 12:18:55
move the FOREACH... to the corresponding #define.
titzer
2016/05/23 11:41:38
Done.
| |
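The layout the reviewer asks for (and which the author adopts) keeps each list expansion next to the macro that consumes it, e.g. in sketch form:

  #define EXECUTE_OTHER_UNOP(name, ctype, unused) \
    case kExpr##name: { /* trap-checked unary case, as defined above */ }
      FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
  #undef EXECUTE_OTHER_UNOP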
| 1529 FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP) | |
| 1530 FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP) | |
| 1531 default: | |
| 1532 V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s", | |
| 1533 code->start[pc], OpcodeName(code->start[pc])); | |
| 1534 UNREACHABLE(); | |
| 1535 } | |
| 1536 | |
| 1537 pc += len; | |
| 1538 } | |
| 1539 UNREACHABLE(); // above decoding loop should run forever. | |
| 1540 } | |
| 1541 | |
| 1542 WasmVal Pop() { | |
| 1543 DCHECK_GT(stack_.size(), 0); | |
| 1544 DCHECK_GT(frames_.size(), 0); | |
| 1545 DCHECK_GT(stack_.size(), frames_.back().llimit()); // can't pop into locals | |
| 1546 WasmVal val = stack_.back(); | |
| 1547 stack_.pop_back(); | |
| 1548 return val; | |
|
dougc
2016/05/13 13:23:27
These appear to be debug checks, but does it need release-mode checks as well?
titzer
2016/05/23 11:41:38
The interpreter is not designed to catch validation errors.
| |
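For readers unfamiliar with the V8 macros referenced here: DCHECK* compiles away in release builds, while CHECK* stays enabled, so the stack-discipline checks in Pop() and PopN() cost nothing in release mode and rely on prior validation; only PopArity below keeps an always-on check. In sketch form:

  DCHECK_GT(stack_.size(), 0);  // debug builds only; relies on prior validation
  CHECK_EQ(1, arity);           // enabled in all builds; aborts on violation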
| 1549 } | |
| 1550 | |
| 1551 void PopN(int n) { | |
| 1552 DCHECK_GE(stack_.size(), static_cast<size_t>(n)); | |
| 1553 DCHECK_GT(frames_.size(), 0); | |
| 1554 size_t nsize = stack_.size() - n; | |
| 1555 DCHECK_GE(nsize, frames_.back().llimit()); // can't pop into locals | |
| 1556 stack_.resize(nsize); | |
| 1557 } | |
| 1558 | |
| 1559 WasmVal PopArity(size_t arity) { | |
| 1560 if (arity == 0) return WasmVal(); | |
| 1561 CHECK_EQ(1, arity); | |
| 1562 return Pop(); | |
| 1563 } | |
| 1564 | |
| 1565 void Push(pc_t pc, WasmVal val) { | |
| 1566 // TODO(titzer): store PC as well? | |
| 1567 stack_.push_back(val); | |
| 1568 } | |
| 1569 | |
| 1570 void TraceStack(const char* phase, pc_t pc) { | |
| 1571 if (FLAG_trace_wasm_interpreter) { | |
| 1572 PrintF("%s @%zu", phase, pc); | |
| 1573 UNIMPLEMENTED(); | |
| 1574 PrintF("\n"); | |
| 1575 } | |
| 1576 } | |
| 1577 | |
| 1578 void TraceValueStack() { | |
| 1579 Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr; | |
| 1580 sp_t sp = top ? top->sp : 0; | |
| 1581 sp_t plimit = top ? top->plimit() : 0; | |
| 1582 sp_t llimit = top ? top->llimit() : 0; | |
| 1583 if (FLAG_trace_wasm_interpreter) { | |
| 1584 for (size_t i = sp; i < stack_.size(); i++) { | |
| 1585 if (i < plimit) | |
| 1586 PrintF(" p%zu:", i); | |
| 1587 else if (i < llimit) | |
| 1588 PrintF(" l%zu:", i); | |
| 1589 else | |
| 1590 PrintF(" s%zu:", i); | |
| 1591 WasmVal val = stack_[i]; | |
| 1592 switch (val.type) { | |
| 1593 case kAstI32: | |
| 1594 PrintF("i32:%d", val.to<int32_t>()); | |
| 1595 break; | |
| 1596 case kAstI64: | |
| 1597 PrintF("i64:%ld", val.to<int64_t>()); | |
| 1598 break; | |
| 1599 case kAstF32: | |
| 1600 PrintF("f32:%f", val.to<float>()); | |
| 1601 break; | |
| 1602 case kAstF64: | |
| 1603 PrintF("f64:%lf", val.to<double>()); | |
| 1604 break; | |
| 1605 case kAstStmt: | |
| 1606 PrintF("void"); | |
| 1607 break; | |
| 1608 default: | |
| 1609 UNREACHABLE(); | |
| 1610 break; | |
| 1611 } | |
| 1612 } | |
| 1613 } | |
| 1614 } | |
| 1615 }; | |
| 1616 | |
| 1617 //============================================================================ | |
| 1618 // The implementation details of the interpreter. | |
| 1619 //============================================================================ | |
| 1620 class WasmInterpreterInternals : public ZoneObject { | |
| 1621 public: | |
| 1622 WasmModuleInstance* instance_; | |
| 1623 CodeMap codemap_; | |
| 1624 ZoneVector<ThreadImpl> threads_; | |
| 1625 | |
| 1626 WasmInterpreterInternals(Zone* zone, WasmModuleInstance* instance) | |
| 1627 : instance_(instance), | |
| 1628 codemap_(instance_ ? instance_->module : nullptr, zone), | |
| 1629 threads_(zone) { | |
| 1630 threads_.push_back(ThreadImpl(zone, &codemap_, instance)); | |
| 1631 } | |
| 1632 }; | |
| 1633 | |
| 1634 //============================================================================ | |
| 1635 // Implementation of the public interface of the interpreter. | |
| 1636 //============================================================================ | |
| 1637 WasmInterpreter::WasmInterpreter(WasmModuleInstance* instance, | |
| 1638 base::AccountingAllocator* allocator) | |
| 1639 : zone_(allocator), | |
| 1640 internals_(new (&zone_) WasmInterpreterInternals(&zone_, instance)) {} | |
| 1641 | |
| 1642 WasmInterpreter::~WasmInterpreter() {} | |
| 1643 | |
| 1644 void WasmInterpreter::Run() { internals_->threads_[0].Run(); } | |
| 1645 | |
| 1646 void WasmInterpreter::Pause() { internals_->threads_[0].Pause(); } | |
| 1647 | |
| 1648 bool WasmInterpreter::SetBreakpoint(WasmFunction* function, int pc, | |
| 1649 bool enabled) { | |
| 1650 InterpreterCode* code = internals_->codemap_.FindCode(function); | |
| 1651 if (!code) return false; | |
| 1652 int size = static_cast<int>(code->end - code->start); | |
| 1653 // Check bounds for {pc}. | |
| 1654 if (pc < 0 || pc >= size) return false; | |
| 1655 if (code->orig_start == code->start) { | |
| 1656 code->start = reinterpret_cast<byte*>(malloc(size)); | |
|
Clemens Hammacher
2016/05/12 15:20:55
You should allocate this in the Zone. It would als
titzer
2016/05/23 11:41:38
Good catch. Done.
| |
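A minimal sketch of the zone-based allocation the reviewer suggests, assuming the interpreter keeps (or is given) a Zone* for this purpose; the member name zone_ is hypothetical:

  // Hedged sketch only: zone_ is an assumed Zone* member, and V8's
  // Zone::NewArray replaces malloc so the copy is freed with the zone.
  byte* copy = zone_->NewArray<byte>(size);
  memcpy(copy, code->orig_start, size);
  code->start = copy;
  code->end = copy + size;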
| 1657 memcpy(code->start, code->orig_start, size); | |
| 1658 code->end = code->start + size; | |
| 1659 bool prev = code->start[pc] == kInternalBreakpoint; | |
|
Clemens Hammacher
2016/05/12 15:20:55
I think the if should close here. Otherwise this function does nothing once the code has already been copied.
titzer
2016/05/23 11:41:38
Good catch. Done.
| |
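In sketch form, the control flow the reviewer is asking for: copy the code on first use only, then unconditionally toggle the byte and report whether a breakpoint was previously set (an assumed reconstruction of the fix, not the landed patch):

  if (code->orig_start == code->start) {
    // First breakpoint in this function: make a private, patchable copy
    // (malloc mirrors the patch under review; zone memory per the earlier
    // comment would also work).
    code->start = reinterpret_cast<byte*>(malloc(size));
    memcpy(code->start, code->orig_start, size);
    code->end = code->start + size;
  }
  bool prev = code->start[pc] == kInternalBreakpoint;
  code->start[pc] = enabled ? kInternalBreakpoint : code->orig_start[pc];
  return prev;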
| 1660 if (enabled) { | |
| 1661 code->start[pc] = kInternalBreakpoint; | |
| 1662 } else { | |
| 1663 code->start[pc] = code->orig_start[pc]; | |
| 1664 } | |
| 1665 return prev; | |
|
Clemens Hammacher
2016/05/12 15:20:55
So the function returns whether a breakpoint was set before?
titzer
2016/05/23 11:41:37
Done.
| |
| 1666 } | |
| 1667 return false; | |
| 1668 } | |
| 1669 | |
| 1670 bool WasmInterpreter::GetBreakpoint(WasmFunction* function, int pc) { | |
| 1671 InterpreterCode* code = internals_->codemap_.FindCode(function); | |
| 1672 if (!code) return false; | |
| 1673 int size = static_cast<int>(code->end - code->start); | |
| 1674 // Check bounds for {pc}. | |
| 1675 if (pc < 0 || pc >= size) return false; | |
| 1676 // Check if a breakpoint is present at that place in the code. | |
| 1677 return code->start[pc] == kInternalBreakpoint; | |
| 1678 } | |
| 1679 | |
| 1680 bool WasmInterpreter::SetTracing(WasmFunction* function, bool enabled) { | |
| 1681 UNIMPLEMENTED(); | |
| 1682 } | |
| 1683 | |
| 1684 int WasmInterpreter::GetThreadCount() { | |
| 1685 return 1; // only one thread for now. | |
| 1686 } | |
| 1687 | |
| 1688 WasmInterpreter::Thread& WasmInterpreter::GetThread(int id) { | |
| 1689 CHECK_EQ(0, id); // only one thread for now. | |
| 1690 return internals_->threads_[id]; | |
| 1691 } | |
| 1692 | |
| 1693 WasmVal WasmInterpreter::GetLocalVal(const WasmFrame& frame, int index) { | |
| 1694 CHECK_GE(index, 0); | |
| 1695 UNIMPLEMENTED(); | |
| 1696 WasmVal none; | |
| 1697 none.type = kAstStmt; | |
| 1698 return none; | |
| 1699 } | |
| 1700 | |
| 1701 WasmVal WasmInterpreter::GetExprVal(const WasmFrame& frame, int pc) { | |
| 1702 UNIMPLEMENTED(); | |
| 1703 WasmVal none; | |
| 1704 none.type = kAstStmt; | |
| 1705 return none; | |
| 1706 } | |
| 1707 | |
| 1708 void WasmInterpreter::SetLocalVal(WasmFrame& frame, int index, WasmVal val) { | |
| 1709 UNIMPLEMENTED(); | |
| 1710 } | |
| 1711 | |
| 1712 void WasmInterpreter::SetExprVal(WasmFrame& frame, int pc, WasmVal val) { | |
| 1713 UNIMPLEMENTED(); | |
| 1714 } | |
| 1715 | |
| 1716 size_t WasmInterpreter::GetMemorySize() { | |
| 1717 return internals_->instance_->mem_size; | |
| 1718 } | |
| 1719 | |
| 1720 WasmVal WasmInterpreter::ReadMemory(size_t offset) { | |
| 1721 UNIMPLEMENTED(); | |
| 1722 return WasmVal(); | |
| 1723 } | |
| 1724 | |
| 1725 void WasmInterpreter::WriteMemory(size_t offset, WasmVal val) { | |
| 1726 UNIMPLEMENTED(); | |
| 1727 } | |
| 1728 | |
| 1729 int WasmInterpreter::AddFunctionForTesting(WasmFunction* function) { | |
| 1730 return internals_->codemap_.AddFunction(function, nullptr, nullptr); | |
| 1731 } | |
| 1732 | |
| 1733 bool WasmInterpreter::SetFunctionCodeForTesting(WasmFunction* function, | |
| 1734 const byte* start, | |
| 1735 const byte* end) { | |
| 1736 return internals_->codemap_.SetFunctionCode(function, start, end); | |
| 1737 } | |
| 1738 | |
| 1739 ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting( | |
| 1740 Zone* zone, const byte* start, const byte* end) { | |
| 1741 ControlTransfers targets(zone, 0, start, end); | |
| 1742 return targets.map; | |
| 1743 } | |
| 1744 | |
| 1745 } // namespace wasm | |
| 1746 } // namespace internal | |
| 1747 } // namespace v8 | |