OLD | NEW |
1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
| 5 #include <type_traits> |
| 6 |
5 #include "src/wasm/wasm-interpreter.h" | 7 #include "src/wasm/wasm-interpreter.h" |
6 | 8 |
7 #include "src/utils.h" | 9 #include "src/utils.h" |
8 #include "src/wasm/decoder.h" | 10 #include "src/wasm/decoder.h" |
9 #include "src/wasm/function-body-decoder.h" | 11 #include "src/wasm/function-body-decoder.h" |
10 #include "src/wasm/wasm-external-refs.h" | 12 #include "src/wasm/wasm-external-refs.h" |
11 #include "src/wasm/wasm-limits.h" | 13 #include "src/wasm/wasm-limits.h" |
12 #include "src/wasm/wasm-module.h" | 14 #include "src/wasm/wasm-module.h" |
13 | 15 |
14 #include "src/zone/accounting-allocator.h" | 16 #include "src/zone/accounting-allocator.h" |
(...skipping 55 matching lines...)
70 V(F32Le, float, <=) \ | 72 V(F32Le, float, <=) \ |
71 V(F32Gt, float, >) \ | 73 V(F32Gt, float, >) \ |
72 V(F32Ge, float, >=) \ | 74 V(F32Ge, float, >=) \ |
73 V(F64Add, double, +) \ | 75 V(F64Add, double, +) \ |
74 V(F64Sub, double, -) \ | 76 V(F64Sub, double, -) \ |
75 V(F64Eq, double, ==) \ | 77 V(F64Eq, double, ==) \ |
76 V(F64Ne, double, !=) \ | 78 V(F64Ne, double, !=) \ |
77 V(F64Lt, double, <) \ | 79 V(F64Lt, double, <) \ |
78 V(F64Le, double, <=) \ | 80 V(F64Le, double, <=) \ |
79 V(F64Gt, double, >) \ | 81 V(F64Gt, double, >) \ |
80 V(F64Ge, double, >=) | 82 V(F64Ge, double, >=) \ |
81 | 83 V(F32Mul, float, *) \ |
82 #define FOREACH_SIMPLE_BINOP_NAN(V) \ | 84 V(F64Mul, double, *) \ |
83 V(F32Mul, float, *) \ | 85 V(F32Div, float, /) \ |
84 V(F64Mul, double, *) \ | |
85 V(F32Div, float, /) \ | |
86 V(F64Div, double, /) | 86 V(F64Div, double, /) |
87 | 87 |
88 #define FOREACH_OTHER_BINOP(V) \ | 88 #define FOREACH_OTHER_BINOP(V) \ |
89 V(I32DivS, int32_t) \ | 89 V(I32DivS, int32_t) \ |
90 V(I32DivU, uint32_t) \ | 90 V(I32DivU, uint32_t) \ |
91 V(I32RemS, int32_t) \ | 91 V(I32RemS, int32_t) \ |
92 V(I32RemU, uint32_t) \ | 92 V(I32RemU, uint32_t) \ |
93 V(I32Shl, uint32_t) \ | 93 V(I32Shl, uint32_t) \ |
94 V(I32ShrU, uint32_t) \ | 94 V(I32ShrU, uint32_t) \ |
95 V(I32ShrS, int32_t) \ | 95 V(I32ShrS, int32_t) \ |
96 V(I64DivS, int64_t) \ | 96 V(I64DivS, int64_t) \ |
97 V(I64DivU, uint64_t) \ | 97 V(I64DivU, uint64_t) \ |
98 V(I64RemS, int64_t) \ | 98 V(I64RemS, int64_t) \ |
99 V(I64RemU, uint64_t) \ | 99 V(I64RemU, uint64_t) \ |
100 V(I64Shl, uint64_t) \ | 100 V(I64Shl, uint64_t) \ |
101 V(I64ShrU, uint64_t) \ | 101 V(I64ShrU, uint64_t) \ |
102 V(I64ShrS, int64_t) \ | 102 V(I64ShrS, int64_t) \ |
103 V(I32Ror, int32_t) \ | 103 V(I32Ror, int32_t) \ |
104 V(I32Rol, int32_t) \ | 104 V(I32Rol, int32_t) \ |
105 V(I64Ror, int64_t) \ | 105 V(I64Ror, int64_t) \ |
106 V(I64Rol, int64_t) \ | 106 V(I64Rol, int64_t) \ |
107 V(F32Min, float) \ | 107 V(F32Min, float) \ |
108 V(F32Max, float) \ | 108 V(F32Max, float) \ |
109 V(F32CopySign, float) \ | |
110 V(F64Min, double) \ | 109 V(F64Min, double) \ |
111 V(F64Max, double) \ | 110 V(F64Max, double) \ |
112 V(F64CopySign, double) \ | |
113 V(I32AsmjsDivS, int32_t) \ | 111 V(I32AsmjsDivS, int32_t) \ |
114 V(I32AsmjsDivU, uint32_t) \ | 112 V(I32AsmjsDivU, uint32_t) \ |
115 V(I32AsmjsRemS, int32_t) \ | 113 V(I32AsmjsRemS, int32_t) \ |
116 V(I32AsmjsRemU, uint32_t) | 114 V(I32AsmjsRemU, uint32_t) |
117 | 115 |
118 #define FOREACH_OTHER_UNOP(V) \ | 116 #define FOREACH_OTHER_UNOP(V) \ |
119 V(I32Clz, uint32_t) \ | 117 V(I32Clz, uint32_t) \ |
120 V(I32Ctz, uint32_t) \ | 118 V(I32Ctz, uint32_t) \ |
121 V(I32Popcnt, uint32_t) \ | 119 V(I32Popcnt, uint32_t) \ |
122 V(I32Eqz, uint32_t) \ | 120 V(I32Eqz, uint32_t) \ |
(...skipping 32 matching lines...)
155 V(F32ReinterpretI32, int32_t) \ | 153 V(F32ReinterpretI32, int32_t) \ |
156 V(F64SConvertI32, int32_t) \ | 154 V(F64SConvertI32, int32_t) \ |
157 V(F64UConvertI32, uint32_t) \ | 155 V(F64UConvertI32, uint32_t) \ |
158 V(F64SConvertI64, int64_t) \ | 156 V(F64SConvertI64, int64_t) \ |
159 V(F64UConvertI64, uint64_t) \ | 157 V(F64UConvertI64, uint64_t) \ |
160 V(F64ConvertF32, float) \ | 158 V(F64ConvertF32, float) \ |
161 V(F64ReinterpretI64, int64_t) \ | 159 V(F64ReinterpretI64, int64_t) \ |
162 V(I32AsmjsSConvertF32, float) \ | 160 V(I32AsmjsSConvertF32, float) \ |
163 V(I32AsmjsUConvertF32, float) \ | 161 V(I32AsmjsUConvertF32, float) \ |
164 V(I32AsmjsSConvertF64, double) \ | 162 V(I32AsmjsSConvertF64, double) \ |
165 V(I32AsmjsUConvertF64, double) | 163 V(I32AsmjsUConvertF64, double) \ |
166 | 164 V(F32Sqrt, float) \ |
167 #define FOREACH_OTHER_UNOP_NAN(V) \ | |
168 V(F32Sqrt, float) \ | |
169 V(F64Sqrt, double) | 165 V(F64Sqrt, double) |
170 | 166 |
171 static inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) { | 167 static inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) { |
172 if (b == 0) { | 168 if (b == 0) { |
173 *trap = kTrapDivByZero; | 169 *trap = kTrapDivByZero; |
174 return 0; | 170 return 0; |
175 } | 171 } |
176 if (b == -1 && a == std::numeric_limits<int32_t>::min()) { | 172 if (b == -1 && a == std::numeric_limits<int32_t>::min()) { |
177 *trap = kTrapDivUnrepresentable; | 173 *trap = kTrapDivUnrepresentable; |
178 return 0; | 174 return 0; |
(...skipping 990 matching lines...)
1169 // ^ 0 ^ stack_.size() | 1165 // ^ 0 ^ stack_.size() |
1170 DCHECK_LE(dest, stack_.size()); | 1166 DCHECK_LE(dest, stack_.size()); |
1171 DCHECK_LE(dest + arity, stack_.size()); | 1167 DCHECK_LE(dest + arity, stack_.size()); |
1172 size_t pop_count = stack_.size() - dest - arity; | 1168 size_t pop_count = stack_.size() - dest - arity; |
1173 for (size_t i = 0; i < arity; i++) { | 1169 for (size_t i = 0; i < arity; i++) { |
1174 stack_[dest + i] = stack_[dest + pop_count + i]; | 1170 stack_[dest + i] = stack_[dest + pop_count + i]; |
1175 } | 1171 } |
1176 stack_.resize(stack_.size() - pop_count); | 1172 stack_.resize(stack_.size() - pop_count); |
1177 } | 1173 } |
1178 | 1174 |
| 1175 template <typename ctype, typename mtype> |
| 1176 bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len) { |
| 1177 MemoryAccessOperand operand(decoder, code->at(pc), sizeof(ctype)); |
| 1178 uint32_t index = Pop().to<uint32_t>(); |
| 1179 size_t effective_mem_size = instance()->mem_size - sizeof(mtype); |
| 1180 if (operand.offset > effective_mem_size || |
| 1181 index > (effective_mem_size - operand.offset)) { |
| 1182 DoTrap(kTrapMemOutOfBounds, pc); |
| 1183 return false; |
| 1184 } |
| 1185 byte* addr = instance()->mem_start + operand.offset + index; |
| 1186 WasmVal result(static_cast<ctype>(ReadLittleEndianValue<mtype>(addr))); |
| 1187 |
| 1188 Push(pc, result); |
| 1189 len = 1 + operand.length; |
| 1190 return true; |
| 1191 } |
| 1192 |
| 1193 template <typename ctype, typename mtype> |
| 1194 bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc, |
| 1195 int& len) { |
| 1196 MemoryAccessOperand operand(decoder, code->at(pc), sizeof(ctype)); |
| 1197 WasmVal val = Pop(); |
| 1198 |
| 1199 uint32_t index = Pop().to<uint32_t>(); |
| 1200 size_t effective_mem_size = instance()->mem_size - sizeof(mtype); |
| 1201 if (operand.offset > effective_mem_size || |
| 1202 index > (effective_mem_size - operand.offset)) { |
| 1203 DoTrap(kTrapMemOutOfBounds, pc); |
| 1204 return false; |
| 1205 } |
| 1206 byte* addr = instance()->mem_start + operand.offset + index; |
| 1207 WriteLittleEndianValue<mtype>(addr, static_cast<mtype>(val.to<ctype>())); |
| 1208 len = 1 + operand.length; |
| 1209 |
| 1210 if (std::is_same<float, ctype>::value) { |
| 1211 possible_nondeterminism_ |= std::isnan(val.to<float>()); |
| 1212 } else if (std::is_same<double, ctype>::value) { |
| 1213 possible_nondeterminism_ |= std::isnan(val.to<double>()); |
| 1214 } |
| 1215 return true; |
| 1216 } |
| 1217 |
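                   | (Aside: a minimal standalone sketch of the overflow-safe bounds check used by
                   | ExecuteLoad/ExecuteStore above. AccessIsInBounds and its parameters are
                   | illustrative names, not V8 helpers, and the extra access_size > mem_size guard
                   | is an assumption added so the snippet is safe on its own; the helpers above
                   | rely on mem_size being at least sizeof(mtype).)
                   |
                   |   #include <cstddef>
                   |   #include <cstdint>
                   |
                   |   // Instead of testing offset + index + access_size > mem_size, which can
                   |   // wrap around, compare against mem_size - access_size in two steps, the
                   |   // same way ExecuteLoad/ExecuteStore do.
                   |   bool AccessIsInBounds(size_t mem_size, uint32_t offset, uint32_t index,
                   |                         size_t access_size) {
                   |     if (access_size > mem_size) return false;  // guard not present above
                   |     size_t effective_mem_size = mem_size - access_size;
                   |     return offset <= effective_mem_size &&
                   |            index <= effective_mem_size - offset;
                   |   }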
1179 void Execute(InterpreterCode* code, pc_t pc, int max) { | 1218 void Execute(InterpreterCode* code, pc_t pc, int max) { |
1180 Decoder decoder(code->start, code->end); | 1219 Decoder decoder(code->start, code->end); |
1181 pc_t limit = code->end - code->start; | 1220 pc_t limit = code->end - code->start; |
1182 while (--max >= 0) { | 1221 while (--max >= 0) { |
1183 #define PAUSE_IF_BREAK_FLAG(flag) \ | 1222 #define PAUSE_IF_BREAK_FLAG(flag) \ |
1184 if (V8_UNLIKELY(break_flags_ & WasmInterpreter::BreakFlag::flag)) max = 0; | 1223 if (V8_UNLIKELY(break_flags_ & WasmInterpreter::BreakFlag::flag)) max = 0; |
1185 | 1224 |
1186 DCHECK_GT(limit, pc); | 1225 DCHECK_GT(limit, pc); |
1187 | 1226 |
1188 const char* skip = " "; | 1227 const char* skip = " "; |
(...skipping 231 matching lines...)
1420 *reinterpret_cast<float*>(ptr) = val.to<float>(); | 1459 *reinterpret_cast<float*>(ptr) = val.to<float>(); |
1421 } else if (type == kWasmF64) { | 1460 } else if (type == kWasmF64) { |
1422 *reinterpret_cast<double*>(ptr) = val.to<double>(); | 1461 *reinterpret_cast<double*>(ptr) = val.to<double>(); |
1423 } else { | 1462 } else { |
1424 UNREACHABLE(); | 1463 UNREACHABLE(); |
1425 } | 1464 } |
1426 len = 1 + operand.length; | 1465 len = 1 + operand.length; |
1427 break; | 1466 break; |
1428 } | 1467 } |
1429 | 1468 |
1430 #define LOAD_CASE(name, ctype, mtype) \ | 1469 #define LOAD_CASE(name, ctype, mtype) \ |
1431 case kExpr##name: { \ | 1470 case kExpr##name: { \ |
1432 MemoryAccessOperand operand(&decoder, code->at(pc), sizeof(ctype)); \ | 1471 if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, len)) return; \ |
1433 uint32_t index = Pop().to<uint32_t>(); \ | 1472 break; \ |
1434 size_t effective_mem_size = instance()->mem_size - sizeof(mtype); \ | |
1435 if (operand.offset > effective_mem_size || \ | |
1436 index > (effective_mem_size - operand.offset)) { \ | |
1437 return DoTrap(kTrapMemOutOfBounds, pc); \ | |
1438 } \ | |
1439 byte* addr = instance()->mem_start + operand.offset + index; \ | |
1440 WasmVal result(static_cast<ctype>(ReadLittleEndianValue<mtype>(addr))); \ | |
1441 Push(pc, result); \ | |
1442 len = 1 + operand.length; \ | |
1443 break; \ | |
1444 } | 1473 } |
1445 | 1474 |
1446 LOAD_CASE(I32LoadMem8S, int32_t, int8_t); | 1475 LOAD_CASE(I32LoadMem8S, int32_t, int8_t); |
1447 LOAD_CASE(I32LoadMem8U, int32_t, uint8_t); | 1476 LOAD_CASE(I32LoadMem8U, int32_t, uint8_t); |
1448 LOAD_CASE(I32LoadMem16S, int32_t, int16_t); | 1477 LOAD_CASE(I32LoadMem16S, int32_t, int16_t); |
1449 LOAD_CASE(I32LoadMem16U, int32_t, uint16_t); | 1478 LOAD_CASE(I32LoadMem16U, int32_t, uint16_t); |
1450 LOAD_CASE(I64LoadMem8S, int64_t, int8_t); | 1479 LOAD_CASE(I64LoadMem8S, int64_t, int8_t); |
1451 LOAD_CASE(I64LoadMem8U, int64_t, uint8_t); | 1480 LOAD_CASE(I64LoadMem8U, int64_t, uint8_t); |
1452 LOAD_CASE(I64LoadMem16S, int64_t, int16_t); | 1481 LOAD_CASE(I64LoadMem16S, int64_t, int16_t); |
1453 LOAD_CASE(I64LoadMem16U, int64_t, uint16_t); | 1482 LOAD_CASE(I64LoadMem16U, int64_t, uint16_t); |
1454 LOAD_CASE(I64LoadMem32S, int64_t, int32_t); | 1483 LOAD_CASE(I64LoadMem32S, int64_t, int32_t); |
1455 LOAD_CASE(I64LoadMem32U, int64_t, uint32_t); | 1484 LOAD_CASE(I64LoadMem32U, int64_t, uint32_t); |
1456 LOAD_CASE(I32LoadMem, int32_t, int32_t); | 1485 LOAD_CASE(I32LoadMem, int32_t, int32_t); |
1457 LOAD_CASE(I64LoadMem, int64_t, int64_t); | 1486 LOAD_CASE(I64LoadMem, int64_t, int64_t); |
1458 LOAD_CASE(F32LoadMem, float, float); | 1487 LOAD_CASE(F32LoadMem, float, float); |
1459 LOAD_CASE(F64LoadMem, double, double); | 1488 LOAD_CASE(F64LoadMem, double, double); |
1460 #undef LOAD_CASE | 1489 #undef LOAD_CASE |
1461 | 1490 |
1462 #define STORE_CASE(name, ctype, mtype) \ | 1491 #define STORE_CASE(name, ctype, mtype) \ |
1463 case kExpr##name: { \ | 1492 case kExpr##name: { \ |
1464 MemoryAccessOperand operand(&decoder, code->at(pc), sizeof(ctype)); \ | 1493 if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, len)) return; \ |
1465 WasmVal val = Pop(); \ | 1494 break; \ |
1466 uint32_t index = Pop().to<uint32_t>(); \ | |
1467 size_t effective_mem_size = instance()->mem_size - sizeof(mtype); \ | |
1468 if (operand.offset > effective_mem_size || \ | |
1469 index > (effective_mem_size - operand.offset)) { \ | |
1470 return DoTrap(kTrapMemOutOfBounds, pc); \ | |
1471 } \ | |
1472 byte* addr = instance()->mem_start + operand.offset + index; \ | |
1473 WriteLittleEndianValue<mtype>(addr, static_cast<mtype>(val.to<ctype>())); \ | |
1474 len = 1 + operand.length; \ | |
1475 break; \ | |
1476 } | 1495 } |
1477 | 1496 |
1478 STORE_CASE(I32StoreMem8, int32_t, int8_t); | 1497 STORE_CASE(I32StoreMem8, int32_t, int8_t); |
1479 STORE_CASE(I32StoreMem16, int32_t, int16_t); | 1498 STORE_CASE(I32StoreMem16, int32_t, int16_t); |
1480 STORE_CASE(I64StoreMem8, int64_t, int8_t); | 1499 STORE_CASE(I64StoreMem8, int64_t, int8_t); |
1481 STORE_CASE(I64StoreMem16, int64_t, int16_t); | 1500 STORE_CASE(I64StoreMem16, int64_t, int16_t); |
1482 STORE_CASE(I64StoreMem32, int64_t, int32_t); | 1501 STORE_CASE(I64StoreMem32, int64_t, int32_t); |
1483 STORE_CASE(I32StoreMem, int32_t, int32_t); | 1502 STORE_CASE(I32StoreMem, int32_t, int32_t); |
1484 STORE_CASE(I64StoreMem, int64_t, int64_t); | 1503 STORE_CASE(I64StoreMem, int64_t, int64_t); |
1485 STORE_CASE(F32StoreMem, float, float); | 1504 STORE_CASE(F32StoreMem, float, float); |
(...skipping 55 matching lines...)
1541 MemoryIndexOperand operand(&decoder, code->at(pc)); | 1560 MemoryIndexOperand operand(&decoder, code->at(pc)); |
1542 Push(pc, WasmVal(static_cast<uint32_t>(instance()->mem_size / | 1561 Push(pc, WasmVal(static_cast<uint32_t>(instance()->mem_size / |
1543 WasmModule::kPageSize))); | 1562 WasmModule::kPageSize))); |
1544 len = 1 + operand.length; | 1563 len = 1 + operand.length; |
1545 break; | 1564 break; |
1546 } | 1565 } |
1547 // We need to treat kExprI32ReinterpretF32 and kExprI64ReinterpretF64 | 1566 // We need to treat kExprI32ReinterpretF32 and kExprI64ReinterpretF64 |
1548 // specially to guarantee that the quiet bit of a NaN is preserved on | 1567 // specially to guarantee that the quiet bit of a NaN is preserved on |
1549 // ia32 by the reinterpret casts. | 1568 // ia32 by the reinterpret casts. |
1550 case kExprI32ReinterpretF32: { | 1569 case kExprI32ReinterpretF32: { |
1551 WasmVal result(ExecuteI32ReinterpretF32(Pop())); | 1570 WasmVal val = Pop(); |
| 1571 WasmVal result(ExecuteI32ReinterpretF32(val)); |
1552 Push(pc, result); | 1572 Push(pc, result); |
| 1573 possible_nondeterminism_ |= std::isnan(val.to<float>()); |
1553 break; | 1574 break; |
1554 } | 1575 } |
1555 case kExprI64ReinterpretF64: { | 1576 case kExprI64ReinterpretF64: { |
1556 WasmVal result(ExecuteI64ReinterpretF64(Pop())); | 1577 WasmVal val = Pop(); |
| 1578 WasmVal result(ExecuteI64ReinterpretF64(val)); |
1557 Push(pc, result); | 1579 Push(pc, result); |
| 1580 possible_nondeterminism_ |= std::isnan(val.to<double>()); |
1558 break; | 1581 break; |
1559 } | 1582 } |
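                   | (Aside: a minimal sketch of the bit-preserving reinterpretation the comment
                   | above refers to; ReinterpretF32Bits is an illustrative helper, not the V8
                   | implementation. Copying the raw bytes keeps the value out of floating-point
                   | registers, which on ia32/x87 can silently set the quiet bit of a signaling
                   | NaN.)
                   |
                   |   #include <cstdint>
                   |   #include <cstring>
                   |
                   |   static inline int32_t ReinterpretF32Bits(float f) {
                   |     int32_t bits;
                   |     static_assert(sizeof(bits) == sizeof(f), "expect 32-bit float");
                   |     std::memcpy(&bits, &f, sizeof(bits));  // raw bit copy, no FP round-trip
                   |     return bits;
                   |   }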
1560 #define EXECUTE_SIMPLE_BINOP(name, ctype, op) \ | 1583 #define EXECUTE_SIMPLE_BINOP(name, ctype, op) \ |
1561 case kExpr##name: { \ | 1584 case kExpr##name: { \ |
1562 WasmVal rval = Pop(); \ | 1585 WasmVal rval = Pop(); \ |
1563 WasmVal lval = Pop(); \ | 1586 WasmVal lval = Pop(); \ |
1564 WasmVal result(lval.to<ctype>() op rval.to<ctype>()); \ | 1587 WasmVal result(lval.to<ctype>() op rval.to<ctype>()); \ |
1565 Push(pc, result); \ | 1588 Push(pc, result); \ |
1566 break; \ | 1589 break; \ |
1567 } | 1590 } |
1568 FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP) | 1591 FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP) |
1569 #undef EXECUTE_SIMPLE_BINOP | 1592 #undef EXECUTE_SIMPLE_BINOP |
1570 | 1593 |
1571 #define EXECUTE_SIMPLE_BINOP_NAN(name, ctype, op) \ | |
1572 case kExpr##name: { \ | |
1573 WasmVal rval = Pop(); \ | |
1574 WasmVal lval = Pop(); \ | |
1575 ctype result = lval.to<ctype>() op rval.to<ctype>(); \ | |
1576 possible_nondeterminism_ |= std::isnan(result); \ | |
1577 WasmVal result_val(result); \ | |
1578 Push(pc, result_val); \ | |
1579 break; \ | |
1580 } | |
1581 FOREACH_SIMPLE_BINOP_NAN(EXECUTE_SIMPLE_BINOP_NAN) | |
1582 #undef EXECUTE_SIMPLE_BINOP_NAN | |
1583 | |
1584 #define EXECUTE_OTHER_BINOP(name, ctype) \ | 1594 #define EXECUTE_OTHER_BINOP(name, ctype) \ |
1585 case kExpr##name: { \ | 1595 case kExpr##name: { \ |
1586 TrapReason trap = kTrapCount; \ | 1596 TrapReason trap = kTrapCount; \ |
1587 volatile ctype rval = Pop().to<ctype>(); \ | 1597 volatile ctype rval = Pop().to<ctype>(); \ |
1588 volatile ctype lval = Pop().to<ctype>(); \ | 1598 volatile ctype lval = Pop().to<ctype>(); \ |
1589 WasmVal result(Execute##name(lval, rval, &trap)); \ | 1599 WasmVal result(Execute##name(lval, rval, &trap)); \ |
1590 if (trap != kTrapCount) return DoTrap(trap, pc); \ | 1600 if (trap != kTrapCount) return DoTrap(trap, pc); \ |
1591 Push(pc, result); \ | 1601 Push(pc, result); \ |
1592 break; \ | 1602 break; \ |
1593 } | 1603 } |
1594 FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP) | 1604 FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP) |
1595 #undef EXECUTE_OTHER_BINOP | 1605 #undef EXECUTE_OTHER_BINOP |
1596 | 1606 |
| 1607 case kExprF32CopySign: { |
| 1608 // Handle kExprF32CopySign separately because it may introduce |
| 1609 // observable non-determinism. |
| 1610 TrapReason trap = kTrapCount; |
| 1611 volatile float rval = Pop().to<float>(); |
| 1612 volatile float lval = Pop().to<float>(); |
| 1613 WasmVal result(ExecuteF32CopySign(lval, rval, &trap)); |
| 1614 Push(pc, result); |
| 1615 possible_nondeterminism_ |= std::isnan(rval); |
| 1616 break; |
| 1617 } |
| 1618 case kExprF64CopySign: { |
| 1619 // Handle kExprF64CopySign separately because it may introduce |
| 1620 // observable non-determinism. |
| 1621 TrapReason trap = kTrapCount; |
| 1622 volatile double rval = Pop().to<double>(); |
| 1623 volatile double lval = Pop().to<double>(); |
| 1624 WasmVal result(ExecuteF64CopySign(lval, rval, &trap)); |
| 1625 Push(pc, result); |
| 1626 possible_nondeterminism_ |= std::isnan(rval); |
| 1627 break; |
| 1628 } |
1597 #define EXECUTE_OTHER_UNOP(name, ctype) \ | 1629 #define EXECUTE_OTHER_UNOP(name, ctype) \ |
1598 case kExpr##name: { \ | 1630 case kExpr##name: { \ |
1599 TrapReason trap = kTrapCount; \ | 1631 TrapReason trap = kTrapCount; \ |
1600 volatile ctype val = Pop().to<ctype>(); \ | 1632 volatile ctype val = Pop().to<ctype>(); \ |
1601 WasmVal result(Execute##name(val, &trap)); \ | 1633 WasmVal result(Execute##name(val, &trap)); \ |
1602 if (trap != kTrapCount) return DoTrap(trap, pc); \ | 1634 if (trap != kTrapCount) return DoTrap(trap, pc); \ |
1603 Push(pc, result); \ | 1635 Push(pc, result); \ |
1604 break; \ | 1636 break; \ |
1605 } | 1637 } |
1606 FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP) | 1638 FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP) |
1607 #undef EXECUTE_OTHER_UNOP | 1639 #undef EXECUTE_OTHER_UNOP |
1608 | 1640 |
1609 #define EXECUTE_OTHER_UNOP_NAN(name, ctype) \ | |
1610 case kExpr##name: { \ | |
1611 TrapReason trap = kTrapCount; \ | |
1612 volatile ctype val = Pop().to<ctype>(); \ | |
1613 ctype result = Execute##name(val, &trap); \ | |
1614 possible_nondeterminism_ |= std::isnan(result); \ | |
1615 WasmVal result_val(result); \ | |
1616 if (trap != kTrapCount) return DoTrap(trap, pc); \ | |
1617 Push(pc, result_val); \ | |
1618 break; \ | |
1619 } | |
1620 FOREACH_OTHER_UNOP_NAN(EXECUTE_OTHER_UNOP_NAN) | |
1621 #undef EXECUTE_OTHER_UNOP_NAN | |
1622 | |
1623 default: | 1641 default: |
1624 V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s", | 1642 V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s", |
1625 code->start[pc], OpcodeName(code->start[pc])); | 1643 code->start[pc], OpcodeName(code->start[pc])); |
1626 UNREACHABLE(); | 1644 UNREACHABLE(); |
1627 } | 1645 } |
1628 | 1646 |
1629 pc += len; | 1647 pc += len; |
1630 if (pc == limit) { | 1648 if (pc == limit) { |
1631 // Fell off end of code; do an implicit return. | 1649 // Fell off end of code; do an implicit return. |
1632 TRACE("@%-3zu: ImplicitReturn\n", pc); | 1650 TRACE("@%-3zu: ImplicitReturn\n", pc); |
(...skipping 289 matching lines...)
1922 return none; | 1940 return none; |
1923 } | 1941 } |
1924 | 1942 |
1925 void InterpretedFrame::SetLocalVal(int index, WasmVal val) { UNIMPLEMENTED(); } | 1943 void InterpretedFrame::SetLocalVal(int index, WasmVal val) { UNIMPLEMENTED(); } |
1926 | 1944 |
1927 void InterpretedFrame::SetExprVal(int pc, WasmVal val) { UNIMPLEMENTED(); } | 1945 void InterpretedFrame::SetExprVal(int pc, WasmVal val) { UNIMPLEMENTED(); } |
1928 | 1946 |
1929 } // namespace wasm | 1947 } // namespace wasm |
1930 } // namespace internal | 1948 } // namespace internal |
1931 } // namespace v8 | 1949 } // namespace v8 |