Chromium Code Reviews

Side by Side Diff: src/interpreter/interpreter.cc

Issue 2552883012: [interpreter][stubs] Fixing issues found by machine graph verifier. (Closed)
Patch Set: Addressing nits (created 4 years ago)
 // Copyright 2015 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/interpreter/interpreter.h"

 #include <fstream>
 #include <memory>

 #include "src/ast/prettyprinter.h"
(...skipping 80 matching lines...)

   // Initialization should have been successful.
   DCHECK(IsDispatchTableInitialized());
 }

 void Interpreter::InstallBytecodeHandler(Zone* zone, Bytecode bytecode,
                                          OperandScale operand_scale,
                                          BytecodeGeneratorFunc generator) {
   if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;

+  // TODO(ishell): remove this when code stub assembler graphs verification
+  // is enabled for all stubs.
+  bool sav_csa_verify = FLAG_csa_verify;
+  // Enable verification only in mksnapshot.
+  FLAG_csa_verify = DEBUG_BOOL && FLAG_startup_blob != nullptr;
+
   InterpreterDispatchDescriptor descriptor(isolate_);
   compiler::CodeAssemblerState state(
       isolate_, zone, descriptor, Code::ComputeFlags(Code::BYTECODE_HANDLER),
       Bytecodes::ToString(bytecode), Bytecodes::ReturnCount(bytecode));
   InterpreterAssembler assembler(&state, bytecode, operand_scale);
   (this->*generator)(&assembler);
   Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
   size_t index = GetDispatchTableIndex(bytecode, operand_scale);
   dispatch_table_[index] = code->entry();
   TraceCodegen(code);
   PROFILE(isolate_, CodeCreateEvent(
                         CodeEventListener::BYTECODE_HANDLER_TAG,
                         AbstractCode::cast(*code),
                         Bytecodes::ToString(bytecode, operand_scale).c_str()));
+  FLAG_csa_verify = sav_csa_verify;
 }
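Note: the added lines temporarily enable the machine graph verifier around
handler generation and restore the previous flag value afterwards, so
verification only runs while mksnapshot (which passes --startup_blob) builds
the bytecode handlers. A minimal sketch of the same save/restore idea as a
scoped guard; the type and names here are hypothetical, not V8 API:

    // Restores a global flag on scope exit, even on early return.
    struct FlagRestorer {
      bool* flag_;
      bool saved_;
      FlagRestorer(bool* flag, bool value) : flag_(flag), saved_(*flag) {
        *flag = value;
      }
      ~FlagRestorer() { *flag_ = saved_; }
    };

The patch restores the flag manually instead; that is safe here because the
only early return happens before the flag is saved.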

 Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
                                       OperandScale operand_scale) {
   DCHECK(IsDispatchTableInitialized());
   DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
   size_t index = GetDispatchTableIndex(bytecode, operand_scale);
   Address code_entry = dispatch_table_[index];
   return Code::GetCodeFromTargetAddress(code_entry);
 }
(...skipping 189 matching lines...)
 void Interpreter::DoLdaZero(InterpreterAssembler* assembler) {
   Node* zero_value = __ NumberConstant(0.0);
   __ SetAccumulator(zero_value);
   __ Dispatch();
 }

 // LdaSmi <imm>
 //
 // Load an integer literal into the accumulator as a Smi.
 void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) {
-  Node* raw_int = __ BytecodeOperandImm(0);
-  Node* smi_int = __ SmiTag(raw_int);
+  Node* smi_int = __ BytecodeOperandImmSmi(0);
   __ SetAccumulator(smi_int);
   __ Dispatch();
 }
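Note: BytecodeOperandImmSmi folds the former load-then-SmiTag pair into one
operand accessor, so the handler graph carries a value whose machine
representation is tagged from the start, which is what the verifier checks.
For reference, Smi tagging is just a left shift that leaves tag bit 0 clear; a
simplified, self-contained sketch assuming the 32-bit layout (64-bit V8
shifts by 32 instead):

    #include <cassert>
    #include <cstdint>

    intptr_t SmiTag(intptr_t value) { return value << 1; }
    intptr_t SmiUntag(intptr_t tagged) { return tagged >> 1; }

    int main() {
      assert(SmiUntag(SmiTag(42)) == 42);
      assert((SmiTag(42) & 1) == 0);  // Tag bit 0 marks a small integer.
    }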

 // LdaConstant <idx>
 //
 // Load constant literal at |idx| in the constant pool into the accumulator.
 void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* constant = __ LoadConstantPoolEntry(index);
(...skipping 525 matching lines...)
                       value, attrs, set_function_name);
   __ Dispatch();
 }

 // LdaModuleVariable <cell_index> <depth>
 //
 // Load the contents of a module variable into the accumulator. The variable is
 // identified by <cell_index>. <depth> is the depth of the current context
 // relative to the module context.
 void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) {
-  Node* cell_index = __ BytecodeOperandImm(0);
+  Node* cell_index = __ BytecodeOperandImmIntPtr(0);
   Node* depth = __ BytecodeOperandUImm(1);

   Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
   Node* module =
       __ LoadContextElement(module_context, Context::EXTENSION_INDEX);

   Label if_export(assembler), if_import(assembler), end(assembler);
   __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
             &if_import);

   __ Bind(&if_export);
   {
     Node* regular_exports =
         __ LoadObjectField(module, Module::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
-    Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
+    Node* cell = __ LoadFixedArrayElement(regular_exports, export_index, 0,
+                                          CodeStubAssembler::INTPTR_PARAMETERS);
     __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
     __ Goto(&end);
   }

   __ Bind(&if_import);
   {
     Node* regular_imports =
         __ LoadObjectField(module, Module::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     Node* import_index = __ IntPtrSub(__ IntPtrConstant(-1), cell_index);
-    Node* cell = __ LoadFixedArrayElement(regular_imports, import_index);
+    Node* cell = __ LoadFixedArrayElement(regular_imports, import_index, 0,
+                                          CodeStubAssembler::INTPTR_PARAMETERS);
     __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
     __ Goto(&end);
   }

   __ Bind(&end);
   __ Dispatch();
 }
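Note: <cell_index> is a signed operand, now loaded directly as an intptr so it
can feed the IntPtr comparisons and subtractions above. Positive values name
regular exports and negative values name regular imports, with the backing
array slot recovered as (cell_index - 1) or (-cell_index - 1). A plain C++
sketch of that decoding (hypothetical helper, mirroring the comments above):

    #include <cassert>
    #include <cstdint>

    struct CellSlot {
      bool is_export;
      intptr_t index;  // Index into regular_exports or regular_imports.
    };

    CellSlot DecodeCellIndex(intptr_t cell_index) {
      if (cell_index > 0) return {true, cell_index - 1};
      return {false, -cell_index - 1};
    }

    int main() {
      assert(DecodeCellIndex(1).is_export && DecodeCellIndex(1).index == 0);
      assert(!DecodeCellIndex(-2).is_export && DecodeCellIndex(-2).index == 1);
    }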

 // StaModuleVariable <cell_index> <depth>
 //
 // Store accumulator to the module variable identified by <cell_index>.
 // <depth> is the depth of the current context relative to the module context.
 void Interpreter::DoStaModuleVariable(InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
-  Node* cell_index = __ BytecodeOperandImm(0);
+  Node* cell_index = __ BytecodeOperandImmIntPtr(0);
   Node* depth = __ BytecodeOperandUImm(1);

   Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
   Node* module =
       __ LoadContextElement(module_context, Context::EXTENSION_INDEX);

   Label if_export(assembler), if_import(assembler), end(assembler);
   __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
             &if_import);

   __ Bind(&if_export);
   {
     Node* regular_exports =
         __ LoadObjectField(module, Module::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
-    Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
+    Node* cell = __ LoadFixedArrayElement(regular_exports, export_index, 0,
+                                          CodeStubAssembler::INTPTR_PARAMETERS);
     __ StoreObjectField(cell, Cell::kValueOffset, value);
     __ Goto(&end);
   }

   __ Bind(&if_import);
   {
     // Not supported (probably never).
     __ Abort(kUnsupportedModuleOperation);
     __ Goto(&end);
   }
(...skipping 378 matching lines...)
 //
 // Adds an immediate value <imm> to register <reg>. For this
 // operation <reg> is the lhs operand and <imm> is the rhs operand.
 void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
   Variable var_result(assembler, MachineRepresentation::kTagged);
   Label fastpath(assembler), slowpath(assembler, Label::kDeferred),
       end(assembler);

   Node* reg_index = __ BytecodeOperandReg(1);
   Node* left = __ LoadRegister(reg_index);
-  Node* raw_int = __ BytecodeOperandImm(0);
-  Node* right = __ SmiTag(raw_int);
+  Node* right = __ BytecodeOperandImmSmi(0);
   Node* slot_index = __ BytecodeOperandIdx(2);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();

   // {right} is known to be a Smi.
   // Check if {left} is a Smi and, if so, take the fast path.
   __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
   __ Bind(&fastpath);
   {
     // Try fast Smi addition first.
     Node* pair = __ IntPtrAddWithOverflow(__ BitcastTaggedToWord(left),
(...skipping 10 matching lines...)
       var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
       __ Goto(&end);
     }
   }
   __ Bind(&slowpath);
   {
     Node* context = __ GetContext();
     AddWithFeedbackStub stub(__ isolate());
     Callable callable =
         Callable(stub.GetCode(), AddWithFeedbackStub::Descriptor(__ isolate()));
-    Node* args[] = {left, right, slot_index, type_feedback_vector, context};
+    Node* args[] = {left, right, __ TruncateWordToWord32(slot_index),
+                    type_feedback_vector, context};
     var_result.Bind(__ CallStubN(callable, args, 1));
     __ Goto(&end);
   }
   __ Bind(&end);
   {
     __ SetAccumulator(var_result.value());
     __ Dispatch();
   }
 }
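Note: two verifier-driven changes meet here. The immediate is loaded directly
as a tagged Smi, and the word-sized feedback slot index is explicitly
truncated to word32 before being passed to AddWithFeedbackStub, whose call
descriptor takes the slot as a 32-bit value. The fast path adds the two tagged
words directly and defers to the stub on overflow; a simplified sketch of the
same idea (GCC/Clang builtin, 32-bit Smi layout assumed, heap-number inputs
ignored):

    #include <cstdint>

    // Adding two tagged Smis (value << 1) yields the tagged sum, and the
    // machine add overflows exactly when the Smi sum would.
    bool TryFastSmiAdd(intptr_t left_tagged, intptr_t right_tagged,
                       intptr_t* result_tagged) {
      return !__builtin_add_overflow(left_tagged, right_tagged, result_tagged);
    }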

 // SubSmi <imm> <reg>
 //
 // Subtracts an immediate value <imm> from register <reg>. For this
 // operation <reg> is the lhs operand and <imm> is the rhs operand.
 void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
   Variable var_result(assembler, MachineRepresentation::kTagged);
   Label fastpath(assembler), slowpath(assembler, Label::kDeferred),
       end(assembler);

   Node* reg_index = __ BytecodeOperandReg(1);
   Node* left = __ LoadRegister(reg_index);
-  Node* raw_int = __ BytecodeOperandImm(0);
-  Node* right = __ SmiTag(raw_int);
+  Node* right = __ BytecodeOperandImmSmi(0);
   Node* slot_index = __ BytecodeOperandIdx(2);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();

   // {right} is known to be a Smi.
   // Check if {left} is a Smi and, if so, take the fast path.
   __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
   __ Bind(&fastpath);
   {
     // Try fast Smi subtraction first.
     Node* pair = __ IntPtrSubWithOverflow(__ BitcastTaggedToWord(left),
(...skipping 10 matching lines...)
       var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
       __ Goto(&end);
     }
   }
   __ Bind(&slowpath);
   {
     Node* context = __ GetContext();
     SubtractWithFeedbackStub stub(__ isolate());
     Callable callable = Callable(
         stub.GetCode(), SubtractWithFeedbackStub::Descriptor(__ isolate()));
-    Node* args[] = {left, right, slot_index, type_feedback_vector, context};
+    Node* args[] = {left, right, __ TruncateWordToWord32(slot_index),
+                    type_feedback_vector, context};
     var_result.Bind(__ CallStubN(callable, args, 1));
     __ Goto(&end);
   }
   __ Bind(&end);
   {
     __ SetAccumulator(var_result.value());
     __ Dispatch();
   }
 }

 // BitwiseOr <imm> <reg>
 //
 // BitwiseOr <reg> with <imm>. For this operation <reg> is the lhs
 // operand and <imm> is the rhs operand.
 void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(1);
   Node* left = __ LoadRegister(reg_index);
-  Node* raw_int = __ BytecodeOperandImm(0);
-  Node* right = __ SmiTag(raw_int);
+  Node* right = __ BytecodeOperandImmSmi(0);
   Node* context = __ GetContext();
   Node* slot_index = __ BytecodeOperandIdx(2);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
   Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
       context, left, &var_lhs_type_feedback);
   Node* rhs_value = __ SmiToWord32(right);
   Node* value = __ Word32Or(lhs_value, rhs_value);
   Node* result = __ ChangeInt32ToTagged(value);
   Node* result_type = __ SelectInt32Constant(
       __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
       BinaryOperationFeedback::kNumber);
   __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
                     type_feedback_vector, slot_index);
   __ SetAccumulator(result);
   __ Dispatch();
 }

 // BitwiseAnd <imm> <reg>
 //
 // BitwiseAnd <reg> with <imm>. For this operation <reg> is the lhs
 // operand and <imm> is the rhs operand.
 void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(1);
   Node* left = __ LoadRegister(reg_index);
-  Node* raw_int = __ BytecodeOperandImm(0);
-  Node* right = __ SmiTag(raw_int);
+  Node* right = __ BytecodeOperandImmSmi(0);
   Node* context = __ GetContext();
   Node* slot_index = __ BytecodeOperandIdx(2);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
   Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
       context, left, &var_lhs_type_feedback);
   Node* rhs_value = __ SmiToWord32(right);
   Node* value = __ Word32And(lhs_value, rhs_value);
   Node* result = __ ChangeInt32ToTagged(value);
   Node* result_type = __ SelectInt32Constant(
       __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
       BinaryOperationFeedback::kNumber);
   __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
                     type_feedback_vector, slot_index);
   __ SetAccumulator(result);
   __ Dispatch();
 }

 // ShiftLeftSmi <imm> <reg>
 //
 // Left shifts register <src> by the count specified in <imm>.
 // Register <src> is converted to an int32 before the operation. The 5
 // lsb bits from <imm> are used as count i.e. <src> << (<imm> & 0x1F).
 void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(1);
   Node* left = __ LoadRegister(reg_index);
-  Node* raw_int = __ BytecodeOperandImm(0);
-  Node* right = __ SmiTag(raw_int);
+  Node* right = __ BytecodeOperandImmSmi(0);
   Node* context = __ GetContext();
   Node* slot_index = __ BytecodeOperandIdx(2);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
   Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
       context, left, &var_lhs_type_feedback);
   Node* rhs_value = __ SmiToWord32(right);
   Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
   Node* value = __ Word32Shl(lhs_value, shift_count);
   Node* result = __ ChangeInt32ToTagged(value);
   Node* result_type = __ SelectInt32Constant(
       __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
       BinaryOperationFeedback::kNumber);
   __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
                     type_feedback_vector, slot_index);
   __ SetAccumulator(result);
   __ Dispatch();
 }
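Note: masking the count with 0x1F matches JavaScript's shift semantics, where
only the low five bits of the right-hand side are used. A one-line
self-contained illustration:

    #include <cassert>
    #include <cstdint>

    int32_t ShiftLeftJS(int32_t lhs, int32_t rhs) {
      return lhs << (rhs & 0x1f);  // JS: the count is taken mod 32.
    }

    int main() { assert(ShiftLeftJS(1, 33) == 2); }  // Not 1 << 33.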

 // ShiftRightSmi <imm> <reg>
 //
 // Right shifts register <src> by the count specified in <imm>.
 // Register <src> is converted to an int32 before the operation. The 5
 // lsb bits from <imm> are used as count i.e. <src> >> (<imm> & 0x1F).
 void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(1);
   Node* left = __ LoadRegister(reg_index);
-  Node* raw_int = __ BytecodeOperandImm(0);
-  Node* right = __ SmiTag(raw_int);
+  Node* right = __ BytecodeOperandImmSmi(0);
   Node* context = __ GetContext();
   Node* slot_index = __ BytecodeOperandIdx(2);
   Node* type_feedback_vector = __ LoadTypeFeedbackVector();
   Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
       context, left, &var_lhs_type_feedback);
   Node* rhs_value = __ SmiToWord32(right);
   Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
   Node* value = __ Word32Sar(lhs_value, shift_count);
   Node* result = __ ChangeInt32ToTagged(value);
(...skipping 437 matching lines...)
   }

   __ Bind(&end);
   __ Dispatch();
 }

 // Jump <imm>
 //
 // Jump by number of bytes represented by the immediate operand |imm|.
 void Interpreter::DoJump(InterpreterAssembler* assembler) {
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
   __ Jump(relative_jump);
 }

 // JumpConstant <idx>
 //
 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
 void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
   __ Jump(relative_jump);
 }
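Note: relative jump offsets are now materialized as word-sized values, either
directly via BytecodeOperandImmIntPtr or, for the constant-pool variants, via
LoadAndUntagConstantPoolEntry, which unwraps the stored Smi into an intptr
before it is added to the bytecode offset. A simplified sketch of the
constant-pool case, assuming the 32-bit Smi layout from the earlier note:

    #include <cstdint>

    intptr_t SmiUntag(intptr_t tagged) { return tagged >> 1; }

    // The delta may be negative (backward jumps), hence the signed intptr.
    intptr_t ApplyJump(intptr_t bytecode_offset, intptr_t tagged_delta) {
      return bytecode_offset + SmiUntag(tagged_delta);
    }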

 // JumpIfTrue <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the
 // accumulator contains true.
 void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
   Node* true_value = __ BooleanConstant(true);
   __ JumpIfWordEqual(accumulator, true_value, relative_jump);
 }

 // JumpIfTrueConstant <idx>
 //
 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the accumulator contains true.
 void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* index = __ BytecodeOperandIdx(0);
   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
   Node* true_value = __ BooleanConstant(true);
   __ JumpIfWordEqual(accumulator, true_value, relative_jump);
 }

 // JumpIfFalse <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the
 // accumulator contains false.
 void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
   Node* false_value = __ BooleanConstant(false);
   __ JumpIfWordEqual(accumulator, false_value, relative_jump);
 }

 // JumpIfFalseConstant <idx>
 //
 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the accumulator contains false.
 void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* index = __ BytecodeOperandIdx(0);
   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
   Node* false_value = __ BooleanConstant(false);
   __ JumpIfWordEqual(accumulator, false_value, relative_jump);
 }

 // JumpIfToBooleanTrue <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is true when the object is cast to boolean.
 void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
   Label if_true(assembler), if_false(assembler);
   __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
   __ Bind(&if_true);
   __ Jump(relative_jump);
   __ Bind(&if_false);
   __ Dispatch();
 }

 // JumpIfToBooleanTrueConstant <idx>
 //
(...skipping 12 matching lines...)
   __ Bind(&if_false);
   __ Dispatch();
 }

 // JumpIfToBooleanFalse <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is false when the object is cast to boolean.
 void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
   Label if_true(assembler), if_false(assembler);
   __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
   __ Bind(&if_true);
   __ Dispatch();
   __ Bind(&if_false);
   __ Jump(relative_jump);
 }

 // JumpIfToBooleanFalseConstant <idx>
 //
(...skipping 13 matching lines...)
   __ Jump(relative_jump);
 }

 // JumpIfNull <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is the null constant.
 void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
   __ JumpIfWordEqual(accumulator, null_value, relative_jump);
 }

 // JumpIfNullConstant <idx>
 //
 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the object referenced by the accumulator is the null constant.
 void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
   Node* index = __ BytecodeOperandIdx(0);
   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
   __ JumpIfWordEqual(accumulator, null_value, relative_jump);
 }

 // JumpIfUndefined <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is the undefined constant.
 void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* undefined_value =
       __ HeapConstant(isolate_->factory()->undefined_value());
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
   __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
 }

 // JumpIfUndefinedConstant <idx>
 //
 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the object referenced by the accumulator is the undefined constant.
 void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* undefined_value =
       __ HeapConstant(isolate_->factory()->undefined_value());
   Node* index = __ BytecodeOperandIdx(0);
   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
   __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
 }

 // JumpIfJSReceiver <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is a JSReceiver.
 void Interpreter::DoJumpIfJSReceiver(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandImmIntPtr(0);

   Label if_object(assembler), if_notobject(assembler, Label::kDeferred),
       if_notsmi(assembler);
   __ Branch(__ TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);

   __ Bind(&if_notsmi);
   __ Branch(__ IsJSReceiver(accumulator), &if_object, &if_notobject);
   __ Bind(&if_object);
   __ Jump(relative_jump);

(...skipping 23 matching lines...)
   __ Dispatch();
 }

 // JumpIfNotHole <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
 // referenced by the accumulator is not the hole.
 void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
   __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
 }

 // JumpIfNotHoleConstant <idx>
 //
 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
 // if the object referenced by the accumulator is not the hole constant.
 void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
   Node* index = __ BytecodeOperandIdx(0);
   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
   __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
 }

 // JumpLoop <imm> <loop_depth>
 //
 // Jump by number of bytes represented by the immediate operand |imm|. Also
 // performs a loop nesting check and potentially triggers OSR in case the
 // current OSR level matches (or exceeds) the specified |loop_depth|.
 void Interpreter::DoJumpLoop(InterpreterAssembler* assembler) {
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
   Node* loop_depth = __ BytecodeOperandImm(1);
   Node* osr_level = __ LoadOSRNestingLevel();

   // Check if OSR points at the given {loop_depth} are armed by comparing it to
   // the current {osr_level} loaded from the header of the BytecodeArray.
   Label ok(assembler), osr_armed(assembler, Label::kDeferred);
   Node* condition = __ Int32GreaterThanOrEqual(loop_depth, osr_level);
   __ Branch(condition, &ok, &osr_armed);

   __ Bind(&ok);
   __ Jump(relative_jump);

   __ Bind(&osr_armed);
   {
     Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate_);
     Node* target = __ HeapConstant(callable.code());
     Node* context = __ GetContext();
     __ CallStub(callable.descriptor(), target, context);
     __ Jump(relative_jump);
   }
 }
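Note: only the jump offset becomes an intptr here; <loop_depth> stays a plain
word32 immediate because it only feeds the Int32 comparison against the OSR
nesting level from the BytecodeArray header. The handler takes the deferred
osr_armed path when loop_depth < osr_level, in plain terms:

    #include <cstdint>

    // True when the armed OSR level exceeds this back edge's loop depth.
    bool OsrArmed(int32_t loop_depth, int32_t osr_level) {
      return loop_depth < osr_level;
    }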

 // CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
 //
 // Creates a regular expression literal for literal index <literal_idx> with
 // <flags> and the pattern in <pattern_idx>.
 void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* pattern = __ LoadConstantPoolEntry(index);
-  Node* literal_index_raw = __ BytecodeOperandIdx(1);
-  Node* literal_index = __ SmiTag(literal_index_raw);
-  Node* flags_raw = __ BytecodeOperandFlag(2);
-  Node* flags = __ SmiTag(flags_raw);
+  Node* literal_index = __ BytecodeOperandIdxSmi(1);
+  Node* flags = __ SmiFromWord32(__ BytecodeOperandFlag(2));
   Node* closure = __ LoadRegister(Register::function_closure());
   Node* context = __ GetContext();
   Node* result = FastCloneRegExpStub::Generate(
       assembler, closure, literal_index, pattern, flags, context);
   __ SetAccumulator(result);
   __ Dispatch();
 }

 // CreateArrayLiteral <element_idx> <literal_idx> <flags>
 //
 // Creates an array literal for literal index <literal_idx> with
 // CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
 void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
-  Node* literal_index_raw = __ BytecodeOperandIdx(1);
-  Node* literal_index = __ SmiTag(literal_index_raw);
+  Node* literal_index = __ BytecodeOperandIdxSmi(1);
   Node* closure = __ LoadRegister(Register::function_closure());
   Node* context = __ GetContext();
   Node* bytecode_flags = __ BytecodeOperandFlag(2);

   Label fast_shallow_clone(assembler),
       call_runtime(assembler, Label::kDeferred);
-  Node* use_fast_shallow_clone = __ Word32And(
-      bytecode_flags,
-      __ Int32Constant(CreateArrayLiteralFlags::FastShallowCloneBit::kMask));
-  __ Branch(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
+  __ Branch(__ IsSetWord32<CreateArrayLiteralFlags::FastShallowCloneBit>(
+                bytecode_flags),
+            &fast_shallow_clone, &call_runtime);

   __ Bind(&fast_shallow_clone);
   {
     DCHECK(FLAG_allocation_site_pretenuring);
     Node* result = FastCloneShallowArrayStub::Generate(
         assembler, closure, literal_index, context, &call_runtime,
         TRACK_ALLOCATION_SITE);
     __ SetAccumulator(result);
     __ Dispatch();
   }

   __ Bind(&call_runtime);
   {
-    STATIC_ASSERT(CreateArrayLiteralFlags::FlagsBits::kShift == 0);
-    Node* flags_raw = __ Word32And(
-        bytecode_flags,
-        __ Int32Constant(CreateArrayLiteralFlags::FlagsBits::kMask));
+    Node* flags_raw =
+        __ DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>(
+            bytecode_flags);
     Node* flags = __ SmiTag(flags_raw);
     Node* index = __ BytecodeOperandIdx(0);
     Node* constant_elements = __ LoadConstantPoolEntry(index);
     Node* result =
         __ CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
                        literal_index, constant_elements, flags);
     __ SetAccumulator(result);
     __ Dispatch();
   }
 }
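Note: this patch repeatedly swaps hand-written Word32And masking (plus the
STATIC_ASSERT that the field starts at bit 0) for the templated IsSetWord32
and DecodeWordFromWord32 helpers, which test or extract a BitField from the
32-bit flags operand and, in the decode case, return a word-sized value ready
for pointer-width use. The underlying operation is ordinary shift-and-mask; a
self-contained sketch with hypothetical field positions:

    #include <cassert>
    #include <cstdint>

    // Shift-and-mask decode of a packed 32-bit flags word, the operation
    // behind helpers like DecodeWordFromWord32<Field>(flags).
    template <int kShift, int kBits>
    uintptr_t DecodeField(uint32_t flags) {
      return (flags >> kShift) & ((1u << kBits) - 1);
    }

    int main() {
      uint32_t flags = (3u << 1) | 1u;        // a bool bit plus a 4-bit field
      assert(DecodeField<0, 1>(flags) == 1);  // IsSetWord32-style test
      assert(DecodeField<1, 4>(flags) == 3);
    }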

 // CreateObjectLiteral <element_idx> <literal_idx> <flags>
 //
 // Creates an object literal for literal index <literal_idx> with
 // CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
 void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
-  Node* literal_index_raw = __ BytecodeOperandIdx(1);
-  Node* literal_index = __ SmiTag(literal_index_raw);
+  Node* literal_index = __ BytecodeOperandIdxSmi(1);
   Node* bytecode_flags = __ BytecodeOperandFlag(2);
   Node* closure = __ LoadRegister(Register::function_closure());

   // Check if we can do a fast clone or have to call the runtime.
   Label if_fast_clone(assembler),
       if_not_fast_clone(assembler, Label::kDeferred);
-  Node* fast_clone_properties_count =
-      __ DecodeWord32<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
-          bytecode_flags);
-  __ Branch(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
+  Node* fast_clone_properties_count = __ DecodeWordFromWord32<
+      CreateObjectLiteralFlags::FastClonePropertiesCountBits>(bytecode_flags);
+  __ Branch(__ WordNotEqual(fast_clone_properties_count, __ IntPtrConstant(0)),
+            &if_fast_clone, &if_not_fast_clone);

   __ Bind(&if_fast_clone);
   {
     // If we can do a fast clone, do the fast path in FastCloneShallowObjectStub.
     Node* result = FastCloneShallowObjectStub::GenerateFastPath(
         assembler, &if_not_fast_clone, closure, literal_index,
         fast_clone_properties_count);
     __ StoreRegister(result, __ BytecodeOperandReg(3));
     __ Dispatch();
   }

   __ Bind(&if_not_fast_clone);
   {
     // If we can't do a fast clone, call into the runtime.
     Node* index = __ BytecodeOperandIdx(0);
     Node* constant_elements = __ LoadConstantPoolEntry(index);
     Node* context = __ GetContext();

-    STATIC_ASSERT(CreateObjectLiteralFlags::FlagsBits::kShift == 0);
-    Node* flags_raw = __ Word32And(
-        bytecode_flags,
-        __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask));
+    Node* flags_raw =
+        __ DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(
+            bytecode_flags);
     Node* flags = __ SmiTag(flags_raw);

     Node* result =
         __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
                        literal_index, constant_elements, flags);
     __ StoreRegister(result, __ BytecodeOperandReg(3));
     // TODO(klaasb) build a single dispatch once the call is inlined
     __ Dispatch();
   }
 }

 // CreateClosure <index> <tenured>
 //
 // Creates a new closure for SharedFunctionInfo at position |index| in the
 // constant pool and with the PretenureFlag <tenured>.
 void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* shared = __ LoadConstantPoolEntry(index);
   Node* flags = __ BytecodeOperandFlag(1);
   Node* context = __ GetContext();

   Label call_runtime(assembler, Label::kDeferred);
-  Node* fast_new_closure = __ Word32And(
-      flags, __ Int32Constant(CreateClosureFlags::FastNewClosureBit::kMask));
-  __ GotoUnless(fast_new_closure, &call_runtime);
+  __ GotoUnless(__ IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags),
+                &call_runtime);
   __ SetAccumulator(FastNewClosureStub::Generate(assembler, shared, context));
   __ Dispatch();

   __ Bind(&call_runtime);
   {
-    STATIC_ASSERT(CreateClosureFlags::PretenuredBit::kShift == 0);
-    Node* tenured_raw = __ Word32And(
-        flags, __ Int32Constant(CreateClosureFlags::PretenuredBit::kMask));
+    Node* tenured_raw =
+        __ DecodeWordFromWord32<CreateClosureFlags::PretenuredBit>(flags);
     Node* tenured = __ SmiTag(tenured_raw);
     Node* result = __ CallRuntime(Runtime::kInterpreterNewClosure, context,
                                   shared, tenured);
     __ SetAccumulator(result);
     __ Dispatch();
   }
 }

 // CreateBlockContext <index>
 //
(...skipping 331 matching lines...)
     __ Dispatch();
   }
   __ Bind(&if_slow);
   {
     // Record the fact that we hit the for-in slow path.
     Node* vector_index = __ BytecodeOperandIdx(3);
     Node* type_feedback_vector = __ LoadTypeFeedbackVector();
     Node* megamorphic_sentinel =
         __ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate_));
     __ StoreFixedArrayElement(type_feedback_vector, vector_index,
-                              megamorphic_sentinel, SKIP_WRITE_BARRIER);
+                              megamorphic_sentinel, SKIP_WRITE_BARRIER, 0,
+                              CodeStubAssembler::INTPTR_PARAMETERS);

     // Need to filter the {key} for the {receiver}.
     Node* context = __ GetContext();
     Callable callable = CodeFactory::ForInFilter(assembler->isolate());
     Node* result = __ CallStub(callable, context, key, receiver);
     __ SetAccumulator(result);
     __ Dispatch();
   }
 }

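Note: as in the module-variable handlers above, the fixed-array accessors now
pass CodeStubAssembler::INTPTR_PARAMETERS explicitly, declaring that the index
is an untagged word rather than a Smi, so the element address can be computed
without an untag step. A sketch of the offset arithmetic under that mode
(header and slot sizes are illustrative placeholders, not V8 constants):

    #include <cstdint>

    // With an untagged (intptr) index, the element offset is a plain
    // scale-and-add; a Smi-mode index would need an untag shift first.
    intptr_t ElementOffset(intptr_t index, intptr_t header_size,
                           intptr_t slot_size) {
      return header_size + index * slot_size;
    }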
(...skipping 123 matching lines...)
   __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
                       __ SmiTag(new_state));
   __ SetAccumulator(old_state);

   __ Dispatch();
 }

 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
