Chromium Code Reviews

Unified Diff: src/interpreter/interpreter.cc

Issue 1973873004: [Interpreter] Make Fast-paths of StackCheck, Jump, Return, ForInNext not build a frame. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Rebase Created 4 years, 7 months ago
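
The recurring transformation in this patch: handlers that used to funnel their fast and slow paths through a shared end label and a Variable (a merge that presumably forced a Phi and, with it, frame construction even on the fast path) now call Dispatch() directly in each branch, with the slow path marked Label::kDeferred so it is emitted out-of-line. A minimal schematic in the style of these handlers; DoExample, SomeFastPathCondition, SomeFastResult, and Runtime::kSlowPath are hypothetical stand-ins, not names from this CL:

    // Schematic sketch only, not a handler from this patch.
    void Interpreter::DoExample(InterpreterAssembler* assembler) {
      Label if_fast(assembler);
      Label if_slow(assembler, Label::kDeferred);  // emitted out-of-line
      Node* condition = SomeFastPathCondition();   // hypothetical predicate
      __ BranchIf(condition, &if_fast, &if_slow);

      __ Bind(&if_fast);
      {
        // No value is kept live across a merge (no Variable, no Phi),
        // so the fast path can dispatch without building a frame.
        __ SetAccumulator(SomeFastResult());
        __ Dispatch();
      }

      __ Bind(&if_slow);
      {
        // The slow path pays for its own runtime call and dispatches itself.
        Node* context = __ GetContext();
        __ SetAccumulator(__ CallRuntime(Runtime::kSlowPath, context));
        __ Dispatch();
      }
    }
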
 // Copyright 2015 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/interpreter/interpreter.h"

 #include <fstream>

 #include "src/ast/prettyprinter.h"
 #include "src/code-factory.h"
(...skipping 844 matching lines...)
 //
 // Perform logical-not on the accumulator, first casting the
 // accumulator to a boolean value if required.
 void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
   Callable callable = CodeFactory::ToBoolean(isolate_);
   Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
   Node* context = __ GetContext();
   Node* to_boolean_value =
       __ CallStub(callable.descriptor(), target, context, accumulator);
-  InterpreterAssembler::Label if_true(assembler), if_false(assembler);
+  Label if_true(assembler), if_false(assembler), end(assembler);
   Node* true_value = __ BooleanConstant(true);
   Node* false_value = __ BooleanConstant(false);
-  Node* condition = __ WordEqual(to_boolean_value, true_value);
-  __ Branch(condition, &if_true, &if_false);
+  __ BranchIfWordEqual(to_boolean_value, true_value, &if_true, &if_false);
   __ Bind(&if_true);
   {
     __ SetAccumulator(false_value);
-    __ Dispatch();
+    __ Goto(&end);
   }
   __ Bind(&if_false);
   {
     __ SetAccumulator(true_value);
-    __ Dispatch();
+    __ Goto(&end);
   }
+  __ Bind(&end);
+  __ Dispatch();
 }
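
The old WordEqual/Branch pair collapses into a single BranchIfWordEqual call. Since src/compiler/code-assembler.h is also touched by this CL, this is presumably a thin convenience wrapper added there; a sketch of what such a helper could look like, with the exact signature being an assumption:

    // Assumed shape of the helper; the real declaration lives in
    // src/compiler/code-assembler.h.
    void CodeAssembler::BranchIfWordEqual(Node* a, Node* b, Label* if_true,
                                          Label* if_false) {
      // Fuse the comparison and the branch that handlers previously
      // spelled out as two separate statements.
      Branch(WordEqual(a, b), if_true, if_false);
    }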

 // TypeOf
 //
 // Load the accumulator with the string representing the type of the
 // object in the accumulator.
 void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
   Callable callable = CodeFactory::Typeof(isolate_);
   Node* target = __ HeapConstant(callable.code());
   Node* accumulator = __ GetAccumulator();
(...skipping 556 matching lines...)
 // CreateObjectLiteral <element_idx> <literal_idx> <flags>
 //
 // Creates an object literal for literal index <literal_idx> with
 // CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
 void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
   Node* literal_index_raw = __ BytecodeOperandIdx(1);
   Node* literal_index = __ SmiTag(literal_index_raw);
   Node* bytecode_flags = __ BytecodeOperandFlag(2);
   Node* closure = __ LoadRegister(Register::function_closure());

-  Variable result(assembler, MachineRepresentation::kTagged);
-
   // Check if we can do a fast clone or have to call the runtime.
-  Label end(assembler), if_fast_clone(assembler),
+  Label if_fast_clone(assembler),
       if_not_fast_clone(assembler, Label::kDeferred);
   Node* fast_clone_properties_count =
       __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
           bytecode_flags);
   __ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);

   __ Bind(&if_fast_clone);
   {
     // If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
-    Node* clone = FastCloneShallowObjectStub::GenerateFastPath(
+    Node* result = FastCloneShallowObjectStub::GenerateFastPath(
         assembler, &if_not_fast_clone, closure, literal_index,
         fast_clone_properties_count);
-    result.Bind(clone);
-    __ Goto(&end);
+    __ SetAccumulator(result);
+    __ Dispatch();
   }

   __ Bind(&if_not_fast_clone);
   {
     // If we can't do a fast clone, call into the runtime.
     Node* index = __ BytecodeOperandIdx(0);
     Node* constant_elements = __ LoadConstantPoolEntry(index);
     Node* context = __ GetContext();

     STATIC_ASSERT(CreateObjectLiteralFlags::FlagsBits::kShift == 0);
     Node* flags_raw = __ Word32And(
         bytecode_flags,
         __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask));
     Node* flags = __ SmiTag(flags_raw);

-    result.Bind(__ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
-                               literal_index, constant_elements, flags));
-    __ Goto(&end);
+    Node* result =
+        __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
+                       literal_index, constant_elements, flags);
+    __ SetAccumulator(result);
+    __ Dispatch();
   }
-
-  __ Bind(&end);
-  __ SetAccumulator(result.value());
-  __ Dispatch();
 }

 // CreateClosure <index> <tenured>
 //
 // Creates a new closure for SharedFunctionInfo at position |index| in the
 // constant pool and with the PretenureFlag <tenured>.
 void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
   // TODO(rmcilroy): Possibly call FastNewClosureStub when possible instead of
   // calling into the runtime.
   Node* index = __ BytecodeOperandIdx(0);
   Node* shared = __ LoadConstantPoolEntry(index);
   Node* tenured_raw = __ BytecodeOperandFlag(1);
   Node* tenured = __ SmiTag(tenured_raw);
   Node* context = __ GetContext();
   Node* result =
       __ CallRuntime(Runtime::kInterpreterNewClosure, context, shared, tenured);
   __ SetAccumulator(result);
   __ Dispatch();
 }

 // CreateMappedArguments
 //
 // Creates a new mapped arguments object.
 void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
   Node* closure = __ LoadRegister(Register::function_closure());
   Node* context = __ GetContext();

-  Variable result(assembler, MachineRepresentation::kTagged);
-  Label end(assembler), if_duplicate_parameters(assembler, Label::kDeferred),
-      if_not_duplicate_parameters(assembler);
+  Label if_duplicate_parameters(assembler, Label::kDeferred);
+  Label if_not_duplicate_parameters(assembler);

   // Check if function has duplicate parameters.
   // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports
   // duplicate parameters.
   Node* shared_info =
       __ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
   Node* compiler_hints = __ LoadObjectField(
       shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset,
       MachineType::Uint8());
   Node* duplicate_parameters_bit = __ Int32Constant(
       1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
   Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
   __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);

+  __ Bind(&if_not_duplicate_parameters);
+  {
+    // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan stub.
+    Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true);
+    Node* target = __ HeapConstant(callable.code());
+    Node* result = __ CallStub(callable.descriptor(), target, context, closure);
+    __ SetAccumulator(result);
+    __ Dispatch();
+  }
+
   __ Bind(&if_duplicate_parameters);
   {
-    result.Bind(
-        __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure));
-    __ Goto(&end);
+    Node* result =
+        __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
+    __ SetAccumulator(result);
+    __ Dispatch();
   }
-
-  __ Bind(&if_not_duplicate_parameters);
-  {
-    Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true);
-    Node* target = __ HeapConstant(callable.code());
-    result.Bind(__ CallStub(callable.descriptor(), target, context, closure));
-    __ Goto(&end);
-  }
-  __ Bind(&end);
-  __ SetAccumulator(result.value());
-  __ Dispatch();
 }


 // CreateUnmappedArguments
 //
 // Creates a new unmapped arguments object.
 void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
+  // TODO(rmcilroy): Inline FastNewStrictArguments when it is a TurboFan stub.
   Callable callable = CodeFactory::FastNewStrictArguments(isolate_, true);
   Node* target = __ HeapConstant(callable.code());
   Node* context = __ GetContext();
   Node* closure = __ LoadRegister(Register::function_closure());
   Node* result = __ CallStub(callable.descriptor(), target, context, closure);
   __ SetAccumulator(result);
   __ Dispatch();
 }

 // CreateRestParameter
 //
 // Creates a new rest parameter array.
 void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
+  // TODO(rmcilroy): Inline FastNewRestArguments when it is a TurboFan stub.
   Callable callable = CodeFactory::FastNewRestParameter(isolate_, true);
   Node* target = __ HeapConstant(callable.code());
   Node* closure = __ LoadRegister(Register::function_closure());
   Node* context = __ GetContext();
   Node* result = __ CallStub(callable.descriptor(), target, context, closure);
   __ SetAccumulator(result);
   __ Dispatch();
 }

 // StackCheck
 //
 // Performs a stack guard check.
 void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
-  __ StackCheck();
+  Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);
+
+  Node* interrupt = __ StackCheckTriggeredInterrupt();
+  __ BranchIf(interrupt, &stack_check_interrupt, &ok);
+
+  __ Bind(&ok);
   __ Dispatch();
+
+  __ Bind(&stack_check_interrupt);
+  {
+    Node* context = __ GetContext();
+    __ CallRuntime(Runtime::kStackGuard, context);
+    __ Dispatch();
+  }
 }

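StackCheckTriggeredInterrupt replaces the old monolithic __ StackCheck() so that the limit test stays inline while the Runtime::kStackGuard call moves into a deferred block. A plausible sketch of the predicate, assuming the usual V8 scheme in which the stack guard requests an interrupt by lowering the stack limit, so a stack pointer below the limit means either overflow or a pending interrupt (the helper names are assumptions; the real body is in interpreter-assembler.cc, also part of this CL):

    // Assumed implementation sketch, not the verbatim patch contents.
    Node* InterpreterAssembler::StackCheckTriggeredInterrupt() {
      Node* sp = LoadStackPointer();
      Node* stack_limit = Load(
          MachineType::Pointer(),
          ExternalConstant(
              ExternalReference::address_of_stack_limit(isolate())));
      // sp < limit means a stack overflow or a requested interrupt.
      return UintPtrLessThan(sp, stack_limit);
    }
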
 // Throw
 //
 // Throws the exception in the accumulator.
 void Interpreter::DoThrow(InterpreterAssembler* assembler) {
   Node* exception = __ GetAccumulator();
   Node* context = __ GetContext();
   __ CallRuntime(Runtime::kThrow, context, exception);
   // We shouldn't ever return from a throw.
(...skipping 75 matching lines...)
   Node* index = __ LoadRegister(index_reg);
   Node* cache_type_reg = __ BytecodeOperandReg(2);
   Node* cache_type = __ LoadRegister(cache_type_reg);
   Node* cache_array_reg = __ NextRegister(cache_type_reg);
   Node* cache_array = __ LoadRegister(cache_array_reg);

   // Load the next key from the enumeration array.
   Node* key = __ LoadFixedArrayElementSmiIndex(cache_array, index);

   // Check if we can use the for-in fast path potentially using the enum cache.
-  InterpreterAssembler::Label if_fast(assembler), if_slow(assembler);
+  Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
   Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
   Node* condition = __ WordEqual(receiver_map, cache_type);
-  __ Branch(condition, &if_fast, &if_slow);
+  __ BranchIf(condition, &if_fast, &if_slow);
   __ Bind(&if_fast);
   {
     // Enum cache in use for {receiver}, the {key} is definitely valid.
     __ SetAccumulator(key);
     __ Dispatch();
   }
   __ Bind(&if_slow);
   {
     // Record the fact that we hit the for-in slow path.
     Node* vector_index = __ BytecodeOperandIdx(3);
(...skipping 15 matching lines...)
 // ForInDone <index> <cache_length>
 //
 // Returns true if the end of the enumerable properties has been reached.
 void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
   Node* index_reg = __ BytecodeOperandReg(0);
   Node* index = __ LoadRegister(index_reg);
   Node* cache_length_reg = __ BytecodeOperandReg(1);
   Node* cache_length = __ LoadRegister(cache_length_reg);

   // Check if {index} is at {cache_length} already.
-  InterpreterAssembler::Label if_true(assembler), if_false(assembler);
-  Node* condition = __ WordEqual(index, cache_length);
-  __ Branch(condition, &if_true, &if_false);
+  Label if_true(assembler), if_false(assembler), end(assembler);
+  __ BranchIfWordEqual(index, cache_length, &if_true, &if_false);
   __ Bind(&if_true);
   {
-    Node* result = __ BooleanConstant(true);
-    __ SetAccumulator(result);
-    __ Dispatch();
+    __ SetAccumulator(__ BooleanConstant(true));
+    __ Goto(&end);
   }
   __ Bind(&if_false);
   {
-    Node* result = __ BooleanConstant(false);
-    __ SetAccumulator(result);
-    __ Dispatch();
+    __ SetAccumulator(__ BooleanConstant(false));
+    __ Goto(&end);
   }
+  __ Bind(&end);
+  __ Dispatch();
 }

 // ForInStep <index>
 //
 // Increments the loop counter in register |index| and stores the result
 // in the accumulator.
 void Interpreter::DoForInStep(InterpreterAssembler* assembler) {
   Node* index_reg = __ BytecodeOperandReg(0);
   Node* index = __ LoadRegister(index_reg);
   Node* one = __ SmiConstant(Smi::FromInt(1));
(...skipping 66 matching lines...)
   __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
                       __ SmiTag(new_state));
   __ SetAccumulator(old_state);

   __ Dispatch();
 }

 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
