Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(79)

Side by Side Diff: src/interpreter/interpreter.cc

Issue 2142273003: [interpreter] Inline Star on dispatch for some bytecodes (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: remove 100% dispatching bytecodes again Created 4 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 the V8 project authors. All rights reserved. 1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/interpreter/interpreter.h" 5 #include "src/interpreter/interpreter.h"
6 6
7 #include <fstream> 7 #include <fstream>
8 8
9 #include "src/ast/prettyprinter.h" 9 #include "src/ast/prettyprinter.h"
10 #include "src/code-factory.h" 10 #include "src/code-factory.h"
(...skipping 1600 matching lines...) Expand 10 before | Expand all | Expand 10 after
1611 // Creates an object literal for literal index <literal_idx> with 1611 // Creates an object literal for literal index <literal_idx> with
1612 // CreateObjectLiteralFlags <flags> and constant elements in <element_idx>. 1612 // CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
1613 void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) { 1613 void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
1614 Node* literal_index_raw = __ BytecodeOperandIdx(1); 1614 Node* literal_index_raw = __ BytecodeOperandIdx(1);
1615 Node* literal_index = __ SmiTag(literal_index_raw); 1615 Node* literal_index = __ SmiTag(literal_index_raw);
1616 Node* bytecode_flags = __ BytecodeOperandFlag(2); 1616 Node* bytecode_flags = __ BytecodeOperandFlag(2);
1617 Node* closure = __ LoadRegister(Register::function_closure()); 1617 Node* closure = __ LoadRegister(Register::function_closure());
1618 1618
1619 // Check if we can do a fast clone or have to call the runtime. 1619 // Check if we can do a fast clone or have to call the runtime.
1620 Label if_fast_clone(assembler), 1620 Label if_fast_clone(assembler),
1621 if_not_fast_clone(assembler, Label::kDeferred); 1621 if_not_fast_clone(assembler, Label::kDeferred), dispatch(assembler);
1622 Node* fast_clone_properties_count = 1622 Node* fast_clone_properties_count =
1623 __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>( 1623 __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
1624 bytecode_flags); 1624 bytecode_flags);
1625 __ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone); 1625 __ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
1626 1626
1627 __ Bind(&if_fast_clone); 1627 __ Bind(&if_fast_clone);
1628 { 1628 {
1629 // If we can do a fast clone do the fast-path in FastCloneShallowObjectStub. 1629 // If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
1630 Node* result = FastCloneShallowObjectStub::GenerateFastPath( 1630 Node* result = FastCloneShallowObjectStub::GenerateFastPath(
1631 assembler, &if_not_fast_clone, closure, literal_index, 1631 assembler, &if_not_fast_clone, closure, literal_index,
1632 fast_clone_properties_count); 1632 fast_clone_properties_count);
1633 __ SetAccumulator(result); 1633 __ SetAccumulator(result);
1634 __ Dispatch(); 1634 __ Goto(&dispatch);
1635 } 1635 }
1636 1636
1637 __ Bind(&if_not_fast_clone); 1637 __ Bind(&if_not_fast_clone);
1638 { 1638 {
1639 // If we can't do a fast clone, call into the runtime. 1639 // If we can't do a fast clone, call into the runtime.
1640 Node* index = __ BytecodeOperandIdx(0); 1640 Node* index = __ BytecodeOperandIdx(0);
1641 Node* constant_elements = __ LoadConstantPoolEntry(index); 1641 Node* constant_elements = __ LoadConstantPoolEntry(index);
1642 Node* context = __ GetContext(); 1642 Node* context = __ GetContext();
1643 1643
1644 STATIC_ASSERT(CreateObjectLiteralFlags::FlagsBits::kShift == 0); 1644 STATIC_ASSERT(CreateObjectLiteralFlags::FlagsBits::kShift == 0);
1645 Node* flags_raw = __ Word32And( 1645 Node* flags_raw = __ Word32And(
1646 bytecode_flags, 1646 bytecode_flags,
1647 __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask)); 1647 __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask));
1648 Node* flags = __ SmiTag(flags_raw); 1648 Node* flags = __ SmiTag(flags_raw);
1649 1649
1650 Node* result = 1650 Node* result =
1651 __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure, 1651 __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
1652 literal_index, constant_elements, flags); 1652 literal_index, constant_elements, flags);
1653 __ SetAccumulator(result); 1653 __ SetAccumulator(result);
1654 __ Dispatch(); 1654 __ Goto(&dispatch);
1655 } 1655 }
1656 __ Bind(&dispatch);
1657 __ Dispatch();
rmcilroy 2016/07/19 10:03:56 Can we do this change separately (if at all) - as
klaasb 2016/07/19 14:24:13 Done.
1656 } 1658 }
1657 1659
1658 // CreateClosure <index> <tenured> 1660 // CreateClosure <index> <tenured>
1659 // 1661 //
1660 // Creates a new closure for SharedFunctionInfo at position |index| in the 1662 // Creates a new closure for SharedFunctionInfo at position |index| in the
1661 // constant pool and with the PretenureFlag <tenured>. 1663 // constant pool and with the PretenureFlag <tenured>.
1662 void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) { 1664 void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
1663 Node* index = __ BytecodeOperandIdx(0); 1665 Node* index = __ BytecodeOperandIdx(0);
1664 Node* shared = __ LoadConstantPoolEntry(index); 1666 Node* shared = __ LoadConstantPoolEntry(index);
1665 Node* flags = __ BytecodeOperandFlag(1); 1667 Node* flags = __ BytecodeOperandFlag(1);
(...skipping 21 matching lines...) Expand all
1687 1689
1688 // CreateMappedArguments 1690 // CreateMappedArguments
1689 // 1691 //
1690 // Creates a new mapped arguments object. 1692 // Creates a new mapped arguments object.
1691 void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) { 1693 void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
1692 Node* closure = __ LoadRegister(Register::function_closure()); 1694 Node* closure = __ LoadRegister(Register::function_closure());
1693 Node* context = __ GetContext(); 1695 Node* context = __ GetContext();
1694 1696
1695 Label if_duplicate_parameters(assembler, Label::kDeferred); 1697 Label if_duplicate_parameters(assembler, Label::kDeferred);
1696 Label if_not_duplicate_parameters(assembler); 1698 Label if_not_duplicate_parameters(assembler);
1699 Label dispatch(assembler);
1697 1700
1698 // Check if function has duplicate parameters. 1701 // Check if function has duplicate parameters.
1699 // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports 1702 // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports
1700 // duplicate parameters. 1703 // duplicate parameters.
1701 Node* shared_info = 1704 Node* shared_info =
1702 __ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset); 1705 __ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
1703 Node* compiler_hints = __ LoadObjectField( 1706 Node* compiler_hints = __ LoadObjectField(
1704 shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset, 1707 shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset,
1705 MachineType::Uint8()); 1708 MachineType::Uint8());
1706 Node* duplicate_parameters_bit = __ Int32Constant( 1709 Node* duplicate_parameters_bit = __ Int32Constant(
1707 1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte); 1710 1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
1708 Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit); 1711 Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
1709 __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters); 1712 __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
1710 1713
1711 __ Bind(&if_not_duplicate_parameters); 1714 __ Bind(&if_not_duplicate_parameters);
1712 { 1715 {
1713 // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan stub. 1716 // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan stub.
1714 Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true); 1717 Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true);
1715 Node* target = __ HeapConstant(callable.code()); 1718 Node* target = __ HeapConstant(callable.code());
1716 Node* result = __ CallStub(callable.descriptor(), target, context, closure); 1719 Node* result = __ CallStub(callable.descriptor(), target, context, closure);
1717 __ SetAccumulator(result); 1720 __ SetAccumulator(result);
1718 __ Dispatch(); 1721 __ Goto(&dispatch);
1719 } 1722 }
1720 1723
1721 __ Bind(&if_duplicate_parameters); 1724 __ Bind(&if_duplicate_parameters);
1722 { 1725 {
1723 Node* result = 1726 Node* result =
1724 __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure); 1727 __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
1725 __ SetAccumulator(result); 1728 __ SetAccumulator(result);
1726 __ Dispatch(); 1729 __ Goto(&dispatch);
1727 } 1730 }
1731 __ Bind(&dispatch);
1732 __ Dispatch();
rmcilroy 2016/07/19 10:03:56 Ditto (same will apply once we can inline FastNewSloppyArguments)
klaasb 2016/07/19 14:24:13 Done.
1728 } 1733 }
1729 1734
1730 // CreateUnmappedArguments 1735 // CreateUnmappedArguments
1731 // 1736 //
1732 // Creates a new unmapped arguments object. 1737 // Creates a new unmapped arguments object.
1733 void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) { 1738 void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
1734 // TODO(rmcilroy): Inline FastNewStrictArguments when it is a TurboFan stub. 1739 // TODO(rmcilroy): Inline FastNewStrictArguments when it is a TurboFan stub.
1735 Callable callable = CodeFactory::FastNewStrictArguments(isolate_, true); 1740 Callable callable = CodeFactory::FastNewStrictArguments(isolate_, true);
1736 Node* target = __ HeapConstant(callable.code()); 1741 Node* target = __ HeapConstant(callable.code());
1737 Node* context = __ GetContext(); 1742 Node* context = __ GetContext();
(...skipping 286 matching lines...) Expand 10 before | Expand all | Expand 10 after
2024 __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, 2029 __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
2025 __ SmiTag(new_state)); 2030 __ SmiTag(new_state));
2026 __ SetAccumulator(old_state); 2031 __ SetAccumulator(old_state);
2027 2032
2028 __ Dispatch(); 2033 __ Dispatch();
2029 } 2034 }
2030 2035
2031 } // namespace interpreter 2036 } // namespace interpreter
2032 } // namespace internal 2037 } // namespace internal
2033 } // namespace v8 2038 } // namespace v8
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698