Chromium Code Reviews

Unified Diff: src/interpreter/interpreter.cc

Issue 2155153002: [interpreter] Update ForInPrepare to conditionally use runtime. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Trim interpreter assembler. Created 4 years, 5 months ago
 // Copyright 2015 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/interpreter/interpreter.h"

 #include <fstream>

 #include "src/ast/prettyprinter.h"
 #include "src/code-factory.h"
(...skipping 1822 matching lines...)
   void Interpreter::Do##Name(InterpreterAssembler* assembler) {              \
     Node* context = __ GetContext();                                         \
     Node* accumulator = __ GetAccumulator();                                 \
     Node* original_handler =                                                 \
         __ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
     __ DispatchToBytecodeHandler(original_handler);                          \
   }
 DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
 #undef DEBUG_BREAK

+void Interpreter::BuildForInPrepareResult(Node* output_register,
+                                          Node* cache_type, Node* cache_array,
+                                          Node* cache_length,
+                                          InterpreterAssembler* assembler) {
+  __ StoreRegister(cache_type, output_register);
+  output_register = __ NextRegister(output_register);
+  __ StoreRegister(cache_array, output_register);
+  output_register = __ NextRegister(output_register);
+  __ StoreRegister(cache_length, output_register);
+}
+
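To make the register contract concrete, here is a minimal self-contained C++ sketch (not part of the patch; the TaggedValue alias and StoreForInTriple helper are hypothetical) of the layout BuildForInPrepareResult produces: the triple lands in three consecutive interpreter registers starting at the bytecode's output operand.

    #include <array>
    #include <cstdint>

    // Hypothetical stand-in for a tagged interpreter value.
    using TaggedValue = intptr_t;

    // Mirrors the StoreRegister/NextRegister sequence above: the triple is
    // written to <cache_info_triple>, <cache_info_triple> + 1 and + 2.
    void StoreForInTriple(std::array<TaggedValue, 3>& regs,
                          TaggedValue cache_type, TaggedValue cache_array,
                          TaggedValue cache_length) {
      regs[0] = cache_type;
      regs[1] = cache_array;
      regs[2] = cache_length;
    }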
 // ForInPrepare <cache_info_triple>
 //
 // Returns state for for..in loop execution based on the object in the
 // accumulator. The result is output in registers |cache_info_triple| to
 // |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
 // and cache_length respectively.
 void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
   Node* object = __ GetAccumulator();
   Node* context = __ GetContext();
-  Node* result_triple = __ CallRuntime(Runtime::kForInPrepare, context, object);
-
-  // Set output registers:
-  //   0 == cache_type, 1 == cache_array, 2 == cache_length
-  Node* output_register = __ BytecodeOperandReg(0);
-  for (int i = 0; i < 3; i++) {
-    Node* cache_info = __ Projection(i, result_triple);
-    __ StoreRegister(cache_info, output_register);
-    output_register = __ NextRegister(output_register);
-  }
-  __ Dispatch();
+  Node* const zero_smi = __ SmiConstant(Smi::FromInt(0));
+
+  Label test_if_null(assembler), test_if_undefined(assembler),
+      nothing_to_iterate(assembler, Label::kDeferred),
+      convert_to_receiver(assembler, Label::kDeferred),
+      already_receiver(assembler), check_enum_cache(assembler);
+
+  Variable receiver(assembler, MachineRepresentation::kTagged);
+
+  // Test if object is already a receiver, no conversion necessary if so.
+  Node* instance_type = __ LoadInstanceType(object);
+  Node* first_receiver_type = __ Int32Constant(FIRST_JS_RECEIVER_TYPE);
+  __ BranchIfInt32GreaterThanOrEqual(instance_type, first_receiver_type,
+                                     &already_receiver, &test_if_null);
+
+  __ Bind(&test_if_null);
+  {
+    __ BranchIfWordEqual(object, assembler->NullConstant(), &nothing_to_iterate,
+                         &test_if_undefined);
+  }
+
+  __ Bind(&test_if_undefined);
+  {
+    __ BranchIfWordEqual(object, assembler->UndefinedConstant(),
+                         &nothing_to_iterate, &convert_to_receiver);
+  }
+
+  __ Bind(&convert_to_receiver);
+  {
+    Callable callable = CodeFactory::ToObject(assembler->isolate());
+    Node* target = __ HeapConstant(callable.code());
+    Node* result = __ CallStub(callable.descriptor(), target, context, object);
+    receiver.Bind(result);
+    __ Goto(&check_enum_cache);
+  }
+
+  __ Bind(&already_receiver);
+  {
+    receiver.Bind(object);
+    __ Goto(&check_enum_cache);
+  }
+
+  Label use_enum_cache(assembler), use_runtime(assembler, Label::kDeferred);
+  __ Bind(&check_enum_cache);
+  { __ CheckEnumCache(receiver.value(), &use_enum_cache, &use_runtime); }
+
+  __ Bind(&use_enum_cache);
+  {
+    // The enum cache is valid. Load the map of the object being
+    // iterated over and use the cache for the iteration.
+    Node* cache_type = __ LoadMap(receiver.value());
+    Node* cache_length = __ EnumLength(cache_type);
+    __ GotoIf(assembler->WordEqual(cache_length, zero_smi),
+              &nothing_to_iterate);
+    Node* descriptors = __ LoadMapDescriptors(cache_type);
+    Node* cache_offset =
+        __ LoadObjectField(descriptors, DescriptorArray::kEnumCacheOffset);
+    Node* cache_array = __ LoadObjectField(
+        cache_offset, DescriptorArray::kEnumCacheBridgeCacheOffset);
+    Node* output_register = __ BytecodeOperandReg(0);
+    BuildForInPrepareResult(output_register, cache_type, cache_array,
+                            cache_length, assembler);
+    __ Dispatch();
+  }
+
+  __ Bind(&use_runtime);
+  {
+    Node* result_triple =
+        __ CallRuntime(Runtime::kForInPrepare, context, object);
+    Node* cache_type = __ Projection(0, result_triple);
+    Node* cache_array = __ Projection(1, result_triple);
+    Node* cache_length = __ Projection(2, result_triple);
+    Node* output_register = __ BytecodeOperandReg(0);
+    BuildForInPrepareResult(output_register, cache_type, cache_array,
+                            cache_length, assembler);
+    __ Dispatch();
+  }
+
+  __ Bind(&nothing_to_iterate);
+  {
+    // Receiver is null or undefined or descriptors are zero length.
+    Node* output_register = __ BytecodeOperandReg(0);
+    BuildForInPrepareResult(output_register, zero_smi, zero_smi, zero_smi,
+                            assembler);
+    __ Dispatch();
+  }
 }
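As a reading aid, here is a minimal self-contained C++ sketch (not V8 code; ObjectInfo, its fields, and ChooseForInPath are hypothetical stand-ins for the assembler checks above) of the decision ladder the new handler walks before it falls back to Runtime::kForInPrepare. Primitive values are first converted with ToObject at the convert_to_receiver label; the sketch folds that into the receiver-side checks.

    #include <cstddef>

    // Hypothetical summary of the facts the assembler code tests.
    struct ObjectInfo {
      bool is_null_or_undefined;  // accumulator holds null or undefined
      bool has_valid_enum_cache;  // CheckEnumCache succeeds on the receiver
      std::size_t enum_length;    // EnumLength of the receiver's map
    };

    enum class ForInPath { kNothingToIterate, kEnumCache, kRuntime };

    // Null/undefined iterates nothing, a valid non-empty enum cache takes the
    // fast path, everything else goes to the runtime; all three paths end in
    // BuildForInPrepareResult followed by Dispatch.
    ForInPath ChooseForInPath(const ObjectInfo& info) {
      if (info.is_null_or_undefined) return ForInPath::kNothingToIterate;
      if (!info.has_valid_enum_cache) return ForInPath::kRuntime;
      if (info.enum_length == 0) return ForInPath::kNothingToIterate;
      return ForInPath::kEnumCache;
    }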

 // ForInNext <receiver> <index> <cache_info_pair>
 //
 // Returns the next enumerable property in the accumulator.
 void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
   Node* receiver_reg = __ BytecodeOperandReg(0);
   Node* receiver = __ LoadRegister(receiver_reg);
   Node* index_reg = __ BytecodeOperandReg(1);
   Node* index = __ LoadRegister(index_reg);
   Node* cache_type_reg = __ BytecodeOperandReg(2);
   Node* cache_type = __ LoadRegister(cache_type_reg);
   Node* cache_array_reg = __ NextRegister(cache_type_reg);
   Node* cache_array = __ LoadRegister(cache_array_reg);

   // Load the next key from the enumeration array.
   Node* key = __ LoadFixedArrayElement(cache_array, index, 0,
                                        CodeStubAssembler::SMI_PARAMETERS);

   // Check if we can use the for-in fast path potentially using the enum cache.
   Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
   Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
-  Node* condition = __ WordEqual(receiver_map, cache_type);
-  __ BranchIf(condition, &if_fast, &if_slow);
+  __ BranchIfWordEqual(receiver_map, cache_type, &if_fast, &if_slow);
   __ Bind(&if_fast);
   {
     // Enum cache in use for {receiver}, the {key} is definitely valid.
     __ SetAccumulator(key);
     __ Dispatch();
   }
   __ Bind(&if_slow);
   {
     // Record the fact that we hit the for-in slow path.
     Node* vector_index = __ BytecodeOperandIdx(3);
(...skipping 138 matching lines...)
   __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
                       __ SmiTag(new_state));
   __ SetAccumulator(old_state);

   __ Dispatch();
 }

 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
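Finally, a hedged sketch (not V8 code; the types below are hypothetical placeholders) of the invariant behind the single BranchIfWordEqual that now drives DoForInNext: while the receiver's map is still the cache_type recorded by ForInPrepare, the enum cache is unchanged, so the key loaded from cache_array is valid as-is; any map change takes the deferred slow path, which records the slow-path hit in the feedback vector.

    // Hypothetical placeholder for a heap map; the real check compares
    // tagged Map pointers loaded from HeapObject::kMapOffset.
    struct Map {};

    struct ForInState {
      const Map* cache_type;  // receiver map captured by ForInPrepare
    };

    // True when DoForInNext's fast path may place the cached key directly in
    // the accumulator without further checks.
    bool CanUseForInFastPath(const Map* receiver_map, const ForInState& state) {
      return receiver_map == state.cache_type;
    }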
