Index: src/interpreter/interpreter.cc |
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc |
index 4830f1dc72efa1f90dfece7c047bee278d960916..a890591e5afc433f909e2ea1ebc366f410400d4d 100644 |
--- a/src/interpreter/interpreter.cc |
+++ b/src/interpreter/interpreter.cc |
@@ -1840,6 +1840,17 @@ void Interpreter::DoDebugger(InterpreterAssembler* assembler) { |
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK); |
#undef DEBUG_BREAK |
+void Interpreter::BuildForInPrepareResult(Node* output_register, |
+ Node* cache_type, Node* cache_array, |
+ Node* cache_length, |
+ InterpreterAssembler* assembler) { |
+ __ StoreRegister(cache_type, output_register); |
+ output_register = __ NextRegister(output_register); |
+ __ StoreRegister(cache_array, output_register); |
+ output_register = __ NextRegister(output_register); |
+ __ StoreRegister(cache_length, output_register); |
+} |
+ |
// ForInPrepare <cache_info_triple> |
// |
// Returns state for for..in loop execution based on the object in the |
@@ -1849,17 +1860,93 @@ DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK); |
void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) { |
Node* object = __ GetAccumulator(); |
Node* context = __ GetContext(); |
- Node* result_triple = __ CallRuntime(Runtime::kForInPrepare, context, object); |
- |
- // Set output registers: |
- // 0 == cache_type, 1 == cache_array, 2 == cache_length |
- Node* output_register = __ BytecodeOperandReg(0); |
- for (int i = 0; i < 3; i++) { |
- Node* cache_info = __ Projection(i, result_triple); |
- __ StoreRegister(cache_info, output_register); |
- output_register = __ NextRegister(output_register); |
+ Node* const zero_smi = __ SmiConstant(Smi::FromInt(0)); |
+ |
+ Label test_if_null(assembler), test_if_undefined(assembler), |
+ nothing_to_iterate(assembler, Label::kDeferred), |
rmcilroy
2016/07/19 11:08:54
Just wondering if nothing_to_iterate should be kDeferred?
oth
2016/07/19 12:59:25
I expect this is the lesser trodden path - it's deferred now.
|
+ convert_to_receiver(assembler), already_receiver(assembler), |
+ check_enum_cache(assembler); |
+ |
+ Variable receiver(assembler, MachineRepresentation::kTagged); |
+ |
+ // Test if object is already a receiver, no conversion necessary if so. |
+ Node* instance_type = __ LoadInstanceType(object); |
+ Node* first_receiver_type = __ Int32Constant(FIRST_JS_RECEIVER_TYPE); |
+ __ BranchIf( |
+ assembler->Int32GreaterThanOrEqual(instance_type, first_receiver_type), |
rmcilroy
2016/07/19 11:08:54
There is a BranchIfInt32GreaterThanOrEqual (I think) that could be used here instead.
oth
2016/07/19 12:59:25
Thanks! Done.
|
+ &already_receiver, &test_if_null); |
+ |
+ __ Bind(&test_if_null); |
+ { |
+ __ BranchIf(assembler->WordEqual(object, assembler->NullConstant()), |
rmcilroy
2016/07/19 11:08:54
BranchIfWordEqual (and below)
oth
2016/07/19 12:59:25
Done.
|
+                &nothing_to_iterate, &test_if_undefined); |
+ } |
+ |
+ __ Bind(&test_if_undefined); |
+ { |
+ __ BranchIf(assembler->WordEqual(object, assembler->UndefinedConstant()), |
+                &nothing_to_iterate, &convert_to_receiver); |
+ } |
+ |
+ __ Bind(&convert_to_receiver); |
rmcilroy
2016/07/19 11:08:54
I'm assuming this is relatively uncommon. If so, could you make this label kDeferred?
oth
2016/07/19 12:59:25
Done.
|
+ { |
+ Callable callable = CodeFactory::ToObject(assembler->isolate()); |
+ Node* target = __ HeapConstant(callable.code()); |
+ Node* result = __ CallStub(callable.descriptor(), target, context, object); |
+ receiver.Bind(result); |
+ __ Goto(&check_enum_cache); |
+ } |
+ |
+ __ Bind(&already_receiver); |
+ { |
+ receiver.Bind(object); |
+ __ Goto(&check_enum_cache); |
+ } |
+ |
+ Label use_enum_cache(assembler), use_runtime(assembler); |
rmcilroy
2016/07/19 11:08:54
make use_runtime kDeferred.
oth
2016/07/19 12:59:25
Done.
|
+ __ Bind(&check_enum_cache); |
+ { __ CheckEnumCache(receiver.value(), &use_enum_cache, &use_runtime); } |
+ |
+ __ Bind(&use_enum_cache); |
+ { |
+ // The enum cache is valid. Load the map of the object being |
+ // iterated over and use the cache for the iteration. |
+ Node* cache_type = __ LoadMap(receiver.value()); |
+ Node* cache_length = __ EnumLength(cache_type); |
+ __ GotoIf(assembler->WordEqual(cache_length, zero_smi), |
+              &nothing_to_iterate); |
+ Node* descriptors = __ LoadMapDescriptors(cache_type); |
+ Node* cache_offset = |
+ __ LoadObjectField(descriptors, DescriptorArray::kEnumCacheOffset); |
+ Node* cache_array = __ LoadObjectField( |
+ cache_offset, DescriptorArray::kEnumCacheBridgeCacheOffset); |
+ Node* output_register = __ BytecodeOperandReg(0); |
+ BuildForInPrepareResult(output_register, cache_type, cache_array, |
+ cache_length, assembler); |
+ __ Dispatch(); |
+ } |
+ |
+ __ Bind(&use_runtime); |
+ { |
+ Node* result_triple = |
+ __ CallRuntime(Runtime::kForInPrepare, context, object); |
+ Node* cache_type = __ Projection(0, result_triple); |
+ Node* cache_array = __ Projection(1, result_triple); |
+ Node* cache_length = __ Projection(2, result_triple); |
+ Node* output_register = __ BytecodeOperandReg(0); |
+ BuildForInPrepareResult(output_register, cache_type, cache_array, |
+ cache_length, assembler); |
+ __ Dispatch(); |
+ } |
+ |
+  __ Bind(&nothing_to_iterate); |
+ { |
+ // Receiver is null or undefined or descriptors are zero length. |
+ Node* output_register = __ BytecodeOperandReg(0); |
+ BuildForInPrepareResult(output_register, zero_smi, zero_smi, zero_smi, |
+ assembler); |
+ __ Dispatch(); |
} |
- __ Dispatch(); |
} |
// ForInNext <receiver> <index> <cache_info_pair> |