OLD | NEW |
1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/interpreter/interpreter.h" | 5 #include "src/interpreter/interpreter.h" |
6 | 6 |
7 #include <fstream> | 7 #include <fstream> |
8 #include <memory> | 8 #include <memory> |
9 | 9 |
10 #include "src/ast/prettyprinter.h" | 10 #include "src/ast/prettyprinter.h" |
(...skipping 1204 matching lines...)
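The change running through this file replaces the specialized BranchIf* helpers with a single Branch that takes an explicit condition node. A condensed sketch of the rewrite, with signatures inferred from the call sites below rather than from the real assembler headers:

    // Inferred shapes; the actual declarations live in CodeStubAssembler /
    // InterpreterAssembler.
    Node* WordEqual(Node* a, Node* b);               // builds a condition node
    void Branch(Node* condition, Label* if_true, Label* if_false);

    // Mechanical rewrites applied throughout:
    //   BranchIf(cond, &t, &f)          -> Branch(cond, &t, &f)
    //   BranchIfWordEqual(a, b, &t, &f) -> Branch(WordEqual(a, b), &t, &f)
    //   BranchIfInt32LessThanOrEqual(a, b, &t, &f)
    //                                   -> Branch(Int32LessThanOrEqual(a, b), &t, &f)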
1215 | 1215 |
1216 Node* reg_index = __ BytecodeOperandReg(1); | 1216 Node* reg_index = __ BytecodeOperandReg(1); |
1217 Node* left = __ LoadRegister(reg_index); | 1217 Node* left = __ LoadRegister(reg_index); |
1218 Node* raw_int = __ BytecodeOperandImm(0); | 1218 Node* raw_int = __ BytecodeOperandImm(0); |
1219 Node* right = __ SmiTag(raw_int); | 1219 Node* right = __ SmiTag(raw_int); |
1220 Node* slot_index = __ BytecodeOperandIdx(2); | 1220 Node* slot_index = __ BytecodeOperandIdx(2); |
1221 Node* type_feedback_vector = __ LoadTypeFeedbackVector(); | 1221 Node* type_feedback_vector = __ LoadTypeFeedbackVector(); |
1222 | 1222 |
1223 // {right} is known to be a Smi. | 1223 // {right} is known to be a Smi. |
1224 // Check if {left} is a Smi and, if so, take the fast path. | 1224 // Check if {left} is a Smi and, if so, take the fast path. |
1225 __ BranchIf(__ TaggedIsSmi(left), &fastpath, &slowpath); | 1225 __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath); |
1226 __ Bind(&fastpath); | 1226 __ Bind(&fastpath); |
1227 { | 1227 { |
1228 // Try fast Smi addition first. | 1228 // Try fast Smi addition first. |
1229 Node* pair = __ IntPtrAddWithOverflow(__ BitcastTaggedToWord(left), | 1229 Node* pair = __ IntPtrAddWithOverflow(__ BitcastTaggedToWord(left), |
1230 __ BitcastTaggedToWord(right)); | 1230 __ BitcastTaggedToWord(right)); |
1231 Node* overflow = __ Projection(1, pair); | 1231 Node* overflow = __ Projection(1, pair); |
1232 | 1232 |
1233 // Check if the Smi addition overflowed. | 1233 // Check if the Smi addition overflowed. |
1234 Label if_notoverflow(assembler); | 1234 Label if_notoverflow(assembler); |
1235 __ BranchIf(overflow, &slowpath, &if_notoverflow); | 1235 __ Branch(overflow, &slowpath, &if_notoverflow); |
1236 __ Bind(&if_notoverflow); | 1236 __ Bind(&if_notoverflow); |
1237 { | 1237 { |
1238 __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall), | 1238 __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall), |
1239 type_feedback_vector, slot_index); | 1239 type_feedback_vector, slot_index); |
1240 var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair))); | 1240 var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair))); |
1241 __ Goto(&end); | 1241 __ Goto(&end); |
1242 } | 1242 } |
1243 } | 1243 } |
1244 __ Bind(&slowpath); | 1244 __ Bind(&slowpath); |
1245 { | 1245 { |
(...skipping 23 matching lines...)
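For readers outside V8: the fast path above works because Smis carry a zero tag bit, so the two tagged words can be added directly and a machine-level signed overflow coincides with leaving the Smi range; the fast path also records kSignedSmall feedback for the optimizing compiler. A minimal self-contained sketch, assuming a hypothetical 1-bit Smi encoding and GCC/Clang's __builtin_add_overflow (V8's real encoding is platform-dependent):

    #include <cstdint>
    #include <cstdio>

    using Word = intptr_t;

    // Hypothetical 1-bit tagging: a Smi stores value << 1 with tag bit 0.
    bool TaggedIsSmi(Word w) { return (w & 1) == 0; }
    Word SmiTag(Word value) { return value << 1; }
    Word SmiUntag(Word w) { return w >> 1; }

    // Adding the tagged words adds the payloads ((a<<1) + (b<<1) == (a+b)<<1),
    // and a signed word overflow is exactly a Smi range overflow, which is
    // what IntPtrAddWithOverflow's second projection reports.
    bool FastSmiAdd(Word left, Word right, Word* result) {
      return !__builtin_add_overflow(left, right, result);  // true on fast path
    }

    int main() {
      Word sum;
      if (TaggedIsSmi(SmiTag(20)) && FastSmiAdd(SmiTag(20), SmiTag(22), &sum))
        printf("%ld\n", (long)SmiUntag(sum));  // prints 42
      return 0;
    }

The SubSmi handler below is the same shape with IntPtrSubWithOverflow.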
1269 | 1269 |
1270 Node* reg_index = __ BytecodeOperandReg(1); | 1270 Node* reg_index = __ BytecodeOperandReg(1); |
1271 Node* left = __ LoadRegister(reg_index); | 1271 Node* left = __ LoadRegister(reg_index); |
1272 Node* raw_int = __ BytecodeOperandImm(0); | 1272 Node* raw_int = __ BytecodeOperandImm(0); |
1273 Node* right = __ SmiTag(raw_int); | 1273 Node* right = __ SmiTag(raw_int); |
1274 Node* slot_index = __ BytecodeOperandIdx(2); | 1274 Node* slot_index = __ BytecodeOperandIdx(2); |
1275 Node* type_feedback_vector = __ LoadTypeFeedbackVector(); | 1275 Node* type_feedback_vector = __ LoadTypeFeedbackVector(); |
1276 | 1276 |
1277 // {right} is known to be a Smi. | 1277 // {right} is known to be a Smi. |
1278 // Check if {left} is a Smi and, if so, take the fast path. | 1278 // Check if {left} is a Smi and, if so, take the fast path. |
1279 __ BranchIf(__ TaggedIsSmi(left), &fastpath, &slowpath); | 1279 __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath); |
1280 __ Bind(&fastpath); | 1280 __ Bind(&fastpath); |
1281 { | 1281 { |
1282 // Try fast Smi subtraction first. | 1282 // Try fast Smi subtraction first. |
1283 Node* pair = __ IntPtrSubWithOverflow(__ BitcastTaggedToWord(left), | 1283 Node* pair = __ IntPtrSubWithOverflow(__ BitcastTaggedToWord(left), |
1284 __ BitcastTaggedToWord(right)); | 1284 __ BitcastTaggedToWord(right)); |
1285 Node* overflow = __ Projection(1, pair); | 1285 Node* overflow = __ Projection(1, pair); |
1286 | 1286 |
1287 // Check if the Smi subtraction overflowed. | 1287 // Check if the Smi subtraction overflowed. |
1288 Label if_notoverflow(assembler); | 1288 Label if_notoverflow(assembler); |
1289 __ BranchIf(overflow, &slowpath, &if_notoverflow); | 1289 __ Branch(overflow, &slowpath, &if_notoverflow); |
1290 __ Bind(&if_notoverflow); | 1290 __ Bind(&if_notoverflow); |
1291 { | 1291 { |
1292 __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall), | 1292 __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall), |
1293 type_feedback_vector, slot_index); | 1293 type_feedback_vector, slot_index); |
1294 var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair))); | 1294 var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair))); |
1295 __ Goto(&end); | 1295 __ Goto(&end); |
1296 } | 1296 } |
1297 } | 1297 } |
1298 __ Bind(&slowpath); | 1298 __ Bind(&slowpath); |
1299 { | 1299 { |
(...skipping 223 matching lines...)
1523 // LogicalNot | 1523 // LogicalNot |
1524 // | 1524 // |
1525 // Perform logical-not on the accumulator, which must already be a boolean | 1525 // Perform logical-not on the accumulator, which must already be a boolean |
1526 // value. | 1526 // value. |
1527 void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) { | 1527 void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) { |
1528 Node* value = __ GetAccumulator(); | 1528 Node* value = __ GetAccumulator(); |
1529 Variable result(assembler, MachineRepresentation::kTagged); | 1529 Variable result(assembler, MachineRepresentation::kTagged); |
1530 Label if_true(assembler), if_false(assembler), end(assembler); | 1530 Label if_true(assembler), if_false(assembler), end(assembler); |
1531 Node* true_value = __ BooleanConstant(true); | 1531 Node* true_value = __ BooleanConstant(true); |
1532 Node* false_value = __ BooleanConstant(false); | 1532 Node* false_value = __ BooleanConstant(false); |
1533 __ BranchIfWordEqual(value, true_value, &if_true, &if_false); | 1533 __ Branch(__ WordEqual(value, true_value), &if_true, &if_false); |
1534 __ Bind(&if_true); | 1534 __ Bind(&if_true); |
1535 { | 1535 { |
1536 result.Bind(false_value); | 1536 result.Bind(false_value); |
1537 __ Goto(&end); | 1537 __ Goto(&end); |
1538 } | 1538 } |
1539 __ Bind(&if_false); | 1539 __ Bind(&if_false); |
1540 { | 1540 { |
1541 if (FLAG_debug_code) { | 1541 if (FLAG_debug_code) { |
1542 __ AbortIfWordNotEqual(value, false_value, | 1542 __ AbortIfWordNotEqual(value, false_value, |
1543 BailoutReason::kExpectedBooleanValue); | 1543 BailoutReason::kExpectedBooleanValue); |
(...skipping 513 matching lines...)
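LogicalNot compares by identity rather than truthiness: the accumulator must already hold one of the two canonical boolean heap objects, which is why a plain WordEqual (pointer comparison) suffices and the debug build can assert the remaining case. A sketch with hypothetical singleton booleans:

    #include <cassert>

    struct Boolean {};                       // stand-in for the heap objects
    static const Boolean kTrue{}, kFalse{};  // canonical true/false values

    const Boolean* LogicalNot(const Boolean* value) {
      if (value == &kTrue) return &kFalse;   // WordEqual(value, true_value)
      // Mirrors AbortIfWordNotEqual under FLAG_debug_code.
      assert(value == &kFalse && "kExpectedBooleanValue");
      return &kTrue;
    }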
2057 Node* literal_index = __ SmiTag(literal_index_raw); | 2057 Node* literal_index = __ SmiTag(literal_index_raw); |
2058 Node* closure = __ LoadRegister(Register::function_closure()); | 2058 Node* closure = __ LoadRegister(Register::function_closure()); |
2059 Node* context = __ GetContext(); | 2059 Node* context = __ GetContext(); |
2060 Node* bytecode_flags = __ BytecodeOperandFlag(2); | 2060 Node* bytecode_flags = __ BytecodeOperandFlag(2); |
2061 | 2061 |
2062 Label fast_shallow_clone(assembler), | 2062 Label fast_shallow_clone(assembler), |
2063 call_runtime(assembler, Label::kDeferred); | 2063 call_runtime(assembler, Label::kDeferred); |
2064 Node* use_fast_shallow_clone = __ Word32And( | 2064 Node* use_fast_shallow_clone = __ Word32And( |
2065 bytecode_flags, | 2065 bytecode_flags, |
2066 __ Int32Constant(CreateArrayLiteralFlags::FastShallowCloneBit::kMask)); | 2066 __ Int32Constant(CreateArrayLiteralFlags::FastShallowCloneBit::kMask)); |
2067 __ BranchIf(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime); | 2067 __ Branch(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime); |
2068 | 2068 |
2069 __ Bind(&fast_shallow_clone); | 2069 __ Bind(&fast_shallow_clone); |
2070 { | 2070 { |
2071 DCHECK(FLAG_allocation_site_pretenuring); | 2071 DCHECK(FLAG_allocation_site_pretenuring); |
2072 Node* result = FastCloneShallowArrayStub::Generate( | 2072 Node* result = FastCloneShallowArrayStub::Generate( |
2073 assembler, closure, literal_index, context, &call_runtime, | 2073 assembler, closure, literal_index, context, &call_runtime, |
2074 TRACK_ALLOCATION_SITE); | 2074 TRACK_ALLOCATION_SITE); |
2075 __ SetAccumulator(result); | 2075 __ SetAccumulator(result); |
2076 __ Dispatch(); | 2076 __ Dispatch(); |
2077 } | 2077 } |
(...skipping 24 matching lines...)
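The fast/slow decision above is a single-bit flag test: Word32And with the field's mask yields a nonzero word exactly when the bit is set, and Branch treats any nonzero condition as true. A sketch with a hypothetical bit position (the real layout is defined by CreateArrayLiteralFlags):

    #include <cstdint>

    // Hypothetical placement: bit 0 of the flag operand.
    constexpr uint32_t kFastShallowCloneMask = 1u << 0;

    bool UseFastShallowClone(uint32_t bytecode_flags) {
      // Word32And(flags, Int32Constant(kMask)) feeding Branch.
      return (bytecode_flags & kFastShallowCloneMask) != 0;
    }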
2102 Node* literal_index = __ SmiTag(literal_index_raw); | 2102 Node* literal_index = __ SmiTag(literal_index_raw); |
2103 Node* bytecode_flags = __ BytecodeOperandFlag(2); | 2103 Node* bytecode_flags = __ BytecodeOperandFlag(2); |
2104 Node* closure = __ LoadRegister(Register::function_closure()); | 2104 Node* closure = __ LoadRegister(Register::function_closure()); |
2105 | 2105 |
2106 // Check if we can do a fast clone or have to call the runtime. | 2106 // Check if we can do a fast clone or have to call the runtime. |
2107 Label if_fast_clone(assembler), | 2107 Label if_fast_clone(assembler), |
2108 if_not_fast_clone(assembler, Label::kDeferred); | 2108 if_not_fast_clone(assembler, Label::kDeferred); |
2109 Node* fast_clone_properties_count = | 2109 Node* fast_clone_properties_count = |
2110 __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>( | 2110 __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>( |
2111 bytecode_flags); | 2111 bytecode_flags); |
2112 __ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone); | 2112 __ Branch(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone); |
2113 | 2113 |
2114 __ Bind(&if_fast_clone); | 2114 __ Bind(&if_fast_clone); |
2115 { | 2115 { |
2116 // If we can do a fast clone, take the fast path in FastCloneShallowObjectStub. | 2116 // If we can do a fast clone, take the fast path in FastCloneShallowObjectStub. |
2117 Node* result = FastCloneShallowObjectStub::GenerateFastPath( | 2117 Node* result = FastCloneShallowObjectStub::GenerateFastPath( |
2118 assembler, &if_not_fast_clone, closure, literal_index, | 2118 assembler, &if_not_fast_clone, closure, literal_index, |
2119 fast_clone_properties_count); | 2119 fast_clone_properties_count); |
2120 __ StoreRegister(result, __ BytecodeOperandReg(3)); | 2120 __ StoreRegister(result, __ BytecodeOperandReg(3)); |
2121 __ Dispatch(); | 2121 __ Dispatch(); |
2122 } | 2122 } |
(...skipping 126 matching lines...)
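Unlike the single-bit test in CreateArrayLiteral, BitFieldDecode extracts a multi-bit field; the object-literal fast path is taken whenever the decoded properties count is nonzero, so the decoded value can feed Branch directly. A sketch of the usual shift/mask scheme behind V8's BitField templates (the shift and width here are made up):

    #include <cstdint>

    template <int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static constexpr uint32_t decode(uint32_t flags) {
        return (flags & kMask) >> kShift;
      }
    };

    // Hypothetical placement: a 3-bit count starting at bit 1.
    using FastClonePropertiesCountBits = BitField<1, 3>;

    static_assert(FastClonePropertiesCountBits::decode(0x6) == 3,
                  "decode strips the surrounding bits and shifts down");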
2249 // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports | 2249 // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports |
2250 // duplicate parameters. | 2250 // duplicate parameters. |
2251 Node* shared_info = | 2251 Node* shared_info = |
2252 __ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset); | 2252 __ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset); |
2253 Node* compiler_hints = __ LoadObjectField( | 2253 Node* compiler_hints = __ LoadObjectField( |
2254 shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset, | 2254 shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset, |
2255 MachineType::Uint8()); | 2255 MachineType::Uint8()); |
2256 Node* duplicate_parameters_bit = __ Int32Constant( | 2256 Node* duplicate_parameters_bit = __ Int32Constant( |
2257 1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte); | 2257 1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte); |
2258 Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit); | 2258 Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit); |
2259 __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters); | 2259 __ Branch(compare, &if_duplicate_parameters, &if_not_duplicate_parameters); |
2260 | 2260 |
2261 __ Bind(&if_not_duplicate_parameters); | 2261 __ Bind(&if_not_duplicate_parameters); |
2262 { | 2262 { |
2263 // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan stub. | 2263 // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan stub. |
2264 Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true); | 2264 Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true); |
2265 Node* target = __ HeapConstant(callable.code()); | 2265 Node* target = __ HeapConstant(callable.code()); |
2266 Node* result = __ CallStub(callable.descriptor(), target, context, closure); | 2266 Node* result = __ CallStub(callable.descriptor(), target, context, closure); |
2267 __ SetAccumulator(result); | 2267 __ SetAccumulator(result); |
2268 __ Dispatch(); | 2268 __ Dispatch(); |
2269 } | 2269 } |
(...skipping 35 matching lines...)
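The duplicate-parameters check loads a single byte of the compiler-hints word rather than the whole field, using a precomputed byte offset plus a bit position within that byte. A sketch of the trick, assuming little-endian layout and made-up positions (V8's kHasDuplicateParametersByteOffset is already endian-adjusted):

    #include <cstdint>
    #include <cstring>

    struct SharedFunctionInfo {
      uint32_t compiler_hints;  // the duplicate-parameters bit lives here
    };

    constexpr int kHasDuplicateParametersBit = 10;  // hypothetical position
    constexpr int kByteOffset = kHasDuplicateParametersBit / 8;
    constexpr int kBitWithinByte = kHasDuplicateParametersBit % 8;

    bool HasDuplicateParameters(const SharedFunctionInfo* info) {
      uint8_t byte;  // MachineType::Uint8 load at the byte offset
      std::memcpy(&byte,
                  reinterpret_cast<const char*>(&info->compiler_hints) +
                      kByteOffset,
                  1);
      return (byte & (1u << kBitWithinByte)) != 0;
    }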
2305 __ Dispatch(); | 2305 __ Dispatch(); |
2306 } | 2306 } |
2307 | 2307 |
2308 // StackCheck | 2308 // StackCheck |
2309 // | 2309 // |
2310 // Performs a stack guard check. | 2310 // Performs a stack guard check. |
2311 void Interpreter::DoStackCheck(InterpreterAssembler* assembler) { | 2311 void Interpreter::DoStackCheck(InterpreterAssembler* assembler) { |
2312 Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred); | 2312 Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred); |
2313 | 2313 |
2314 Node* interrupt = __ StackCheckTriggeredInterrupt(); | 2314 Node* interrupt = __ StackCheckTriggeredInterrupt(); |
2315 __ BranchIf(interrupt, &stack_check_interrupt, &ok); | 2315 __ Branch(interrupt, &stack_check_interrupt, &ok); |
2316 | 2316 |
2317 __ Bind(&ok); | 2317 __ Bind(&ok); |
2318 __ Dispatch(); | 2318 __ Dispatch(); |
2319 | 2319 |
2320 __ Bind(&stack_check_interrupt); | 2320 __ Bind(&stack_check_interrupt); |
2321 { | 2321 { |
2322 Node* context = __ GetContext(); | 2322 Node* context = __ GetContext(); |
2323 __ CallRuntime(Runtime::kStackGuard, context); | 2323 __ CallRuntime(Runtime::kStackGuard, context); |
2324 __ Dispatch(); | 2324 __ Dispatch(); |
2325 } | 2325 } |
(...skipping 152 matching lines...)
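StackCheck folds interrupt detection into the stack guard: another thread requests an interrupt by lowering the published stack limit, so one comparison covers both "out of stack" and "interrupt pending", and the interrupt branch can be marked deferred as the rare case. A rough sketch with hypothetical field names (the real logic lives in the isolate's StackGuard):

    #include <cstdint>

    struct StackGuard {
      uintptr_t jslimit;       // the limit generated code compares against
      uintptr_t real_jslimit;  // the genuine overflow limit
    };

    // An interrupt request clobbers jslimit so no stack pointer passes the
    // check; the two limits diverge only while an interrupt is pending.
    bool StackCheckTriggeredInterrupt(const StackGuard& guard) {
      return guard.jslimit != guard.real_jslimit;
    }

On the deferred path the interpreter calls Runtime::kStackGuard, which services the pending interrupts and restores the limit before dispatching to the next bytecode.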
2478 Node* cache_array_reg = __ NextRegister(cache_type_reg); | 2478 Node* cache_array_reg = __ NextRegister(cache_type_reg); |
2479 Node* cache_array = __ LoadRegister(cache_array_reg); | 2479 Node* cache_array = __ LoadRegister(cache_array_reg); |
2480 | 2480 |
2481 // Load the next key from the enumeration array. | 2481 // Load the next key from the enumeration array. |
2482 Node* key = __ LoadFixedArrayElement(cache_array, index, 0, | 2482 Node* key = __ LoadFixedArrayElement(cache_array, index, 0, |
2483 CodeStubAssembler::SMI_PARAMETERS); | 2483 CodeStubAssembler::SMI_PARAMETERS); |
2484 | 2484 |
2485 // Check if we can use the for-in fast path, potentially using the enum cache. | 2485 // Check if we can use the for-in fast path, potentially using the enum cache. |
2486 Label if_fast(assembler), if_slow(assembler, Label::kDeferred); | 2486 Label if_fast(assembler), if_slow(assembler, Label::kDeferred); |
2487 Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset); | 2487 Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset); |
2488 __ BranchIfWordEqual(receiver_map, cache_type, &if_fast, &if_slow); | 2488 __ Branch(__ WordEqual(receiver_map, cache_type), &if_fast, &if_slow); |
2489 __ Bind(&if_fast); | 2489 __ Bind(&if_fast); |
2490 { | 2490 { |
2491 // The enum cache is in use for {receiver}, so the {key} is definitely valid. | 2491 // The enum cache is in use for {receiver}, so the {key} is definitely valid. |
2492 __ SetAccumulator(key); | 2492 __ SetAccumulator(key); |
2493 __ Dispatch(); | 2493 __ Dispatch(); |
2494 } | 2494 } |
2495 __ Bind(&if_slow); | 2495 __ Bind(&if_slow); |
2496 { | 2496 { |
2497 // Record the fact that we hit the for-in slow path. | 2497 // Record the fact that we hit the for-in slow path. |
2498 Node* vector_index = __ BytecodeOperandIdx(3); | 2498 Node* vector_index = __ BytecodeOperandIdx(3); |
(...skipping 16 matching lines...)
2515 // | 2515 // |
2516 // Returns false if the end of the enumerable properties has been reached. | 2516 // Returns false if the end of the enumerable properties has been reached. |
2517 void Interpreter::DoForInContinue(InterpreterAssembler* assembler) { | 2517 void Interpreter::DoForInContinue(InterpreterAssembler* assembler) { |
2518 Node* index_reg = __ BytecodeOperandReg(0); | 2518 Node* index_reg = __ BytecodeOperandReg(0); |
2519 Node* index = __ LoadRegister(index_reg); | 2519 Node* index = __ LoadRegister(index_reg); |
2520 Node* cache_length_reg = __ BytecodeOperandReg(1); | 2520 Node* cache_length_reg = __ BytecodeOperandReg(1); |
2521 Node* cache_length = __ LoadRegister(cache_length_reg); | 2521 Node* cache_length = __ LoadRegister(cache_length_reg); |
2522 | 2522 |
2523 // Check if {index} is at {cache_length} already. | 2523 // Check if {index} is at {cache_length} already. |
2524 Label if_true(assembler), if_false(assembler), end(assembler); | 2524 Label if_true(assembler), if_false(assembler), end(assembler); |
2525 __ BranchIfWordEqual(index, cache_length, &if_true, &if_false); | 2525 __ Branch(__ WordEqual(index, cache_length), &if_true, &if_false); |
2526 __ Bind(&if_true); | 2526 __ Bind(&if_true); |
2527 { | 2527 { |
2528 __ SetAccumulator(__ BooleanConstant(false)); | 2528 __ SetAccumulator(__ BooleanConstant(false)); |
2529 __ Goto(&end); | 2529 __ Goto(&end); |
2530 } | 2530 } |
2531 __ Bind(&if_false); | 2531 __ Bind(&if_false); |
2532 { | 2532 { |
2533 __ SetAccumulator(__ BooleanConstant(true)); | 2533 __ SetAccumulator(__ BooleanConstant(true)); |
2534 __ Goto(&end); | 2534 __ Goto(&end); |
2535 } | 2535 } |
(...skipping 50 matching lines...)
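Taken together, the two for-in hunks above implement a cursor protocol: ForInContinue compares the index register against the cached length, and ForInNext loads the next key, staying on the fast path only while the receiver's map is still the one the enum cache was built for. A condensed sketch with hypothetical types:

    #include <cstddef>

    struct Map {};
    struct Object { const Map* map; };

    struct EnumCache {
      const Map* cache_type;    // map the key list was computed for
      const char* const* keys;  // cache_array
      size_t length;            // cache_length
    };

    // Drives the same protocol the bytecodes implement register-by-register.
    void ForIn(const Object* receiver, const EnumCache* cache,
               void (*body)(const char* key)) {
      for (size_t i = 0; i != cache->length; ++i) {  // ForInContinue
        const char* key = cache->keys[i];            // ForInNext
        if (receiver->map == cache->cache_type) {
          body(key);  // fast path: the enum cache is valid, no filtering
        } else {
          // Slow path: the map changed mid-loop, so the key would first be
          // re-validated against the current receiver (and the feedback
          // vector updated) before running the body.
          body(key);
        }
      }
    }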
2586 Node* generator = __ LoadRegister(generator_reg); | 2586 Node* generator = __ LoadRegister(generator_reg); |
2587 | 2587 |
2588 Label if_stepping(assembler, Label::kDeferred), ok(assembler); | 2588 Label if_stepping(assembler, Label::kDeferred), ok(assembler); |
2589 Node* step_action_address = __ ExternalConstant( | 2589 Node* step_action_address = __ ExternalConstant( |
2590 ExternalReference::debug_last_step_action_address(isolate_)); | 2590 ExternalReference::debug_last_step_action_address(isolate_)); |
2591 Node* step_action = __ Load(MachineType::Int8(), step_action_address); | 2591 Node* step_action = __ Load(MachineType::Int8(), step_action_address); |
2592 STATIC_ASSERT(StepIn > StepNext); | 2592 STATIC_ASSERT(StepIn > StepNext); |
2593 STATIC_ASSERT(StepFrame > StepNext); | 2593 STATIC_ASSERT(StepFrame > StepNext); |
2594 STATIC_ASSERT(LastStepAction == StepFrame); | 2594 STATIC_ASSERT(LastStepAction == StepFrame); |
2595 Node* step_next = __ Int32Constant(StepNext); | 2595 Node* step_next = __ Int32Constant(StepNext); |
2596 __ BranchIfInt32LessThanOrEqual(step_next, step_action, &if_stepping, &ok); | 2596 __ Branch(__ Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok); |
2597 __ Bind(&ok); | 2597 __ Bind(&ok); |
2598 | 2598 |
2599 Node* array = | 2599 Node* array = |
2600 __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset); | 2600 __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset); |
2601 Node* context = __ GetContext(); | 2601 Node* context = __ GetContext(); |
2602 Node* state = __ GetAccumulator(); | 2602 Node* state = __ GetAccumulator(); |
2603 | 2603 |
2604 __ ExportRegisterFile(array); | 2604 __ ExportRegisterFile(array); |
2605 __ StoreObjectField(generator, JSGeneratorObject::kContextOffset, context); | 2605 __ StoreObjectField(generator, JSGeneratorObject::kContextOffset, context); |
2606 __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, state); | 2606 __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, state); |
(...skipping 30 matching lines...)
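SuspendGenerator serializes the live interpreter state into the generator object: the whole register file is exported to the operand-stack array, and the context and continuation (resume point) are stored as fields so a later resume can rebuild the frame. The stepping check before it diverts to the deferred path whenever the debugger's last step action is StepNext or stronger, an ordering the STATIC_ASSERTs guarantee. A minimal mirror of the suspend/resume protocol (all names hypothetical):

    #include <cstdint>
    #include <vector>

    struct Context {};

    struct GeneratorObject {
      std::vector<intptr_t> operand_stack;  // ExportRegisterFile target
      Context* context;                     // kContextOffset
      int continuation;                     // kContinuationOffset (resume state)
    };

    void Suspend(GeneratorObject* gen, const std::vector<intptr_t>& registers,
                 Context* context, int state) {
      gen->operand_stack = registers;  // snapshot the register file
      gen->context = context;
      gen->continuation = state;
    }

    // Resume (the hunk below) performs the inverse bookkeeping: it swaps the
    // stored continuation for a new state and hands the old state back in
    // the accumulator.
    int Resume(GeneratorObject* gen, int new_state) {
      int old_state = gen->continuation;
      gen->continuation = new_state;
      return old_state;
    }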
2637 __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, | 2637 __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, |
2638 __ SmiTag(new_state)); | 2638 __ SmiTag(new_state)); |
2639 __ SetAccumulator(old_state); | 2639 __ SetAccumulator(old_state); |
2640 | 2640 |
2641 __ Dispatch(); | 2641 __ Dispatch(); |
2642 } | 2642 } |
2643 | 2643 |
2644 } // namespace interpreter | 2644 } // namespace interpreter |
2645 } // namespace internal | 2645 } // namespace internal |
2646 } // namespace v8 | 2646 } // namespace v8 |