Chromium Code Reviews

Index: src/builtins/s390/builtins-s390.cc
diff --git a/src/builtins/s390/builtins-s390.cc b/src/builtins/s390/builtins-s390.cc
index d6feab89dde33dad7d0fa4721999bd2184438c94..7cc2ebc8f235099447b7b980696945f43c218326 100644
--- a/src/builtins/s390/builtins-s390.cc
+++ b/src/builtins/s390/builtins-s390.cc
@@ -1191,8 +1191,10 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
                                                       tail_call_mode),
             RelocInfo::CODE_TARGET);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(mode, InterpreterPushArgsMode::kOther);
     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
@@ -2701,6 +2703,153 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
   }
 }
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+  Register argc = r2;
+  Register constructor = r3;
+  Register new_target = r5;
+
+  Register scratch = r4;
+  Register scratch2 = r8;
+
+  Register spread = r6;
+  Register spread_map = r7;
+  Register spread_len = r7;
+  Label runtime_call, push_args;
+  __ LoadP(spread, MemOperand(sp, 0));
+  __ JumpIfSmi(spread, &runtime_call);
+  __ LoadP(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
+
+  // Check that the spread is an array.
+  __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
+  __ bne(&runtime_call);
+
+  // Check that we have the original ArrayPrototype.
+  __ LoadP(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
+  __ LoadP(scratch2, NativeContextMemOperand());
+  __ LoadP(scratch2,
+           ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+  __ CmpP(scratch, scratch2);
+  __ bne(&runtime_call);
+
+  // Check that the ArrayPrototype hasn't been modified in a way that would
+  // affect iteration.
+  __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+  __ LoadP(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+  __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
+  __ bne(&runtime_call);
+
+  // Check that the map of the initial array iterator hasn't changed.
+  __ LoadP(scratch2, NativeContextMemOperand());
+  __ LoadP(scratch,
+           ContextMemOperand(scratch2,
+                             Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+  __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ LoadP(scratch2,
+           ContextMemOperand(
+               scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+  __ CmpP(scratch, scratch2);
+  __ bne(&runtime_call);
+
+  // For FastPacked kinds, iteration will have the same effect as simply
+  // accessing each property in order.
+  Label no_protector_check;
+  __ LoadP(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
john.yan  2017/02/01 03:26:42
Using LoadB here will fix the endian issue.
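A minimal sketch of the change the comment suggests, assuming the s390 MacroAssembler's LoadB byte-load helper that the reviewer names: Map::kBitField2Offset refers to a one-byte field, so the pointer-width LoadP above picks up the wrong byte on big-endian targets, while a single-byte load is endian-neutral.

    // Sketch of the suggested fix: load only the one-byte bit field so the
    // read is endian-neutral before decoding the elements kind.
    __ LoadB(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
    __ DecodeField<Map::ElementsKindBits>(scratch);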
+  __ DecodeField<Map::ElementsKindBits>(scratch);
+  __ CmpP(scratch, Operand(FAST_HOLEY_ELEMENTS));
+  __ bgt(&runtime_call);
+  // For non-FastHoley kinds, we can skip the protector check.
+  __ CmpP(scratch, Operand(FAST_SMI_ELEMENTS));
+  __ beq(&no_protector_check);
+  __ CmpP(scratch, Operand(FAST_ELEMENTS));
+  __ beq(&no_protector_check);
+  // Check the ArrayProtector cell.
+  __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+  __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+  __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
+  __ bne(&runtime_call);
+
+  __ bind(&no_protector_check);
+  // Load the FixedArray backing store, but use the length from the array.
+  __ LoadP(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
+  __ SmiUntag(spread_len);
+  __ LoadP(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
+  __ b(&push_args);
+
+  __ bind(&runtime_call);
+  {
+    // Call the builtin for the result of the spread.
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(argc);
+    __ Push(constructor, new_target, argc, spread);
+    __ CallRuntime(Runtime::kSpreadIterableFixed);
+    __ LoadRR(spread, r2);
+    __ Pop(constructor, new_target, argc);
+    __ SmiUntag(argc);
+  }
+
+  {
+    // Calculate the new nargs including the result of the spread.
+    __ LoadP(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
+    __ SmiUntag(spread_len);
+
+    __ bind(&push_args);
+    // argc += spread_len - 1. Subtract 1 for the spread itself.
+    __ AddP(argc, argc, spread_len);
+    __ SubP(argc, argc, Operand(1));
+
+    // Pop the spread argument off the stack.
+    __ Pop(scratch);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+    // Make scratch the space we have left. The stack might already be
+    // overflowed here which will cause scratch to become negative.
+    __ SubP(scratch, sp, scratch);
+    // Check if the arguments will overflow the stack.
+    __ ShiftLeftP(r0, spread_len, Operand(kPointerSizeLog2));
+    __ CmpP(scratch, r0);
+    __ bgt(&done);  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // Put the evaluated spread onto the stack as additional arguments.
+  {
+    __ LoadImmP(scratch, Operand::Zero());
+    Label done, loop;
+    __ bind(&loop);
+    __ CmpP(scratch, spread_len);
+    __ beq(&done);
+    __ ShiftLeftP(r0, scratch, Operand(kPointerSizeLog2));
+    __ AddP(scratch2, spread, r0);
+    __ LoadP(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
+    __ Push(scratch2);
+    __ AddP(scratch, scratch, Operand(1));
+    __ b(&loop);
+    __ bind(&done);
+  }
+}
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the constructor to call (can be any Object)
+  // -----------------------------------
+
+  // CheckSpreadAndPushToStack will push r5 to save it.
+  __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            TailCallMode::kDisallow),
+          RelocInfo::CODE_TARGET);
+}
+
 // static
 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -2829,132 +2978,7 @@ void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
   //          the JSFunction on which new was invoked initially)
   // -----------------------------------
-  Register argc = r2;
-  Register constructor = r3;
-  Register new_target = r5;
-
-  Register scratch = r4;
-  Register scratch2 = r8;
-
-  Register spread = r6;
-  Register spread_map = r7;
-  __ LoadP(spread, MemOperand(sp, 0));
-  __ LoadP(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
-
-  Label runtime_call, push_args;
-  // Check that the spread is an array.
-  __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
-  __ bne(&runtime_call);
-
-  // Check that we have the original ArrayPrototype.
-  __ LoadP(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
-  __ LoadP(scratch2, NativeContextMemOperand());
-  __ LoadP(scratch2,
-           ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
-  __ CmpP(scratch, scratch2);
-  __ bne(&runtime_call);
-
-  // Check that the ArrayPrototype hasn't been modified in a way that would
-  // affect iteration.
-  __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
-  __ LoadP(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
-  __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
-  __ bne(&runtime_call);
-
-  // Check that the map of the initial array iterator hasn't changed.
-  __ LoadP(scratch2, NativeContextMemOperand());
-  __ LoadP(scratch,
-           ContextMemOperand(scratch2,
-                             Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
-  __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
-  __ LoadP(scratch2,
-           ContextMemOperand(
-               scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
-  __ CmpP(scratch, scratch2);
-  __ bne(&runtime_call);
-
-  // For FastPacked kinds, iteration will have the same effect as simply
-  // accessing each property in order.
-  Label no_protector_check;
-  __ LoadP(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
-  __ DecodeField<Map::ElementsKindBits>(scratch);
-  __ CmpP(scratch, Operand(FAST_HOLEY_ELEMENTS));
-  __ bgt(&runtime_call);
-  // For non-FastHoley kinds, we can skip the protector check.
-  __ CmpP(scratch, Operand(FAST_SMI_ELEMENTS));
-  __ beq(&no_protector_check);
-  __ CmpP(scratch, Operand(FAST_ELEMENTS));
-  __ beq(&no_protector_check);
-  // Check the ArrayProtector cell.
-  __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
-  __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
-  __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
-  __ bne(&runtime_call);
-
-  __ bind(&no_protector_check);
-  // Load the FixedArray backing store.
-  __ LoadP(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
-  __ b(&push_args);
-
-  __ bind(&runtime_call);
-  {
-    // Call the builtin for the result of the spread.
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ SmiTag(argc);
-    __ Push(constructor, new_target, argc, spread);
-    __ CallRuntime(Runtime::kSpreadIterableFixed);
-    __ LoadRR(spread, r2);
-    __ Pop(constructor, new_target, argc);
-    __ SmiUntag(argc);
-  }
-
-  Register spread_len = r7;
-  __ bind(&push_args);
-  {
-    // Pop the spread argument off the stack.
-    __ Pop(scratch);
-    // Calculate the new nargs including the result of the spread.
-    __ LoadP(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
-    __ SmiUntag(spread_len);
-    // argc += spread_len - 1. Subtract 1 for the spread itself.
-    __ AddP(argc, argc, spread_len);
-    __ SubP(argc, argc, Operand(1));
-  }
-
-  // Check for stack overflow.
-  {
-    // Check the stack for overflow. We are not trying to catch interruptions
-    // (i.e. debug break and preemption) here, so check the "real stack limit".
-    Label done;
-    __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
-    // Make scratch the space we have left. The stack might already be
-    // overflowed here which will cause scratch to become negative.
-    __ SubP(scratch, sp, scratch);
-    // Check if the arguments will overflow the stack.
-    __ ShiftLeftP(r0, spread_len, Operand(kPointerSizeLog2));
-    __ CmpP(scratch, r0);
-    __ bgt(&done);  // Signed comparison.
-    __ TailCallRuntime(Runtime::kThrowStackOverflow);
-    __ bind(&done);
-  }
-
-  // Put the evaluated spread onto the stack as additional arguments.
-  {
-    __ LoadImmP(scratch, Operand::Zero());
-    Label done, loop;
-    __ bind(&loop);
-    __ CmpP(scratch, spread_len);
-    __ beq(&done);
-    __ ShiftLeftP(r0, scratch, Operand(kPointerSizeLog2));
-    __ AddP(scratch2, spread, r0);
-    __ LoadP(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
-    __ Push(scratch2);
-    __ AddP(scratch, scratch, Operand(1));
-    __ b(&loop);
-    __ bind(&done);
-  }
-
-  // Dispatch.
+  CheckSpreadAndPushToStack(masm);
   __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
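
The stack-overflow check in CheckSpreadAndPushToStack hinges on a signed comparison: subtracting the real stack limit from sp yields the space left, and the result goes negative if the stack has already overflowed. A standalone sketch of the same test in plain C++ (all names here are illustrative, not V8 API):

    #include <cstdint>

    // Returns true when spread_len extra pointer-sized arguments still fit.
    // space_left = sp - real_stack_limit; if the stack already overflowed,
    // the subtraction wraps negative and the signed comparison fails, just
    // like the bgt ("branch on greater than", signed) in the builtin.
    bool ArgsFitOnStack(uintptr_t sp, uintptr_t real_stack_limit,
                        intptr_t spread_len, int pointer_size_log2) {
      intptr_t space_left = static_cast<intptr_t>(sp - real_stack_limit);
      intptr_t bytes_needed = spread_len << pointer_size_log2;
      return space_left > bytes_needed;
    }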