Index: src/builtins/mips/builtins-mips.cc
diff --git a/src/builtins/mips/builtins-mips.cc b/src/builtins/mips/builtins-mips.cc
index 56dc7479e5c4e9e58f491e83f1696567279ff6b5..b6b6a5f715d50fc56afc0a428269fe6dca10de78 100644
--- a/src/builtins/mips/builtins-mips.cc
+++ b/src/builtins/mips/builtins-mips.cc
@@ -1158,7 +1158,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode,
-    CallableType function_type) {
+    InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments (not including the receiver)
   //  -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1174,12 +1174,14 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
   Generate_InterpreterPushArgs(masm, t0, a2, t4, t1, &stack_overflow);
 
   // Call the target.
-  if (function_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
                                                       tail_call_mode),
             RelocInfo::CODE_TARGET);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(function_type, CallableType::kAny);
     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
@@ -1195,7 +1197,7 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
 
 // static
 void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
-    MacroAssembler* masm, PushArgsConstructMode mode) {
+    MacroAssembler* masm, InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   //  -- a0 : argument count (not including receiver)
   //  -- a3 : new target
@@ -1212,7 +1214,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
   Generate_InterpreterPushArgs(masm, a0, t4, t1, t0, &stack_overflow);
 
   __ AssertUndefinedOrAllocationSite(a2, t0);
-  if (mode == PushArgsConstructMode::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ AssertFunction(a1);
 
     // Tail call to the function-specific construct stub (still in the caller
@@ -1221,12 +1223,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
     __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
     __ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
     __ Jump(at);
-  } else if (mode == PushArgsConstructMode::kWithFinalSpread) {
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
     // Call the constructor with a0, a1, and a3 unmodified.
     __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
             RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(PushArgsConstructMode::kOther, mode);
+    DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
     // Call the constructor with a0, a1, and a3 unmodified.
     __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   }
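
Note on the dispatch change above: the patch folds the old `CallableType` and `PushArgsConstructMode` parameters into a single `InterpreterPushArgsMode` and threads the new `kWithFinalSpread` arm through both the call and the construct paths (the `DCHECK_EQ(function_type, CallableType::kAny)` in the call path is dropped because the else-branch no longer corresponds to a single enum value). A minimal host-side sketch of the resulting three-way dispatch follows; the `InterpreterPushArgsMode` enumerators come from the patch, while `Target` and `SelectCallTarget` are hypothetical stand-ins for the builtin jump targets, not V8 API.

```cpp
#include <cassert>
#include <cstdio>

// The enum introduced by this patch, replacing both CallableType and
// PushArgsConstructMode.
enum class InterpreterPushArgsMode { kJSFunction, kWithFinalSpread, kOther };

// Hypothetical stand-in for the builtin code objects the generated code
// jumps to; the real builtins jump to Code targets, not an enum.
enum class Target { kCallFunction, kCallWithSpread, kCall };

// Mirrors the branch structure of Generate_InterpreterPushArgsAndCallImpl:
// a known JSFunction takes the fast CallFunction stub, a trailing spread
// takes the new CallWithSpread builtin, and anything else the generic Call.
Target SelectCallTarget(InterpreterPushArgsMode mode) {
  switch (mode) {
    case InterpreterPushArgsMode::kJSFunction:
      return Target::kCallFunction;
    case InterpreterPushArgsMode::kWithFinalSpread:
      return Target::kCallWithSpread;
    case InterpreterPushArgsMode::kOther:
      return Target::kCall;
  }
  assert(false);  // unreachable: all enumerators handled above
  return Target::kCall;
}

int main() {
  // A call like f(...args) is compiled with kWithFinalSpread.
  assert(SelectCallTarget(InterpreterPushArgsMode::kWithFinalSpread) ==
         Target::kCallWithSpread);
  std::puts("dispatch model ok");
}
```

The construct path has the same shape, with the function-specific construct stub, ConstructWithSpread, and Construct as the three targets.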
@@ -2626,6 +2628,147 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
   }
 }
 
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+  Register argc = a0;
+  Register constructor = a1;
+  Register new_target = a3;
+
+  Register scratch = t0;
+  Register scratch2 = t1;
+
+  Register spread = a2;
+  Register spread_map = t3;
+
+  Register spread_len = t3;
+
+  Register native_context = t4;
+
+  __ lw(spread, MemOperand(sp, 0));
+  __ lw(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
+  __ lw(native_context, NativeContextMemOperand());
+
+  Label runtime_call, push_args;
+  // Check that the spread is an array.
+  __ lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
+  __ Branch(&runtime_call, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+  // Check that we have the original ArrayPrototype.
+  __ lw(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
+  __ lw(scratch2, ContextMemOperand(native_context,
+                                    Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+  __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
+
+  // Check that the ArrayPrototype hasn't been modified in a way that would
+  // affect iteration.
+  __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+  __ lw(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+  __ Branch(&runtime_call, ne, scratch,
+            Operand(Smi::FromInt(Isolate::kProtectorValid)));
+
+  // Check that the map of the initial array iterator hasn't changed.
+  __ lw(scratch,
+        ContextMemOperand(native_context,
+                          Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+  __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ lw(scratch2,
+        ContextMemOperand(native_context,
+                          Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+  __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
+
+  // For FastPacked kinds, iteration will have the same effect as simply
+  // accessing each property in order.
+  Label no_protector_check;
+  __ lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
+  __ DecodeField<Map::ElementsKindBits>(scratch);
+  __ Branch(&runtime_call, hi, scratch, Operand(FAST_HOLEY_ELEMENTS));
+  // For non-FastHoley kinds, we can skip the protector check.
+  __ Branch(&no_protector_check, eq, scratch, Operand(FAST_SMI_ELEMENTS));
+  __ Branch(&no_protector_check, eq, scratch, Operand(FAST_ELEMENTS));
+  // Check the ArrayProtector cell.
+  __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+  __ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+  __ Branch(&runtime_call, ne, scratch,
+            Operand(Smi::FromInt(Isolate::kProtectorValid)));
+
+  __ bind(&no_protector_check);
+  // Load the FixedArray backing store, but use the length from the array.
+  __ lw(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
+  __ SmiUntag(spread_len);
+  __ lw(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
+  __ Branch(&push_args);
+
+  __ bind(&runtime_call);
+  {
+    // Call the builtin for the result of the spread.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(argc);
+    __ Push(constructor, new_target, argc, spread);
+    __ CallRuntime(Runtime::kSpreadIterableFixed);
+    __ mov(spread, v0);
+    __ Pop(constructor, new_target, argc);
+    __ SmiUntag(argc);
+  }
+
+  {
+    // Calculate the new nargs including the result of the spread.
+    __ lw(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
+    __ SmiUntag(spread_len);
+
+    __ bind(&push_args);
+    // argc += spread_len - 1. Subtract 1 for the spread itself.
+    __ Addu(argc, argc, spread_len);
+    __ Subu(argc, argc, Operand(1));
+
+    // Pop the spread argument off the stack.
+    __ Pop(scratch);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+    // Make scratch the space we have left. The stack might already be
+    // overflowed here which will cause ip to become negative.
+    __ Subu(scratch, sp, scratch);
+    // Check if the arguments will overflow the stack.
+    __ sll(at, spread_len, kPointerSizeLog2);
+    __ Branch(&done, gt, scratch, Operand(at));  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // Put the evaluated spread onto the stack as additional arguments.
+  {
+    __ mov(scratch, zero_reg);
+    Label done, loop;
+    __ bind(&loop);
+    __ Branch(&done, eq, scratch, Operand(spread_len));
+    __ Lsa(scratch2, spread, scratch, kPointerSizeLog2);
+    __ lw(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
+    __ Push(scratch2);
+    __ Addu(scratch, scratch, Operand(1));
+    __ Branch(&loop);
+    __ bind(&done);
+  }
+}
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the target to call (can be any Object).
+  // -----------------------------------
+
+  // CheckSpreadAndPushToStack will push a3 to save it.
+  __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            TailCallMode::kDisallow),
+          RelocInfo::CODE_TARGET);
+}
+
 // static
 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   // ----------- S t a t e -------------
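
The heart of the new `CheckSpreadAndPushToStack` helper above is the guard that decides whether the spread argument can be expanded straight from the JSArray's FixedArray backing store or must fall back to full iteration via `Runtime::kSpreadIterableFixed`. A compact C++ model of that decision is sketched below under simplified assumptions: `SpreadArg`, `CanUseFastPath`, and the boolean fields are illustrative stand-ins for the map and protector-cell loads the assembly performs; only the `ElementsKind` names and their relative order mirror the constants the patch tests.

```cpp
#include <cstdio>

// Illustrative mirror of V8's fast elements kinds; only the names and their
// relative order match the constants tested by the patch.
enum ElementsKind {
  FAST_SMI_ELEMENTS,
  FAST_HOLEY_SMI_ELEMENTS,
  FAST_ELEMENTS,
  FAST_HOLEY_ELEMENTS,
  DICTIONARY_ELEMENTS  // stands for every kind above FAST_HOLEY_ELEMENTS
};

// Hypothetical summary of the checks; each field corresponds to one guard
// in the helper.
struct SpreadArg {
  bool is_js_array;                  // instance type == JS_ARRAY_TYPE
  bool has_initial_array_prototype;  // map prototype is the original one
  bool iterator_protector_valid;     // ArrayIteratorProtector cell intact
  bool iterator_map_unchanged;       // initial array iterator map intact
  bool array_protector_valid;        // ArrayProtector cell intact
  ElementsKind kind;
};

// Returns true when iterating the spread is observably identical to reading
// its elements in order, so the builtin may push straight from the backing
// FixedArray instead of calling Runtime::kSpreadIterableFixed.
bool CanUseFastPath(const SpreadArg& s) {
  if (!s.is_js_array || !s.has_initial_array_prototype) return false;
  if (!s.iterator_protector_valid || !s.iterator_map_unchanged) return false;
  if (s.kind > FAST_HOLEY_ELEMENTS) return false;  // slow kinds: runtime call
  // Packed kinds skip the ArrayProtector check; holey kinds need it because
  // reading a hole falls through to the (guarded) prototype chain.
  if (s.kind == FAST_SMI_ELEMENTS || s.kind == FAST_ELEMENTS) return true;
  return s.array_protector_valid;
}

int main() {
  SpreadArg packed{true, true, true, true, true, FAST_ELEMENTS};
  SpreadArg tampered_holey = packed;
  tampered_holey.kind = FAST_HOLEY_ELEMENTS;
  tampered_holey.array_protector_valid = false;
  std::printf("packed: %d, tampered holey: %d\n", CanUseFastPath(packed),
              CanUseFastPath(tampered_holey));
}
```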
@@ -2816,126 +2959,7 @@ void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
   //          the JSFunction on which new was invoked initially)
   // -----------------------------------
 
-  Register argc = a0;
-  Register constructor = a1;
-  Register new_target = a3;
-
-  Register scratch = t0;
-  Register scratch2 = t1;
-
-  Register spread = a2;
-  Register spread_map = t3;
-
-  Register native_context = t4;
-
-  __ lw(spread, MemOperand(sp, 0));
-  __ lw(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
-  __ lw(native_context, NativeContextMemOperand());
-
-  Label runtime_call, push_args;
-  // Check that the spread is an array.
-  __ lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
-  __ Branch(&runtime_call, ne, scratch, Operand(JS_ARRAY_TYPE));
-
-  // Check that we have the original ArrayPrototype.
-  __ lw(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
-  __ lw(scratch2, ContextMemOperand(native_context,
-                                    Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
-  __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
-
-  // Check that the ArrayPrototype hasn't been modified in a way that would
-  // affect iteration.
-  __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
-  __ lw(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
-  __ Branch(&runtime_call, ne, scratch,
-            Operand(Smi::FromInt(Isolate::kProtectorValid)));
-
-  // Check that the map of the initial array iterator hasn't changed.
-  __ lw(scratch,
-        ContextMemOperand(native_context,
-                          Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
-  __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
-  __ lw(scratch2,
-        ContextMemOperand(native_context,
-                          Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
-  __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
-
-  // For FastPacked kinds, iteration will have the same effect as simply
-  // accessing each property in order.
-  Label no_protector_check;
-  __ lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
-  __ DecodeField<Map::ElementsKindBits>(scratch);
-  __ Branch(&runtime_call, hi, scratch, Operand(FAST_HOLEY_ELEMENTS));
-  // For non-FastHoley kinds, we can skip the protector check.
-  __ Branch(&no_protector_check, eq, scratch, Operand(FAST_SMI_ELEMENTS));
-  __ Branch(&no_protector_check, eq, scratch, Operand(FAST_ELEMENTS));
-  // Check the ArrayProtector cell.
-  __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
-  __ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
-  __ Branch(&runtime_call, ne, scratch,
-            Operand(Smi::FromInt(Isolate::kProtectorValid)));
-
-  __ bind(&no_protector_check);
-  // Load the FixedArray backing store.
-  __ lw(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
-  __ Branch(&push_args);
-
-  __ bind(&runtime_call);
-  {
-    // Call the builtin for the result of the spread.
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ SmiTag(argc);
-    __ Push(constructor, new_target, argc, spread);
-    __ CallRuntime(Runtime::kSpreadIterableFixed);
-    __ mov(spread, v0);
-    __ Pop(constructor, new_target, argc);
-    __ SmiUntag(argc);
-  }
-
-  Register spread_len = t3;
-  __ bind(&push_args);
-  {
-    // Pop the spread argument off the stack.
-    __ Pop(scratch);
-    // Calculate the new nargs including the result of the spread.
-    __ lw(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
-    __ SmiUntag(spread_len);
-    // argc += spread_len - 1. Subtract 1 for the spread itself.
-    __ Addu(argc, argc, spread_len);
-    __ Subu(argc, argc, Operand(1));
-  }
-
-  // Check for stack overflow.
-  {
-    // Check the stack for overflow. We are not trying to catch interruptions
-    // (i.e. debug break and preemption) here, so check the "real stack limit".
-    Label done;
-    __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
-    // Make scratch the space we have left. The stack might already be
-    // overflowed here which will cause ip to become negative.
-    __ Subu(scratch, sp, scratch);
-    // Check if the arguments will overflow the stack.
-    __ sll(at, spread_len, kPointerSizeLog2);
-    __ Branch(&done, gt, scratch, Operand(at));  // Signed comparison.
-    __ TailCallRuntime(Runtime::kThrowStackOverflow);
-    __ bind(&done);
-  }
-
-  // Put the evaluated spread onto the stack as additional arguments.
-  {
-    __ mov(scratch, zero_reg);
-    Label done, loop;
-    __ bind(&loop);
-    __ Branch(&done, eq, scratch, Operand(spread_len));
-    __ Lsa(scratch2, spread, scratch, kPointerSizeLog2);
-    __ lw(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
-    __ Push(scratch2);
-    __ Addu(scratch, scratch, Operand(1));
-    __ Branch(&loop);
-    __ bind(&done);
-  }
-
-  // Dispatch.
+  CheckSpreadAndPushToStack(masm);
   __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
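
A final note on the shared tail of the helper: once the spread is flattened, the builtin rewrites the argument count as `argc += spread_len - 1` (minus one because the spread slot itself is popped) and bounds-checks the pending pushes against the "real stack limit" with a signed comparison before the copy loop. The sketch below models that arithmetic; `PushesWouldOverflow` and the sample addresses are hypothetical, while `kPointerSizeLog2 == 2` matches the 32-bit MIPS port.

```cpp
#include <cstdint>
#include <cstdio>

constexpr int kPointerSizeLog2 = 2;  // 32-bit MIPS: 4-byte stack slots

// Models the builtin's overflow test: it computes sp - real_stack_limit and
// takes the throw path unless the remaining space is strictly greater
// (signed) than spread_len << kPointerSizeLog2 bytes of pending pushes.
bool PushesWouldOverflow(uint32_t sp, uint32_t real_stack_limit,
                         int32_t spread_len) {
  int32_t space_left = static_cast<int32_t>(sp - real_stack_limit);
  int32_t bytes_needed = spread_len << kPointerSizeLog2;
  return space_left <= bytes_needed;  // true -> Runtime::kThrowStackOverflow
}

int main() {
  // argc += spread_len - 1: e.g. f(a, ...[x, y, z]) starts with argc == 2
  // (a plus the spread) and ends with argc == 4 (a, x, y, z).
  int argc = 2;
  const int32_t spread_len = 3;
  argc += spread_len - 1;
  std::printf("argc=%d overflow=%d\n", argc,
              PushesWouldOverflow(0x10000, 0x8000, spread_len));
}
```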