Index: src/builtins/mips64/builtins-mips64.cc
diff --git a/src/builtins/mips64/builtins-mips64.cc b/src/builtins/mips64/builtins-mips64.cc
index feb9b97e9ac2f45d5e9f17d80281a1bf5737dc37..5b35c77df8a401a2d658219dc96f3cc6c2d93d4a 100644
--- a/src/builtins/mips64/builtins-mips64.cc
+++ b/src/builtins/mips64/builtins-mips64.cc
@@ -1187,7 +1187,7 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
 
 // static
 void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
-    MacroAssembler* masm, CallableType construct_type) {
+    MacroAssembler* masm, PushArgsConstructMode mode) {
   // ----------- S t a t e -------------
   // -- a0 : argument count (not including receiver)
   // -- a3 : new target
@@ -1204,7 +1204,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
   Generate_InterpreterPushArgs(masm, a0, a4, a5, t0, &stack_overflow);
 
   __ AssertUndefinedOrAllocationSite(a2, t0);
-  if (construct_type == CallableType::kJSFunction) {
+  if (mode == PushArgsConstructMode::kJSFunction) {
     __ AssertFunction(a1);
 
     // Tail call to the function-specific construct stub (still in the caller
@@ -1213,8 +1213,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
     __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
     __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
     __ Jump(at);
+  } else if (mode == PushArgsConstructMode::kWithFinalSpread) {
+    // Call the constructor with a0, a1, and a3 unmodified.
+    __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(construct_type, CallableType::kAny);
+    DCHECK_EQ(PushArgsConstructMode::kOther, mode);
     // Call the constructor with a0, a1, and a3 unmodified.
     __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   }
@@ -2818,6 +2822,139 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 }
 
 // static
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  // -- a0 : the number of arguments (not including the receiver)
+  // -- a1 : the constructor to call (can be any Object)
+  // -- a3 : the new target (either the same as the constructor or
+  //         the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  Register argc = a0;
+  Register constructor = a1;
+  Register new_target = a3;
+
+  Register scratch = t0;
+  Register scratch2 = t1;
+
+  Register spread = a2;
+  Register spread_map = a4;
+
+  Register native_context = a5;
+
+  __ ld(spread, MemOperand(sp, 0));
+  __ ld(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
+  __ ld(native_context, NativeContextMemOperand());
+
+  Label runtime_call, push_args;
+  // Check that the spread is an array.
+  __ lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
+  __ Branch(&runtime_call, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+  // Check that we have the original ArrayPrototype.
+  __ ld(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
+  __ ld(scratch2, ContextMemOperand(native_context,
+                                    Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+  __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
+
+  // Check that the ArrayPrototype hasn't been modified in a way that would
+  // affect iteration.
+  __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+  __ lw(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+  __ Branch(&runtime_call, ne, scratch,
+            Operand(Smi::FromInt(Isolate::kProtectorValid)));
+
+  // Check that the map of the initial array iterator hasn't changed.
+  __ ld(scratch,
+        ContextMemOperand(native_context,
+                          Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+  __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ ld(scratch2,
+        ContextMemOperand(native_context,
+                          Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+  __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
+
+  // For FastPacked kinds, iteration will have the same effect as simply
+  // accessing each property in order.
+  Label no_protector_check;
+  __ lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
+  __ DecodeField<Map::ElementsKindBits>(scratch);
+  __ Branch(&runtime_call, hi, scratch, Operand(LAST_FAST_ELEMENTS_KIND));
+  // For non-FastHoley kinds, we can skip the protector check.
+  __ Branch(&no_protector_check, eq, scratch, Operand(FAST_SMI_ELEMENTS));
+  __ Branch(&no_protector_check, eq, scratch, Operand(FAST_ELEMENTS));
+  __ Branch(&no_protector_check, eq, scratch, Operand(FAST_DOUBLE_ELEMENTS));
+  // Check the ArrayProtector cell.
+  __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+  __ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+  __ Branch(&runtime_call, ne, scratch,
+            Operand(Smi::FromInt(Isolate::kProtectorValid)));
+
+  __ bind(&no_protector_check);
+  // Load the FixedArray backing store.
+  __ ld(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
+  __ Branch(&push_args);
+
+  __ bind(&runtime_call);
+  {
+    // Call the builtin for the result of the spread.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(argc);
+    __ Push(constructor, new_target, argc, spread);
+    __ CallRuntime(Runtime::kSpreadIterableFixed);
+    __ mov(spread, v0);
+    __ Pop(constructor, new_target, argc);
+    __ SmiUntag(argc);
+  }
+
+  Register spread_len = a4;
+  __ bind(&push_args);
+  {
+    // Pop the spread argument off the stack.
+    __ Pop(scratch);
+    // Calculate the new nargs including the result of the spread.
+    __ lw(spread_len,
+          UntagSmiFieldMemOperand(spread, FixedArray::kLengthOffset));
+    // argc += spread_len - 1. Subtract 1 for the spread itself.
+    __ Daddu(argc, argc, spread_len);
+    __ Dsubu(argc, argc, Operand(1));
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+    // Make scratch the space we have left. The stack might already be
+    // overflowed here which will cause ip to become negative.
+    __ Dsubu(scratch, sp, scratch);
+    // Check if the arguments will overflow the stack.
+    __ dsll(at, spread_len, kPointerSizeLog2);
+    __ Branch(&done, gt, scratch, Operand(at));  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // Put the evaluated spread onto the stack as additional arguments.
+  {
+    __ mov(scratch, zero_reg);
+    Label done, loop;
+    __ bind(&loop);
+    __ Branch(&done, eq, scratch, Operand(spread_len));
+    __ Dlsa(scratch2, spread, scratch, kPointerSizeLog2);
+    __ ld(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
+    __ Push(scratch2);
+    __ Daddu(scratch, scratch, Operand(1));
+    __ Branch(&loop);
+    __ bind(&done);
+  }
+
+  // Dispatch.
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+// static
 void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   // -- a0 : requested object size (untagged)
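
Not part of the patch above: the following is a minimal stand-alone C++ sketch of the control flow that Generate_ConstructWithSpread emits, i.e. the fast-path checks that decide whether the spread's JSArray elements can be pushed directly from the backing store versus falling back to Runtime::kSpreadIterableFixed, plus the argument-count adjustment (argc += spread_len - 1). The names SpreadFastPathChecks, UseFastPath, and AdjustedArgc are hypothetical, introduced only for illustration; they are not V8 APIs.

// Stand-alone model of the builtin's fast-path decision and argc math.
#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical snapshot of the checks the generated code performs.
struct SpreadFastPathChecks {
  bool is_js_array;            // instance type == JS_ARRAY_TYPE
  bool original_prototype;     // map prototype == initial ArrayPrototype
  bool iterator_protector_ok;  // ArrayIteratorProtector cell still valid
  bool iterator_map_ok;        // initial array iterator map unchanged
  bool fast_elements_kind;     // elements kind <= LAST_FAST_ELEMENTS_KIND
  bool holey;                  // a FAST_HOLEY_* kind (needs ArrayProtector too)
  bool array_protector_ok;     // ArrayProtector cell still valid
};

// True when the elements can be pushed directly from the FixedArray backing
// store; otherwise the builtin calls Runtime::kSpreadIterableFixed.
bool UseFastPath(const SpreadFastPathChecks& c) {
  if (!c.is_js_array || !c.original_prototype) return false;
  if (!c.iterator_protector_ok || !c.iterator_map_ok) return false;
  if (!c.fast_elements_kind) return false;
  if (c.holey && !c.array_protector_ok) return false;
  return true;
}

// argc += spread_len - 1: the spread argument itself is popped and replaced
// by the spread_len values it expands to.
size_t AdjustedArgc(size_t argc, size_t spread_len) {
  return argc + spread_len - 1;
}

int main() {
  SpreadFastPathChecks checks{true, true, true, true, true, false, true};
  std::vector<int> spread = {1, 2, 3};
  std::cout << (UseFastPath(checks) ? "fast path" : "runtime call")
            << ", argc=" << AdjustedArgc(/*argc=*/3, spread.size()) << "\n";
  return 0;
}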
|