Chromium Code Reviews

Unified Diff: src/builtins/x64/builtins-x64.cc

Issue 2571563004: [Turbofan] Implement super calls with spread bytecode in assembly code. (Closed)
Patch Set: Change arm64 loop to be similar to the rest (created 3 years, 11 months ago)
Index: src/builtins/x64/builtins-x64.cc
diff --git a/src/builtins/x64/builtins-x64.cc b/src/builtins/x64/builtins-x64.cc
index 87dfc7d3a6b6305a4d7ec63c05c36e47b5418a7c..9e3c10d2f4bc3bead2d3c9801de14f747c0c5c8c 100644
--- a/src/builtins/x64/builtins-x64.cc
+++ b/src/builtins/x64/builtins-x64.cc
@@ -913,6 +913,50 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
}
}
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstructWithSpread(
rmcilroy 2017/01/11 15:24:44 High level question - this seems to be functionall
petermarshall 2017/01/11 16:50:03 Yes good point they are identical. I implemented t
rmcilroy 2017/01/12 10:40:38 Right, I wasn't suggesting to modify CallableType
petermarshall 2017/01/16 16:06:06 Ah I misread it a bit. How does this look now? Can
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -- rdi : the constructor to call (can be any Object)
+  //  -- rbx                 : the allocation site feedback (always undefined currently)
+ // -- rcx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -----------------------------------
+ Label stack_overflow;
+
+ // Add a stack check before pushing arguments.
+ Generate_StackOverflowCheck(masm, rax, r8, r9, &stack_overflow);
+
+ // Pop return address to allow tail-call after pushing arguments.
+ __ PopReturnAddressTo(kScratchRegister);
+
+ // Push slot for the receiver to be constructed.
+ __ Push(Immediate(0));
+
+ // rcx and r8 will be modified.
+ Generate_InterpreterPushArgs(masm, rax, rcx, r8);
+
+ // Push return address in preparation for the tail-call.
+ __ PushReturnAddressFrom(kScratchRegister);
+
+ __ AssertUndefinedOrAllocationSite(rbx);
+ // Call the constructor (rax, rdx, rdi passed on).
+ __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+ RelocInfo::CODE_TARGET);
+
+ // Throw stack overflow exception.
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // This should be unreachable.
+ __ int3();
+ }
+}
+
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
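
[Note] A minimal C++ sketch (not part of the patch, and not V8 code) of the stack shuffle the new Generate_InterpreterPushArgsAndConstructWithSpread builtin performs before tail-calling ConstructWithSpread. The vector stands in for the machine stack (the real stack grows downward); register names from the builtin appear in the comments:

#include <cstdint>
#include <vector>

void PushArgsAndConstructWithSpread(std::vector<uint64_t>& stack,
                                    const uint64_t* first_arg,  // rcx
                                    int arg_count) {            // rax
  // PopReturnAddressTo(kScratchRegister): take the return address off first.
  uint64_t return_address = stack.back();
  stack.pop_back();
  // Push(Immediate(0)): reserve the slot for the receiver to be constructed.
  stack.push_back(0);
  // Generate_InterpreterPushArgs: the arguments sit consecutively above rcx
  // and are pushed in that order.
  for (int i = 0; i < arg_count; ++i) stack.push_back(first_arg[i]);
  // PushReturnAddressFrom(kScratchRegister): restore it for the tail-call.
  stack.push_back(return_address);
  // Control then jumps to ConstructWithSpread with rax, rdx and rdi intact.
}

The Generate_StackOverflowCheck at the top of the builtin runs before any of this, so the pushes cannot run past the real stack limit.
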
@@ -2881,6 +2925,138 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+// static
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -- rdi : the constructor to call (can be any Object)
+ // -----------------------------------
+
+ // Load the spread argument into rbx.
+ __ movp(rbx, Operand(rsp, kPointerSize));
+ // Load the map of the spread into r15.
+ __ movp(r15, FieldOperand(rbx, HeapObject::kMapOffset));
+ // Load native context into r14.
+ __ movp(r14, NativeContextOperand());
+
+ Label runtime_call, push_args;
+ // Check that the spread is an array.
+ __ CmpInstanceType(r15, JS_ARRAY_TYPE);
+ __ j(not_equal, &runtime_call);
+
+ // Check that we have the original ArrayPrototype.
+ __ movp(rcx, FieldOperand(r15, Map::kPrototypeOffset));
+ __ cmpp(rcx, ContextOperand(r14, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ j(not_equal, &runtime_call);
+
+ // Check that the ArrayPrototype hasn't been modified in a way that would
+ // affect iteration.
+ __ LoadRoot(rcx, Heap::kArrayIteratorProtectorRootIndex);
+ __ Cmp(FieldOperand(rcx, Cell::kValueOffset),
+ Smi::FromInt(Isolate::kProtectorValid));
+ __ j(not_equal, &runtime_call);
+
+ // Check that the map of the initial array iterator hasn't changed.
+ __ movp(rcx,
+ ContextOperand(r14, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+ __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ cmpp(rcx, ContextOperand(
+ r14, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+ __ j(not_equal, &runtime_call);
+
+ // For FastPacked kinds, iteration will have the same effect as simply
+ // accessing each property in order.
+ Label no_protector_check;
+ __ movzxbp(rcx, FieldOperand(r15, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(rcx);
+ __ cmpp(rcx, Immediate(LAST_FAST_ELEMENTS_KIND));
+ __ j(above, &runtime_call);
+ // For non-FastHoley kinds, we can skip the protector check.
+ __ cmpp(rcx, Immediate(FAST_SMI_ELEMENTS));
+ __ j(equal, &no_protector_check);
+ __ cmpp(rcx, Immediate(FAST_ELEMENTS));
+ __ j(equal, &no_protector_check);
+ __ cmpp(rcx, Immediate(FAST_DOUBLE_ELEMENTS));
+ __ j(equal, &no_protector_check);
+ // Check the ArrayProtector cell.
+ __ LoadRoot(rcx, Heap::kArrayProtectorRootIndex);
+ __ Cmp(FieldOperand(rcx, PropertyCell::kValueOffset),
+ Smi::FromInt(Isolate::kProtectorValid));
+ __ j(not_equal, &runtime_call);
+
+ __ bind(&no_protector_check);
+ // Load the FixedArray backing store.
+ __ movp(rbx, FieldOperand(rbx, JSArray::kElementsOffset));
+ __ jmp(&push_args);
+
+ __ bind(&runtime_call);
+ {
+ // Call the builtin for the result of the spread.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rdi); // target
+ __ Push(rdx); // new target
+ __ Integer32ToSmi(rax, rax);
+ __ Push(rax); // nargs
+ __ Push(rbx);
+ __ CallRuntime(Runtime::kSpreadIterableFixed);
+ __ movp(rbx, rax);
+ __ Pop(rax); // nargs
+ __ SmiToInteger32(rax, rax);
+ __ Pop(rdx); // new target
+ __ Pop(rdi); // target
+ }
+
+ __ bind(&push_args);
+ {
+ // Pop the return address and spread argument.
+ __ PopReturnAddressTo(r8);
+ __ Pop(rcx);
+
+ // Calculate the new nargs including the result of the spread.
+ __ SmiToInteger32(r9, FieldOperand(rbx, FixedArray::kLengthOffset));
+ // rax += r9 - 1. Subtract 1 for the spread itself.
+ __ leap(rax, Operand(rax, r9, times_1, -1));
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+ __ movp(rcx, rsp);
+ // Make rcx the space we have left. The stack might already be overflowed
+ // here which will cause rcx to become negative.
+ __ subp(rcx, kScratchRegister);
+ __ sarp(rcx, Immediate(kPointerSizeLog2));
+ // Check if the arguments will overflow the stack.
+ __ cmpp(rcx, r9);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // Put the evaluated spread onto the stack as additional arguments.
+ {
+ __ Set(rcx, 0);
+ Label done, loop;
+ __ bind(&loop);
+ __ cmpl(rcx, r9);
+ __ j(equal, &done, Label::kNear);
+ __ movp(kScratchRegister, FieldOperand(rbx, rcx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ Push(kScratchRegister);
+ __ incl(rcx);
+ __ jmp(&loop);
+ __ bind(&done);
+ __ PushReturnAddressFrom(r8);
+ }
+ // Dispatch.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
Register function_template_info,
Register scratch0, Register scratch1,

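[Note] For reference, a hedged C++ restatement (not V8 code; the enum and field names below are stand-ins) of the decisions Generate_ConstructWithSpread makes above. The spread is unpacked straight from its FixedArray backing store only if every check between the function entry and &no_protector_check passes; otherwise control falls back to Runtime::kSpreadIterableFixed. The two helper functions at the bottom restate the argument-count and stack-limit arithmetic:

#include <cstdint>

// ElementsKind ordering assumed by the comparisons in the builtin.
enum ElementsKind {
  FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
  FAST_ELEMENTS, FAST_HOLEY_ELEMENTS,
  FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
  LAST_FAST_ELEMENTS_KIND = FAST_HOLEY_DOUBLE_ELEMENTS,
  DICTIONARY_ELEMENTS
};

struct SpreadChecks {
  bool is_js_array;                  // CmpInstanceType(r15, JS_ARRAY_TYPE)
  bool has_initial_array_prototype;  // Map::kPrototypeOffset comparison
  bool iterator_protector_valid;     // kArrayIteratorProtectorRootIndex cell
  bool iterator_map_unchanged;       // initial array iterator map comparison
  bool array_protector_valid;        // kArrayProtectorRootIndex cell
  ElementsKind kind;                 // Map::ElementsKindBits
};

// Mirrors the branch structure between the entry and &no_protector_check.
bool CanUseFastPath(const SpreadChecks& s) {
  if (!s.is_js_array) return false;
  if (!s.has_initial_array_prototype) return false;
  if (!s.iterator_protector_valid) return false;
  if (!s.iterator_map_unchanged) return false;
  if (s.kind > LAST_FAST_ELEMENTS_KIND) return false;
  // Packed kinds skip the ArrayProtector check entirely.
  if (s.kind == FAST_SMI_ELEMENTS || s.kind == FAST_ELEMENTS ||
      s.kind == FAST_DOUBLE_ELEMENTS) {
    return true;
  }
  // Holey kinds also need the ArrayProtector cell, since iterating over
  // holes may consult a (possibly modified) prototype chain.
  return s.array_protector_valid;
}

// leap(rax, Operand(rax, r9, times_1, -1)): the spread argument itself is
// consumed and replaced by its unpacked elements, hence the -1.
int NewArgCount(int arg_count /* rax */, int spread_length /* r9 */) {
  return arg_count + spread_length - 1;
}

// Real-stack-limit check: the number of free pointer-sized slots must
// exceed the number of elements about to be pushed. The comparison is
// signed because rsp may already sit below the limit.
bool HasStackSpace(intptr_t rsp, intptr_t real_stack_limit,
                   int spread_length /* r9 */) {
  intptr_t free_slots = (rsp - real_stack_limit) >> 3;  // kPointerSizeLog2
  return free_slots > spread_length;
}

The loop at &loop then pushes the spread_length elements of the backing store in index order before restoring the return address from r8 and dispatching to the generic Construct builtin.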