| Index: src/a64/builtins-a64.cc
|
| diff --git a/src/a64/builtins-a64.cc b/src/a64/builtins-a64.cc
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..ee6654ab758fd22b31c29bfbb686bf4c3795693e
|
| --- /dev/null
|
| +++ b/src/a64/builtins-a64.cc
|
| @@ -0,0 +1,1483 @@
|
| +// Copyright 2013 the V8 project authors. All rights reserved.
|
| +// Redistribution and use in source and binary forms, with or without
|
| +// modification, are permitted provided that the following conditions are
|
| +// met:
|
| +//
|
| +// * Redistributions of source code must retain the above copyright
|
| +// notice, this list of conditions and the following disclaimer.
|
| +// * Redistributions in binary form must reproduce the above
|
| +// copyright notice, this list of conditions and the following
|
| +// disclaimer in the documentation and/or other materials provided
|
| +// with the distribution.
|
| +// * Neither the name of Google Inc. nor the names of its
|
| +// contributors may be used to endorse or promote products derived
|
| +// from this software without specific prior written permission.
|
| +//
|
| +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| +
|
| +#include "v8.h"
|
| +
|
| +#if V8_TARGET_ARCH_A64
|
| +
|
| +#include "codegen.h"
|
| +#include "debug.h"
|
| +#include "deoptimizer.h"
|
| +#include "full-codegen.h"
|
| +#include "runtime.h"
|
| +#include "stub-cache.h"
|
| +
|
| +namespace v8 {
|
| +namespace internal {
|
| +
|
| +
|
| +#define __ ACCESS_MASM(masm)
|
| +
|
| +
|
| +// Load the built-in Array function from the current context.
|
| +static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
|
| + // Load the native context.
|
| + __ Ldr(result, GlobalObjectMemOperand());
|
| + __ Ldr(result,
|
| + FieldMemOperand(result, GlobalObject::kNativeContextOffset));
|
| +  // Load the Array function from the native context.
|
| + __ Ldr(result,
|
| + MemOperand(result,
|
| + Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
|
| +}
|
| +
|
| +
|
| +// Load the built-in InternalArray function from the current context.
|
| +static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
|
| + Register result) {
|
| + // Load the native context.
|
| + __ Ldr(result, GlobalObjectMemOperand());
|
| + __ Ldr(result,
|
| + FieldMemOperand(result, GlobalObject::kNativeContextOffset));
|
| + // Load the InternalArray function from the native context.
|
| + __ Ldr(result, ContextMemOperand(result,
|
| + Context::INTERNAL_ARRAY_FUNCTION_INDEX));
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_Adaptor(MacroAssembler* masm,
|
| + CFunctionId id,
|
| + BuiltinExtraArguments extra_args) {
|
| + // ----------- S t a t e -------------
|
| + // -- x0 : number of arguments excluding receiver
|
| + // -- x1 : called function (only guaranteed when
|
| + // extra_args requires it)
|
| + // -- cp : context
|
| + // -- sp[0] : last argument
|
| + // -- ...
|
| +  //  -- sp[8 * (argc - 1)] : first argument (argc == x0)
|
| +  //  -- sp[8 * argc] : receiver
|
| + // -----------------------------------
|
| +
|
| + // Insert extra arguments.
|
| + int num_extra_args = 0;
|
| + if (extra_args == NEEDS_CALLED_FUNCTION) {
|
| + num_extra_args = 1;
|
| + __ Push(x1);
|
| + } else {
|
| + ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
|
| + }
|
| +
|
| + // JumpToExternalReference expects x0 to contain the number of arguments
|
| + // including the receiver and the extra arguments.
|
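| +  // For example, with NEEDS_CALLED_FUNCTION and two arguments, x0 goes from
|
| +  // 2 to 4: the two arguments, the pushed function and the receiver.
|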
| + __ Add(x0, x0, num_extra_args + 1);
|
| + __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
|
| + // ----------- S t a t e -------------
|
| + // -- x0 : number of arguments
|
| + // -- lr : return address
|
| + // -- sp[...]: constructor arguments
|
| + // -----------------------------------
|
| + ASM_LOCATION("Builtins::Generate_InternalArrayCode");
|
| + Label generic_array_code;
|
| +
|
| + // Get the InternalArray function.
|
| + GenerateLoadInternalArrayFunction(masm, x1);
|
| +
|
| + if (FLAG_debug_code) {
|
| +    // The initial map for the builtin InternalArray function should be a map.
|
| + __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
|
| + __ Tst(x10, kSmiTagMask);
|
| + __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction);
|
| + __ CompareObjectType(x10, x11, x12, MAP_TYPE);
|
| + __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
|
| + }
|
| +
|
| + // Run the native code for the InternalArray function called as a normal
|
| + // function.
|
| + InternalArrayConstructorStub stub(masm->isolate());
|
| + __ TailCallStub(&stub);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
|
| + // ----------- S t a t e -------------
|
| + // -- x0 : number of arguments
|
| + // -- lr : return address
|
| + // -- sp[...]: constructor arguments
|
| + // -----------------------------------
|
| + ASM_LOCATION("Builtins::Generate_ArrayCode");
|
| + Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
|
| +
|
| + // Get the Array function.
|
| + GenerateLoadArrayFunction(masm, x1);
|
| +
|
| + if (FLAG_debug_code) {
|
| +    // The initial map for the builtin Array function should be a map.
|
| + __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
|
| + __ Tst(x10, kSmiTagMask);
|
| + __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
|
| + __ CompareObjectType(x10, x11, x12, MAP_TYPE);
|
| + __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
|
| + }
|
| +
|
| + // Run the native code for the Array function called as a normal function.
|
| + Handle<Object> undefined_sentinel(
|
| + masm->isolate()->heap()->undefined_value(),
|
| + masm->isolate());
|
| + __ Mov(x2, Operand(undefined_sentinel));
|
| + ArrayConstructorStub stub(masm->isolate());
|
| + __ TailCallStub(&stub);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
|
| + // ----------- S t a t e -------------
|
| + // -- x0 : number of arguments
|
| + // -- x1 : constructor function
|
| + // -- lr : return address
|
| + // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
|
| + // -- sp[argc * 8] : receiver
|
| + // -----------------------------------
|
| + ASM_LOCATION("Builtins::Generate_StringConstructCode");
|
| + Counters* counters = masm->isolate()->counters();
|
| + __ IncrementCounter(counters->string_ctor_calls(), 1, x10, x11);
|
| +
|
| + Register argc = x0;
|
| + Register function = x1;
|
| + if (FLAG_debug_code) {
|
| + __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, x10);
|
| + __ Cmp(function, x10);
|
| + __ Assert(eq, kUnexpectedStringFunction);
|
| + }
|
| +
|
| +  // Load the first argument into x0 and get rid of the rest.
|
| + Label no_arguments;
|
| + __ Cbz(argc, &no_arguments);
|
| +  // First arg = sp[(argc - 1) * 8].
|
| + __ Sub(argc, argc, 1);
|
| + __ Claim(argc, kXRegSizeInBytes);
|
| +  // jssp now points to args[0]; load and drop args[0] + receiver.
|
| + // TODO(jbramley): Consider adding ClaimAndPoke.
|
| + __ Ldr(argc, MemOperand(jssp, 2 * kPointerSize, PostIndex));
|
| +
|
| + Register argument = x2;
|
| + Label not_cached, argument_is_string;
|
| + __ LookupNumberStringCache(argc, // Input.
|
| + argument, // Result.
|
| + x10, // Scratch.
|
| + x11, // Scratch.
|
| + x12, // Scratch.
|
| + ¬_cached);
|
| + __ IncrementCounter(counters->string_ctor_cached_number(), 1, x10, x11);
|
| + __ Bind(&argument_is_string);
|
| +
|
| + // ----------- S t a t e -------------
|
| + // -- x2 : argument converted to string
|
| + // -- x1 : constructor function
|
| + // -- lr : return address
|
| + // -----------------------------------
|
| +
|
| + Label gc_required;
|
| + Register new_obj = x0;
|
| + __ Allocate(JSValue::kSize, new_obj, x10, x11, &gc_required, TAG_OBJECT);
|
| +
|
| + // Initialize the String object.
|
| + Register map = x3;
|
| + __ LoadGlobalFunctionInitialMap(function, map, x10);
|
| + if (FLAG_debug_code) {
|
| + __ Ldrb(x4, FieldMemOperand(map, Map::kInstanceSizeOffset));
|
| + __ Cmp(x4, JSValue::kSize >> kPointerSizeLog2);
|
| + __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
|
| + __ Ldrb(x4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
|
| + __ Cmp(x4, 0);
|
| + __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
|
| + }
|
| + __ Str(map, FieldMemOperand(new_obj, HeapObject::kMapOffset));
|
| +
|
| + Register empty = x3;
|
| + __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
|
| + __ Str(empty, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
|
| + __ Str(empty, FieldMemOperand(new_obj, JSObject::kElementsOffset));
|
| +
|
| + __ Str(argument, FieldMemOperand(new_obj, JSValue::kValueOffset));
|
| +
|
| + // Ensure the object is fully initialized.
|
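| +  // A JSValue holds exactly four pointer-sized fields (map, properties,
|
| +  // elements and value), all of which have been written above, so the
|
| +  // assert below guarantees there are no uninitialized words in the object.
|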
| + STATIC_ASSERT(JSValue::kSize == (4 * kPointerSize));
|
| +
|
| + __ Ret();
|
| +
|
| + // The argument was not found in the number to string cache. Check
|
| + // if it's a string already before calling the conversion builtin.
|
| + Label convert_argument;
|
| + __ Bind(¬_cached);
|
| + __ JumpIfSmi(argc, &convert_argument);
|
| +
|
| + // Is it a String?
|
| + __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
|
| + __ Ldrb(x11, FieldMemOperand(x10, Map::kInstanceTypeOffset));
|
| + __ Tbnz(x11, MaskToBit(kIsNotStringMask), &convert_argument);
|
| + __ Mov(argument, argc);
|
| + __ IncrementCounter(counters->string_ctor_string_value(), 1, x10, x11);
|
| + __ B(&argument_is_string);
|
| +
|
| + // Invoke the conversion builtin and put the result into x2.
|
| + __ Bind(&convert_argument);
|
| + __ Push(function); // Preserve the function.
|
| + __ IncrementCounter(counters->string_ctor_conversions(), 1, x10, x11);
|
| + {
|
| + FrameScope scope(masm, StackFrame::INTERNAL);
|
| + __ Push(argc);
|
| + __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
|
| + }
|
| + __ Pop(function);
|
| + __ Mov(argument, x0);
|
| + __ B(&argument_is_string);
|
| +
|
| + // Load the empty string into x2, remove the receiver from the
|
| + // stack, and jump back to the case where the argument is a string.
|
| + __ Bind(&no_arguments);
|
| + __ LoadRoot(argument, Heap::kempty_stringRootIndex);
|
| + __ Drop(1);
|
| + __ B(&argument_is_string);
|
| +
|
| + // At this point the argument is already a string. Call runtime to create a
|
| + // string wrapper.
|
| + __ Bind(&gc_required);
|
| + __ IncrementCounter(counters->string_ctor_gc_required(), 1, x10, x11);
|
| + {
|
| + FrameScope scope(masm, StackFrame::INTERNAL);
|
| + __ Push(argument);
|
| + __ CallRuntime(Runtime::kNewStringWrapper, 1);
|
| + }
|
| + __ Ret();
|
| +}
|
| +
|
| +
|
| +static void CallRuntimePassFunction(MacroAssembler* masm,
|
| + Runtime::FunctionId function_id) {
|
| + FrameScope scope(masm, StackFrame::INTERNAL);
|
| + // - Push a copy of the function onto the stack.
|
| + // - Push another copy as a parameter to the runtime call.
|
| + __ Push(x1, x1);
|
| +
|
| + __ CallRuntime(function_id, 1);
|
| +
|
| +  // - Restore function.
|
| + __ Pop(x1);
|
| +}
|
| +
|
| +
|
| +static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
|
| + __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
|
| + __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
|
| + __ Br(x2);
|
| +}
|
| +
|
| +
|
| +static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
|
| + __ Add(x0, x0, Code::kHeaderSize - kHeapObjectTag);
|
| + __ Br(x0);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
|
| + // Checking whether the queued function is ready for install is optional,
|
| + // since we come across interrupts and stack checks elsewhere. However, not
|
| + // checking may delay installing ready functions, and always checking would be
|
| + // quite expensive. A good compromise is to first check against stack limit as
|
| + // a cue for an interrupt signal.
|
| + Label ok;
|
| + __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
|
| + __ B(hs, &ok);
|
| +
|
| + CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
|
| + GenerateTailCallToReturnedCode(masm);
|
| +
|
| + __ Bind(&ok);
|
| + GenerateTailCallToSharedCode(masm);
|
| +}
|
| +
|
| +
|
| +static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
| + bool is_api_function,
|
| + bool count_constructions) {
|
| + // ----------- S t a t e -------------
|
| + // -- x0 : number of arguments
|
| + // -- x1 : constructor function
|
| + // -- lr : return address
|
| + // -- sp[...]: constructor arguments
|
| + // -----------------------------------
|
| +
|
| + ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
|
| + // Should never count constructions for api objects.
|
| + ASSERT(!is_api_function || !count_constructions);
|
| +
|
| + Isolate* isolate = masm->isolate();
|
| +
|
| + // Enter a construct frame.
|
| + {
|
| + FrameScope scope(masm, StackFrame::CONSTRUCT);
|
| +
|
| + // Preserve the two incoming parameters on the stack.
|
| + Register argc = x0;
|
| + Register constructor = x1;
|
| + // x1: constructor function
|
| + __ SmiTag(argc);
|
| + __ Push(argc, constructor);
|
| +    // sp[0]: constructor function
|
| + // sp[1]: number of arguments (smi-tagged)
|
| +
|
| + // Try to allocate the object without transitioning into C code. If any of
|
| + // the preconditions is not met, the code bails out to the runtime call.
|
| + Label rt_call, allocated;
|
| + if (FLAG_inline_new) {
|
| + Label undo_allocation;
|
| +#if ENABLE_DEBUGGER_SUPPORT
|
| + ExternalReference debug_step_in_fp =
|
| + ExternalReference::debug_step_in_fp_address(isolate);
|
| + __ Mov(x2, Operand(debug_step_in_fp));
|
| + __ Ldr(x2, MemOperand(x2));
|
| + __ Cbnz(x2, &rt_call);
|
| +#endif
|
| + // Load the initial map and verify that it is in fact a map.
|
| + Register init_map = x2;
|
| + __ Ldr(init_map,
|
| + FieldMemOperand(constructor,
|
| + JSFunction::kPrototypeOrInitialMapOffset));
|
| + __ JumpIfSmi(init_map, &rt_call);
|
| + __ JumpIfNotObjectType(init_map, x10, x11, MAP_TYPE, &rt_call);
|
| +
|
| + // Check that the constructor is not constructing a JSFunction (see
|
| +      // comments in Runtime_NewObject in runtime.cc), in which case the initial
|
| + // map's instance type would be JS_FUNCTION_TYPE.
|
| + __ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE);
|
| + __ B(eq, &rt_call);
|
| +
|
| + if (count_constructions) {
|
| + Label allocate;
|
| + // Decrease generous allocation count.
|
| + __ Ldr(x3, FieldMemOperand(constructor,
|
| + JSFunction::kSharedFunctionInfoOffset));
|
| + MemOperand constructor_count =
|
| + FieldMemOperand(x3, SharedFunctionInfo::kConstructionCountOffset);
|
| + __ Ldrb(x4, constructor_count);
|
| + __ Subs(x4, x4, 1);
|
| + __ Strb(x4, constructor_count);
|
| + __ B(ne, &allocate);
|
| +
|
| + // Push the constructor and map to the stack, and the constructor again
|
| + // as argument to the runtime call.
|
| + __ Push(constructor, init_map, constructor);
|
| + // The call will replace the stub, so the countdown is only done once.
|
| + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
|
| + __ Pop(init_map, constructor);
|
| + __ Bind(&allocate);
|
| + }
|
| +
|
| + // Now allocate the JSObject on the heap.
|
| + Register obj_size = x3;
|
| + Register new_obj = x4;
|
| + __ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
|
| + __ Allocate(obj_size, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS);
|
| +
|
| + // Allocated the JSObject, now initialize the fields. Map is set to
|
| + // initial map and properties and elements are set to empty fixed array.
|
| + // NB. the object pointer is not tagged, so MemOperand is used.
|
| + Register empty = x5;
|
| + __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
|
| + __ Str(init_map, MemOperand(new_obj, JSObject::kMapOffset));
|
| + __ Str(empty, MemOperand(new_obj, JSObject::kPropertiesOffset));
|
| + __ Str(empty, MemOperand(new_obj, JSObject::kElementsOffset));
|
| +
|
| + Register first_prop = x5;
|
| + __ Add(first_prop, new_obj, JSObject::kHeaderSize);
|
| +
|
| + // Fill all of the in-object properties with the appropriate filler.
|
| + Register obj_end = x6;
|
| + __ Add(obj_end, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
|
| + Register undef = x7;
|
| + __ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
|
| +
|
| + // Obtain number of pre-allocated property fields and in-object
|
| + // properties.
|
| + Register prealloc_fields = x10;
|
| + Register inobject_props = x11;
|
| + Register inst_sizes = x11;
|
| + __ Ldr(inst_sizes, FieldMemOperand(init_map, Map::kInstanceSizesOffset));
|
| + __ Ubfx(prealloc_fields, inst_sizes,
|
| + Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
|
| + kBitsPerByte);
|
| + __ Ubfx(inobject_props, inst_sizes,
|
| + Map::kInObjectPropertiesByte * kBitsPerByte, kBitsPerByte);
|
| +
|
| + if (count_constructions) {
|
| +        // Register first_non_prealloc is the address of the first field after
|
| + // pre-allocated fields.
|
| + Register first_non_prealloc = x12;
|
| + __ Add(first_non_prealloc, first_prop,
|
| + Operand(prealloc_fields, LSL, kPointerSizeLog2));
|
| +
|
| + if (FLAG_debug_code) {
|
| + __ Cmp(first_non_prealloc, obj_end);
|
| + __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
|
| + }
|
| + __ InitializeFieldsWithFiller(first_prop, first_non_prealloc, undef);
|
| + // To allow for truncation.
|
| + __ LoadRoot(x12, Heap::kOnePointerFillerMapRootIndex);
|
| + __ InitializeFieldsWithFiller(first_prop, obj_end, x12);
|
| + } else {
|
| + __ InitializeFieldsWithFiller(first_prop, obj_end, undef);
|
| + }
|
| +
|
| + // Add the object tag to make the JSObject real, so that we can continue
|
| + // and jump into the continuation code at any time from now on. Any
|
| + // failures need to undo the allocation, so that the heap is in a
|
| + // consistent state and verifiable.
|
| + __ Add(new_obj, new_obj, kHeapObjectTag);
|
| +
|
| + // Check if a non-empty properties array is needed. Continue with
|
| + // allocated object if not, or fall through to runtime call if it is.
|
| + Register element_count = x3;
|
| + __ Ldrb(x3, FieldMemOperand(init_map, Map::kUnusedPropertyFieldsOffset));
|
| +      // The instance sizes field contains both pre-allocated property fields
|
| + // and in-object properties.
|
| + __ Add(x3, x3, prealloc_fields);
|
| + __ Subs(element_count, x3, inobject_props);
|
| +
|
| + // Done if no extra properties are to be allocated.
|
| + __ B(eq, &allocated);
|
| + __ Assert(pl, kPropertyAllocationCountFailed);
|
| +
|
| +      // The allocation size, in words, is the number of extra property slots
|
| +      // plus the FixedArray header size (also in words).
|
| + Register new_array = x5;
|
| + Register array_size = x6;
|
| + __ Add(array_size, element_count, FixedArray::kHeaderSize / kPointerSize);
|
| + __ Allocate(array_size, new_array, x11, x12, &undo_allocation,
|
| + static_cast<AllocationFlags>(RESULT_CONTAINS_TOP |
|
| + SIZE_IN_WORDS));
|
| +
|
| + Register array_map = x10;
|
| + __ LoadRoot(array_map, Heap::kFixedArrayMapRootIndex);
|
| + __ Str(array_map, MemOperand(new_array, FixedArray::kMapOffset));
|
| + __ SmiTag(x0, element_count);
|
| + __ Str(x0, MemOperand(new_array, FixedArray::kLengthOffset));
|
| +
|
| + // Initialize the fields to undefined.
|
| + Register elements = x10;
|
| + Register elements_end = x11;
|
| + __ Add(elements, new_array, FixedArray::kHeaderSize);
|
| + __ Add(elements_end, elements,
|
| + Operand(element_count, LSL, kPointerSizeLog2));
|
| + __ InitializeFieldsWithFiller(elements, elements_end, undef);
|
| +
|
| + // Store the initialized FixedArray into the properties field of the
|
| + // JSObject.
|
| + __ Add(new_array, new_array, kHeapObjectTag);
|
| + __ Str(new_array, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
|
| +
|
| + // Continue with JSObject being successfully allocated.
|
| + __ B(&allocated);
|
| +
|
| + // Undo the setting of the new top so that the heap is verifiable. For
|
| + // example, the map's unused properties potentially do not match the
|
| +      // allocated object's unused properties.
|
| + __ Bind(&undo_allocation);
|
| + __ UndoAllocationInNewSpace(new_obj, x14);
|
| + }
|
| +
|
| + // Allocate the new receiver object using the runtime call.
|
| + __ Bind(&rt_call);
|
| + __ Push(constructor); // Argument for Runtime_NewObject.
|
| + __ CallRuntime(Runtime::kNewObject, 1);
|
| + __ Mov(x4, x0);
|
| +
|
| + // Receiver for constructor call allocated.
|
| + // x4: JSObject
|
| + __ Bind(&allocated);
|
| + __ Push(x4, x4);
|
| +
|
| + // Reload the number of arguments from the stack.
|
| + // Set it up in x0 for the function call below.
|
| + // jssp[0]: receiver
|
| + // jssp[1]: receiver
|
| + // jssp[2]: constructor function
|
| + // jssp[3]: number of arguments (smi-tagged)
|
| + __ Peek(constructor, 2 * kXRegSizeInBytes); // Load constructor.
|
| + __ Peek(argc, 3 * kXRegSizeInBytes); // Load number of arguments.
|
| + __ SmiUntag(argc);
|
| +
|
| + // Set up pointer to last argument.
|
| + __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
|
| +
|
| + // Copy arguments and receiver to the expression stack.
|
| + // Copy 2 values every loop to use ldp/stp.
|
| + // x0: number of arguments
|
| + // x1: constructor function
|
| + // x2: address of last argument (caller sp)
|
| + // jssp[0]: receiver
|
| + // jssp[1]: receiver
|
| + // jssp[2]: constructor function
|
| + // jssp[3]: number of arguments (smi-tagged)
|
| + // Compute the start address of the copy in x3.
|
| + __ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
|
| + Label loop, entry, done_copying_arguments;
|
| + __ B(&entry);
|
| + __ Bind(&loop);
|
| + __ Ldp(x10, x11, MemOperand(x3, -2 * kPointerSize, PreIndex));
|
| + __ Push(x11, x10);
|
| + __ Bind(&entry);
|
| + __ Cmp(x3, x2);
|
| + __ B(gt, &loop);
|
| + // Because we copied values 2 by 2 we may have copied one extra value.
|
| + // Drop it if that is the case.
|
| + __ B(eq, &done_copying_arguments);
|
| + __ Drop(1);
|
| + __ Bind(&done_copying_arguments);
|
| +
|
| + // Call the function.
|
| + // x0: number of arguments
|
| + // x1: constructor function
|
| + if (is_api_function) {
|
| + __ Ldr(cp, FieldMemOperand(constructor, JSFunction::kContextOffset));
|
| + Handle<Code> code =
|
| + masm->isolate()->builtins()->HandleApiCallConstruct();
|
| + __ Call(code, RelocInfo::CODE_TARGET);
|
| + } else {
|
| + ParameterCount actual(argc);
|
| + __ InvokeFunction(constructor, actual, CALL_FUNCTION, NullCallWrapper());
|
| + }
|
| +
|
| + // Store offset of return address for deoptimizer.
|
| + if (!is_api_function && !count_constructions) {
|
| + masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
|
| + }
|
| +
|
| + // Restore the context from the frame.
|
| + // x0: result
|
| + // jssp[0]: receiver
|
| + // jssp[1]: constructor function
|
| + // jssp[2]: number of arguments (smi-tagged)
|
| + __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
| +
|
| + // If the result is an object (in the ECMA sense), we should get rid
|
| + // of the receiver and use the result; see ECMA-262 section 13.2.2-7
|
| + // on page 74.
|
| + Label use_receiver, exit;
|
| +
|
| + // If the result is a smi, it is *not* an object in the ECMA sense.
|
| + // x0: result
|
| + // jssp[0]: receiver (newly allocated object)
|
| + // jssp[1]: constructor function
|
| + // jssp[2]: number of arguments (smi-tagged)
|
| + __ JumpIfSmi(x0, &use_receiver);
|
| +
|
| + // If the type of the result (stored in its map) is less than
|
| + // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
|
| + __ JumpIfObjectType(x0, x1, x3, FIRST_SPEC_OBJECT_TYPE, &exit, ge);
|
| +
|
| + // Throw away the result of the constructor invocation and use the
|
| + // on-stack receiver as the result.
|
| + __ Bind(&use_receiver);
|
| + __ Peek(x0, 0);
|
| +
|
| + // Remove the receiver from the stack, remove caller arguments, and
|
| + // return.
|
| + __ Bind(&exit);
|
| + // x0: result
|
| + // jssp[0]: receiver (newly allocated object)
|
| + // jssp[1]: constructor function
|
| + // jssp[2]: number of arguments (smi-tagged)
|
| + __ Peek(x1, 2 * kXRegSizeInBytes);
|
| +
|
| + // Leave construct frame.
|
| + }
|
| +
|
| + __ DropBySMI(x1);
|
| + __ Drop(1);
|
| + __ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2);
|
| + __ Ret();
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
|
| + Generate_JSConstructStubHelper(masm, false, true);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
|
| + Generate_JSConstructStubHelper(masm, false, false);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
|
| + Generate_JSConstructStubHelper(masm, true, false);
|
| +}
|
| +
|
| +
|
| +// Input:
|
| +// x0: code entry.
|
| +// x1: function.
|
| +// x2: receiver.
|
| +// x3: argc.
|
| +// x4: argv.
|
| +// Output:
|
| +// x0: result.
|
| +static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
|
| + bool is_construct) {
|
| + // Called from JSEntryStub::GenerateBody().
|
| + Register function = x1;
|
| + Register receiver = x2;
|
| + Register argc = x3;
|
| + Register argv = x4;
|
| +
|
| + ProfileEntryHookStub::MaybeCallEntryHook(masm);
|
| +
|
| + // Clear the context before we push it when entering the internal frame.
|
| + __ Mov(cp, 0);
|
| +
|
| + {
|
| + // Enter an internal frame.
|
| + FrameScope scope(masm, StackFrame::INTERNAL);
|
| +
|
| + // Set up the context from the function argument.
|
| + __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
|
| +
|
| + __ InitializeRootRegister();
|
| +
|
| + // Push the function and the receiver onto the stack.
|
| + __ Push(function, receiver);
|
| +
|
| + // Copy arguments to the stack in a loop, in reverse order.
|
| + // x3: argc.
|
| + // x4: argv.
|
| + Label loop, entry;
|
| + // Compute the copy end address.
|
| + __ Add(x10, argv, Operand(argc, LSL, kPointerSizeLog2));
|
| +
|
| + // TODO(all): This can potentially be optimized with ldp/stp to speed up
|
| + // arguments passing from C++ to JS.
|
| + __ B(&entry);
|
| + __ Bind(&loop);
|
| + __ Ldr(x11, MemOperand(argv, kPointerSize, PostIndex));
|
| + __ Ldr(x12, MemOperand(x11)); // Dereference the handle.
|
| + __ Push(x12); // Push the argument.
|
| + __ Bind(&entry);
|
| + __ Cmp(x10, argv);
|
| + __ B(ne, &loop);
|
| +
|
| + // Initialize all JavaScript callee-saved registers, since they will be seen
|
| + // by the garbage collector as part of handlers.
|
| + // The original values have been saved in JSEntryStub::GenerateBody().
|
| + __ LoadRoot(x19, Heap::kUndefinedValueRootIndex);
|
| + __ Mov(x20, x19);
|
| + __ Mov(x21, x19);
|
| + __ Mov(x22, x19);
|
| + __ Mov(x23, x19);
|
| + __ Mov(x24, x19);
|
| + __ Mov(x25, x19);
|
| + // Don't initialize the reserved registers.
|
| + // x26 : root register (root).
|
| + // x27 : context pointer (cp).
|
| + // x28 : JS stack pointer (jssp).
|
| + // x29 : frame pointer (fp).
|
| +
|
| + // TODO(alexandre): Revisit the MAsm function invocation mechanisms.
|
| + // Currently there is a mix of statically and dynamically allocated
|
| + // registers.
|
| + __ Mov(x0, argc);
|
| + if (is_construct) {
|
| + // No type feedback cell is available.
|
| + Handle<Object> undefined_sentinel(
|
| + masm->isolate()->heap()->undefined_value(), masm->isolate());
|
| + __ Mov(x2, Operand(undefined_sentinel));
|
| +
|
| + CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
|
| + __ CallStub(&stub);
|
| + } else {
|
| + ParameterCount actual(x0);
|
| + __ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
|
| + }
|
| + // Exit the JS internal frame and remove the parameters (except function),
|
| + // and return.
|
| + }
|
| +
|
| + // Result is in x0. Return.
|
| + __ Ret();
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
|
| + Generate_JSEntryTrampolineHelper(masm, false);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
|
| + Generate_JSEntryTrampolineHelper(masm, true);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
|
| + CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
|
| + GenerateTailCallToReturnedCode(masm);
|
| +}
|
| +
|
| +
|
| +static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
|
| + FrameScope scope(masm, StackFrame::INTERNAL);
|
| + Register function = x1;
|
| +
|
| + // Preserve function. At the same time, push arguments for
|
| + // kCompileOptimized.
|
| + __ LoadObject(x10, masm->isolate()->factory()->ToBoolean(concurrent));
|
| + __ Push(function, function, x10);
|
| +
|
| + __ CallRuntime(Runtime::kCompileOptimized, 2);
|
| +
|
| +  // Restore function.
|
| + __ Pop(function);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
|
| + CallCompileOptimized(masm, false);
|
| + GenerateTailCallToReturnedCode(masm);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
|
| + CallCompileOptimized(masm, true);
|
| + GenerateTailCallToReturnedCode(masm);
|
| +}
|
| +
|
| +
|
| +static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
|
| + // For now, we are relying on the fact that make_code_young doesn't do any
|
| + // garbage collection which allows us to save/restore the registers without
|
| + // worrying about which of them contain pointers. We also don't build an
|
| + // internal frame to make the code fast, since we shouldn't have to do stack
|
| + // crawls in MakeCodeYoung. This seems a bit fragile.
|
| +
|
| + // The following caller-saved registers must be saved and restored when
|
| + // calling through to the runtime:
|
| + // x0 - The address from which to resume execution.
|
| + // x1 - isolate
|
| + // lr - The return address for the JSFunction itself. It has not yet been
|
| + // preserved on the stack because the frame setup code was replaced
|
| + // with a call to this stub, to handle code ageing.
|
| + {
|
| + FrameScope scope(masm, StackFrame::MANUAL);
|
| + __ Push(x0, x1, fp, lr);
|
| + __ Mov(x1, Operand(ExternalReference::isolate_address(masm->isolate())));
|
| + __ CallCFunction(
|
| + ExternalReference::get_make_code_young_function(masm->isolate()), 2);
|
| + __ Pop(lr, fp, x1, x0);
|
| + }
|
| +
|
| + // The calling function has been made young again, so return to execute the
|
| + // real frame set-up code.
|
| + __ Br(x0);
|
| +}
|
| +
|
| +#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
|
| +void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
|
| + MacroAssembler* masm) { \
|
| + GenerateMakeCodeYoungAgainCommon(masm); \
|
| +} \
|
| +void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
|
| + MacroAssembler* masm) { \
|
| + GenerateMakeCodeYoungAgainCommon(masm); \
|
| +}
|
| +CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
|
| +#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
|
| +
|
| +
|
| +void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
|
| + // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
|
| + // that make_code_young doesn't do any garbage collection which allows us to
|
| + // save/restore the registers without worrying about which of them contain
|
| + // pointers.
|
| +
|
| + // The following caller-saved registers must be saved and restored when
|
| + // calling through to the runtime:
|
| + // x0 - The address from which to resume execution.
|
| + // x1 - isolate
|
| + // lr - The return address for the JSFunction itself. It has not yet been
|
| + // preserved on the stack because the frame setup code was replaced
|
| + // with a call to this stub, to handle code ageing.
|
| + {
|
| + FrameScope scope(masm, StackFrame::MANUAL);
|
| + __ Push(x0, x1, fp, lr);
|
| + __ Mov(x1, Operand(ExternalReference::isolate_address(masm->isolate())));
|
| + __ CallCFunction(
|
| + ExternalReference::get_mark_code_as_executed_function(
|
| + masm->isolate()), 2);
|
| + __ Pop(lr, fp, x1, x0);
|
| +
|
| + // Perform prologue operations usually performed by the young code stub.
|
| + __ EmitFrameSetupForCodeAgePatching(masm);
|
| + }
|
| +
|
| + // Jump to point after the code-age stub.
|
| + __ Add(x0, x0, kCodeAgeSequenceSize);
|
| + __ Br(x0);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
|
| + GenerateMakeCodeYoungAgainCommon(masm);
|
| +}
|
| +
|
| +
|
| +static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
|
| + SaveFPRegsMode save_doubles) {
|
| + {
|
| + FrameScope scope(masm, StackFrame::INTERNAL);
|
| +
|
| +    // Preserve registers across the notification; this is important for compiled
|
| + // stubs that tail call the runtime on deopts passing their parameters in
|
| + // registers.
|
| + // TODO(jbramley): Is it correct (and appropriate) to use safepoint
|
| + // registers here? According to the comment above, we should only need to
|
| + // preserve the registers with parameters.
|
| + __ PushXRegList(kSafepointSavedRegisters);
|
| + // Pass the function and deoptimization type to the runtime system.
|
| + __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
|
| + __ PopXRegList(kSafepointSavedRegisters);
|
| + }
|
| +
|
| + // Ignore state (pushed by Deoptimizer::EntryGenerator::Generate).
|
| + __ Drop(1);
|
| +
|
| + // Jump to the miss handler. Deoptimizer::EntryGenerator::Generate loads this
|
| + // into lr before it jumps here.
|
| + __ Br(lr);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
|
| + Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
|
| + Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
|
| +}
|
| +
|
| +
|
| +static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
|
| + Deoptimizer::BailoutType type) {
|
| + {
|
| + FrameScope scope(masm, StackFrame::INTERNAL);
|
| + // Pass the deoptimization type to the runtime system.
|
| + __ Mov(x0, Operand(Smi::FromInt(static_cast<int>(type))));
|
| + __ Push(x0);
|
| + __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
|
| + }
|
| +
|
| + // Get the full codegen state from the stack and untag it.
|
| + Register state = x6;
|
| + __ Peek(state, 0);
|
| + __ SmiUntag(state);
|
| +
|
| + // Switch on the state.
|
| + Label with_tos_register, unknown_state;
|
| + __ CompareAndBranch(
|
| + state, FullCodeGenerator::NO_REGISTERS, ne, &with_tos_register);
|
| + __ Drop(1); // Remove state.
|
| + __ Ret();
|
| +
|
| + __ Bind(&with_tos_register);
|
| + // Reload TOS register.
|
| + __ Peek(x0, kPointerSize);
|
| + __ CompareAndBranch(state, FullCodeGenerator::TOS_REG, ne, &unknown_state);
|
| + __ Drop(2); // Remove state and TOS.
|
| + __ Ret();
|
| +
|
| + __ Bind(&unknown_state);
|
| + __ Abort(kInvalidFullCodegenState);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
|
| + Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
|
| + Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
|
| + Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
|
| + // Lookup the function in the JavaScript frame.
|
| + __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
|
| + {
|
| + FrameScope scope(masm, StackFrame::INTERNAL);
|
| + // Pass function as argument.
|
| + __ Push(x0);
|
| + __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
|
| + }
|
| +
|
| + // If the code object is null, just return to the unoptimized code.
|
| + Label skip;
|
| + __ CompareAndBranch(x0, Operand(Smi::FromInt(0)), ne, &skip);
|
| + __ Ret();
|
| +
|
| + __ Bind(&skip);
|
| +
|
| + // Load deoptimization data from the code object.
|
| + // <deopt_data> = <code>[#deoptimization_data_offset]
|
| + __ Ldr(x1, MemOperand(x0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
|
| +
|
| + // Load the OSR entrypoint offset from the deoptimization data.
|
| + // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
|
| + __ Ldrsw(w1, UntagSmiFieldMemOperand(x1, FixedArray::OffsetOfElementAt(
|
| + DeoptimizationInputData::kOsrPcOffsetIndex)));
|
| +
|
| + // Compute the target address = code_obj + header_size + osr_offset
|
| + // <entry_addr> = <code_obj> + #header_size + <osr_offset>
|
| + __ Add(x0, x0, x1);
|
| + __ Add(lr, x0, Code::kHeaderSize - kHeapObjectTag);
|
| +
|
| + // And "return" to the OSR entry point of the function.
|
| + __ Ret();
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
|
| +  // We check the stack limit as an indicator that recompilation might be done.
|
| + Label ok;
|
| + __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
|
| + __ B(hs, &ok);
|
| + {
|
| + FrameScope scope(masm, StackFrame::INTERNAL);
|
| + __ CallRuntime(Runtime::kStackGuard, 0);
|
| + }
|
| + __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
|
| + RelocInfo::CODE_TARGET);
|
| +
|
| + __ Bind(&ok);
|
| + __ Ret();
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
|
| + Register receiver_type = x13;
|
| +
|
| + ASM_LOCATION("Builtins::Generate_FunctionCall");
|
| + // TODO(all/rames): Optimize and use named registers.
|
| + // 1. Make sure we have at least one argument.
|
| + // x0: actual number of arguments
|
| + { Label done;
|
| + __ Cbnz(x0, &done);
|
| + __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
|
| + __ Push(x10);
|
| + __ Mov(x0, 1);
|
| + __ Bind(&done);
|
| + }
|
| +
|
| + // 2. Get the function to call (passed as receiver) from the stack, check
|
| + // if it is a function.
|
| + // x0: actual number of arguments
|
| + Label slow, non_function;
|
| + // TODO(jbramley): Consider giving Peek a unit_size parameter, like Claim and
|
| + // Drop. This usage pattern is very common.
|
| + __ Peek(x1, Operand(x0, LSL, kXRegSizeInBytesLog2));
|
| + __ JumpIfSmi(x1, &non_function);
|
| + __ JumpIfNotObjectType(x1, x10, receiver_type, JS_FUNCTION_TYPE, &slow);
|
| +
|
| + // 3a. Patch the first argument if necessary when calling a function.
|
| + // x0: actual number of arguments
|
| + // x1: function
|
| + Label shift_arguments;
|
| + __ Mov(x4, 0); // Indicates a regular JS_FUNCTION.
|
| + { Label convert_to_object, use_global_receiver, patch_receiver;
|
| + // Change context eagerly in case we need the global receiver.
|
| + __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
|
| +
|
| + // Do not transform the receiver for strict mode functions.
|
| + __ Ldr(x10, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Ldr(w11, FieldMemOperand(x10, SharedFunctionInfo::kCompilerHintsOffset));
|
| + __ Tbnz(x11, SharedFunctionInfo::kStrictModeFunction, &shift_arguments);
|
| +
|
| +    // TODO(all): Should we insert space to avoid BTAC collisions?
|
| +    // Do not transform the receiver for native (compiler hints already in x11).
|
| + __ Tbnz(x11, SharedFunctionInfo::kNative, &shift_arguments);
|
| +
|
| + // Compute the receiver in non-strict mode.
|
| + __ Sub(x10, x0, 1);
|
| + __ Peek(x2, Operand(x10, LSL, kXRegSizeInBytesLog2));
|
| + // x0: actual number of arguments
|
| + // x1: function
|
| + // x2: first argument
|
| + __ JumpIfSmi(x2, &convert_to_object);
|
| +
|
| + // TODO(all): We could potentially work to optimize loads of root values.
|
| + // TODO(all): If the indexes are successive we can use 'ldp'.
|
| + __ JumpIfRoot(x2, Heap::kUndefinedValueRootIndex, &use_global_receiver);
|
| + __ JumpIfRoot(x2, Heap::kNullValueRootIndex, &use_global_receiver);
|
| +
|
| + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
|
| + __ JumpIfObjectType(x2, x10, x11, FIRST_SPEC_OBJECT_TYPE, &shift_arguments,
|
| + ge);
|
| +
|
| + __ Bind(&convert_to_object);
|
| +
|
| + {
|
| + // Enter an internal frame in order to preserve argument count.
|
| + FrameScope scope(masm, StackFrame::INTERNAL);
|
| + __ SmiTag(x0);
|
| +
|
| + __ Push(x0, x2);
|
| + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
|
| + __ Mov(x2, x0);
|
| +
|
| + __ Pop(x0);
|
| + __ SmiUntag(x0);
|
| +
|
| + // Exit the internal frame.
|
| + }
|
| +
|
| + // Restore the function to x1, and the flag to x4.
|
| + __ Peek(x1, Operand(x0, LSL, kXRegSizeInBytesLog2));
|
| + __ Mov(x4, 0);
|
| + __ B(&patch_receiver);
|
| +
|
| + __ Bind(&use_global_receiver);
|
| + __ Ldr(x2, GlobalObjectMemOperand());
|
| + __ Ldr(x2, FieldMemOperand(x2, GlobalObject::kGlobalReceiverOffset));
|
| +
|
| + __ Bind(&patch_receiver);
|
| + __ Sub(x10, x0, 1);
|
| + __ Poke(x2, Operand(x10, LSL, kXRegSizeInBytesLog2));
|
| +
|
| + __ B(&shift_arguments);
|
| + }
|
| +
|
| + // 3b. Check for function proxy.
|
| + __ Bind(&slow);
|
| + __ Mov(x4, 1); // Indicate function proxy.
|
| + __ Cmp(receiver_type, JS_FUNCTION_PROXY_TYPE);
|
| + __ B(eq, &shift_arguments);
|
| + __ Bind(&non_function);
|
| + __ Mov(x4, 2); // Indicate non-function.
|
| +
|
| + // 3c. Patch the first argument when calling a non-function. The
|
| + // CALL_NON_FUNCTION builtin expects the non-function callee as
|
| + // receiver, so overwrite the first argument which will ultimately
|
| + // become the receiver.
|
| + // x0: actual number of arguments
|
| + // x1: function
|
| + // x4: call type (0: JS function, 1: function proxy, 2: non-function)
|
| + __ Sub(x10, x0, 1);
|
| + __ Poke(x1, Operand(x10, LSL, kXRegSizeInBytesLog2));
|
| +
|
| + // 4. Shift arguments and return address one slot down on the stack
|
| + // (overwriting the original receiver). Adjust argument count to make
|
| + // the original first argument the new receiver.
|
| + // x0: actual number of arguments
|
| + // x1: function
|
| + // x4: call type (0: JS function, 1: function proxy, 2: non-function)
|
| + __ Bind(&shift_arguments);
|
| + { Label loop;
|
| + // Calculate the copy start address (destination). Copy end address is jssp.
|
| + __ Add(x11, jssp, Operand(x0, LSL, kPointerSizeLog2));
|
| + __ Sub(x10, x11, kPointerSize);
|
| +
|
| + // TODO(all): Optimize to copy values 2 by 2?
|
| + __ Bind(&loop);
|
| + __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex));
|
| + __ Str(x12, MemOperand(x11, -kPointerSize, PostIndex));
|
| + __ Cmp(x10, jssp);
|
| + __ B(ge, &loop);
|
| + // Adjust the actual number of arguments and remove the top element
|
| + // (which is a copy of the last argument).
|
| + __ Sub(x0, x0, 1);
|
| + __ Drop(1);
|
| + }
|
| +
|
| + // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
|
| + // or a function proxy via CALL_FUNCTION_PROXY.
|
| + // x0: actual number of arguments
|
| + // x1: function
|
| + // x4: call type (0: JS function, 1: function proxy, 2: non-function)
|
| + { Label function, non_proxy;
|
| + __ Cbz(x4, &function);
|
| + // Expected number of arguments is 0 for CALL_NON_FUNCTION.
|
| + __ Mov(x2, 0);
|
| + __ Cmp(x4, 1);
|
| + __ B(ne, &non_proxy);
|
| +
|
| + __ Push(x1); // Re-add proxy object as additional argument.
|
| + __ Add(x0, x0, 1);
|
| + __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
|
| + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
|
| + RelocInfo::CODE_TARGET);
|
| +
|
| + __ Bind(&non_proxy);
|
| + __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION);
|
| + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
|
| + RelocInfo::CODE_TARGET);
|
| + __ Bind(&function);
|
| + }
|
| +
|
| + // 5b. Get the code to call from the function and check that the number of
|
| + // expected arguments matches what we're providing. If so, jump
|
| +  //     (tail-call) to the code in register x3 without checking arguments.
|
| + // x0: actual number of arguments
|
| + // x1: function
|
| + __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Ldrsw(x2,
|
| + FieldMemOperand(x3,
|
| + SharedFunctionInfo::kFormalParameterCountOffset));
|
| + Label dont_adapt_args;
|
| + __ Cmp(x2, x0); // Check formal and actual parameter counts.
|
| + __ B(eq, &dont_adapt_args);
|
| + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
|
| + RelocInfo::CODE_TARGET);
|
| + __ Bind(&dont_adapt_args);
|
| +
|
| + __ Ldr(x3, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
|
| + ParameterCount expected(0);
|
| + __ InvokeCode(x3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
|
| + ASM_LOCATION("Builtins::Generate_FunctionApply");
|
| + const int kIndexOffset =
|
| + StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
|
| + const int kLimitOffset =
|
| + StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
|
| + const int kArgsOffset = 2 * kPointerSize;
|
| + const int kReceiverOffset = 3 * kPointerSize;
|
| + const int kFunctionOffset = 4 * kPointerSize;
|
| +
|
| + {
|
| + FrameScope frame_scope(masm, StackFrame::INTERNAL);
|
| +
|
| + Register args = x12;
|
| + Register receiver = x14;
|
| + Register function = x15;
|
| +
|
| + // Get the length of the arguments via a builtin call.
|
| + __ Ldr(function, MemOperand(fp, kFunctionOffset));
|
| + __ Ldr(args, MemOperand(fp, kArgsOffset));
|
| + __ Push(function, args);
|
| + __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
|
| + Register argc = x0;
|
| +
|
| + // Check the stack for overflow.
|
| + // We are not trying to catch interruptions (e.g. debug break and
|
| + // preemption) here, so the "real stack limit" is checked.
|
| + Label enough_stack_space;
|
| + __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
|
| + __ Ldr(function, MemOperand(fp, kFunctionOffset));
|
| + // Make x10 the space we have left. The stack might already be overflowed
|
| + // here which will cause x10 to become negative.
|
| + // TODO(jbramley): Check that the stack usage here is safe.
|
| + __ Sub(x10, jssp, x10);
|
| + // Check if the arguments will overflow the stack.
|
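| +    // Note that argc is still a smi here; shifting it right by
|
| +    // (kSmiShift - kPointerSizeLog2) converts it directly into the size of
|
| +    // the arguments in bytes, i.e. argc * kPointerSize.
|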
| + __ Cmp(x10, Operand(argc, LSR, kSmiShift - kPointerSizeLog2));
|
| + __ B(gt, &enough_stack_space);
|
| + // There is not enough stack space, so use a builtin to throw an appropriate
|
| + // error.
|
| + __ Push(function, argc);
|
| + __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
|
| + // We should never return from the APPLY_OVERFLOW builtin.
|
| + if (__ emit_debug_code()) {
|
| + __ Unreachable();
|
| + }
|
| +
|
| + __ Bind(&enough_stack_space);
|
| + // Push current limit and index.
|
| + __ Mov(x1, 0); // Initial index.
|
| + __ Push(argc, x1);
|
| +
|
| + Label push_receiver;
|
| + __ Ldr(receiver, MemOperand(fp, kReceiverOffset));
|
| +
|
| + // Check that the function is a JS function. Otherwise it must be a proxy.
|
| +    // If it is not, the function proxy will be invoked later.
|
| + __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE,
|
| + &push_receiver);
|
| +
|
| + // Change context eagerly to get the right global object if necessary.
|
| + __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
|
| + // Load the shared function info.
|
| + __ Ldr(x2, FieldMemOperand(function,
|
| + JSFunction::kSharedFunctionInfoOffset));
|
| +
|
| + // Compute and push the receiver.
|
| + // Do not transform the receiver for strict mode functions.
|
| + Label convert_receiver_to_object, use_global_receiver;
|
| + __ Ldr(w10, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
|
| + __ Tbnz(x10, SharedFunctionInfo::kStrictModeFunction, &push_receiver);
|
| + // Do not transform the receiver for native functions.
|
| + __ Tbnz(x10, SharedFunctionInfo::kNative, &push_receiver);
|
| +
|
| + // Compute the receiver in non-strict mode.
|
| + __ JumpIfSmi(receiver, &convert_receiver_to_object);
|
| + __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
|
| + __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
|
| + &use_global_receiver);
|
| +
|
| + // Check if the receiver is already a JavaScript object.
|
| + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
|
| + __ JumpIfObjectType(receiver, x10, x11, FIRST_SPEC_OBJECT_TYPE,
|
| + &push_receiver, ge);
|
| +
|
| + // Call a builtin to convert the receiver to a regular object.
|
| + __ Bind(&convert_receiver_to_object);
|
| + __ Push(receiver);
|
| + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
|
| + __ Mov(receiver, x0);
|
| + __ B(&push_receiver);
|
| +
|
| + __ Bind(&use_global_receiver);
|
| + __ Ldr(x10, GlobalObjectMemOperand());
|
| + __ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
|
| +
|
| +    // Push the receiver.
|
| + __ Bind(&push_receiver);
|
| + __ Push(receiver);
|
| +
|
| + // Copy all arguments from the array to the stack.
|
| + Label entry, loop;
|
| + Register current = x0;
|
| + __ Ldr(current, MemOperand(fp, kIndexOffset));
|
| + __ B(&entry);
|
| +
|
| + __ Bind(&loop);
|
| + // Load the current argument from the arguments array and push it.
|
| + // TODO(all): Couldn't we optimize this for JS arrays?
|
| +
|
| + __ Ldr(x1, MemOperand(fp, kArgsOffset));
|
| + __ Push(x1, current);
|
| +
|
| + // Call the runtime to access the property in the arguments array.
|
| + __ CallRuntime(Runtime::kGetProperty, 2);
|
| + __ Push(x0);
|
| +    // Update the index on the stack.
|
| + // Use inline caching to access the arguments.
|
| + __ Ldr(current, MemOperand(fp, kIndexOffset));
|
| + __ Add(current, current, Operand(Smi::FromInt(1)));
|
| + __ Str(current, MemOperand(fp, kIndexOffset));
|
| +
|
| + // Test if the copy loop has finished copying all the elements from the
|
| + // arguments object.
|
| + __ Bind(&entry);
|
| + __ Ldr(x1, MemOperand(fp, kLimitOffset));
|
| + __ Cmp(current, x1);
|
| + __ B(ne, &loop);
|
| +
|
| + // At the end of the loop, the number of arguments is stored in 'current',
|
| + // represented as a smi.
|
| +
|
| +    function = x1;  // From now on we want the function to be kept in x1.
|
| + __ Ldr(function, MemOperand(fp, kFunctionOffset));
|
| +
|
| + // Call the function.
|
| + Label call_proxy;
|
| + ParameterCount actual(current);
|
| + __ SmiUntag(current);
|
| + __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE, &call_proxy);
|
| + __ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
|
| + frame_scope.GenerateLeaveFrame();
|
| + __ Drop(3);
|
| + __ Ret();
|
| +
|
| + // Call the function proxy.
|
| + __ Bind(&call_proxy);
|
| + // x0 : argc
|
| + // x1 : function
|
| + __ Push(function); // Add function proxy as last argument.
|
| + __ Add(x0, x0, 1);
|
| + __ Mov(x2, 0);
|
| + __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
|
| + __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
|
| + RelocInfo::CODE_TARGET);
|
| + }
|
| + __ Drop(3);
|
| + __ Ret();
|
| +}
|
| +
|
| +
|
| +static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
|
| + __ SmiTag(x10, x0);
|
| + __ Mov(x11, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
| + __ Push(lr, fp);
|
| + __ Push(x11, x1, x10);
|
| + __ Add(fp, jssp,
|
| + StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
|
| +}
|
| +
|
| +
|
| +static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
|
| + // ----------- S t a t e -------------
|
| + // -- x0 : result being passed through
|
| + // -----------------------------------
|
| + // Get the number of arguments passed (as a smi), tear down the frame and
|
| + // then drop the parameters and the receiver.
|
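| +  // The smi-tagged argument count was pushed by EnterArgumentsAdaptorFrame
|
| +  // just below the frame marker and the function, hence the extra
|
| +  // kPointerSize in the offset.
|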
| + __ Ldr(x10, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
|
| + kPointerSize)));
|
| + __ Mov(jssp, fp);
|
| + __ Pop(fp, lr);
|
| + __ DropBySMI(x10, kXRegSizeInBytes);
|
| + __ Drop(1);
|
| +}
|
| +
|
| +
|
| +void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
|
| + ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
|
| + // ----------- S t a t e -------------
|
| + // -- x0 : actual number of arguments
|
| + // -- x1 : function (passed through to callee)
|
| + // -- x2 : expected number of arguments
|
| + // -----------------------------------
|
| +
|
| + Label invoke, dont_adapt_arguments;
|
| +
|
| + Label enough, too_few;
|
| + __ Ldr(x3, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
|
| + __ Cmp(x0, x2);
|
| + __ B(lt, &too_few);
|
| + __ Cmp(x2, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
|
| + __ B(eq, &dont_adapt_arguments);
|
| +
|
| + { // Enough parameters: actual >= expected
|
| + EnterArgumentsAdaptorFrame(masm);
|
| +
|
| + // Calculate copy start address into x10 and end address into x11.
|
| + // x0: actual number of arguments
|
| + // x1: function
|
| + // x2: expected number of arguments
|
| + // x3: code entry to call
|
| + __ Add(x10, fp, Operand(x0, LSL, kPointerSizeLog2));
|
| +    // Adjust for return address and receiver.
|
| + __ Add(x10, x10, 2 * kPointerSize);
|
| + __ Sub(x11, x10, Operand(x2, LSL, kPointerSizeLog2));
|
| +
|
| + // Copy the arguments (including the receiver) to the new stack frame.
|
| + // x0: actual number of arguments
|
| + // x1: function
|
| + // x2: expected number of arguments
|
| + // x3: code entry to call
|
| + // x10: copy start address
|
| + // x11: copy end address
|
| +
|
| + // TODO(all): Should we push values 2 by 2?
|
| + Label copy;
|
| + __ Bind(©);
|
| + __ Cmp(x10, x11);
|
| + __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex));
|
| + __ Push(x12);
|
| + __ B(gt, ©);
|
| +
|
| + __ B(&invoke);
|
| + }
|
| +
|
| + { // Too few parameters: Actual < expected
|
| + __ Bind(&too_few);
|
| + EnterArgumentsAdaptorFrame(masm);
|
| +
|
| + // Calculate copy start address into x10 and copy end address into x11.
|
| + // x0: actual number of arguments
|
| + // x1: function
|
| + // x2: expected number of arguments
|
| + // x3: code entry to call
|
| + // Adjust for return address.
|
| + __ Add(x11, fp, 1 * kPointerSize);
|
| + __ Add(x10, x11, Operand(x0, LSL, kPointerSizeLog2));
|
| + __ Add(x10, x10, 1 * kPointerSize);
|
| +
|
| + // Copy the arguments (including the receiver) to the new stack frame.
|
| + // x0: actual number of arguments
|
| + // x1: function
|
| + // x2: expected number of arguments
|
| + // x3: code entry to call
|
| + // x10: copy start address
|
| + // x11: copy end address
|
| + Label copy;
|
| + __ Bind(©);
|
| + __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex));
|
| + __ Push(x12);
|
| + __ Cmp(x10, x11); // Compare before moving to next argument.
|
| + __ B(ne, ©);
|
| +
|
| + // Fill the remaining expected arguments with undefined.
|
| + // x0: actual number of arguments
|
| + // x1: function
|
| + // x2: expected number of arguments
|
| + // x3: code entry to call
|
| + __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
|
| + __ Sub(x11, fp, Operand(x2, LSL, kPointerSizeLog2));
|
| + // Adjust for the arguments adaptor frame and already pushed receiver.
|
| + __ Sub(x11, x11,
|
| + StandardFrameConstants::kFixedFrameSizeFromFp + (2 * kPointerSize));
|
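| +    // x11 is where jssp must end up so that the receiver plus 'expected'
|
| +    // argument slots are on the frame; push undefined until jssp reaches it.
|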
| +
|
| + // TODO(all): Optimize this to use ldp?
|
| + Label fill;
|
| + __ Bind(&fill);
|
| + __ Push(x10);
|
| + __ Cmp(jssp, x11);
|
| + __ B(ne, &fill);
|
| + }
|
| +
|
| + // Arguments have been adapted. Now call the entry point.
|
| + __ Bind(&invoke);
|
| + __ Call(x3);
|
| +
|
| + // Store offset of return address for deoptimizer.
|
| + masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
|
| +
|
| + // Exit frame and return.
|
| + LeaveArgumentsAdaptorFrame(masm);
|
| + __ Ret();
|
| +
|
| + // Call the entry point without adapting the arguments.
|
| + __ Bind(&dont_adapt_arguments);
|
| + __ Jump(x3);
|
| +}
|
| +
|
| +
|
| +#undef __
|
| +
|
| +} } // namespace v8::internal
|
| +
|
| +#endif  // V8_TARGET_ARCH_A64
|
|
|