| Index: src/builtins/mips64/builtins-mips64.cc
|
| diff --git a/src/builtins/mips64/builtins-mips64.cc b/src/builtins/mips64/builtins-mips64.cc
|
| index 2698c2542bf189bd53f636a661901e8a74b80564..5b5a823a7c0112f6ae40dc83bf63e8547132bf33 100644
|
| --- a/src/builtins/mips64/builtins-mips64.cc
|
| +++ b/src/builtins/mips64/builtins-mips64.cc
|
| @@ -32,7 +32,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
|
| // ConstructStubs implemented in C++ will be run in the context of the caller
|
| // instead of the callee, due to the way that [[Construct]] is defined for
|
| // ordinary functions).
|
| - __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
|
| + __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
|
|
|
| // JumpToExternalReference expects a0 to contain the number of arguments
|
| // including the receiver and the extra arguments.
|
| @@ -74,7 +74,7 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
|
|
|
| if (FLAG_debug_code) {
|
| // Initial map for the builtin InternalArray functions should be maps.
|
| - __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
|
| + __ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
|
| __ SmiTst(a2, a4);
|
| __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, a4,
|
| Operand(zero_reg));
|
| @@ -103,7 +103,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
|
|
|
| if (FLAG_debug_code) {
|
| // Initial map for the builtin Array functions should be maps.
|
| - __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
|
| + __ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
|
| __ SmiTst(a2, a4);
|
| __ Assert(ne, kUnexpectedInitialMapForArrayFunction1, a4,
|
| Operand(zero_reg));
|
| @@ -139,7 +139,7 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
|
| __ Dsubu(t1, a0, Operand(1)); // In delay slot.
|
| __ mov(t0, a0); // Store argc in t0.
|
| __ Dlsa(at, sp, t1, kPointerSizeLog2);
|
| - __ ld(a0, MemOperand(at));
|
| + __ Ld(a0, MemOperand(at));
|
| }
|
|
|
| // 2a. Convert first argument to number.
|
| @@ -176,7 +176,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
|
| // -----------------------------------
|
|
|
| // 1. Make sure we operate in the context of the called function.
|
| - __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
|
| + __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
|
|
|
| // 2. Load the first argument into a0 and get rid of the rest (including the
|
| // receiver).
|
| @@ -186,7 +186,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
|
| __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
|
| __ Dsubu(a0, a0, Operand(1)); // In delay slot.
|
| __ Dlsa(at, sp, a0, kPointerSizeLog2);
|
| - __ ld(a0, MemOperand(at));
|
| + __ Ld(a0, MemOperand(at));
|
| __ jmp(&done);
|
| __ bind(&no_arguments);
|
| __ Move(a0, Smi::kZero);
|
| @@ -234,7 +234,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
|
| __ LeaveBuiltinFrame(cp, a1, t0);
|
| __ SmiUntag(t0);
|
| }
|
| - __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
|
| + __ Sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
|
|
|
| __ bind(&drop_frame_and_ret);
|
| {
|
| @@ -262,7 +262,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
|
| __ Dsubu(t1, a0, Operand(1)); // In delay slot.
|
| __ mov(t0, a0); // Store argc in t0.
|
| __ Dlsa(at, sp, t1, kPointerSizeLog2);
|
| - __ ld(a0, MemOperand(at));
|
| + __ Ld(a0, MemOperand(at));
|
| }
|
|
|
| // 2a. At least one argument, return a0 if it's a string, otherwise
|
| @@ -326,7 +326,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
|
| // -----------------------------------
|
|
|
| // 1. Make sure we operate in the context of the called function.
|
| - __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
|
| + __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
|
|
|
| // 2. Load the first argument into a0 and get rid of the rest (including the
|
| // receiver).
|
| @@ -336,7 +336,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
|
| __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
|
| __ Dsubu(a0, a0, Operand(1));
|
| __ Dlsa(at, sp, a0, kPointerSizeLog2);
|
| - __ ld(a0, MemOperand(at));
|
| + __ Ld(a0, MemOperand(at));
|
| __ jmp(&done);
|
| __ bind(&no_arguments);
|
| __ LoadRoot(a0, Heap::kempty_stringRootIndex);
|
| @@ -386,7 +386,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
|
| __ LeaveBuiltinFrame(cp, a1, t0);
|
| __ SmiUntag(t0);
|
| }
|
| - __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
|
| + __ Sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
|
|
|
| __ bind(&drop_frame_and_ret);
|
| {
|
| @@ -396,8 +396,8 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
|
| }
|
|
|
| static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
|
| - __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| - __ ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
|
| + __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
|
| __ Daddu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| __ Jump(at);
|
| }
|
| @@ -480,7 +480,7 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
|
| // -- a3: new target
|
| // -- t0: newly allocated object
|
| // -----------------------------------
|
| - __ ld(a0, MemOperand(sp));
|
| + __ Ld(a0, MemOperand(sp));
|
| }
|
| __ SmiUntag(a0);
|
|
|
| @@ -513,7 +513,7 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
|
| __ jmp(&entry);
|
| __ bind(&loop);
|
| __ Dlsa(a4, a2, t0, kPointerSizeLog2);
|
| - __ ld(a5, MemOperand(a4));
|
| + __ Ld(a5, MemOperand(a4));
|
| __ push(a5);
|
| __ bind(&entry);
|
| __ Daddu(t0, t0, Operand(-1));
|
| @@ -535,7 +535,7 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
|
| }
|
|
|
| // Restore context from the frame.
|
| - __ ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
|
| + __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
|
|
|
| if (create_implicit_receiver) {
|
| // If the result is an object (in the ECMA sense), we should get rid
|
| @@ -571,7 +571,7 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
|
| // Throw away the result of the constructor invocation and use the
|
| // on-stack receiver as the result.
|
| __ bind(&use_receiver);
|
| - __ ld(v0, MemOperand(sp));
|
| + __ Ld(v0, MemOperand(sp));
|
|
|
| // Remove receiver from the stack, remove caller arguments, and
|
| // return.
|
| @@ -579,9 +579,9 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
|
| // v0: result
|
| // sp[0]: receiver (newly allocated object)
|
| // sp[1]: number of arguments (smi-tagged)
|
| - __ ld(a1, MemOperand(sp, 1 * kPointerSize));
|
| + __ Ld(a1, MemOperand(sp, 1 * kPointerSize));
|
| } else {
|
| - __ ld(a1, MemOperand(sp));
|
| + __ Ld(a1, MemOperand(sp));
|
| }
|
|
|
| // Leave construct frame.
|
| @@ -630,14 +630,14 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
|
| __ Push(a0, a0);
|
|
|
| // Retrieve smi-tagged arguments count from the stack.
|
| - __ ld(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
|
| + __ Ld(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
|
| __ SmiUntag(a0);
|
|
|
| // Retrieve the new target value from the stack. This was placed into the
|
| // frame description in place of the receiver by the optimizing compiler.
|
| __ Daddu(a3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
|
| __ Dlsa(a3, a3, a0, kPointerSizeLog2);
|
| - __ ld(a3, MemOperand(a3));
|
| + __ Ld(a3, MemOperand(a3));
|
|
|
| // Continue with constructor function invocation.
|
| __ jmp(&post_instantiation_deopt_entry);
|
| @@ -686,13 +686,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
|
| __ Branch(&async_await, equal, t8,
|
| Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
|
|
|
| - __ sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
|
| + __ Sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
|
| __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
|
| kRAHasNotBeenSaved, kDontSaveFPRegs);
|
| __ jmp(&done_store_input);
|
|
|
| __ bind(&async_await);
|
| - __ sd(v0, FieldMemOperand(
|
| + __ Sd(v0, FieldMemOperand(
|
| a1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset));
|
| __ RecordWriteField(a1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
|
| v0, a3, kRAHasNotBeenSaved, kDontSaveFPRegs);
|
| @@ -701,11 +701,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
|
| // `a3` no longer holds SuspendFlags
|
|
|
| // Store resume mode into generator object.
|
| - __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
|
| + __ Sd(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
|
|
|
| // Load suspended function and context.
|
| - __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
|
| - __ ld(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
|
| + __ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
|
| + __ Ld(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
|
|
|
| // Flood function if we are stepping.
|
| Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
|
| @@ -713,19 +713,19 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
|
| ExternalReference debug_hook =
|
| ExternalReference::debug_hook_on_function_call_address(masm->isolate());
|
| __ li(a5, Operand(debug_hook));
|
| - __ lb(a5, MemOperand(a5));
|
| + __ Lb(a5, MemOperand(a5));
|
| __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg));
|
|
|
| // Flood function if we need to continue stepping in the suspended generator.
|
| ExternalReference debug_suspended_generator =
|
| ExternalReference::debug_suspended_generator_address(masm->isolate());
|
| __ li(a5, Operand(debug_suspended_generator));
|
| - __ ld(a5, MemOperand(a5));
|
| + __ Ld(a5, MemOperand(a5));
|
| __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
|
| __ bind(&stepping_prepared);
|
|
|
| // Push receiver.
|
| - __ ld(a5, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
|
| + __ Ld(a5, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
|
| __ Push(a5);
|
|
|
| // ----------- S t a t e -------------
|
| @@ -741,8 +741,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
|
| // context allocation for any variables in generators, the actual argument
|
| // values have already been copied into the context and these dummy values
|
| // will never be used.
|
| - __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
|
| - __ lw(a3,
|
| + __ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Lw(a3,
|
| FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
|
| {
|
| Label done_loop, loop;
|
| @@ -756,23 +756,23 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
|
|
|
| // Underlying function needs to have bytecode available.
|
| if (FLAG_debug_code) {
|
| - __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
|
| - __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
|
| + __ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
|
| __ GetObjectType(a3, a3, a3);
|
| __ Assert(eq, kMissingBytecodeArray, a3, Operand(BYTECODE_ARRAY_TYPE));
|
| }
|
|
|
| // Resume (Ignition/TurboFan) generator object.
|
| {
|
| - __ ld(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
|
| - __ lw(a0,
|
| + __ Ld(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Lw(a0,
|
| FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
|
| // We abuse new.target both to indicate that this is a resume call and to
|
| // pass in the generator object. In ordinary calls, new.target is always
|
| // undefined because generator functions are non-constructable.
|
| __ Move(a3, a1);
|
| __ Move(a1, a4);
|
| - __ ld(a2, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
|
| + __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
|
| __ Jump(a2);
|
| }
|
|
|
| @@ -784,7 +784,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
|
| __ Pop(a1, a2);
|
| }
|
| __ Branch(USE_DELAY_SLOT, &stepping_prepared);
|
| - __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
|
| + __ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
|
|
|
| __ bind(&prepare_step_in_suspended_generator);
|
| {
|
| @@ -794,7 +794,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
|
| __ Pop(a1, a2);
|
| }
|
| __ Branch(USE_DELAY_SLOT, &stepping_prepared);
|
| - __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
|
| + __ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
|
| }
|
|
|
| void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
|
| @@ -852,7 +852,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
|
| ExternalReference context_address(Isolate::kContextAddress,
|
| masm->isolate());
|
| __ li(cp, Operand(context_address));
|
| - __ ld(cp, MemOperand(cp));
|
| + __ Ld(cp, MemOperand(cp));
|
|
|
| // Push the function and the receiver onto the stack.
|
| __ Push(a1, a2);
|
| @@ -873,9 +873,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
|
| __ nop(); // Branch delay slot nop.
|
| // a6 points past last arg.
|
| __ bind(&loop);
|
| - __ ld(a4, MemOperand(s0)); // Read next parameter.
|
| + __ Ld(a4, MemOperand(s0)); // Read next parameter.
|
| __ daddiu(s0, s0, kPointerSize);
|
| - __ ld(a4, MemOperand(a4)); // Dereference handle.
|
| + __ Ld(a4, MemOperand(a4)); // Dereference handle.
|
| __ push(a4); // Push parameter.
|
| __ bind(&entry);
|
| __ Branch(&loop, ne, s0, Operand(a6));
|
| @@ -918,9 +918,9 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
|
| Register args_count = scratch;
|
|
|
| // Get the arguments + receiver count.
|
| - __ ld(args_count,
|
| + __ Ld(args_count,
|
| MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
|
| - __ lw(t0, FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
|
| + __ Lw(t0, FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
|
|
|
| // Leave the frame (also dropping the register file).
|
| __ LeaveFrame(StackFrame::JAVA_SCRIPT);
|
| @@ -955,13 +955,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
|
|
| // Get the bytecode array from the function object (or from the DebugInfo if
|
| // it is present) and load it into kInterpreterBytecodeArrayRegister.
|
| - __ ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| Label load_debug_bytecode_array, bytecode_array_loaded;
|
| Register debug_info = kInterpreterBytecodeArrayRegister;
|
| DCHECK(!debug_info.is(a0));
|
| - __ ld(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
|
| + __ Ld(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
|
| __ JumpIfNotSmi(debug_info, &load_debug_bytecode_array);
|
| - __ ld(kInterpreterBytecodeArrayRegister,
|
| + __ Ld(kInterpreterBytecodeArrayRegister,
|
| FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
|
| __ bind(&bytecode_array_loaded);
|
|
|
| @@ -969,18 +969,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
| // TODO(rmcilroy) Remove self healing once liveedit only has to deal with
|
| // Ignition bytecode.
|
| Label switch_to_different_code_kind;
|
| - __ ld(a0, FieldMemOperand(a0, SharedFunctionInfo::kCodeOffset));
|
| + __ Ld(a0, FieldMemOperand(a0, SharedFunctionInfo::kCodeOffset));
|
| __ Branch(&switch_to_different_code_kind, ne, a0,
|
| Operand(masm->CodeObject())); // Self-reference to this code.
|
|
|
| // Increment invocation count for the function.
|
| - __ ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
|
| - __ ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
|
| - __ ld(a4, FieldMemOperand(
|
| + __ Ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
|
| + __ Ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
|
| + __ Ld(a4, FieldMemOperand(
|
| a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
|
| FeedbackVector::kHeaderSize));
|
| __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
|
| - __ sd(a4, FieldMemOperand(
|
| + __ Sd(a4, FieldMemOperand(
|
| a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
|
| FeedbackVector::kHeaderSize));
|
|
|
| @@ -1010,7 +1010,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
| // Allocate the local and temporary register file on the stack.
|
| {
|
| // Load frame size (word) from the BytecodeArray object.
|
| - __ lw(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
|
| + __ Lw(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
|
| BytecodeArray::kFrameSizeOffset));
|
|
|
| // Do a stack check to ensure we don't go over the limit.
|
| @@ -1044,9 +1044,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
| // Dispatch to the first bytecode handler for the function.
|
| __ Daddu(a0, kInterpreterBytecodeArrayRegister,
|
| kInterpreterBytecodeOffsetRegister);
|
| - __ lbu(a0, MemOperand(a0));
|
| + __ Lbu(a0, MemOperand(a0));
|
| __ Dlsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
|
| - __ ld(at, MemOperand(at));
|
| + __ Ld(at, MemOperand(at));
|
| __ Call(at);
|
| masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
|
|
|
| @@ -1056,7 +1056,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
|
|
| // Load debug copy of the bytecode array.
|
| __ bind(&load_debug_bytecode_array);
|
| - __ ld(kInterpreterBytecodeArrayRegister,
|
| + __ Ld(kInterpreterBytecodeArrayRegister,
|
| FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
|
| __ Branch(&bytecode_array_loaded);
|
|
|
| @@ -1065,10 +1065,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
|
| // closure by switching the code entry field over to the new code as well.
|
| __ bind(&switch_to_different_code_kind);
|
| __ LeaveFrame(StackFrame::JAVA_SCRIPT);
|
| - __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| - __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kCodeOffset));
|
| + __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kCodeOffset));
|
| __ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| - __ sd(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
|
| + __ Sd(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
|
| __ RecordWriteCodeEntryField(a1, a4, a5);
|
| __ Jump(a4);
|
| }
|
| @@ -1101,7 +1101,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
|
| Label loop_header, loop_check;
|
| __ Branch(&loop_check);
|
| __ bind(&loop_header);
|
| - __ ld(scratch, MemOperand(index));
|
| + __ Ld(scratch, MemOperand(index));
|
| __ Daddu(index, index, Operand(-kPointerSize));
|
| __ push(scratch);
|
| __ bind(&loop_check);
|
| @@ -1182,8 +1182,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
|
|
|
| // Tail call to the function-specific construct stub (still in the caller
|
| // context at this point).
|
| - __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| - __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
|
| + __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
|
| __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| __ Jump(at);
|
| } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
|
| @@ -1255,7 +1255,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
|
| masm->isolate())));
|
|
|
| // Get the bytecode array pointer from the frame.
|
| - __ ld(kInterpreterBytecodeArrayRegister,
|
| + __ Ld(kInterpreterBytecodeArrayRegister,
|
| MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
|
|
|
| if (FLAG_debug_code) {
|
| @@ -1269,16 +1269,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
|
| }
|
|
|
| // Get the target bytecode offset from the frame.
|
| - __ lw(
|
| + __ Lw(
|
| kInterpreterBytecodeOffsetRegister,
|
| UntagSmiMemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
|
|
|
| // Dispatch to the target bytecode.
|
| __ Daddu(a1, kInterpreterBytecodeArrayRegister,
|
| kInterpreterBytecodeOffsetRegister);
|
| - __ lbu(a1, MemOperand(a1));
|
| + __ Lbu(a1, MemOperand(a1));
|
| __ Dlsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
|
| - __ ld(a1, MemOperand(a1));
|
| + __ Ld(a1, MemOperand(a1));
|
| __ Jump(a1);
|
| }
|
|
|
| @@ -1286,9 +1286,9 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
|
| // Advance the current bytecode offset stored within the given interpreter
|
| // stack frame. This simulates what all bytecode handlers do upon completion
|
| // of the underlying operation.
|
| - __ ld(a1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
|
| - __ ld(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
|
| - __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
| + __ Ld(a1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
|
| + __ Ld(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
|
| + __ Ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
| {
|
| FrameScope scope(masm, StackFrame::INTERNAL);
|
| __ Push(kInterpreterAccumulatorRegister, a1, a2);
|
| @@ -1296,7 +1296,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
|
| __ mov(a2, v0); // Result is the new bytecode offset.
|
| __ Pop(kInterpreterAccumulatorRegister);
|
| }
|
| - __ sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
|
| + __ Sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
|
|
|
| Generate_InterpreterEnterBytecode(masm);
|
| }
|
| @@ -1323,8 +1323,8 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
|
| Register index = a2;
|
|
|
| // Do we have a valid feedback vector?
|
| - __ ld(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
|
| - __ ld(index, FieldMemOperand(index, Cell::kValueOffset));
|
| + __ Ld(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
|
| + __ Ld(index, FieldMemOperand(index, Cell::kValueOffset));
|
| __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex,
|
| &gotta_call_runtime_no_stack);
|
|
|
| @@ -1332,9 +1332,9 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
|
| __ push(new_target);
|
| __ push(closure);
|
|
|
| - __ ld(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
|
| - __ ld(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
|
| - __ ld(index, FieldMemOperand(map, FixedArray::kLengthOffset));
|
| + __ Ld(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Ld(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
|
| + __ Ld(index, FieldMemOperand(map, FixedArray::kLengthOffset));
|
| __ Branch(&try_shared, lt, index, Operand(Smi::FromInt(2)));
|
|
|
| // a3 : native context
|
| @@ -1343,7 +1343,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
|
| // stack[0] : new target
|
| // stack[4] : closure
|
| Register native_context = a3;
|
| - __ ld(native_context, NativeContextMemOperand());
|
| + __ Ld(native_context, NativeContextMemOperand());
|
|
|
| __ bind(&loop_top);
|
| Register temp = a1;
|
| @@ -1352,39 +1352,39 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
|
| // Does the native context match?
|
| __ SmiScale(at, index, kPointerSizeLog2);
|
| __ Daddu(array_pointer, map, Operand(at));
|
| - __ ld(temp, FieldMemOperand(array_pointer,
|
| + __ Ld(temp, FieldMemOperand(array_pointer,
|
| SharedFunctionInfo::kOffsetToPreviousContext));
|
| - __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
|
| + __ Ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
|
| __ Branch(&loop_bottom, ne, temp, Operand(native_context));
|
|
|
| // Code available?
|
| Register entry = a4;
|
| - __ ld(entry,
|
| + __ Ld(entry,
|
| FieldMemOperand(array_pointer,
|
| SharedFunctionInfo::kOffsetToPreviousCachedCode));
|
| - __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
|
| + __ Ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
|
| __ JumpIfSmi(entry, &try_shared);
|
|
|
| // Found code. Get it into the closure and return.
|
| __ pop(closure);
|
| // Store code entry in the closure.
|
| __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| - __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
|
| + __ Sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
|
| __ RecordWriteCodeEntryField(closure, entry, a5);
|
|
|
| // Link the closure into the optimized function list.
|
| // a4 : code entry
|
| // a3 : native context
|
| // a1 : closure
|
| - __ ld(a5,
|
| + __ Ld(a5,
|
| ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
|
| - __ sd(a5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
|
| + __ Sd(a5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
|
| __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, a5, a0,
|
| kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
|
| OMIT_SMI_CHECK);
|
| const int function_list_offset =
|
| Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
|
| - __ sd(closure,
|
| + __ Sd(closure,
|
| ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
|
| // Save closure before the write barrier.
|
| __ mov(a5, closure);
|
| @@ -1405,22 +1405,22 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
|
| __ pop(closure);
|
| __ pop(new_target);
|
| __ pop(argument_count);
|
| - __ ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
|
| // Is the shared function marked for tier up?
|
| - __ lbu(a5, FieldMemOperand(entry,
|
| + __ Lbu(a5, FieldMemOperand(entry,
|
| SharedFunctionInfo::kMarkedForTierUpByteOffset));
|
| __ And(a5, a5,
|
| Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
|
| __ Branch(&gotta_call_runtime_no_stack, ne, a5, Operand(zero_reg));
|
|
|
| // If SFI points to anything other than CompileLazy, install that.
|
| - __ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
|
| + __ Ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
|
| __ Move(t1, masm->CodeObject());
|
| __ Branch(&gotta_call_runtime_no_stack, eq, entry, Operand(t1));
|
|
|
| // Install the SFI's code entry.
|
| __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| - __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
|
| + __ Sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
|
| __ RecordWriteCodeEntryField(closure, entry, a5);
|
| __ Jump(entry);
|
|
|
| @@ -1464,7 +1464,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
|
| __ Branch(&over, ne, t2, Operand(j));
|
| }
|
| for (int i = j - 1; i >= 0; --i) {
|
| - __ ld(t2, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
|
| + __ Ld(t2, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
|
| i * kPointerSize));
|
| __ push(t2);
|
| }
|
| @@ -1613,7 +1613,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
|
| }
|
|
|
| // Get the full codegen state from the stack and untag it -> a6.
|
| - __ lw(a6, UntagSmiMemOperand(sp, 0 * kPointerSize));
|
| + __ Lw(a6, UntagSmiMemOperand(sp, 0 * kPointerSize));
|
| // Switch on the state.
|
| Label with_tos_register, unknown_state;
|
| __ Branch(
|
| @@ -1625,7 +1625,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
|
|
|
| __ bind(&with_tos_register);
|
| DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
|
| - __ ld(v0, MemOperand(sp, 1 * kPointerSize));
|
| + __ Ld(v0, MemOperand(sp, 1 * kPointerSize));
|
| __ Branch(
|
| &unknown_state, ne, a6,
|
| Operand(static_cast<int64_t>(Deoptimizer::BailoutState::TOS_REGISTER)));
|
| @@ -1654,10 +1654,10 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
|
| bool has_handler_frame) {
|
| // Lookup the function in the JavaScript frame.
|
| if (has_handler_frame) {
|
| - __ ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| - __ ld(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
|
| + __ Ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| + __ Ld(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
|
| } else {
|
| - __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
|
| + __ Ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
|
| }
|
|
|
| {
|
| @@ -1678,11 +1678,11 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
|
|
|
| // Load deoptimization data from the code object.
|
| // <deopt_data> = <code>[#deoptimization_data_offset]
|
| - __ ld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
|
| + __ Ld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
|
|
|
| // Load the OSR entrypoint offset from the deoptimization data.
|
| // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
|
| - __ lw(a1,
|
| + __ Lw(a1,
|
| UntagSmiMemOperand(a1, FixedArray::OffsetOfElementAt(
|
| DeoptimizationInputData::kOsrPcOffsetIndex) -
|
| kHeapObjectTag));
|
| @@ -1736,8 +1736,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
| __ Movz(this_arg, undefined_value, scratch); // if argc == 0
|
| __ Dsubu(scratch, scratch, Operand(1));
|
| __ Movz(arg_array, undefined_value, scratch); // if argc == 1
|
| - __ ld(receiver, MemOperand(sp));
|
| - __ sd(this_arg, MemOperand(sp));
|
| + __ Ld(receiver, MemOperand(sp));
|
| + __ Sd(this_arg, MemOperand(sp));
|
| }
|
|
|
| // ----------- S t a t e -------------
|
| @@ -1750,8 +1750,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
| // 2. Make sure the receiver is actually callable.
|
| Label receiver_not_callable;
|
| __ JumpIfSmi(receiver, &receiver_not_callable);
|
| - __ ld(a4, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
| - __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
|
| + __ Ld(a4, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
| + __ Lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
|
| __ And(a4, a4, Operand(1 << Map::kIsCallable));
|
| __ Branch(&receiver_not_callable, eq, a4, Operand(zero_reg));
|
|
|
| @@ -1777,7 +1777,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
|
| // 4c. The receiver is not callable, throw an appropriate TypeError.
|
| __ bind(&receiver_not_callable);
|
| {
|
| - __ sd(receiver, MemOperand(sp));
|
| + __ Sd(receiver, MemOperand(sp));
|
| __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
|
| }
|
| }
|
| @@ -1797,7 +1797,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
|
| // 2. Get the function to call (passed as receiver) from the stack.
|
| // a0: actual number of arguments
|
| __ Dlsa(at, sp, a0, kPointerSizeLog2);
|
| - __ ld(a1, MemOperand(at));
|
| + __ Ld(a1, MemOperand(at));
|
|
|
| // 3. Shift arguments and return address one slot down on the stack
|
| // (overwriting the original receiver). Adjust argument count to make
|
| @@ -1810,8 +1810,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
|
| __ Dlsa(a2, sp, a0, kPointerSizeLog2);
|
|
|
| __ bind(&loop);
|
| - __ ld(at, MemOperand(a2, -kPointerSize));
|
| - __ sd(at, MemOperand(a2));
|
| + __ Ld(at, MemOperand(a2, -kPointerSize));
|
| + __ Sd(at, MemOperand(a2));
|
| __ Dsubu(a2, a2, Operand(kPointerSize));
|
| __ Branch(&loop, ne, a2, Operand(sp));
|
| // Adjust the actual number of arguments and remove the top element
|
| @@ -1861,7 +1861,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
|
| __ Dsubu(scratch, scratch, Operand(1));
|
| __ Movz(arguments_list, undefined_value, scratch); // if argc == 2
|
|
|
| - __ sd(this_argument, MemOperand(sp, 0)); // Overwrite receiver
|
| + __ Sd(this_argument, MemOperand(sp, 0)); // Overwrite receiver
|
| }
|
|
|
| // ----------- S t a t e -------------
|
| @@ -1874,8 +1874,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
|
| // 2. Make sure the target is actually callable.
|
| Label target_not_callable;
|
| __ JumpIfSmi(target, &target_not_callable);
|
| - __ ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
|
| - __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
|
| + __ Ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
|
| + __ Lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
|
| __ And(a4, a4, Operand(1 << Map::kIsCallable));
|
| __ Branch(&target_not_callable, eq, a4, Operand(zero_reg));
|
|
|
| @@ -1887,7 +1887,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
|
| // 3b. The target is not callable, throw an appropriate TypeError.
|
| __ bind(&target_not_callable);
|
| {
|
| - __ sd(target, MemOperand(sp));
|
| + __ Sd(target, MemOperand(sp));
|
| __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
|
| }
|
| }
|
| @@ -1928,7 +1928,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
|
| __ Dsubu(scratch, scratch, Operand(1));
|
| __ Movz(new_target, target, scratch); // if argc == 2
|
|
|
| - __ sd(undefined_value, MemOperand(sp, 0)); // Overwrite receiver
|
| + __ Sd(undefined_value, MemOperand(sp, 0)); // Overwrite receiver
|
| }
|
|
|
| // ----------- S t a t e -------------
|
| @@ -1941,16 +1941,16 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
|
| // 2. Make sure the target is actually a constructor.
|
| Label target_not_constructor;
|
| __ JumpIfSmi(target, &target_not_constructor);
|
| - __ ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
|
| - __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
|
| + __ Ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
|
| + __ Lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
|
| __ And(a4, a4, Operand(1 << Map::kIsConstructor));
|
| __ Branch(&target_not_constructor, eq, a4, Operand(zero_reg));
|
|
|
| // 3. Make sure the new.target is actually a constructor.
|
| Label new_target_not_constructor;
|
| __ JumpIfSmi(new_target, &new_target_not_constructor);
|
| - __ ld(a4, FieldMemOperand(new_target, HeapObject::kMapOffset));
|
| - __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
|
| + __ Ld(a4, FieldMemOperand(new_target, HeapObject::kMapOffset));
|
| + __ Lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
|
| __ And(a4, a4, Operand(1 << Map::kIsConstructor));
|
| __ Branch(&new_target_not_constructor, eq, a4, Operand(zero_reg));
|
|
|
| @@ -1960,14 +1960,14 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
|
| // 4b. The target is not a constructor, throw an appropriate TypeError.
|
| __ bind(&target_not_constructor);
|
| {
|
| - __ sd(target, MemOperand(sp));
|
| + __ Sd(target, MemOperand(sp));
|
| __ TailCallRuntime(Runtime::kThrowNotConstructor);
|
| }
|
|
|
| // 4c. The new.target is not a constructor, throw an appropriate TypeError.
|
| __ bind(&new_target_not_constructor);
|
| {
|
| - __ sd(new_target, MemOperand(sp));
|
| + __ Sd(new_target, MemOperand(sp));
|
| __ TailCallRuntime(Runtime::kThrowNotConstructor);
|
| }
|
| }
|
| @@ -1987,7 +1987,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
|
| // -----------------------------------
|
| // Get the number of arguments passed (as a smi), tear down the frame and
|
| // then tear down the parameters.
|
| - __ ld(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
|
| + __ Ld(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
|
| kPointerSize)));
|
| __ mov(sp, fp);
|
| __ MultiPop(fp.bit() | ra.bit());
|
| @@ -2021,23 +2021,23 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
|
|
|
| // Load the map of argumentsList into a2.
|
| Register arguments_list_map = a2;
|
| - __ ld(arguments_list_map,
|
| + __ Ld(arguments_list_map,
|
| FieldMemOperand(arguments_list, HeapObject::kMapOffset));
|
|
|
| // Load native context into a4.
|
| Register native_context = a4;
|
| - __ ld(native_context, NativeContextMemOperand());
|
| + __ Ld(native_context, NativeContextMemOperand());
|
|
|
| // Check if argumentsList is an (unmodified) arguments object.
|
| - __ ld(at, ContextMemOperand(native_context,
|
| + __ Ld(at, ContextMemOperand(native_context,
|
| Context::SLOPPY_ARGUMENTS_MAP_INDEX));
|
| __ Branch(&create_arguments, eq, arguments_list_map, Operand(at));
|
| - __ ld(at, ContextMemOperand(native_context,
|
| + __ Ld(at, ContextMemOperand(native_context,
|
| Context::STRICT_ARGUMENTS_MAP_INDEX));
|
| __ Branch(&create_arguments, eq, arguments_list_map, Operand(at));
|
|
|
| // Check if argumentsList is a fast JSArray.
|
| - __ lbu(v0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
|
| + __ Lbu(v0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
|
| __ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
|
|
|
| // Ask the runtime to create the list (actually a FixedArray).
|
| @@ -2048,16 +2048,16 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
|
| __ CallRuntime(Runtime::kCreateListFromArrayLike);
|
| __ mov(arguments_list, v0);
|
| __ Pop(target, new_target);
|
| - __ lw(len, UntagSmiFieldMemOperand(v0, FixedArray::kLengthOffset));
|
| + __ Lw(len, UntagSmiFieldMemOperand(v0, FixedArray::kLengthOffset));
|
| }
|
| __ Branch(&done_create);
|
|
|
| // Try to create the list from an arguments object.
|
| __ bind(&create_arguments);
|
| - __ lw(len, UntagSmiFieldMemOperand(arguments_list,
|
| + __ Lw(len, UntagSmiFieldMemOperand(arguments_list,
|
| JSArgumentsObject::kLengthOffset));
|
| - __ ld(a4, FieldMemOperand(arguments_list, JSObject::kElementsOffset));
|
| - __ lw(at, UntagSmiFieldMemOperand(a4, FixedArray::kLengthOffset));
|
| + __ Ld(a4, FieldMemOperand(arguments_list, JSObject::kElementsOffset));
|
| + __ Lw(at, UntagSmiFieldMemOperand(a4, FixedArray::kLengthOffset));
|
| __ Branch(&create_runtime, ne, len, Operand(at));
|
| __ mov(args, a4);
|
|
|
| @@ -2066,21 +2066,21 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
|
| // For holey JSArrays we need to check that the array prototype chain
|
| // protector is intact and our prototype is the Array.prototype actually.
|
| __ bind(&create_holey_array);
|
| - __ ld(a2, FieldMemOperand(a2, Map::kPrototypeOffset));
|
| - __ ld(at, ContextMemOperand(native_context,
|
| + __ Ld(a2, FieldMemOperand(a2, Map::kPrototypeOffset));
|
| + __ Ld(at, ContextMemOperand(native_context,
|
| Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
|
| __ Branch(&create_runtime, ne, a2, Operand(at));
|
| __ LoadRoot(at, Heap::kArrayProtectorRootIndex);
|
| - __ lw(a2, FieldMemOperand(at, PropertyCell::kValueOffset));
|
| + __ Lw(a2, FieldMemOperand(at, PropertyCell::kValueOffset));
|
| __ Branch(&create_runtime, ne, a2,
|
| Operand(Smi::FromInt(Isolate::kProtectorValid)));
|
| - __ lw(a2, UntagSmiFieldMemOperand(a0, JSArray::kLengthOffset));
|
| - __ ld(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
|
| + __ Lw(a2, UntagSmiFieldMemOperand(a0, JSArray::kLengthOffset));
|
| + __ Ld(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
|
| __ Branch(&done_create);
|
|
|
| // Try to create the list from a JSArray object.
|
| __ bind(&create_array);
|
| - __ lbu(t1, FieldMemOperand(a2, Map::kBitField2Offset));
|
| + __ Lbu(t1, FieldMemOperand(a2, Map::kBitField2Offset));
|
| __ DecodeField<Map::ElementsKindBits>(t1);
|
| STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
|
| STATIC_ASSERT(FAST_ELEMENTS == 2);
|
| @@ -2088,8 +2088,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
|
| __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_SMI_ELEMENTS));
|
| __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_ELEMENTS));
|
| __ Branch(&create_runtime, hi, t1, Operand(FAST_ELEMENTS));
|
| - __ lw(a2, UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
|
| - __ ld(a0, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
|
| + __ Lw(a2, UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
|
| + __ Ld(a0, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
|
|
|
| __ bind(&done_create);
|
| }
|
| @@ -2131,7 +2131,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
|
| __ Dsubu(scratch, sp, Operand(scratch));
|
| __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
|
| __ bind(&loop);
|
| - __ ld(a5, MemOperand(src));
|
| + __ Ld(a5, MemOperand(src));
|
| __ Branch(&push, ne, a5, Operand(t1));
|
| __ LoadRoot(a5, Heap::kUndefinedValueRootIndex);
|
| __ bind(&push);
|
| @@ -2175,14 +2175,14 @@ void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
|
|
|
| // Check if we have an arguments adaptor frame below the function frame.
|
| Label arguments_adaptor, arguments_done;
|
| - __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| - __ ld(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
|
| + __ Ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| + __ Ld(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
|
| __ Branch(&arguments_adaptor, eq, a0,
|
| Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
|
| {
|
| - __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
|
| - __ ld(a0, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
|
| - __ lw(a0,
|
| + __ Ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
|
| + __ Ld(a0, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Lw(a0,
|
| FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
|
| __ mov(a3, fp);
|
| }
|
| @@ -2190,7 +2190,7 @@ void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
|
| __ bind(&arguments_adaptor);
|
| {
|
| // Just get the length from the ArgumentsAdaptorFrame.
|
| - __ lw(a0, UntagSmiMemOperand(
|
| + __ Lw(a0, UntagSmiMemOperand(
|
| a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| }
|
| __ bind(&arguments_done);
|
| @@ -2209,7 +2209,7 @@ void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
|
| __ bind(&loop);
|
| {
|
| __ Dlsa(at, a3, a2, kPointerSizeLog2);
|
| - __ ld(at, MemOperand(at, 1 * kPointerSize));
|
| + __ Ld(at, MemOperand(at, 1 * kPointerSize));
|
| __ push(at);
|
| __ Subu(a2, a2, Operand(1));
|
| __ Branch(&loop, ne, a2, Operand(zero_reg));
|
| @@ -2270,42 +2270,42 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
|
| ExternalReference::is_tail_call_elimination_enabled_address(
|
| masm->isolate());
|
| __ li(at, Operand(is_tail_call_elimination_enabled));
|
| - __ lb(scratch1, MemOperand(at));
|
| + __ Lb(scratch1, MemOperand(at));
|
| __ Branch(&done, eq, scratch1, Operand(zero_reg));
|
|
|
| // Drop possible interpreter handler/stub frame.
|
| {
|
| Label no_interpreter_frame;
|
| - __ ld(scratch3,
|
| + __ Ld(scratch3,
|
| MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
|
| __ Branch(&no_interpreter_frame, ne, scratch3,
|
| Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
|
| - __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| + __ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| __ bind(&no_interpreter_frame);
|
| }
|
|
|
| // Check if next frame is an arguments adaptor frame.
|
| Register caller_args_count_reg = scratch1;
|
| Label no_arguments_adaptor, formal_parameter_count_loaded;
|
| - __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| - __ ld(scratch3,
|
| + __ Ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| + __ Ld(scratch3,
|
| MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
|
| __ Branch(&no_arguments_adaptor, ne, scratch3,
|
| Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
|
|
|
| // Drop current frame and load arguments count from arguments adaptor frame.
|
| __ mov(fp, scratch2);
|
| - __ lw(caller_args_count_reg,
|
| + __ Lw(caller_args_count_reg,
|
| UntagSmiMemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| __ Branch(&formal_parameter_count_loaded);
|
|
|
| __ bind(&no_arguments_adaptor);
|
| // Load caller's formal parameter count
|
| - __ ld(scratch1,
|
| + __ Ld(scratch1,
|
| MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
|
| - __ ld(scratch1,
|
| + __ Ld(scratch1,
|
| FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
|
| - __ lw(caller_args_count_reg,
|
| + __ Lw(caller_args_count_reg,
|
| FieldMemOperand(scratch1,
|
| SharedFunctionInfo::kFormalParameterCountOffset));
|
|
|
| @@ -2331,8 +2331,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
|
| // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
|
| // Check that function is not a "classConstructor".
|
| Label class_constructor;
|
| - __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| - __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFunctionKindByteOffset));
|
| + __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFunctionKindByteOffset));
|
| __ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
|
| __ Branch(&class_constructor, ne, at, Operand(zero_reg));
|
|
|
| @@ -2341,10 +2341,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
|
| // context in case of conversion.
|
| STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
|
| SharedFunctionInfo::kStrictModeByteOffset);
|
| - __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
|
| + __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
|
| // We need to convert the receiver for non-native sloppy mode functions.
|
| Label done_convert;
|
| - __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
|
| + __ Lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
|
| __ And(at, a3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
|
| (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
|
| __ Branch(&done_convert, ne, at, Operand(zero_reg));
|
| @@ -2362,7 +2362,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
|
| } else {
|
| Label convert_to_object, convert_receiver;
|
| __ Dlsa(at, sp, a0, kPointerSizeLog2);
|
| - __ ld(a3, MemOperand(at));
|
| + __ Ld(a3, MemOperand(at));
|
| __ JumpIfSmi(a3, &convert_to_object);
|
| STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
|
| __ GetObjectType(a3, a4, a4);
|
| @@ -2396,11 +2396,11 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
|
| __ Pop(a0, a1);
|
| __ SmiUntag(a0);
|
| }
|
| - __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| __ bind(&convert_receiver);
|
| }
|
| __ Dlsa(at, sp, a0, kPointerSizeLog2);
|
| - __ sd(a3, MemOperand(at));
|
| + __ Sd(a3, MemOperand(at));
|
| }
|
| __ bind(&done_convert);
|
|
|
| @@ -2415,7 +2415,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
|
| PrepareForTailCall(masm, a0, t0, t1, t2);
|
| }
|
|
|
| - __ lw(a2,
|
| + __ Lw(a2,
|
| FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
|
| ParameterCount actual(a0);
|
| ParameterCount expected(a2);
|
| @@ -2446,14 +2446,14 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
|
|
|
| // Patch the receiver to [[BoundThis]].
|
| {
|
| - __ ld(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
|
| + __ Ld(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
|
| __ Dlsa(a4, sp, a0, kPointerSizeLog2);
|
| - __ sd(at, MemOperand(a4));
|
| + __ Sd(at, MemOperand(a4));
|
| }
|
|
|
| // Load [[BoundArguments]] into a2 and length of that into a4.
|
| - __ ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
|
| - __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
|
| + __ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
|
| + __ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
|
|
|
| // ----------- S t a t e -------------
|
| // -- a0 : the number of arguments (not including the receiver)
|
| @@ -2488,9 +2488,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
|
| __ bind(&loop);
|
| __ Branch(&done_loop, gt, a5, Operand(a0));
|
| __ Dlsa(a6, sp, a4, kPointerSizeLog2);
|
| - __ ld(at, MemOperand(a6));
|
| + __ Ld(at, MemOperand(a6));
|
| __ Dlsa(a6, sp, a5, kPointerSizeLog2);
|
| - __ sd(at, MemOperand(a6));
|
| + __ Sd(at, MemOperand(a6));
|
| __ Daddu(a4, a4, Operand(1));
|
| __ Daddu(a5, a5, Operand(1));
|
| __ Branch(&loop);
|
| @@ -2500,25 +2500,25 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
|
| // Copy [[BoundArguments]] to the stack (below the arguments).
|
| {
|
| Label loop, done_loop;
|
| - __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
|
| + __ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
|
| __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
| __ bind(&loop);
|
| __ Dsubu(a4, a4, Operand(1));
|
| __ Branch(&done_loop, lt, a4, Operand(zero_reg));
|
| __ Dlsa(a5, a2, a4, kPointerSizeLog2);
|
| - __ ld(at, MemOperand(a5));
|
| + __ Ld(at, MemOperand(a5));
|
| __ Dlsa(a5, sp, a0, kPointerSizeLog2);
|
| - __ sd(at, MemOperand(a5));
|
| + __ Sd(at, MemOperand(a5));
|
| __ Daddu(a0, a0, Operand(1));
|
| __ Branch(&loop);
|
| __ bind(&done_loop);
|
| }
|
|
|
| // Call the [[BoundTargetFunction]] via the Call builtin.
|
| - __ ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
|
| + __ Ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
|
| __ li(at, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
|
| masm->isolate())));
|
| - __ ld(at, MemOperand(at));
|
| + __ Ld(at, MemOperand(at));
|
| __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| __ Jump(at);
|
| }
|
| @@ -2541,7 +2541,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
|
| RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
|
|
|
| // Check if target has a [[Call]] internal method.
|
| - __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
|
| + __ Lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
|
| __ And(t1, t1, Operand(1 << Map::kIsCallable));
|
| __ Branch(&non_callable, eq, t1, Operand(zero_reg));
|
|
|
| @@ -2566,7 +2566,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
|
| __ bind(&non_function);
|
| // Overwrite the original receiver with the (original) target.
|
| __ Dlsa(at, sp, a0, kPointerSizeLog2);
|
| - __ sd(a1, MemOperand(at));
|
| + __ Sd(a1, MemOperand(at));
|
| // Let the "call_as_function_delegate" take care of the rest.
|
| __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
|
| __ Jump(masm->isolate()->builtins()->CallFunction(
|
| @@ -2598,34 +2598,34 @@ static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
|
| Register native_context = a5;
|
|
|
| Label runtime_call, push_args;
|
| - __ ld(spread, MemOperand(sp, 0));
|
| + __ Ld(spread, MemOperand(sp, 0));
|
| __ JumpIfSmi(spread, &runtime_call);
|
| - __ ld(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
|
| - __ ld(native_context, NativeContextMemOperand());
|
| + __ Ld(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
|
| + __ Ld(native_context, NativeContextMemOperand());
|
|
|
| // Check that the spread is an array.
|
| - __ lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
|
| + __ Lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
|
| __ Branch(&runtime_call, ne, scratch, Operand(JS_ARRAY_TYPE));
|
|
|
| // Check that we have the original ArrayPrototype.
|
| - __ ld(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
|
| - __ ld(scratch2, ContextMemOperand(native_context,
|
| + __ Ld(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
|
| + __ Ld(scratch2, ContextMemOperand(native_context,
|
| Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
|
| __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
|
|
|
| // Check that the ArrayPrototype hasn't been modified in a way that would
|
| // affect iteration.
|
| __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
|
| - __ ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
|
| + __ Ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
|
| __ Branch(&runtime_call, ne, scratch,
|
| Operand(Smi::FromInt(Isolate::kProtectorValid)));
|
|
|
| // Check that the map of the initial array iterator hasn't changed.
|
| - __ ld(scratch,
|
| + __ Ld(scratch,
|
| ContextMemOperand(native_context,
|
| Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
|
| - __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
|
| - __ ld(scratch2,
|
| + __ Ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
|
| + __ Ld(scratch2,
|
| ContextMemOperand(native_context,
|
| Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
|
| __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
|
| @@ -2633,7 +2633,7 @@ static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
|
| // For FastPacked kinds, iteration will have the same effect as simply
|
| // accessing each property in order.
|
| Label no_protector_check;
|
| - __ lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
|
| + __ Lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
|
| __ DecodeField<Map::ElementsKindBits>(scratch);
|
| __ Branch(&runtime_call, hi, scratch, Operand(FAST_HOLEY_ELEMENTS));
|
| // For non-FastHoley kinds, we can skip the protector check.
|
| @@ -2641,14 +2641,14 @@ static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
|
| __ Branch(&no_protector_check, eq, scratch, Operand(FAST_ELEMENTS));
|
| // Check the ArrayProtector cell.
|
| __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
|
| - __ ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
|
| + __ Ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
|
| __ Branch(&runtime_call, ne, scratch,
|
| Operand(Smi::FromInt(Isolate::kProtectorValid)));
|
|
|
| __ bind(&no_protector_check);
|
| // Load the FixedArray backing store, but use the length from the array.
|
| - __ lw(spread_len, UntagSmiFieldMemOperand(spread, JSArray::kLengthOffset));
|
| - __ ld(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
|
| + __ Lw(spread_len, UntagSmiFieldMemOperand(spread, JSArray::kLengthOffset));
|
| + __ Ld(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
|
| __ Branch(&push_args);
|
|
|
| __ bind(&runtime_call);
|
| @@ -2665,7 +2665,7 @@ static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
|
|
|
| {
|
| // Calculate the new nargs including the result of the spread.
|
| - __ lw(spread_len,
|
| + __ Lw(spread_len,
|
| UntagSmiFieldMemOperand(spread, FixedArray::kLengthOffset));
|
|
|
| __ bind(&push_args);
|
| @@ -2700,7 +2700,7 @@ static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
|
| __ bind(&loop);
|
| __ Branch(&done, eq, scratch, Operand(spread_len));
|
| __ Dlsa(scratch2, spread, scratch, kPointerSizeLog2);
|
| - __ ld(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
|
| + __ Ld(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
|
| __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
|
| __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
|
| __ bind(&push);
|
| @@ -2740,8 +2740,8 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
|
|
|
| // Tail call to the function-specific construct stub (still in the caller
|
| // context at this point).
|
| - __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| - __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
|
| + __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
|
| __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| __ Jump(at);
|
| }
|
| @@ -2756,8 +2756,8 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
|
| __ AssertBoundFunction(a1);
|
|
|
| // Load [[BoundArguments]] into a2 and length of that into a4.
|
| - __ ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
|
| - __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
|
| + __ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
|
| + __ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
|
|
|
| // ----------- S t a t e -------------
|
| // -- a0 : the number of arguments (not including the receiver)
|
| @@ -2793,9 +2793,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
|
| __ bind(&loop);
|
| __ Branch(&done_loop, ge, a5, Operand(a0));
|
| __ Dlsa(a6, sp, a4, kPointerSizeLog2);
|
| - __ ld(at, MemOperand(a6));
|
| + __ Ld(at, MemOperand(a6));
|
| __ Dlsa(a6, sp, a5, kPointerSizeLog2);
|
| - __ sd(at, MemOperand(a6));
|
| + __ Sd(at, MemOperand(a6));
|
| __ Daddu(a4, a4, Operand(1));
|
| __ Daddu(a5, a5, Operand(1));
|
| __ Branch(&loop);
|
| @@ -2805,15 +2805,15 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
|
| // Copy [[BoundArguments]] to the stack (below the arguments).
|
| {
|
| Label loop, done_loop;
|
| - __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
|
| + __ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
|
| __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
| __ bind(&loop);
|
| __ Dsubu(a4, a4, Operand(1));
|
| __ Branch(&done_loop, lt, a4, Operand(zero_reg));
|
| __ Dlsa(a5, a2, a4, kPointerSizeLog2);
|
| - __ ld(at, MemOperand(a5));
|
| + __ Ld(at, MemOperand(a5));
|
| __ Dlsa(a5, sp, a0, kPointerSizeLog2);
|
| - __ sd(at, MemOperand(a5));
|
| + __ Sd(at, MemOperand(a5));
|
| __ Daddu(a0, a0, Operand(1));
|
| __ Branch(&loop);
|
| __ bind(&done_loop);
|
| @@ -2823,14 +2823,14 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
|
| {
|
| Label skip_load;
|
| __ Branch(&skip_load, ne, a1, Operand(a3));
|
| - __ ld(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
|
| + __ Ld(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
|
| __ bind(&skip_load);
|
| }
|
|
|
| // Construct the [[BoundTargetFunction]] via the Construct builtin.
|
| - __ ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
|
| + __ Ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
|
| __ li(at, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
|
| - __ ld(at, MemOperand(at));
|
| + __ Ld(at, MemOperand(at));
|
| __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| __ Jump(at);
|
| }
|
| @@ -2867,13 +2867,13 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
|
| __ JumpIfSmi(a1, &non_constructor);
|
|
|
| // Dispatch based on instance type.
|
| - __ ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
|
| - __ lbu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
|
| + __ Ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
|
| + __ Lbu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
|
| __ Jump(masm->isolate()->builtins()->ConstructFunction(),
|
| RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
|
|
|
| // Check if target has a [[Construct]] internal method.
|
| - __ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
|
| + __ Lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
|
| __ And(t3, t3, Operand(1 << Map::kIsConstructor));
|
| __ Branch(&non_constructor, eq, t3, Operand(zero_reg));
|
|
|
| @@ -2890,7 +2890,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
|
| {
|
| // Overwrite the original receiver with the (original) target.
|
| __ Dlsa(at, sp, a0, kPointerSizeLog2);
|
| - __ sd(a1, MemOperand(at));
|
| + __ Sd(a1, MemOperand(at));
|
| // Let the "call_as_constructor_delegate" take care of the rest.
|
| __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
|
| __ Jump(masm->isolate()->builtins()->CallFunction(),
|
| @@ -2997,7 +2997,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
|
|
|
| Label copy;
|
| __ bind(&copy);
|
| - __ ld(a5, MemOperand(a0));
|
| + __ Ld(a5, MemOperand(a0));
|
| __ push(a5);
|
| __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a4));
|
| __ daddiu(a0, a0, -kPointerSize); // In delay slot.
|
| @@ -3030,11 +3030,11 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
|
| // a7: copy end address
|
| Label copy;
|
| __ bind(&copy);
|
| - __ ld(a4, MemOperand(a0)); // Adjusted above for return addr and receiver.
|
| + __ Ld(a4, MemOperand(a0)); // Adjusted above for return addr and receiver.
|
| __ Dsubu(sp, sp, kPointerSize);
|
| __ Dsubu(a0, a0, kPointerSize);
|
| __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a7));
|
| - __ sd(a4, MemOperand(sp)); // In the delay slot.
|
| + __ Sd(a4, MemOperand(sp)); // In the delay slot.
|
|
|
| // Fill the remaining expected arguments with undefined.
|
| // a1: function
|
| @@ -3051,7 +3051,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
|
| __ bind(&fill);
|
| __ Dsubu(sp, sp, kPointerSize);
|
| __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a4));
|
| - __ sd(a5, MemOperand(sp));
|
| + __ Sd(a5, MemOperand(sp));
|
| }
|
|
|
| // Call the entry point.
|
| @@ -3060,7 +3060,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
|
| // a0 : expected number of arguments
|
| // a1 : function (passed through to callee)
|
| // a3: new target (passed through to callee)
|
| - __ ld(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
|
| + __ Ld(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
|
| __ Call(a4);
|
|
|
| // Store offset of return address for deoptimizer.
|
| @@ -3074,7 +3074,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
|
| // Don't adapt arguments.
|
| // -------------------------------------------
|
| __ bind(&dont_adapt_arguments);
|
| - __ ld(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
|
| + __ Ld(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
|
| __ Jump(a4);
|
|
|
| __ bind(&stack_overflow);
|
|
|