| Index: src/mips64/code-stubs-mips64.cc
|
| diff --git a/src/mips/code-stubs-mips.cc b/src/mips64/code-stubs-mips64.cc
|
| similarity index 82%
|
| copy from src/mips/code-stubs-mips.cc
|
| copy to src/mips64/code-stubs-mips64.cc
|
| index a2e1a27b953b851db28ff9b85b0568a0aae22dc2..db42d4b6c5e1575a4df00c1e37ae1ef1598dc53f 100644
|
| --- a/src/mips/code-stubs-mips.cc
|
| +++ b/src/mips64/code-stubs-mips64.cc
|
| @@ -4,7 +4,7 @@
|
|
|
| #include "src/v8.h"
|
|
|
| -#if V8_TARGET_ARCH_MIPS
|
| +#if V8_TARGET_ARCH_MIPS64
|
|
|
| #include "src/bootstrapper.h"
|
| #include "src/code-stubs.h"
|
| @@ -330,7 +330,7 @@ void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
|
| CallInterfaceDescriptor* descriptor =
|
| isolate->call_descriptor(Isolate::ApiFunctionCall);
|
| Register registers[] = { a0, // callee
|
| - t0, // call_data
|
| + a4, // call_data
|
| a2, // holder
|
| a1, // api_function_address
|
| cp, // context
|
| @@ -376,10 +376,10 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
|
| ASSERT(descriptor->register_param_count() == 0 ||
|
| a0.is(descriptor->GetParameterRegister(param_count - 1)));
|
| // Push arguments, adjust sp.
|
| - __ Subu(sp, sp, Operand(param_count * kPointerSize));
|
| + __ Dsubu(sp, sp, Operand(param_count * kPointerSize));
|
| for (int i = 0; i < param_count; ++i) {
|
| // Store argument to stack.
|
| - __ sw(descriptor->GetParameterRegister(i),
|
| + __ sd(descriptor->GetParameterRegister(i),
|
| MemOperand(sp, (param_count-1-i) * kPointerSize));
|
| }
|
| ExternalReference miss = descriptor->miss_handler();
|
| @@ -432,17 +432,16 @@ class ConvertToDoubleStub : public PlatformCodeStub {
|
|
|
|
|
| void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
|
| - Register exponent, mantissa;
|
| - if (kArchEndian == kLittle) {
|
| - exponent = result1_;
|
| - mantissa = result2_;
|
| - } else {
|
| - exponent = result2_;
|
| - mantissa = result1_;
|
| - }
|
| +#ifndef BIG_ENDIAN_FLOATING_POINT
|
| + Register exponent = result1_;
|
| + Register mantissa = result2_;
|
| +#else
|
| + Register exponent = result2_;
|
| + Register mantissa = result1_;
|
| +#endif
|
| Label not_special;
|
| // Convert from Smi to integer.
|
| - __ sra(source_, source_, kSmiTagSize);
|
| + __ SmiUntag(source_);
|
| // Move sign bit from source to destination. This works because the sign bit
|
| // in the exponent word of the double has the same position and polarity as
|
| // the 2's complement sign bit in a Smi.
|
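The sign-bit trick above works because the 2's complement sign bit of the untagged value and the IEEE-754 sign bit both occupy the top bit of their 32-bit words. A minimal host-side C++ sketch of the coincidence (not V8 assembler):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  int32_t untagged = -42;  // Smi payload after SmiUntag
  uint32_t word = static_cast<uint32_t>(untagged);
  uint32_t sign = word >> 31;         // 2's complement sign bit, bit 31
  // The double's sign bit sits in the same position in the high
  // (exponent) word, so it can be moved across with shifts alone.
  uint32_t exponent_word = sign << 31;
  printf("sign=%u exponent_word=0x%08x\n", sign, exponent_word);
  return 0;
}
```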
| @@ -510,7 +509,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
|
| DoubleRegister double_scratch = kLithiumScratchDouble;
|
|
|
| __ Push(scratch, scratch2, scratch3);
|
| -
|
| if (!skip_fastpath()) {
|
| // Load double input.
|
| __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
|
| @@ -545,10 +543,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
|
| Register input_high = scratch2;
|
| Register input_low = scratch3;
|
|
|
| - __ lw(input_low,
|
| - MemOperand(input_reg, double_offset + Register::kMantissaOffset));
|
| - __ lw(input_high,
|
| - MemOperand(input_reg, double_offset + Register::kExponentOffset));
|
| + __ lw(input_low, MemOperand(input_reg, double_offset));
|
| + __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
|
|
|
| Label normal_exponent, restore_sign;
|
| // Extract the biased exponent in result.
|
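The replaced loads drop the endian-aware Register::kMantissaOffset/kExponentOffset pair in favor of fixed offsets, which only holds on a little-endian target: the mantissa word sits at the lower address, the sign/exponent word at +kIntSize. A hedged host-side illustration (assumes a little-endian machine):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double d = -2.5;
  uint32_t words[2];
  std::memcpy(words, &d, sizeof(d));
  // Little-endian: words[0] is the mantissa word (double_offset + 0),
  // words[1] the sign/exponent word (double_offset + kIntSize).
  printf("input_low=0x%08x input_high=0x%08x\n", words[0], words[1]);
  return 0;
}
```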
| @@ -700,7 +696,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
|
| Condition cc) {
|
| Label not_identical;
|
| Label heap_number, return_equal;
|
| - Register exp_mask_reg = t5;
|
| + Register exp_mask_reg = t1;
|
|
|
| __ Branch(&not_identical, ne, a0, Operand(a1));
|
|
|
| @@ -711,21 +707,21 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
|
| // They are both equal and they are not both Smis so both of them are not
|
| // Smis. If it's not a heap number, then return equal.
|
| if (cc == less || cc == greater) {
|
| - __ GetObjectType(a0, t4, t4);
|
| - __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
|
| + __ GetObjectType(a0, t0, t0);
|
| + __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
|
| } else {
|
| - __ GetObjectType(a0, t4, t4);
|
| - __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
|
| + __ GetObjectType(a0, t0, t0);
|
| + __ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
|
| // Comparing JS objects with <=, >= is complicated.
|
| if (cc != eq) {
|
| - __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
|
| + __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
|
| // Normally here we fall through to return_equal, but undefined is
|
| // special: (undefined == undefined) == true, but
|
| // (undefined <= undefined) == false! See ECMAScript 11.8.5.
|
| if (cc == less_equal || cc == greater_equal) {
|
| - __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
|
| - __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
|
| - __ Branch(&return_equal, ne, a0, Operand(t2));
|
| + __ Branch(&return_equal, ne, t0, Operand(ODDBALL_TYPE));
|
| + __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
|
| + __ Branch(&return_equal, ne, a0, Operand(a6));
|
| ASSERT(is_int16(GREATER) && is_int16(LESS));
|
| __ Ret(USE_DELAY_SLOT);
|
| if (cc == le) {
|
| @@ -749,7 +745,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
|
| } else {
|
| __ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
|
| }
|
| -
|
| // For less and greater we don't have to check for NaN since the result of
|
| // x < x is false regardless. For the others here is some code to check
|
| // for NaN.
|
| @@ -761,17 +756,17 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
|
| // The representation of NaN values has all exponent bits (52..62) set,
|
| // and not all mantissa bits (0..51) clear.
|
| // Read top bits of double representation (second word of value).
|
| - __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
|
| + __ lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset));
|
| // Test that exponent bits are all set.
|
| - __ And(t3, t2, Operand(exp_mask_reg));
|
| + __ And(a7, a6, Operand(exp_mask_reg));
|
| // If all bits not set (ne cond), then not a NaN, objects are equal.
|
| - __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
|
| + __ Branch(&return_equal, ne, a7, Operand(exp_mask_reg));
|
|
|
| // Shift out flag and all exponent bits, retaining only mantissa.
|
| - __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
|
| + __ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord);
|
| // Or with all low-bits of mantissa.
|
| - __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
|
| - __ Or(v0, t3, Operand(t2));
|
| + __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
|
| + __ Or(v0, a7, Operand(a6));
|
| // For equal we already have the right value in v0: Return zero (equal)
|
| // if all bits in mantissa are zero (it's an Infinity) and non-zero if
|
| // not (it's a NaN). For <= and >= we need to load v0 with the failing
|
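The NaN predicate spelled out in the comments (all exponent bits 52..62 set, mantissa bits 0..51 not all clear) is easy to verify in portable C++; this is only an illustration of the bit test, not V8 code:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

bool IsNaNBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint64_t exponent = (bits >> 52) & 0x7FF;       // bits 52..62
  uint64_t mantissa = bits & ((1ULL << 52) - 1);  // bits 0..51
  return exponent == 0x7FF && mantissa != 0;      // Infinity has mantissa 0
}

int main() {
  double inf = std::numeric_limits<double>::infinity();
  double nan = std::numeric_limits<double>::quiet_NaN();
  printf("%d %d %d\n", IsNaNBits(nan), IsNaNBits(inf), IsNaNBits(1.5));
  return 0;
}
```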
| @@ -807,21 +802,20 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
|
| __ JumpIfSmi(lhs, &lhs_is_smi);
|
| // Rhs is a Smi.
|
| // Check whether the non-smi is a heap number.
|
| - __ GetObjectType(lhs, t4, t4);
|
| + __ GetObjectType(lhs, t0, t0);
|
| if (strict) {
|
| // If lhs was not a number and rhs was a Smi then strict equality cannot
|
| // succeed. Return non-equal (lhs is already not zero).
|
| - __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
|
| + __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
|
| __ mov(v0, lhs);
|
| } else {
|
| // Smi compared non-strictly with a non-Smi non-heap-number. Call
|
| // the runtime.
|
| - __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
|
| + __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
|
| }
|
| -
|
| // Rhs is a smi, lhs is a number.
|
| // Convert smi rhs to double.
|
| - __ sra(at, rhs, kSmiTagSize);
|
| + __ SmiUntag(at, rhs);
|
| __ mtc1(at, f14);
|
| __ cvt_d_w(f14, f14);
|
| __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
|
| @@ -831,21 +825,21 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
|
|
|
| __ bind(&lhs_is_smi);
|
| // Lhs is a Smi. Check whether the non-smi is a heap number.
|
| - __ GetObjectType(rhs, t4, t4);
|
| + __ GetObjectType(rhs, t0, t0);
|
| if (strict) {
|
| // If rhs was not a number and lhs was a Smi then strict equality cannot
|
| // succeed. Return non-equal.
|
| - __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
|
| + __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
|
| __ li(v0, Operand(1));
|
| } else {
|
| // Smi compared non-strictly with a non-Smi non-heap-number. Call
|
| // the runtime.
|
| - __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
|
| + __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
|
| }
|
|
|
| // Lhs is a smi, rhs is a number.
|
| // Convert smi lhs to double.
|
| - __ sra(at, lhs, kSmiTagSize);
|
| + __ SmiUntag(at, lhs);
|
| __ mtc1(at, f12);
|
| __ cvt_d_w(f12, f12);
|
| __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
|
| @@ -899,7 +893,7 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
|
| Label* slow) {
|
| __ GetObjectType(lhs, a3, a2);
|
| __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
|
| - __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
|
| + __ ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
|
| // If first was a heap number & second wasn't, go to slow case.
|
| __ Branch(slow, ne, a3, Operand(a2));
|
|
|
| @@ -946,7 +940,7 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
|
| // If both objects are undetectable, they are equal. Otherwise, they
|
| // are not equal, since they are different objects and an object is not
|
| // equal to undefined.
|
| - __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
|
| + __ ld(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
|
| __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
|
| __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
|
| __ and_(a0, a2, a3);
|
| @@ -993,10 +987,11 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
|
| Label not_two_smis, smi_done;
|
| __ Or(a2, a1, a0);
|
| __ JumpIfNotSmi(a2, &not_two_smis);
|
| - __ sra(a1, a1, 1);
|
| - __ sra(a0, a0, 1);
|
| + __ SmiUntag(a1);
|
| + __ SmiUntag(a0);
|
| +
|
| __ Ret(USE_DELAY_SLOT);
|
| - __ subu(v0, a1, a0);
|
| + __ dsubu(v0, a1, a0);
|
| __ bind(&not_two_smis);
|
|
|
| // NOTICE! This code is only reached after a smi-fast-case check, so
|
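The two-smi fast path above now untags with SmiUntag and returns the 64-bit difference; callers only inspect the sign and zero-ness of v0. A sketch assuming the 64-bit smi representation (value << 32):

```cpp
#include <cstdint>
#include <cstdio>

int64_t CompareSmis(int64_t a1_smi, int64_t a0_smi) {
  int64_t a1 = a1_smi >> 32;  // SmiUntag: arithmetic shift by the smi shift
  int64_t a0 = a0_smi >> 32;
  return a1 - a0;             // dsubu v0, a1, a0
}

int main() {
  // Negative means less, zero means equal, positive means greater.
  printf("%lld\n", (long long)CompareSmis(3LL << 32, 5LL << 32));  // -2
  return 0;
}
```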
| @@ -1010,8 +1005,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
|
| // be strictly equal if the other is a HeapNumber.
|
| STATIC_ASSERT(kSmiTag == 0);
|
| ASSERT_EQ(0, Smi::FromInt(0));
|
| - __ And(t2, lhs, Operand(rhs));
|
| - __ JumpIfNotSmi(t2, &not_smis, t0);
|
| + __ And(a6, lhs, Operand(rhs));
|
| + __ JumpIfNotSmi(a6, &not_smis, a4);
|
| // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
|
| // 1) Return the answer.
|
| // 2) Go to slow.
|
| @@ -1027,10 +1022,11 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
|
| // f12, f14 are the double representations of the left hand side
|
| // and the right hand side if we have FPU. Otherwise a2, a3 represent
|
| // left hand side and a0, a1 represent right hand side.
|
| +
|
| Label nan;
|
| - __ li(t0, Operand(LESS));
|
| - __ li(t1, Operand(GREATER));
|
| - __ li(t2, Operand(EQUAL));
|
| + __ li(a4, Operand(LESS));
|
| + __ li(a5, Operand(GREATER));
|
| + __ li(a6, Operand(EQUAL));
|
|
|
| // Check if either rhs or lhs is NaN.
|
| __ BranchF(NULL, &nan, eq, f12, f14);
|
| @@ -1038,15 +1034,15 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
|
| // Check if LESS condition is satisfied. If true, conditionally move the
|
| // result to v0.
|
| __ c(OLT, D, f12, f14);
|
| - __ Movt(v0, t0);
|
| + __ Movt(v0, a4);
|
| // Use the previous check to conditionally store the opposite condition
|
| // (GREATER) in v0. If rhs is equal to lhs, this will be corrected in the
|
| // next check.
|
| - __ Movf(v0, t1);
|
| + __ Movf(v0, a5);
|
| // Check if EQUAL condition is satisfied. If true, conditionally move the
|
| // result to v0.
|
| __ c(EQ, D, f12, f14);
|
| - __ Movt(v0, t2);
|
| + __ Movt(v0, a6);
|
|
|
| __ Ret();
|
|
|
| @@ -1109,15 +1105,15 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
|
| rhs,
|
| a2,
|
| a3,
|
| - t0);
|
| + a4);
|
| } else {
|
| StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
|
| lhs,
|
| rhs,
|
| a2,
|
| a3,
|
| - t0,
|
| - t1);
|
| + a4,
|
| + a5);
|
| }
|
| // Never falls through to here.
|
|
|
| @@ -1166,7 +1162,6 @@ void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
|
| void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
|
| __ mov(t9, ra);
|
| __ pop(ra);
|
| - __ StoreToSafepointRegisterSlot(t9, t9);
|
| if (save_doubles_ == kSaveFPRegs) {
|
| __ PopSafepointRegistersAndDoubles();
|
| } else {
|
| @@ -1206,15 +1201,15 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
|
| void MathPowStub::Generate(MacroAssembler* masm) {
|
| const Register base = a1;
|
| const Register exponent = a2;
|
| - const Register heapnumbermap = t1;
|
| + const Register heapnumbermap = a5;
|
| const Register heapnumber = v0;
|
| const DoubleRegister double_base = f2;
|
| const DoubleRegister double_exponent = f4;
|
| const DoubleRegister double_result = f0;
|
| const DoubleRegister double_scratch = f6;
|
| const FPURegister single_scratch = f8;
|
| - const Register scratch = t5;
|
| - const Register scratch2 = t3;
|
| + const Register scratch = t1;
|
| + const Register scratch2 = a7;
|
|
|
| Label call_runtime, done, int_exponent;
|
| if (exponent_type_ == ON_STACK) {
|
| @@ -1222,13 +1217,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
|
| // The exponent and base are supplied as arguments on the stack.
|
| // This can only happen if the stub is called from non-optimized code.
|
| // Load input parameters from stack to double registers.
|
| - __ lw(base, MemOperand(sp, 1 * kPointerSize));
|
| - __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
|
| + __ ld(base, MemOperand(sp, 1 * kPointerSize));
|
| + __ ld(exponent, MemOperand(sp, 0 * kPointerSize));
|
|
|
| __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
|
|
|
| __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
|
| - __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
|
| + __ ld(scratch, FieldMemOperand(base, JSObject::kMapOffset));
|
| __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
|
|
|
| __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
|
| @@ -1241,7 +1236,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
|
|
|
| __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
|
|
|
| - __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
|
| + __ ld(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
|
| __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
|
| __ ldc1(double_exponent,
|
| FieldMemOperand(exponent, HeapNumber::kValueOffset));
|
| @@ -1348,7 +1343,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
|
| // Get absolute value of exponent.
|
| Label positive_exponent;
|
| __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
|
| - __ Subu(scratch, zero_reg, scratch);
|
| + __ Dsubu(scratch, zero_reg, scratch);
|
| __ bind(&positive_exponent);
|
|
|
| Label while_true, no_carry, loop_end;
|
| @@ -1360,7 +1355,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
|
| __ mul_d(double_result, double_result, double_scratch);
|
| __ bind(&no_carry);
|
|
|
| - __ sra(scratch, scratch, 1);
|
| + __ dsra(scratch, scratch, 1);
|
|
|
| __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
|
| __ mul_d(double_scratch, double_scratch, double_scratch);
|
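The integer-exponent loop above is plain exponentiation by squaring on the absolute exponent, with a final reciprocal for negative powers. A minimal C++ sketch of the same control flow:

```cpp
#include <cstdio>

double PowInt(double base, long exponent) {
  long e = exponent < 0 ? -exponent : exponent;  // Dsubu(scratch, zero, scratch)
  double result = 1.0;
  double scratch = base;
  while (e != 0) {
    if (e & 1) result *= scratch;     // low bit set: multiply the result in
    e >>= 1;                          // dsra(scratch, scratch, 1)
    if (e != 0) scratch *= scratch;   // square the base for the next bit
  }
  return exponent < 0 ? 1.0 / result : result;
}

int main() {
  printf("%g %g\n", PowInt(2.0, 10), PowInt(2.0, -2));  // 1024 0.25
  return 0;
}
```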
| @@ -1499,7 +1494,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
| // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
|
|
|
| // Compute the argv pointer in a callee-saved register.
|
| - __ Addu(s1, sp, s1);
|
| + __ Daddu(s1, sp, s1);
|
|
|
| // Enter the exit frame that transitions from JavaScript to C++.
|
| FrameScope scope(masm, StackFrame::MANUAL);
|
| @@ -1539,8 +1534,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
| // instruction past the real call into C code (the jalr(t9)), and push it.
|
| // This is the return address of the exit frame.
|
| const int kNumInstructionsToJump = 5;
|
| - masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
|
| - masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
|
| + masm->Daddu(ra, ra, kNumInstructionsToJump * kInt32Size);
|
| + masm->sd(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
|
| // Stack space reservation moved to the branch delay slot below.
|
| // Stack is still aligned.
|
|
|
| @@ -1548,27 +1543,26 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
| masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
|
| masm->jalr(t9);
|
| // Set up sp in the delay slot.
|
| - masm->addiu(sp, sp, -kCArgsSlotsSize);
|
| + masm->daddiu(sp, sp, -kCArgsSlotsSize);
|
| // Make sure the stored 'ra' points to this position.
|
| ASSERT_EQ(kNumInstructionsToJump,
|
| masm->InstructionsGeneratedSince(&find_ra));
|
| }
|
|
|
| -
|
| // Runtime functions should not return 'the hole'. Allowing it to escape may
|
| // lead to crashes in the IC code later.
|
| if (FLAG_debug_code) {
|
| Label okay;
|
| - __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
|
| - __ Branch(&okay, ne, v0, Operand(t0));
|
| + __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
|
| + __ Branch(&okay, ne, v0, Operand(a4));
|
| __ stop("The hole escaped");
|
| __ bind(&okay);
|
| }
|
|
|
| // Check result for exception sentinel.
|
| Label exception_returned;
|
| - __ LoadRoot(t0, Heap::kExceptionRootIndex);
|
| - __ Branch(&exception_returned, eq, t0, Operand(v0));
|
| + __ LoadRoot(a4, Heap::kExceptionRootIndex);
|
| + __ Branch(&exception_returned, eq, a4, Operand(v0));
|
|
|
| ExternalReference pending_exception_address(
|
| Isolate::kPendingExceptionAddress, isolate());
|
| @@ -1578,10 +1572,10 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
| if (FLAG_debug_code) {
|
| Label okay;
|
| __ li(a2, Operand(pending_exception_address));
|
| - __ lw(a2, MemOperand(a2));
|
| - __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
|
| + __ ld(a2, MemOperand(a2));
|
| + __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
|
| // Cannot use check here as it attempts to generate call into runtime.
|
| - __ Branch(&okay, eq, t0, Operand(a2));
|
| + __ Branch(&okay, eq, a4, Operand(a2));
|
| __ stop("Unexpected pending exception");
|
| __ bind(&okay);
|
| }
|
| @@ -1598,17 +1592,17 @@ void CEntryStub::Generate(MacroAssembler* masm) {
|
|
|
| // Retrieve the pending exception.
|
| __ li(a2, Operand(pending_exception_address));
|
| - __ lw(v0, MemOperand(a2));
|
| + __ ld(v0, MemOperand(a2));
|
|
|
| // Clear the pending exception.
|
| __ li(a3, Operand(isolate()->factory()->the_hole_value()));
|
| - __ sw(a3, MemOperand(a2));
|
| + __ sd(a3, MemOperand(a2));
|
|
|
| // Special handling of termination exceptions which are uncatchable
|
| // by javascript code.
|
| Label throw_termination_exception;
|
| - __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
|
| - __ Branch(&throw_termination_exception, eq, v0, Operand(t0));
|
| + __ LoadRoot(a4, Heap::kTerminationExceptionRootIndex);
|
| + __ Branch(&throw_termination_exception, eq, v0, Operand(a4));
|
|
|
| // Handle normal exception.
|
| __ Throw(v0);
|
| @@ -1622,15 +1616,17 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| Label invoke, handler_entry, exit;
|
| Isolate* isolate = masm->isolate();
|
|
|
| + // TODO(plind): unify the ABI description here.
|
| // Registers:
|
| // a0: entry address
|
| // a1: function
|
| // a2: receiver
|
| // a3: argc
|
| - //
|
| + // a4: argv on mips64
|
| +
|
| // Stack:
|
| - // 4 args slots
|
| - // args
|
| + // 0 arg slots on mips64 (4 arg slots on mips)
|
| + // args -- in a4 on mips64, on the stack on mips
|
|
|
| ProfileEntryHookStub::MaybeCallEntryHook(masm);
|
|
|
| @@ -1642,25 +1638,29 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| // Set up the reserved register for 0.0.
|
| __ Move(kDoubleRegZero, 0.0);
|
|
|
| -
|
| // Load argv in s0 register.
|
| - int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
|
| - offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
|
| + if (kMipsAbi == kN64) {
|
| + __ mov(s0, a4); // 5th parameter (argv) in mips64 a4 register.
|
| + } else { // ABI O32.
|
| + // 5th parameter on stack for O32 ABI.
|
| + int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
|
| + offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
|
| + __ ld(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
|
| + }
|
|
|
| __ InitializeRootRegister();
|
| - __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
|
|
|
| // We build an EntryFrame.
|
| - __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
|
| + __ li(a7, Operand(-1)); // Push a bad frame pointer to fail if it is used.
|
| int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
|
| - __ li(t2, Operand(Smi::FromInt(marker)));
|
| - __ li(t1, Operand(Smi::FromInt(marker)));
|
| - __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
|
| - isolate)));
|
| - __ lw(t0, MemOperand(t0));
|
| - __ Push(t3, t2, t1, t0);
|
| + __ li(a6, Operand(Smi::FromInt(marker)));
|
| + __ li(a5, Operand(Smi::FromInt(marker)));
|
| + ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
|
| + __ li(a4, Operand(c_entry_fp));
|
| + __ ld(a4, MemOperand(a4));
|
| + __ Push(a7, a6, a5, a4);
|
| // Set up frame pointer for the frame to be pushed.
|
| - __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
|
| + __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
|
|
|
| // Registers:
|
| // a0: entry_address
|
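The argv split above is the visible calling-convention difference: n64 passes the first eight integer arguments in a0..a7, so the fifth JSEntry parameter (argv) arrives in register a4, while o32 passes only four in registers and spills the fifth to the caller's stack. A trivial illustration of such a five-parameter call (nothing V8-specific):

```cpp
#include <cstdio>

// Under the n64 ABI the fifth argument below travels in register a4;
// under o32 it would be read back from the stack, as the O32 branch does.
long Fifth(long p0, long p1, long p2, long p3, long p4) { return p4; }

int main() {
  printf("%ld\n", Fifth(1, 2, 3, 4, 5));  // 5
  return 0;
}
```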
| @@ -1675,24 +1675,24 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| // context slot |
|
| // bad fp (0xff...f) |
|
| // callee saved registers + ra
|
| - // 4 args slots
|
| + // [ O32: 4 args slots]
|
| // args
|
|
|
| // If this is the outermost JS call, set js_entry_sp value.
|
| Label non_outermost_js;
|
| ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
|
| - __ li(t1, Operand(ExternalReference(js_entry_sp)));
|
| - __ lw(t2, MemOperand(t1));
|
| - __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
|
| - __ sw(fp, MemOperand(t1));
|
| - __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
|
| + __ li(a5, Operand(ExternalReference(js_entry_sp)));
|
| + __ ld(a6, MemOperand(a5));
|
| + __ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
|
| + __ sd(fp, MemOperand(a5));
|
| + __ li(a4, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
|
| Label cont;
|
| __ b(&cont);
|
| __ nop(); // Branch delay slot nop.
|
| __ bind(&non_outermost_js);
|
| - __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
|
| + __ li(a4, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
|
| __ bind(&cont);
|
| - __ push(t0);
|
| + __ push(a4);
|
|
|
| // Jump to a faked try block that does the invoke, with a faked catch
|
| // block that sets the pending exception.
|
| @@ -1703,9 +1703,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| // field in the JSEnv and return a failure sentinel. Coming in here the
|
| // fp will be invalid because the PushTryHandler below sets it to 0 to
|
| // signal the existence of the JSEntry frame.
|
| - __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
|
| + __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
|
| isolate)));
|
| - __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
|
| + __ sd(v0, MemOperand(a4)); // We come back from 'invoke'. result is in v0.
|
| __ LoadRoot(v0, Heap::kExceptionRootIndex);
|
| __ b(&exit); // b exposes branch delay slot.
|
| __ nop(); // Branch delay slot nop.
|
| @@ -1720,10 +1720,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| // saved values before returning a failure to C.
|
|
|
| // Clear any pending exceptions.
|
| - __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
|
| - __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
|
| + __ LoadRoot(a5, Heap::kTheHoleValueRootIndex);
|
| + __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
|
| isolate)));
|
| - __ sw(t1, MemOperand(t0));
|
| + __ sd(a5, MemOperand(a4));
|
|
|
| // Invoke the function by calling through JS entry trampoline builtin.
|
| // Notice that we cannot store a reference to the trampoline code directly in
|
| @@ -1740,21 +1740,20 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| // handler frame
|
| // entry frame
|
| // callee saved registers + ra
|
| - // 4 args slots
|
| + // [ O32: 4 args slots]
|
| // args
|
|
|
| if (is_construct) {
|
| ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
|
| isolate);
|
| - __ li(t0, Operand(construct_entry));
|
| + __ li(a4, Operand(construct_entry));
|
| } else {
|
| ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
|
| - __ li(t0, Operand(entry));
|
| + __ li(a4, Operand(entry));
|
| }
|
| - __ lw(t9, MemOperand(t0)); // Deref address.
|
| -
|
| + __ ld(t9, MemOperand(a4)); // Deref address.
|
| // Call JSEntryTrampoline.
|
| - __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
|
| + __ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
|
| __ Call(t9);
|
|
|
| // Unlink this frame from the handler chain.
|
| @@ -1763,23 +1762,23 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| __ bind(&exit); // v0 holds result
|
| // Check if the current stack frame is marked as the outermost JS frame.
|
| Label non_outermost_js_2;
|
| - __ pop(t1);
|
| + __ pop(a5);
|
| __ Branch(&non_outermost_js_2,
|
| ne,
|
| - t1,
|
| + a5,
|
| Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
|
| - __ li(t1, Operand(ExternalReference(js_entry_sp)));
|
| - __ sw(zero_reg, MemOperand(t1));
|
| + __ li(a5, Operand(ExternalReference(js_entry_sp)));
|
| + __ sd(zero_reg, MemOperand(a5));
|
| __ bind(&non_outermost_js_2);
|
|
|
| // Restore the top frame descriptors from the stack.
|
| - __ pop(t1);
|
| - __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
|
| + __ pop(a5);
|
| + __ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress,
|
| isolate)));
|
| - __ sw(t1, MemOperand(t0));
|
| + __ sd(a5, MemOperand(a4));
|
|
|
| // Reset the stack to the callee saved registers.
|
| - __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
|
| + __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
|
|
|
| // Restore callee-saved fpu registers.
|
| __ MultiPopFPU(kCalleeSavedFPU);
|
| @@ -1791,14 +1790,14 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| }
|
|
|
|
|
| -// Uses registers a0 to t0.
|
| +// Uses registers a0 to a4.
|
| // Expected input (depending on whether args are in registers or on the stack):
|
| // * object: a0 or at sp + 1 * kPointerSize.
|
| // * function: a1 or at sp.
|
| //
|
| // An inlined call site may have been generated before calling this stub.
|
| // In this case the offset to the inline site to patch is passed on the stack,
|
| -// in the safepoint slot for register t0.
|
| +// in the safepoint slot for register a4.
|
| void InstanceofStub::Generate(MacroAssembler* masm) {
|
| // Call site inlining and patching implies arguments in registers.
|
| ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
|
| @@ -1809,17 +1808,17 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
|
| const Register object = a0; // Object (lhs).
|
| Register map = a3; // Map of the object.
|
| const Register function = a1; // Function (rhs).
|
| - const Register prototype = t0; // Prototype of the function.
|
| - const Register inline_site = t5;
|
| + const Register prototype = a4; // Prototype of the function.
|
| + const Register inline_site = t1;
|
| const Register scratch = a2;
|
|
|
| - const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
|
| + const int32_t kDeltaToLoadBoolResult = 7 * Assembler::kInstrSize;
|
|
|
| Label slow, loop, is_instance, is_not_instance, not_js_object;
|
|
|
| if (!HasArgsInRegisters()) {
|
| - __ lw(object, MemOperand(sp, 1 * kPointerSize));
|
| - __ lw(function, MemOperand(sp, 0));
|
| + __ ld(object, MemOperand(sp, 1 * kPointerSize));
|
| + __ ld(function, MemOperand(sp, 0));
|
| }
|
|
|
| // Check that the left hand is a JS object and load map.
|
| @@ -1856,18 +1855,18 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
|
| ASSERT(HasArgsInRegisters());
|
| // Patch the (relocated) inlined map check.
|
|
|
| - // The offset was stored in t0 safepoint slot.
|
| + // The offset was stored in a4 safepoint slot.
|
| // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
|
| - __ LoadFromSafepointRegisterSlot(scratch, t0);
|
| - __ Subu(inline_site, ra, scratch);
|
| + __ LoadFromSafepointRegisterSlot(scratch, a4);
|
| + __ Dsubu(inline_site, ra, scratch);
|
| // Get the map location in scratch and patch it.
|
| __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
|
| - __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
|
| + __ sd(map, FieldMemOperand(scratch, Cell::kValueOffset));
|
| }
|
|
|
| - // Register mapping: a3 is object map and t0 is function prototype.
|
| + // Register mapping: a3 is object map and a4 is function prototype.
|
| // Get prototype of object into a2.
|
| - __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
|
| + __ ld(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
|
|
|
| // We don't need map any more. Use it as a scratch register.
|
| Register scratch2 = map;
|
| @@ -1878,8 +1877,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
|
| __ bind(&loop);
|
| __ Branch(&is_instance, eq, scratch, Operand(prototype));
|
| __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
|
| - __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
|
| - __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
|
| + __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
|
| + __ ld(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
|
| __ Branch(&loop);
|
|
|
| __ bind(&is_instance);
|
| @@ -1890,7 +1889,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
|
| } else {
|
| // Patch the call site to return true.
|
| __ LoadRoot(v0, Heap::kTrueValueRootIndex);
|
| - __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
|
| + __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
|
| // Get the boolean result location in scratch and patch it.
|
| __ PatchRelocatedValue(inline_site, scratch, v0);
|
|
|
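The loop in this hunk is a straight prototype-chain walk: follow map->prototype until it reaches either the function's prototype (instance) or the chain terminator (not an instance). A toy model of the control flow, not the real V8 object layout:

```cpp
#include <cstdio>

struct Obj { const Obj* prototype; };

bool IsInstance(const Obj* object_prototype, const Obj* function_prototype,
                const Obj* terminator) {
  for (const Obj* scratch = object_prototype;; scratch = scratch->prototype) {
    if (scratch == function_prototype) return true;   // &is_instance
    if (scratch == terminator) return false;          // &is_not_instance
  }
}

int main() {
  Obj null_root{nullptr};
  Obj proto{&null_root};
  Obj obj_proto{&proto};
  printf("%d\n", IsInstance(&obj_proto, &proto, &null_root));  // 1
  return 0;
}
```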
| @@ -1908,7 +1907,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
|
| } else {
|
| // Patch the call site to return false.
|
| __ LoadRoot(v0, Heap::kFalseValueRootIndex);
|
| - __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
|
| + __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
|
| // Get the boolean result location in scratch and patch it.
|
| __ PatchRelocatedValue(inline_site, scratch, v0);
|
|
|
| @@ -1982,7 +1981,7 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
|
| Operand(isolate()->factory()->prototype_string()));
|
| }
|
|
|
| - StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss);
|
| + StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, a4, &miss);
|
| __ bind(&miss);
|
| StubCompiler::TailCallBuiltin(
|
| masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
|
| @@ -2007,8 +2006,8 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
|
|
|
| // Check if the calling frame is an arguments adaptor frame.
|
| Label adaptor;
|
| - __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| - __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
|
| + __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| + __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
|
| __ Branch(&adaptor,
|
| eq,
|
| a3,
|
| @@ -2020,25 +2019,25 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
|
| __ Branch(&slow, hs, a1, Operand(a0));
|
|
|
| // Read the argument from the stack and return it.
|
| - __ subu(a3, a0, a1);
|
| - __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
|
| - __ Addu(a3, fp, Operand(t3));
|
| + __ dsubu(a3, a0, a1);
|
| + __ SmiScale(a7, a3, kPointerSizeLog2);
|
| + __ Daddu(a3, fp, Operand(a7));
|
| __ Ret(USE_DELAY_SLOT);
|
| - __ lw(v0, MemOperand(a3, kDisplacement));
|
| + __ ld(v0, MemOperand(a3, kDisplacement));
|
|
|
| // Arguments adaptor case: Check index (a1) against actual arguments
|
| // limit found in the arguments adaptor frame. Use unsigned
|
| // comparison to get negative check for free.
|
| __ bind(&adaptor);
|
| - __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| + __ ld(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
|
|
|
| // Read the argument from the adaptor frame and return it.
|
| - __ subu(a3, a0, a1);
|
| - __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
|
| - __ Addu(a3, a2, Operand(t3));
|
| + __ dsubu(a3, a0, a1);
|
| + __ SmiScale(a7, a3, kPointerSizeLog2);
|
| + __ Daddu(a3, a2, Operand(a7));
|
| __ Ret(USE_DELAY_SLOT);
|
| - __ lw(v0, MemOperand(a3, kDisplacement));
|
| + __ ld(v0, MemOperand(a3, kDisplacement));
|
|
|
| // Slow-case: Handle non-smi or out-of-bounds access to arguments
|
| // by calling the runtime system.
|
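Most mechanical rewrites in this hunk swap the 32-bit 'sll rd, smi, kPointerSizeLog2 - kSmiTagSize' idiom for SmiScale, because the 64-bit port keeps the smi payload in the upper 32 bits rather than behind a one-bit tag. A sketch of the arithmetic (the shift amounts are the assumption here):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  int64_t value = 7;
  int64_t smi32 = value << 1;   // 32-bit V8: smi tag is a single left shift
  int64_t smi64 = value << 32;  // 64-bit V8: payload lives in the high word
  // Byte offset for 8-byte slots (kPointerSizeLog2 == 3):
  int64_t offset32 = smi32 << (3 - 1);   // the old sll trick
  int64_t offset64 = smi64 >> (32 - 3);  // what SmiScale computes
  printf("%lld %lld\n", (long long)offset32, (long long)offset64);  // 56 56
  return 0;
}
```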
| @@ -2054,20 +2053,20 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
|
| // sp[8] : function
|
| // Check if the calling frame is an arguments adaptor frame.
|
| Label runtime;
|
| - __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| - __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
|
| + __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| + __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
|
| __ Branch(&runtime,
|
| ne,
|
| a2,
|
| Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
|
|
| // Patch the arguments.length and the parameters pointer in the current frame.
|
| - __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| - __ sw(a2, MemOperand(sp, 0 * kPointerSize));
|
| - __ sll(t3, a2, 1);
|
| - __ Addu(a3, a3, Operand(t3));
|
| - __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
|
| - __ sw(a3, MemOperand(sp, 1 * kPointerSize));
|
| + __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| + __ sd(a2, MemOperand(sp, 0 * kPointerSize));
|
| + __ SmiScale(a7, a2, kPointerSizeLog2);
|
| + __ Daddu(a3, a3, Operand(a7));
|
| + __ daddiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
|
| + __ sd(a3, MemOperand(sp, 1 * kPointerSize));
|
|
|
| __ bind(&runtime);
|
| __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
|
| @@ -2080,17 +2079,17 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
|
| // sp[4] : address of receiver argument
|
| // sp[8] : function
|
| // Registers used over whole function:
|
| - // t2 : allocated object (tagged)
|
| - // t5 : mapped parameter count (tagged)
|
| + // a6 : allocated object (tagged)
|
| + // t1 : mapped parameter count (tagged)
|
|
|
| - __ lw(a1, MemOperand(sp, 0 * kPointerSize));
|
| + __ ld(a1, MemOperand(sp, 0 * kPointerSize));
|
| // a1 = parameter count (tagged)
|
|
|
| // Check if the calling frame is an arguments adaptor frame.
|
| Label runtime;
|
| Label adaptor_frame, try_allocate;
|
| - __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| - __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
|
| + __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| + __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
|
| __ Branch(&adaptor_frame,
|
| eq,
|
| a2,
|
| @@ -2098,16 +2097,15 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
|
|
|
| // No adaptor, parameter count = argument count.
|
| __ mov(a2, a1);
|
| - __ b(&try_allocate);
|
| - __ nop(); // Branch delay slot nop.
|
| + __ Branch(&try_allocate);
|
|
|
| // We have an adaptor frame. Patch the parameters pointer.
|
| __ bind(&adaptor_frame);
|
| - __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| - __ sll(t6, a2, 1);
|
| - __ Addu(a3, a3, Operand(t6));
|
| - __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
|
| - __ sw(a3, MemOperand(sp, 1 * kPointerSize));
|
| + __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| + __ SmiScale(t2, a2, kPointerSizeLog2);
|
| + __ Daddu(a3, a3, Operand(t2));
|
| + __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
|
| + __ sd(a3, MemOperand(sp, 1 * kPointerSize));
|
|
|
| // a1 = parameter count (tagged)
|
| // a2 = argument count (tagged)
|
| @@ -2127,95 +2125,94 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
|
| Label param_map_size;
|
| ASSERT_EQ(0, Smi::FromInt(0));
|
| __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
|
| - __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
|
| - __ sll(t5, a1, 1);
|
| - __ addiu(t5, t5, kParameterMapHeaderSize);
|
| + __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
|
| + __ SmiScale(t1, a1, kPointerSizeLog2);
|
| + __ daddiu(t1, t1, kParameterMapHeaderSize);
|
| __ bind(&param_map_size);
|
|
|
| // 2. Backing store.
|
| - __ sll(t6, a2, 1);
|
| - __ Addu(t5, t5, Operand(t6));
|
| - __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
|
| + __ SmiScale(t2, a2, kPointerSizeLog2);
|
| + __ Daddu(t1, t1, Operand(t2));
|
| + __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
|
|
|
| // 3. Arguments object.
|
| - __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
|
| + __ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize));
|
|
|
| // Do the allocation of all three objects in one go.
|
| - __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
|
| + __ Allocate(t1, v0, a3, a4, &runtime, TAG_OBJECT);
|
|
|
| // v0 = address of new object(s) (tagged)
|
| // a2 = argument count (smi-tagged)
|
| - // Get the arguments boilerplate from the current native context into t0.
|
| + // Get the arguments boilerplate from the current native context into a4.
|
| const int kNormalOffset =
|
| Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
|
| const int kAliasedOffset =
|
| Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
|
|
|
| - __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
|
| - __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
|
| + __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
|
| + __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
|
| Label skip2_ne, skip2_eq;
|
| __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
|
| - __ lw(t0, MemOperand(t0, kNormalOffset));
|
| + __ ld(a4, MemOperand(a4, kNormalOffset));
|
| __ bind(&skip2_ne);
|
|
|
| __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
|
| - __ lw(t0, MemOperand(t0, kAliasedOffset));
|
| + __ ld(a4, MemOperand(a4, kAliasedOffset));
|
| __ bind(&skip2_eq);
|
|
|
| // v0 = address of new object (tagged)
|
| // a1 = mapped parameter count (tagged)
|
| // a2 = argument count (smi-tagged)
|
| - // t0 = address of arguments map (tagged)
|
| - __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
|
| + // a4 = address of arguments map (tagged)
|
| + __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
|
| __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
|
| - __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
|
| - __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
|
| + __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
|
| + __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
|
|
|
| // Set up the callee in-object property.
|
| STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
|
| - __ lw(a3, MemOperand(sp, 2 * kPointerSize));
|
| + __ ld(a3, MemOperand(sp, 2 * kPointerSize));
|
| __ AssertNotSmi(a3);
|
| const int kCalleeOffset = JSObject::kHeaderSize +
|
| Heap::kArgumentsCalleeIndex * kPointerSize;
|
| - __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
|
| + __ sd(a3, FieldMemOperand(v0, kCalleeOffset));
|
|
|
| // Use the length (smi tagged) and set that as an in-object property too.
|
| - __ AssertSmi(a2);
|
| STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
|
| const int kLengthOffset = JSObject::kHeaderSize +
|
| Heap::kArgumentsLengthIndex * kPointerSize;
|
| - __ sw(a2, FieldMemOperand(v0, kLengthOffset));
|
| + __ sd(a2, FieldMemOperand(v0, kLengthOffset));
|
|
|
| // Set up the elements pointer in the allocated arguments object.
|
| - // If we allocated a parameter map, t0 will point there, otherwise
|
| + // If we allocated a parameter map, a4 will point there, otherwise
|
| // it will point to the backing store.
|
| - __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
|
| - __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
|
| + __ Daddu(a4, v0, Operand(Heap::kSloppyArgumentsObjectSize));
|
| + __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
|
|
|
| // v0 = address of new object (tagged)
|
| // a1 = mapped parameter count (tagged)
|
| // a2 = argument count (tagged)
|
| - // t0 = address of parameter map or backing store (tagged)
|
| + // a4 = address of parameter map or backing store (tagged)
|
| // Initialize parameter map. If there are no mapped arguments, we're done.
|
| Label skip_parameter_map;
|
| Label skip3;
|
| __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
|
| // Move backing store address to a3, because it is
|
| // expected there when filling in the unmapped arguments.
|
| - __ mov(a3, t0);
|
| + __ mov(a3, a4);
|
| __ bind(&skip3);
|
|
|
| __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
|
|
|
| - __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
|
| - __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
|
| - __ Addu(t2, a1, Operand(Smi::FromInt(2)));
|
| - __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
|
| - __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
|
| - __ sll(t6, a1, 1);
|
| - __ Addu(t2, t0, Operand(t6));
|
| - __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
|
| - __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
|
| + __ LoadRoot(a6, Heap::kSloppyArgumentsElementsMapRootIndex);
|
| + __ sd(a6, FieldMemOperand(a4, FixedArray::kMapOffset));
|
| + __ Daddu(a6, a1, Operand(Smi::FromInt(2)));
|
| + __ sd(a6, FieldMemOperand(a4, FixedArray::kLengthOffset));
|
| + __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
|
| + __ SmiScale(t2, a1, kPointerSizeLog2);
|
| + __ Daddu(a6, a4, Operand(t2));
|
| + __ Daddu(a6, a6, Operand(kParameterMapHeaderSize));
|
| + __ sd(a6, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
|
|
|
| // Copy the parameter slots and the holes in the arguments.
|
| // We need to fill in mapped_parameter_count slots. They index the context,
|
| @@ -2226,62 +2223,63 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
|
| // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
|
| // We loop from right to left.
|
| Label parameters_loop, parameters_test;
|
| - __ mov(t2, a1);
|
| - __ lw(t5, MemOperand(sp, 0 * kPointerSize));
|
| - __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
|
| - __ Subu(t5, t5, Operand(a1));
|
| - __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
|
| - __ sll(t6, t2, 1);
|
| - __ Addu(a3, t0, Operand(t6));
|
| - __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
|
| -
|
| - // t2 = loop variable (tagged)
|
| + __ mov(a6, a1);
|
| + __ ld(t1, MemOperand(sp, 0 * kPointerSize));
|
| + __ Daddu(t1, t1, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
|
| + __ Dsubu(t1, t1, Operand(a1));
|
| + __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
|
| + __ SmiScale(t2, a6, kPointerSizeLog2);
|
| + __ Daddu(a3, a4, Operand(t2));
|
| + __ Daddu(a3, a3, Operand(kParameterMapHeaderSize));
|
| +
|
| + // a6 = loop variable (tagged)
|
| // a1 = mapping index (tagged)
|
| // a3 = address of backing store (tagged)
|
| - // t0 = address of parameter map (tagged)
|
| - // t1 = temporary scratch (a.o., for address calculation)
|
| - // t3 = the hole value
|
| + // a4 = address of parameter map (tagged)
|
| + // a5 = temporary scratch (among others, for address calculation)
|
| + // a7 = the hole value
|
| __ jmp(&parameters_test);
|
|
|
| __ bind(&parameters_loop);
|
| - __ Subu(t2, t2, Operand(Smi::FromInt(1)));
|
| - __ sll(t1, t2, 1);
|
| - __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
|
| - __ Addu(t6, t0, t1);
|
| - __ sw(t5, MemOperand(t6));
|
| - __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
|
| - __ Addu(t6, a3, t1);
|
| - __ sw(t3, MemOperand(t6));
|
| - __ Addu(t5, t5, Operand(Smi::FromInt(1)));
|
| +
|
| + __ Dsubu(a6, a6, Operand(Smi::FromInt(1)));
|
| + __ SmiScale(a5, a6, kPointerSizeLog2);
|
| + __ Daddu(a5, a5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
|
| + __ Daddu(t2, a4, a5);
|
| + __ sd(t1, MemOperand(t2));
|
| + __ Dsubu(a5, a5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
|
| + __ Daddu(t2, a3, a5);
|
| + __ sd(a7, MemOperand(t2));
|
| + __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
|
| __ bind(&parameters_test);
|
| - __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
|
| + __ Branch(&parameters_loop, ne, a6, Operand(Smi::FromInt(0)));
|
|
|
| __ bind(&skip_parameter_map);
|
| // a2 = argument count (tagged)
|
| // a3 = address of backing store (tagged)
|
| - // t1 = scratch
|
| + // a5 = scratch
|
| // Copy arguments header and remaining slots (if there are any).
|
| - __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
|
| - __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
|
| - __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
|
| + __ LoadRoot(a5, Heap::kFixedArrayMapRootIndex);
|
| + __ sd(a5, FieldMemOperand(a3, FixedArray::kMapOffset));
|
| + __ sd(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
|
|
|
| Label arguments_loop, arguments_test;
|
| - __ mov(t5, a1);
|
| - __ lw(t0, MemOperand(sp, 1 * kPointerSize));
|
| - __ sll(t6, t5, 1);
|
| - __ Subu(t0, t0, Operand(t6));
|
| + __ mov(t1, a1);
|
| + __ ld(a4, MemOperand(sp, 1 * kPointerSize));
|
| + __ SmiScale(t2, t1, kPointerSizeLog2);
|
| + __ Dsubu(a4, a4, Operand(t2));
|
| __ jmp(&arguments_test);
|
|
|
| __ bind(&arguments_loop);
|
| - __ Subu(t0, t0, Operand(kPointerSize));
|
| - __ lw(t2, MemOperand(t0, 0));
|
| - __ sll(t6, t5, 1);
|
| - __ Addu(t1, a3, Operand(t6));
|
| - __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
|
| - __ Addu(t5, t5, Operand(Smi::FromInt(1)));
|
| + __ Dsubu(a4, a4, Operand(kPointerSize));
|
| + __ ld(a6, MemOperand(a4, 0));
|
| + __ SmiScale(t2, t1, kPointerSizeLog2);
|
| + __ Daddu(a5, a3, Operand(t2));
|
| + __ sd(a6, FieldMemOperand(a5, FixedArray::kHeaderSize));
|
| + __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
|
|
|
| __ bind(&arguments_test);
|
| - __ Branch(&arguments_loop, lt, t5, Operand(a2));
|
| + __ Branch(&arguments_loop, lt, t1, Operand(a2));
|
|
|
| // Return and remove the on-stack parameters.
|
| __ DropAndRet(3);
|
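The parameters_loop above does double bookkeeping, right to left: it stores a context slot index into the parameter map and the hole into the matching backing-store slot, with context indices counting up from MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count. A toy model of just that bookkeeping (all constants made up):

```cpp
#include <cstdio>
#include <vector>

int main() {
  const int kMinContextSlots = 4;  // stand-in for Context::MIN_CONTEXT_SLOTS
  int parameter_count = 3, mapped_count = 3;
  std::vector<int> param_map(mapped_count);
  std::vector<const char*> backing(mapped_count, "arg");
  int context_index = kMinContextSlots + parameter_count - mapped_count;
  for (int i = mapped_count - 1; i >= 0; --i) {  // loop runs right to left
    param_map[i] = context_index++;  // sd(t1, MemOperand(t2)) in the stub
    backing[i] = "the_hole";         // sd(a7, MemOperand(t2)) in the stub
  }
  for (int i = 0; i < mapped_count; ++i)
    printf("slot %d -> context[%d], backing=%s\n", i, param_map[i], backing[i]);
  return 0;
}
```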
| @@ -2289,7 +2287,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
|
| // Do the runtime call to allocate the arguments object.
|
| // a2 = argument count (tagged)
|
| __ bind(&runtime);
|
| - __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
|
| + __ sd(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
|
| __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
|
| }
|
|
|
| @@ -2300,89 +2298,91 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
|
| // sp[8] : function
|
| // Check if the calling frame is an arguments adaptor frame.
|
| Label adaptor_frame, try_allocate, runtime;
|
| - __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| - __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
|
| + __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| + __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
|
| __ Branch(&adaptor_frame,
|
| eq,
|
| a3,
|
| Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
|
|
| // Get the length from the frame.
|
| - __ lw(a1, MemOperand(sp, 0));
|
| + __ ld(a1, MemOperand(sp, 0));
|
| __ Branch(&try_allocate);
|
|
|
| // Patch the arguments.length and the parameters pointer.
|
| __ bind(&adaptor_frame);
|
| - __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| - __ sw(a1, MemOperand(sp, 0));
|
| - __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
|
| - __ Addu(a3, a2, Operand(at));
|
| + __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| + __ sd(a1, MemOperand(sp, 0));
|
| + __ SmiScale(at, a1, kPointerSizeLog2);
|
|
|
| - __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
|
| - __ sw(a3, MemOperand(sp, 1 * kPointerSize));
|
| + __ Daddu(a3, a2, Operand(at));
|
| +
|
| + __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
|
| + __ sd(a3, MemOperand(sp, 1 * kPointerSize));
|
|
|
| // Try the new space allocation. Start out with computing the size
|
| // of the arguments object and the elements array in words.
|
| Label add_arguments_object;
|
| __ bind(&try_allocate);
|
| __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
|
| - __ srl(a1, a1, kSmiTagSize);
|
| + __ SmiUntag(a1);
|
|
|
| - __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
|
| + __ Daddu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
|
| __ bind(&add_arguments_object);
|
| - __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
|
| + __ Daddu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
|
|
|
| // Do the allocation of both objects in one go.
|
| __ Allocate(a1, v0, a2, a3, &runtime,
|
| static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
|
|
|
| // Get the arguments boilerplate from the current native context.
|
| - __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
|
| - __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
|
| - __ lw(t0, MemOperand(
|
| - t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
|
| + __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
|
| + __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
|
| + __ ld(a4, MemOperand(a4, Context::SlotOffset(
|
| + Context::STRICT_ARGUMENTS_MAP_INDEX)));
|
|
|
| - __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
|
| + __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
|
| __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
|
| - __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
|
| - __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
|
| + __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
|
| + __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
|
|
|
| // Get the length (smi tagged) and set that as an in-object property too.
|
| STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
|
| - __ lw(a1, MemOperand(sp, 0 * kPointerSize));
|
| + __ ld(a1, MemOperand(sp, 0 * kPointerSize));
|
| __ AssertSmi(a1);
|
| - __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
|
| + __ sd(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
|
| Heap::kArgumentsLengthIndex * kPointerSize));
|
|
|
| Label done;
|
| __ Branch(&done, eq, a1, Operand(zero_reg));
|
|
|
| // Get the parameters pointer from the stack.
|
| - __ lw(a2, MemOperand(sp, 1 * kPointerSize));
|
| + __ ld(a2, MemOperand(sp, 1 * kPointerSize));
|
|
|
| // Set up the elements pointer in the allocated arguments object and
|
| // initialize the header in the elements fixed array.
|
| - __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
|
| - __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
|
| + __ Daddu(a4, v0, Operand(Heap::kStrictArgumentsObjectSize));
|
| + __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
|
| __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
|
| - __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
|
| - __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
|
| + __ sd(a3, FieldMemOperand(a4, FixedArray::kMapOffset));
|
| + __ sd(a1, FieldMemOperand(a4, FixedArray::kLengthOffset));
|
| // Untag the length for the loop.
|
| - __ srl(a1, a1, kSmiTagSize);
|
| + __ SmiUntag(a1);
|
| +
|
|
|
| // Copy the fixed array slots.
|
| Label loop;
|
| - // Set up t0 to point to the first array slot.
|
| - __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
| + // Set up a4 to point to the first array slot.
|
| + __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
| __ bind(&loop);
|
| // Pre-decrement a2 with kPointerSize on each iteration.
|
| // Pre-decrement in order to skip receiver.
|
| - __ Addu(a2, a2, Operand(-kPointerSize));
|
| - __ lw(a3, MemOperand(a2));
|
| - // Post-increment t0 with kPointerSize on each iteration.
|
| - __ sw(a3, MemOperand(t0));
|
| - __ Addu(t0, t0, Operand(kPointerSize));
|
| - __ Subu(a1, a1, Operand(1));
|
| + __ Daddu(a2, a2, Operand(-kPointerSize));
|
| + __ ld(a3, MemOperand(a2));
|
| + // Post-increment a4 with kPointerSize on each iteration.
|
| + __ sd(a3, MemOperand(a4));
|
| + __ Daddu(a4, a4, Operand(kPointerSize));
|
| + __ Dsubu(a1, a1, Operand(1));
|
| __ Branch(&loop, ne, a1, Operand(zero_reg));
|
|
|
| // Return and remove the on-stack parameters.
|
| @@ -2432,23 +2432,23 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| ExternalReference address_of_regexp_stack_memory_size =
|
| ExternalReference::address_of_regexp_stack_memory_size(isolate());
|
| __ li(a0, Operand(address_of_regexp_stack_memory_size));
|
| - __ lw(a0, MemOperand(a0, 0));
|
| + __ ld(a0, MemOperand(a0, 0));
|
| __ Branch(&runtime, eq, a0, Operand(zero_reg));
|
|
|
| // Check that the first argument is a JSRegExp object.
|
| - __ lw(a0, MemOperand(sp, kJSRegExpOffset));
|
| + __ ld(a0, MemOperand(sp, kJSRegExpOffset));
|
| STATIC_ASSERT(kSmiTag == 0);
|
| __ JumpIfSmi(a0, &runtime);
|
| __ GetObjectType(a0, a1, a1);
|
| __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
|
|
|
| // Check that the RegExp has been compiled (data contains a fixed array).
|
| - __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
|
| + __ ld(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
|
| if (FLAG_debug_code) {
|
| - __ SmiTst(regexp_data, t0);
|
| + __ SmiTst(regexp_data, a4);
|
| __ Check(nz,
|
| kUnexpectedTypeForRegExpDataFixedArrayExpected,
|
| - t0,
|
| + a4,
|
| Operand(zero_reg));
|
| __ GetObjectType(regexp_data, a0, a0);
|
| __ Check(eq,
|
| @@ -2459,28 +2459,27 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
|
|
| // regexp_data: RegExp data (FixedArray)
|
| // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
|
| - __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
|
| + __ ld(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
|
| __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
|
|
|
| // regexp_data: RegExp data (FixedArray)
|
| // Check that the number of captures fits in the static offsets vector buffer.
|
| - __ lw(a2,
|
| + __ ld(a2,
|
| FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
|
| // Check (number_of_captures + 1) * 2 <= offsets vector size
|
| // Or number_of_captures * 2 <= offsets vector size - 2
|
| + // Or number_of_captures <= offsets vector size / 2 - 1
|
| // a2 is smi-tagged, so it is compared directly against a smi-tagged limit.
|
| - STATIC_ASSERT(kSmiTag == 0);
|
| - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
|
| STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
|
| - __ Branch(
|
| - &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
|
| + int temp = Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1;
|
| + __ Branch(&runtime, hi, a2, Operand(Smi::FromInt(temp)));
|
|
|
| // Reset offset for possibly sliced string.
|
| __ mov(t0, zero_reg);
|
| - __ lw(subject, MemOperand(sp, kSubjectOffset));
|
| + __ ld(subject, MemOperand(sp, kSubjectOffset));
|
| __ JumpIfSmi(subject, &runtime);
|
| __ mov(a3, subject); // Make a copy of the original subject string.
|
| - __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
|
| + __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
|
| __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
|
| // subject: subject string
|
| // a3: subject string
|
| @@ -2503,9 +2502,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| // (8) Short external string or not a string? If yes, bail out to runtime.
|
| // (9) Sliced string. Replace subject with parent. Go to (4).
|
|
|
| - Label seq_string /* 5 */, external_string /* 7 */,
|
| - check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
|
| - not_long_external /* 8 */;
|
| + Label check_underlying; // (4)
|
| + Label seq_string; // (5)
|
| + Label not_seq_nor_cons; // (6)
|
| + Label external_string; // (7)
|
| + Label not_long_external; // (8)
|
|
|
| // (1) Sequential string? If yes, go to (5).
|
| __ And(a1,
|
| @@ -2526,14 +2527,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
|
|
| // (3) Cons string. Check that it's flat.
|
| // Replace subject with first string and reload instance type.
|
| - __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
|
| + __ ld(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
|
| __ LoadRoot(a1, Heap::kempty_stringRootIndex);
|
| __ Branch(&runtime, ne, a0, Operand(a1));
|
| - __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
|
| + __ ld(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
|
|
|
| // (4) Is subject external? If yes, go to (7).
|
| __ bind(&check_underlying);
|
| - __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
|
| + __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
|
| __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
|
| STATIC_ASSERT(kSeqStringTag == 0);
|
| __ And(at, a0, Operand(kStringRepresentationMask));
|
| @@ -2549,20 +2550,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| // Load previous index and check range before a3 is overwritten. We have to
|
| // use a3 instead of subject here because subject might have been only made
|
| // to look like a sequential string when it actually is an external string.
|
| - __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
|
| + __ ld(a1, MemOperand(sp, kPreviousIndexOffset));
|
| __ JumpIfNotSmi(a1, &runtime);
|
| - __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
|
| + __ ld(a3, FieldMemOperand(a3, String::kLengthOffset));
|
| __ Branch(&runtime, ls, a3, Operand(a1));
|
| - __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
|
| + __ SmiUntag(a1);
|
|
|
| STATIC_ASSERT(kStringEncodingMask == 4);
|
| STATIC_ASSERT(kOneByteStringTag == 4);
|
| STATIC_ASSERT(kTwoByteStringTag == 0);
|
| __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
|
| - __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
|
| - __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
|
| - __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
|
| - __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
|
| + __ ld(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
|
| + __ dsra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
|
| + __ ld(a5, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
|
| + __ Movz(t9, a5, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
|
|
|
| // (E) Carry on. String handling is done.
|
| // t9: irregexp code
|
| @@ -2582,7 +2583,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
|
|
| // Isolates: note we add an additional parameter here (isolate pointer).
|
| const int kRegExpExecuteArguments = 9;
|
| - const int kParameterRegisters = 4;
|
| + const int kParameterRegisters = (kMipsAbi == kN64) ? 8 : 4;
|
| __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
|
|
|
| // Stack pointer now points to cell where return address is to be written.
|
| @@ -2592,62 +2593,93 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| // allocating space for the c argument slots, we don't need to calculate
|
| // that into the argument positions on the stack. This is how the stack will
|
| // look (sp meaning the value of sp at this moment):
|
| - // [sp + 5] - Argument 9
|
| - // [sp + 4] - Argument 8
|
| - // [sp + 3] - Argument 7
|
| - // [sp + 2] - Argument 6
|
| - // [sp + 1] - Argument 5
|
| - // [sp + 0] - saved ra
|
| -
|
| - // Argument 9: Pass current isolate address.
|
| - // CFunctionArgumentOperand handles MIPS stack argument slots.
|
| - __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
|
| - __ sw(a0, MemOperand(sp, 5 * kPointerSize));
|
| -
|
| - // Argument 8: Indicate that this is a direct call from JavaScript.
|
| - __ li(a0, Operand(1));
|
| - __ sw(a0, MemOperand(sp, 4 * kPointerSize));
|
| -
|
| - // Argument 7: Start (high end) of backtracking stack memory area.
|
| - __ li(a0, Operand(address_of_regexp_stack_memory_address));
|
| - __ lw(a0, MemOperand(a0, 0));
|
| - __ li(a2, Operand(address_of_regexp_stack_memory_size));
|
| - __ lw(a2, MemOperand(a2, 0));
|
| - __ addu(a0, a0, a2);
|
| - __ sw(a0, MemOperand(sp, 3 * kPointerSize));
|
| -
|
| - // Argument 6: Set the number of capture registers to zero to force global
|
| - // regexps to behave as non-global. This does not affect non-global regexps.
|
| - __ mov(a0, zero_reg);
|
| - __ sw(a0, MemOperand(sp, 2 * kPointerSize));
|
| -
|
| - // Argument 5: static offsets vector buffer.
|
| - __ li(a0, Operand(
|
| - ExternalReference::address_of_static_offsets_vector(isolate())));
|
| - __ sw(a0, MemOperand(sp, 1 * kPointerSize));
|
| + // n64 ABI:
|
| + // [sp + 1] - Argument 9
|
| + // [sp + 0] - saved ra
|
| + // O32 ABI:
|
| + // [sp + 5] - Argument 9
|
| + // [sp + 4] - Argument 8
|
| + // [sp + 3] - Argument 7
|
| + // [sp + 2] - Argument 6
|
| + // [sp + 1] - Argument 5
|
| + // [sp + 0] - saved ra
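|
| The layouts differ because the n64 ABI passes the first eight integer
| arguments in a0-a7, so only argument 9 spills to the stack, while O32
| passes just four in registers. A hedged sketch of how a nine-argument C
| call maps under each ABI (the prototype below is illustrative, not the
| real signature):
|
|   // n64: p1..p8 in a0..a7, p9 in stack slot [sp + 1] above the saved ra.
|   // O32: p1..p4 in a0..a3, p5..p9 in stack slots [sp + 1] .. [sp + 5].
|   extern "C" int NativeRegExpExecute(void* code, void* subject, int start,
|                                      void* input_begin, void* input_end,
|                                      int* offsets, int capture_count,
|                                      void* stack_top, void* isolate);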
|
| +
|
| + if (kMipsAbi == kN64) {
|
| + // Argument 9: Pass current isolate address.
|
| + __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
|
| + __ sd(a0, MemOperand(sp, 1 * kPointerSize));
|
| +
|
| + // Argument 8: Indicate that this is a direct call from JavaScript.
|
| + __ li(a7, Operand(1));
|
| +
|
| + // Argument 7: Start (high end) of backtracking stack memory area.
|
| + __ li(a0, Operand(address_of_regexp_stack_memory_address));
|
| + __ ld(a0, MemOperand(a0, 0));
|
| + __ li(a2, Operand(address_of_regexp_stack_memory_size));
|
| + __ ld(a2, MemOperand(a2, 0));
|
| + __ daddu(a6, a0, a2);
|
| +
|
| + // Argument 6: Set the number of capture registers to zero to force global
|
| + // regexps to behave as non-global. This does not affect non-global regexps.
|
| + __ mov(a5, zero_reg);
|
| +
|
| + // Argument 5: static offsets vector buffer.
|
| + __ li(a4, Operand(
|
| + ExternalReference::address_of_static_offsets_vector(isolate())));
|
| + } else { // O32.
|
| + ASSERT(kMipsAbi == kO32);
|
| +
|
| + // Argument 9: Pass current isolate address.
|
| + // CFunctionArgumentOperand handles MIPS stack argument slots.
|
| + __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
|
| + __ sd(a0, MemOperand(sp, 5 * kPointerSize));
|
| +
|
| + // Argument 8: Indicate that this is a direct call from JavaScript.
|
| + __ li(a0, Operand(1));
|
| + __ sd(a0, MemOperand(sp, 4 * kPointerSize));
|
| +
|
| + // Argument 7: Start (high end) of backtracking stack memory area.
|
| + __ li(a0, Operand(address_of_regexp_stack_memory_address));
|
| + __ ld(a0, MemOperand(a0, 0));
|
| + __ li(a2, Operand(address_of_regexp_stack_memory_size));
|
| + __ ld(a2, MemOperand(a2, 0));
|
| + __ daddu(a0, a0, a2);
|
| + __ sd(a0, MemOperand(sp, 3 * kPointerSize));
|
| +
|
| + // Argument 6: Set the number of capture registers to zero to force global
|
| + // regexps to behave as non-global. This does not affect non-global regexps.
|
| + __ mov(a0, zero_reg);
|
| + __ sd(a0, MemOperand(sp, 2 * kPointerSize));
|
| +
|
| + // Argument 5: static offsets vector buffer.
|
| + __ li(a0, Operand(
|
| + ExternalReference::address_of_static_offsets_vector(isolate())));
|
| + __ sd(a0, MemOperand(sp, 1 * kPointerSize));
|
| + }
|
|
|
| // For arguments 4 and 3 get string length, calculate start of string data
|
| // and calculate the shift of the index (0 for ASCII and 1 for two byte).
|
| - __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
|
| + __ Daddu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
|
| __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
|
| // Load the length from the original subject string from the previous stack
|
| // frame. Therefore we have to use fp, which points exactly to two pointer
|
| // sizes below the previous sp. (Because creating a new stack frame pushes
|
| // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
|
| - __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
|
| + __ ld(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
|
| // If slice offset is not 0, load the length from the original sliced string.
|
| // Argument 4, a3: End of string data
|
| // Argument 3, a2: Start of string data
|
| // Prepare start and end index of the input.
|
| - __ sllv(t1, t0, a3);
|
| - __ addu(t0, t2, t1);
|
| - __ sllv(t1, a1, a3);
|
| - __ addu(a2, t0, t1);
|
| -
|
| - __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
|
| - __ sra(t2, t2, kSmiTagSize);
|
| - __ sllv(t1, t2, a3);
|
| - __ addu(a3, t0, t1);
|
| + __ dsllv(t1, t0, a3);
|
| + __ daddu(t0, t2, t1);
|
| + __ dsllv(t1, a1, a3);
|
| + __ daddu(a2, t0, t1);
|
| +
|
| + __ ld(t2, FieldMemOperand(subject, String::kLengthOffset));
|
| +
|
| + __ SmiUntag(t2);
|
| + __ dsllv(t1, t2, a3);
|
| + __ daddu(a3, t0, t1);
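|
| Register a3 holds the character-size shift here (0 for one-byte strings,
| 1 for two-byte), so every character index becomes a byte offset via one
| shift before being added to the data pointer. The same arithmetic in
| plain C++ (a sketch; the names are illustrative):
|
|   #include <cstdint>
|
|   uint8_t* CharAddress(uint8_t* data, int64_t slice_offset,
|                        int64_t index, int shift) {
|     uint8_t* base = data + (slice_offset << shift);  // t0 above
|     return base + (index << shift);                  // a2 (start), a3 (end)
|   }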
|
| // Argument 2 (a1): Previous index.
|
| // Already there
|
|
|
| @@ -2655,7 +2687,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| __ mov(a0, subject);
|
|
|
| // Locate the code entry and call it.
|
| - __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| + __ Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| DirectCEntryStub stub(isolate());
|
| stub.GenerateCall(masm, t9);
|
|
|
| @@ -2681,10 +2713,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| __ li(a1, Operand(isolate()->factory()->the_hole_value()));
|
| __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
|
| isolate())));
|
| - __ lw(v0, MemOperand(a2, 0));
|
| + __ ld(v0, MemOperand(a2, 0));
|
| __ Branch(&runtime, eq, v0, Operand(a1));
|
|
|
| - __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
|
| + __ sd(a1, MemOperand(a2, 0)); // Clear pending exception.
|
|
|
| // Check if the exception is a termination. If so, throw as uncatchable.
|
| __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
|
| @@ -2703,57 +2735,57 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
|
|
| // Process the result from the native regexp code.
|
| __ bind(&success);
|
| - __ lw(a1,
|
| - FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
|
| +
|
| + __ lw(a1, UntagSmiFieldMemOperand(
|
| + regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
|
| // Calculate number of capture registers (number_of_captures + 1) * 2.
|
| - // Multiplying by 2 comes for free since r1 is smi-tagged.
|
| - STATIC_ASSERT(kSmiTag == 0);
|
| - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
|
| - __ Addu(a1, a1, Operand(2)); // a1 was a smi.
|
| + __ Daddu(a1, a1, Operand(1));
|
| + __ dsll(a1, a1, 1); // Multiply by 2.
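|
| On 32-bit MIPS the multiply by two came for free from the smi tag; here
| the count is loaded already untagged, so the stub adds one and shifts
| explicitly. The quantity being computed, as a one-line sketch:
|
|   // Two offsets (start, end) per capture plus one pair for the whole match.
|   int CaptureRegisterCount(int number_of_captures) {
|     return (number_of_captures + 1) * 2;
|   }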
|
|
|
| - __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
|
| + __ ld(a0, MemOperand(sp, kLastMatchInfoOffset));
|
| __ JumpIfSmi(a0, &runtime);
|
| __ GetObjectType(a0, a2, a2);
|
| __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
|
| // Check that the JSArray is in fast case.
|
| - __ lw(last_match_info_elements,
|
| + __ ld(last_match_info_elements,
|
| FieldMemOperand(a0, JSArray::kElementsOffset));
|
| - __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
|
| + __ ld(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
|
| __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
|
| __ Branch(&runtime, ne, a0, Operand(at));
|
| // Check that the last match info has space for the capture registers and the
|
| // additional information.
|
| - __ lw(a0,
|
| + __ ld(a0,
|
| FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
|
| - __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
|
| - __ sra(at, a0, kSmiTagSize);
|
| + __ Daddu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
|
| +
|
| + __ SmiUntag(at, a0);
|
| __ Branch(&runtime, gt, a2, Operand(at));
|
|
|
| // a1: number of capture registers
|
| // subject: subject string
|
| // Store the capture count.
|
| - __ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
|
| - __ sw(a2, FieldMemOperand(last_match_info_elements,
|
| + __ SmiTag(a2, a1); // To smi.
|
| + __ sd(a2, FieldMemOperand(last_match_info_elements,
|
| RegExpImpl::kLastCaptureCountOffset));
|
| // Store last subject and last input.
|
| - __ sw(subject,
|
| + __ sd(subject,
|
| FieldMemOperand(last_match_info_elements,
|
| RegExpImpl::kLastSubjectOffset));
|
| __ mov(a2, subject);
|
| __ RecordWriteField(last_match_info_elements,
|
| RegExpImpl::kLastSubjectOffset,
|
| subject,
|
| - t3,
|
| + a7,
|
| kRAHasNotBeenSaved,
|
| kDontSaveFPRegs);
|
| __ mov(subject, a2);
|
| - __ sw(subject,
|
| + __ sd(subject,
|
| FieldMemOperand(last_match_info_elements,
|
| RegExpImpl::kLastInputOffset));
|
| __ RecordWriteField(last_match_info_elements,
|
| RegExpImpl::kLastInputOffset,
|
| subject,
|
| - t3,
|
| + a7,
|
| kRAHasNotBeenSaved,
|
| kDontSaveFPRegs);
|
|
|
| @@ -2767,25 +2799,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| Label next_capture, done;
|
| // Capture register counter starts from number of capture registers and
|
| // counts down until wrapping after zero.
|
| - __ Addu(a0,
|
| + __ Daddu(a0,
|
| last_match_info_elements,
|
| Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
|
| __ bind(&next_capture);
|
| - __ Subu(a1, a1, Operand(1));
|
| + __ Dsubu(a1, a1, Operand(1));
|
| __ Branch(&done, lt, a1, Operand(zero_reg));
|
| // Read the value from the static offsets vector buffer.
|
| __ lw(a3, MemOperand(a2, 0));
|
| - __ addiu(a2, a2, kPointerSize);
|
| + __ daddiu(a2, a2, kIntSize);
|
| // Store the smi value in the last match info.
|
| - __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
|
| - __ sw(a3, MemOperand(a0, 0));
|
| + __ SmiTag(a3);
|
| + __ sd(a3, MemOperand(a0, 0));
|
| __ Branch(&next_capture, USE_DELAY_SLOT);
|
| - __ addiu(a0, a0, kPointerSize); // In branch delay slot.
|
| + __ daddiu(a0, a0, kPointerSize); // In branch delay slot.
|
|
|
| __ bind(&done);
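|
| Note the mixed strides in the loop just above: the static offsets vector
| holds 4-byte ints (so a2 advances by kIntSize), while the match info
| holds 8-byte smis (so a0 advances by kPointerSize). Modeled in C++,
| assuming the MIPS64 smi layout with the payload in the upper 32 bits:
|
|   #include <cstdint>
|
|   void StoreCaptures(const int32_t* offsets, int64_t* match_info, int count) {
|     for (int i = 0; i < count; ++i) {
|       // lw + SmiTag + sd: widen each 32-bit offset into a 64-bit smi.
|       match_info[i] = static_cast<int64_t>(offsets[i]) << 32;
|     }
|   }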
|
|
|
| // Return last match info.
|
| - __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
|
| + __ ld(v0, MemOperand(sp, kLastMatchInfoOffset));
|
| __ DropAndRet(4);
|
|
|
| // Do the runtime call to execute the regexp.
|
| @@ -2800,7 +2832,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
|
|
| // (7) External string. Make it, offset-wise, look like a sequential string.
|
| __ bind(&external_string);
|
| - __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
|
| + __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
|
| __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
|
| if (FLAG_debug_code) {
|
| // Assert that we do not have a cons or slice (indirect strings) here.
|
| @@ -2811,11 +2843,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| at,
|
| Operand(zero_reg));
|
| }
|
| - __ lw(subject,
|
| + __ ld(subject,
|
| FieldMemOperand(subject, ExternalString::kResourceDataOffset));
|
| // Move the pointer so that offset-wise, it looks like a sequential string.
|
| STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
|
| - __ Subu(subject,
|
| + __ Dsubu(subject,
|
| subject,
|
| SeqTwoByteString::kHeaderSize - kHeapObjectTag);
|
| __ jmp(&seq_string); // Go to (5).
|
| @@ -2828,9 +2860,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
|
|
| // (9) Sliced string. Replace subject with parent. Go to (4).
|
| // Load offset into t0 and replace subject string with parent.
|
| - __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
|
| - __ sra(t0, t0, kSmiTagSize);
|
| - __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
|
| + __ ld(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
|
| + __ SmiUntag(t0);
|
| + __ ld(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
|
| __ jmp(&check_underlying); // Go to (4).
|
| #endif // V8_INTERPRETED_REGEXP
|
| }
|
| @@ -2851,27 +2883,27 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
|
| ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
|
| masm->isolate()->heap()->uninitialized_symbol());
|
|
|
| - // Load the cache state into t0.
|
| - __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
|
| - __ Addu(t0, a2, Operand(t0));
|
| - __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
|
| + // Load the cache state into a4.
|
| + __ dsrl(a4, a3, 32 - kPointerSizeLog2);
|
| + __ Daddu(a4, a2, Operand(a4));
|
| + __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
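|
| The dsrl by (32 - kPointerSizeLog2) is the recurring 64-bit replacement
| for the old sll by (kPointerSizeLog2 - kSmiTagSize): a MIPS64 smi keeps
| its payload in the upper 32 bits, so a single logical right shift turns
| a smi slot index directly into a byte offset. A worked sketch:
|
|   #include <cstdint>
|
|   // E.g. slot 3: smi == 3 << 32, and (3 << 32) >> 29 == 3 * 8 == 24.
|   uint64_t SmiIndexToByteOffset(uint64_t smi_index) {
|     const int kPointerSizeLog2 = 3;  // 8-byte slots on MIPS64
|     return smi_index >> (32 - kPointerSizeLog2);
|   }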
|
|
|
| // A monomorphic cache hit or an already megamorphic state: invoke the
|
| // function without changing the state.
|
| - __ Branch(&done, eq, t0, Operand(a1));
|
| + __ Branch(&done, eq, a4, Operand(a1));
|
|
|
| if (!FLAG_pretenuring_call_new) {
|
| // If we came here, we need to see if we are the array function.
|
| // If we didn't have a matching function, and we didn't find the megamorphic
|
| // sentinel, then we have in the slot either some other function or an
|
| // AllocationSite. Do a map check on the object in a3.
|
| - __ lw(t1, FieldMemOperand(t0, 0));
|
| + __ ld(a5, FieldMemOperand(a4, 0));
|
| __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
|
| - __ Branch(&miss, ne, t1, Operand(at));
|
| + __ Branch(&miss, ne, a5, Operand(at));
|
|
|
| // Make sure the function is the Array() function
|
| - __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
|
| - __ Branch(&megamorphic, ne, a1, Operand(t0));
|
| + __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
|
| + __ Branch(&megamorphic, ne, a1, Operand(a4));
|
| __ jmp(&done);
|
| }
|
|
|
| @@ -2880,22 +2912,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
|
| // A monomorphic miss (i.e, here the cache is not uninitialized) goes
|
| // megamorphic.
|
| __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
|
| - __ Branch(&initialize, eq, t0, Operand(at));
|
| + __ Branch(&initialize, eq, a4, Operand(at));
|
| // MegamorphicSentinel is an immortal immovable object (undefined) so no
|
| // write-barrier is needed.
|
| __ bind(&megamorphic);
|
| - __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
|
| - __ Addu(t0, a2, Operand(t0));
|
| + __ dsrl(a4, a3, 32 - kPointerSizeLog2);
|
| + __ Daddu(a4, a2, Operand(a4));
|
| __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
|
| - __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
|
| + __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
|
| __ jmp(&done);
|
|
|
| // An uninitialized cache is patched with the function.
|
| __ bind(&initialize);
|
| if (!FLAG_pretenuring_call_new) {
|
| // Make sure the function is the Array() function.
|
| - __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
|
| - __ Branch(¬_array_function, ne, a1, Operand(t0));
|
| + __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
|
| + __ Branch(¬_array_function, ne, a1, Operand(a4));
|
|
|
| // The target function is the Array constructor,
|
| // Create an AllocationSite if we don't already have it, store it in the
|
| @@ -2923,30 +2955,34 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
|
| __ bind(¬_array_function);
|
| }
|
|
|
| - __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
|
| - __ Addu(t0, a2, Operand(t0));
|
| - __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
| - __ sw(a1, MemOperand(t0, 0));
|
| + __ dsrl(a4, a3, 32 - kPointerSizeLog2);
|
| + __ Daddu(a4, a2, Operand(a4));
|
| + __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
| + __ sd(a1, MemOperand(a4, 0));
|
|
|
| - __ Push(t0, a2, a1);
|
| - __ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
|
| + __ Push(a4, a2, a1);
|
| + __ RecordWrite(a2, a4, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
|
| EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
|
| - __ Pop(t0, a2, a1);
|
| + __ Pop(a4, a2, a1);
|
|
|
| __ bind(&done);
|
| }
|
|
|
|
|
| static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
|
| - __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| - __ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset));
|
| + __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
|
|
| // Do not transform the receiver for strict mode functions.
|
| int32_t strict_mode_function_mask =
|
| - 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
|
| + 1 << SharedFunctionInfo::kStrictModeBitWithinByte;
|
| // Do not transform the receiver for native (Compilerhints already in a3).
|
| - int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
|
| - __ And(at, t0, Operand(strict_mode_function_mask | native_mask));
|
| + int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
|
| +
|
| + __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kStrictModeByteOffset));
|
| + __ And(at, a4, Operand(strict_mode_function_mask));
|
| + __ Branch(cont, ne, at, Operand(zero_reg));
|
| + __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kNativeByteOffset));
|
| + __ And(at, a4, Operand(native_mask));
|
| __ Branch(cont, ne, at, Operand(zero_reg));
|
| }
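|
| Instead of loading the whole smi-tagged compiler-hints word and testing
| tag-shifted bits as the 32-bit stub did, the port loads one byte at a
| precomputed offset and tests the bit inside it. The idea, sketched in
| C++ (the offset and bit position are placeholders, not the real V8 values):
|
|   #include <cstdint>
|
|   bool TestFlagByte(const uint8_t* obj, int byte_offset, int bit_in_byte) {
|     // The lbu + And pair in the stub above.
|     return (obj[byte_offset] & (1 << bit_in_byte)) != 0;
|   }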
|
|
|
| @@ -2955,7 +2991,7 @@ static void EmitSlowCase(MacroAssembler* masm,
|
| int argc,
|
| Label* non_function) {
|
| // Check for function proxy.
|
| - __ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
|
| + __ Branch(non_function, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE));
|
| __ push(a1); // put proxy as additional argument
|
| __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
|
| __ mov(a2, zero_reg);
|
| @@ -2969,7 +3005,7 @@ static void EmitSlowCase(MacroAssembler* masm,
|
| // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
|
| // of the original receiver from the call site).
|
| __ bind(non_function);
|
| - __ sw(a1, MemOperand(sp, argc * kPointerSize));
|
| + __ sd(a1, MemOperand(sp, argc * kPointerSize));
|
| __ li(a0, Operand(argc)); // Set up the number of arguments.
|
| __ mov(a2, zero_reg);
|
| __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
|
| @@ -2986,7 +3022,7 @@ static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
|
| __ pop(a1);
|
| }
|
| __ Branch(USE_DELAY_SLOT, cont);
|
| - __ sw(v0, MemOperand(sp, argc * kPointerSize));
|
| + __ sd(v0, MemOperand(sp, argc * kPointerSize));
|
| }
|
|
|
|
|
| @@ -3002,8 +3038,8 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
|
| __ JumpIfSmi(a1, &non_function);
|
|
|
| // Goto slow case if we do not have a function.
|
| - __ GetObjectType(a1, t0, t0);
|
| - __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
|
| + __ GetObjectType(a1, a4, a4);
|
| + __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
|
| }
|
|
|
| // Fast-case: Invoke the function now.
|
| @@ -3016,19 +3052,18 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
|
| }
|
|
|
| // Compute the receiver in sloppy mode.
|
| - __ lw(a3, MemOperand(sp, argc * kPointerSize));
|
| + __ ld(a3, MemOperand(sp, argc * kPointerSize));
|
|
|
| if (needs_checks) {
|
| __ JumpIfSmi(a3, &wrap);
|
| - __ GetObjectType(a3, t0, t0);
|
| - __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
|
| + __ GetObjectType(a3, a4, a4);
|
| + __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE));
|
| } else {
|
| __ jmp(&wrap);
|
| }
|
|
|
| __ bind(&cont);
|
| }
|
| -
|
| __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
|
|
|
| if (needs_checks) {
|
| @@ -3056,51 +3091,50 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
|
| // a2 : feedback vector
|
| // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
|
| Label slow, non_function_call;
|
| -
|
| // Check that the function is not a smi.
|
| __ JumpIfSmi(a1, &non_function_call);
|
| // Check that the function is a JSFunction.
|
| - __ GetObjectType(a1, t0, t0);
|
| - __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
|
| + __ GetObjectType(a1, a4, a4);
|
| + __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
|
|
|
| if (RecordCallTarget()) {
|
| GenerateRecordCallTarget(masm);
|
|
|
| - __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
|
| - __ Addu(t1, a2, at);
|
| + __ dsrl(at, a3, 32 - kPointerSizeLog2);
|
| + __ Daddu(a5, a2, at);
|
| if (FLAG_pretenuring_call_new) {
|
| // Put the AllocationSite from the feedback vector into a2.
|
| // By adding kPointerSize we encode that we know the AllocationSite
|
| // entry is at the feedback vector slot given by a3 + 1.
|
| - __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize));
|
| + __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
|
| } else {
|
| Label feedback_register_initialized;
|
| // Put the AllocationSite from the feedback vector into a2, or undefined.
|
| - __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
|
| - __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
|
| + __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
|
| + __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
|
| __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
|
| - __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
|
| + __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
|
| __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
|
| __ bind(&feedback_register_initialized);
|
| }
|
|
|
| - __ AssertUndefinedOrAllocationSite(a2, t1);
|
| + __ AssertUndefinedOrAllocationSite(a2, a5);
|
| }
|
|
|
| // Jump to the function-specific construct stub.
|
| - Register jmp_reg = t0;
|
| - __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| - __ lw(jmp_reg, FieldMemOperand(jmp_reg,
|
| + Register jmp_reg = a4;
|
| + __ ld(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| + __ ld(jmp_reg, FieldMemOperand(jmp_reg,
|
| SharedFunctionInfo::kConstructStubOffset));
|
| - __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| + __ Daddu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| __ Jump(at);
|
|
|
| // a0: number of arguments
|
| // a1: called object
|
| - // t0: object type
|
| + // a4: object type
|
| Label do_call;
|
| __ bind(&slow);
|
| - __ Branch(&non_function_call, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
|
| + __ Branch(&non_function_call, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE));
|
| __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
|
| __ jmp(&do_call);
|
|
|
| @@ -3114,11 +3148,54 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
|
| }
|
|
|
|
|
| +// StringCharCodeAtGenerator.
|
| +void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
|
| + Label flat_string;
|
| + Label ascii_string;
|
| + Label got_char_code;
|
| + Label sliced_string;
|
| +
|
| + ASSERT(!a4.is(index_));
|
| + ASSERT(!a4.is(result_));
|
| + ASSERT(!a4.is(object_));
|
| +
|
| + // If the receiver is a smi trigger the non-string case.
|
| + __ JumpIfSmi(object_, receiver_not_string_);
|
| +
|
| + // Fetch the instance type of the receiver into result register.
|
| + __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
|
| + __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
|
| + // If the receiver is not a string trigger the non-string case.
|
| + __ And(a4, result_, Operand(kIsNotStringMask));
|
| + __ Branch(receiver_not_string_, ne, a4, Operand(zero_reg));
|
| +
|
| + // If the index is non-smi trigger the non-smi case.
|
| + __ JumpIfNotSmi(index_, &index_not_smi_);
|
| +
|
| + __ bind(&got_smi_index_);
|
| +
|
| + // Check for index out of range.
|
| + __ ld(a4, FieldMemOperand(object_, String::kLengthOffset));
|
| + __ Branch(index_out_of_range_, ls, a4, Operand(index_));
|
| +
|
| + __ SmiUntag(index_);
|
| +
|
| + StringCharLoadGenerator::Generate(masm,
|
| + object_,
|
| + index_,
|
| + result_,
|
| + &call_runtime_);
|
| +
|
| + __ SmiTag(result_);
|
| + __ bind(&exit_);
|
| +}
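|
| The fast path only type-checks, bounds-checks, and loads. Comparing the
| length and index while both are still smis is safe: two nonnegative smis
| order the same way as their payloads. A compact model under the
| upper-32-bit smi layout (the struct is illustrative, not a V8 type):
|
|   #include <cstdint>
|
|   struct Str { int64_t length_smi; const uint8_t* data; };
|
|   bool CharCodeAtFast(const Str& s, int64_t index_smi, int64_t* result_smi) {
|     // The 'ls' branch: length <= index (unsigned) means out of range.
|     if (static_cast<uint64_t>(s.length_smi) <=
|         static_cast<uint64_t>(index_smi)) return false;
|     int64_t index = index_smi >> 32;                          // SmiUntag
|     *result_smi = static_cast<int64_t>(s.data[index]) << 32;  // SmiTag
|     return true;
|   }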
|
| +
|
| +
|
| static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
|
| - __ lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
|
| - __ lw(vector, FieldMemOperand(vector,
|
| + __ ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
|
| + __ ld(vector, FieldMemOperand(vector,
|
| JSFunction::kSharedFunctionInfoOffset));
|
| - __ lw(vector, FieldMemOperand(vector,
|
| + __ ld(vector, FieldMemOperand(vector,
|
| SharedFunctionInfo::kFeedbackVectorOffset));
|
| }
|
|
|
| @@ -3134,9 +3211,9 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
|
| __ Branch(&miss, ne, a1, Operand(at));
|
|
|
| __ li(a0, Operand(arg_count()));
|
| - __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
|
| - __ Addu(at, a2, Operand(at));
|
| - __ lw(a2, FieldMemOperand(at, FixedArray::kHeaderSize));
|
| + __ dsrl(at, a3, 32 - kPointerSizeLog2);
|
| + __ Daddu(at, a2, Operand(at));
|
| + __ ld(a2, FieldMemOperand(at, FixedArray::kHeaderSize));
|
| // Verify that a2 contains an AllocationSite
|
| __ AssertUndefinedOrAllocationSite(a2, at);
|
| ArrayConstructorStub stub(masm->isolate(), arg_count());
|
| @@ -3147,9 +3224,9 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
|
|
|
| // The slow case, we need this no matter what to complete a call after a miss.
|
| CallFunctionNoFeedback(masm,
|
| - arg_count(),
|
| - true,
|
| - CallAsMethod());
|
| + arg_count(),
|
| + true,
|
| + CallAsMethod());
|
|
|
| // Unreachable.
|
| __ stop("Unexpected code address");
|
| @@ -3157,8 +3234,8 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
|
|
|
|
|
| void CallICStub::Generate(MacroAssembler* masm) {
|
| - // r1 - function
|
| - // r3 - slot id (Smi)
|
| + // a1 - function
|
| + // a3 - slot id (Smi)
|
| Label extra_checks_or_miss, slow_start;
|
| Label slow, non_function, wrap, cont;
|
| Label have_js_function;
|
| @@ -3168,20 +3245,20 @@ void CallICStub::Generate(MacroAssembler* masm) {
|
| EmitLoadTypeFeedbackVector(masm, a2);
|
|
|
| // The checks. First, does a1 match the recorded monomorphic target?
|
| - __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
|
| - __ Addu(t0, a2, Operand(t0));
|
| - __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
|
| - __ Branch(&extra_checks_or_miss, ne, a1, Operand(t0));
|
| + __ dsrl(a4, a3, 32 - kPointerSizeLog2);
|
| + __ Daddu(a4, a2, Operand(a4));
|
| + __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
|
| + __ Branch(&extra_checks_or_miss, ne, a1, Operand(a4));
|
|
|
| __ bind(&have_js_function);
|
| if (state_.CallAsMethod()) {
|
| EmitContinueIfStrictOrNative(masm, &cont);
|
| // Compute the receiver in sloppy mode.
|
| - __ lw(a3, MemOperand(sp, argc * kPointerSize));
|
| + __ ld(a3, MemOperand(sp, argc * kPointerSize));
|
|
|
| __ JumpIfSmi(a3, &wrap);
|
| - __ GetObjectType(a3, t0, t0);
|
| - __ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
|
| + __ GetObjectType(a3, a4, a4);
|
| + __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE));
|
|
|
| __ bind(&cont);
|
| }
|
| @@ -3200,16 +3277,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
|
| Label miss;
|
|
|
| __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
|
| - __ Branch(&slow_start, eq, t0, Operand(at));
|
| + __ Branch(&slow_start, eq, a4, Operand(at));
|
| __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
|
| - __ Branch(&miss, eq, t0, Operand(at));
|
| + __ Branch(&miss, eq, a4, Operand(at));
|
|
|
| if (!FLAG_trace_ic) {
|
| // We are going megamorphic, and we don't want to visit the runtime.
|
| - __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
|
| - __ Addu(t0, a2, Operand(t0));
|
| + __ dsrl(a4, a3, 32 - kPointerSizeLog2);
|
| + __ Daddu(a4, a2, Operand(a4));
|
| __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
|
| - __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
|
| + __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
|
| __ Branch(&slow_start);
|
| }
|
|
|
| @@ -3224,21 +3301,21 @@ void CallICStub::Generate(MacroAssembler* masm) {
|
| __ JumpIfSmi(a1, &non_function);
|
|
|
| // Goto slow case if we do not have a function.
|
| - __ GetObjectType(a1, t0, t0);
|
| - __ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
|
| + __ GetObjectType(a1, a4, a4);
|
| + __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
|
| __ Branch(&have_js_function);
|
| }
|
|
|
|
|
| void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
|
| // Get the receiver of the function from the stack; 1 ~ return address.
|
| - __ lw(t0, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize));
|
| + __ ld(a4, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize));
|
|
|
| {
|
| FrameScope scope(masm, StackFrame::INTERNAL);
|
|
|
| // Push the receiver and the function and feedback info.
|
| - __ Push(t0, a1, a2, a3);
|
| + __ Push(a4, a1, a2, a3);
|
|
|
| // Call the entry.
|
| ExternalReference miss = ExternalReference(IC_Utility(id),
|
| @@ -3251,49 +3328,6 @@ void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
|
| }
|
|
|
|
|
| -// StringCharCodeAtGenerator.
|
| -void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
|
| - Label flat_string;
|
| - Label ascii_string;
|
| - Label got_char_code;
|
| - Label sliced_string;
|
| -
|
| - ASSERT(!t0.is(index_));
|
| - ASSERT(!t0.is(result_));
|
| - ASSERT(!t0.is(object_));
|
| -
|
| - // If the receiver is a smi trigger the non-string case.
|
| - __ JumpIfSmi(object_, receiver_not_string_);
|
| -
|
| - // Fetch the instance type of the receiver into result register.
|
| - __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
|
| - __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
|
| - // If the receiver is not a string trigger the non-string case.
|
| - __ And(t0, result_, Operand(kIsNotStringMask));
|
| - __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
|
| -
|
| - // If the index is non-smi trigger the non-smi case.
|
| - __ JumpIfNotSmi(index_, &index_not_smi_);
|
| -
|
| - __ bind(&got_smi_index_);
|
| -
|
| - // Check for index out of range.
|
| - __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
|
| - __ Branch(index_out_of_range_, ls, t0, Operand(index_));
|
| -
|
| - __ sra(index_, index_, kSmiTagSize);
|
| -
|
| - StringCharLoadGenerator::Generate(masm,
|
| - object_,
|
| - index_,
|
| - result_,
|
| - &call_runtime_);
|
| -
|
| - __ sll(result_, result_, kSmiTagSize);
|
| - __ bind(&exit_);
|
| -}
|
| -
|
| -
|
| void StringCharCodeAtGenerator::GenerateSlow(
|
| MacroAssembler* masm,
|
| const RuntimeCallHelper& call_helper) {
|
| @@ -3324,7 +3358,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
|
| __ Move(index_, v0);
|
| __ pop(object_);
|
| // Reload the instance type.
|
| - __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
|
| + __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
|
| __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
|
| call_helper.AfterCall(masm);
|
| // If index is still not a smi, it must be out of range.
|
| @@ -3337,7 +3371,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
|
| // is too complex (e.g., when the string needs to be flattened).
|
| __ bind(&call_runtime_);
|
| call_helper.BeforeCall(masm);
|
| - __ sll(index_, index_, kSmiTagSize);
|
| + __ SmiTag(index_);
|
| __ Push(object_, index_);
|
| __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
|
|
|
| @@ -3356,26 +3390,26 @@ void StringCharCodeAtGenerator::GenerateSlow(
|
| void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
|
| // Fast case of Heap::LookupSingleCharacterStringFromCode.
|
|
|
| - ASSERT(!t0.is(result_));
|
| - ASSERT(!t0.is(code_));
|
| + ASSERT(!a4.is(result_));
|
| + ASSERT(!a4.is(code_));
|
|
|
| STATIC_ASSERT(kSmiTag == 0);
|
| - STATIC_ASSERT(kSmiShiftSize == 0);
|
| ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
|
| - __ And(t0,
|
| + __ And(a4,
|
| code_,
|
| Operand(kSmiTagMask |
|
| ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
|
| - __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
|
| + __ Branch(&slow_case_, ne, a4, Operand(zero_reg));
|
| +
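|
| The single And folds two tests into one: the smi tag bit and every bit
| that would push the char code past String::kMaxOneByteCharCode (0xff in
| this tree). Sketched below for the 32-bit smi form, with the payload
| above a one-bit tag; the MIPS64 constants differ, but the trick is the
| same:
|
|   #include <cstdint>
|
|   bool IsOneByteCharCodeSmi(uintptr_t value) {
|     const uintptr_t kMaxOneByteCharCode = 0xff;
|     // Tag bit, plus every bit beyond an 8-bit payload shifted by the tag.
|     const uintptr_t mask = 1 | (~kMaxOneByteCharCode << 1);
|     return (value & mask) == 0;
|   }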
|
|
|
| __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
|
| // At this point code register contains smi tagged ASCII char code.
|
| STATIC_ASSERT(kSmiTag == 0);
|
| - __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
|
| - __ Addu(result_, result_, t0);
|
| - __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
|
| - __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
|
| - __ Branch(&slow_case_, eq, result_, Operand(t0));
|
| + __ SmiScale(a4, code_, kPointerSizeLog2);
|
| + __ Daddu(result_, result_, a4);
|
| + __ ld(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
|
| + __ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
|
| + __ Branch(&slow_case_, eq, result_, Operand(a4));
|
| __ bind(&exit_);
|
| }
|
|
|
| @@ -3424,20 +3458,20 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
|
| Label done;
|
|
|
| if (encoding == String::TWO_BYTE_ENCODING) {
|
| - __ Addu(count, count, count);
|
| + __ Daddu(count, count, count);
|
| }
|
|
|
| Register limit = count; // Read until dest equals this.
|
| - __ Addu(limit, dest, Operand(count));
|
| + __ Daddu(limit, dest, Operand(count));
|
|
|
| Label loop_entry, loop;
|
| // Copy bytes from src to dest until dest hits limit.
|
| __ Branch(&loop_entry);
|
| __ bind(&loop);
|
| __ lbu(scratch, MemOperand(src));
|
| - __ Addu(src, src, Operand(1));
|
| + __ daddiu(src, src, 1);
|
| __ sb(scratch, MemOperand(dest));
|
| - __ Addu(dest, dest, Operand(1));
|
| + __ daddiu(dest, dest, 1);
|
| __ bind(&loop_entry);
|
| __ Branch(&loop, lt, dest, Operand(limit));
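|
| Two-byte strings just double the byte count up front; after that the
| copy is a plain byte loop that runs until the destination pointer hits
| the limit. The equivalent C++ (a sketch):
|
|   #include <cstdint>
|
|   void CopyCharacters(uint8_t* dest, const uint8_t* src,
|                       int64_t count, bool two_byte) {
|     if (two_byte) count += count;           // Daddu(count, count, count)
|     uint8_t* limit = dest + count;          // copy until dest reaches this
|     while (dest < limit) *dest++ = *src++;  // lbu/sb, daddiu by 1
|   }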
|
|
|
| @@ -3515,31 +3549,35 @@ void SubStringStub::Generate(MacroAssembler* masm) {
|
| const int kFromOffset = 1 * kPointerSize;
|
| const int kStringOffset = 2 * kPointerSize;
|
|
|
| - __ lw(a2, MemOperand(sp, kToOffset));
|
| - __ lw(a3, MemOperand(sp, kFromOffset));
|
| - STATIC_ASSERT(kFromOffset == kToOffset + 4);
|
| + __ ld(a2, MemOperand(sp, kToOffset));
|
| + __ ld(a3, MemOperand(sp, kFromOffset));
|
| + // Not needed on MIPS64: the to/from slots are now 8 bytes apart.
|
| +// STATIC_ASSERT(kFromOffset == kToOffset + 4);
|
| STATIC_ASSERT(kSmiTag == 0);
|
| - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
|
| + // Not needed: this assert only holds for 32-bit smis.
|
| +// STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
|
|
|
| // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
|
| // safe in this case.
|
| - __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
|
| - __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
|
| + __ JumpIfNotSmi(a2, &runtime);
|
| + __ JumpIfNotSmi(a3, &runtime);
|
| // Both a2 and a3 are untagged integers.
|
|
|
| + __ SmiUntag(a2, a2);
|
| + __ SmiUntag(a3, a3);
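|
| UntagAndJumpIfNotSmi folded the test and the untag into a single one-bit
| shift on 32-bit MIPS; with the payload in the upper 32 bits that trick
| no longer applies, so the port tests first and untags separately. The
| MIPS64 untag, modeled in C++ (a sketch):
|
|   #include <cstdint>
|
|   bool TryUntagSmi(int64_t value, int64_t* out) {
|     if (value & 1) return false;  // tag bit set: heap object, not a smi
|     *out = value >> 32;           // arithmetic shift recovers the payload
|     return true;
|   }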
|
| __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
|
|
|
| __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
|
| - __ Subu(a2, a2, a3);
|
| + __ Dsubu(a2, a2, a3);
|
|
|
| // Make sure first argument is a string.
|
| - __ lw(v0, MemOperand(sp, kStringOffset));
|
| + __ ld(v0, MemOperand(sp, kStringOffset));
|
| __ JumpIfSmi(v0, &runtime);
|
| - __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
|
| + __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
|
| __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
|
| - __ And(t0, a1, Operand(kIsNotStringMask));
|
| + __ And(a4, a1, Operand(kIsNotStringMask));
|
|
|
| - __ Branch(&runtime, ne, t0, Operand(zero_reg));
|
| + __ Branch(&runtime, ne, a4, Operand(zero_reg));
|
|
|
| Label single_char;
|
| __ Branch(&single_char, eq, a2, Operand(1));
|
| @@ -3548,16 +3586,16 @@ void SubStringStub::Generate(MacroAssembler* masm) {
|
| Label return_v0;
|
| // v0: original string
|
| // a2: result string length
|
| - __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
|
| - __ sra(t0, t0, 1);
|
| + __ ld(a4, FieldMemOperand(v0, String::kLengthOffset));
|
| + __ SmiUntag(a4);
|
| // Return original string.
|
| - __ Branch(&return_v0, eq, a2, Operand(t0));
|
| + __ Branch(&return_v0, eq, a2, Operand(a4));
|
| // Longer than original string's length or negative: unsafe arguments.
|
| - __ Branch(&runtime, hi, a2, Operand(t0));
|
| + __ Branch(&runtime, hi, a2, Operand(a4));
|
| // Shorter than original string's length: an actual substring.
|
|
|
| // Deal with different string types: update the index if necessary
|
| - // and put the underlying string into t1.
|
| + // and put the underlying string into a5.
|
| // v0: original string
|
| // a1: instance type
|
| // a2: length
|
| @@ -3566,41 +3604,41 @@ void SubStringStub::Generate(MacroAssembler* masm) {
|
| // If the string is not indirect, it can only be sequential or external.
|
| STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
|
| STATIC_ASSERT(kIsIndirectStringMask != 0);
|
| - __ And(t0, a1, Operand(kIsIndirectStringMask));
|
| - __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
|
| - // t0 is used as a scratch register and can be overwritten in either case.
|
| - __ And(t0, a1, Operand(kSlicedNotConsMask));
|
| - __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
|
| + __ And(a4, a1, Operand(kIsIndirectStringMask));
|
| + __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, a4, Operand(zero_reg));
|
| + // a4 is used as a scratch register and can be overwritten in either case.
|
| + __ And(a4, a1, Operand(kSlicedNotConsMask));
|
| + __ Branch(&sliced_string, ne, a4, Operand(zero_reg));
|
| // Cons string. Check whether it is flat, then fetch first part.
|
| - __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
|
| - __ LoadRoot(t0, Heap::kempty_stringRootIndex);
|
| - __ Branch(&runtime, ne, t1, Operand(t0));
|
| - __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
|
| + __ ld(a5, FieldMemOperand(v0, ConsString::kSecondOffset));
|
| + __ LoadRoot(a4, Heap::kempty_stringRootIndex);
|
| + __ Branch(&runtime, ne, a5, Operand(a4));
|
| + __ ld(a5, FieldMemOperand(v0, ConsString::kFirstOffset));
|
| // Update instance type.
|
| - __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
|
| + __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
|
| __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
|
| __ jmp(&underlying_unpacked);
|
|
|
| __ bind(&sliced_string);
|
| // Sliced string. Fetch parent and correct start index by offset.
|
| - __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
|
| - __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
|
| - __ sra(t0, t0, 1); // Add offset to index.
|
| - __ Addu(a3, a3, t0);
|
| + __ ld(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
|
| + __ ld(a4, FieldMemOperand(v0, SlicedString::kOffsetOffset));
|
| + __ SmiUntag(a4); // Add offset to index.
|
| + __ Daddu(a3, a3, a4);
|
| // Update instance type.
|
| - __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
|
| + __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
|
| __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
|
| __ jmp(&underlying_unpacked);
|
|
|
| __ bind(&seq_or_external_string);
|
| // Sequential or external string. Just move string to the expected register.
|
| - __ mov(t1, v0);
|
| + __ mov(a5, v0);
|
|
|
| __ bind(&underlying_unpacked);
|
|
|
| if (FLAG_string_slices) {
|
| Label copy_routine;
|
| - // t1: underlying subject string
|
| + // a5: underlying subject string
|
| // a1: instance type of underlying subject string
|
| // a2: length
|
| // a3: adjusted start index (untagged)
|
| @@ -3614,91 +3652,91 @@ void SubStringStub::Generate(MacroAssembler* masm) {
|
| Label two_byte_slice, set_slice_header;
|
| STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
|
| STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
|
| - __ And(t0, a1, Operand(kStringEncodingMask));
|
| - __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
|
| - __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
|
| + __ And(a4, a1, Operand(kStringEncodingMask));
|
| + __ Branch(&two_byte_slice, eq, a4, Operand(zero_reg));
|
| + __ AllocateAsciiSlicedString(v0, a2, a6, a7, &runtime);
|
| __ jmp(&set_slice_header);
|
| __ bind(&two_byte_slice);
|
| - __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
|
| + __ AllocateTwoByteSlicedString(v0, a2, a6, a7, &runtime);
|
| __ bind(&set_slice_header);
|
| - __ sll(a3, a3, 1);
|
| - __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
|
| - __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
|
| + __ SmiTag(a3);
|
| + __ sd(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
|
| + __ sd(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
|
| __ jmp(&return_v0);
|
|
|
| __ bind(©_routine);
|
| }
|
|
|
| - // t1: underlying subject string
|
| + // a5: underlying subject string
|
| // a1: instance type of underlying subject string
|
| // a2: length
|
| // a3: adjusted start index (untagged)
|
| Label two_byte_sequential, sequential_string, allocate_result;
|
| STATIC_ASSERT(kExternalStringTag != 0);
|
| STATIC_ASSERT(kSeqStringTag == 0);
|
| - __ And(t0, a1, Operand(kExternalStringTag));
|
| - __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
|
| + __ And(a4, a1, Operand(kExternalStringTag));
|
| + __ Branch(&sequential_string, eq, a4, Operand(zero_reg));
|
|
|
| // Handle external string.
|
| // Rule out short external strings.
|
| STATIC_ASSERT(kShortExternalStringTag != 0);
|
| - __ And(t0, a1, Operand(kShortExternalStringTag));
|
| - __ Branch(&runtime, ne, t0, Operand(zero_reg));
|
| - __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
|
| - // t1 already points to the first character of underlying string.
|
| + __ And(a4, a1, Operand(kShortExternalStringTag));
|
| + __ Branch(&runtime, ne, a4, Operand(zero_reg));
|
| + __ ld(a5, FieldMemOperand(a5, ExternalString::kResourceDataOffset));
|
| + // a5 already points to the first character of underlying string.
|
| __ jmp(&allocate_result);
|
|
|
| __ bind(&sequential_string);
|
| // Locate first character of underlying subject string.
|
| STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
|
| - __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
|
| + __ Daddu(a5, a5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
|
|
|
| __ bind(&allocate_result);
|
| // Sequential ASCII string. Allocate the result.
|
| STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
|
| - __ And(t0, a1, Operand(kStringEncodingMask));
|
| - __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
|
| + __ And(a4, a1, Operand(kStringEncodingMask));
|
| + __ Branch(&two_byte_sequential, eq, a4, Operand(zero_reg));
|
|
|
| // Allocate and copy the resulting ASCII string.
|
| - __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
|
| + __ AllocateAsciiString(v0, a2, a4, a6, a7, &runtime);
|
|
|
| // Locate first character of substring to copy.
|
| - __ Addu(t1, t1, a3);
|
| + __ Daddu(a5, a5, a3);
|
|
|
| // Locate first character of result.
|
| - __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
|
| + __ Daddu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
|
|
|
| // v0: result string
|
| // a1: first character of result string
|
| // a2: result string length
|
| - // t1: first character of substring to copy
|
| + // a5: first character of substring to copy
|
| STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
|
| StringHelper::GenerateCopyCharacters(
|
| - masm, a1, t1, a2, a3, String::ONE_BYTE_ENCODING);
|
| + masm, a1, a5, a2, a3, String::ONE_BYTE_ENCODING);
|
| __ jmp(&return_v0);
|
|
|
| // Allocate and copy the resulting two-byte string.
|
| __ bind(&two_byte_sequential);
|
| - __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
|
| + __ AllocateTwoByteString(v0, a2, a4, a6, a7, &runtime);
|
|
|
| // Locate first character of substring to copy.
|
| STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
|
| - __ sll(t0, a3, 1);
|
| - __ Addu(t1, t1, t0);
|
| + __ dsll(a4, a3, 1);
|
| + __ Daddu(a5, a5, a4);
|
| // Locate first character of result.
|
| - __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
|
| + __ Daddu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
|
|
|
| // v0: result string.
|
| // a1: first character of result.
|
| // a2: result length.
|
| - // t1: first character of substring to copy.
|
| + // a5: first character of substring to copy.
|
| STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
|
| StringHelper::GenerateCopyCharacters(
|
| - masm, a1, t1, a2, a3, String::TWO_BYTE_ENCODING);
|
| + masm, a1, a5, a2, a3, String::TWO_BYTE_ENCODING);
|
|
|
| __ bind(&return_v0);
|
| Counters* counters = isolate()->counters();
|
| - __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
|
| + __ IncrementCounter(counters->sub_string_native(), 1, a3, a4);
|
| __ DropAndRet(3);
|
|
|
| // Just jump to runtime to create the sub string.
|
| @@ -3710,7 +3748,6 @@ void SubStringStub::Generate(MacroAssembler* masm) {
|
| // a1: instance type
|
| // a2: length
|
| // a3: from index (untagged)
|
| - __ SmiTag(a3, a3);
|
| StringCharAtGenerator generator(
|
| v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
|
| generator.GenerateFast(masm);
|
| @@ -3729,20 +3766,20 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
|
|
|
| // Compare lengths.
|
| Label strings_not_equal, check_zero_length;
|
| - __ lw(length, FieldMemOperand(left, String::kLengthOffset));
|
| - __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
|
| + __ ld(length, FieldMemOperand(left, String::kLengthOffset));
|
| + __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
|
| __ Branch(&check_zero_length, eq, length, Operand(scratch2));
|
| __ bind(&strings_not_equal);
|
| - ASSERT(is_int16(NOT_EQUAL));
|
| - __ Ret(USE_DELAY_SLOT);
|
| + // li can expand to multiple instructions, so it cannot go in the delay slot.
|
| __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
|
| + __ Ret();
|
|
|
| // Check if the length is zero.
|
| Label compare_chars;
|
| __ bind(&check_zero_length);
|
| STATIC_ASSERT(kSmiTag == 0);
|
| __ Branch(&compare_chars, ne, length, Operand(zero_reg));
|
| - ASSERT(is_int16(EQUAL));
|
| + ASSERT(is_int16((intptr_t)Smi::FromInt(EQUAL)));
|
| __ Ret(USE_DELAY_SLOT);
|
| __ li(v0, Operand(Smi::FromInt(EQUAL)));
|
|
|
| @@ -3768,9 +3805,9 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
|
| Register scratch4) {
|
| Label result_not_equal, compare_lengths;
|
| // Find minimum length and length difference.
|
| - __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
|
| - __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
|
| - __ Subu(scratch3, scratch1, Operand(scratch2));
|
| + __ ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
|
| + __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
|
| + __ Dsubu(scratch3, scratch1, Operand(scratch2));
|
| Register length_delta = scratch3;
|
| __ slt(scratch4, scratch2, scratch1);
|
| __ Movn(scratch1, scratch2, scratch4);
|
| @@ -3817,23 +3854,23 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
|
| // start. This means that loop ends when index reaches zero, which
|
| // doesn't need an additional compare.
|
| __ SmiUntag(length);
|
| - __ Addu(scratch1, length,
|
| + __ Daddu(scratch1, length,
|
| Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
|
| - __ Addu(left, left, Operand(scratch1));
|
| - __ Addu(right, right, Operand(scratch1));
|
| - __ Subu(length, zero_reg, length);
|
| + __ Daddu(left, left, Operand(scratch1));
|
| + __ Daddu(right, right, Operand(scratch1));
|
| + __ Dsubu(length, zero_reg, length);
|
| Register index = length; // index = -length;
|
|
|
|
|
| // Compare loop.
|
| Label loop;
|
| __ bind(&loop);
|
| - __ Addu(scratch3, left, index);
|
| + __ Daddu(scratch3, left, index);
|
| __ lbu(scratch1, MemOperand(scratch3));
|
| - __ Addu(scratch3, right, index);
|
| + __ Daddu(scratch3, right, index);
|
| __ lbu(scratch2, MemOperand(scratch3));
|
| __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
|
| - __ Addu(index, index, 1);
|
| + __ Daddu(index, index, 1);
|
| __ Branch(&loop, ne, index, Operand(zero_reg));
|
| }
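|
| Running the index from -length up to zero lets the loop's exit test
| double as the bounds test, saving one compare per iteration. In plain
| C++ (a sketch):
|
|   #include <cstdint>
|
|   bool AsciiCharsEqual(const uint8_t* left, const uint8_t* right,
|                        int64_t length) {
|     left += length;   // both point just past the last character
|     right += length;
|     for (int64_t i = -length; i != 0; ++i) {
|       if (left[i] != right[i]) return false;
|     }
|     return true;
|   }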
|
|
|
| @@ -3846,8 +3883,8 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
|
| // Stack frame on entry.
|
| // sp[0]: right string
|
| // sp[8]: left string
|
| - __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
|
| - __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
|
| + __ ld(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
|
| + __ ld(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
|
|
|
| Label not_same;
|
| __ Branch(¬_same, ne, a0, Operand(a1));
|
| @@ -3864,8 +3901,8 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
|
|
|
| // Compare flat ASCII strings natively. Remove arguments from stack first.
|
| __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
|
| - __ Addu(sp, sp, Operand(2 * kPointerSize));
|
| - GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
|
| + __ Daddu(sp, sp, Operand(2 * kPointerSize));
|
| + GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, a4, a5);
|
|
|
| __ bind(&runtime);
|
| __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
|
| @@ -3888,9 +3925,9 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
|
| if (FLAG_debug_code) {
|
| __ And(at, a2, Operand(kSmiTagMask));
|
| __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
|
| - __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
|
| + __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
|
| __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
|
| - __ Assert(eq, kExpectedAllocationSite, t0, Operand(at));
|
| + __ Assert(eq, kExpectedAllocationSite, a4, Operand(at));
|
| }
|
|
|
| // Tail call into the stub that handles binary operations with allocation
|
| @@ -3909,13 +3946,13 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
|
| if (GetCondition() == eq) {
|
| // For equality we do not care about the sign of the result.
|
| __ Ret(USE_DELAY_SLOT);
|
| - __ Subu(v0, a0, a1);
|
| + __ Dsubu(v0, a0, a1);
|
| } else {
|
| // Untag before subtracting to avoid handling overflow.
|
| __ SmiUntag(a1);
|
| __ SmiUntag(a0);
|
| __ Ret(USE_DELAY_SLOT);
|
| - __ Subu(v0, a1, a0);
|
| + __ Dsubu(v0, a1, a0);
|
| }
|
|
|
| __ bind(&miss);
|
| @@ -3944,7 +3981,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
|
| __ JumpIfSmi(a0, &right_smi);
|
| __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
|
| DONT_DO_SMI_CHECK);
|
| - __ Subu(a2, a0, Operand(kHeapObjectTag));
|
| + __ Dsubu(a2, a0, Operand(kHeapObjectTag));
|
| __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
|
| __ Branch(&left);
|
| __ bind(&right_smi);
|
| @@ -3957,7 +3994,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
|
| __ JumpIfSmi(a1, &left_smi);
|
| __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
|
| DONT_DO_SMI_CHECK);
|
| - __ Subu(a2, a1, Operand(kHeapObjectTag));
|
| + __ Dsubu(a2, a1, Operand(kHeapObjectTag));
|
| __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
|
| __ Branch(&done);
|
| __ bind(&left_smi);
|
| @@ -4030,8 +4067,8 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
|
| __ JumpIfEitherSmi(left, right, &miss);
|
|
|
| // Check that both operands are internalized strings.
|
| - __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
|
| - __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
|
| + __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
|
| + __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
|
| __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
|
| __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
|
| STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
|
| @@ -4072,8 +4109,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
|
|
|
| // Check that both operands are unique names. This leaves the instance
|
| // types loaded in tmp1 and tmp2.
|
| - __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
|
| - __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
|
| + __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
|
| + __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
|
| __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
|
| __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
|
|
|
| @@ -4111,17 +4148,17 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
|
| Register right = a0;
|
| Register tmp1 = a2;
|
| Register tmp2 = a3;
|
| - Register tmp3 = t0;
|
| - Register tmp4 = t1;
|
| - Register tmp5 = t2;
|
| + Register tmp3 = a4;
|
| + Register tmp4 = a5;
|
| + Register tmp5 = a6;
|
|
|
| // Check that both operands are heap objects.
|
| __ JumpIfEitherSmi(left, right, &miss);
|
|
|
| // Check that both operands are strings. This leaves the instance
|
| // types loaded in tmp1 and tmp2.
|
| - __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
|
| - __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
|
| + __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
|
| + __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
|
| __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
|
| __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
|
| STATIC_ASSERT(kNotStringTag != 0);
|
| @@ -4199,7 +4236,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
|
|
|
| ASSERT(GetCondition() == eq);
|
| __ Ret(USE_DELAY_SLOT);
|
| - __ subu(v0, a0, a1);
|
| + __ dsubu(v0, a0, a1);
|
|
|
| __ bind(&miss);
|
| GenerateMiss(masm);
|
| @@ -4210,13 +4247,13 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
|
| Label miss;
|
| __ And(a2, a1, a0);
|
| __ JumpIfSmi(a2, &miss);
|
| - __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
|
| - __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
|
| + __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
|
| + __ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
|
| __ Branch(&miss, ne, a2, Operand(known_map_));
|
| __ Branch(&miss, ne, a3, Operand(known_map_));
|
|
|
| __ Ret(USE_DELAY_SLOT);
|
| - __ subu(v0, a0, a1);
|
| + __ dsubu(v0, a0, a1);
|
|
|
| __ bind(&miss);
|
| GenerateMiss(masm);
|
| @@ -4231,12 +4268,12 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
|
| FrameScope scope(masm, StackFrame::INTERNAL);
|
| __ Push(a1, a0);
|
| __ Push(ra, a1, a0);
|
| - __ li(t0, Operand(Smi::FromInt(op_)));
|
| - __ addiu(sp, sp, -kPointerSize);
|
| + __ li(a4, Operand(Smi::FromInt(op_)));
|
| + __ daddiu(sp, sp, -kPointerSize);
|
| __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
|
| - __ sw(t0, MemOperand(sp)); // In the delay slot.
|
| + __ sd(a4, MemOperand(sp)); // In the delay slot.
|
| // Compute the entry point of the rewritten stub.
|
| - __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| + __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| // Restore registers.
|
| __ Pop(a1, a0, ra);
|
| }
|
| @@ -4250,20 +4287,20 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
|
| // so they handle stack restoring and we don't have to do that here.
|
| // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
|
| // kCArgsSlotsSize stack space after the call.
|
| - __ Subu(sp, sp, Operand(kCArgsSlotsSize));
|
| + __ daddiu(sp, sp, -kCArgsSlotsSize);
|
| // Place the return address on the stack, making the call
|
| // GC safe. The RegExp backend also relies on this.
|
| - __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
|
| + __ sd(ra, MemOperand(sp, kCArgsSlotsSize));
|
| __ Call(t9); // Call the C++ function.
|
| - __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
|
| + __ ld(t9, MemOperand(sp, kCArgsSlotsSize));
|
|
|
| if (FLAG_debug_code && FLAG_enable_slow_asserts) {
|
| // In case of an error the return address may point to a memory area
|
| // filled with kZapValue by the GC.
|
| // Dereference the address and check for this.
|
| - __ lw(t0, MemOperand(t9));
|
| - __ Assert(ne, kReceivedInvalidReturnAddress, t0,
|
| - Operand(reinterpret_cast<uint32_t>(kZapValue)));
|
| + __ Uld(a4, MemOperand(t9));
|
| + __ Assert(ne, kReceivedInvalidReturnAddress, a4,
|
| + Operand(reinterpret_cast<uint64_t>(kZapValue)));
|
| }
|
| __ Jump(t9);
|
| }
|
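Two width changes in DirectCEntryStub's debug check: the zap-value comparison casts through uint64_t, since a truncated 32-bit pattern could no longer match a 64-bit address, and the saved return address is read with Uld, V8's unaligned doubleword load, presumably because 8-byte alignment of that slot is not guaranteed. A sketch of the truncation problem (the zap constant here is illustrative):

    #include <cstdint>

    // Sketch: on LP64, truncating a zapped pointer to 32 bits would defeat
    // the assert. 0xdeadbeedbeadbeef stands in for the real kZapValue.
    int main() {
      const void* zapped = reinterpret_cast<const void*>(0xdeadbeedbeadbeefULL);
      uint64_t full = reinterpret_cast<uint64_t>(zapped);
      uint32_t truncated = static_cast<uint32_t>(full);
      return full == truncated ? 1 : 0;  // returns 0: the comparison differs
    }
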
| @@ -4297,23 +4334,24 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
|
| // Compute the masked index: (hash + i + i * i) & mask.
|
| Register index = scratch0;
|
| // Capacity is smi 2^n.
|
| - __ lw(index, FieldMemOperand(properties, kCapacityOffset));
|
| - __ Subu(index, index, Operand(1));
|
| - __ And(index, index, Operand(
|
| - Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
|
| + __ SmiLoadUntag(index, FieldMemOperand(properties, kCapacityOffset));
|
| + __ Dsubu(index, index, Operand(1));
|
| + __ And(index, index,
|
| + Operand(name->Hash() + NameDictionary::GetProbeOffset(i)));
|
|
|
| // Scale the index by multiplying by the entry size.
|
| ASSERT(NameDictionary::kEntrySize == 3);
|
| - __ sll(at, index, 1);
|
| - __ Addu(index, index, at);
|
| + __ dsll(at, index, 1);
|
| + __ Daddu(index, index, at); // index *= 3.
|
|
|
| Register entity_name = scratch0;
|
| // Having undefined at this place means the name is not contained.
|
| ASSERT_EQ(kSmiTagSize, 1);
|
| Register tmp = properties;
|
| - __ sll(scratch0, index, 1);
|
| - __ Addu(tmp, properties, scratch0);
|
| - __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
|
| +
|
| + __ dsll(scratch0, index, kPointerSizeLog2);
|
| + __ Daddu(tmp, properties, scratch0);
|
| + __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
|
|
|
| ASSERT(!tmp.is(entity_name));
|
| __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
|
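The probe arithmetic stays multiply-free on 64 bits: index * 3 is still a shift plus an add, and the byte offset now scales by kPointerSizeLog2 == 3 (8-byte dictionary slots) rather than the 32-bit port's combined smi-and-pointer shift. A minimal sketch of the address math, with the constants assumed from the ASSERTs above:

    #include <cassert>
    #include <cstdint>

    // Sketch of the dictionary entry offset: 3-word entries, 8-byte words
    // (NameDictionary::kEntrySize == 3, kPointerSizeLog2 == 3 assumed).
    uint64_t EntryByteOffset(uint64_t index) {
      uint64_t scaled = index + (index << 1);  // dsll at, index, 1; Daddu
      return scaled << 3;                      // dsll ..., kPointerSizeLog2
    }

    int main() {
      assert(EntryByteOffset(5) == 5 * 3 * 8);
      return 0;
    }
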
| @@ -4329,23 +4367,23 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
|
| __ Branch(&good, eq, entity_name, Operand(tmp));
|
|
|
| // Check if the entry name is not a unique name.
|
| - __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
|
| + __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
|
| __ lbu(entity_name,
|
| FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
|
| __ JumpIfNotUniqueName(entity_name, miss);
|
| __ bind(&good);
|
|
|
| // Restore the properties.
|
| - __ lw(properties,
|
| + __ ld(properties,
|
| FieldMemOperand(receiver, JSObject::kPropertiesOffset));
|
| }
|
|
|
| const int spill_mask =
|
| - (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
|
| + (ra.bit() | a6.bit() | a5.bit() | a4.bit() | a3.bit() |
|
| a2.bit() | a1.bit() | a0.bit() | v0.bit());
|
|
|
| __ MultiPush(spill_mask);
|
| - __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
|
| + __ ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
|
| __ li(a1, Operand(Handle<Name>(name)));
|
| NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
|
| __ CallStub(&stub);
|
| @@ -4376,44 +4414,44 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
|
| __ AssertName(name);
|
|
|
| // Compute the capacity mask.
|
| - __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
|
| - __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
|
| - __ Subu(scratch1, scratch1, Operand(1));
|
| + __ ld(scratch1, FieldMemOperand(elements, kCapacityOffset));
|
| + __ SmiUntag(scratch1);
|
| + __ Dsubu(scratch1, scratch1, Operand(1));
|
|
|
| // Generate an unrolled loop that performs a few probes before
|
| // giving up. Measurements done on Gmail indicate that 2 probes
|
| // cover ~93% of loads from dictionaries.
|
| for (int i = 0; i < kInlinedProbes; i++) {
|
| // Compute the masked index: (hash + i + i * i) & mask.
|
| - __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
|
| + __ lwu(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
|
| if (i > 0) {
|
| // Add the probe offset (i + i * i) left shifted to avoid right shifting
|
| // the hash in a separate instruction. The value hash + i + i * i is right
|
| // shifted in the following and instruction.
|
| ASSERT(NameDictionary::GetProbeOffset(i) <
|
| 1 << (32 - Name::kHashFieldOffset));
|
| - __ Addu(scratch2, scratch2, Operand(
|
| + __ Daddu(scratch2, scratch2, Operand(
|
| NameDictionary::GetProbeOffset(i) << Name::kHashShift));
|
| }
|
| - __ srl(scratch2, scratch2, Name::kHashShift);
|
| + __ dsrl(scratch2, scratch2, Name::kHashShift);
|
| __ And(scratch2, scratch1, scratch2);
|
|
|
| // Scale the index by multiplying by the element size.
|
| ASSERT(NameDictionary::kEntrySize == 3);
|
| // scratch2 = scratch2 * 3.
|
|
|
| - __ sll(at, scratch2, 1);
|
| - __ Addu(scratch2, scratch2, at);
|
| + __ dsll(at, scratch2, 1);
|
| + __ Daddu(scratch2, scratch2, at);
|
|
|
| // Check if the key is identical to the name.
|
| - __ sll(at, scratch2, 2);
|
| - __ Addu(scratch2, elements, at);
|
| - __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
|
| + __ dsll(at, scratch2, kPointerSizeLog2);
|
| + __ Daddu(scratch2, elements, at);
|
| + __ ld(at, FieldMemOperand(scratch2, kElementsStartOffset));
|
| __ Branch(done, eq, name, Operand(at));
|
| }
|
|
|
| const int spill_mask =
|
| - (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
|
| + (ra.bit() | a6.bit() | a5.bit() | a4.bit() |
|
| a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
|
| ~(scratch1.bit() | scratch2.bit());
|
|
|
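Note the lw-to-lwu change for Name::kHashFieldOffset: the hash field stays 32 bits wide, and lwu zero-extends it into the 64-bit register so the dsrl/And probe math is not polluted by sign extension. A sketch of the difference (the shift amount is illustrative):

    #include <cassert>
    #include <cstdint>

    // Sketch: lw sign-extends, lwu zero-extends. For a hash with the top
    // bit set, only zero extension keeps the probe index correct.
    int main() {
      uint32_t hash_field = 0x80000004u;
      uint64_t via_lwu = hash_field;                      // zero-extend
      int64_t via_lw = static_cast<int32_t>(hash_field);  // sign-extend
      assert((via_lwu >> 2) == 0x20000001ull);
      assert((static_cast<uint64_t>(via_lw) >> 2) != (via_lwu >> 2));
      return 0;
    }
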
| @@ -4454,17 +4492,17 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
|
| Register key = a1;
|
| Register index = a2;
|
| Register mask = a3;
|
| - Register hash = t0;
|
| - Register undefined = t1;
|
| - Register entry_key = t2;
|
| + Register hash = a4;
|
| + Register undefined = a5;
|
| + Register entry_key = a6;
|
|
|
| Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
|
|
|
| - __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
|
| - __ sra(mask, mask, kSmiTagSize);
|
| - __ Subu(mask, mask, Operand(1));
|
| + __ ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
|
| + __ SmiUntag(mask);
|
| + __ Dsubu(mask, mask, Operand(1));
|
|
|
| - __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
|
| + __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
|
|
|
| __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
|
|
|
| @@ -4477,26 +4515,26 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
|
| // shifted in the following and instruction.
|
| ASSERT(NameDictionary::GetProbeOffset(i) <
|
| 1 << (32 - Name::kHashFieldOffset));
|
| - __ Addu(index, hash, Operand(
|
| + __ Daddu(index, hash, Operand(
|
| NameDictionary::GetProbeOffset(i) << Name::kHashShift));
|
| } else {
|
| __ mov(index, hash);
|
| }
|
| - __ srl(index, index, Name::kHashShift);
|
| + __ dsrl(index, index, Name::kHashShift);
|
| __ And(index, mask, index);
|
|
|
| // Scale the index by multiplying by the entry size.
|
| ASSERT(NameDictionary::kEntrySize == 3);
|
| // index *= 3.
|
| __ mov(at, index);
|
| - __ sll(index, index, 1);
|
| - __ Addu(index, index, at);
|
| + __ dsll(index, index, 1);
|
| + __ Daddu(index, index, at);
|
|
|
|
|
| ASSERT_EQ(kSmiTagSize, 1);
|
| - __ sll(index, index, 2);
|
| - __ Addu(index, index, dictionary);
|
| - __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
|
| + __ dsll(index, index, kPointerSizeLog2);
|
| + __ Daddu(index, index, dictionary);
|
| + __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
|
|
|
| // Having undefined at this place means the name is not contained.
|
| __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
|
| @@ -4506,7 +4544,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
|
|
|
| if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
|
| // Check if the entry name is not a unique name.
|
| - __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
|
| + __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
|
| __ lbu(entry_key,
|
| FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
|
| __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
|
| @@ -4590,7 +4628,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
|
| if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
|
| Label dont_need_remembered_set;
|
|
|
| - __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
|
| + __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
|
| __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
|
| regs_.scratch0(),
|
| &dont_need_remembered_set);
|
| @@ -4654,11 +4692,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
|
| Label need_incremental_pop_scratch;
|
|
|
| __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
|
| - __ lw(regs_.scratch1(),
|
| + __ ld(regs_.scratch1(),
|
| MemOperand(regs_.scratch0(),
|
| MemoryChunk::kWriteBarrierCounterOffset));
|
| - __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
|
| - __ sw(regs_.scratch1(),
|
| + __ Dsubu(regs_.scratch1(), regs_.scratch1(), Operand(1));
|
| + __ sd(regs_.scratch1(),
|
| MemOperand(regs_.scratch0(),
|
| MemoryChunk::kWriteBarrierCounterOffset));
|
| __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
|
| @@ -4681,7 +4719,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
|
| __ bind(&on_black);
|
|
|
| // Get the value from the slot.
|
| - __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
|
| + __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
|
|
|
| if (mode == INCREMENTAL_COMPACTION) {
|
| Label ensure_not_white;
|
| @@ -4737,7 +4775,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
|
| // -- a3 : element index as smi
|
| // -- sp[0] : array literal index in function as smi
|
| // -- sp[4] : array literal
|
| - // clobbers a1, a2, t0
|
| + // clobbers a1, a2, a4
|
| // -----------------------------------
|
|
|
| Label element_done;
|
| @@ -4747,34 +4785,34 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
|
| Label fast_elements;
|
|
|
| // Get array literal index, array literal and its map.
|
| - __ lw(t0, MemOperand(sp, 0 * kPointerSize));
|
| - __ lw(a1, MemOperand(sp, 1 * kPointerSize));
|
| - __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
|
| + __ ld(a4, MemOperand(sp, 0 * kPointerSize));
|
| + __ ld(a1, MemOperand(sp, 1 * kPointerSize));
|
| + __ ld(a2, FieldMemOperand(a1, JSObject::kMapOffset));
|
|
|
| - __ CheckFastElements(a2, t1, &double_elements);
|
| + __ CheckFastElements(a2, a5, &double_elements);
|
| // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
|
| __ JumpIfSmi(a0, &smi_element);
|
| - __ CheckFastSmiElements(a2, t1, &fast_elements);
|
| + __ CheckFastSmiElements(a2, a5, &fast_elements);
|
|
|
| // Store into the array literal requires an elements transition. Call into
|
| // the runtime.
|
| __ bind(&slow_elements);
|
| // call.
|
| __ Push(a1, a3, a0);
|
| - __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
|
| - __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
|
| - __ Push(t1, t0);
|
| + __ ld(a5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
|
| + __ ld(a5, FieldMemOperand(a5, JSFunction::kLiteralsOffset));
|
| + __ Push(a5, a4);
|
| __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
|
|
|
| // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
|
| __ bind(&fast_elements);
|
| - __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
|
| - __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
|
| - __ Addu(t2, t1, t2);
|
| - __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
| - __ sw(a0, MemOperand(t2, 0));
|
| + __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
|
| + __ SmiScale(a6, a3, kPointerSizeLog2);
|
| + __ Daddu(a6, a5, a6);
|
| + __ Daddu(a6, a6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
| + __ sd(a0, MemOperand(a6, 0));
|
| // Update the write barrier for the array store.
|
| - __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
|
| + __ RecordWrite(a5, a6, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
|
| EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
|
| __ Ret(USE_DELAY_SLOT);
|
| __ mov(v0, a0);
|
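SmiScale replaces the 32-bit port's single shift of the tagged index (sll by kPointerSizeLog2 - kSmiTagSize): with the payload now in the upper word, the index must be untagged first and then scaled to a byte offset. A sketch of the equivalent arithmetic under the same kSmiShift == 32 assumption as above:

    #include <cassert>
    #include <cstdint>

    // Sketch of SmiScale(dst, smi, kPointerSizeLog2) on the 64-bit Smi
    // encoding: untag (arithmetic shift by 32), then scale to bytes.
    int64_t SmiScale(int64_t smi, int scale) { return (smi >> 32) << scale; }

    int main() {
      int64_t four_as_smi = static_cast<int64_t>(4) << 32;
      assert(SmiScale(four_as_smi, 3) == 4 * 8);  // element 4, 8-byte slots
      return 0;
    }
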
| @@ -4782,17 +4820,17 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
|
| // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
|
| // and value is Smi.
|
| __ bind(&smi_element);
|
| - __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
|
| - __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
|
| - __ Addu(t2, t1, t2);
|
| - __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
|
| + __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
|
| + __ SmiScale(a6, a3, kPointerSizeLog2);
|
| + __ Daddu(a6, a5, a6);
|
| + __ sd(a0, FieldMemOperand(a6, FixedArray::kHeaderSize));
|
| __ Ret(USE_DELAY_SLOT);
|
| __ mov(v0, a0);
|
|
|
| // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
|
| __ bind(&double_elements);
|
| - __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
|
| - __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
|
| + __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
|
| + __ StoreNumberToDoubleElements(a0, a3, a5, a7, t1, a2, &slow_elements);
|
| __ Ret(USE_DELAY_SLOT);
|
| __ mov(v0, a0);
|
| }
|
| @@ -4803,14 +4841,14 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
|
| __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
|
| int parameter_count_offset =
|
| StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
|
| - __ lw(a1, MemOperand(fp, parameter_count_offset));
|
| + __ ld(a1, MemOperand(fp, parameter_count_offset));
|
| if (function_mode_ == JS_FUNCTION_STUB_MODE) {
|
| - __ Addu(a1, a1, Operand(1));
|
| + __ Daddu(a1, a1, Operand(1));
|
| }
|
| masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
|
| - __ sll(a1, a1, kPointerSizeLog2);
|
| + __ dsll(a1, a1, kPointerSizeLog2);
|
| __ Ret(USE_DELAY_SLOT);
|
| - __ Addu(sp, sp, a1);
|
| + __ Daddu(sp, sp, a1);
|
| }
|
|
|
|
|
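The trampoline's argument pop shows the same pattern in miniature: the parameter count is loaded with ld, bumped by one for the receiver under JS_FUNCTION_STUB_MODE, and converted to a byte adjustment with a doubleword shift. A small sketch:

    #include <cstdint>

    // Sketch of the stack adjustment: one extra slot for the receiver in
    // JS_FUNCTION_STUB_MODE, 8-byte slots on MIPS64.
    uint64_t BytesToPop(uint64_t param_count, bool js_function_mode) {
      if (js_function_mode) param_count += 1;  // Daddu a1, a1, Operand(1)
      return param_count << 3;                 // dsll a1, a1, kPointerSizeLog2
    }
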
| @@ -4842,11 +4880,11 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
|
| __ MultiPush(kSavedRegs | ra.bit());
|
|
|
| // Compute the function's address for the first argument.
|
| - __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
|
| + __ Dsubu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
|
|
|
| // The caller's return address is above the saved temporaries.
|
| // Grab that for the second argument to the hook.
|
| - __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
|
| + __ Daddu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
|
|
|
| // Align the stack if necessary.
|
| int frame_alignment = masm->ActivationFrameAlignment();
|
| @@ -4855,10 +4893,11 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
|
| ASSERT(IsPowerOf2(frame_alignment));
|
| __ And(sp, sp, Operand(-frame_alignment));
|
| }
|
| - __ Subu(sp, sp, kCArgsSlotsSize);
|
| -#if defined(V8_HOST_ARCH_MIPS)
|
| - int32_t entry_hook =
|
| - reinterpret_cast<int32_t>(isolate()->function_entry_hook());
|
| +
|
| + __ Dsubu(sp, sp, kCArgsSlotsSize);
|
| +#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
|
| + int64_t entry_hook =
|
| + reinterpret_cast<int64_t>(isolate()->function_entry_hook());
|
| __ li(t9, Operand(entry_hook));
|
| #else
|
| // Under the simulator we need to indirect the entry hook through a
|
| @@ -4878,7 +4917,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
|
| if (frame_alignment > kPointerSize) {
|
| __ mov(sp, s5);
|
| } else {
|
| - __ Addu(sp, sp, kCArgsSlotsSize);
|
| + __ Daddu(sp, sp, kCArgsSlotsSize);
|
| }
|
|
|
| // Also pop ra to get Ret(0).
|
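ProfileEntryHookStub now materializes the hook's address as a 64-bit integer, and the native-call path is guarded by V8_HOST_ARCH_MIPS64 as well, so the direct li into t9 is used when running on real 64-bit hardware. The widened cast is the essential part; a sketch:

    #include <cstdint>

    // Sketch: under an LP64 host ABI a function pointer needs all 64 bits,
    // so the value loaded into t9 is carried as int64_t, not int32_t.
    void ExampleHook() {}

    int64_t HookAddress() {
      return reinterpret_cast<int64_t>(&ExampleHook);  // int32_t would truncate
    }
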
| @@ -4930,10 +4969,9 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
|
| __ And(at, a3, Operand(1));
|
| __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
|
| }
|
| -
|
| // look at the first argument
|
| - __ lw(t1, MemOperand(sp, 0));
|
| - __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
|
| + __ ld(a5, MemOperand(sp, 0));
|
| + __ Branch(&normal_sequence, eq, a5, Operand(zero_reg));
|
|
|
| if (mode == DISABLE_ALLOCATION_SITES) {
|
| ElementsKind initial = GetInitialFastElementsKind();
|
| @@ -4952,21 +4990,21 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
|
| } else if (mode == DONT_OVERRIDE) {
|
| // We are going to create a holey array, but our kind is non-holey.
|
| // Fix kind and retry (only if we have an allocation site in the slot).
|
| - __ Addu(a3, a3, Operand(1));
|
| + __ Daddu(a3, a3, Operand(1));
|
|
|
| if (FLAG_debug_code) {
|
| - __ lw(t1, FieldMemOperand(a2, 0));
|
| + __ ld(a5, FieldMemOperand(a2, 0));
|
| __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
|
| - __ Assert(eq, kExpectedAllocationSite, t1, Operand(at));
|
| + __ Assert(eq, kExpectedAllocationSite, a5, Operand(at));
|
| }
|
|
|
| // Save the resulting elements kind in type info. We can't just store a3
|
| // in the AllocationSite::transition_info field because elements kind is
|
| // restricted to a portion of the field...upper bits need to be left alone.
|
| STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
|
| - __ lw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
|
| - __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
|
| - __ sw(t0, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
|
| + __ ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
|
| + __ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
|
| + __ sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
|
|
|
|
|
| __ bind(&normal_sequence);
|
| @@ -5068,17 +5106,17 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
|
| // builtin Array functions which always have maps.
|
|
|
| // Initial map for the builtin Array function should be a map.
|
| - __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
|
| + __ ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
|
| // Will both indicate a NULL and a Smi.
|
| - __ SmiTst(t0, at);
|
| + __ SmiTst(a4, at);
|
| __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
|
| at, Operand(zero_reg));
|
| - __ GetObjectType(t0, t0, t1);
|
| + __ GetObjectType(a4, a4, a5);
|
| __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
|
| - t1, Operand(MAP_TYPE));
|
| + a5, Operand(MAP_TYPE));
|
|
|
| // We should either have undefined in a2 or a valid AllocationSite
|
| - __ AssertUndefinedOrAllocationSite(a2, t0);
|
| + __ AssertUndefinedOrAllocationSite(a2, a4);
|
| }
|
|
|
| Label no_info;
|
| @@ -5086,7 +5124,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
|
| __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
|
| __ Branch(&no_info, eq, a2, Operand(at));
|
|
|
| - __ lw(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
|
| + __ ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
|
| __ SmiUntag(a3);
|
| STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
|
| __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
|
| @@ -5109,7 +5147,7 @@ void InternalArrayConstructorStub::GenerateCase(
|
| if (IsFastPackedElementsKind(kind)) {
|
| // We might need to create a holey array
|
| // look at the first argument.
|
| - __ lw(at, MemOperand(sp, 0));
|
| + __ ld(at, MemOperand(sp, 0));
|
|
|
| InternalArraySingleArgumentConstructorStub
|
| stub1_holey(isolate(), GetHoleyElementsKind(kind));
|
| @@ -5134,18 +5172,18 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
|
| // builtin Array functions which always have maps.
|
|
|
| // Initial map for the builtin Array function should be a map.
|
| - __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
|
| + __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
|
| // Will both indicate a NULL and a Smi.
|
| __ SmiTst(a3, at);
|
| __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
|
| at, Operand(zero_reg));
|
| - __ GetObjectType(a3, a3, t0);
|
| + __ GetObjectType(a3, a3, a4);
|
| __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
|
| - t0, Operand(MAP_TYPE));
|
| + a4, Operand(MAP_TYPE));
|
| }
|
|
|
| // Figure out the right elements kind.
|
| - __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
|
| + __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
|
|
|
| // Load the map's "bit field 2" into a3. We only need the first byte,
|
| // but the following bit field extraction takes care of that anyway.
|
| @@ -5174,7 +5212,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
|
| void CallApiFunctionStub::Generate(MacroAssembler* masm) {
|
| // ----------- S t a t e -------------
|
| // -- a0 : callee
|
| - // -- t0 : call_data
|
| + // -- a4 : call_data
|
| // -- a2 : holder
|
| // -- a1 : api_function_address
|
| // -- cp : context
|
| @@ -5186,7 +5224,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
|
| // -----------------------------------
|
|
|
| Register callee = a0;
|
| - Register call_data = t0;
|
| + Register call_data = a4;
|
| Register holder = a2;
|
| Register api_function_address = a1;
|
| Register context = cp;
|
| @@ -5209,7 +5247,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
|
| // Save context, callee and call data.
|
| __ Push(context, callee, call_data);
|
| // Load context from callee.
|
| - __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
|
| + __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
|
|
|
| Register scratch = call_data;
|
| if (!call_data_undefined) {
|
| @@ -5235,17 +5273,17 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
|
| ASSERT(!api_function_address.is(a0) && !scratch.is(a0));
|
| // a0 = FunctionCallbackInfo&
|
| // Arguments is after the return address.
|
| - __ Addu(a0, sp, Operand(1 * kPointerSize));
|
| + __ Daddu(a0, sp, Operand(1 * kPointerSize));
|
| // FunctionCallbackInfo::implicit_args_
|
| - __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
|
| + __ sd(scratch, MemOperand(a0, 0 * kPointerSize));
|
| // FunctionCallbackInfo::values_
|
| - __ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
|
| - __ sw(at, MemOperand(a0, 1 * kPointerSize));
|
| + __ Daddu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
|
| + __ sd(at, MemOperand(a0, 1 * kPointerSize));
|
| // FunctionCallbackInfo::length_ = argc
|
| __ li(at, Operand(argc));
|
| - __ sw(at, MemOperand(a0, 2 * kPointerSize));
|
| + __ sd(at, MemOperand(a0, 2 * kPointerSize));
|
| // FunctionCallbackInfo::is_construct_call = 0
|
| - __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
|
| + __ sd(zero_reg, MemOperand(a0, 3 * kPointerSize));
|
|
|
| const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
|
| ExternalReference thunk_ref =
|
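The four FunctionCallbackInfo words the stub fills in (implicit_args_, values_, length_, is_construct_call) sit at consecutive kPointerSize slots, so each store widens to sd and every offset becomes a multiple of 8. An illustrative layout, with field names taken from the stub's own comments:

    #include <cstdint>

    // Illustrative only: four pointer-size slots as written by the stub.
    struct FunctionCallbackInfoSketch {
      uint64_t implicit_args;      // sd scratch,  0 * kPointerSize
      uint64_t values;             // sd at,       1 * kPointerSize
      uint64_t length;             // sd at,       2 * kPointerSize (argc)
      uint64_t is_construct_call;  // sd zero_reg, 3 * kPointerSize
    };

    static_assert(sizeof(FunctionCallbackInfoSketch) == 4 * 8,
                  "four 8-byte slots");
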
| @@ -5282,7 +5320,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
|
| Register api_function_address = a2;
|
|
|
| __ mov(a0, sp); // a0 = Handle<Name>
|
| - __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
|
| + __ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
|
|
|
| const int kApiStackSpace = 1;
|
| FrameScope frame_scope(masm, StackFrame::MANUAL);
|
| @@ -5290,8 +5328,8 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
|
|
|
| // Create PropertyAccessorInfo instance on the stack above the exit frame with
|
| // a1 (internal::Object** args_) as the data.
|
| - __ sw(a1, MemOperand(sp, 1 * kPointerSize));
|
| - __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
|
| + __ sd(a1, MemOperand(sp, 1 * kPointerSize));
|
| + __ Daddu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
|
|
|
| const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
|
|
|
| @@ -5309,4 +5347,4 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
|
|
|
| } } // namespace v8::internal
|
|
|
| -#endif // V8_TARGET_ARCH_MIPS
|
| +#endif // V8_TARGET_ARCH_MIPS64
|
|
|